1 #ifndef __PARTICULAR_ORDERINGS_CPP_ 2 #define __PARTICULAR_ORDERINGS_CPP_ 23 #include "system_constants.hpp" 26 #include "particular_orderings.hpp" 44 void * WGrevlex_Order_Data::operator
new(
size_t size) {
50 void WGrevlex_Order_Data::operator
delete(
void *t) {
60 const EXP_TYPE * a = t.
log();
61 const EXP_TYPE * b = u.
log();
62 for (NVAR_TYPE k = 0; k < n; ++k)
64 for (NVAR_TYPE k = 0; k < n; ++k)
66 bool searching = dtk == duk;
68 for (; searching and k < n; ++k) {
71 searching = dtk == duk;
83 const EXP_TYPE * a = t.
log();
84 const EXP_TYPE * b = u.
log();
85 for (NVAR_TYPE k = 0; k < n; ++k)
87 for (NVAR_TYPE k = 0; k < n; ++k)
89 bool searching = dtk == duk;
91 for (; searching and k < n; ++k) {
94 searching = dtk == duk;
107 const EXP_TYPE * a = t.
log();
108 const EXP_TYPE * b = u.
log();
109 const EXP_TYPE * c = v.
log();
110 for (NVAR_TYPE k = 0; k < n; ++k)
112 for (NVAR_TYPE k = 0; k < n; ++k)
114 for (NVAR_TYPE k = 0; k < n; ++k)
116 bool searching = dtk == duk + dvk;
118 for (; searching and k < n; ++k) {
122 searching = dtk == duk + dvk;
125 return dtk > duk + dvk;
132 n(num_vars), weights(wts), thorough_weighting(thorough)
145 const EXP_TYPE * a = t.
log();
146 const EXP_TYPE * b = u.
log();
147 for (NVAR_TYPE k = 0; k < n; ++k)
148 dtk += a[k] * weights[k];
149 for (NVAR_TYPE k = 0; k < n; ++k)
150 duk += b[k] * weights[k];
151 bool searching = dtk == duk;
153 for (; searching and k < n; ++k) {
154 dtk -= a[n - k - 1] * weights[n - k - 1];
155 duk -= b[n - k - 1] * weights[n - k - 1];
156 searching = dtk == duk;
167 const EXP_TYPE * a = t.
log();
168 const EXP_TYPE * b = u.
log();
169 for (NVAR_TYPE k = 0; k < n; ++k)
170 dtk += a[k] * weights[k];
171 for (NVAR_TYPE k = 0; k < n; ++k)
172 duk += b[k] * weights[k];
173 bool searching = dtk == duk;
175 for (; searching and k < n; ++k) {
176 dtk -= a[n - k - 1] * weights[n - k - 1];
177 duk -= b[n - k - 1] * weights[n - k - 1];
178 searching = dtk == duk;
190 const EXP_TYPE * a = t.
log();
191 const EXP_TYPE * b = u.
log();
192 const EXP_TYPE * c = v.
log();
193 for (NVAR_TYPE k = 0; k < n; ++k)
194 dtk += a[k] * weights[k];
195 for (NVAR_TYPE k = 0; k < n; ++k)
196 duk += b[k] * weights[k];
197 for (NVAR_TYPE k = 0; k < n; ++k)
198 duk += c[k] * weights[k];
199 bool searching = dtk == duk + dvk;
201 for (; searching and k < n; ++k) {
202 dtk -= a[n - k - 1] * weights[n - k - 1];
203 duk -= b[n - k - 1] * weights[n - k - 1];
204 dvk -= c[n - k - 1] * weights[n - k - 1];
205 searching = dtk == duk + dvk;
207 return dtk > duk + dvk;
212 for (NVAR_TYPE l = 0; l < number_of_gradings; ++l) {
214 gradings[number_of_gradings-l-1] = value;
221 const NVAR_TYPE n = number_of_gradings;
231 memcpy(gradings, other.
gradings, number_of_gradings*
sizeof(DEG_TYPE));
245 : n(number_of_variables)
250 bool still_tied =
true;
252 for (NVAR_TYPE k = 0; still_tied and k < n; ++k) {
253 DEG_TYPE dtk = partial_degree(t, k);
254 DEG_TYPE duk = partial_degree(u, k);
257 else if (dtk > duk) {
267 bool still_tied =
true;
269 for (NVAR_TYPE k = 0; still_tied and k < n; ++k) {
270 DEG_TYPE dtk = partial_degree(t, k);
271 DEG_TYPE duk = partial_degree(u, k);
274 else if (dtk < duk) {
285 bool still_tied =
true;
287 for (NVAR_TYPE k = 0; still_tied and k < n; ++k) {
288 DEG_TYPE dtk = partial_degree(t, k);
289 DEG_TYPE duk = partial_degree(u, k);
290 DEG_TYPE dvk = partial_degree(v, k);
293 else if (dtk > duk + dvk) {
311 for (NVAR_TYPE k = 0; k < n - i; ++k)
328 bool still_tied =
true;
330 const EXP_TYPE * a = t.
log();
331 const EXP_TYPE * b = u.
log();
332 for (NVAR_TYPE k = 0; still_tied and k < n; ++k) {
335 else if (a[k] > b[k]) {
344 bool still_tied =
true;
346 const EXP_TYPE * a = t.
log();
347 const EXP_TYPE * b = u.
log();
348 for (NVAR_TYPE k = 0; still_tied and k < n; ++k) {
351 else if (a[k] < b[k]) {
362 bool still_tied =
true;
364 const EXP_TYPE * a = t.
log();
365 const EXP_TYPE * b = u.
log();
366 const EXP_TYPE * c = v.
log();
367 for (NVAR_TYPE k = 0; still_tied and k < n; ++k) {
368 if (a[k] < b[k] + c[k])
370 else if (a[k] > b[k] + c[k]) {
384 const EXP_TYPE * a = t.
log();
385 for (NVAR_TYPE l = 0; l < number_of_gradings; ++l) {
386 value += w[l] * a[l];
387 gradings[number_of_gradings-l-1] = value;
392 : number_of_gradings(t.num_vars())
413 DEG_TYPE WGrevlex_Order_Data::operator [] (NVAR_TYPE i)
const {
418 NVAR_TYPE number_of_variables, WT_TYPE * w,
bool thorough
419 ) : n(number_of_variables), weights(w), fully_apply(thorough)
424 for (NVAR_TYPE k = 0; result == 0 and k < n; ++k) {
425 DEG_TYPE dtk = partial_degree(t, k);
426 DEG_TYPE duk = partial_degree(u, k);
436 bool still_tied =
true;
438 for (NVAR_TYPE k = 0; still_tied and k < n; ++k) {
439 DEG_TYPE dtk = partial_degree(t, k);
440 DEG_TYPE duk = partial_degree(u, k);
443 else if (dtk > duk) {
453 bool still_tied =
true;
455 for (NVAR_TYPE k = 0; still_tied and k < n; ++k) {
456 DEG_TYPE dtk = partial_degree(t, k);
457 DEG_TYPE duk = partial_degree(u, k);
460 else if (dtk < duk) {
475 bool still_tied =
true;
477 for (NVAR_TYPE k = 0; still_tied and k < n; ++k) {
478 DEG_TYPE dtk = partial_degree(t, k);
479 DEG_TYPE duk = partial_degree(u, k);
480 DEG_TYPE dvk = partial_degree(v, k);
483 else if (dtk > duk + dvk) {
501 const EXP_TYPE * a = t.
log();
503 for (NVAR_TYPE k = 0; k < n; ++k)
504 result += weights[k]*a[k];
506 for (NVAR_TYPE k = 0; k < n - i; ++k)
508 result += weights[k]*a[k];
521 ->assign_gradings(t);
524 const char * Nonsingular_Matrix_Ordering_Exception::what()
const throw() {
525 return "Nonsingular matrix supplied for matrix ordering";
540 bool nonsingular(NVAR_TYPE m, NVAR_TYPE n,
const WT_TYPE **A) {
541 bool possibly =
true;
543 long long ** M =
new long long * [m];
544 for (NVAR_TYPE i = 0; i < m; ++i) {
545 M[i] =
new long long [n];
546 for (NVAR_TYPE j = 0; j < n; ++j)
549 std::cout << m <<
',' << n << std::endl;
550 for (NVAR_TYPE i = 0; i < m; ++i) {
551 for (NVAR_TYPE j = 0; j < n; ++j)
552 std::cout << M[i][j] <<
' ';
553 std::cout << std::endl;
555 std::cout << std::endl;
556 for (NVAR_TYPE i = 0; possibly and i < m; ++i) {
558 bool searching =
true;
560 for (; searching and j < m; ++j)
565 possibly = not searching;
566 if (possibly and j < m) {
569 for (NVAR_TYPE k = i; k < n; ++k) {
570 long long temp = M[j][k];
576 for (j = i + 1; j < m; ++j) {
577 WT_TYPE header = M[j][i];
580 for (NVAR_TYPE k = i; k < n; ++k)
581 M[j][k] = M[j][k] * M[i][i] - M[i][k] * header;
586 std::cout << m <<
',' << n << std::endl;
587 for (NVAR_TYPE i = 0; i < m; ++i) {
588 for (NVAR_TYPE j = 0; j < n; ++j)
589 std::cout << M[i][j] <<
' ';
590 std::cout << std::endl;
592 std::cout << std::endl;
594 for (NVAR_TYPE i = 0; i < m; ++i)
603 NVAR_TYPE rows, NVAR_TYPE cols,
const WT_TYPE **data
604 ) : m(rows), n(cols), W(data) {
612 bool searching =
true;
613 const EXP_TYPE * a = t.
log();
614 const EXP_TYPE * b = u.
log();
615 for (NVAR_TYPE i = 0; searching and i < m; ++i) {
618 for (NVAR_TYPE j = 0; j < n; ++j) {
619 wt += W[i][j] * a[j];
620 wu += W[i][j] * b[j];
635 bool searching =
true;
636 const EXP_TYPE * a = t.
log();
637 const EXP_TYPE * b = u.
log();
638 for (NVAR_TYPE i = 0; searching and i < m; ++i) {
641 for (NVAR_TYPE j = 0; j < n; ++j) {
642 wt += W[i][j] * a[j];
643 wu += W[i][j] * b[j];
659 bool searching =
true;
660 const EXP_TYPE * a = t.
log();
661 const EXP_TYPE * b = u.
log();
662 const EXP_TYPE * c = v.
log();
663 for (NVAR_TYPE i = 0; searching and i < m; ++i) {
667 for (NVAR_TYPE j = 0; j < n; ++j) {
668 wt += W[i][j] * a[j];
669 wu += W[i][j] * b[j];
670 wv += W[i][j] * c[j];
674 else if (wt > wu + wv) {
virtual const WT_TYPE * order_weights() const override
the weights that define this ordering
void set_ordering_data(Monomial_Order_Data *mordat)
sets the Monomial_Order_Data associated with this Monomial
virtual bool first_larger_than_multiple(const Monomial &t, const Monomial &u, const Monomial &v) const override
returns true iff by weighted sums of successively fewer exponents
special memory pool allocator for Grevlex_Order_Data and WGrevlex_Order_Data
virtual bool first_smaller(const Monomial &t, const Monomial &u) const override
void assign_gradings(const Monomial &)
assigns gradings to a pre-allocated array
virtual void set_data(Monomial &t) const override
sets the Monomial’s monomial_ordering_data
virtual bool first_larger(const Monomial &t, const Monomial &u) const override
returns true iff by sums of successively fewer exponents
data for the grevlex monomial ordering
~WGrevlex_Order_Data()
deletes the array of partial weights
virtual void set_data(Monomial &t) const override
sets the Monomial’s monomial_ordering_data
const NVAR_TYPE number_of_gradings
length of gradings
virtual bool first_smaller(const Monomial &t, const Monomial &u) const override
returns true iff by weighted sums of successively fewer exponents
virtual bool first_larger_than_multiple(const Monomial &t, const Monomial &u, const Monomial &v) const override
returns true iff by sums of successively fewer exponents
~Grevlex_Order_Data()
deletes the array created by the constructor
virtual bool first_larger_than_multiple(const Monomial &t, const Monomial &u, const Monomial &v) const override
DEG_TYPE operator[](NVAR_TYPE i) const
returns the sum of the first i variables’ exponents
DEG_TYPE partial_degree(const Monomial &t, NVAR_TYPE i) const
virtual bool first_larger_than_multiple(const Monomial &, const Monomial &, const Monomial &) const override
returns true iff the first Monomial is larger than the product of the second and the third ...
TYPE * get_new_block()
allocates and returns a block of memory
virtual bool first_larger(const Monomial &, const Monomial &) const override
returns true iff the first Monomial is larger than the second
exceptions for Matrix_Ordering
CachedWGrevlex_Ordering(NVAR_TYPE number_of_variables, WT_TYPE *w, bool thorough=true)
creates a weighted grevlex ordering specific to variables, using the weights specified by ...
virtual bool first_smaller(const Monomial &, const Monomial &) const override
returns true iff the first Monomial is smaller than the second
Lex_Ordering(NVAR_TYPE number_of_variables)
creates a lex ordering specific to variables
DEG_TYPE * gradings
array of partial weighted sums of exponents
Grading_Order_Data_Allocator< WGrevlex_Order_Data > * woda
Memory manager for graded orderings (not their data; see goda for that).
Matrix_Ordering(NVAR_TYPE rows, NVAR_TYPE cols, const WT_TYPE **data)
checks that data defines a nonsingular matrix, and sets things up
DEG_TYPE partial_degree(const Monomial &t, NVAR_TYPE i) const
virtual bool first_larger(const Monomial &t, const Monomial &u) const override
returns true iff by sums of successively fewer exponents
WGrevlex(NVAR_TYPE, WT_TYPE *, bool=true)
Creates a grevlex ordering specific to the specified number of variables, with the given weights...
virtual bool first_smaller(const Monomial &t, const Monomial &u) const override
NVAR_TYPE num_vars() const
number of variables
WGrevlex_Order_Data(Monomial &t)
creates an array of partial weights of t
Grevlex_Order_Data(const Monomial &t)
creates an array of partial weights of t
virtual bool first_larger_than_multiple(const Monomial &t, const Monomial &u, const Monomial &v) const override
virtual bool first_larger(const Monomial &t, const Monomial &u) const override
Implementation of monomials.
virtual int cmp(const Monomial &, const Monomial &) const override
returns 0 if they are alike; positive if the first is larger; negative otherwise
interface to a monomial ordering
Grading_Order_Data_Allocator< WT_TYPE > * goda
Memory manager for graded ordering data.
bool nonsingular(NVAR_TYPE m, NVAR_TYPE n, const WT_TYPE **A)
verifies that a matrix supplied for an ordering is nonsingular
DEG_TYPE degree(NVAR_TYPE i) const
Degree of the ith variable.
virtual Grevlex_Order_Data * clone() override
clone “constructor”
const EXP_TYPE * log() const
Direct access to the exponents, for whatever reason.
generic grevlex ordering, works with any number of variables
Monomial_Order_Data * monomial_ordering_data() const
the Monomial_Order_Data associated with this Monomial
const Monomial_Ordering * monomial_ordering() const
the Monomial_Ordering associated with this Monomial
DEG_TYPE * gradings
list of partial sums of exponents
virtual bool first_larger_than_multiple(const Monomial &t, const Monomial &u, const Monomial &v) const override
virtual WGrevlex_Order_Data * clone() override
clone constructor
virtual void set_data(Monomial &t) const override
sets the Monomial’s monomial_ordering_data
void return_used_block(TYPE *freed_block)
returns a block of memory that is no longer needed to the pool
virtual bool first_smaller(const Monomial &t, const Monomial &u) const override
returns true iff by sums of successively fewer exponents
void assign_gradings(Monomial &t)
assigns gradings to a pre-allocated array
DEG_TYPE compute_ith_weight(const Monomial &t, NVAR_TYPE i) const
computes the sum of the first i exponents
virtual bool first_larger(const Monomial &t, const Monomial &u) const override
data for the weighted grevlex monomial ordering
the weighted grevlex ordering for a specified number of variables, with cached weights for each monom...
virtual bool first_larger(const Monomial &t, const Monomial &u) const override
returns true iff by weighted sums of successively fewer exponents
virtual bool first_smaller(const Monomial &t, const Monomial &u) const override
returns true iff by sums of successively fewer exponents
const NVAR_TYPE number_of_gradings
length of gradings
virtual DEG_TYPE grading(NVAR_TYPE) const
default value is useless; orderings that supply gradings should redefine
Grevlex_Ordering(NVAR_TYPE number_of_variables)
creates a grevlex ordering specific to the specified number of variables
DEG_TYPE compute_ith_weight(const Monomial &t, NVAR_TYPE i) const