X-Git-Url: https://mattmccutchen.net/bigint/bigint.git/blobdiff_plain/3aaa5ce601b009373fa7464f6de044f7b2e017ea..5ff40cf5d6e822051da902b041ae7ae8f545123e:/BigUnsigned.cc

diff --git a/BigUnsigned.cc b/BigUnsigned.cc
index ad96026..f6a925c 100644
--- a/BigUnsigned.cc
+++ b/BigUnsigned.cc
@@ -218,11 +218,37 @@ BigUnsigned::CmpRes BigUnsigned::compareTo(const BigUnsigned &x) const {
  * See Section 4.3.1 of Knuth's ``The Art of Computer Programming''.
  */
 
+/*
+ * On most calls to put-here operations, it's safe to read the inputs little by
+ * little and write the outputs little by little. However, if one of the
+ * inputs is coming from the same variable into which the output is to be
+ * stored (an "aliased" call), we risk overwriting the input before we read it.
+ * In this case, we first compute the result into a temporary BigUnsigned
+ * variable and then copy it into the requested output variable *this.
+ * Each put-here operation uses the DTRT_ALIASED macro (Do The Right Thing on
+ * aliased calls) to generate code for this check.
+ *
+ * I adopted this approach on 2007.02.13 (see Assignment Operators in
+ * BigUnsigned.hh). Before then, put-here operations rejected aliased calls
+ * with an exception. I think doing the right thing is better.
+ *
+ * Some of the put-here operations can probably handle aliased calls safely
+ * without the extra copy because (for example) they process blocks strictly
+ * right-to-left. At some point I might determine which ones don't need the
+ * copy, but my reasoning would need to be verified very carefully. For now
+ * I'll leave in the copy.
+ */
+#define DTRT_ALIASED(cond, op) \
+	if (cond) { \
+		BigUnsigned tmpThis; \
+		tmpThis.op; \
+		*this = tmpThis; \
+		return; \
+	}
+
 // Addition
 void BigUnsigned::add(const BigUnsigned &a, const BigUnsigned &b) {
-	// Block unsafe calls
-	if (this == &a || this == &b)
-		throw "BigUnsigned::add: One of the arguments is the invoked object";
+	DTRT_ALIASED(this == &a || this == &b, add(a, b));
 	// If one argument is zero, copy the other.
 	if (a.len == 0) {
 		operator =(b);
@@ -283,9 +309,7 @@ void BigUnsigned::add(const BigUnsigned &a, const BigUnsigned &b) {
 
 // Subtraction
 void BigUnsigned::subtract(const BigUnsigned &a, const BigUnsigned &b) {
-	// Block unsafe calls
-	if (this == &a || this == &b)
-		throw "BigUnsigned::subtract: One of the arguments is the invoked object";
+	DTRT_ALIASED(this == &a || this == &b, subtract(a, b));
 	// If b is zero, copy a. If a is shorter than b, the result is negative.
 	if (b.len == 0) {
 		operator =(a);
@@ -397,9 +421,7 @@ inline BigUnsigned::Blk getShiftedBlock(const BigUnsigned &num,
 
 // Multiplication
 void BigUnsigned::multiply(const BigUnsigned &a, const BigUnsigned &b) {
-	// Block unsafe calls
-	if (this == &a || this == &b)
-		throw "BigUnsigned::multiply: One of the arguments is the invoked object";
+	DTRT_ALIASED(this == &a || this == &b, multiply(a, b));
 	// If either a or b is zero, set to zero.
 	if (a.len == 0 || b.len == 0) {
 		len = 0;
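To make the macro concrete, here is the guard at the top of add() with DTRT_ALIASED(this == &a || this == &b, add(a, b)) expanded by hand (a sketch of the preprocessor's output, not text from the patch):

	void BigUnsigned::add(const BigUnsigned &a, const BigUnsigned &b) {
		if (this == &a || this == &b) {   // aliased call detected
			BigUnsigned tmpThis;
			tmpThis.add(a, b);        // recurse: tmpThis aliases neither input
			*this = tmpThis;          // one whole-value copy back into *this
			return;
		}
		// ... the non-aliased, block-by-block addition proceeds as before ...
	}

The recursive call is safe because tmpThis is a fresh local: the aliasing condition is false on reentry, so the recursion is at most one level deep. The upshot for callers is that a formerly fatal call such as x.add(x, x) now simply doubles x.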
@@ -483,12 +505,29 @@ void BigUnsigned::multiply(const BigUnsigned &a, const BigUnsigned &b) {
  * and provide outputs in the most convenient places so that no value ever needs
  * to be copied in its entirety. That way, the client can perform exactly the
  * copying it needs depending on where the inputs are and where it wants the output.
+ * A better name for this function might be "modWithQuotient", but I would rather
+ * not change the name now.
  */
 void BigUnsigned::divideWithRemainder(const BigUnsigned &b, BigUnsigned &q) {
-	// Block unsafe calls
-	if (this == &b || &q == &b || this == &q)
-		throw "BigUnsigned::divideWithRemainder: Some two objects involved are the same";
-	
+	/*
+	 * Defending against aliased calls is a bit tricky because we are
+	 * writing to both *this and q.
+	 *
+	 * It would be silly to try to write quotient and remainder to the
+	 * same variable. Rule that out right away.
+	 */
+	if (this == &q)
+		throw "BigUnsigned::divideWithRemainder: Cannot write quotient and remainder into the same variable";
+	/*
+	 * Now *this and q are separate, so the only concern is that b might be
+	 * aliased to one of them. If so, use a temporary copy of b.
+	 */
+	if (this == &b || &q == &b) {
+		BigUnsigned tmpB(b);
+		divideWithRemainder(tmpB, q);
+		return;
+	}
+
 	/*
 	 * Note that the mathematical definition of mod (I'm trusting Knuth) is somewhat
 	 * different from the way the normal C++ % operator behaves in the case of division by 0.
@@ -502,7 +541,7 @@ void BigUnsigned::divideWithRemainder(const BigUnsigned &b, BigUnsigned &q) {
 		q.len = 0;
 		return;
 	}
-	
+
 	/*
 	 * If *this.len < b.len, then *this < b, and we can be sure that b doesn't go into
 	 * *this at all. The quotient is 0 and *this is already the remainder (so leave it alone).
@@ -511,11 +550,11 @@ void BigUnsigned::divideWithRemainder(const BigUnsigned &b, BigUnsigned &q) {
 		q.len = 0;
 		return;
 	}
-	
+
 	/*
 	 * At this point we know *this > b > 0. (Whew!)
 	 */
-	
+
 	/*
 	 * Overall method:
 	 *
@@ -547,7 +586,7 @@ void BigUnsigned::divideWithRemainder(const BigUnsigned &b, BigUnsigned &q) {
 	unsigned int i2;
 	Blk temp;
 	bool borrowIn, borrowOut;
-	
+
 	/*
 	 * Make sure we have an extra zero block just past the value.
 	 *
@@ -566,17 +605,17 @@ void BigUnsigned::divideWithRemainder(const BigUnsigned &b, BigUnsigned &q) {
 	allocateAndCopy(len + 1); // Get the space.
 	len++; // Increase the length.
 	blk[origLen] = 0; // Zero the extra block.
-	
+
 	// work2 holds part of the result of a subtraction; see above.
 	Blk *work2 = new Blk[len];
-	
+
 	// Set preliminary length for quotient and make room
 	q.len = origLen - b.len + 1;
 	q.allocate(q.len);
 	// Zero out the quotient
 	for (i = 0; i < q.len; i++)
 		q.blk[i] = 0;
-	
+
 	// For each possible left-shift of b in blocks...
 	i = q.len;
 	while (i > 0) {
@@ -639,7 +678,7 @@ void BigUnsigned::divideWithRemainder(const BigUnsigned &b, BigUnsigned &q) {
 	// Deallocate temporary array.
 	// (Thanks to Brad Spencer for noticing my accidental omission of this!)
 	delete [] work2;
-	
+
 }
 /*
  * The out-of-bounds accesses story:
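The aliasing rules above are subtle enough to deserve a usage sketch. The following is illustrative, not from the patch; it assumes only the integer-taking constructors and the divideWithRemainder member declared in BigUnsigned.hh:

	#include "BigUnsigned.hh"

	int main() {
		BigUnsigned n(1234), d(100), q;
		n.divideWithRemainder(d, q);
		// q == 12, n == 34: the quotient goes to q and *this becomes the remainder.

		BigUnsigned m(50);
		m.divideWithRemainder(m, q);
		// b aliased to *this is now legal thanks to the temporary copy of b:
		// m becomes 50 % 50 == 0, and q becomes 1.

		// m.divideWithRemainder(d, m); // still throws: quotient and remainder
		//                              // cannot share a variable.

		BigUnsigned z(7), zero;
		z.divideWithRemainder(zero, q);
		// Division by zero follows the mathematical mod (per the comment above):
		// q becomes 0 and z keeps its value 7 as the remainder.
		return 0;
	}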
@@ -684,9 +723,7 @@ void BigUnsigned::divideWithRemainder(const BigUnsigned &b, BigUnsigned &q) {
 
 // Bitwise and
 void BigUnsigned::bitAnd(const BigUnsigned &a, const BigUnsigned &b) {
-	// Block unsafe calls
-	if (this == &a || this == &b)
-		throw "BigUnsigned::bitAnd: One of the arguments is the invoked object";
+	DTRT_ALIASED(this == &a || this == &b, bitAnd(a, b));
 	len = (a.len >= b.len) ? b.len : a.len;
 	allocate(len);
 	Index i;
@@ -697,9 +734,7 @@ void BigUnsigned::bitAnd(const BigUnsigned &a, const BigUnsigned &b) {
 
 // Bitwise or
 void BigUnsigned::bitOr(const BigUnsigned &a, const BigUnsigned &b) {
-	// Block unsafe calls
-	if (this == &a || this == &b)
-		throw "BigUnsigned::bitOr: One of the arguments is the invoked object";
+	DTRT_ALIASED(this == &a || this == &b, bitOr(a, b));
 	Index i;
 	const BigUnsigned *a2, *b2;
 	if (a.len >= b.len) {
@@ -719,9 +754,7 @@ void BigUnsigned::bitOr(const BigUnsigned &a, const BigUnsigned &b) {
 
 // Bitwise xor
 void BigUnsigned::bitXor(const BigUnsigned &a, const BigUnsigned &b) {
-	// Block unsafe calls
-	if (this == &a || this == &b)
-		throw "BigUnsigned::bitXor: One of the arguments is the invoked object";
+	DTRT_ALIASED(this == &a || this == &b, bitXor(a, b));
 	Index i;
 	const BigUnsigned *a2, *b2;
 	if (a.len >= b.len) {
@@ -740,6 +773,51 @@ void BigUnsigned::bitXor(const BigUnsigned &a, const BigUnsigned &b) {
 	zapLeadingZeros();
 }
 
+// Bitwise shift left
+void BigUnsigned::bitShiftLeft(const BigUnsigned &a, unsigned int b) {
+	DTRT_ALIASED(this == &a, bitShiftLeft(a, b));
+	Index shiftBlocks = b / N;
+	unsigned int shiftBits = b % N;
+	// + 1: room for high bits nudged left into another block
+	len = a.len + shiftBlocks + 1;
+	allocate(len);
+	Index i, j;
+	for (i = 0; i < shiftBlocks; i++)
+		blk[i] = 0;
+	for (j = 0, i = shiftBlocks; j <= a.len; j++, i++)
+		blk[i] = getShiftedBlock(a, j, shiftBits);
+	// Zap possible leading zero
+	if (blk[len - 1] == 0)
+		len--;
+}
+
+// Bitwise shift right
+void BigUnsigned::bitShiftRight(const BigUnsigned &a, unsigned int b) {
+	DTRT_ALIASED(this == &a, bitShiftRight(a, b));
+	// This calculation is wacky, but expressing the shift as a left bit shift
+	// within each block lets us use getShiftedBlock.
+	Index rightShiftBlocks = (b + N - 1) / N;
+	unsigned int leftShiftBits = N * rightShiftBlocks - b;
+	// Now (N * rightShiftBlocks - leftShiftBits) == b
+	// and 0 <= leftShiftBits < N.
+	if (rightShiftBlocks >= a.len + 1) {
+		// All of a is guaranteed to be shifted off, even considering the left
+		// bit shift.
+		len = 0;
+		return;
+	}
+	// Now we're allocating a positive amount.
+	// + 1: room for high bits nudged left into another block
+	len = a.len + 1 - rightShiftBlocks;
+	allocate(len);
+	Index i, j;
+	for (j = rightShiftBlocks, i = 0; j <= a.len; j++, i++)
+		blk[i] = getShiftedBlock(a, j, leftShiftBits);
+	// Zap possible leading zero
+	if (blk[len - 1] == 0)
+		len--;
+}
+
 // INCREMENT/DECREMENT OPERATORS
 
 // Prefix increment
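The index arithmetic in bitShiftRight is easy to misread, so here is a standalone sanity check of the identity it relies on. N is fixed at 32 bits per block purely for illustration (the library derives N from its Blk type), and getShiftedBlock is emulated inline under the assumption that it returns block x of (num << y):

	#include <cassert>
	#include <cstdint>

	int main() {
		const unsigned int N = 32;                              // bits per block (illustrative)
		const unsigned int b = 35;                              // total right-shift amount
		unsigned int rightShiftBlocks = (b + N - 1) / N;        // == 2 whole blocks
		unsigned int leftShiftBits = N * rightShiftBlocks - b;  // == 29
		assert(N * rightShiftBlocks - leftShiftBits == b);
		assert(leftShiftBits < N);

		// A two-block number, also viewed as one 64-bit value for checking.
		uint32_t blk[2] = { 0xDEADBEEFu, 0x12345678u };
		uint64_t value = ((uint64_t)blk[1] << 32) | blk[0];

		// Emulate getShiftedBlock(a, j, leftShiftBits) for j == rightShiftBlocks == 2:
		// block j of (a << leftShiftBits), where blk[2], just past the value, is zero.
		uint32_t part1 = (leftShiftBits == 0) ? 0 : (blk[1] >> (N - leftShiftBits));
		uint32_t part2 = 0;                              // the imaginary blk[2]
		uint32_t out0 = part1 | (part2 << leftShiftBits);

		// Dropping rightShiftBlocks whole blocks while shifting left by
		// leftShiftBits within blocks equals a plain right shift by b bits.
		assert(out0 == (uint32_t)(value >> b));
		return 0;
	}

The decomposition matters because a right shift by b bits generally straddles block boundaries; rewriting it as "drop rightShiftBlocks blocks, then shift left leftShiftBits within blocks" puts it in exactly the form getShiftedBlock produces.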