#include "BigUnsigned.hh"

// Memory management definitions have moved to the bottom of NumberlikeArray.hh.

// The templates used by these constructors and converters are at the bottom of
// BigUnsigned.hh.

BigUnsigned::BigUnsigned(unsigned long  x) { initFromPrimitive      (x); }
BigUnsigned::BigUnsigned(unsigned int   x) { initFromPrimitive      (x); }
BigUnsigned::BigUnsigned(unsigned short x) { initFromPrimitive      (x); }
BigUnsigned::BigUnsigned(         long  x) { initFromSignedPrimitive(x); }
BigUnsigned::BigUnsigned(         int   x) { initFromSignedPrimitive(x); }
BigUnsigned::BigUnsigned(         short x) { initFromSignedPrimitive(x); }

unsigned long  BigUnsigned::toUnsignedLong () const { return convertToPrimitive      <unsigned long >(); }
unsigned int   BigUnsigned::toUnsignedInt  () const { return convertToPrimitive      <unsigned int  >(); }
unsigned short BigUnsigned::toUnsignedShort() const { return convertToPrimitive      <unsigned short>(); }
long           BigUnsigned::toLong         () const { return convertToSignedPrimitive<long          >(); }
int            BigUnsigned::toInt          () const { return convertToSignedPrimitive<int           >(); }
short          BigUnsigned::toShort        () const { return convertToSignedPrimitive<short         >(); }

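/* Illustrative usage sketch (not part of the library): constructing from and
 * converting back to built-in types via the templates mentioned above.
 *
 *     BigUnsigned a(1000);                  // int -> BigUnsigned (initFromSignedPrimitive)
 *     unsigned long u = a.toUnsignedLong(); // BigUnsigned -> primitive; the converters
 *                                           // are expected to throw if the value does not fit.
 */
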
// COMPARISON
BigUnsigned::CmpRes BigUnsigned::compareTo(const BigUnsigned &x) const {
    // A bigger length implies a bigger number.
    if (len < x.len)
        return less;
    else if (len > x.len)
        return greater;
    else {
        // Compare blocks one by one from left to right.
        Index i = len;
        while (i > 0) {
            i--;
            if (blk[i] == x.blk[i])
                continue;
            else if (blk[i] > x.blk[i])
                return greater;
            else
                return less;
        }
        // If no blocks differed, the numbers are equal.
        return equal;
    }
}

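/* Illustrative sketch (not part of the library): because leading zero blocks
 * are always zapped, a value with more blocks is necessarily larger, which is
 * why compareTo can decide on length alone before looking at any blocks.
 *
 *     BigUnsigned a(42), b(7);
 *     // Both fit in one block, so the block values decide:
 *     // a.compareTo(b) == BigUnsigned::greater
 */
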
// COPY-LESS OPERATIONS

/*
 * On most calls to copy-less operations, it's safe to read the inputs little by
 * little and write the outputs little by little. However, if one of the
 * inputs is coming from the same variable into which the output is to be
 * stored (an "aliased" call), we risk overwriting the input before we read it.
 * In this case, we first compute the result into a temporary BigUnsigned
 * variable and then copy it into the requested output variable *this.
 * Each put-here operation uses the DTRT_ALIASED macro (Do The Right Thing on
 * aliased calls) to generate code for this check.
 *
 * I adopted this approach on 2007.02.13 (see Assignment Operators in
 * BigUnsigned.hh). Before then, put-here operations rejected aliased calls
 * with an exception. I think doing the right thing is better.
 *
 * Some of the put-here operations can probably handle aliased calls safely
 * without the extra copy because (for example) they process blocks strictly
 * right-to-left. At some point I might determine which ones don't need the
 * copy, but my reasoning would need to be verified very carefully. For now
 * I'll leave in the copy.
 */
#define DTRT_ALIASED(cond, op) \
    if (cond) { \
        BigUnsigned tmpThis; \
        tmpThis.op; \
        *this = tmpThis; \
        return; \
    }


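/* Illustrative sketch (not part of the library): an "aliased" call is one in
 * which the output object is also an input, e.g.
 *
 *     BigUnsigned x(3), y(4);
 *     x.add(x, y);   // *this is also the input `a'
 *
 * DTRT_ALIASED(this == &a || this == &b, add(a, b)) detects the overlap,
 * computes the sum into a temporary BigUnsigned, assigns it to x, and
 * returns, so x ends up holding 7 rather than a half-overwritten input.
 */
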
void BigUnsigned::add(const BigUnsigned &a, const BigUnsigned &b) {
    DTRT_ALIASED(this == &a || this == &b, add(a, b));
    // If one argument is zero, copy the other.
    if (a.len == 0) {
        operator =(b);
        return;
    } else if (b.len == 0) {
        operator =(a);
        return;
    }
    // Some variables...
    // Carries in and out of an addition stage
    bool carryIn, carryOut;
    Blk temp;
    Index i;
    // a2 points to the longer input, b2 points to the shorter
    const BigUnsigned *a2, *b2;
    if (a.len >= b.len) {
        a2 = &a;
        b2 = &b;
    } else {
        a2 = &b;
        b2 = &a;
    }
    // Set preliminary length and make room in this BigUnsigned
    len = a2->len + 1;
    allocate(len);
    // For each block index that is present in both inputs...
    for (i = 0, carryIn = false; i < b2->len; i++) {
        // Add input blocks
        temp = a2->blk[i] + b2->blk[i];
        // If a rollover occurred, the result is less than either input.
        // This test is used many times in the BigUnsigned code.
        carryOut = (temp < a2->blk[i]);
        // If a carry was input, handle it
        if (carryIn) {
            temp++;
            carryOut |= (temp == 0);
        }
        blk[i] = temp; // Save the addition result
        carryIn = carryOut; // Pass the carry along
    }
    // If there is a carry left over, increase blocks until
    // one does not roll over.
    for (; i < a2->len && carryIn; i++) {
        temp = a2->blk[i] + 1;
        carryIn = (temp == 0);
        blk[i] = temp;
    }
    // If the carry was resolved but the larger number
    // still has blocks, copy them over.
    for (; i < a2->len; i++)
        blk[i] = a2->blk[i];
    // Set the extra block if there's still a carry, decrease length otherwise
    if (carryIn)
        blk[i] = 1;
    else
        len--;
}

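/* Illustrative sketch (not part of the library) of the rollover test used in
 * `add' above, pretending blocks are 8 bits wide (the real Blk is wider):
 *
 *     0xF0 + 0x20 == 0x10 (mod 256); 0x10 <  0xF0, so a carry occurred.
 *     0x30 + 0x20 == 0x50;           0x50 >= 0x30, so no carry.
 *
 * Because unsigned arithmetic wraps modulo 2^N, "sum < addend" holds exactly
 * when the true sum did not fit in one block.
 */
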
void BigUnsigned::subtract(const BigUnsigned &a, const BigUnsigned &b) {
    DTRT_ALIASED(this == &a || this == &b, subtract(a, b));
    if (b.len == 0) {
        // If b is zero, copy a.
        operator =(a);
        return;
    } else if (a.len < b.len)
        // If a is shorter than b, the result is negative.
        throw "BigUnsigned::subtract: "
            "Negative result in unsigned calculation";
    // Some variables...
    bool borrowIn, borrowOut;
    Blk temp;
    Index i;
    // Set preliminary length and make room
    len = a.len;
    allocate(len);
    // For each block index that is present in both inputs...
    for (i = 0, borrowIn = false; i < b.len; i++) {
        temp = a.blk[i] - b.blk[i];
        // If a reverse rollover occurred,
        // the result is greater than the block from a.
        borrowOut = (temp > a.blk[i]);
        // Handle an incoming borrow
        if (borrowIn) {
            borrowOut |= (temp == 0);
            temp--;
        }
        blk[i] = temp; // Save the subtraction result
        borrowIn = borrowOut; // Pass the borrow along
    }
    // If there is a borrow left over, decrease blocks until
    // one does not reverse rollover.
    for (; i < a.len && borrowIn; i++) {
        borrowIn = (a.blk[i] == 0);
        blk[i] = a.blk[i] - 1;
    }
    /* If there's still a borrow, the result is negative.
     * Throw an exception, but zero out this object so as to leave it in a
     * predictable state. */
    if (borrowIn) {
        len = 0;
        throw "BigUnsigned::subtract: Negative result in unsigned calculation";
    } else
        // Copy over the rest of the blocks
        for (; i < a.len; i++)
            blk[i] = a.blk[i];
    // Zap leading zeros
    zapLeadingZeros();
}

/*
 * About the multiplication and division algorithms:
 *
 * I searched unsuccessfully for fast C++ built-in operations like the `b_0'
 * and `c_0' Knuth describes in Section 4.3.1 of ``The Art of Computer
 * Programming'' (replace `place' by `Blk'):
 *
 *    ``b_0[:] multiplication of a one-place integer by another one-place
 *      integer, giving a two-place answer;
 *
 *    ``c_0[:] division of a two-place integer by a one-place integer,
 *      provided that the quotient is a one-place integer, and yielding
 *      also a one-place remainder.''
 *
 * I also missed his note that ``[b]y adjusting the word size, if
 * necessary, nearly all computers will have these three operations
 * available'', so I gave up on trying to use algorithms similar to his.
 * A future version of the library might include such algorithms; I
 * would welcome contributions from others for this.
 *
 * I eventually decided to use bit-shifting algorithms. To multiply `a'
 * and `b', we zero out the result. Then, for each `1' bit in `a', we
 * shift `b' left the appropriate amount and add it to the result.
 * Similarly, to divide `a' by `b', we shift `b' left varying amounts,
 * repeatedly trying to subtract it from `a'. When we succeed, we note
 * the fact by setting a bit in the quotient. While these algorithms
 * have the same O(n^2) time complexity as Knuth's, the ``constant factor''
 * is likely to be larger.
 *
 * Because I used these algorithms, which require single-block addition
 * and subtraction rather than single-block multiplication and division,
 * the innermost loops of all four routines are very similar. Study one
 * of them and all will become clear.
 */

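/* Illustrative sketch (not part of the library) of the shift-and-add method
 * described above, on tiny numbers:
 *
 *     a = 5 (binary 101), b = 6 (binary 110)
 *     result = 0
 *     bit 0 of a is 1:  result += (b << 0) = 6   -> result = 6
 *     bit 1 of a is 0:  skip
 *     bit 2 of a is 1:  result += (b << 2) = 24  -> result = 30
 *
 * which is indeed 5 * 6. `multiply' below does exactly this, except that the
 * shifts are decomposed into whole blocks (i) plus leftover bits (i2).
 */
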
/*
 * This is a little inline function used by both the multiplication
 * routine and the division routine.
 *
 * `getShiftedBlock' returns the `x'th block of `num << y'.
 * `y' may be anything from 0 to N - 1, and `x' may be anything from
 * 0 to `num.len'.
 *
 * Two things contribute to this block:
 *
 * (1) The `N - y' low bits of `num.blk[x]', shifted `y' bits left.
 *
 * (2) The `y' high bits of `num.blk[x-1]', shifted `N - y' bits right.
 *
 * But we must be careful if `x == 0' or `x == num.len', in
 * which case we should use 0 instead of (2) or (1), respectively.
 *
 * If `y == 0', then (2) contributes 0, as it should. However,
 * in some computer environments, for a reason I cannot understand,
 * `a >> b' means `a >> (b % N)'. This means `num.blk[x-1] >> (N - y)'
 * will return `num.blk[x-1]' instead of the desired 0 when `y == 0';
 * the test `y == 0' handles this case specially.
 */
inline BigUnsigned::Blk getShiftedBlock(const BigUnsigned &num,
    BigUnsigned::Index x, unsigned int y) {
    BigUnsigned::Blk part1 = (x == 0 || y == 0) ? 0 : (num.blk[x - 1] >> (BigUnsigned::N - y));
    BigUnsigned::Blk part2 = (x == num.len) ? 0 : (num.blk[x] << y);
    return part1 | part2;
}

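/* Illustrative sketch (not part of the library), pretending N == 8 so the
 * blocks are bytes. Let num have blocks (low to high) {0xAB, 0xCD} and take
 * y == 4, so num << 4 has blocks {0xB0, 0xDA, 0x0C}:
 *
 *     getShiftedBlock(num, 0, 4) == 0xB0   (low 4 bits of blk[0] moved up; x == 0)
 *     getShiftedBlock(num, 1, 4) == 0xDA   (high 4 bits of blk[0] | low 4 bits of blk[1] moved up)
 *     getShiftedBlock(num, 2, 4) == 0x0C   (only the high bits of blk[1]; x == num.len)
 */
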
void BigUnsigned::multiply(const BigUnsigned &a, const BigUnsigned &b) {
    DTRT_ALIASED(this == &a || this == &b, multiply(a, b));
    // If either a or b is zero, set to zero.
    if (a.len == 0 || b.len == 0) {
        len = 0;
        return;
    }
    /*
     * Overall method:
     *
     * Set this = 0.
     * For each 1-bit of `a' (say the `i2'th bit of block `i'):
     *    Add `b << (i blocks and i2 bits)' to *this.
     */
    // Variables for the calculation
    Index i, j, k;
    unsigned int i2;
    Blk temp;
    bool carryIn, carryOut;
    // Set preliminary length and make room
    len = a.len + b.len;
    allocate(len);
    // Zero out this object
    for (i = 0; i < len; i++)
        blk[i] = 0;
    // For each block of the first number...
    for (i = 0; i < a.len; i++) {
        // For each 1-bit of that block...
        for (i2 = 0; i2 < N; i2++) {
            if ((a.blk[i] & (Blk(1) << i2)) == 0)
                continue;
            /*
             * Add b to this, shifted left i blocks and i2 bits.
             * j is the index in b, and k = i + j is the index in this.
             *
             * `getShiftedBlock', a short inline function defined above,
             * is now used for the bit handling. It replaces the more
             * complex `bHigh' code, in which each run of the loop dealt
             * immediately with the low bits and saved the high bits to
             * be picked up next time. The last run of the loop used to
             * leave leftover high bits, which were handled separately.
             * Instead, this loop runs an additional time with j == b.len.
             * These changes were made on 2005.01.11.
             */
            for (j = 0, k = i, carryIn = false; j <= b.len; j++, k++) {
                /*
                 * The body of this loop is very similar to the body of the first loop
                 * in `add', except that this loop does a `+=' instead of a `+'.
                 */
                temp = blk[k] + getShiftedBlock(b, j, i2);
                carryOut = (temp < blk[k]);
                if (carryIn) {
                    temp++;
                    carryOut |= (temp == 0);
                }
                blk[k] = temp;
                carryIn = carryOut;
            }
            // No more extra iteration to deal with `bHigh'.
            // Roll-over a carry as necessary.
            for (; carryIn; k++) {
                blk[k]++;
                carryIn = (blk[k] == 0);
            }
        }
    }
    // Zap possible leading zero
    if (blk[len - 1] == 0)
        len--;
}

/*
 * DIVISION WITH REMAINDER
 * This monstrous function mods *this by the given divisor b while storing the
 * quotient in the given object q; at the end, *this contains the remainder.
 * The seemingly bizarre pattern of inputs and outputs was chosen so that the
 * function copies as little as possible (since it is implemented by repeated
 * subtraction of multiples of b from *this).
 *
 * "modWithQuotient" might be a better name for this function, but I would
 * rather not change the name now.
 */
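/* Illustrative usage sketch (not part of the library):
 *
 *     BigUnsigned n(47), d(5), q;
 *     n.divideWithRemainder(d, q);
 *     // Now q holds the quotient 9 and n has been overwritten with the
 *     // remainder 2; the original value of n is gone unless copied first.
 */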
void BigUnsigned::divideWithRemainder(const BigUnsigned &b, BigUnsigned &q) {
    /* Defending against aliased calls is more complex than usual because we
     * are writing to both *this and q.
     *
     * It would be silly to try to write quotient and remainder to the
     * same variable. Rule that out right away. */
    if (this == &q)
        throw "BigUnsigned::divideWithRemainder: Cannot write quotient and remainder into the same variable";
    /* Now *this and q are separate, so the only concern is that b might be
     * aliased to one of them. If so, use a temporary copy of b. */
    if (this == &b || &q == &b) {
        BigUnsigned tmpB(b);
        divideWithRemainder(tmpB, q);
        return;
    }

    /*
     * Knuth's definition of mod (which this function uses) is somewhat
     * different from the C++ definition of % in case of division by 0.
     *
     * We let a / 0 == 0 (it doesn't matter much) and a % 0 == a, no
     * exceptions thrown. This allows us to preserve both Knuth's demand
     * that a mod 0 == a and the useful property that
     * (a / b) * b + (a % b) == a.
     */
    if (b.len == 0) {
        q.len = 0;
        return;
    }

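    /* Illustrative sketch (not part of the library) of the zero-divisor
     * convention just described:
     *
     *     BigUnsigned a(7), zero, q;
     *     a.divideWithRemainder(zero, q);
     *     // q == 0 (quotient), a is left unchanged at 7 (remainder),
     *     // and (a / 0) * 0 + (a % 0) == 0 + 7 == 7 == a still holds.
     */
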
    /*
     * If *this.len < b.len, then *this < b, and we can be sure that b doesn't go into
     * *this at all. The quotient is 0 and *this is already the remainder (so leave it alone).
     */
    if (len < b.len) {
        q.len = 0;
        return;
    }

    // At this point we know (*this).len >= b.len > 0. (Whew!)

    /*
     * Overall method:
     *
     * For each appropriate i and i2, decreasing:
     *    Subtract (b << (i blocks and i2 bits)) from *this, storing the
     *      result in subtractBuf.
     *    If the subtraction succeeds with a nonnegative result:
     *        Turn on bit i2 of block i of the quotient q.
     *        Copy subtractBuf back into *this.
     *    Otherwise bit i2 of block i remains off, and *this is unchanged.
     *
     * Eventually q will contain the entire quotient, and *this will
     * be left with the remainder.
     *
     * subtractBuf[x] corresponds to blk[x], not blk[x+i], since 2005.01.11.
     * But on a single iteration, we don't touch the i lowest blocks of blk
     * (and don't use those of subtractBuf) because these blocks are
     * unaffected by the subtraction: we are subtracting
     * (b << (i blocks and i2 bits)), which ends in at least `i' zero
     * blocks. */
    // Variables for the calculation
    Index i, j, k;
    unsigned int i2;
    Blk temp;
    bool borrowIn, borrowOut;

    /*
     * Make sure we have an extra zero block just past the value.
     *
     * When we attempt a subtraction, we might shift `b' so
     * its first block begins a few bits left of the dividend,
     * and then we'll try to compare these extra bits with
     * a nonexistent block to the left of the dividend. The
     * extra zero block ensures sensible behavior; we need
     * an extra block in `subtractBuf' for exactly the same reason.
     */
    Index origLen = len; // Save real length.
    /* To avoid an out-of-bounds access in case of reallocation, allocate
     * first and then increment the logical length. */
    allocateAndCopy(len + 1);
    len++;
    blk[origLen] = 0; // Zero the added block.

    // subtractBuf holds part of the result of a subtraction; see above.
    Blk *subtractBuf = new Blk[len];

    // Set preliminary length for quotient and make room
    q.len = origLen - b.len + 1;
    q.allocate(q.len);
    // Zero out the quotient
    for (i = 0; i < q.len; i++)
        q.blk[i] = 0;

    // For each possible left-shift of b in blocks...
    i = q.len;
    while (i > 0) {
        i--;
        // For each possible left-shift of b in bits...
        // (Remember, N is the number of bits in a Blk.)
        q.blk[i] = 0;
        i2 = N;
        while (i2 > 0) {
            i2--;
            /*
             * Subtract b, shifted left i blocks and i2 bits, from *this,
             * and store the answer in subtractBuf. In the for loop, `k == i + j'.
             *
             * Compare this to the middle section of `multiply'. They
             * are in many ways analogous. See especially the discussion
             * of `getShiftedBlock'.
             */
            for (j = 0, k = i, borrowIn = false; j <= b.len; j++, k++) {
                temp = blk[k] - getShiftedBlock(b, j, i2);
                borrowOut = (temp > blk[k]);
                if (borrowIn) {
                    borrowOut |= (temp == 0);
                    temp--;
                }
                // Since 2005.01.11, indices of `subtractBuf' directly match those of `blk', so use `k'.
                subtractBuf[k] = temp;
                borrowIn = borrowOut;
            }
            // No more extra iteration to deal with `bHigh'.
            // Roll-over a borrow as necessary.
            for (; k < origLen && borrowIn; k++) {
                borrowIn = (blk[k] == 0);
                subtractBuf[k] = blk[k] - 1;
            }
            /*
             * If the subtraction was performed successfully (!borrowIn),
             * set bit i2 in block i of the quotient.
             *
             * Then, copy the portion of subtractBuf filled by the subtraction
             * back to *this. This portion starts with block i and ends--
             * where? Not necessarily at block `i + b.len'! Well, we
             * increased k every time we saved a block into subtractBuf, so
             * the region of subtractBuf we copy is just [i, k).
             */
            if (!borrowIn) {
                q.blk[i] |= (Blk(1) << i2);
                while (k > i) {
                    k--;
                    blk[k] = subtractBuf[k];
                }
            }
        }
    }
    // Zap possible leading zero in quotient
    if (q.blk[q.len - 1] == 0)
        q.len--;
    // Zap any/all leading zeros in remainder
    zapLeadingZeros();
    // Deallocate subtractBuf.
    // (Thanks to Brad Spencer for noticing my accidental omission of this!)
    delete [] subtractBuf;
}

/* BITWISE OPERATORS
 * These are straightforward blockwise operations except that they differ in
 * the output length and the necessity of zapLeadingZeros. */

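/* Illustrative sketch (not part of the library) of why the output lengths
 * differ. With one-byte blocks, take a = 0x0100 (two blocks) and b = 0x00FF
 * (one block):
 *
 *     a & b == 0x0000  -> at most min(a.len, b.len) blocks, and zapLeadingZeros
 *                         is needed because the surviving blocks may all be 0.
 *     a | b == 0x01FF  -> exactly max(a.len, b.len) blocks; the top block of the
 *                         longer operand is kept, so no leading zero can appear.
 *     a ^ b == 0x01FF  -> up to max(a.len, b.len) blocks, but two operands of
 *                         equal length with equal top blocks would need zapping.
 */
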
void BigUnsigned::bitAnd(const BigUnsigned &a, const BigUnsigned &b) {
    DTRT_ALIASED(this == &a || this == &b, bitAnd(a, b));
    // The bitwise & can't be longer than either operand.
    len = (a.len >= b.len) ? b.len : a.len;
    allocate(len);
    Index i;
    for (i = 0; i < len; i++)
        blk[i] = a.blk[i] & b.blk[i];
    zapLeadingZeros();
}

void BigUnsigned::bitOr(const BigUnsigned &a, const BigUnsigned &b) {
    DTRT_ALIASED(this == &a || this == &b, bitOr(a, b));
    Index i;
    const BigUnsigned *a2, *b2;
    if (a.len >= b.len) {
        a2 = &a;
        b2 = &b;
    } else {
        a2 = &b;
        b2 = &a;
    }
    allocate(a2->len);
    for (i = 0; i < b2->len; i++)
        blk[i] = a2->blk[i] | b2->blk[i];
    for (; i < a2->len; i++)
        blk[i] = a2->blk[i];
    len = a2->len;
    // Doesn't need zapLeadingZeros.
}

void BigUnsigned::bitXor(const BigUnsigned &a, const BigUnsigned &b) {
    DTRT_ALIASED(this == &a || this == &b, bitXor(a, b));
    Index i;
    const BigUnsigned *a2, *b2;
    if (a.len >= b.len) {
        a2 = &a;
        b2 = &b;
    } else {
        a2 = &b;
        b2 = &a;
    }
    allocate(a2->len);
    for (i = 0; i < b2->len; i++)
        blk[i] = a2->blk[i] ^ b2->blk[i];
    for (; i < a2->len; i++)
        blk[i] = a2->blk[i];
    len = a2->len;
    zapLeadingZeros();
}

void BigUnsigned::bitShiftLeft(const BigUnsigned &a, int b) {
    DTRT_ALIASED(this == &a, bitShiftLeft(a, b));
    if (b < 0) {
        if (b << 1 == 0)
            throw "BigUnsigned::bitShiftLeft: "
                "Pathological shift amount not implemented";
        else {
            bitShiftRight(a, -b);
            return;
        }
    }
    Index shiftBlocks = b / N;
    unsigned int shiftBits = b % N;
    // + 1: room for high bits nudged left into another block
    len = a.len + shiftBlocks + 1;
    allocate(len);
    Index i, j;
    for (i = 0; i < shiftBlocks; i++)
        blk[i] = 0;
    for (j = 0, i = shiftBlocks; j <= a.len; j++, i++)
        blk[i] = getShiftedBlock(a, j, shiftBits);
    // Zap possible leading zero
    if (blk[len - 1] == 0)
        len--;
}

void BigUnsigned::bitShiftRight(const BigUnsigned &a, int b) {
    DTRT_ALIASED(this == &a, bitShiftRight(a, b));
    if (b < 0) {
        if (b << 1 == 0)
            throw "BigUnsigned::bitShiftRight: "
                "Pathological shift amount not implemented";
        else {
            bitShiftLeft(a, -b);
            return;
        }
    }
    // This calculation is wacky, but expressing the shift as a left bit shift
    // within each block lets us use getShiftedBlock.
    Index rightShiftBlocks = (b + N - 1) / N;
    unsigned int leftShiftBits = N * rightShiftBlocks - b;
    // Now (N * rightShiftBlocks - leftShiftBits) == b
    // and 0 <= leftShiftBits < N.
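    /* Illustrative sketch (not part of the library), assuming N == 32:
     * a right shift by b == 35 bits becomes rightShiftBlocks == 2 and
     * leftShiftBits == 29, i.e. drop two whole blocks and then read each
     * remaining block through getShiftedBlock as if pre-shifted left by
     * 29 bits, since 32 * 2 - 29 == 35. */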
    if (rightShiftBlocks >= a.len + 1) {
        // All of a is guaranteed to be shifted off, even considering the left
        // bit shift.
        len = 0;
        return;
    }
    // Now we're allocating a positive amount.
    // + 1: room for high bits nudged left into another block
    len = a.len + 1 - rightShiftBlocks;
    allocate(len);
    Index i, j;
    for (j = rightShiftBlocks, i = 0; j <= a.len; j++, i++)
        blk[i] = getShiftedBlock(a, j, leftShiftBits);
    // Zap possible leading zero
    if (blk[len - 1] == 0)
        len--;
}

// INCREMENT/DECREMENT OPERATORS

// Prefix increment
void BigUnsigned::operator ++() {
    Index i;
    bool carry = true;
    for (i = 0; i < len && carry; i++) {
        blk[i]++;
        carry = (blk[i] == 0);
    }
    if (carry) {
        // Allocate and then increase length, as in divideWithRemainder
        allocateAndCopy(len + 1);
        len++;
        blk[i] = 1;
    }
}

// Postfix increment: same as prefix
void BigUnsigned::operator ++(int) {
    operator ++();
}

// Prefix decrement
void BigUnsigned::operator --() {
    if (len == 0)
        throw "BigUnsigned::operator --(): Cannot decrement an unsigned zero";
    Index i;
    bool borrow = true;
    for (i = 0; borrow; i++) {
        borrow = (blk[i] == 0);
        blk[i]--;
    }
    // Zap possible leading zero (there can only be one)
    if (blk[len - 1] == 0)
        len--;
}

// Postfix decrement: same as prefix
void BigUnsigned::operator --(int) {
    operator --();
}