author    Stefan Krah <skrah@bytereef.org>    2012-05-31 18:01:05 (GMT)
committer Stefan Krah <skrah@bytereef.org>    2012-05-31 18:01:05 (GMT)
commit    4d3e0a695aec69b6c3f6638a65bd7469cd4fd568 (patch)
tree      6ec0be80830470b2a510ab8434f856a26219d50a /Modules/_decimal/libmpdec/mpdecimal.c
parent    d3487be8768bc763cdd931e0de73e2e7d3d3e0a0 (diff)
Improve Underflow handling in the correct-rounding loop. The case for
Underflow to zero hasn't changed: _mpd_qexp() internally uses MIN_EMIN,
so the result would also underflow to zero for all emin > MIN_EMIN.

In case digits are left, the informal argument is as follows: Underflow
can occur only once in the last multiplication of the power stage (in the
Horner stage Underflow provably cannot occur, and if Underflow occurred
twice in the power stage, the result would underflow to zero on the
second occasion).

Since there is no double rounding during Underflow, the effective work
precision is now 1 <= result->digits < prec. It can be shown by a
somewhat tedious argument that abs(result - e**x) < ulp(result, result->digits).

Therefore the correct-rounding loop now uses ulp(result, result->digits)
to generate the bounds for e**x in case of Underflow.
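
A rough sketch of the ulp bound described above (illustration only, not
libmpdec code; the helper name and the sample values are hypothetical):
at working precision prec, one ulp of a result with coefficient length
result->digits and exponent result->exp is 10**(result->exp +
result->digits - prec); after Underflow the stored result has fewer than
prec digits, so one ulp of the stored value is simply 10**(result->exp).

    #include <stdio.h>

    /* Hypothetical helper, for illustration only: exponent of one unit in
     * the last place of a decimal with 'digits' coefficient digits and
     * exponent 'exp', measured at working precision 'prec'. */
    static long ulp_exponent(long exp, long digits, long prec, int underflowed)
    {
        if (underflowed) {
            /* Underflow leaves 1 <= digits < prec, so the effective work
             * precision is 'digits' and one ulp is 10**exp. */
            return exp;
        }
        return exp + digits - prec;
    }

    int main(void)
    {
        /* Hypothetical 5-digit result with exponent -99 at prec 8. */
        printf("normal:    10**%ld\n", ulp_exponent(-99, 5, 8, 0)); /* 10**-102 */
        printf("underflow: 10**%ld\n", ulp_exponent(-99, 5, 8, 1)); /* 10**-99 */
        return 0;
    }
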
Diffstat (limited to 'Modules/_decimal/libmpdec/mpdecimal.c')
-rw-r--r--    Modules/_decimal/libmpdec/mpdecimal.c    21
1 file changed, 17 insertions(+), 4 deletions(-)
diff --git a/Modules/_decimal/libmpdec/mpdecimal.c b/Modules/_decimal/libmpdec/mpdecimal.c
index e5d3bf2..801b9a1 100644
--- a/Modules/_decimal/libmpdec/mpdecimal.c
+++ b/Modules/_decimal/libmpdec/mpdecimal.c
@@ -4122,6 +4122,8 @@ mpd_qexp(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
MPD_NEW_STATIC(ulp, 0,0,0,0);
MPD_NEW_STATIC(aa, 0,0,0,0);
mpd_ssize_t prec;
+ mpd_ssize_t ulpexp;
+ uint32_t workstatus;
if (result == a) {
if (!mpd_qcopy(&aa, a, status)) {
@@ -4135,12 +4137,20 @@ mpd_qexp(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
prec = ctx->prec + 3;
while (1) {
workctx.prec = prec;
- _mpd_qexp(result, a, &workctx, status);
- _ssettriple(&ulp, MPD_POS, 1,
- result->exp + result->digits-workctx.prec);
+ workstatus = 0;
+
+ _mpd_qexp(result, a, &workctx, &workstatus);
+ *status |= workstatus;
+
+ ulpexp = result->exp + result->digits - workctx.prec;
+ if (workstatus & MPD_Underflow) {
+ /* The effective work precision is result->digits. */
+ ulpexp = result->exp;
+ }
+ _ssettriple(&ulp, MPD_POS, 1, ulpexp);
/*
- * At this point:
+ * At this point [1]:
* 1) abs(result - e**x) < 0.5 * 10**(-prec) * e**x
* 2) result - ulp < e**x < result + ulp
* 3) result - ulp < result < result + ulp
@@ -4148,6 +4158,9 @@ mpd_qexp(mpd_t *result, const mpd_t *a, const mpd_context_t *ctx,
* If round(result-ulp)==round(result+ulp), then
* round(result)==round(e**x). Therefore the result
* is correctly rounded.
+ *
+ * [1] If abs(a) <= 9 * 10**(-prec-1), use the absolute
+ * error for a similar argument.
*/
workctx.prec = ctx->prec;
mpd_qadd(&t1, result, &ulp, &workctx, &workctx.status);
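
The stopping criterion in the comment above (if round(result-ulp) ==
round(result+ulp), then round(result) == round(e**x)) can be illustrated
with ordinary doubles. This is only a sketch of the idea behind the retry
loop, not libmpdec code; the helper name and the sample values are made up:

    #include <stdio.h>
    #include <string.h>

    /* Accept the approximation once the lower and upper bounds round to
     * the same string at the target precision; otherwise the caller would
     * retry with a higher working precision. */
    static int rounds_agree(double result, double ulp, int target_digits)
    {
        char lo[64], hi[64];
        snprintf(lo, sizeof lo, "%.*e", target_digits - 1, result - ulp);
        snprintf(hi, sizeof hi, "%.*e", target_digits - 1, result + ulp);
        return strcmp(lo, hi) == 0;
    }

    int main(void)
    {
        /* Hypothetical approximation of e**1 with an error bound of 1e-12. */
        double approx = 2.718281828459;
        printf("agree at 10 digits: %d\n", rounds_agree(approx, 1e-12, 10)); /* 1 */
        printf("agree at 13 digits: %d\n", rounds_agree(approx, 1e-12, 13)); /* 0 */
        return 0;
    }
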