//! @file mp-gamic.c
//! @author J. Marcel van der Veer
//!
//! @section Copyright
//!
//! This file is part of Algol68G - an Algol 68 compiler-interpreter.
//! Copyright 2001-2023 J. Marcel van der Veer [algol68g@xs4all.nl].
//!
//! @section License
//!
//! This program is free software; you can redistribute it and/or modify it
//! under the terms of the GNU General Public License as published by the
//! Free Software Foundation; either version 3 of the License, or
//! (at your option) any later version.
//!
//! This program is distributed in the hope that it will be useful, but
//! WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
//! or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
//! more details. You should have received a copy of the GNU General Public
//! License along with this program. If not, see [http://www.gnu.org/licenses/].

//! @section Synopsis
//!
//! [LONG] LONG REAL generalised incomplete gamma function.

// Generalised incomplete gamma code in this file was downloaded from
// [http://helios.mi.parisdescartes.fr/~rabergel/]
// and adapted for Algol 68 Genie.
//
// Reference:
// Rémy Abergel, Lionel Moisan. Fast and accurate evaluation of a
// generalized incomplete gamma function. 2019. hal-01329669v2
//
// Original source code copyright and license:
//
// DELTAGAMMAINC Fast and Accurate Evaluation of a Generalized Incomplete Gamma
// Function. Copyright (C) 2016 Remy Abergel (remy.abergel AT gmail.com), Lionel
// Moisan (Lionel.Moisan AT parisdescartes.fr).
//
// This file is a part of the DELTAGAMMAINC software, dedicated to the
// computation of a generalized incomplete gamma function. See the Companion paper
// for a complete description of the algorithm.
//
// ``Fast and accurate evaluation of a generalized incomplete gamma function''
// (Rémy Abergel, Lionel Moisan), preprint MAP5 nº2016-14, revision 1.
//
// This program is free software: you can redistribute it and/or modify it under
// the terms of the GNU General Public License as published by the Free Software
// Foundation, either version 3 of the License, or (at your option) any later
// version.
//
// This program is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
// FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
// details.
//
// You should have received a copy of the GNU General Public License along with
// this program. If not, see [http://www.gnu.org/licenses/].

// References
//
// R. Abergel and L. Moisan. 2016. Fast and accurate evaluation of a
// generalized incomplete gamma function, preprint MAP5 nº2016-14, revision 1
//
// Rémy Abergel, Lionel Moisan. Fast and accurate evaluation of a
// generalized incomplete gamma function. 2019. hal-01329669v2
//
// F. W. J. Olver, D. W. Lozier, R. F. Boisvert, and C. W. Clark
// (Eds.). 2010. NIST Handbook of Mathematical Functions. Cambridge University
// Press. (see online version at [http://dlmf.nist.gov/])
//
// W. H. Press, S. A. Teukolsky, W. T. Vetterling, and
// B. P. Flannery. 1992. Numerical recipes in C: the art of scientific
// computing (2nd ed.).
//
// G. R. Pugh, 2004. An analysis of the Lanczos Gamma approximation (PhD
// thesis)

#include "a68g.h"
#include "a68g-genie.h"
#include "a68g-prelude.h"
#include "a68g-double.h"
#include "a68g-mp.h"
#include "a68g-lib.h"

// Processing time of Abergel's algorithms rises steeply with precision.
#define MAX_PRECISION (LONG_LONG_MP_DIGITS + LONG_MP_DIGITS)
#define GAM_DIGITS(digs) FUN_DIGITS (digs)

#define ITMAX(p, digits) lit_mp (p, 1000000, 0, digits)
#define DPMIN(p, digits) lit_mp (p, 1, 10 - MAX_MP_EXPONENT, digits)
#define EPS(p, digits) lit_mp (p, 1, 1 - digits, digits)
#define NITERMAX_ROMBERG 16 // Maximum allowed number of Romberg iterations
#define TOL_ROMBERG(p, digits) lit_mp (p, MP_RADIX / 10, -1, digits)
#define TOL_DIFF(p, digits) lit_mp (p, MP_RADIX / 5, -1, digits)
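
// A rough guide to the constants above, reading lit_mp (q, u, e, digits) as
// building the value u * MP_RADIX^e (that reading is an assumption made for
// this note, not something stated in this file):
//   ITMAX        caps the number of modified Lentz continued-fraction steps.
//   DPMIN        is a tiny positive value guarding the Lentz recurrences
//                against division by zero.
//   EPS          is the relative tolerance in the convergence tests of the
//                form fabs (del - 1) >= EPS.
//   TOL_ROMBERG  scales EPS into the stopping tolerance of
//                mp_romberg_estimate (about 0.1).
//   TOL_DIFF     is the cancellation threshold below which Dgamic_mp falls
//                back on the Romberg estimate (about 0.2).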

//! @brief compute G(p,x) in the domain x <= p, with p >= 0, using a continued fraction

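// Sketch of the value computed below (read off the inline comments, not
// copied from the paper): a modified Lentz evaluation of
//   Gcfrac = a1 / (b1 + a2 / (b2 + a3 / (b3 + ...)))
// with a1 = 1, b1 = p and, for n >= 2,
//   an = (n odd ? k : -(p - 1 + k)) * x where k = n / 2, bn = p + n - 1.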
MP_T *G_cfrac_lower_mp (NODE_T *q, MP_T * Gcfrac, MP_T *p, MP_T *x, int digs)
{
  if (IS_ZERO_MP (x)) {
    SET_MP_ZERO (Gcfrac, digs);
    return Gcfrac;
  }
  ADDR_T pop_sp = A68_SP;
  MP_T *c = nil_mp (q, digs);
  MP_T *d = nil_mp (q, digs);
  MP_T *del = nil_mp (q, digs);
  MP_T *f = nil_mp (q, digs);
// Evaluate the continued fraction using the modified Lentz method. However,
// as detailed in the paper, perform the first pass (n = 1) of the modified
// Lentz method manually.
// an = 1; bn = p; f = an / bn; c = an / DPMIN; d = 1 / bn; n = 2;
  MP_T *an = lit_mp (q, 1, 0, digs);
  MP_T *bn = nil_mp (q, digs);
  MP_T *trm = nil_mp (q, digs);
  MP_T *dpmin = DPMIN (q, digs);
  MP_T *eps = EPS (q, digs);
  MP_T *itmax = ITMAX (q, digs);
  (void) move_mp (bn, p, digs);
  (void) div_mp (q, f, an, bn, digs);
  (void) div_mp (q, c, an, dpmin, digs);
  (void) rec_mp (q, d, bn, digs);
  MP_T *n = lit_mp (q, 2, 0, digs);
  MP_T *k = nil_mp (q, digs);
  MP_T *two = lit_mp (q, 2, 0, digs);
  BOOL_T odd = A68_FALSE, cont = A68_TRUE;
  while (cont) {
    A68_BOOL ge, lt;
// k = n / 2;
    (void) over_mp (q, k, n, two, digs);
// an = (n & 1 ? k : -(p - 1 + k)) * x;
    if (odd) {
      (void) move_mp (an, k, digs);
      odd = A68_FALSE;
    } else {
      (void) minus_one_mp (q, trm, p, digs);
      (void) add_mp (q, trm, trm, k, digs);
      (void) minus_mp (q, an, trm, digs);
      odd = A68_TRUE;
    }
    (void) mul_mp (q, an, an, x, digs);
// bn++;
    (void) plus_one_mp (q, bn, bn, digs);
// d = an * d + bn;
    (void) mul_mp (q, trm, an, d, digs);
    (void) add_mp (q, d, trm, bn, digs);
// if (d == 0) { d = DPMIN; }
    if (IS_ZERO_MP (d)) {
      (void) move_mp (d, dpmin, digs);
    }
// c = bn + an / c; mind possible overflow.
    (void) div_mp (q, trm, an, c, digs);
    (void) add_mp (q, c, bn, trm, digs);
// if (c == 0) { c = DPMIN; }
    if (IS_ZERO_MP (c)) {
      (void) move_mp (c, dpmin, digs);
    }
// d = 1 / d;
    (void) rec_mp (q, d, d, digs);
// del = d * c;
    (void) mul_mp (q, del, d, c, digs);
// f *= del;
    (void) mul_mp (q, f, f, del, digs);
// n++;
    (void) plus_one_mp (q, n, n, digs);
// while ((fabsq (del - 1) >= EPS) && (n < ITMAX));
    (void) minus_one_mp (q, trm, del, digs);
    (void) abs_mp (q, trm, trm, digs);
    (void) ge_mp (q, &ge, trm, eps, digs);
    (void) lt_mp (q, &lt, n, itmax, digs);
    cont = VALUE (&ge) && VALUE (&lt);
  }
  (void) move_mp (Gcfrac, f, digs);
  A68_SP = pop_sp;
  return Gcfrac;
}

//! @brief compute the G-function in the domain x > p using a
// continued fraction.
//
// 0 < p < x, or x = +infinity

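// Sketch of the value computed below (read off the inline comments, not
// copied from the paper): a modified Lentz evaluation of
//   Gcfrac = a1 / (b1 + a2 / (b2 + a3 / (b3 + ...)))
// with a1 = 1, b1 = x + 1 - p and, writing i = n - 1 for n >= 2,
//   an = -i * (i - p), bn = b{n-1} + 2;
// when b1 happens to be zero the evaluation starts at n = 3 instead and the
// result is a1 / f, as noted in the comments inside the function.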
MP_T *G_cfrac_upper_mp (NODE_T *q, MP_T * Gcfrac, MP_T *p, MP_T *x, int digs)
{
  ADDR_T pop_sp = A68_SP;
  if (PLUS_INF_MP (x)) {
    SET_MP_ZERO (Gcfrac, digs);
    return Gcfrac;
  }
  MP_T *c = nil_mp (q, digs);
  MP_T *d = nil_mp (q, digs);
  MP_T *del = nil_mp (q, digs);
  MP_T *f = nil_mp (q, digs);
  MP_T *trm = nil_mp (q, digs);
  MP_T *dpmin = DPMIN (q, digs);
  MP_T *eps = EPS (q, digs);
  MP_T *itmax = ITMAX (q, digs);
  MP_T *n = lit_mp (q, 2, 0, digs);
  MP_T *i = nil_mp (q, digs);
  MP_T *two = lit_mp (q, 2, 0, digs);
// an = 1;
  MP_T *an = lit_mp (q, 1, 0, digs);
// bn = x + 1 - p;
  MP_T *bn = lit_mp (q, 1, 0, digs);
  (void) add_mp (q, bn, x, bn, digs);
  (void) sub_mp (q, bn, bn, p, digs);
  BOOL_T t = !IS_ZERO_MP (bn);
// Evaluate the continued fraction using the modified Lentz method. However,
// as detailed in the paper, perform the first pass (n = 1) of the modified
// Lentz method manually.
  if (t) {
// b{1} is non-zero
// f = an / bn;
    (void) div_mp (q, f, an, bn, digs);
// c = an / DPMIN;
    (void) div_mp (q, c, an, dpmin, digs);
// d = 1 / bn;
    (void) rec_mp (q, d, bn, digs);
// n = 2;
    set_mp (n, 2, 0, digs);
  } else {
// b{1}=0 but b{2} is non-zero, compute Mcfrac = a{1}/f with f = a{2}/(b{2}+) a{3}/(b{3}+) ...
// an = -(1 - p);
    (void) minus_one_mp (q, an, p, digs);
// bn = x + 3 - p;
    (void) set_mp (bn, 3, 0, digs);
    (void) add_mp (q, bn, x, bn, digs);
    (void) sub_mp (q, bn, bn, p, digs);
// f = an / bn;
    (void) div_mp (q, f, an, bn, digs);
// c = an / DPMIN;
    (void) div_mp (q, c, an, dpmin, digs);
// d = 1 / bn;
    (void) rec_mp (q, d, bn, digs);
// n = 3;
    set_mp (n, 3, 0, digs);
  }
// i = n - 1;
  minus_one_mp (q, i, n, digs);
  BOOL_T cont = A68_TRUE;
  while (cont) {
    A68_BOOL ge, lt;
// an = -i * (i - p);
    (void) sub_mp (q, trm, p, i, digs);
    (void) mul_mp (q, an, i, trm, digs);
// bn += 2;
    (void) add_mp (q, bn, bn, two, digs);
// d = an * d + bn;
    (void) mul_mp (q, trm, an, d, digs);
    (void) add_mp (q, d, trm, bn, digs);
// if (d == 0) { d = DPMIN; }
    if (IS_ZERO_MP (d)) {
      (void) move_mp (d, dpmin, digs);
    }
// c = bn + an / c; mind possible overflow.
    (void) div_mp (q, trm, an, c, digs);
    (void) add_mp (q, c, bn, trm, digs);
// if (c == 0) { c = DPMIN; }
    if (IS_ZERO_MP (c)) {
      (void) move_mp (c, dpmin, digs);
    }
// d = 1 / d;
    (void) rec_mp (q, d, d, digs);
// del = d * c;
    (void) mul_mp (q, del, d, c, digs);
// f *= del;
    (void) mul_mp (q, f, f, del, digs);
// i++;
    (void) plus_one_mp (q, i, i, digs);
// n++;
    (void) plus_one_mp (q, n, n, digs);
// while ((fabsq (del - 1) >= EPS) && (n < ITMAX));
    (void) minus_one_mp (q, trm, del, digs);
    (void) abs_mp (q, trm, trm, digs);
    (void) ge_mp (q, &ge, trm, eps, digs);
    (void) lt_mp (q, &lt, n, itmax, digs);
    cont = VALUE (&ge) && VALUE (&lt);
  }
  A68_SP = pop_sp;
// *Gcfrac = t ? f : 1 / f;
  if (t) {
    (void) move_mp (Gcfrac, f, digs);
  } else {
    (void) rec_mp (q, Gcfrac, f, digs);
  }
  return Gcfrac;
}

//! @brief compute the G-function in the domain x < 0 and |x| < max (1,p-1)
// using a recursive integration by parts relation.
// This function cannot be used when mu > 0.
//
// p > 0, integer; x < 0, |x| < max (1,p-1)

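// Summary of the loop below (a reading of this code, not a formula taken from
// the paper): with t = |x| it accumulates
//   s = sum over l of c_l * (t - d_l), where c_0 = 1 / t, d_0 = p - 1,
//   c_{l+1} = c_l * d_l * (d_l - 1) / t^2 and d_{l+1} = d_l - 2,
// and then assembles
//   Gibp = ((odd ? -1 : 1) * exp (-t + lngamma (p) - (p - 1) * ln (t)) + s) / t.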
MP_T *G_ibp_mp (NODE_T *q, MP_T * Gibp, MP_T *p, MP_T *x, int digs)
{
  ADDR_T pop_sp = A68_SP;
  MP_T *trm = nil_mp (q, digs), *trn = nil_mp (q, digs);
  MP_T *eps = EPS (q, digs);
// t = fabsq (x);
  MP_T *t = nil_mp (q, digs);
  (void) abs_mp (q, t, x, digs);
// tt = 1 / (t * t);
  MP_T *tt = nil_mp (q, digs);
  (void) mul_mp (q, tt, t, t, digs);
  (void) rec_mp (q, tt, tt, digs);
// odd = (INT_T) (p) % 2 != 0;
  MP_T *two = lit_mp (q, 2, 0, digs);
  (void) trunc_mp (q, trm, p, digs);
  (void) mod_mp (q, trm, trm, two, digs);
  BOOL_T odd = !IS_ZERO_MP (trm);
// c = 1 / t;
  MP_T *c = nil_mp (q, digs);
  (void) rec_mp (q, c, t, digs);
// d = (p - 1);
  MP_T *d = nil_mp (q, digs);
  (void) minus_one_mp (q, d, p, digs);
// s = c * (t - d);
  MP_T *s = nil_mp (q, digs);
  (void) sub_mp (q, trm, t, d, digs);
  (void) mul_mp (q, s, c, trm, digs);
// l = 0;
  MP_T *l = nil_mp (q, digs);
//
  BOOL_T cont = A68_TRUE, stop;
  MP_T *del = nil_mp (q, digs);
  while (cont) {
// c *= d * (d - 1) * tt;
    (void) minus_one_mp (q, trm, d, digs);
    (void) mul_mp (q, trm, d, trm, digs);
    (void) mul_mp (q, trm, trm, tt, digs);
    (void) mul_mp (q, c, c, trm, digs);
// d -= 2;
    (void) sub_mp (q, d, d, two, digs);
// del = c * (t - d);
    (void) sub_mp (q, trm, t, d, digs);
    (void) mul_mp (q, del, c, trm, digs);
// s += del;
    (void) add_mp (q, s, s, del, digs);
// l++;
    (void) plus_one_mp (q, l, l, digs);
// stop = fabsq (del) < fabsq (s) * EPS;
    (void) abs_mp (q, trm, del, digs);
    (void) abs_mp (q, trn, s, digs);
    (void) mul_mp (q, trn, trn, eps, digs);
    A68_BOOL lt;
    (void) lt_mp (q, &lt, trm, trn, digs);
    stop = VALUE (&lt);
// while ((l < floorq ((p - 2) / 2)) && !stop);
    (void) sub_mp (q, trm, p, two, digs);
    (void) half_mp (q, trm, trm, digs);
    (void) floor_mp (q, trm, trm, digs);
    (void) lt_mp (q, &lt, l, trm, digs);
    cont = VALUE (&lt) && !stop;
  }
  if (odd && !stop) {
// s += d * c / t;
    (void) div_mp (q, trm, c, t, digs);
    (void) mul_mp (q, trm, d, trm, digs);
    (void) add_mp (q, s, s, trm, digs);
  }
// Gibp = ((odd ? -1 : 1) * expq (-t + lgammaq (p) - (p - 1) * logq (t)) + s) / t;
  (void) ln_mp (q, trn, t, digs);
  (void) minus_one_mp (q, trm, p, digs);
  (void) mul_mp (q, trm, trm, trn, digs);
  (void) lngamma_mp (q, trn, p, digs);
  (void) sub_mp (q, trm, trn, trm, digs);
  (void) sub_mp (q, trm, trm, t, digs);
  (void) exp_mp (q, Gibp, trm, digs);
  if (odd) {
    (void) minus_mp (q, Gibp, Gibp, digs);
  }
  (void) add_mp (q, Gibp, Gibp, s, digs);
  (void) div_mp (q, Gibp, Gibp, t, digs);
  A68_SP = pop_sp;
  return Gibp;
}

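//! @brief threshold used by G_func_mp to choose between the evaluation schemes
//
// As implemented below (a summary of the code; the paper motivates the
// threshold): plim (x) = x for x > 0, 0 for -9 <= x <= 0, and
// 5 * sqrt (-x) - 5 for x < -9.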
MP_T *plim_mp (NODE_T *p, MP_T *z, MP_T *x, int digs)
{
  ADDR_T pop_sp = A68_SP;
  if (MP_DIGIT (x, 1) > 0) {
    (void) move_mp (z, x, digs);
  } else {
    MP_T *five = lit_mp (p, 5, 0, digs);
    MP_T *nine = lit_mp (p, -9, 0, digs);
    A68_BOOL ge;
    (void) ge_mp (p, &ge, x, nine, digs);
    if (VALUE (&ge)) {
      SET_MP_ZERO (z, digs);
    } else {
      (void) minus_mp (p, z, x, digs);
      (void) sqrt_mp (p, z, z, digs);
      (void) mul_mp (p, z, five, z, digs);
      (void) sub_mp (p, z, z, five, digs);
    }
  }
  A68_SP = pop_sp;
  return z;
}

//! @brief compute G : (p,x) --> R defined as follows
//
// if x <= p:
// G(p,x) = exp (x-p*ln (|x|)) * integral of s^{p-1} * exp (-sign (x)*s) ds from s = 0 to |x|
// otherwise:
// G(p,x) = exp (x-p*ln (|x|)) * integral of s^{p-1} * exp (-s) ds from s = x to infinity
//
// p > 0; x is a real number or +infinity.

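// A direct consequence of the definition above (no extra claim): for
// 0 <= x <= p the integral is the lower incomplete gamma function, so
//   G(p,x) = exp (x - p*ln (x)) * gamma (p, x),
// and for x > p it is the upper incomplete gamma function, so
//   G(p,x) = exp (x - p*ln (x)) * Gamma (p, x).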
void G_func_mp (NODE_T *q, MP_T * G, MP_T *p, MP_T *x, int digs)
{
  ADDR_T pop_sp = A68_SP;
  MP_T *pl = nil_mp (q, digs);
  A68_BOOL ge;
  (void) plim_mp (q, pl, x, digs);
  (void) ge_mp (q, &ge, p, pl, digs);
  if (VALUE (&ge)) {
    G_cfrac_lower_mp (q, G, p, x, digs);
  } else if (MP_DIGIT (x, 1) < 0) {
    G_ibp_mp (q, G, p, x, digs);
  } else {
    G_cfrac_upper_mp (q, G, p, x, digs);
  }
  A68_SP = pop_sp;
}

//! @brief compute I_{x,y}^{mu,p} using a Romberg approximation.
// Compute rho and sigma so I_{x,y}^{mu,p} = rho * exp (sigma)

//! @brief iteration of the Romberg approximation of I_{x,y}^{mu,p}

#define ROMBERG_N (((NITERMAX_ROMBERG + 1) * (NITERMAX_ROMBERG + 2)) / 2)

static inline int IX (int n, int digs) {
  int offset = n * SIZE_MP (digs);
  return offset;
}

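// Storage note (describing the code below, not the paper): the Romberg table
// R is one flat buffer holding a lower triangle; row n starts at entry
// n * (n + 1) / 2 and holds n + 1 entries, which is why ROMBERG_N entries are
// allocated. IX maps an entry index to the offset of that multiprecision
// value within the buffer.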
void mp_romberg_iterations
  (NODE_T *q, MP_T *R, MP_T *sigma, INT_T n, MP_T *x, MP_T *y, MP_T *mu, MP_T *p, MP_T *h, MP_T *pow2, int digs)
{
  MP_T *trm = nil_mp (q, digs), *trn = nil_mp (q, digs);
  MP_T *sum = nil_mp (q, digs), *xx = nil_mp (q, digs);
  INT_T adr0_prev = ((n - 1) * n) / 2;
  INT_T adr0 = (n * (n + 1)) / 2;
  MP_T *j = lit_mp (q, 1, 0, digs);
  A68_BOOL le;
  VALUE (&le) = A68_TRUE;
  while (VALUE (&le)) {
// xx = x + ((y - x) * (2 * j - 1)) / (2 * pow2);
    (void) add_mp (q, trm, j, j, digs);
    (void) minus_one_mp (q, trm, trm, digs);
    (void) sub_mp (q, trn, y, x, digs);
    (void) mul_mp (q, trm, trm, trn, digs);
    (void) div_mp (q, trm, trm, pow2, digs);
    (void) half_mp (q, trm, trm, digs);
    (void) add_mp (q, xx, x, trm, digs);
// sum += exp (-mu * xx + (p - 1) * a68_ln (xx) - sigma);
    (void) ln_mp (q, trn, xx, digs);
    (void) minus_one_mp (q, trm, p, digs);
    (void) mul_mp (q, trm, trm, trn, digs);
    (void) mul_mp (q, trn, mu, xx, digs);
    (void) sub_mp (q, trm, trm, trn, digs);
    (void) sub_mp (q, trm, trm, sigma, digs);
    (void) exp_mp (q, trm, trm, digs);
    (void) add_mp (q, sum, sum, trm, digs);
// j++;
    (void) plus_one_mp (q, j, j, digs);
    (void) le_mp (q, &le, j, pow2, digs);
  }
// R[adr0] = 0.5 * R[adr0_prev] + h * sum;
  (void) half_mp (q, trm, &R[IX (adr0_prev, digs)], digs);
  (void) mul_mp (q, trn, h, sum, digs);
  (void) add_mp (q, &R[IX (adr0, digs)], trm, trn, digs);
// REAL_T pow4 = 4;
  MP_T *pow4 = lit_mp (q, 4, 0, digs);
  for (unt m = 1; m <= n; m++) {
// R[adr0 + m] = (pow4 * R[adr0 + (m - 1)] - R[adr0_prev + (m - 1)]) / (pow4 - 1);
    (void) mul_mp (q, trm, pow4, &R[IX (adr0 + m - 1, digs)], digs);
    (void) sub_mp (q, trm, trm, &R[IX (adr0_prev + m - 1, digs)], digs);
    (void) minus_one_mp (q, trn, pow4, digs);
    (void) div_mp (q, &R[IX (adr0 + m, digs)], trm, trn, digs);
// pow4 *= 4;
    (void) add_mp (q, trm, pow4, pow4, digs);
    (void) add_mp (q, pow4, trm, trm, digs);
  }
}

void mp_romberg_estimate (NODE_T *q, MP_T * rho, MP_T * sigma, MP_T *x, MP_T *y, MP_T *mu, MP_T *p, int digs)
{
  ADDR_T pop_sp = A68_SP;
  MP_T *R = (MP_T *) get_heap_space (ROMBERG_N * SIZE_MP (digs));
// Initialization (n=1)
  MP_T *trm = nil_mp (q, digs), *trn = nil_mp (q, digs);
// sigma = -mu * y + (p - 1) * ln (y);
  (void) ln_mp (q, trn, y, digs);
  (void) minus_one_mp (q, trm, p, digs);
  (void) mul_mp (q, trm, trm, trn, digs);
  (void) mul_mp (q, trn, mu, y, digs);
  (void) sub_mp (q, sigma, trm, trn, digs);
// R[0] = 0.5 * (y - x) * (exp (-mu * x + (p - 1) * ln (x) - (*sigma)) + 1);
  (void) ln_mp (q, trn, x, digs);
  (void) minus_one_mp (q, trm, p, digs);
  (void) mul_mp (q, trm, trm, trn, digs);
  (void) mul_mp (q, trn, mu, x, digs);
  (void) sub_mp (q, trm, trm, trn, digs);
  (void) sub_mp (q, trm, trm, sigma, digs);
  (void) exp_mp (q, trm, trm, digs);
  (void) plus_one_mp (q, trm, trm, digs);
  (void) sub_mp (q, trn, y, x, digs);
  (void) mul_mp (q, trm, trm, trn, digs);
  (void) half_mp (q, &R[IX (0, digs)], trm, digs);
// Loop for n > 0
  MP_T *relerr = nil_mp (q, digs);
  MP_T *relneeded = EPS (q, digs);
  (void) div_mp (q, relneeded, relneeded, TOL_ROMBERG (q, digs), digs);
  INT_T adr0 = 0;
  INT_T n = 1;
// REAL_T h = (y - x) / 2; // n=1, h = (y-x)/2^n
  MP_T *h = nil_mp (q, digs);
  (void) sub_mp (q, h, y, x, digs);
  (void) half_mp (q, h, h, digs);
// REAL_T pow2 = 1; // n=1; pow2 = 2^(n-1)
  MP_T *pow2 = lit_mp (q, 1, 0, digs);
  BOOL_T cont = A68_TRUE;
  while (cont) {
// while (n <= NITERMAX_ROMBERG && relerr > relneeded);
// mp_romberg_iterations (R, *sigma, n, x, y, mu, p, h, pow2);
    ADDR_T pop_sp_2 = A68_SP;
    mp_romberg_iterations (q, R, sigma, n, x, y, mu, p, h, pow2, digs);
    A68_SP = pop_sp_2;
// h /= 2;
    (void) half_mp (q, h, h, digs);
// pow2 *= 2;
    (void) add_mp (q, pow2, pow2, pow2, digs);
// adr0 = (n * (n + 1)) / 2;
    adr0 = (n * (n + 1)) / 2;
// relerr = abs ((R[adr0 + n] - R[adr0 + n - 1]) / R[adr0 + n]);
    (void) sub_mp (q, trm, &R[IX (adr0 + n, digs)], &R[IX (adr0 + n - 1, digs)], digs);
    (void) div_mp (q, trm, trm, &R[IX (adr0 + n, digs)], digs);
    (void) abs_mp (q, relerr, trm, digs);
// n++;
    n++;
    A68_BOOL gt;
    (void) gt_mp (q, &gt, relerr, relneeded, digs);
    cont = (n <= NITERMAX_ROMBERG) && VALUE (&gt);
  }
// save Romberg estimate and free memory
// rho = R[adr0 + (n - 1)];
  (void) move_mp (rho, &R[IX (adr0 + n - 1, digs)], digs);
  a68_free (R);
  A68_SP = pop_sp;
}

//! @brief compute generalized incomplete gamma function I_{x,y}^{mu,p}
//
// I_{x,y}^{mu,p} = integral from x to y of s^{p-1} * exp (-mu*s) ds
//
// This procedure computes (rho, sigma) described below.
// The approximated value of I_{x,y}^{mu,p} is I = rho * exp (sigma)
//
// mu is a real number not equal to zero
// (in general we take mu = 1 or -1, but any nonzero real number is allowed)
//
// x, y are two numbers with 0 <= x <= y <= +infinity
// (the setting y=+infinity is allowed only when mu > 0)
//
// p is a real number > 0; p must be an integer when mu < 0.

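// Worked special cases, read off the integral definition above (mu = 1):
//   I_{0,x}^{1,p}    = gamma (p, x)   the lower incomplete gamma function,
//   I_{x,inf}^{1,p}  = Gamma (p, x)   the upper incomplete gamma function,
//   I_{0,inf}^{1,p}  = Gamma (p)      the complete gamma function,
// each returned here in the split form rho * exp (sigma).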
void Dgamic_mp (NODE_T *q, MP_T * rho, MP_T * sigma, MP_T *x, MP_T *y, MP_T *mu, MP_T *p, int digs)
{
  ADDR_T pop_sp = A68_SP;
// Particular cases
  if (PLUS_INF_MP (x) && PLUS_INF_MP (y)) {
    SET_MP_ZERO (rho, digs);
    SET_MP_ZERO (sigma, digs);
    MP_STATUS (sigma) = (UNSIGNED_T) MP_STATUS (sigma) | MINUS_INF_MASK;
    A68_SP = pop_sp;
    return;
  } else if (same_mp (q, x, y, digs)) {
    SET_MP_ZERO (rho, digs);
    SET_MP_ZERO (sigma, digs);
    MP_STATUS (sigma) = (UNSIGNED_T) MP_STATUS (sigma) | MINUS_INF_MASK;
    A68_SP = pop_sp;
    return;
  }
  if (IS_ZERO_MP (x) && PLUS_INF_MP (y)) {
    set_mp (rho, 1, 0, digs);
    MP_T *lgam = nil_mp (q, digs);
    MP_T *lnmu = nil_mp (q, digs);
    (void) lngamma_mp (q, lgam, p, digs);
    (void) ln_mp (q, lnmu, mu, digs);
    (void) mul_mp (q, lnmu, p, lnmu, digs);
    (void) sub_mp (q, sigma, lgam, lnmu, digs);
    A68_SP = pop_sp;
    return;
  }
// Initialization
  MP_T *mx = nil_mp (q, digs);
  MP_T *nx = nil_mp (q, digs);
  MP_T *my = nil_mp (q, digs);
  MP_T *ny = nil_mp (q, digs);
  MP_T *mux = nil_mp (q, digs);
  MP_T *muy = nil_mp (q, digs);
// nx = (a68_isinf (x) ? a68_neginf () : -mu * x + p * ln (x));
  if (PLUS_INF_MP (x)) {
    SET_MP_ZERO (mx, digs);
    MP_STATUS (nx) = (UNSIGNED_T) MP_STATUS (nx) | MINUS_INF_MASK;
  } else {
    (void) mul_mp (q, mux, mu, x, digs);
    G_func_mp (q, mx, p, mux, digs);
    (void) ln_mp (q, nx, x, digs);
    (void) mul_mp (q, nx, p, nx, digs);
    (void) sub_mp (q, nx, nx, mux, digs);
  }
// ny = (a68_isinf (y) ? a68_neginf () : -mu * y + p * ln (y));
  if (PLUS_INF_MP (y)) {
    SET_MP_ZERO (my, digs);
    MP_STATUS (ny) = (UNSIGNED_T) MP_STATUS (ny) | MINUS_INF_MASK;
  } else {
    (void) mul_mp (q, muy, mu, y, digs);
    G_func_mp (q, my, p, muy, digs);
    (void) ln_mp (q, ny, y, digs);
    (void) mul_mp (q, ny, p, ny, digs);
    (void) sub_mp (q, ny, ny, muy, digs);
  }
// Compute (mA,nA) and (mB,nB) such that I_{x,y}^{mu,p} can be
// approximated by the difference A-B, where A >= B >= 0, A = mA*exp (nA) and
// B = mB*exp (nB). When the difference involves more than one digit loss due to
// cancellation errors, the integral I_{x,y}^{mu,p} is evaluated using the
// Romberg approximation method.
  MP_T *mA = nil_mp (q, digs);
  MP_T *mB = nil_mp (q, digs);
  MP_T *nA = nil_mp (q, digs);
  MP_T *nB = nil_mp (q, digs);
  MP_T *trm = nil_mp (q, digs);
  if (MP_DIGIT (mu, 1) < 0) {
    (void) move_mp (mA, my, digs);
    (void) move_mp (nA, ny, digs);
    (void) move_mp (mB, mx, digs);
    (void) move_mp (nB, nx, digs);
    goto compute;
  }
  MP_T *pl = nil_mp (q, digs);
  A68_BOOL lt;
  if (PLUS_INF_MP (x)) {
    VALUE (&lt) = A68_TRUE;
  } else {
    (void) mul_mp (q, mux, mu, x, digs);
    (void) plim_mp (q, pl, mux, digs);
    (void) lt_mp (q, &lt, p, pl, digs);
  }
  if (VALUE (&lt)) {
    (void) move_mp (mA, mx, digs);
    (void) move_mp (nA, nx, digs);
    (void) move_mp (mB, my, digs);
    (void) move_mp (nB, ny, digs);
    goto compute;
  }
  if (PLUS_INF_MP (y)) {
    VALUE (&lt) = A68_TRUE;
  } else {
    (void) mul_mp (q, muy, mu, y, digs);
    (void) plim_mp (q, pl, muy, digs);
    (void) lt_mp (q, &lt, p, pl, digs);
  }
  if (VALUE (&lt)) {
// mA = 1;
    set_mp (mA, 1, 0, digs);
// nA = lgammaq (p) - p * logq (mu);
    MP_T *lgam = nil_mp (q, digs);
    MP_T *lnmu = nil_mp (q, digs);
    (void) lngamma_mp (q, lgam, p, digs);
    (void) ln_mp (q, lnmu, mu, digs);
    (void) mul_mp (q, lnmu, p, lnmu, digs);
    (void) sub_mp (q, nA, lgam, lnmu, digs);
// nB = fmax (nx, ny);
    A68_BOOL ge;
    if (MINUS_INF_MP (ny)) {
      VALUE (&ge) = A68_TRUE;
    } else {
      (void) ge_mp (q, &ge, nx, ny, digs);
    }
    if (VALUE (&ge)) {
      (void) move_mp (nB, nx, digs);
    } else {
      (void) move_mp (nB, ny, digs);
    }
// mB = mx * expq (nx - nB) + my * expq (ny - nB);
    (void) sub_mp (q, trm, nx, nB, digs);
    (void) exp_mp (q, trm, trm, digs);
    (void) mul_mp (q, mB, mx, trm, digs);
    if (! MINUS_INF_MP (ny)) {
      (void) sub_mp (q, trm, ny, nB, digs);
      (void) exp_mp (q, trm, trm, digs);
      (void) mul_mp (q, trm, my, trm, digs);
      (void) add_mp (q, mB, mB, trm, digs);
    }
    goto compute;
  }
  (void) move_mp (mA, my, digs);
  (void) move_mp (nA, ny, digs);
  (void) move_mp (mB, mx, digs);
  (void) move_mp (nB, nx, digs);
compute:
// Compute (rho,sigma) such that rho*exp (sigma) = A-B
// 1. rho = mA - mB * expq (nB - nA);
  (void) sub_mp (q, trm, nB, nA, digs);
  (void) exp_mp (q, trm, trm, digs);
  (void) mul_mp (q, trm, mB, trm, digs);
  (void) sub_mp (q, rho, mA, trm, digs);
// 2. sigma = nA;
  (void) move_mp (sigma, nA, digs);
// If the difference involved a significant loss of precision, compute Romberg estimate.
// if (!isinfq (y) && ((*rho) / mA < TOL_DIFF)) {
  (void) div_mp (q, trm, rho, mA, digs);
  (void) lt_mp (q, &lt, trm, TOL_DIFF (q, digs), digs);
  if (!PLUS_INF_MP (y) && VALUE (&lt)) {
    mp_romberg_estimate (q, rho, sigma, x, y, mu, p, digs);
  }
  A68_SP = pop_sp;
}

void Dgamic_wrap_mp (NODE_T *q, MP_T * s, MP_T * rho, MP_T * sigma, MP_T *x, MP_T *y, MP_T *mu, MP_T *p, int digs)
{
  ADDR_T pop_sp = A68_SP;
  int gdigs = GAM_DIGITS (MAX_PRECISION);
  errno = 0;
  if (digs <= gdigs) {
    gdigs = GAM_DIGITS (digs);
    MP_T *rho_g = len_mp (q, rho, digs, gdigs);
    MP_T *sigma_g = len_mp (q, sigma, digs, gdigs);
    MP_T *x_g = len_mp (q, x, digs, gdigs);
    MP_T *y_g = len_mp (q, y, digs, gdigs);
    MP_T *mu_g = len_mp (q, mu, digs, gdigs);
    MP_T *p_g = len_mp (q, p, digs, gdigs);
    Dgamic_mp (q, rho_g, sigma_g, x_g, y_g, mu_g, p_g, gdigs);
    if (IS_ZERO_MP (rho_g) || MINUS_INF_MP (sigma_g)) {
      SET_MP_ZERO (s, digs);
    } else {
      (void) exp_mp (q, sigma_g, sigma_g, gdigs);
      (void) mul_mp (q, rho_g, rho_g, sigma_g, gdigs);
      (void) shorten_mp (q, s, digs, rho_g, gdigs);
    }
  } else {
    diagnostic (A68_MATH_WARNING, q, WARNING_MATH_PRECISION, MOID (q), CALL, NULL);
    MP_T *rho_g = cut_mp (q, rho, digs, gdigs);
    MP_T *sigma_g = cut_mp (q, sigma, digs, gdigs);
    MP_T *x_g = cut_mp (q, x, digs, gdigs);
    MP_T *y_g = cut_mp (q, y, digs, gdigs);
    MP_T *mu_g = cut_mp (q, mu, digs, gdigs);
    MP_T *p_g = cut_mp (q, p, digs, gdigs);
    Dgamic_mp (q, rho_g, sigma_g, x_g, y_g, mu_g, p_g, gdigs);
    if (IS_ZERO_MP (rho_g) || MINUS_INF_MP (sigma_g)) {
      SET_MP_ZERO (s, digs);
    } else {
      (void) exp_mp (q, sigma_g, sigma_g, gdigs);
      (void) mul_mp (q, rho_g, rho_g, sigma_g, gdigs);
      MP_T *tmp = nil_mp (q, MAX_PRECISION);
      (void) shorten_mp (q, tmp, MAX_PRECISION, rho_g, gdigs);
      (void) lengthen_mp (q, s, digs, tmp, MAX_PRECISION);
    }
  }
  PRELUDE_ERROR (errno != 0, q, ERROR_MATH, MOID (q));
  A68_SP = pop_sp;
}

//! @brief PROC long long gamma inc f = (LONG LONG REAL p, x) LONG LONG REAL

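// The body below fixes mu = 1 and y = +infinity, so in the notation above it
// returns I_{x,+inf}^{1,p} = Gamma (p, x), the (unnormalised) upper incomplete
// gamma function, as a single LONG LONG REAL.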
void genie_gamma_inc_f_mp (NODE_T *p)
{
  int digs = DIGITS (MOID (p)), size = SIZE (MOID (p));
  ADDR_T pop_sp = A68_SP;
  MP_T *x = (MP_T *) STACK_OFFSET (-size);
  MP_T *s = (MP_T *) STACK_OFFSET (-2 * size);
  MP_T *mu = lit_mp (p, 1, 0, digs);
  MP_T *y = nil_mp (p, digs);
  MP_T *rho = nil_mp (p, digs);
  MP_T *sigma = nil_mp (p, digs);
  MP_STATUS (y) = (UNSIGNED_T) MP_STATUS (y) | PLUS_INF_MASK;
  Dgamic_wrap_mp (p, s, rho, sigma, x, y, mu, s, digs);
  A68_SP = pop_sp;
  A68_SP -= size;
}

//! @brief PROC long long gamma inc g = (LONG LONG REAL p, x, y, mu) LONG LONG REAL

void genie_gamma_inc_g_mp (NODE_T *p)
{
  int digs = DIGITS (MOID (p)), size = SIZE (MOID (p));
  ADDR_T pop_sp = A68_SP;
  MP_T *mu = (MP_T *) STACK_OFFSET (-size);
  MP_T *y = (MP_T *) STACK_OFFSET (-2 * size);
  MP_T *x = (MP_T *) STACK_OFFSET (-3 * size);
  MP_T *s = (MP_T *) STACK_OFFSET (-4 * size);
  MP_T *rho = nil_mp (p, digs);
  MP_T *sigma = nil_mp (p, digs);
  Dgamic_wrap_mp (p, s, rho, sigma, x, y, mu, s, digs);
  A68_SP = pop_sp;
  A68_SP -= 3 * size;
}

//! @brief PROC long long gamma inc gf = (LONG LONG REAL p, x) LONG LONG REAL

void genie_gamma_inc_gf_mp (NODE_T *p)
{
// if x <= p: G(p,x) = exp (x-p*ln (|x|)) * integral over [0,|x|] of s^{p-1} * exp (-sign (x)*s) ds
// otherwise: G(p,x) = exp (x-p*ln (x)) * integral over [x,inf] of s^{p-1} * exp (-s) ds
  int digs = DIGITS (MOID (p)), size = SIZE (MOID (p));
  ADDR_T pop_sp = A68_SP;
  MP_T *x = (MP_T *) STACK_OFFSET (-size);
  MP_T *s = (MP_T *) STACK_OFFSET (-2 * size);
  int gdigs = GAM_DIGITS (MAX_PRECISION);
  errno = 0;
  if (digs <= gdigs) {
    gdigs = GAM_DIGITS (digs);
    MP_T *x_g = len_mp (p, x, digs, gdigs);
    MP_T *s_g = len_mp (p, s, digs, gdigs);
    MP_T *G = nil_mp (p, gdigs);
    G_func_mp (p, G, s_g, x_g, gdigs);
    PRELUDE_ERROR (errno != 0, p, ERROR_MATH, MOID (p));
    (void) shorten_mp (p, s, digs, G, gdigs);
  } else {
    diagnostic (A68_MATH_WARNING, p, WARNING_MATH_PRECISION, MOID (p), CALL, NULL);
    MP_T *x_g = cut_mp (p, x, digs, gdigs);
    MP_T *s_g = cut_mp (p, s, digs, gdigs);
    MP_T *G = nil_mp (p, gdigs);
    G_func_mp (p, G, s_g, x_g, gdigs);
    PRELUDE_ERROR (errno != 0, p, ERROR_MATH, MOID (p));
    MP_T *tmp = nil_mp (p, MAX_PRECISION);
    (void) shorten_mp (p, tmp, MAX_PRECISION, G, gdigs);
    (void) lengthen_mp (p, s, digs, tmp, MAX_PRECISION);
  }
  A68_SP = pop_sp - size;
}

//! @brief PROC long long gamma inc = (LONG LONG REAL p, x) LONG LONG REAL

void genie_gamma_inc_h_mp (NODE_T *p)
{
//
// #if defined (HAVE_GNU_MPFR) && (A68_LEVEL >= 3)
// genie_gamma_inc_mpfr (p);
// #else
// genie_gamma_inc_f_mp (p);
// #endif
//
  genie_gamma_inc_f_mp (p);
}