/* gmp-impl.h
   Scrape metadata (translated from Chinese):
   uploaded by qaz666999 on 2022-08-06; archive size 2570k, file size 174k.  */
/* Include file for internal GNU MP types and definitions.

   THE CONTENTS OF THIS FILE ARE FOR INTERNAL USE AND ARE ALMOST CERTAIN TO
   BE SUBJECT TO INCOMPATIBLE CHANGES IN FUTURE GNU MP RELEASES.

Copyright 1991, 1993, 1994, 1995, 1996, 1997, 1999, 2000, 2001, 2002, 2003,
2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.

This file is part of the GNU MP Library.

The GNU MP Library is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 3 of the License, or (at your
option) any later version.

The GNU MP Library is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
License for more details.

You should have received a copy of the GNU Lesser General Public License
along with the GNU MP Library.  If not, see http://www.gnu.org/licenses/.  */

/* __GMP_DECLSPEC must be given on any global data that will be accessed
   from outside libgmp, meaning from the test or development programs, or
   from libgmpxx.  Failing to do this will result in an incorrect address
   being used for the accesses.  On functions __GMP_DECLSPEC makes calls
   from outside libgmp more efficient, but they'll still work fine without
   it.  */
- #ifndef __GMP_IMPL_H__
- #define __GMP_IMPL_H__
- #if defined _CRAY
- #include <intrinsics.h> /* for _popcnt */
- #endif
- /* limits.h is not used in general, since it's an ANSI-ism, and since on
- solaris gcc 2.95 under -mcpu=ultrasparc in ABI=32 ends up getting wrong
- values (the ABI=64 values).
- On Cray vector systems, however, we need the system limits.h since sizes
- of signed and unsigned types can differ there, depending on compiler
- options (eg. -hnofastmd), making our SHRT_MAX etc expressions fail. For
- reference, int can be 46 or 64 bits, whereas uint is always 64 bits; and
- short can be 24, 32, 46 or 64 bits, and different for ushort. */
- #if defined _CRAY
- #include <limits.h>
- #endif
/* For fat.h and other fat binary stuff.
   No need for __GMP_ATTRIBUTE_PURE or __GMP_NOTHROW, since functions
   declared this way are only used to set function pointers in __gmp_cpuvec,
   they're not called directly.  */
#define DECL_add_n(name) \
  __GMP_DECLSPEC mp_limb_t name __GMP_PROTO ((mp_ptr, mp_srcptr, mp_srcptr, mp_size_t))
#define DECL_addmul_1(name) \
  __GMP_DECLSPEC mp_limb_t name __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_limb_t))
#define DECL_copyd(name) \
  __GMP_DECLSPEC void name __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t))
#define DECL_copyi(name) \
  DECL_copyd (name)
#define DECL_divexact_1(name) \
  __GMP_DECLSPEC mp_limb_t name __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_limb_t))
#define DECL_divexact_by3c(name) \
  __GMP_DECLSPEC mp_limb_t name __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_limb_t))
#define DECL_divrem_1(name) \
  __GMP_DECLSPEC mp_limb_t name __GMP_PROTO ((mp_ptr, mp_size_t, mp_srcptr, mp_size_t, mp_limb_t))
#define DECL_gcd_1(name) \
  __GMP_DECLSPEC mp_limb_t name __GMP_PROTO ((mp_srcptr, mp_size_t, mp_limb_t))
#define DECL_lshift(name) \
  __GMP_DECLSPEC mp_limb_t name __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, unsigned))
#define DECL_mod_1(name) \
  __GMP_DECLSPEC mp_limb_t name __GMP_PROTO ((mp_srcptr, mp_size_t, mp_limb_t))
#define DECL_mod_34lsub1(name) \
  __GMP_DECLSPEC mp_limb_t name __GMP_PROTO ((mp_srcptr, mp_size_t))
#define DECL_modexact_1c_odd(name) \
  __GMP_DECLSPEC mp_limb_t name __GMP_PROTO ((mp_srcptr, mp_size_t, mp_limb_t, mp_limb_t))
#define DECL_mul_1(name) \
  DECL_addmul_1 (name)
#define DECL_mul_basecase(name) \
  __GMP_DECLSPEC void name __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t))
#define DECL_preinv_divrem_1(name) \
  __GMP_DECLSPEC mp_limb_t name __GMP_PROTO ((mp_ptr, mp_size_t, mp_srcptr, mp_size_t, mp_limb_t, mp_limb_t, int))
#define DECL_preinv_mod_1(name) \
  __GMP_DECLSPEC mp_limb_t name __GMP_PROTO ((mp_srcptr, mp_size_t, mp_limb_t, mp_limb_t))
#define DECL_rshift(name) \
  DECL_lshift (name)
#define DECL_sqr_basecase(name) \
  __GMP_DECLSPEC void name __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t))
#define DECL_sub_n(name) \
  DECL_add_n (name)
#define DECL_submul_1(name) \
  DECL_addmul_1 (name)
- #if ! __GMP_WITHIN_CONFIGURE
- #include "config.h"
- #include "gmp-mparam.h"
- #include "fib_table.h"
- #include "mp_bases.h"
- #if WANT_FAT_BINARY
- #include "fat.h"
- #endif
- #endif
- #if HAVE_INTTYPES_H /* for uint_least32_t */
- # include <inttypes.h>
- #else
- # if HAVE_STDINT_H
- # include <stdint.h>
- # endif
- #endif
- #ifdef __cplusplus
- #include <cstring> /* for strlen */
- #include <string> /* for std::string */
- #endif
- #ifndef WANT_TMP_DEBUG /* for TMP_ALLOC_LIMBS_2 and others */
- #define WANT_TMP_DEBUG 0
- #endif
/* The following tries to get a good version of alloca.  The tests are
   adapted from autoconf AC_FUNC_ALLOCA, with a couple of additions.
   Whether this succeeds is tested by GMP_FUNC_ALLOCA and HAVE_ALLOCA will
   be setup appropriately.

   ifndef alloca - a cpp define might already exist.
       glibc <stdlib.h> includes <alloca.h> which uses GCC __builtin_alloca.
       HP cc +Olibcalls adds a #define of alloca to __builtin_alloca.

   GCC __builtin_alloca - preferred whenever available.

   _AIX pragma - IBM compilers need a #pragma in "each module that needs to
       use alloca".  Pragma indented to protect pre-ANSI cpp's.  _IBMR2 was
       used in past versions of GMP, retained still in case it matters.

       The autoconf manual says this pragma needs to be at the start of a C
       file, apart from comments and preprocessor directives.  Is that true?
       xlc on aix 4.xxx doesn't seem to mind it being after prototypes etc
       from gmp.h.
*/

#ifndef alloca
# ifdef __GNUC__
#  define alloca __builtin_alloca
# else
#  ifdef __DECC
#   define alloca(x) __ALLOCA(x)
#  else
#   ifdef _MSC_VER
#    include <malloc.h>
#    define alloca _alloca
#   else
#    if HAVE_ALLOCA_H
#     include <alloca.h>
#    else
#     if defined (_AIX) || defined (_IBMR2)
 #pragma alloca
#     else
       char *alloca ();
#     endif
#    endif
#   endif
#  endif
# endif
#endif
/* if not provided by gmp-mparam.h */
#ifndef BYTES_PER_MP_LIMB
#define BYTES_PER_MP_LIMB  SIZEOF_MP_LIMB_T
#endif

#define GMP_LIMB_BYTES  BYTES_PER_MP_LIMB

#ifndef GMP_LIMB_BITS
#define GMP_LIMB_BITS  (8 * SIZEOF_MP_LIMB_T)
#endif

#define BITS_PER_ULONG  (8 * SIZEOF_UNSIGNED_LONG)

/* gmp_uint_least32_t is an unsigned integer type with at least 32 bits. */
#if HAVE_UINT_LEAST32_T
typedef uint_least32_t      gmp_uint_least32_t;
#else
#if SIZEOF_UNSIGNED_SHORT >= 4
typedef unsigned short      gmp_uint_least32_t;
#else
#if SIZEOF_UNSIGNED >= 4
typedef unsigned            gmp_uint_least32_t;
#else
typedef unsigned long       gmp_uint_least32_t;
#endif
#endif
#endif

/* gmp_intptr_t, for pointer to integer casts */
#if HAVE_INTPTR_T
typedef intptr_t            gmp_intptr_t;
#else /* fallback */
typedef size_t              gmp_intptr_t;
#endif
- /* pre-inverse types for truncating division and modulo */
- typedef struct {mp_limb_t inv32;} gmp_pi1_t;
- typedef struct {mp_limb_t inv21, inv32, inv53;} gmp_pi2_t;
- /* const and signed must match __gmp_const and __gmp_signed, so follow the
- decision made for those in gmp.h. */
- #if ! __GMP_HAVE_CONST
- #define const /* empty */
- #define signed /* empty */
- #endif
- /* "const" basically means a function does nothing but examine its arguments
- and give a return value, it doesn't read or write any memory (neither
- global nor pointed to by arguments), and has no other side-effects. This
- is more restrictive than "pure". See info node "(gcc)Function
- Attributes". __GMP_NO_ATTRIBUTE_CONST_PURE lets tune/common.c etc turn
- this off when trying to write timing loops. */
- #if HAVE_ATTRIBUTE_CONST && ! defined (__GMP_NO_ATTRIBUTE_CONST_PURE)
- #define ATTRIBUTE_CONST __attribute__ ((const))
- #else
- #define ATTRIBUTE_CONST
- #endif
- #if HAVE_ATTRIBUTE_NORETURN
- #define ATTRIBUTE_NORETURN __attribute__ ((noreturn))
- #else
- #define ATTRIBUTE_NORETURN
- #endif
- /* "malloc" means a function behaves like malloc in that the pointer it
- returns doesn't alias anything. */
- #if HAVE_ATTRIBUTE_MALLOC
- #define ATTRIBUTE_MALLOC __attribute__ ((malloc))
- #else
- #define ATTRIBUTE_MALLOC
- #endif
#if ! HAVE_STRCHR
#define strchr(s,c)  index(s,c)
#endif

/* Fallback memset for systems lacking one; only enabled when configure
   found no library memset.  */
#if ! HAVE_MEMSET
#define memset(p, c, n)                 \
  do {                                  \
    ASSERT ((n) >= 0);                  \
    char *__memset__p = (p);            \
    int  __i;                           \
    for (__i = 0; __i < (n); __i++)     \
      __memset__p[__i] = (c);           \
  } while (0)
#endif

/* va_copy is standard in C99, and gcc provides __va_copy when in strict C89
   mode.  Falling back to a memcpy will give maximum portability, since it
   works no matter whether va_list is a pointer, struct or array.  */
#if ! defined (va_copy) && defined (__va_copy)
#define va_copy(dst,src)  __va_copy(dst,src)
#endif
#if ! defined (va_copy)
#define va_copy(dst,src) \
  do { memcpy (&(dst), &(src), sizeof (va_list)); } while (0)
#endif
/* HAVE_HOST_CPU_alpha_CIX is 1 on an alpha with the CIX instructions
   (ie. ctlz, ctpop, cttz).  */
#if HAVE_HOST_CPU_alphaev67 || HAVE_HOST_CPU_alphaev68  \
  || HAVE_HOST_CPU_alphaev7
#define HAVE_HOST_CPU_alpha_CIX 1
#endif

#if defined (__cplusplus)
extern "C" {
#endif
- /* Usage: TMP_DECL;
- TMP_MARK;
- ptr = TMP_ALLOC (bytes);
- TMP_FREE;
- Small allocations should use TMP_SALLOC, big allocations should use
- TMP_BALLOC. Allocations that might be small or big should use TMP_ALLOC.
- Functions that use just TMP_SALLOC should use TMP_SDECL, TMP_SMARK, and
- TMP_SFREE.
- TMP_DECL just declares a variable, but might be empty and so must be last
- in a list of variables. TMP_MARK must be done before any TMP_ALLOC.
- TMP_ALLOC(0) is not allowed. TMP_FREE doesn't need to be done if a
- TMP_MARK was made, but then no TMP_ALLOCs. */
- /* The alignment in bytes, used for TMP_ALLOCed blocks, when alloca or
- __gmp_allocate_func doesn't already determine it. Currently TMP_ALLOC
- isn't used for "double"s, so that's not in the union. */
- union tmp_align_t {
- mp_limb_t l;
- char *p;
- };
- #define __TMP_ALIGN sizeof (union tmp_align_t)
- /* Return "a" rounded upwards to a multiple of "m", if it isn't already.
- "a" must be an unsigned type.
- This is designed for use with a compile-time constant "m".
- The POW2 case is expected to be usual, and gcc 3.0 and up recognises
- "(-(8*n))%8" or the like is always zero, which means the rounding up in
- the WANT_TMP_NOTREENTRANT version of TMP_ALLOC below will be a noop. */
- #define ROUND_UP_MULTIPLE(a,m)
- (POW2_P(m) ? (a) + (-(a))%(m)
- : (a)+(m)-1 - (((a)+(m)-1) % (m)))
#if defined (WANT_TMP_ALLOCA) || defined (WANT_TMP_REENTRANT)
struct tmp_reentrant_t {
  struct tmp_reentrant_t  *next;
  size_t                  size;   /* bytes, including header */
};
__GMP_DECLSPEC void *__gmp_tmp_reentrant_alloc __GMP_PROTO ((struct tmp_reentrant_t **, size_t)) ATTRIBUTE_MALLOC;
__GMP_DECLSPEC void  __gmp_tmp_reentrant_free __GMP_PROTO ((struct tmp_reentrant_t *));
#endif

#if WANT_TMP_ALLOCA
#define TMP_SDECL
#define TMP_DECL                struct tmp_reentrant_t *__tmp_marker
#define TMP_SMARK
#define TMP_MARK                __tmp_marker = 0
#define TMP_SALLOC(n)           alloca(n)
#define TMP_BALLOC(n)           __gmp_tmp_reentrant_alloc (&__tmp_marker, n)
/* Small blocks go on the stack, big ones through the heap-backed
   reentrant allocator.  */
#define TMP_ALLOC(n)                                                    \
  (LIKELY ((n) < 65536) ? TMP_SALLOC(n) : TMP_BALLOC(n))
#define TMP_SFREE
#define TMP_FREE                                                        \
  do {                                                                  \
    if (UNLIKELY (__tmp_marker != 0)) __gmp_tmp_reentrant_free (__tmp_marker); \
  } while (0)
#endif
#if WANT_TMP_REENTRANT
#define TMP_SDECL               TMP_DECL
#define TMP_DECL                struct tmp_reentrant_t *__tmp_marker
#define TMP_SMARK               TMP_MARK
#define TMP_MARK                __tmp_marker = 0
#define TMP_SALLOC(n)           TMP_ALLOC(n)
#define TMP_BALLOC(n)           TMP_ALLOC(n)
#define TMP_ALLOC(n)            __gmp_tmp_reentrant_alloc (&__tmp_marker, n)
#define TMP_SFREE               TMP_FREE
#define TMP_FREE                __gmp_tmp_reentrant_free (__tmp_marker)
#endif
#if WANT_TMP_NOTREENTRANT
struct tmp_marker
{
  struct tmp_stack *which_chunk;
  void *alloc_point;
};
__GMP_DECLSPEC void *__gmp_tmp_alloc __GMP_PROTO ((unsigned long)) ATTRIBUTE_MALLOC;
__GMP_DECLSPEC void __gmp_tmp_mark __GMP_PROTO ((struct tmp_marker *));
__GMP_DECLSPEC void __gmp_tmp_free __GMP_PROTO ((struct tmp_marker *));
#define TMP_SDECL               TMP_DECL
#define TMP_DECL                struct tmp_marker __tmp_marker
#define TMP_SMARK               TMP_MARK
#define TMP_MARK                __gmp_tmp_mark (&__tmp_marker)
#define TMP_SALLOC(n)           TMP_ALLOC(n)
#define TMP_BALLOC(n)           TMP_ALLOC(n)
#define TMP_ALLOC(n)                                                    \
  __gmp_tmp_alloc (ROUND_UP_MULTIPLE ((unsigned long) (n), __TMP_ALIGN))
#define TMP_SFREE               TMP_FREE
#define TMP_FREE                __gmp_tmp_free (&__tmp_marker)
#endif
#if WANT_TMP_DEBUG
/* See tal-debug.c for some comments. */
struct tmp_debug_t {
  struct tmp_debug_entry_t  *list;
  const char                *file;
  int                       line;
};
struct tmp_debug_entry_t {
  struct tmp_debug_entry_t  *next;
  char                      *block;
  size_t                    size;
};
__GMP_DECLSPEC void  __gmp_tmp_debug_mark  __GMP_PROTO ((const char *, int, struct tmp_debug_t **,
                                                         struct tmp_debug_t *,
                                                         const char *, const char *));
__GMP_DECLSPEC void *__gmp_tmp_debug_alloc __GMP_PROTO ((const char *, int, int,
                                                         struct tmp_debug_t **, const char *,
                                                         size_t)) ATTRIBUTE_MALLOC;
__GMP_DECLSPEC void  __gmp_tmp_debug_free  __GMP_PROTO ((const char *, int, int,
                                                         struct tmp_debug_t **,
                                                         const char *, const char *));
#define TMP_SDECL TMP_DECL_NAME(__tmp_xmarker, "__tmp_marker")
#define TMP_DECL TMP_DECL_NAME(__tmp_xmarker, "__tmp_marker")
#define TMP_SMARK TMP_MARK_NAME(__tmp_xmarker, "__tmp_marker")
#define TMP_MARK TMP_MARK_NAME(__tmp_xmarker, "__tmp_marker")
#define TMP_SFREE TMP_FREE_NAME(__tmp_xmarker, "__tmp_marker")
#define TMP_FREE TMP_FREE_NAME(__tmp_xmarker, "__tmp_marker")

/* The marker variable is designed to provoke an uninitialized variable
   warning from the compiler if TMP_FREE is used without a TMP_MARK.
   __tmp_marker_inscope does the same for TMP_ALLOC.  Runtime tests pick
   these things up too.  */
#define TMP_DECL_NAME(marker, marker_name)                      \
  int marker;                                                   \
  int __tmp_marker_inscope;                                     \
  const char *__tmp_marker_name = marker_name;                  \
  struct tmp_debug_t  __tmp_marker_struct;                      \
  /* don't demand NULL, just cast a zero */                     \
  struct tmp_debug_t  *__tmp_marker = (struct tmp_debug_t *) 0

#define TMP_MARK_NAME(marker, marker_name)                      \
  do {                                                          \
    marker = 1;                                                 \
    __tmp_marker_inscope = 1;                                   \
    __gmp_tmp_debug_mark  (ASSERT_FILE, ASSERT_LINE,            \
                           &__tmp_marker, &__tmp_marker_struct, \
                           __tmp_marker_name, marker_name);     \
  } while (0)

#define TMP_SALLOC(n)           TMP_ALLOC(n)
#define TMP_BALLOC(n)           TMP_ALLOC(n)
#define TMP_ALLOC(size)                                                 \
  __gmp_tmp_debug_alloc (ASSERT_FILE, ASSERT_LINE,                      \
                         __tmp_marker_inscope,                          \
                         &__tmp_marker, __tmp_marker_name, size)

#define TMP_FREE_NAME(marker, marker_name)                      \
  do {                                                          \
    __gmp_tmp_debug_free  (ASSERT_FILE, ASSERT_LINE,            \
                           marker, &__tmp_marker,               \
                           __tmp_marker_name, marker_name);     \
  } while (0)
#endif /* WANT_TMP_DEBUG */
/* Allocating various types. */
#define TMP_ALLOC_TYPE(n,type)  ((type *) TMP_ALLOC ((n) * sizeof (type)))
#define TMP_SALLOC_TYPE(n,type) ((type *) TMP_SALLOC ((n) * sizeof (type)))
#define TMP_BALLOC_TYPE(n,type) ((type *) TMP_BALLOC ((n) * sizeof (type)))
#define TMP_ALLOC_LIMBS(n)      TMP_ALLOC_TYPE(n,mp_limb_t)
#define TMP_SALLOC_LIMBS(n)     TMP_SALLOC_TYPE(n,mp_limb_t)
#define TMP_BALLOC_LIMBS(n)     TMP_BALLOC_TYPE(n,mp_limb_t)
#define TMP_ALLOC_MP_PTRS(n)    TMP_ALLOC_TYPE(n,mp_ptr)
#define TMP_SALLOC_MP_PTRS(n)   TMP_SALLOC_TYPE(n,mp_ptr)
#define TMP_BALLOC_MP_PTRS(n)   TMP_BALLOC_TYPE(n,mp_ptr)

/* It's more efficient to allocate one block than two.  This is certainly
   true of the malloc methods, but it can even be true of alloca if that
   involves copying a chunk of stack (various RISCs), or a call to a stack
   bounds check (mingw).  In any case, when debugging keep separate blocks
   so a redzoning malloc debugger can protect each individually.  */
#define TMP_ALLOC_LIMBS_2(xp,xsize, yp,ysize)           \
  do {                                                  \
    if (WANT_TMP_DEBUG)                                 \
      {                                                 \
        (xp) = TMP_ALLOC_LIMBS (xsize);                 \
        (yp) = TMP_ALLOC_LIMBS (ysize);                 \
      }                                                 \
    else                                                \
      {                                                 \
        (xp) = TMP_ALLOC_LIMBS ((xsize) + (ysize));     \
        (yp) = (xp) + (xsize);                          \
      }                                                 \
  } while (0)
/* From gmp.h, nicer names for internal use. */
#define CRAY_Pragma(str)               __GMP_CRAY_Pragma(str)
#define MPN_CMP(result, xp, yp, size)  __GMPN_CMP(result, xp, yp, size)
#define LIKELY(cond)                   __GMP_LIKELY(cond)
#define UNLIKELY(cond)                 __GMP_UNLIKELY(cond)

#define ABS(x)   ((x) >= 0 ? (x) : -(x))
#undef MIN
#define MIN(l,o) ((l) < (o) ? (l) : (o))
#undef MAX
#define MAX(h,i) ((h) > (i) ? (h) : (i))
#define numberof(x)  (sizeof (x) / sizeof ((x)[0]))

/* Field access macros. */
#define SIZ(x)   ((x)->_mp_size)
#define ABSIZ(x) ABS (SIZ (x))
#define PTR(x)   ((x)->_mp_d)
#define LIMBS(x) ((x)->_mp_d)
#define EXP(x)   ((x)->_mp_exp)
#define PREC(x)  ((x)->_mp_prec)
#define ALLOC(x) ((x)->_mp_alloc)

/* n-1 inverts any low zeros and the lowest one bit.  If n&(n-1) leaves zero
   then that lowest one bit must have been the only bit set.  n==0 will
   return true though, so avoid that.  */
#define POW2_P(n)  (((n) & ((n) - 1)) == 0)
/* The "short" defines are a bit different because shorts are promoted to
   ints by ~ or >> etc.

   #ifndef's are used since on some systems (HP?) header files other than
   limits.h setup these defines.  We could forcibly #undef in that case, but
   there seems no need to worry about that.  */

#ifndef ULONG_MAX
#define ULONG_MAX   __GMP_ULONG_MAX
#endif
#ifndef UINT_MAX
#define UINT_MAX    __GMP_UINT_MAX
#endif
#ifndef USHRT_MAX
#define USHRT_MAX   __GMP_USHRT_MAX
#endif
#define MP_LIMB_T_MAX      (~ (mp_limb_t) 0)

/* Must cast ULONG_MAX etc to unsigned long etc, since they might not be
   unsigned on a K&R compiler.  In particular the HP-UX 10 bundled K&R cc
   treats the plain decimal values in <limits.h> as signed.  */
#define ULONG_HIGHBIT      (ULONG_MAX ^ ((unsigned long) ULONG_MAX >> 1))
#define UINT_HIGHBIT       (UINT_MAX ^ ((unsigned) UINT_MAX >> 1))
#define USHRT_HIGHBIT      ((unsigned short) (USHRT_MAX ^ ((unsigned short) USHRT_MAX >> 1)))
#define GMP_LIMB_HIGHBIT   (MP_LIMB_T_MAX ^ (MP_LIMB_T_MAX >> 1))

#ifndef LONG_MIN
#define LONG_MIN           ((long) ULONG_HIGHBIT)
#endif
#ifndef LONG_MAX
#define LONG_MAX           (-(LONG_MIN+1))
#endif

#ifndef INT_MIN
#define INT_MIN            ((int) UINT_HIGHBIT)
#endif
#ifndef INT_MAX
#define INT_MAX            (-(INT_MIN+1))
#endif

#ifndef SHRT_MIN
#define SHRT_MIN           ((short) USHRT_HIGHBIT)
#endif
#ifndef SHRT_MAX
#define SHRT_MAX           ((short) (-(SHRT_MIN+1)))
#endif

#if __GMP_MP_SIZE_T_INT
#define MP_SIZE_T_MAX      INT_MAX
#define MP_SIZE_T_MIN      INT_MIN
#else
#define MP_SIZE_T_MAX      LONG_MAX
#define MP_SIZE_T_MIN      LONG_MIN
#endif

/* mp_exp_t is the same as mp_size_t */
#define MP_EXP_T_MAX   MP_SIZE_T_MAX
#define MP_EXP_T_MIN   MP_SIZE_T_MIN

#define LONG_HIGHBIT       LONG_MIN
#define INT_HIGHBIT        INT_MIN
#define SHRT_HIGHBIT       SHRT_MIN

#define GMP_NUMB_HIGHBIT  (CNST_LIMB(1) << (GMP_NUMB_BITS-1))

#if GMP_NAIL_BITS == 0
#define GMP_NAIL_LOWBIT   CNST_LIMB(0)
#else
#define GMP_NAIL_LOWBIT   (CNST_LIMB(1) << GMP_NUMB_BITS)
#endif
#if GMP_NAIL_BITS != 0
/* Set various *_THRESHOLD values to be used for nails.  Thus we avoid using
   code that has not yet been qualified.  */

#undef  DC_DIV_QR_THRESHOLD
#define DC_DIV_QR_THRESHOLD              50

#undef DIVREM_1_NORM_THRESHOLD
#undef DIVREM_1_UNNORM_THRESHOLD
#undef MOD_1_NORM_THRESHOLD
#undef MOD_1_UNNORM_THRESHOLD
#undef USE_PREINV_DIVREM_1
#undef DIVREM_2_THRESHOLD
#undef DIVEXACT_1_THRESHOLD
#define DIVREM_1_NORM_THRESHOLD           MP_SIZE_T_MAX  /* no preinv */
#define DIVREM_1_UNNORM_THRESHOLD         MP_SIZE_T_MAX  /* no preinv */
#define MOD_1_NORM_THRESHOLD              MP_SIZE_T_MAX  /* no preinv */
#define MOD_1_UNNORM_THRESHOLD            MP_SIZE_T_MAX  /* no preinv */
#define USE_PREINV_DIVREM_1               0  /* no preinv */
#define DIVREM_2_THRESHOLD                MP_SIZE_T_MAX  /* no preinv */

/* mpn/generic/mul_fft.c is not nails-capable. */
#undef  MUL_FFT_THRESHOLD
#undef  SQR_FFT_THRESHOLD
#define MUL_FFT_THRESHOLD                MP_SIZE_T_MAX
#define SQR_FFT_THRESHOLD                MP_SIZE_T_MAX
#endif
/* Swap macros. */

#define MP_LIMB_T_SWAP(x, y)                    \
  do {                                          \
    mp_limb_t __mp_limb_t_swap__tmp = (x);      \
    (x) = (y);                                  \
    (y) = __mp_limb_t_swap__tmp;                \
  } while (0)
#define MP_SIZE_T_SWAP(x, y)                    \
  do {                                          \
    mp_size_t __mp_size_t_swap__tmp = (x);      \
    (x) = (y);                                  \
    (y) = __mp_size_t_swap__tmp;                \
  } while (0)
#define MP_PTR_SWAP(x, y)               \
  do {                                  \
    mp_ptr __mp_ptr_swap__tmp = (x);    \
    (x) = (y);                          \
    (y) = __mp_ptr_swap__tmp;           \
  } while (0)
#define MP_SRCPTR_SWAP(x, y)                    \
  do {                                          \
    mp_srcptr __mp_srcptr_swap__tmp = (x);      \
    (x) = (y);                                  \
    (y) = __mp_srcptr_swap__tmp;                \
  } while (0)

#define MPN_PTR_SWAP(xp,xs, yp,ys)      \
  do {                                  \
    MP_PTR_SWAP (xp, yp);               \
    MP_SIZE_T_SWAP (xs, ys);            \
  } while(0)
#define MPN_SRCPTR_SWAP(xp,xs, yp,ys)   \
  do {                                  \
    MP_SRCPTR_SWAP (xp, yp);            \
    MP_SIZE_T_SWAP (xs, ys);            \
  } while(0)

#define MPZ_PTR_SWAP(x, y)              \
  do {                                  \
    mpz_ptr __mpz_ptr_swap__tmp = (x);  \
    (x) = (y);                          \
    (y) = __mpz_ptr_swap__tmp;          \
  } while (0)
#define MPZ_SRCPTR_SWAP(x, y)                   \
  do {                                          \
    mpz_srcptr __mpz_srcptr_swap__tmp = (x);    \
    (x) = (y);                                  \
    (y) = __mpz_srcptr_swap__tmp;               \
  } while (0)
- /* Enhancement: __gmp_allocate_func could have "__attribute__ ((malloc))",
- but current gcc (3.0) doesn't seem to support that. */
- __GMP_DECLSPEC extern void * (*__gmp_allocate_func) __GMP_PROTO ((size_t));
- __GMP_DECLSPEC extern void * (*__gmp_reallocate_func) __GMP_PROTO ((void *, size_t, size_t));
- __GMP_DECLSPEC extern void (*__gmp_free_func) __GMP_PROTO ((void *, size_t));
- __GMP_DECLSPEC void *__gmp_default_allocate __GMP_PROTO ((size_t));
- __GMP_DECLSPEC void *__gmp_default_reallocate __GMP_PROTO ((void *, size_t, size_t));
- __GMP_DECLSPEC void __gmp_default_free __GMP_PROTO ((void *, size_t));
- #define __GMP_ALLOCATE_FUNC_TYPE(n,type)
- ((type *) (*__gmp_allocate_func) ((n) * sizeof (type)))
- #define __GMP_ALLOCATE_FUNC_LIMBS(n) __GMP_ALLOCATE_FUNC_TYPE (n, mp_limb_t)
- #define __GMP_REALLOCATE_FUNC_TYPE(p, old_size, new_size, type)
- ((type *) (*__gmp_reallocate_func)
- (p, (old_size) * sizeof (type), (new_size) * sizeof (type)))
- #define __GMP_REALLOCATE_FUNC_LIMBS(p, old_size, new_size)
- __GMP_REALLOCATE_FUNC_TYPE(p, old_size, new_size, mp_limb_t)
- #define __GMP_FREE_FUNC_TYPE(p,n,type) (*__gmp_free_func) (p, (n) * sizeof (type))
- #define __GMP_FREE_FUNC_LIMBS(p,n) __GMP_FREE_FUNC_TYPE (p, n, mp_limb_t)
- #define __GMP_REALLOCATE_FUNC_MAYBE(ptr, oldsize, newsize)
- do {
- if ((oldsize) != (newsize))
- (ptr) = (*__gmp_reallocate_func) (ptr, oldsize, newsize);
- } while (0)
- #define __GMP_REALLOCATE_FUNC_MAYBE_TYPE(ptr, oldsize, newsize, type)
- do {
- if ((oldsize) != (newsize))
- (ptr) = (type *) (*__gmp_reallocate_func)
- (ptr, (oldsize) * sizeof (type), (newsize) * sizeof (type));
- } while (0)
- /* Dummy for non-gcc, code involving it will go dead. */
- #if ! defined (__GNUC__) || __GNUC__ < 2
- #define __builtin_constant_p(x) 0
- #endif
- /* In gcc 2.96 and up on i386, tail calls are optimized to jumps if the
- stack usage is compatible. __attribute__ ((regparm (N))) helps by
- putting leading parameters in registers, avoiding extra stack.
- regparm cannot be used with calls going through the PLT, because the
- binding code there may clobber the registers (%eax, %edx, %ecx) used for
- the regparm parameters. Calls to local (ie. static) functions could
- still use this, if we cared to differentiate locals and globals.
- On athlon-unknown-freebsd4.9 with gcc 3.3.3, regparm cannot be used with
- -p or -pg profiling, since that version of gcc doesn't realize the
- .mcount calls will clobber the parameter registers. Other systems are
- ok, like debian with glibc 2.3.2 (mcount doesn't clobber), but we don't
- bother to try to detect this. regparm is only an optimization so we just
- disable it when profiling (profiling being a slowdown anyway). */
- #if HAVE_HOST_CPU_FAMILY_x86 && __GMP_GNUC_PREREQ (2,96) && ! defined (PIC)
- && ! WANT_PROFILING_PROF && ! WANT_PROFILING_GPROF
- #define USE_LEADING_REGPARM 1
- #else
- #define USE_LEADING_REGPARM 0
- #endif
- /* Macros for altering parameter order according to regparm usage. */
- #if USE_LEADING_REGPARM
- #define REGPARM_2_1(a,b,x) x,a,b
- #define REGPARM_3_1(a,b,c,x) x,a,b,c
- #define REGPARM_ATTR(n) __attribute__ ((regparm (n)))
- #else
- #define REGPARM_2_1(a,b,x) a,b,x
- #define REGPARM_3_1(a,b,c,x) a,b,c,x
- #define REGPARM_ATTR(n)
- #endif
/* ASM_L gives a local label for a gcc asm block, for use when temporary
   local labels like "1:" might not be available, which is the case for
   instance on the x86s (the SCO assembler doesn't support them).

   The label generated is made unique by including "%=" which is a unique
   number for each insn.  This ensures the same name can be used in multiple
   asm blocks, perhaps via a macro.  Since jumps between asm blocks are not
   allowed there's no need for a label to be usable outside a single
   block.  */

#define ASM_L(name)  LSYM_PREFIX "asm_%=_" #name

#if defined (__GNUC__) && HAVE_HOST_CPU_FAMILY_x86
#if 0
/* FIXME: Check that these actually improve things.
   FIXME: Need a cld after each std.
   FIXME: Can't have inputs in clobbered registers, must describe them as
   dummy outputs, and add volatile. */
#define MPN_COPY_INCR(DST, SRC, N)                                      \
  __asm__ ("cld\n\trep\n\tmovsl" : :                                    \
           "D" (DST), "S" (SRC), "c" (N) :                              \
           "cx", "di", "si", "memory")
#define MPN_COPY_DECR(DST, SRC, N)                                      \
  __asm__ ("std\n\trep\n\tmovsl" : :                                    \
           "D" ((DST) + (N) - 1), "S" ((SRC) + (N) - 1), "c" (N) :      \
           "cx", "di", "si", "memory")
#endif
#endif
- __GMP_DECLSPEC void __gmpz_aorsmul_1 __GMP_PROTO ((REGPARM_3_1 (mpz_ptr, mpz_srcptr, mp_limb_t, mp_size_t))) REGPARM_ATTR(1);
- #define mpz_aorsmul_1(w,u,v,sub) __gmpz_aorsmul_1 (REGPARM_3_1 (w, u, v, sub))
- #define mpz_n_pow_ui __gmpz_n_pow_ui
- __GMP_DECLSPEC void mpz_n_pow_ui __GMP_PROTO ((mpz_ptr, mp_srcptr, mp_size_t, unsigned long));
- #define mpn_addmul_1c __MPN(addmul_1c)
- __GMP_DECLSPEC mp_limb_t mpn_addmul_1c __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_limb_t, mp_limb_t));
- #define mpn_addmul_2 __MPN(addmul_2)
- __GMP_DECLSPEC mp_limb_t mpn_addmul_2 __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_srcptr));
- #define mpn_addmul_3 __MPN(addmul_3)
- __GMP_DECLSPEC mp_limb_t mpn_addmul_3 __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_srcptr));
- #define mpn_addmul_4 __MPN(addmul_4)
- __GMP_DECLSPEC mp_limb_t mpn_addmul_4 __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_srcptr));
- #define mpn_addmul_5 __MPN(addmul_5)
- __GMP_DECLSPEC mp_limb_t mpn_addmul_5 __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_srcptr));
- #define mpn_addmul_6 __MPN(addmul_6)
- __GMP_DECLSPEC mp_limb_t mpn_addmul_6 __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_srcptr));
- #define mpn_addmul_7 __MPN(addmul_7)
- __GMP_DECLSPEC mp_limb_t mpn_addmul_7 __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_srcptr));
- #define mpn_addmul_8 __MPN(addmul_8)
- __GMP_DECLSPEC mp_limb_t mpn_addmul_8 __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_srcptr));
- /* mpn_addlsh1_n(c,a,b,n), when it exists, sets {c,n} to {a,n}+2*{b,n}, and
- returns the carry out (0, 1 or 2). */
- #define mpn_addlsh1_n __MPN(addlsh1_n)
- __GMP_DECLSPEC mp_limb_t mpn_addlsh1_n __GMP_PROTO ((mp_ptr, mp_srcptr, mp_srcptr, mp_size_t));
- /* mpn_addlsh2_n(c,a,b,n), when it exists, sets {c,n} to {a,n}+4*{b,n}, and
- returns the carry out (0, ..., 4). */
- #define mpn_addlsh2_n __MPN(addlsh2_n)
- __GMP_DECLSPEC mp_limb_t mpn_addlsh2_n __GMP_PROTO ((mp_ptr, mp_srcptr, mp_srcptr, mp_size_t));
- /* mpn_addlsh_n(c,a,b,n,k), when it exists, sets {c,n} to {a,n}+2^k*{b,n}, and
- returns the carry out (0, ..., 2^k). */
- #define mpn_addlsh_n __MPN(addlsh_n)
- __GMP_DECLSPEC mp_limb_t mpn_addlsh_n __GMP_PROTO ((mp_ptr, mp_srcptr, mp_srcptr, mp_size_t, unsigned int));
- /* mpn_sublsh1_n(c,a,b,n), when it exists, sets {c,n} to {a,n}-2*{b,n}, and
- returns the borrow out (0, 1 or 2). */
- #define mpn_sublsh1_n __MPN(sublsh1_n)
- __GMP_DECLSPEC mp_limb_t mpn_sublsh1_n __GMP_PROTO ((mp_ptr, mp_srcptr, mp_srcptr, mp_size_t));
- /* mpn_rsblsh1_n(c,a,b,n), when it exists, sets {c,n} to 2*{b,n}-{a,n}, and
- returns the carry out (-1, 0, 1). */
- #define mpn_rsblsh1_n __MPN(rsblsh1_n)
- __GMP_DECLSPEC mp_limb_signed_t mpn_rsblsh1_n __GMP_PROTO ((mp_ptr, mp_srcptr, mp_srcptr, mp_size_t));
- /* mpn_sublsh2_n(c,a,b,n), when it exists, sets {c,n} to {a,n}-4*{b,n}, and
- returns the borrow out (FIXME 0, 1, 2 or 3). */
- #define mpn_sublsh2_n __MPN(sublsh2_n)
- __GMP_DECLSPEC mp_limb_t mpn_sublsh2_n __GMP_PROTO ((mp_ptr, mp_srcptr, mp_srcptr, mp_size_t));
- /* mpn_rsblsh2_n(c,a,b,n), when it exists, sets {c,n} to 4*{b,n}-{a,n}, and
- returns the carry out (-1, ..., 3). */
- #define mpn_rsblsh2_n __MPN(rsblsh2_n)
- __GMP_DECLSPEC mp_limb_signed_t mpn_rsblsh2_n __GMP_PROTO ((mp_ptr, mp_srcptr, mp_srcptr, mp_size_t));
- /* mpn_rsblsh_n(c,a,b,n,k), when it exists, sets {c,n} to 2^k*{b,n}-{a,n}, and
- returns the carry out (-1, 0, ..., 2^k-1). */
- #define mpn_rsblsh_n __MPN(rsblsh_n)
- __GMP_DECLSPEC mp_limb_signed_t mpn_rsblsh_n __GMP_PROTO ((mp_ptr, mp_srcptr, mp_srcptr, mp_size_t, unsigned int));
- /* mpn_rsh1add_n(c,a,b,n), when it exists, sets {c,n} to ({a,n} + {b,n}) >> 1,
- and returns the bit rshifted out (0 or 1). */
- #define mpn_rsh1add_n __MPN(rsh1add_n)
- __GMP_DECLSPEC mp_limb_t mpn_rsh1add_n __GMP_PROTO ((mp_ptr, mp_srcptr, mp_srcptr, mp_size_t));
- #define mpn_rsh1add_nc __MPN(rsh1add_nc)
- __GMP_DECLSPEC mp_limb_t mpn_rsh1add_nc __GMP_PROTO ((mp_ptr, mp_srcptr, mp_srcptr, mp_size_t, mp_limb_t));
- /* mpn_rsh1sub_n(c,a,b,n), when it exists, sets {c,n} to ({a,n} - {b,n}) >> 1,
- and returns the bit rshifted out (0 or 1). If there's a borrow from the
- subtract, it's stored as a 1 in the high bit of c[n-1], like a twos
- complement negative. */
- #define mpn_rsh1sub_n __MPN(rsh1sub_n)
- __GMP_DECLSPEC mp_limb_t mpn_rsh1sub_n __GMP_PROTO ((mp_ptr, mp_srcptr, mp_srcptr, mp_size_t));
- #define mpn_rsh1sub_nc __MPN(rsh1sub_nc)
- __GMP_DECLSPEC mp_limb_t mpn_rsh1sub_nc __GMP_PROTO ((mp_ptr, mp_srcptr, mp_srcptr, mp_size_t, mp_limb_t));
- #define mpn_lshiftc __MPN(lshiftc)
- __GMP_DECLSPEC mp_limb_t mpn_lshiftc __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, unsigned int));
- #define mpn_add_n_sub_n __MPN(add_n_sub_n)
- __GMP_DECLSPEC mp_limb_t mpn_add_n_sub_n __GMP_PROTO ((mp_ptr, mp_ptr, mp_srcptr, mp_srcptr, mp_size_t));
- #define mpn_add_n_sub_nc __MPN(add_n_sub_nc)
- __GMP_DECLSPEC mp_limb_t mpn_add_n_sub_nc __GMP_PROTO ((mp_ptr, mp_ptr, mp_srcptr, mp_srcptr, mp_size_t, mp_limb_t));
- #define mpn_addaddmul_1msb0 __MPN(addaddmul_1msb0)
- __GMP_DECLSPEC mp_limb_t mpn_addaddmul_1msb0 __GMP_PROTO ((mp_ptr, mp_srcptr, mp_srcptr, mp_size_t, mp_limb_t, mp_limb_t));
- #define mpn_divrem_1c __MPN(divrem_1c)
- __GMP_DECLSPEC mp_limb_t mpn_divrem_1c __GMP_PROTO ((mp_ptr, mp_size_t, mp_srcptr, mp_size_t, mp_limb_t, mp_limb_t));
- #define mpn_dump __MPN(dump)
- __GMP_DECLSPEC void mpn_dump __GMP_PROTO ((mp_srcptr, mp_size_t));
- #define mpn_fib2_ui __MPN(fib2_ui)
- __GMP_DECLSPEC mp_size_t mpn_fib2_ui __GMP_PROTO ((mp_ptr, mp_ptr, unsigned long));
- /* Remap names of internal mpn functions. */
- #define __clz_tab __MPN(clz_tab)
- #define mpn_udiv_w_sdiv __MPN(udiv_w_sdiv)
- #define mpn_jacobi_base __MPN(jacobi_base)
- __GMP_DECLSPEC int mpn_jacobi_base __GMP_PROTO ((mp_limb_t, mp_limb_t, int)) ATTRIBUTE_CONST;
- #define mpn_mod_1c __MPN(mod_1c)
- __GMP_DECLSPEC mp_limb_t mpn_mod_1c __GMP_PROTO ((mp_srcptr, mp_size_t, mp_limb_t, mp_limb_t)) __GMP_ATTRIBUTE_PURE;
- #define mpn_mul_1c __MPN(mul_1c)
- __GMP_DECLSPEC mp_limb_t mpn_mul_1c __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_limb_t, mp_limb_t));
- #define mpn_mul_2 __MPN(mul_2)
- __GMP_DECLSPEC mp_limb_t mpn_mul_2 __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_srcptr));
- #define mpn_mul_3 __MPN(mul_3)
- __GMP_DECLSPEC mp_limb_t mpn_mul_3 __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_srcptr));
- #define mpn_mul_4 __MPN(mul_4)
- __GMP_DECLSPEC mp_limb_t mpn_mul_4 __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_srcptr));
- #ifndef mpn_mul_basecase /* if not done with cpuvec in a fat binary */
- #define mpn_mul_basecase __MPN(mul_basecase)
- __GMP_DECLSPEC void mpn_mul_basecase __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t));
- #endif
- #define mpn_mullo_n __MPN(mullo_n)
- __GMP_DECLSPEC void mpn_mullo_n __GMP_PROTO ((mp_ptr, mp_srcptr, mp_srcptr, mp_size_t));
- #define mpn_mullo_basecase __MPN(mullo_basecase)
- __GMP_DECLSPEC void mpn_mullo_basecase __GMP_PROTO ((mp_ptr, mp_srcptr, mp_srcptr, mp_size_t));
- #define mpn_sqr __MPN(sqr)
- __GMP_DECLSPEC void mpn_sqr __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t));
- #ifndef mpn_sqr_basecase /* if not done with cpuvec in a fat binary */
- #define mpn_sqr_basecase __MPN(sqr_basecase)
- __GMP_DECLSPEC void mpn_sqr_basecase __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t));
- #endif
- #define mpn_submul_1c __MPN(submul_1c)
- __GMP_DECLSPEC mp_limb_t mpn_submul_1c __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_limb_t, mp_limb_t));
- #define mpn_redc_1 __MPN(redc_1)
- __GMP_DECLSPEC void mpn_redc_1 __GMP_PROTO ((mp_ptr, mp_ptr, mp_srcptr, mp_size_t, mp_limb_t));
- #define mpn_redc_2 __MPN(redc_2)
- __GMP_DECLSPEC void mpn_redc_2 __GMP_PROTO ((mp_ptr, mp_ptr, mp_srcptr, mp_size_t, mp_srcptr));
- #define mpn_redc_n __MPN(redc_n)
- __GMP_DECLSPEC void mpn_redc_n __GMP_PROTO ((mp_ptr, mp_ptr, mp_srcptr, mp_size_t, mp_srcptr));
- #define mpn_mod_1_1p_cps __MPN(mod_1_1p_cps)
- __GMP_DECLSPEC void mpn_mod_1_1p_cps __GMP_PROTO ((mp_limb_t [4], mp_limb_t));
- #define mpn_mod_1_1p __MPN(mod_1_1p)
- __GMP_DECLSPEC mp_limb_t mpn_mod_1_1p __GMP_PROTO ((mp_srcptr, mp_size_t, mp_limb_t, mp_limb_t [4])) __GMP_ATTRIBUTE_PURE;
- #define mpn_mod_1s_2p_cps __MPN(mod_1s_2p_cps)
- __GMP_DECLSPEC void mpn_mod_1s_2p_cps __GMP_PROTO ((mp_limb_t [5], mp_limb_t));
- #define mpn_mod_1s_2p __MPN(mod_1s_2p)
- __GMP_DECLSPEC mp_limb_t mpn_mod_1s_2p __GMP_PROTO ((mp_srcptr, mp_size_t, mp_limb_t, mp_limb_t [5])) __GMP_ATTRIBUTE_PURE;
- #define mpn_mod_1s_3p_cps __MPN(mod_1s_3p_cps)
- __GMP_DECLSPEC void mpn_mod_1s_3p_cps __GMP_PROTO ((mp_limb_t [6], mp_limb_t));
- #define mpn_mod_1s_3p __MPN(mod_1s_3p)
- __GMP_DECLSPEC mp_limb_t mpn_mod_1s_3p __GMP_PROTO ((mp_srcptr, mp_size_t, mp_limb_t, mp_limb_t [6])) __GMP_ATTRIBUTE_PURE;
- #define mpn_mod_1s_4p_cps __MPN(mod_1s_4p_cps)
- __GMP_DECLSPEC void mpn_mod_1s_4p_cps __GMP_PROTO ((mp_limb_t [7], mp_limb_t));
- #define mpn_mod_1s_4p __MPN(mod_1s_4p)
- __GMP_DECLSPEC mp_limb_t mpn_mod_1s_4p __GMP_PROTO ((mp_srcptr, mp_size_t, mp_limb_t, mp_limb_t [7])) __GMP_ATTRIBUTE_PURE;
- #define mpn_bc_mulmod_bnm1 __MPN(bc_mulmod_bnm1)
- __GMP_DECLSPEC void mpn_bc_mulmod_bnm1 __GMP_PROTO ((mp_ptr, mp_srcptr, mp_srcptr, mp_size_t, mp_ptr));
- #define mpn_mulmod_bnm1 __MPN(mulmod_bnm1)
- __GMP_DECLSPEC void mpn_mulmod_bnm1 __GMP_PROTO ((mp_ptr, mp_size_t, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr));
- #define mpn_mulmod_bnm1_next_size __MPN(mulmod_bnm1_next_size)
- __GMP_DECLSPEC mp_size_t mpn_mulmod_bnm1_next_size __GMP_PROTO ((mp_size_t)) ATTRIBUTE_CONST;
- static inline mp_size_t
- mpn_mulmod_bnm1_itch (mp_size_t rn, mp_size_t an, mp_size_t bn) {
- mp_size_t n, itch;
- n = rn >> 1;
- itch = rn + 4 +
- (an > n ? (bn > n ? rn : n) : 0);
- return itch;
- }
- #define mpn_sqrmod_bnm1 __MPN(sqrmod_bnm1)
- __GMP_DECLSPEC void mpn_sqrmod_bnm1 __GMP_PROTO ((mp_ptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr));
- #define mpn_sqrmod_bnm1_next_size __MPN(sqrmod_bnm1_next_size)
- __GMP_DECLSPEC mp_size_t mpn_sqrmod_bnm1_next_size __GMP_PROTO ((mp_size_t)) ATTRIBUTE_CONST;
- static inline mp_size_t
- mpn_sqrmod_bnm1_itch (mp_size_t rn, mp_size_t an) {
- mp_size_t n, itch;
- n = rn >> 1;
- itch = rn + 3 +
- (an > n ? an : 0);
- return itch;
- }
- typedef __gmp_randstate_struct *gmp_randstate_ptr;
- typedef const __gmp_randstate_struct *gmp_randstate_srcptr;
- /* Pseudo-random number generator function pointers structure. */
- typedef struct {
- void (*randseed_fn) __GMP_PROTO ((gmp_randstate_t, mpz_srcptr));
- void (*randget_fn) __GMP_PROTO ((gmp_randstate_t, mp_ptr, unsigned long int));
- void (*randclear_fn) __GMP_PROTO ((gmp_randstate_t));
- void (*randiset_fn) __GMP_PROTO ((gmp_randstate_ptr, gmp_randstate_srcptr));
- } gmp_randfnptr_t;
- /* Macro to obtain a void pointer to the function pointers structure. */
- #define RNG_FNPTR(rstate) ((rstate)->_mp_algdata._mp_lc)
- /* Macro to obtain a pointer to the generator's state.
- When used as a lvalue the rvalue needs to be cast to mp_ptr. */
- #define RNG_STATE(rstate) ((rstate)->_mp_seed->_mp_d)
- /* Write a given number of random bits to rp. */
- #define _gmp_rand(rp, state, bits)
- do {
- gmp_randstate_ptr __rstate = (state);
- (*((gmp_randfnptr_t *) RNG_FNPTR (__rstate))->randget_fn)
- (__rstate, rp, bits);
- } while (0)
- __GMP_DECLSPEC void __gmp_randinit_mt_noseed __GMP_PROTO ((gmp_randstate_t));
- /* __gmp_rands is the global state for the old-style random functions, and
- is also used in the test programs (hence the __GMP_DECLSPEC).
- There's no seeding here, so mpz_random etc will generate the same
- sequence every time. This is not unlike the C library random functions
- if you don't seed them, so perhaps it's acceptable. Digging up a seed
- from /dev/random or the like would work on many systems, but might
- encourage a false confidence, since it'd be pretty much impossible to do
- something that would work reliably everywhere. In any case the new style
- functions are recommended to applications which care about randomness, so
- the old functions aren't too important. */
- __GMP_DECLSPEC extern char __gmp_rands_initialized;
- __GMP_DECLSPEC extern gmp_randstate_t __gmp_rands;
- #define RANDS
- ((__gmp_rands_initialized ? 0
- : (__gmp_rands_initialized = 1,
- __gmp_randinit_mt_noseed (__gmp_rands), 0)),
- __gmp_rands)
- /* this is used by the test programs, to free memory */
- #define RANDS_CLEAR()
- do {
- if (__gmp_rands_initialized)
- {
- __gmp_rands_initialized = 0;
- gmp_randclear (__gmp_rands);
- }
- } while (0)
/* For a threshold between algorithms A and B, size>=thresh is where B
   should be used.  Special value MP_SIZE_T_MAX means only ever use A, or
   value 0 means only ever use B.  The tests for these special values will
   be compile-time constants, so the compiler should be able to eliminate
   the code for the unwanted algorithm.  */

#define ABOVE_THRESHOLD(size,thresh)    \
  ((thresh) == 0                        \
   || ((thresh) != MP_SIZE_T_MAX        \
       && (size) >= (thresh)))
#define BELOW_THRESHOLD(size,thresh)  (! ABOVE_THRESHOLD (size, thresh))

/* Minimum operand sizes (in limbs) at which the corresponding Toom
   routines are valid; callers must not go below these.  */
#define MPN_TOOM22_MUL_MINSIZE    4
#define MPN_TOOM2_SQR_MINSIZE     4

#define MPN_TOOM33_MUL_MINSIZE   17
#define MPN_TOOM3_SQR_MINSIZE    17

#define MPN_TOOM44_MUL_MINSIZE   30
#define MPN_TOOM4_SQR_MINSIZE    30

#define MPN_TOOM6H_MUL_MINSIZE   46
#define MPN_TOOM6_SQR_MINSIZE    46

#define MPN_TOOM8H_MUL_MINSIZE   86
#define MPN_TOOM8_SQR_MINSIZE    86

#define MPN_TOOM32_MUL_MINSIZE   10
#define MPN_TOOM42_MUL_MINSIZE   10
#define MPN_TOOM43_MUL_MINSIZE   49 /* ??? */
#define MPN_TOOM53_MUL_MINSIZE   49 /* ??? */
#define MPN_TOOM63_MUL_MINSIZE   49
- #define mpn_sqr_diagonal __MPN(sqr_diagonal)
- __GMP_DECLSPEC void mpn_sqr_diagonal __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t));
- #define mpn_toom_interpolate_5pts __MPN(toom_interpolate_5pts)
- __GMP_DECLSPEC void mpn_toom_interpolate_5pts __GMP_PROTO ((mp_ptr, mp_ptr, mp_ptr, mp_size_t, mp_size_t, int, mp_limb_t));
- enum toom6_flags {toom6_all_pos = 0, toom6_vm1_neg = 1, toom6_vm2_neg = 2};
- #define mpn_toom_interpolate_6pts __MPN(toom_interpolate_6pts)
- __GMP_DECLSPEC void mpn_toom_interpolate_6pts __GMP_PROTO ((mp_ptr, mp_size_t, enum toom6_flags, mp_ptr, mp_ptr, mp_ptr, mp_size_t));
- enum toom7_flags { toom7_w1_neg = 1, toom7_w3_neg = 2 };
- #define mpn_toom_interpolate_7pts __MPN(toom_interpolate_7pts)
- __GMP_DECLSPEC void mpn_toom_interpolate_7pts __GMP_PROTO ((mp_ptr, mp_size_t, enum toom7_flags, mp_ptr, mp_ptr, mp_ptr, mp_ptr, mp_size_t, mp_ptr));
- #define mpn_toom_interpolate_8pts __MPN(toom_interpolate_8pts)
- __GMP_DECLSPEC void mpn_toom_interpolate_8pts __GMP_PROTO ((mp_ptr, mp_size_t, mp_ptr, mp_ptr, mp_size_t, mp_ptr));
- #define mpn_toom_interpolate_12pts __MPN(toom_interpolate_12pts)
- __GMP_DECLSPEC void mpn_toom_interpolate_12pts __GMP_PROTO ((mp_ptr, mp_ptr, mp_ptr, mp_ptr, mp_size_t, mp_size_t, int, mp_ptr));
- #define mpn_toom_interpolate_16pts __MPN(toom_interpolate_16pts)
- __GMP_DECLSPEC void mpn_toom_interpolate_16pts __GMP_PROTO ((mp_ptr, mp_ptr, mp_ptr, mp_ptr, mp_ptr, mp_size_t, mp_size_t, int, mp_ptr));
- #define mpn_toom_couple_handling __MPN(toom_couple_handling)
- __GMP_DECLSPEC void mpn_toom_couple_handling __GMP_PROTO ((mp_ptr, mp_size_t, mp_ptr, int, mp_size_t, int, int));
- #define mpn_toom_eval_dgr3_pm1 __MPN(toom_eval_dgr3_pm1)
- __GMP_DECLSPEC int mpn_toom_eval_dgr3_pm1 __GMP_PROTO ((mp_ptr, mp_ptr, mp_srcptr, mp_size_t, mp_size_t, mp_ptr));
- #define mpn_toom_eval_dgr3_pm2 __MPN(toom_eval_dgr3_pm2)
- __GMP_DECLSPEC int mpn_toom_eval_dgr3_pm2 __GMP_PROTO ((mp_ptr, mp_ptr, mp_srcptr, mp_size_t, mp_size_t, mp_ptr));
- #define mpn_toom_eval_pm1 __MPN(toom_eval_pm1)
- __GMP_DECLSPEC int mpn_toom_eval_pm1 __GMP_PROTO ((mp_ptr, mp_ptr, unsigned, mp_srcptr, mp_size_t, mp_size_t, mp_ptr));
- #define mpn_toom_eval_pm2 __MPN(toom_eval_pm2)
- __GMP_DECLSPEC int mpn_toom_eval_pm2 __GMP_PROTO ((mp_ptr, mp_ptr, unsigned, mp_srcptr, mp_size_t, mp_size_t, mp_ptr));
- #define mpn_toom_eval_pm2exp __MPN(toom_eval_pm2exp)
- __GMP_DECLSPEC int mpn_toom_eval_pm2exp __GMP_PROTO ((mp_ptr, mp_ptr, unsigned, mp_srcptr, mp_size_t, mp_size_t, unsigned, mp_ptr));
- #define mpn_toom_eval_pm2rexp __MPN(toom_eval_pm2rexp)
- __GMP_DECLSPEC int mpn_toom_eval_pm2rexp __GMP_PROTO ((mp_ptr, mp_ptr, unsigned, mp_srcptr, mp_size_t, mp_size_t, unsigned, mp_ptr));
- #define mpn_toom22_mul __MPN(toom22_mul)
- __GMP_DECLSPEC void mpn_toom22_mul __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr));
- #define mpn_toom32_mul __MPN(toom32_mul)
- __GMP_DECLSPEC void mpn_toom32_mul __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr));
- #define mpn_toom42_mul __MPN(toom42_mul)
- __GMP_DECLSPEC void mpn_toom42_mul __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr));
- #define mpn_toom52_mul __MPN(toom52_mul)
- __GMP_DECLSPEC void mpn_toom52_mul __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr));
- #define mpn_toom62_mul __MPN(toom62_mul)
- __GMP_DECLSPEC void mpn_toom62_mul __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr));
- #define mpn_toom2_sqr __MPN(toom2_sqr)
- __GMP_DECLSPEC void mpn_toom2_sqr __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_ptr));
- #define mpn_toom33_mul __MPN(toom33_mul)
- __GMP_DECLSPEC void mpn_toom33_mul __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr));
- #define mpn_toom43_mul __MPN(toom43_mul)
- __GMP_DECLSPEC void mpn_toom43_mul __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr));
- #define mpn_toom53_mul __MPN(toom53_mul)
- __GMP_DECLSPEC void mpn_toom53_mul __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr));
- #define mpn_toom63_mul __MPN(toom63_mul)
- __GMP_DECLSPEC void mpn_toom63_mul __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr));
- #define mpn_toom3_sqr __MPN(toom3_sqr)
- __GMP_DECLSPEC void mpn_toom3_sqr __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_ptr));
- #define mpn_toom44_mul __MPN(toom44_mul)
- __GMP_DECLSPEC void mpn_toom44_mul __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr));
- #define mpn_toom4_sqr __MPN(toom4_sqr)
- __GMP_DECLSPEC void mpn_toom4_sqr __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_ptr));
- #define mpn_toom6h_mul __MPN(toom6h_mul)
- __GMP_DECLSPEC void mpn_toom6h_mul __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr));
- #define mpn_toom6_sqr __MPN(toom6_sqr)
- __GMP_DECLSPEC void mpn_toom6_sqr __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_ptr));
- #define mpn_toom8h_mul __MPN(toom8h_mul)
- __GMP_DECLSPEC void mpn_toom8h_mul __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr));
- #define mpn_toom8_sqr __MPN(toom8_sqr)
- __GMP_DECLSPEC void mpn_toom8_sqr __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_ptr));
- #define mpn_fft_best_k __MPN(fft_best_k)
- __GMP_DECLSPEC int mpn_fft_best_k __GMP_PROTO ((mp_size_t, int)) ATTRIBUTE_CONST;
- #define mpn_mul_fft __MPN(mul_fft)
- __GMP_DECLSPEC mp_limb_t mpn_mul_fft __GMP_PROTO ((mp_ptr, mp_size_t, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, int));
- #define mpn_mul_fft_full __MPN(mul_fft_full)
- __GMP_DECLSPEC void mpn_mul_fft_full __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t));
- #define mpn_nussbaumer_mul __MPN(nussbaumer_mul)
- __GMP_DECLSPEC void mpn_nussbaumer_mul __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t));
- #define mpn_fft_next_size __MPN(fft_next_size)
- __GMP_DECLSPEC mp_size_t mpn_fft_next_size __GMP_PROTO ((mp_size_t, int)) ATTRIBUTE_CONST;
- #define mpn_sbpi1_div_qr __MPN(sbpi1_div_qr)
- __GMP_DECLSPEC mp_limb_t mpn_sbpi1_div_qr __GMP_PROTO ((mp_ptr, mp_ptr, mp_size_t, mp_srcptr, mp_size_t, mp_limb_t));
- #define mpn_sbpi1_div_q __MPN(sbpi1_div_q)
- __GMP_DECLSPEC mp_limb_t mpn_sbpi1_div_q __GMP_PROTO ((mp_ptr, mp_ptr, mp_size_t, mp_srcptr, mp_size_t, mp_limb_t));
- #define mpn_sbpi1_divappr_q __MPN(sbpi1_divappr_q)
- __GMP_DECLSPEC mp_limb_t mpn_sbpi1_divappr_q __GMP_PROTO ((mp_ptr, mp_ptr, mp_size_t, mp_srcptr, mp_size_t, mp_limb_t));
- #define mpn_dcpi1_div_qr __MPN(dcpi1_div_qr)
- __GMP_DECLSPEC mp_limb_t mpn_dcpi1_div_qr __GMP_PROTO ((mp_ptr, mp_ptr, mp_size_t, mp_srcptr, mp_size_t, gmp_pi1_t *));
- #define mpn_dcpi1_div_qr_n __MPN(dcpi1_div_qr_n)
- __GMP_DECLSPEC mp_limb_t mpn_dcpi1_div_qr_n __GMP_PROTO ((mp_ptr, mp_ptr, mp_srcptr, mp_size_t, gmp_pi1_t *, mp_ptr));
- #define mpn_dcpi1_div_q __MPN(dcpi1_div_q)
- __GMP_DECLSPEC mp_limb_t mpn_dcpi1_div_q __GMP_PROTO ((mp_ptr, mp_ptr, mp_size_t, mp_srcptr, mp_size_t, gmp_pi1_t *));
- #define mpn_dcpi1_divappr_q __MPN(dcpi1_divappr_q)
- __GMP_DECLSPEC mp_limb_t mpn_dcpi1_divappr_q __GMP_PROTO ((mp_ptr, mp_ptr, mp_size_t, mp_srcptr, mp_size_t, gmp_pi1_t *));
- #define mpn_dcpi1_divappr_q_n __MPN(dcpi1_divappr_q_n)
- __GMP_DECLSPEC mp_limb_t mpn_dcpi1_divappr_q_n __GMP_PROTO ((mp_ptr, mp_ptr, mp_srcptr, mp_size_t, gmp_pi1_t *, mp_ptr));
- #define mpn_mu_div_qr __MPN(mu_div_qr)
- __GMP_DECLSPEC mp_limb_t mpn_mu_div_qr __GMP_PROTO ((mp_ptr, mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr));
- #define mpn_mu_div_qr_itch __MPN(mu_div_qr_itch)
- __GMP_DECLSPEC mp_size_t mpn_mu_div_qr_itch __GMP_PROTO ((mp_size_t, mp_size_t, int));
- #define mpn_mu_div_qr_choose_in __MPN(mu_div_qr_choose_in)
- __GMP_DECLSPEC mp_size_t mpn_mu_div_qr_choose_in __GMP_PROTO ((mp_size_t, mp_size_t, int));
- #define mpn_preinv_mu_div_qr __MPN(preinv_mu_div_qr)
- __GMP_DECLSPEC mp_limb_t mpn_preinv_mu_div_qr __GMP_PROTO ((mp_ptr, mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr));
- #define mpn_mu_divappr_q __MPN(mu_divappr_q)
- __GMP_DECLSPEC mp_limb_t mpn_mu_divappr_q __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr));
- #define mpn_mu_divappr_q_itch __MPN(mu_divappr_q_itch)
- __GMP_DECLSPEC mp_size_t mpn_mu_divappr_q_itch __GMP_PROTO ((mp_size_t, mp_size_t, int));
- #define mpn_mu_divappr_q_choose_in __MPN(mu_divappr_q_choose_in)
- __GMP_DECLSPEC mp_size_t mpn_mu_divappr_q_choose_in __GMP_PROTO ((mp_size_t, mp_size_t, int));
- #define mpn_preinv_mu_divappr_q __MPN(preinv_mu_divappr_q)
- __GMP_DECLSPEC mp_limb_t mpn_preinv_mu_divappr_q __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr));
- #define mpn_mu_div_q __MPN(mu_div_q)
- __GMP_DECLSPEC mp_limb_t mpn_mu_div_q __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr));
- #define mpn_mu_div_q_itch __MPN(mu_div_q_itch)
- __GMP_DECLSPEC mp_size_t mpn_mu_div_q_itch __GMP_PROTO ((mp_size_t, mp_size_t, int));
- #define mpn_div_q __MPN(div_q)
- __GMP_DECLSPEC void mpn_div_q __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr));
- #define mpn_invert __MPN(invert)
- __GMP_DECLSPEC void mpn_invert __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_ptr));
- #define mpn_invert_itch(n) mpn_invertappr_itch(n)
- #define mpn_ni_invertappr __MPN(ni_invertappr)
- __GMP_DECLSPEC mp_limb_t mpn_ni_invertappr __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_ptr));
- #define mpn_invertappr __MPN(invertappr)
- __GMP_DECLSPEC mp_limb_t mpn_invertappr __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_ptr));
- #define mpn_invertappr_itch(n) (3 * (n) + 2)
- #define mpn_binvert __MPN(binvert)
- __GMP_DECLSPEC void mpn_binvert __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_ptr));
- #define mpn_binvert_itch __MPN(binvert_itch)
- __GMP_DECLSPEC mp_size_t mpn_binvert_itch __GMP_PROTO ((mp_size_t));
- #define mpn_bdiv_q_1 __MPN(bdiv_q_1)
- __GMP_DECLSPEC mp_limb_t mpn_bdiv_q_1 __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_limb_t));
- #define mpn_pi1_bdiv_q_1 __MPN(pi1_bdiv_q_1)
- __GMP_DECLSPEC mp_limb_t mpn_pi1_bdiv_q_1 __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_limb_t, mp_limb_t, int));
- #define mpn_sbpi1_bdiv_qr __MPN(sbpi1_bdiv_qr)
- __GMP_DECLSPEC mp_limb_t mpn_sbpi1_bdiv_qr __GMP_PROTO ((mp_ptr, mp_ptr, mp_size_t, mp_srcptr, mp_size_t, mp_limb_t));
- #define mpn_sbpi1_bdiv_q __MPN(sbpi1_bdiv_q)
- __GMP_DECLSPEC void mpn_sbpi1_bdiv_q __GMP_PROTO ((mp_ptr, mp_ptr, mp_size_t, mp_srcptr, mp_size_t, mp_limb_t));
- #define mpn_dcpi1_bdiv_qr __MPN(dcpi1_bdiv_qr)
- __GMP_DECLSPEC mp_limb_t mpn_dcpi1_bdiv_qr __GMP_PROTO ((mp_ptr, mp_ptr, mp_size_t, mp_srcptr, mp_size_t, mp_limb_t));
- #define mpn_dcpi1_bdiv_qr_n_itch __MPN(dcpi1_bdiv_qr_n_itch)
- __GMP_DECLSPEC mp_size_t mpn_dcpi1_bdiv_qr_n_itch __GMP_PROTO ((mp_size_t));
- #define mpn_dcpi1_bdiv_qr_n __MPN(dcpi1_bdiv_qr_n)
- __GMP_DECLSPEC mp_limb_t mpn_dcpi1_bdiv_qr_n __GMP_PROTO ((mp_ptr, mp_ptr, mp_srcptr, mp_size_t, mp_limb_t, mp_ptr));
- #define mpn_dcpi1_bdiv_q __MPN(dcpi1_bdiv_q)
- __GMP_DECLSPEC void mpn_dcpi1_bdiv_q __GMP_PROTO ((mp_ptr, mp_ptr, mp_size_t, mp_srcptr, mp_size_t, mp_limb_t));
- #define mpn_dcpi1_bdiv_q_n_itch __MPN(dcpi1_bdiv_q_n_itch)
- __GMP_DECLSPEC mp_size_t mpn_dcpi1_bdiv_q_n_itch __GMP_PROTO ((mp_size_t));
- #define mpn_dcpi1_bdiv_q_n __MPN(dcpi1_bdiv_q_n)
- __GMP_DECLSPEC void mpn_dcpi1_bdiv_q_n __GMP_PROTO ((mp_ptr, mp_ptr, mp_srcptr, mp_size_t, mp_limb_t, mp_ptr));
- #define mpn_mu_bdiv_qr __MPN(mu_bdiv_qr)
- __GMP_DECLSPEC mp_limb_t mpn_mu_bdiv_qr __GMP_PROTO ((mp_ptr, mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr));
- #define mpn_mu_bdiv_qr_itch __MPN(mu_bdiv_qr_itch)
- __GMP_DECLSPEC mp_size_t mpn_mu_bdiv_qr_itch __GMP_PROTO ((mp_size_t, mp_size_t));
- #define mpn_mu_bdiv_q __MPN(mu_bdiv_q)
- __GMP_DECLSPEC void mpn_mu_bdiv_q __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr));
- #define mpn_mu_bdiv_q_itch __MPN(mu_bdiv_q_itch)
- __GMP_DECLSPEC mp_size_t mpn_mu_bdiv_q_itch __GMP_PROTO ((mp_size_t, mp_size_t));
- #define mpn_bdiv_qr __MPN(bdiv_qr)
- __GMP_DECLSPEC mp_limb_t mpn_bdiv_qr __GMP_PROTO ((mp_ptr, mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr));
- #define mpn_bdiv_qr_itch __MPN(bdiv_qr_itch)
- __GMP_DECLSPEC mp_size_t mpn_bdiv_qr_itch __GMP_PROTO ((mp_size_t, mp_size_t));
- #define mpn_bdiv_q __MPN(bdiv_q)
- __GMP_DECLSPEC void mpn_bdiv_q __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr));
- #define mpn_bdiv_q_itch __MPN(bdiv_q_itch)
- __GMP_DECLSPEC mp_size_t mpn_bdiv_q_itch __GMP_PROTO ((mp_size_t, mp_size_t));
- #define mpn_divexact __MPN(divexact)
- __GMP_DECLSPEC void mpn_divexact __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t));
- #define mpn_divexact_itch __MPN(divexact_itch)
- __GMP_DECLSPEC mp_size_t mpn_divexact_itch __GMP_PROTO ((mp_size_t, mp_size_t));
- #define mpn_bdiv_dbm1c __MPN(bdiv_dbm1c)
- __GMP_DECLSPEC mp_limb_t mpn_bdiv_dbm1c __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_limb_t, mp_limb_t));
- #define mpn_bdiv_dbm1(dst, src, size, divisor)
- mpn_bdiv_dbm1c (dst, src, size, divisor, __GMP_CAST (mp_limb_t, 0))
- #define mpn_powm __MPN(powm)
- __GMP_DECLSPEC void mpn_powm __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr));
- #define mpn_powlo __MPN(powlo)
- __GMP_DECLSPEC void mpn_powlo __GMP_PROTO ((mp_ptr, mp_srcptr, mp_srcptr, mp_size_t, mp_size_t, mp_ptr));
- #define mpn_powm_sec __MPN(powm_sec)
- __GMP_DECLSPEC void mpn_powm_sec __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr));
- #define mpn_powm_sec_itch __MPN(powm_sec_itch)
- __GMP_DECLSPEC mp_size_t mpn_powm_sec_itch __GMP_PROTO ((mp_size_t, mp_size_t, mp_size_t));
- #define mpn_subcnd_n __MPN(subcnd_n)
- __GMP_DECLSPEC mp_limb_t mpn_subcnd_n __GMP_PROTO ((mp_ptr, mp_srcptr, mp_srcptr, mp_size_t, mp_limb_t));
- #define mpn_tabselect __MPN(tabselect)
- __GMP_DECLSPEC void mpn_tabselect __GMP_PROTO ((volatile mp_limb_t *, volatile mp_limb_t *, mp_size_t, mp_size_t, mp_size_t));
- #define mpn_redc_1_sec __MPN(redc_1_sec)
- __GMP_DECLSPEC void mpn_redc_1_sec __GMP_PROTO ((mp_ptr, mp_ptr, mp_srcptr, mp_size_t, mp_limb_t));
/* Exact division by small odd constants via mpn_bdiv_dbm1: for a divisor d
   dividing 2^k - 1, the remainder returned by mpn_bdiv_dbm1 identifies the
   quotient adjustment masked off below.  Method 0 uses mpn_bdiv_dbm1c,
   method 1 falls back on mpn_divexact_by3c.
   (Restored: line-continuation backslashes stripped by extraction.)  */
#ifndef DIVEXACT_BY3_METHOD
#if GMP_NUMB_BITS % 2 == 0 && ! defined (HAVE_NATIVE_mpn_divexact_by3c)
#define DIVEXACT_BY3_METHOD 0	/* default to using mpn_bdiv_dbm1c */
#else
#define DIVEXACT_BY3_METHOD 1
#endif
#endif

#if DIVEXACT_BY3_METHOD == 0
#undef mpn_divexact_by3
#define mpn_divexact_by3(dst,src,size)                          \
  (3 & mpn_bdiv_dbm1 (dst, src, size, __GMP_CAST (mp_limb_t, GMP_NUMB_MASK / 3)))
/* override mpn_divexact_by3c defined in gmp.h */
/*
#undef mpn_divexact_by3c
#define mpn_divexact_by3c(dst,src,size,cy)                      \
  (3 & mpn_bdiv_dbm1c (dst, src, size, __GMP_CAST (mp_limb_t, GMP_NUMB_MASK / 3, GMP_NUMB_MASK / 3 * cy)))
*/
#endif

#if GMP_NUMB_BITS % 4 == 0
#define mpn_divexact_by5(dst,src,size)                          \
  (7 & 3 * mpn_bdiv_dbm1 (dst, src, size, __GMP_CAST (mp_limb_t, GMP_NUMB_MASK / 5)))
#endif

#if GMP_NUMB_BITS % 6 == 0
#define mpn_divexact_by7(dst,src,size)                          \
  (7 & 1 * mpn_bdiv_dbm1 (dst, src, size, __GMP_CAST (mp_limb_t, GMP_NUMB_MASK / 7)))
#endif

#if GMP_NUMB_BITS % 6 == 0
#define mpn_divexact_by9(dst,src,size)                          \
  (15 & 7 * mpn_bdiv_dbm1 (dst, src, size, __GMP_CAST (mp_limb_t, GMP_NUMB_MASK / 9)))
#endif

#if GMP_NUMB_BITS % 10 == 0
#define mpn_divexact_by11(dst,src,size)                         \
  (15 & 5 * mpn_bdiv_dbm1 (dst, src, size, __GMP_CAST (mp_limb_t, GMP_NUMB_MASK / 11)))
#endif

#if GMP_NUMB_BITS % 12 == 0
#define mpn_divexact_by13(dst,src,size)                         \
  (15 & 3 * mpn_bdiv_dbm1 (dst, src, size, __GMP_CAST (mp_limb_t, GMP_NUMB_MASK / 13)))
#endif

#if GMP_NUMB_BITS % 4 == 0
#define mpn_divexact_by15(dst,src,size)                         \
  (15 & 1 * mpn_bdiv_dbm1 (dst, src, size, __GMP_CAST (mp_limb_t, GMP_NUMB_MASK / 15)))
#endif
- #define mpz_divexact_gcd __gmpz_divexact_gcd
- __GMP_DECLSPEC void mpz_divexact_gcd __GMP_PROTO ((mpz_ptr, mpz_srcptr, mpz_srcptr));
- #define mpz_inp_str_nowhite __gmpz_inp_str_nowhite
- #ifdef _GMP_H_HAVE_FILE
- __GMP_DECLSPEC size_t mpz_inp_str_nowhite __GMP_PROTO ((mpz_ptr, FILE *, int, int, size_t));
- #endif
- #define mpn_divisible_p __MPN(divisible_p)
- __GMP_DECLSPEC int mpn_divisible_p __GMP_PROTO ((mp_srcptr, mp_size_t, mp_srcptr, mp_size_t)) __GMP_ATTRIBUTE_PURE;
- #define mpn_rootrem __MPN(rootrem)
- __GMP_DECLSPEC mp_size_t mpn_rootrem __GMP_PROTO ((mp_ptr, mp_ptr, mp_srcptr, mp_size_t, mp_limb_t));
- #if defined (_CRAY)
- #define MPN_COPY_INCR(dst, src, n)
- do {
- int __i; /* Faster on some Crays with plain int */
- _Pragma ("_CRI ivdep");
- for (__i = 0; __i < (n); __i++)
- (dst)[__i] = (src)[__i];
- } while (0)
- #endif
- /* used by test programs, hence __GMP_DECLSPEC */
- #ifndef mpn_copyi /* if not done with cpuvec in a fat binary */
- #define mpn_copyi __MPN(copyi)
- __GMP_DECLSPEC void mpn_copyi __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t));
- #endif
- #if ! defined (MPN_COPY_INCR) && HAVE_NATIVE_mpn_copyi
- #define MPN_COPY_INCR(dst, src, size)
- do {
- ASSERT ((size) >= 0);
- ASSERT (MPN_SAME_OR_INCR_P (dst, src, size));
- mpn_copyi (dst, src, size);
- } while (0)
- #endif
- /* Copy N limbs from SRC to DST incrementing, N==0 allowed. */
- #if ! defined (MPN_COPY_INCR)
- #define MPN_COPY_INCR(dst, src, n)
- do {
- ASSERT ((n) >= 0);
- ASSERT (MPN_SAME_OR_INCR_P (dst, src, n));
- if ((n) != 0)
- {
- mp_size_t __n = (n) - 1;
- mp_ptr __dst = (dst);
- mp_srcptr __src = (src);
- mp_limb_t __x;
- __x = *__src++;
- if (__n != 0)
- {
- do
- {
- *__dst++ = __x;
- __x = *__src++;
- }
- while (--__n);
- }
- *__dst++ = __x;
- }
- } while (0)
- #endif
- #if defined (_CRAY)
- #define MPN_COPY_DECR(dst, src, n)
- do {
- int __i; /* Faster on some Crays with plain int */
- _Pragma ("_CRI ivdep");
- for (__i = (n) - 1; __i >= 0; __i--)
- (dst)[__i] = (src)[__i];
- } while (0)
- #endif
- /* used by test programs, hence __GMP_DECLSPEC */
- #ifndef mpn_copyd /* if not done with cpuvec in a fat binary */
- #define mpn_copyd __MPN(copyd)
- __GMP_DECLSPEC void mpn_copyd __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t));
- #endif
- #if ! defined (MPN_COPY_DECR) && HAVE_NATIVE_mpn_copyd
- #define MPN_COPY_DECR(dst, src, size)
- do {
- ASSERT ((size) >= 0);
- ASSERT (MPN_SAME_OR_DECR_P (dst, src, size));
- mpn_copyd (dst, src, size);
- } while (0)
- #endif
- /* Copy N limbs from SRC to DST decrementing, N==0 allowed. */
- #if ! defined (MPN_COPY_DECR)
- #define MPN_COPY_DECR(dst, src, n)
- do {
- ASSERT ((n) >= 0);
- ASSERT (MPN_SAME_OR_DECR_P (dst, src, n));
- if ((n) != 0)
- {
- mp_size_t __n = (n) - 1;
- mp_ptr __dst = (dst) + __n;
- mp_srcptr __src = (src) + __n;
- mp_limb_t __x;
- __x = *__src--;
- if (__n != 0)
- {
- do
- {
- *__dst-- = __x;
- __x = *__src--;
- }
- while (--__n);
- }
- *__dst-- = __x;
- }
- } while (0)
- #endif
- #ifndef MPN_COPY
- #define MPN_COPY(d,s,n)
- do {
- ASSERT (MPN_SAME_OR_SEPARATE_P (d, s, n));
- MPN_COPY_INCR (d, s, n);
- } while (0)
- #endif
/* Set {dst,size} to the limbs of {src,size} in reverse order.
   Operands must not overlap.  */
#define MPN_REVERSE(dst, src, size)                     \
  do {                                                  \
    mp_ptr     __dst = (dst);                           \
    mp_size_t  __size = (size);                         \
    mp_srcptr  __src = (src) + __size - 1;              \
    mp_size_t  __i;                                     \
    ASSERT ((size) >= 0);                               \
    ASSERT (! MPN_OVERLAP_P (dst, size, src, size));    \
    CRAY_Pragma ("_CRI ivdep");                         \
    for (__i = 0; __i < __size; __i++)                  \
      {                                                 \
        *__dst = *__src;                                \
        __dst++;                                        \
        __src--;                                        \
      }                                                 \
  } while (0)
/* Zero n limbs at dst.

   For power and powerpc we want an inline stu/bdnz loop for zeroing.  On
   ppc630 for instance this is optimal since it can sustain only 1 store per
   cycle.

   gcc 2.95.x (for powerpc64 -maix64, or powerpc32) doesn't recognise the
   "for" loop in the generic code below can become stu/bdnz.  The do/while
   here helps it get to that.  The same caveat about plain -mpowerpc64 mode
   applies here as to __GMPN_COPY_INCR in gmp.h.

   xlc 3.1 already generates stu/bdnz from the generic C, and does so from
   this loop too.

   Enhancement: GLIBC does some trickery with dcbz to zero whole cache lines
   at a time.  MPN_ZERO isn't all that important in GMP, so it might be more
   trouble than it's worth to do the same, though perhaps a call to memset
   would be good when on a GNU system.  */

#if HAVE_HOST_CPU_FAMILY_power || HAVE_HOST_CPU_FAMILY_powerpc
#define MPN_ZERO(dst, n)			\
  do {						\
    ASSERT ((n) >= 0);				\
    if ((n) != 0)				\
      {						\
	mp_ptr __dst = (dst) - 1;		\
	mp_size_t __n = (n);			\
	do					\
	  *++__dst = 0;				\
	while (--__n);				\
      }						\
  } while (0)
#endif

#ifndef MPN_ZERO
#define MPN_ZERO(dst, n)			\
  do {						\
    ASSERT ((n) >= 0);				\
    if ((n) != 0)				\
      {						\
	mp_ptr __dst = (dst);			\
	mp_size_t __n = (n);			\
	do					\
	  *__dst++ = 0;				\
	while (--__n);				\
      }						\
  } while (0)
#endif
/* On the x86s repe/scasl doesn't seem useful, since it takes many cycles to
   start up and would need to strip a lot of zeros before it'd be faster
   than a simple cmpl loop.  Here are some times in cycles for
   std/repe/scasl/cld and cld/repe/scasl (the latter would be for stripping
   low zeros).

		std   cld
	   P5    18    16
	   P6    46    38
	   K6    36    13
	   K7    21    20
*/
/* Strip high zero limbs: MPN_NORMALIZE may reduce NLIMBS to 0;
   the _NOT_ZERO variant asserts a non-zero limb exists.  */
#ifndef MPN_NORMALIZE
#define MPN_NORMALIZE(DST, NLIMBS) \
  do {									\
    while ((NLIMBS) > 0)						\
      {									\
	if ((DST)[(NLIMBS) - 1] != 0)					\
	  break;							\
	(NLIMBS)--;							\
      }									\
  } while (0)
#endif
#ifndef MPN_NORMALIZE_NOT_ZERO
#define MPN_NORMALIZE_NOT_ZERO(DST, NLIMBS)     \
  do {                                          \
    ASSERT ((NLIMBS) >= 1);                     \
    while (1)                                   \
      {                                         \
	if ((DST)[(NLIMBS) - 1] != 0)           \
	  break;                                \
	(NLIMBS)--;                             \
      }                                         \
  } while (0)
#endif
/* Strip least significant zero limbs from {ptr,size} by incrementing ptr
   and decrementing size.  low should be ptr[0], and will be the new ptr[0]
   on returning.  The number in {ptr,size} must be non-zero, ie. size!=0 and
   somewhere a non-zero limb.  */
#define MPN_STRIP_LOW_ZEROS_NOT_ZERO(ptr, size, low)    \
  do {                                                  \
    ASSERT ((size) >= 1);                               \
    ASSERT ((low) == (ptr)[0]);                         \
                                                        \
    while ((low) == 0)                                  \
      {                                                 \
        (size)--;                                       \
        ASSERT ((size) >= 1);                           \
        (ptr)++;                                        \
        (low) = *(ptr);                                 \
      }                                                 \
  } while (0)
/* Initialize X of type mpz_t with space for NLIMBS limbs.  X should be a
   temporary variable; it will be automatically cleared out at function
   return.  We use __x here to make it possible to accept both mpz_ptr and
   mpz_t arguments.  */
#define MPZ_TMP_INIT(X, NLIMBS)                                 \
  do {                                                          \
    mpz_ptr __x = (X);                                          \
    ASSERT ((NLIMBS) >= 1);                                     \
    __x->_mp_alloc = (NLIMBS);                                  \
    __x->_mp_d = TMP_ALLOC_LIMBS (NLIMBS);                      \
  } while (0)

/* Realloc for an mpz_t WHAT if it has less than NEEDED limbs.  */
#define MPZ_REALLOC(z,n) (UNLIKELY ((n) > ALLOC(z))     \
			  ? (mp_ptr) _mpz_realloc(z,n)  \
			  : PTR(z))

#define MPZ_EQUAL_1_P(z)  (SIZ(z)==1 && PTR(z)[0] == 1)
- /* MPN_FIB2_SIZE(n) is the size in limbs required by mpn_fib2_ui for fp and
- f1p.
- From Knuth vol 1 section 1.2.8, F[n] = phi^n/sqrt(5) rounded to the
- nearest integer, where phi=(1+sqrt(5))/2 is the golden ratio. So the
- number of bits required is n*log_2((1+sqrt(5))/2) = n*0.6942419.
- The multiplier used is 23/32=0.71875 for efficient calculation on CPUs
- without good floating point. There's +2 for rounding up, and a further
- +2 since at the last step x limbs are doubled into a 2x+1 limb region
- whereas the actual F[2k] value might be only 2x-1 limbs.
- Note that a division is done first, since on a 32-bit system it's at
- least conceivable to go right up to n==ULONG_MAX. (F[2^32-1] would be
- about 380Mbytes, plus temporary workspace of about 1.2Gbytes here and
- whatever a multiply of two 190Mbyte numbers takes.)
- Enhancement: When GMP_NUMB_BITS is not a power of 2 the division could be
- worked into the multiplier. */
- #define MPN_FIB2_SIZE(n)
- ((mp_size_t) ((n) / 32 * 23 / GMP_NUMB_BITS) + 4)
- /* FIB_TABLE(n) returns the Fibonacci number F[n]. Must have n in the range
- -1 <= n <= FIB_TABLE_LIMIT (that constant in fib_table.h).
- FIB_TABLE_LUCNUM_LIMIT (in fib_table.h) is the largest n for which L[n] =
- F[n] + 2*F[n-1] fits in a limb. */
- __GMP_DECLSPEC extern const mp_limb_t __gmp_fib_table[];
- #define FIB_TABLE(n) (__gmp_fib_table[(n)+1])
- #define SIEVESIZE 512 /* FIXME: Allow gmp_init_primesieve to choose */
- typedef struct
- {
- unsigned long d; /* current index in s[] */
- unsigned long s0; /* number corresponding to s[0] */
- unsigned long sqrt_s0; /* misnomer for sqrt(s[SIEVESIZE-1]) */
- unsigned char s[SIEVESIZE + 1]; /* sieve table */
- } gmp_primesieve_t;
- #define gmp_init_primesieve __gmp_init_primesieve
- __GMP_DECLSPEC void gmp_init_primesieve (gmp_primesieve_t *);
- #define gmp_nextprime __gmp_nextprime
- __GMP_DECLSPEC unsigned long int gmp_nextprime (gmp_primesieve_t *);
/* Multiplication algorithm thresholds, in limbs.  These are fallback
   defaults only: a CPU-specific gmp-mparam.h or the tune program normally
   pre-defines tuned values, which the #ifndef guards respect.  */
#ifndef MUL_TOOM22_THRESHOLD
#define MUL_TOOM22_THRESHOLD             30
#endif
#ifndef MUL_TOOM33_THRESHOLD
#define MUL_TOOM33_THRESHOLD            100
#endif
#ifndef MUL_TOOM44_THRESHOLD
#define MUL_TOOM44_THRESHOLD            300
#endif
#ifndef MUL_TOOM6H_THRESHOLD
#define MUL_TOOM6H_THRESHOLD            350
#endif
#ifndef SQR_TOOM6_THRESHOLD
#define SQR_TOOM6_THRESHOLD  MUL_TOOM6H_THRESHOLD
#endif
#ifndef MUL_TOOM8H_THRESHOLD
#define MUL_TOOM8H_THRESHOLD            450
#endif
#ifndef SQR_TOOM8_THRESHOLD
#define SQR_TOOM8_THRESHOLD  MUL_TOOM8H_THRESHOLD
#endif

/* Crossover points between the unbalanced Toom variants.  */
#ifndef MUL_TOOM32_TO_TOOM43_THRESHOLD
#define MUL_TOOM32_TO_TOOM43_THRESHOLD  100
#endif
#ifndef MUL_TOOM32_TO_TOOM53_THRESHOLD
#define MUL_TOOM32_TO_TOOM53_THRESHOLD  110
#endif
#ifndef MUL_TOOM42_TO_TOOM53_THRESHOLD
#define MUL_TOOM42_TO_TOOM53_THRESHOLD  100
#endif
#ifndef MUL_TOOM42_TO_TOOM63_THRESHOLD
#define MUL_TOOM42_TO_TOOM63_THRESHOLD  110
#endif
/* MUL_TOOM22_THRESHOLD_LIMIT is the maximum for MUL_TOOM22_THRESHOLD.  In a
   normal build MUL_TOOM22_THRESHOLD is a constant and we use that.  In a fat
   binary or tune program build MUL_TOOM22_THRESHOLD is a variable and a
   separate hard limit will have been defined.  Similarly for TOOM3.  */
#ifndef MUL_TOOM22_THRESHOLD_LIMIT
#define MUL_TOOM22_THRESHOLD_LIMIT  MUL_TOOM22_THRESHOLD
#endif
#ifndef MUL_TOOM33_THRESHOLD_LIMIT
#define MUL_TOOM33_THRESHOLD_LIMIT  MUL_TOOM33_THRESHOLD
#endif
#ifndef MULLO_BASECASE_THRESHOLD_LIMIT
#define MULLO_BASECASE_THRESHOLD_LIMIT  MULLO_BASECASE_THRESHOLD
#endif
/* SQR_BASECASE_THRESHOLD is where mpn_sqr_basecase should take over from
   mpn_mul_basecase.  Default is to use mpn_sqr_basecase from 0.  (Note that we
   certainly always want it if there's a native assembler mpn_sqr_basecase.)

   If it turns out that mpn_toom2_sqr becomes faster than mpn_mul_basecase
   before mpn_sqr_basecase does, then SQR_BASECASE_THRESHOLD is the toom2
   threshold and SQR_TOOM2_THRESHOLD is 0.  This oddity arises more or less
   because SQR_TOOM2_THRESHOLD represents the size up to which mpn_sqr_basecase
   should be used, and that may be never.  */
#ifndef SQR_BASECASE_THRESHOLD
#define SQR_BASECASE_THRESHOLD            0
#endif
#ifndef SQR_TOOM2_THRESHOLD
#define SQR_TOOM2_THRESHOLD              50
#endif
#ifndef SQR_TOOM3_THRESHOLD
#define SQR_TOOM3_THRESHOLD             120
#endif
#ifndef SQR_TOOM4_THRESHOLD
#define SQR_TOOM4_THRESHOLD             400
#endif

/* See comments above about MUL_TOOM33_THRESHOLD_LIMIT.  */
#ifndef SQR_TOOM3_THRESHOLD_LIMIT
#define SQR_TOOM3_THRESHOLD_LIMIT  SQR_TOOM3_THRESHOLD
#endif
/* Division, inversion and REDC thresholds, in limbs.  Fallback defaults
   only; a tuned gmp-mparam.h normally pre-defines these.  */
#ifndef DC_DIV_QR_THRESHOLD
#define DC_DIV_QR_THRESHOLD              50
#endif
#ifndef DC_DIVAPPR_Q_THRESHOLD
#define DC_DIVAPPR_Q_THRESHOLD          200
#endif
#ifndef DC_BDIV_QR_THRESHOLD
#define DC_BDIV_QR_THRESHOLD             50
#endif
#ifndef DC_BDIV_Q_THRESHOLD
#define DC_BDIV_Q_THRESHOLD             180
#endif
#ifndef DIVEXACT_JEB_THRESHOLD
#define DIVEXACT_JEB_THRESHOLD           25
#endif
#ifndef INV_MULMOD_BNM1_THRESHOLD
#define INV_MULMOD_BNM1_THRESHOLD  (5*MULMOD_BNM1_THRESHOLD)
#endif
#ifndef INV_APPR_THRESHOLD
#define INV_APPR_THRESHOLD  INV_NEWTON_THRESHOLD
#endif
#ifndef INV_NEWTON_THRESHOLD
#define INV_NEWTON_THRESHOLD            200
#endif
#ifndef BINV_NEWTON_THRESHOLD
#define BINV_NEWTON_THRESHOLD           300
#endif
#ifndef MU_DIVAPPR_Q_THRESHOLD
#define MU_DIVAPPR_Q_THRESHOLD         2000
#endif
#ifndef MU_DIV_QR_THRESHOLD
#define MU_DIV_QR_THRESHOLD            2000
#endif
#ifndef MUPI_DIV_QR_THRESHOLD
#define MUPI_DIV_QR_THRESHOLD           200
#endif
#ifndef MU_BDIV_Q_THRESHOLD
#define MU_BDIV_Q_THRESHOLD            2000
#endif
#ifndef MU_BDIV_QR_THRESHOLD
#define MU_BDIV_QR_THRESHOLD           2000
#endif
#ifndef MULMOD_BNM1_THRESHOLD
#define MULMOD_BNM1_THRESHOLD            16
#endif
#ifndef SQRMOD_BNM1_THRESHOLD
#define SQRMOD_BNM1_THRESHOLD            16
#endif
#ifndef MUL_TO_MULMOD_BNM1_FOR_2NXN_THRESHOLD
#define MUL_TO_MULMOD_BNM1_FOR_2NXN_THRESHOLD  (INV_MULMOD_BNM1_THRESHOLD/2)
#endif

/* With a native mpn_addmul_2 or mpn_redc_2 a 2-limb REDC step is available,
   giving a 1->2->N progression; otherwise go straight from the 1-limb REDC
   to the generic N-limb form.  */
#if HAVE_NATIVE_mpn_addmul_2 || HAVE_NATIVE_mpn_redc_2
#ifndef REDC_1_TO_REDC_2_THRESHOLD
#define REDC_1_TO_REDC_2_THRESHOLD       15
#endif
#ifndef REDC_2_TO_REDC_N_THRESHOLD
#define REDC_2_TO_REDC_N_THRESHOLD      100
#endif
#else
#ifndef REDC_1_TO_REDC_N_THRESHOLD
#define REDC_1_TO_REDC_N_THRESHOLD      100
#endif
#endif /* HAVE_NATIVE_mpn_addmul_2 || HAVE_NATIVE_mpn_redc_2 */
/* First k to use for an FFT modF multiply.  A modF FFT is an order
   log(2^k)/log(2^(k-1)) algorithm, so k=3 is merely 1.5 like karatsuba,
   whereas k=4 is 1.33 which is faster than toom3 at 1.485.    */
#define FFT_FIRST_K  4

/* Threshold at which FFT should be used to do a modF NxN -> N multiply. */
#ifndef MUL_FFT_MODF_THRESHOLD
#define MUL_FFT_MODF_THRESHOLD   (MUL_TOOM33_THRESHOLD * 3)
#endif
#ifndef SQR_FFT_MODF_THRESHOLD
#define SQR_FFT_MODF_THRESHOLD   (SQR_TOOM3_THRESHOLD * 3)
#endif

/* Threshold at which FFT should be used to do an NxN -> 2N multiply.  This
   will be a size where FFT is using k=7 or k=8, since an FFT-k used for an
   NxN->2N multiply and not recursing into itself is an order
   log(2^k)/log(2^(k-2)) algorithm, so it'll be at least k=7 at 1.39 which
   is the first better than toom3.  */
#ifndef MUL_FFT_THRESHOLD
#define MUL_FFT_THRESHOLD   (MUL_FFT_MODF_THRESHOLD * 10)
#endif
#ifndef SQR_FFT_THRESHOLD
#define SQR_FFT_THRESHOLD   (SQR_FFT_MODF_THRESHOLD * 10)
#endif
/* Table of thresholds for successive modF FFT "k"s.  The first entry is
   where FFT_FIRST_K+1 should be used, the second FFT_FIRST_K+2,
   etc.  See mpn_fft_best_k(). */
#ifndef MUL_FFT_TABLE
#define MUL_FFT_TABLE                           \
  { MUL_TOOM33_THRESHOLD * 4,   /* k=5 */       \
    MUL_TOOM33_THRESHOLD * 8,   /* k=6 */       \
    MUL_TOOM33_THRESHOLD * 16,  /* k=7 */       \
    MUL_TOOM33_THRESHOLD * 32,  /* k=8 */       \
    MUL_TOOM33_THRESHOLD * 96,  /* k=9 */       \
    MUL_TOOM33_THRESHOLD * 288, /* k=10 */      \
    0 }
#endif
#ifndef SQR_FFT_TABLE
#define SQR_FFT_TABLE                           \
  { SQR_TOOM3_THRESHOLD * 4,   /* k=5 */        \
    SQR_TOOM3_THRESHOLD * 8,   /* k=6 */        \
    SQR_TOOM3_THRESHOLD * 16,  /* k=7 */        \
    SQR_TOOM3_THRESHOLD * 32,  /* k=8 */        \
    SQR_TOOM3_THRESHOLD * 96,  /* k=9 */        \
    SQR_TOOM3_THRESHOLD * 288, /* k=10 */       \
    0 }
#endif
/* Packed (size, k) entry for FFT threshold tables: n is a size limit and
   k the FFT order to use up to that size; 27+5 = 32 bits total.  */
struct fft_table_nk
{
  unsigned int n:27;
  unsigned int k:5;
};

#ifndef FFT_TABLE_ATTRS
#define FFT_TABLE_ATTRS   static const
#endif

#define MPN_FFT_TABLE_SIZE  16
/* Redundant fallback: DC_DIV_QR_THRESHOLD is normally already defined
   earlier in this file, in which case the #ifndef keeps this inert.  */
#ifndef DC_DIV_QR_THRESHOLD
#define DC_DIV_QR_THRESHOLD    (3 * MUL_TOOM22_THRESHOLD)
#endif

/* String <-> mpn conversion thresholds, in limbs.  */
#ifndef GET_STR_DC_THRESHOLD
#define GET_STR_DC_THRESHOLD             18
#endif
#ifndef GET_STR_PRECOMPUTE_THRESHOLD
#define GET_STR_PRECOMPUTE_THRESHOLD     35
#endif
#ifndef SET_STR_DC_THRESHOLD
#define SET_STR_DC_THRESHOLD            750
#endif
#ifndef SET_STR_PRECOMPUTE_THRESHOLD
#define SET_STR_PRECOMPUTE_THRESHOLD   2000
#endif
/* Return non-zero if xp,xsize and yp,ysize overlap.
   If xp+xsize<=yp there's no overlap, or if yp+ysize<=xp there's no
   overlap.  If both these are false, there's an overlap. */
#define MPN_OVERLAP_P(xp, xsize, yp, ysize)             \
  ((xp) + (xsize) > (yp) && (yp) + (ysize) > (xp))
#define MEM_OVERLAP_P(xp, xsize, yp, ysize)             \
  (   (char *) (xp) + (xsize) > (char *) (yp)           \
   && (char *) (yp) + (ysize) > (char *) (xp))

/* Return non-zero if xp,xsize and yp,ysize are either identical or not
   overlapping.  Return zero if they're partially overlapping. */
#define MPN_SAME_OR_SEPARATE_P(xp, yp, size)            \
  MPN_SAME_OR_SEPARATE2_P(xp, size, yp, size)
#define MPN_SAME_OR_SEPARATE2_P(xp, xsize, yp, ysize)   \
  ((xp) == (yp) || ! MPN_OVERLAP_P (xp, xsize, yp, ysize))

/* Return non-zero if dst,dsize and src,ssize are either identical or
   overlapping in a way suitable for an incrementing/decrementing algorithm.
   Return zero if they're partially overlapping in an unsuitable fashion. */
#define MPN_SAME_OR_INCR2_P(dst, dsize, src, ssize)     \
  ((dst) <= (src) || ! MPN_OVERLAP_P (dst, dsize, src, ssize))
#define MPN_SAME_OR_INCR_P(dst, src, size)              \
  MPN_SAME_OR_INCR2_P(dst, size, src, size)
#define MPN_SAME_OR_DECR2_P(dst, dsize, src, ssize)     \
  ((dst) >= (src) || ! MPN_OVERLAP_P (dst, dsize, src, ssize))
#define MPN_SAME_OR_DECR_P(dst, src, size)              \
  MPN_SAME_OR_DECR2_P(dst, size, src, size)
- /* ASSERT() is a private assertion checking scheme, similar to <assert.h>.
- ASSERT() does the check only if WANT_ASSERT is selected, ASSERT_ALWAYS()
- does it always. Generally assertions are meant for development, but
- might help when looking for a problem later too.
- Note that strings shouldn't be used within the ASSERT expression,
- eg. ASSERT(strcmp(s,"notgood")!=0), since the quotes upset the "expr"
- used in the !HAVE_STRINGIZE case (ie. K&R). */
- #ifdef __LINE__
- #define ASSERT_LINE __LINE__
- #else
- #define ASSERT_LINE -1
- #endif
- #ifdef __FILE__
- #define ASSERT_FILE __FILE__
- #else
- #define ASSERT_FILE ""
- #endif
- __GMP_DECLSPEC void __gmp_assert_header __GMP_PROTO ((const char *, int));
- __GMP_DECLSPEC void __gmp_assert_fail __GMP_PROTO ((const char *, int, const char *)) ATTRIBUTE_NORETURN;
- #if HAVE_STRINGIZE
- #define ASSERT_FAIL(expr) __gmp_assert_fail (ASSERT_FILE, ASSERT_LINE, #expr)
- #else
- #define ASSERT_FAIL(expr) __gmp_assert_fail (ASSERT_FILE, ASSERT_LINE, "expr")
- #endif
/* Unconditional check; calls ASSERT_FAIL (which does not return) when the
   expression is false.  */
#define ASSERT_ALWAYS(expr)     \
  do {                          \
    if (!(expr))                \
      ASSERT_FAIL (expr);       \
  } while (0)

/* Checked only in WANT_ASSERT builds, otherwise a no-op statement.  */
#if WANT_ASSERT
#define ASSERT(expr)   ASSERT_ALWAYS (expr)
#else
#define ASSERT(expr)   do {} while (0)
#endif
/* ASSERT_CARRY checks the expression is non-zero, and ASSERT_NOCARRY checks
   that it's zero.  In both cases if assertion checking is disabled the
   expression is still evaluated.  These macros are meant for use with
   routines like mpn_add_n() where the return value represents a carry or
   whatever that should or shouldn't occur in some context.  For example,
   ASSERT_NOCARRY (mpn_add_n (rp, s1p, s2p, size)); */
#if WANT_ASSERT
#define ASSERT_CARRY(expr)     ASSERT_ALWAYS ((expr) != 0)
#define ASSERT_NOCARRY(expr)   ASSERT_ALWAYS ((expr) == 0)
#else
#define ASSERT_CARRY(expr)     (expr)
#define ASSERT_NOCARRY(expr)   (expr)
#endif

/* ASSERT_CODE includes code when assertion checking is wanted.  This is the
   same as writing "#if WANT_ASSERT", but more compact.  */
#if WANT_ASSERT
#define ASSERT_CODE(expr)  expr
#else
#define ASSERT_CODE(expr)
#endif
/* Test that an mpq_t is in fully canonical form.  This can be used as
   protection on routines like mpq_equal which give wrong results on
   non-canonical inputs.  */
#if WANT_ASSERT
#define ASSERT_MPQ_CANONICAL(q)                                 \
  do {                                                          \
    ASSERT (q->_mp_den._mp_size > 0);                           \
    if (q->_mp_num._mp_size == 0)                               \
      {                                                         \
        /* zero should be 0/1 */                                \
        ASSERT (mpz_cmp_ui (mpq_denref(q), 1L) == 0);           \
      }                                                         \
    else                                                        \
      {                                                         \
        /* no common factors */                                 \
        mpz_t  __g;                                             \
        mpz_init (__g);                                         \
        mpz_gcd (__g, mpq_numref(q), mpq_denref(q));            \
        ASSERT (mpz_cmp_ui (__g, 1) == 0);                      \
        mpz_clear (__g);                                        \
      }                                                         \
  } while (0)
#else
#define ASSERT_MPQ_CANONICAL(q)  do {} while (0)
#endif
/* Check that the nail parts are zero. */
#define ASSERT_ALWAYS_LIMB(limb)                        \
  do {                                                  \
    mp_limb_t  __nail = (limb) & GMP_NAIL_MASK;         \
    ASSERT_ALWAYS (__nail == 0);                        \
  } while (0)
#define ASSERT_ALWAYS_MPN(ptr, size)                    \
  do {                                                  \
    /* let whole loop go dead when no nails */          \
    if (GMP_NAIL_BITS != 0)                             \
      {                                                 \
        mp_size_t  __i;                                 \
        for (__i = 0; __i < (size); __i++)              \
          ASSERT_ALWAYS_LIMB ((ptr)[__i]);              \
      }                                                 \
  } while (0)
#if WANT_ASSERT
#define ASSERT_LIMB(limb)       ASSERT_ALWAYS_LIMB (limb)
#define ASSERT_MPN(ptr, size)   ASSERT_ALWAYS_MPN (ptr, size)
#else
#define ASSERT_LIMB(limb)       do {} while (0)
#define ASSERT_MPN(ptr, size)   do {} while (0)
#endif
/* Assert that an mpn region {ptr,size} is zero, or non-zero.
   size==0 is allowed, and in that case {ptr,size} considered to be zero.  */
#if WANT_ASSERT
#define ASSERT_MPN_ZERO_P(ptr,size)             \
  do {                                          \
    mp_size_t  __i;                             \
    ASSERT ((size) >= 0);                       \
    for (__i = 0; __i < (size); __i++)          \
      ASSERT ((ptr)[__i] == 0);                 \
  } while (0)
#define ASSERT_MPN_NONZERO_P(ptr,size)          \
  do {                                          \
    mp_size_t  __i;                             \
    int        __nonzero = 0;                   \
    ASSERT ((size) >= 0);                       \
    for (__i = 0; __i < (size); __i++)          \
      if ((ptr)[__i] != 0)                      \
        {                                       \
          __nonzero = 1;                        \
          break;                                \
        }                                       \
    ASSERT (__nonzero);                         \
  } while (0)
#else
#define ASSERT_MPN_ZERO_P(ptr,size)     do {} while (0)
#define ASSERT_MPN_NONZERO_P(ptr,size)  do {} while (0)
#endif
/* Generic C one's-complement of {s,n} into {d,n}, masked to the numb bits;
   used when there's no native assembler mpn_com.  */
#if ! HAVE_NATIVE_mpn_com
#undef  mpn_com
#define mpn_com(d,s,n)                                  \
  do {                                                  \
    mp_ptr     __d = (d);                               \
    mp_srcptr  __s = (s);                               \
    mp_size_t  __n = (n);                               \
    ASSERT (__n >= 1);                                  \
    ASSERT (MPN_SAME_OR_SEPARATE_P (__d, __s, __n));    \
    do                                                  \
      *__d++ = (~ *__s++) & GMP_NUMB_MASK;              \
    while (--__n);                                      \
  } while (0)
#endif
/* Apply `operation' (an expression in __a and __b) limb by limb over
   {up,n} and {vp,n}, storing to {rp,n}.  The pointers are advanced to the
   end and a negative index counted up so the loop needs only one
   induction variable.  */
#define MPN_LOGOPS_N_INLINE(rp, up, vp, n, operation)   \
  do {                                                  \
    mp_srcptr  __up = (up);                             \
    mp_srcptr  __vp = (vp);                             \
    mp_ptr     __rp = (rp);                             \
    mp_size_t  __n = (n);                               \
    mp_limb_t __a, __b;                                 \
    ASSERT (__n > 0);                                   \
    ASSERT (MPN_SAME_OR_SEPARATE_P (__rp, __up, __n));  \
    ASSERT (MPN_SAME_OR_SEPARATE_P (__rp, __vp, __n));  \
    __up += __n;                                        \
    __vp += __n;                                        \
    __rp += __n;                                        \
    __n = -__n;                                         \
    do {                                                \
      __a = __up[__n];                                  \
      __b = __vp[__n];                                  \
      __rp[__n] = operation;                            \
    } while (++__n);                                    \
  } while (0)
/* Generic C versions of the limb-wise logical operations, used where no
   native assembler version exists.  The complementing forms mask with
   GMP_NUMB_MASK to keep any nail bits zero.  */
#if ! HAVE_NATIVE_mpn_and_n
#undef  mpn_and_n
#define mpn_and_n(rp, up, vp, n) \
  MPN_LOGOPS_N_INLINE (rp, up, vp, n, __a & __b)
#endif

#if ! HAVE_NATIVE_mpn_andn_n
#undef  mpn_andn_n
#define mpn_andn_n(rp, up, vp, n) \
  MPN_LOGOPS_N_INLINE (rp, up, vp, n, __a & ~__b)
#endif

#if ! HAVE_NATIVE_mpn_nand_n
#undef  mpn_nand_n
#define mpn_nand_n(rp, up, vp, n) \
  MPN_LOGOPS_N_INLINE (rp, up, vp, n, ~(__a & __b) & GMP_NUMB_MASK)
#endif

#if ! HAVE_NATIVE_mpn_ior_n
#undef  mpn_ior_n
#define mpn_ior_n(rp, up, vp, n) \
  MPN_LOGOPS_N_INLINE (rp, up, vp, n, __a | __b)
#endif

#if ! HAVE_NATIVE_mpn_iorn_n
#undef  mpn_iorn_n
#define mpn_iorn_n(rp, up, vp, n) \
  MPN_LOGOPS_N_INLINE (rp, up, vp, n, (__a | ~__b) & GMP_NUMB_MASK)
#endif

#if ! HAVE_NATIVE_mpn_nior_n
#undef  mpn_nior_n
#define mpn_nior_n(rp, up, vp, n) \
  MPN_LOGOPS_N_INLINE (rp, up, vp, n, ~(__a | __b) & GMP_NUMB_MASK)
#endif

#if ! HAVE_NATIVE_mpn_xor_n
#undef  mpn_xor_n
#define mpn_xor_n(rp, up, vp, n) \
  MPN_LOGOPS_N_INLINE (rp, up, vp, n, __a ^ __b)
#endif

#if ! HAVE_NATIVE_mpn_xnor_n
#undef  mpn_xnor_n
#define mpn_xnor_n(rp, up, vp, n) \
  MPN_LOGOPS_N_INLINE (rp, up, vp, n, ~(__a ^ __b) & GMP_NUMB_MASK)
#endif
- #define mpn_trialdiv __MPN(trialdiv)
- __GMP_DECLSPEC mp_limb_t mpn_trialdiv __GMP_PROTO ((mp_srcptr, mp_size_t, mp_size_t, int *));
- #define mpn_remove __MPN(remove)
- __GMP_DECLSPEC mp_bitcnt_t mpn_remove __GMP_PROTO ((mp_ptr, mp_size_t *, mp_ptr, mp_size_t, mp_ptr, mp_size_t, mp_bitcnt_t));
/* ADDC_LIMB sets w=x+y and cout to 0 or 1 for a carry from that addition. */
#if GMP_NAIL_BITS == 0
#define ADDC_LIMB(cout, w, x, y)        \
  do {                                  \
    mp_limb_t  __x = (x);               \
    mp_limb_t  __y = (y);               \
    mp_limb_t  __w = __x + __y;         \
    (w) = __w;                          \
    (cout) = __w < __x;                 \
  } while (0)
#else
#define ADDC_LIMB(cout, w, x, y)        \
  do {                                  \
    mp_limb_t  __w;                     \
    ASSERT_LIMB (x);                    \
    ASSERT_LIMB (y);                    \
    __w = (x) + (y);                    \
    (w) = __w & GMP_NUMB_MASK;          \
    (cout) = __w >> GMP_NUMB_BITS;      \
  } while (0)
#endif
/* SUBC_LIMB sets w=x-y and cout to 0 or 1 for a borrow from that
   subtract.  */
#if GMP_NAIL_BITS == 0
#define SUBC_LIMB(cout, w, x, y)        \
  do {                                  \
    mp_limb_t  __x = (x);               \
    mp_limb_t  __y = (y);               \
    mp_limb_t  __w = __x - __y;         \
    (w) = __w;                          \
    (cout) = __w > __x;                 \
  } while (0)
#else
#define SUBC_LIMB(cout, w, x, y)        \
  do {                                  \
    mp_limb_t  __w = (x) - (y);         \
    (w) = __w & GMP_NUMB_MASK;          \
    (cout) = __w >> (GMP_LIMB_BITS-1);  \
  } while (0)
#endif
/* MPN_INCR_U does {ptr,size} += n, MPN_DECR_U does {ptr,size} -= n, both
   expecting no carry (or borrow) from that.

   The size parameter is only for the benefit of assertion checking.  In a
   normal build it's unused and the carry/borrow is just propagated as far
   as it needs to go.

   On random data, usually only one or two limbs of {ptr,size} get updated,
   so there's no need for any sophisticated looping, just something compact
   and sensible.

   FIXME: Switch all code from mpn_{incr,decr}_u to MPN_{INCR,DECR}_U,
   declaring their operand sizes, then remove the former.  This is purely
   for the benefit of assertion checking.  */

#if defined (__GNUC__) && HAVE_HOST_CPU_FAMILY_x86 && GMP_NAIL_BITS == 0 \
  && GMP_LIMB_BITS == 32 && ! defined (NO_ASM) && ! WANT_ASSERT
/* Better flags handling than the generic C gives on i386, saving a few
   bytes of code and maybe a cycle or two.  */

#define MPN_IORD_U(ptr, incr, aors)                     \
  do {                                                  \
    mp_ptr  __ptr_dummy;                                \
    if (__builtin_constant_p (incr) && (incr) == 1)     \
      {                                                 \
        __asm__ __volatile__                            \
          ("\n" ASM_L(top) ":\n"                        \
           "\t" aors " $1, (%0)\n"                      \
           "\tleal 4(%0),%0\n"                          \
           "\tjc " ASM_L(top)                           \
           : "=r" (__ptr_dummy)                         \
           : "0"  (ptr)                                 \
           : "memory");                                 \
      }                                                 \
    else                                                \
      {                                                 \
        __asm__ __volatile__                            \
          (   aors  " %2,(%0)\n"                        \
           "\tjnc " ASM_L(done) "\n"                    \
           ASM_L(top) ":\n"                             \
           "\t" aors " $1,4(%0)\n"                      \
           "\tleal 4(%0),%0\n"                          \
           "\tjc " ASM_L(top) "\n"                      \
           ASM_L(done) ":\n"                            \
           : "=r" (__ptr_dummy)                         \
           : "0"  (ptr),                                \
             "ri" (incr)                                \
           : "memory");                                 \
      }                                                 \
  } while (0)

#define MPN_INCR_U(ptr, size, incr)  MPN_IORD_U (ptr, incr, "addl")
#define MPN_DECR_U(ptr, size, incr)  MPN_IORD_U (ptr, incr, "subl")
#define mpn_incr_u(ptr, incr)  MPN_INCR_U (ptr, 0, incr)
#define mpn_decr_u(ptr, incr)  MPN_DECR_U (ptr, 0, incr)
#endif
- #if GMP_NAIL_BITS == 0
- #ifndef mpn_incr_u
/* Generic C increment: add incr to the limb at p, propagating any carry
   into successive limbs.  The incr==1 constant case is a bare ripple loop;
   otherwise add once and ripple only on overflow (__x < incr).  The caller
   guarantees the carry terminates within the operand.  */
#define mpn_incr_u(p,incr)                              \
  do {                                                  \
    mp_limb_t __x;                                      \
    mp_ptr __p = (p);                                   \
    if (__builtin_constant_p (incr) && (incr) == 1)     \
      {                                                 \
        while (++(*(__p++)) == 0)                       \
          ;                                             \
      }                                                 \
    else                                                \
      {                                                 \
        __x = *__p + (incr);                            \
        *__p = __x;                                     \
        if (__x < (incr))                               \
          while (++(*(++__p)) == 0)                     \
            ;                                           \
      }                                                 \
  } while (0)