The package rpms/ntl.git has added or updated architecture-specific content in its
spec file (ExclusiveArch/ExcludeArch or %ifarch/%ifnarch) in commit(s):
https://src.fedoraproject.org/cgit/rpms/ntl.git/commit/?id=5a278c9f008cdf....
Change:
-%ifarch x86_64
Thanks.
Full change:
============
commit 5a278c9f008cdf7a52e9da88c1ebe19e9557bea5
Author: Jerry James <loganjerry(a)gmail.com>
Date: Sat Oct 13 10:46:27 2018 -0600
ntl-11.3.0.
diff --git a/ntl-loadtime-cpu.patch b/ntl-loadtime-cpu.patch
index 3fbd660..b5bf12f 100644
--- a/ntl-loadtime-cpu.patch
+++ b/ntl-loadtime-cpu.patch
@@ -1,6 +1,6 @@
---- doc/config.txt.orig 2018-07-15 06:11:56.000000000 -0600
-+++ doc/config.txt 2018-07-31 13:58:29.512185407 -0600
-@@ -367,6 +367,7 @@ NTL_AVOID_BRANCHING=off
+--- doc/config.txt.orig 2018-08-17 12:24:27.000000000 -0600
++++ doc/config.txt 2018-10-08 15:09:32.776186425 -0600
+@@ -397,6 +397,7 @@ NTL_AVOID_BRANCHING=off
NTL_GF2X_NOINLINE=off
NTL_GF2X_ALTCODE=off
NTL_GF2X_ALTCODE1=off
@@ -8,7 +8,7 @@
GMP_INCDIR=$(GMP_PREFIX)/include
GMP_LIBDIR=$(GMP_PREFIX)/lib
-@@ -680,6 +681,10 @@ NTL_GF2X_ALTCODE1=off
+@@ -710,6 +711,10 @@ NTL_GF2X_ALTCODE1=off
# Yet another alternative implementation for GF2X multiplication.
@@ -19,11 +19,11 @@
########## More GMP Options:
---- include/NTL/config.h.orig 2018-07-15 06:11:56.000000000 -0600
-+++ include/NTL/config.h 2018-07-31 13:58:29.537185085 -0600
-@@ -517,6 +517,20 @@ to be defined. Of course, to unset a f
- #endif
+--- include/NTL/config.h.orig 2018-08-17 12:24:27.000000000 -0600
++++ include/NTL/config.h 2018-10-09 08:28:16.464126340 -0600
+@@ -513,6 +513,19 @@ to be defined. Of course, to unset a f
+ #endif
+#if 0
+#define NTL_LOADTIME_CPU
@@ -38,16 +38,15 @@
+ */
+
+#endif
-+
-
---- include/NTL/ctools.h.orig 2018-07-15 06:11:56.000000000 -0600
-+++ include/NTL/ctools.h 2018-07-31 13:58:29.537185085 -0600
-@@ -514,6 +514,155 @@ char *_ntl_make_aligned(char *p, long al
+ /* sanity checks */
+--- include/NTL/ctools.h.orig 2018-08-17 12:24:27.000000000 -0600
++++ include/NTL/ctools.h 2018-10-09 08:28:52.785432464 -0600
+@@ -518,6 +518,155 @@ char *_ntl_make_aligned(char *p, long al
+ // this should be big enough to satisfy any SIMD instructions,
// and it should also be as big as a cache line
-
+/* Determine CPU characteristics at runtime */
+#ifdef NTL_LOADTIME_CPU
+#if !defined(__x86_64__)
@@ -198,10 +197,10 @@
+ st type __attribute__((ifunc ("resolve_" #name))) name params
+#endif
- #ifdef NTL_HAVE_BUILTIN_CLZL
---- include/NTL/MatPrime.h.orig 2018-07-15 06:11:56.000000000 -0600
-+++ include/NTL/MatPrime.h 2018-07-31 13:58:29.538185072 -0600
+ #ifdef NTL_HAVE_BUILTIN_CLZL
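The BASE_FUNC/AVX_FUNC/PCLMUL_RESOLVER macros added to ctools.h above compile each
kernel several times and let the dynamic linker pick one through a GNU ifunc
resolver. A stand-alone model of that mechanism follows (GCC + glibc on x86_64
assumed; every name here is invented for illustration, none of it is NTL's code):

    #include <immintrin.h>

    extern "C" {

    static void mul4_base(double *x, const double *a, const double *b)
    {
        for (int i = 0; i < 4; i++) x[i] += a[i] * b[i];    // portable fallback
    }

    __attribute__((target("avx")))
    static void mul4_avx(double *x, const double *a, const double *b)
    {
        __m256d xv = _mm256_loadu_pd(x);                    // 4 doubles per step
        xv = _mm256_add_pd(xv, _mm256_mul_pd(_mm256_loadu_pd(a), _mm256_loadu_pd(b)));
        _mm256_storeu_pd(x, xv);
    }

    // Runs once while the library is being loaded; the chosen address is then
    // patched into the PLT, so later calls pay no per-call branch.
    static void (*resolve_mul4(void))(double *, const double *, const double *)
    {
        __builtin_cpu_init();                  // required this early in startup
        return __builtin_cpu_supports("avx") ? mul4_avx : mul4_base;
    }

    } // extern "C"

    static void mul4(double *, const double *, const double *)
        __attribute__((ifunc("resolve_mul4")));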
+--- include/NTL/MatPrime.h.orig 2018-08-17 12:24:27.000000000 -0600
++++ include/NTL/MatPrime.h 2018-10-08 15:13:01.606492380 -0600
@@ -20,7 +20,7 @@ NTL_OPEN_NNS
@@ -211,20 +210,20 @@
#define NTL_MatPrime_NBITS (23)
#else
#define NTL_MatPrime_NBITS NTL_SP_NBITS
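The 23-bit cap whenever the runtime-dispatched AVX kernels may be chosen looks like
the usual exact-double bound: residues under 2^23 keep a block dot product under
2^53. A quick check of that arithmetic (my reasoning, not from the patch; NTL's
MAT_BLK_SZ of 32 assumed):

    #include <cstdint>

    constexpr int      NBITS      = 23;
    constexpr int      MAT_BLK_SZ = 32;                     // NTL's block size
    constexpr double   MAX_EXACT  = 9007199254740991.0;     // 2^53 - 1
    constexpr uint64_t pmax       = (uint64_t(1) << NBITS) - 1;

    static_assert(double(MAT_BLK_SZ) * double(pmax) * double(pmax) <= MAX_EXACT,
                  "32 products of 23-bit residues stay exact in a double");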
---- include/NTL/REPORT_ALL_FEATURES.h.orig 2018-07-15 06:11:57.000000000 -0600
-+++ include/NTL/REPORT_ALL_FEATURES.h 2018-07-31 13:58:29.538185072 -0600
-@@ -51,3 +51,6 @@
+--- include/NTL/REPORT_ALL_FEATURES.h.orig 2018-08-17 12:24:27.000000000 -0600
++++ include/NTL/REPORT_ALL_FEATURES.h 2018-10-08 15:13:35.838865507 -0600
+@@ -55,3 +55,6 @@
std::cerr << "NTL_HAVE_POSIX_TIME\n";
#endif
+#ifdef NTL_LOADTIME_CPU
+ std::cerr << "NTL_LOADTIME_CPU\n";
+#endif
---- src/cfile.orig 2018-07-15 06:11:56.000000000 -0600
-+++ src/cfile 2018-07-31 13:58:29.538185072 -0600
-@@ -471,6 +471,20 @@ to be defined. Of course, to unset a f
- #elif @{NTL_GF2X_ALTCODE1}
- #define NTL_GF2X_ALTCODE1
+--- src/cfile.orig 2018-08-17 12:24:27.000000000 -0600
++++ src/cfile 2018-10-09 08:29:19.248926910 -0600
+@@ -513,6 +513,19 @@ to be defined. Of course, to unset a f
+
+ #endif
+#if @{NTL_LOADTIME_CPU}
+#define NTL_LOADTIME_CPU
@@ -239,25 +238,23 @@
+ */
+
+#endif
-+
- /*
- * Yet another alternative strategy for implementing GF2X
---- src/DispSettings.cpp.orig 2018-07-15 06:11:56.000000000 -0600
-+++ src/DispSettings.cpp 2018-07-31 13:58:29.539185060 -0600
-@@ -179,6 +179,10 @@ cout << "Performance Options:\n";
+
+ /* sanity checks */
+--- src/DispSettings.cpp.orig 2018-08-17 12:24:27.000000000 -0600
++++ src/DispSettings.cpp 2018-10-09 08:29:37.247583069 -0600
+@@ -187,6 +187,9 @@ cout << "Performance Options:\n";
cout << "NTL_GF2X_NOINLINE\n";
#endif
+#ifdef NTL_LOADTIME_CPU
+ cout << "NTL_LOADTIME_CPU\n";
+#endif
-+
cout << "***************************/\n";
cout << "\n\n";
---- src/DoConfig.orig 2018-07-15 06:11:56.000000000 -0600
-+++ src/DoConfig 2018-07-31 13:58:29.539185060 -0600
+--- src/DoConfig.orig 2018-08-17 12:24:27.000000000 -0600
++++ src/DoConfig 2018-10-09 08:30:06.200029962 -0600
@@ -1,6 +1,7 @@
# This is a perl script, invoked from a shell
@@ -266,7 +263,7 @@
system("echo '*** CompilerOutput.log ***' > CompilerOutput.log");
-@@ -90,6 +91,7 @@ system("echo '*** CompilerOutput.log ***
+@@ -92,6 +93,7 @@ system("echo '*** CompilerOutput.log ***
'NTL_GF2X_NOINLINE' => 'off',
'NTL_GF2X_ALTCODE' => 'off',
'NTL_GF2X_ALTCODE1' => 'off',
@@ -274,9 +271,9 @@
);
-@@ -222,6 +224,15 @@ if ($MakeFlag{'SHARED'} eq 'off') {
-
- }
+@@ -175,6 +177,14 @@ if ($MakeVal{'CXXFLAGS'} =~ '-march=') {
+ $MakeFlag{'NATIVE'} = 'off';
+ }
+# special processing: NTL_LOADTIME_CPU on x86/x86_64 only and => NTL_GF2X_NOINLINE
+
@@ -286,12 +283,22 @@
+ }
+ $ConfigFlag{'NTL_GF2X_NOINLINE'} = 'on';
+}
-+
+ # some special MakeVal values that are determined by SHARED
- }
---- src/GF2X1.cpp.orig 2018-07-15 06:11:56.000000000 -0600
-+++ src/GF2X1.cpp 2018-07-31 13:58:29.540185047 -0600
+--- src/GF2EX.cpp.orig 2018-08-17 12:24:26.000000000 -0600
++++ src/GF2EX.cpp 2018-10-08 16:30:16.185776322 -0600
+@@ -771,7 +771,7 @@ void mul(GF2EX& c, const GF2EX& a, const
+
+ if (GF2E::WordLength() <= 1) use_kron_mul = true;
+
+-#if (defined(NTL_GF2X_LIB) && defined(NTL_HAVE_PCLMUL))
++#if (defined(NTL_GF2X_LIB) && (defined(NTL_HAVE_PCLMUL) || defined(NTL_LOADTIME_CPU)))
+ // With gf2x library and pclmul, KronMul is better in a larger range, but
+ // it is very hard to characterize that range. The following is very
+ // conservative.
+--- src/GF2X1.cpp.orig 2018-08-17 12:24:26.000000000 -0600
++++ src/GF2X1.cpp 2018-10-08 15:17:28.540246907 -0600
@@ -18,7 +18,7 @@
// simple scaling factor for some crossover points:
// we use a lower crossover of the underlying multiplication
@@ -301,13 +308,13 @@
#define XOVER_SCALE (1L)
#else
#define XOVER_SCALE (2L)
---- src/GF2X.cpp.orig 2018-07-15 06:11:56.000000000 -0600
-+++ src/GF2X.cpp 2018-07-31 13:58:29.541185034 -0600
+--- src/GF2X.cpp.orig 2018-08-17 12:24:26.000000000 -0600
++++ src/GF2X.cpp 2018-10-09 08:31:10.814795569 -0600
@@ -27,6 +27,22 @@ pclmul_mul1 (unsigned long *c, unsigned
_mm_storeu_si128((__m128i*)c, _mm_clmulepi64_si128(aa, bb, 0));
}
-+#elif defined (NTL_LOADTIME_CPU)
++#elif defined(NTL_LOADTIME_CPU)
+
+#include <wmmintrin.h>
+
@@ -521,15 +528,14 @@
static
void Mul1_short(_ntl_ulong *cp, const _ntl_ulong *bp, long sb, _ntl_ulong a)
-@@ -677,10 +853,31 @@ NTL_EFF_SHORT_BB_MUL_CODE1
+@@ -677,9 +853,29 @@ NTL_EFF_SHORT_BB_MUL_CODE1
// warning #13200: No EMMS instruction before return
}
+#endif
-
-+#ifdef NTL_LOADTIME_CPU
++#ifdef NTL_LOADTIME_CPU
+BASE_FUNC(void,mul_half)(_ntl_ulong *c, _ntl_ulong a, _ntl_ulong b)
+{
@@ -540,20 +546,19 @@
+{
+ pclmul_mul1(c, a, b);
+}
-+
++
+AVX_FUNC(void,mul_half)(_ntl_ulong *c, _ntl_ulong a, _ntl_ulong b)
+{
+ pclmul_mul1(c, a, b);
+}
-+
++
+PCLMUL_RESOLVER(static,void,mul_half,(_ntl_ulong *c, _ntl_ulong a, _ntl_ulong b));
-+
++
+#else
-+
+
static
void mul_half(_ntl_ulong *c, _ntl_ulong a, _ntl_ulong b)
- {
-@@ -694,6 +891,7 @@ NTL_EFF_HALF_BB_MUL_CODE0
+@@ -694,6 +890,7 @@ NTL_EFF_HALF_BB_MUL_CODE0
}
@@ -561,7 +566,7 @@
// mul2...mul8 hard-code 2x2...8x8 word multiplies.
// I adapted these routines from LiDIA (except mul3, see below).
-@@ -1611,6 +1809,77 @@ static const _ntl_ulong sqrtab[256] = {
+@@ -1611,6 +1808,77 @@ static const _ntl_ulong sqrtab[256] = {
@@ -639,7 +644,7 @@
static inline
void sqr1(_ntl_ulong *c, _ntl_ulong a)
{
-@@ -1651,6 +1920,7 @@ void sqr(GF2X& c, const GF2X& a)
+@@ -1651,6 +1919,7 @@ void sqr(GF2X& c, const GF2X& a)
return;
}
@@ -647,9 +652,9 @@
void LeftShift(GF2X& c, const GF2X& a, long n)
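pclmul_mul1 above wraps the PCLMULQDQ carry-less multiply, and mul_half then gets a
baseline build, a PCLMUL build, and a resolver. A compilable sketch of the two
flavors (hypothetical names, LP64 assumed; not the patch's exact code):

    #include <emmintrin.h>   // SSE2 loads/stores
    #include <wmmintrin.h>   // _mm_clmulepi64_si128

    __attribute__((target("pclmul")))
    static void clmul1_pclmul(unsigned long *c, unsigned long a, unsigned long b)
    {
        __m128i aa = _mm_cvtsi64_si128((long long)a);
        __m128i bb = _mm_cvtsi64_si128((long long)b);
        // c[0], c[1] receive the 128-bit carry-less product of the low qwords
        _mm_storeu_si128((__m128i *)c, _mm_clmulepi64_si128(aa, bb, 0));
    }

    static void clmul1_base(unsigned long *c, unsigned long a, unsigned long b)
    {
        unsigned long lo = 0, hi = 0;
        for (int i = 0; i < 64; i++)             // schoolbook GF(2)[x] multiply
            if ((b >> i) & 1) {
                lo ^= a << i;
                if (i) hi ^= a >> (64 - i);
            }
        c[0] = lo; c[1] = hi;
    }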
---- src/InitSettings.cpp.orig 2018-07-15 06:11:56.000000000 -0600
-+++ src/InitSettings.cpp 2018-07-31 13:58:29.541185034 -0600
-@@ -172,6 +172,11 @@ int main()
+--- src/InitSettings.cpp.orig 2018-08-17 12:24:27.000000000 -0600
++++ src/InitSettings.cpp 2018-10-08 15:31:06.504619910 -0600
+@@ -184,6 +184,11 @@ int main()
cout << "NTL_RANGE_CHECK=0\n";
#endif
@@ -661,8 +666,8 @@
// the following are not actual config flags, but help
---- src/mat_lzz_p.cpp.orig 2018-07-15 06:11:56.000000000 -0600
-+++ src/mat_lzz_p.cpp 2018-07-31 13:58:29.543185008 -0600
+--- src/mat_lzz_p.cpp.orig 2018-08-17 12:24:27.000000000 -0600
++++ src/mat_lzz_p.cpp 2018-10-09 08:43:09.480066234 -0600
@@ -9,6 +9,15 @@
#ifdef NTL_HAVE_AVX
@@ -679,7 +684,7 @@
#endif
NTL_START_IMPL
-@@ -625,7 +634,7 @@ void mul(mat_zz_p& X, const mat_zz_p& A,
+@@ -634,7 +643,7 @@ void mul(mat_zz_p& X, const mat_zz_p& A,
#ifdef NTL_HAVE_LL_TYPE
@@ -688,7 +693,7 @@
#define MAX_DBL_INT ((1L << NTL_DOUBLE_PRECISION)-1)
// max int representable exactly as a double
-@@ -639,18 +648,120 @@ void mul(mat_zz_p& X, const mat_zz_p& A,
+@@ -648,10 +657,12 @@ void mul(mat_zz_p& X, const mat_zz_p& A,
// MUL_ADD(a, b, c): a += b*c
@@ -702,14 +707,19 @@
+#define MUL_ADD(a, b, c) AVX_MUL_ADD(a, b, c)
#endif
--static
--void muladd1_by_32(double *x, const double *a, const double *b, long n)
-+#ifdef NTL_LOADTIME_CPU
+
+@@ -931,6 +942,94 @@ void muladd3_by_16(double *x, const doub
+
+
+ #else
++#if defined(NTL_LOADTIME_CPU)
+
+AVX_FUNC(void,muladd1_by_32)
+(double *x, const double *a, const double *b, long n)
- {
-- __m256d avec, bvec;
++{
++ __m256d avec, bvec;
++
++
+ __m256d acc0=_mm256_load_pd(x + 0*4);
+ __m256d acc1=_mm256_load_pd(x + 1*4);
+ __m256d acc2=_mm256_load_pd(x + 2*4);
@@ -719,73 +729,10 @@
+ __m256d acc6=_mm256_load_pd(x + 6*4);
+ __m256d acc7=_mm256_load_pd(x + 7*4);
+
-+ long i = 0;
-+ for (; i <= n-4; i +=4) {
-+
-+ // the following code sequences are a bit faster than
-+ // just doing 4 _mm256_broadcast_sd's
-+ // it requires a to point to aligned storage, however
+
-+#if 1
-+ // this one seems slightly faster
-+ __m256d a0101 = _mm256_broadcast_pd((const __m128d*)(a+0));
-+ __m256d a2323 = _mm256_broadcast_pd((const __m128d*)(a+2));
-+#else
-+ __m256d avec = _mm256_load_pd(a);
-+ __m256d a0101 = _mm256_permute2f128_pd(avec, avec, 0);
-+ __m256d a2323 = _mm256_permute2f128_pd(avec, avec, 0x11);
-+
-+#endif
-+
-+ __m256d avec0 = _mm256_permute_pd(a0101, 0);
-+ __m256d avec1 = _mm256_permute_pd(a0101, 0xf);
-+ __m256d avec2 = _mm256_permute_pd(a2323, 0);
-+ __m256d avec3 = _mm256_permute_pd(a2323, 0xf);
-+
-+ a += 4;
-+
-+ __m256d bvec;
-+
-+ bvec = _mm256_load_pd(b); b += 4; AVX_MUL_ADD(acc0, avec0, bvec);
-+ bvec = _mm256_load_pd(b); b += 4; AVX_MUL_ADD(acc1, avec0, bvec);
-+ bvec = _mm256_load_pd(b); b += 4; AVX_MUL_ADD(acc2, avec0, bvec);
-+ bvec = _mm256_load_pd(b); b += 4; AVX_MUL_ADD(acc3, avec0, bvec);
-+ bvec = _mm256_load_pd(b); b += 4; AVX_MUL_ADD(acc4, avec0, bvec);
-+ bvec = _mm256_load_pd(b); b += 4; AVX_MUL_ADD(acc5, avec0, bvec);
-+ bvec = _mm256_load_pd(b); b += 4; AVX_MUL_ADD(acc6, avec0, bvec);
-+ bvec = _mm256_load_pd(b); b += 4; AVX_MUL_ADD(acc7, avec0, bvec);
-
-+ bvec = _mm256_load_pd(b); b += 4; AVX_MUL_ADD(acc0, avec1, bvec);
-+ bvec = _mm256_load_pd(b); b += 4; AVX_MUL_ADD(acc1, avec1, bvec);
-+ bvec = _mm256_load_pd(b); b += 4; AVX_MUL_ADD(acc2, avec1, bvec);
-+ bvec = _mm256_load_pd(b); b += 4; AVX_MUL_ADD(acc3, avec1, bvec);
-+ bvec = _mm256_load_pd(b); b += 4; AVX_MUL_ADD(acc4, avec1, bvec);
-+ bvec = _mm256_load_pd(b); b += 4; AVX_MUL_ADD(acc5, avec1, bvec);
-+ bvec = _mm256_load_pd(b); b += 4; AVX_MUL_ADD(acc6, avec1, bvec);
-+ bvec = _mm256_load_pd(b); b += 4; AVX_MUL_ADD(acc7, avec1, bvec);
-
-+ bvec = _mm256_load_pd(b); b += 4; AVX_MUL_ADD(acc0, avec2, bvec);
-+ bvec = _mm256_load_pd(b); b += 4; AVX_MUL_ADD(acc1, avec2, bvec);
-+ bvec = _mm256_load_pd(b); b += 4; AVX_MUL_ADD(acc2, avec2, bvec);
-+ bvec = _mm256_load_pd(b); b += 4; AVX_MUL_ADD(acc3, avec2, bvec);
-+ bvec = _mm256_load_pd(b); b += 4; AVX_MUL_ADD(acc4, avec2, bvec);
-+ bvec = _mm256_load_pd(b); b += 4; AVX_MUL_ADD(acc5, avec2, bvec);
-+ bvec = _mm256_load_pd(b); b += 4; AVX_MUL_ADD(acc6, avec2, bvec);
-+ bvec = _mm256_load_pd(b); b += 4; AVX_MUL_ADD(acc7, avec2, bvec);
-+
-+ bvec = _mm256_load_pd(b); b += 4; AVX_MUL_ADD(acc0, avec3, bvec);
-+ bvec = _mm256_load_pd(b); b += 4; AVX_MUL_ADD(acc1, avec3, bvec);
-+ bvec = _mm256_load_pd(b); b += 4; AVX_MUL_ADD(acc2, avec3, bvec);
-+ bvec = _mm256_load_pd(b); b += 4; AVX_MUL_ADD(acc3, avec3, bvec);
-+ bvec = _mm256_load_pd(b); b += 4; AVX_MUL_ADD(acc4, avec3, bvec);
-+ bvec = _mm256_load_pd(b); b += 4; AVX_MUL_ADD(acc5, avec3, bvec);
-+ bvec = _mm256_load_pd(b); b += 4; AVX_MUL_ADD(acc6, avec3, bvec);
-+ bvec = _mm256_load_pd(b); b += 4; AVX_MUL_ADD(acc7, avec3, bvec);
-+ }
++ for (long i = 0; i < n; i++) {
++ avec = _mm256_broadcast_sd(a); a++;
+
-+ for (; i < n; i++) {
-+ __m256d avec = _mm256_broadcast_sd(a); a++;
-+ __m256d bvec;
+
+ bvec = _mm256_load_pd(b); b += 4; AVX_MUL_ADD(acc0, avec, bvec);
+ bvec = _mm256_load_pd(b); b += 4; AVX_MUL_ADD(acc1, avec, bvec);
@@ -811,112 +758,9 @@
+FMA_FUNC(void,muladd1_by_32)
+(double *x, const double *a, const double *b, long n)
+{
- __m256d acc0=_mm256_load_pd(x + 0*4);
- __m256d acc1=_mm256_load_pd(x + 1*4);
- __m256d acc2=_mm256_load_pd(x + 2*4);
-@@ -660,10 +771,179 @@ void muladd1_by_32(double *x, const doub
- __m256d acc6=_mm256_load_pd(x + 6*4);
- __m256d acc7=_mm256_load_pd(x + 7*4);
-
-+ long i = 0;
-+ for (; i <= n-4; i +=4) {
-
-- for (long i = 0; i < n; i++) {
-- avec = _mm256_broadcast_sd(a); a++;
-+ // the following code sequences are a bit faster than
-+ // just doing 4 _mm256_broadcast_sd's
-+ // it requires a to point to aligned storage, however
-+
-+#if 1
-+ // this one seems slightly faster
-+ __m256d a0101 = _mm256_broadcast_pd((const __m128d*)(a+0));
-+ __m256d a2323 = _mm256_broadcast_pd((const __m128d*)(a+2));
-+#else
-+ __m256d avec = _mm256_load_pd(a);
-+ __m256d a0101 = _mm256_permute2f128_pd(avec, avec, 0);
-+ __m256d a2323 = _mm256_permute2f128_pd(avec, avec, 0x11);
-+
-+#endif
-+
-+ __m256d avec0 = _mm256_permute_pd(a0101, 0);
-+ __m256d avec1 = _mm256_permute_pd(a0101, 0xf);
-+ __m256d avec2 = _mm256_permute_pd(a2323, 0);
-+ __m256d avec3 = _mm256_permute_pd(a2323, 0xf);
-+
-+ a += 4;
-+
-+ __m256d bvec;
-+
-+ bvec = _mm256_load_pd(b); b += 4; FMA_MUL_ADD(acc0, avec0, bvec);
-+ bvec = _mm256_load_pd(b); b += 4; FMA_MUL_ADD(acc1, avec0, bvec);
-+ bvec = _mm256_load_pd(b); b += 4; FMA_MUL_ADD(acc2, avec0, bvec);
-+ bvec = _mm256_load_pd(b); b += 4; FMA_MUL_ADD(acc3, avec0, bvec);
-+ bvec = _mm256_load_pd(b); b += 4; FMA_MUL_ADD(acc4, avec0, bvec);
-+ bvec = _mm256_load_pd(b); b += 4; FMA_MUL_ADD(acc5, avec0, bvec);
-+ bvec = _mm256_load_pd(b); b += 4; FMA_MUL_ADD(acc6, avec0, bvec);
-+ bvec = _mm256_load_pd(b); b += 4; FMA_MUL_ADD(acc7, avec0, bvec);
-+
-+ bvec = _mm256_load_pd(b); b += 4; FMA_MUL_ADD(acc0, avec1, bvec);
-+ bvec = _mm256_load_pd(b); b += 4; FMA_MUL_ADD(acc1, avec1, bvec);
-+ bvec = _mm256_load_pd(b); b += 4; FMA_MUL_ADD(acc2, avec1, bvec);
-+ bvec = _mm256_load_pd(b); b += 4; FMA_MUL_ADD(acc3, avec1, bvec);
-+ bvec = _mm256_load_pd(b); b += 4; FMA_MUL_ADD(acc4, avec1, bvec);
-+ bvec = _mm256_load_pd(b); b += 4; FMA_MUL_ADD(acc5, avec1, bvec);
-+ bvec = _mm256_load_pd(b); b += 4; FMA_MUL_ADD(acc6, avec1, bvec);
-+ bvec = _mm256_load_pd(b); b += 4; FMA_MUL_ADD(acc7, avec1, bvec);
-+
-+ bvec = _mm256_load_pd(b); b += 4; FMA_MUL_ADD(acc0, avec2, bvec);
-+ bvec = _mm256_load_pd(b); b += 4; FMA_MUL_ADD(acc1, avec2, bvec);
-+ bvec = _mm256_load_pd(b); b += 4; FMA_MUL_ADD(acc2, avec2, bvec);
-+ bvec = _mm256_load_pd(b); b += 4; FMA_MUL_ADD(acc3, avec2, bvec);
-+ bvec = _mm256_load_pd(b); b += 4; FMA_MUL_ADD(acc4, avec2, bvec);
-+ bvec = _mm256_load_pd(b); b += 4; FMA_MUL_ADD(acc5, avec2, bvec);
-+ bvec = _mm256_load_pd(b); b += 4; FMA_MUL_ADD(acc6, avec2, bvec);
-+ bvec = _mm256_load_pd(b); b += 4; FMA_MUL_ADD(acc7, avec2, bvec);
-+
-+ bvec = _mm256_load_pd(b); b += 4; FMA_MUL_ADD(acc0, avec3, bvec);
-+ bvec = _mm256_load_pd(b); b += 4; FMA_MUL_ADD(acc1, avec3, bvec);
-+ bvec = _mm256_load_pd(b); b += 4; FMA_MUL_ADD(acc2, avec3, bvec);
-+ bvec = _mm256_load_pd(b); b += 4; FMA_MUL_ADD(acc3, avec3, bvec);
-+ bvec = _mm256_load_pd(b); b += 4; FMA_MUL_ADD(acc4, avec3, bvec);
-+ bvec = _mm256_load_pd(b); b += 4; FMA_MUL_ADD(acc5, avec3, bvec);
-+ bvec = _mm256_load_pd(b); b += 4; FMA_MUL_ADD(acc6, avec3, bvec);
-+ bvec = _mm256_load_pd(b); b += 4; FMA_MUL_ADD(acc7, avec3, bvec);
-+ }
-+
-+ for (; i < n; i++) {
-+ __m256d avec = _mm256_broadcast_sd(a); a++;
-+ __m256d bvec;
-+
-+ bvec = _mm256_load_pd(b); b += 4; FMA_MUL_ADD(acc0, avec, bvec);
-+ bvec = _mm256_load_pd(b); b += 4; FMA_MUL_ADD(acc1, avec, bvec);
-+ bvec = _mm256_load_pd(b); b += 4; FMA_MUL_ADD(acc2, avec, bvec);
-+ bvec = _mm256_load_pd(b); b += 4; FMA_MUL_ADD(acc3, avec, bvec);
-+ bvec = _mm256_load_pd(b); b += 4; FMA_MUL_ADD(acc4, avec, bvec);
-+ bvec = _mm256_load_pd(b); b += 4; FMA_MUL_ADD(acc5, avec, bvec);
-+ bvec = _mm256_load_pd(b); b += 4; FMA_MUL_ADD(acc6, avec, bvec);
-+ bvec = _mm256_load_pd(b); b += 4; FMA_MUL_ADD(acc7, avec, bvec);
-+ }
-+
-+
-+ _mm256_store_pd(x + 0*4, acc0);
-+ _mm256_store_pd(x + 1*4, acc1);
-+ _mm256_store_pd(x + 2*4, acc2);
-+ _mm256_store_pd(x + 3*4, acc3);
-+ _mm256_store_pd(x + 4*4, acc4);
-+ _mm256_store_pd(x + 5*4, acc5);
-+ _mm256_store_pd(x + 6*4, acc6);
-+ _mm256_store_pd(x + 7*4, acc7);
-+}
++ __m256d avec, bvec;
+
-+FMA_RESOLVER(static,void,muladd1_by_32,
-+ (double *x, const double *a, const double *b, long n));
+
-+#else
-+
-+static
-+void muladd1_by_32(double *x, const double *a, const double *b, long n)
-+{
+ __m256d acc0=_mm256_load_pd(x + 0*4);
+ __m256d acc1=_mm256_load_pd(x + 1*4);
+ __m256d acc2=_mm256_load_pd(x + 2*4);
@@ -926,125 +770,6 @@
+ __m256d acc6=_mm256_load_pd(x + 6*4);
+ __m256d acc7=_mm256_load_pd(x + 7*4);
+
-+ long i = 0;
-+ for (; i <= n-4; i +=4) {
-+
-+ // the following code sequences are a bit faster than
-+ // just doing 4 _mm256_broadcast_sd's
-+ // it requires a to point to aligned storage, however
-+
-+#if 1
-+ // this one seems slightly faster
-+ __m256d a0101 = _mm256_broadcast_pd((const __m128d*)(a+0));
-+ __m256d a2323 = _mm256_broadcast_pd((const __m128d*)(a+2));
-+#else
-+ __m256d avec = _mm256_load_pd(a);
-+ __m256d a0101 = _mm256_permute2f128_pd(avec, avec, 0);
-+ __m256d a2323 = _mm256_permute2f128_pd(avec, avec, 0x11);
-+
-+#endif
-+
-+ __m256d avec0 = _mm256_permute_pd(a0101, 0);
-+ __m256d avec1 = _mm256_permute_pd(a0101, 0xf);
-+ __m256d avec2 = _mm256_permute_pd(a2323, 0);
-+ __m256d avec3 = _mm256_permute_pd(a2323, 0xf);
-+
-+ a += 4;
-+
-+ __m256d bvec;
-+
-+ bvec = _mm256_load_pd(b); b += 4; MUL_ADD(acc0, avec0, bvec);
-+ bvec = _mm256_load_pd(b); b += 4; MUL_ADD(acc1, avec0, bvec);
-+ bvec = _mm256_load_pd(b); b += 4; MUL_ADD(acc2, avec0, bvec);
-+ bvec = _mm256_load_pd(b); b += 4; MUL_ADD(acc3, avec0, bvec);
-+ bvec = _mm256_load_pd(b); b += 4; MUL_ADD(acc4, avec0, bvec);
-+ bvec = _mm256_load_pd(b); b += 4; MUL_ADD(acc5, avec0, bvec);
-+ bvec = _mm256_load_pd(b); b += 4; MUL_ADD(acc6, avec0, bvec);
-+ bvec = _mm256_load_pd(b); b += 4; MUL_ADD(acc7, avec0, bvec);
-
-+ bvec = _mm256_load_pd(b); b += 4; MUL_ADD(acc0, avec1, bvec);
-+ bvec = _mm256_load_pd(b); b += 4; MUL_ADD(acc1, avec1, bvec);
-+ bvec = _mm256_load_pd(b); b += 4; MUL_ADD(acc2, avec1, bvec);
-+ bvec = _mm256_load_pd(b); b += 4; MUL_ADD(acc3, avec1, bvec);
-+ bvec = _mm256_load_pd(b); b += 4; MUL_ADD(acc4, avec1, bvec);
-+ bvec = _mm256_load_pd(b); b += 4; MUL_ADD(acc5, avec1, bvec);
-+ bvec = _mm256_load_pd(b); b += 4; MUL_ADD(acc6, avec1, bvec);
-+ bvec = _mm256_load_pd(b); b += 4; MUL_ADD(acc7, avec1, bvec);
-+
-+ bvec = _mm256_load_pd(b); b += 4; MUL_ADD(acc0, avec2, bvec);
-+ bvec = _mm256_load_pd(b); b += 4; MUL_ADD(acc1, avec2, bvec);
-+ bvec = _mm256_load_pd(b); b += 4; MUL_ADD(acc2, avec2, bvec);
-+ bvec = _mm256_load_pd(b); b += 4; MUL_ADD(acc3, avec2, bvec);
-+ bvec = _mm256_load_pd(b); b += 4; MUL_ADD(acc4, avec2, bvec);
-+ bvec = _mm256_load_pd(b); b += 4; MUL_ADD(acc5, avec2, bvec);
-+ bvec = _mm256_load_pd(b); b += 4; MUL_ADD(acc6, avec2, bvec);
-+ bvec = _mm256_load_pd(b); b += 4; MUL_ADD(acc7, avec2, bvec);
-+
-+ bvec = _mm256_load_pd(b); b += 4; MUL_ADD(acc0, avec3, bvec);
-+ bvec = _mm256_load_pd(b); b += 4; MUL_ADD(acc1, avec3, bvec);
-+ bvec = _mm256_load_pd(b); b += 4; MUL_ADD(acc2, avec3, bvec);
-+ bvec = _mm256_load_pd(b); b += 4; MUL_ADD(acc3, avec3, bvec);
-+ bvec = _mm256_load_pd(b); b += 4; MUL_ADD(acc4, avec3, bvec);
-+ bvec = _mm256_load_pd(b); b += 4; MUL_ADD(acc5, avec3, bvec);
-+ bvec = _mm256_load_pd(b); b += 4; MUL_ADD(acc6, avec3, bvec);
-+ bvec = _mm256_load_pd(b); b += 4; MUL_ADD(acc7, avec3, bvec);
-+ }
-+
-+ for (; i < n; i++) {
-+ __m256d avec = _mm256_broadcast_sd(a); a++;
-+ __m256d bvec;
-
- bvec = _mm256_load_pd(b); b += 4; MUL_ADD(acc0, avec, bvec);
- bvec = _mm256_load_pd(b); b += 4; MUL_ADD(acc1, avec, bvec);
-@@ -686,6 +966,75 @@ void muladd1_by_32(double *x, const doub
- _mm256_store_pd(x + 7*4, acc7);
- }
-
-+#endif
-+
-+#ifdef NTL_LOADTIME_CPU
-+
-+AVX_FUNC(void,muladd1_by_16)
-+(double *x, const double *a, const double *b, long n)
-+{
-+ __m256d avec, bvec;
-+
-+
-+ __m256d acc0=_mm256_load_pd(x + 0*4);
-+ __m256d acc1=_mm256_load_pd(x + 1*4);
-+ __m256d acc2=_mm256_load_pd(x + 2*4);
-+ __m256d acc3=_mm256_load_pd(x + 3*4);
-+
-+
-+ for (long i = 0; i < n; i++) {
-+ avec = _mm256_broadcast_sd(a); a++;
-+
-+
-+ bvec = _mm256_load_pd(b); b += 4; AVX_MUL_ADD(acc0, avec, bvec);
-+ bvec = _mm256_load_pd(b); b += 4; AVX_MUL_ADD(acc1, avec, bvec);
-+ bvec = _mm256_load_pd(b); b += 4; AVX_MUL_ADD(acc2, avec, bvec);
-+ bvec = _mm256_load_pd(b); b += 4; AVX_MUL_ADD(acc3, avec, bvec);
-+ b += 16;
-+ }
-+
-+
-+ _mm256_store_pd(x + 0*4, acc0);
-+ _mm256_store_pd(x + 1*4, acc1);
-+ _mm256_store_pd(x + 2*4, acc2);
-+ _mm256_store_pd(x + 3*4, acc3);
-+}
-+
-+FMA_FUNC(void,muladd1_by_16)
-+(double *x, const double *a, const double *b, long n)
-+{
-+ __m256d avec, bvec;
-+
-+
-+ __m256d acc0=_mm256_load_pd(x + 0*4);
-+ __m256d acc1=_mm256_load_pd(x + 1*4);
-+ __m256d acc2=_mm256_load_pd(x + 2*4);
-+ __m256d acc3=_mm256_load_pd(x + 3*4);
-+
+
+ for (long i = 0; i < n; i++) {
+ avec = _mm256_broadcast_sd(a); a++;
@@ -1054,7 +779,10 @@
+ bvec = _mm256_load_pd(b); b += 4; FMA_MUL_ADD(acc1, avec, bvec);
+ bvec = _mm256_load_pd(b); b += 4; FMA_MUL_ADD(acc2, avec, bvec);
+ bvec = _mm256_load_pd(b); b += 4; FMA_MUL_ADD(acc3, avec, bvec);
-+ b += 16;
++ bvec = _mm256_load_pd(b); b += 4; FMA_MUL_ADD(acc4, avec, bvec);
++ bvec = _mm256_load_pd(b); b += 4; FMA_MUL_ADD(acc5, avec, bvec);
++ bvec = _mm256_load_pd(b); b += 4; FMA_MUL_ADD(acc6, avec, bvec);
++ bvec = _mm256_load_pd(b); b += 4; FMA_MUL_ADD(acc7, avec, bvec);
+ }
+
+
@@ -1062,23 +790,27 @@
+ _mm256_store_pd(x + 1*4, acc1);
+ _mm256_store_pd(x + 2*4, acc2);
+ _mm256_store_pd(x + 3*4, acc3);
++ _mm256_store_pd(x + 4*4, acc4);
++ _mm256_store_pd(x + 5*4, acc5);
++ _mm256_store_pd(x + 6*4, acc6);
++ _mm256_store_pd(x + 7*4, acc7);
+}
+
-+FMA_RESOLVER(static,void,muladd1_by_16,
++FMA_RESOLVER(static,void,muladd1_by_32,
+ (double *x, const double *a, const double *b, long n));
+
+#else
-+
+
static
- void muladd1_by_16(double *x, const double *a, const double *b, long n)
- {
-@@ -716,6 +1065,165 @@ void muladd1_by_16(double *x, const doub
- _mm256_store_pd(x + 3*4, acc3);
+ void muladd1_by_32(double *x, const double *a, const double *b, long n)
+@@ -973,6 +1072,167 @@ void muladd1_by_32(double *x, const doub
+ _mm256_store_pd(x + 7*4, acc7);
}
+#endif
+
+#ifdef NTL_LOADTIME_CPU
++
+AVX_FUNC(void,muladd2_by_32)
+(double *x, const double *a, const double *b, long n)
+{
@@ -1235,10 +967,11 @@
+ (double *x, const double *a, const double *b, long n));
+
+#else
-
- // experiment: process two rows at a time
++
static
-@@ -794,6 +1302,211 @@ void muladd2_by_32(double *x, const doub
+ void muladd2_by_32(double *x, const double *a, const double *b, long n)
+ {
+@@ -1049,6 +1309,212 @@ void muladd2_by_32(double *x, const doub
}
@@ -1276,10 +1009,10 @@
+ avec1 = _mm256_broadcast_sd(&a[i+MAT_BLK_SZ]);
+ avec2 = _mm256_broadcast_sd(&a[i+2*MAT_BLK_SZ]);
+
-+ bvec = _mm256_load_pd(&b[i*MAT_BLK_SZ+0*4]); FMA_MUL_ADD(acc00, avec0, bvec); MUL_ADD(acc10, avec1, bvec); FMA_MUL_ADD(acc20, avec2, bvec);
-+ bvec = _mm256_load_pd(&b[i*MAT_BLK_SZ+1*4]); FMA_MUL_ADD(acc01, avec0, bvec); MUL_ADD(acc11, avec1, bvec); FMA_MUL_ADD(acc21, avec2, bvec);
-+ bvec = _mm256_load_pd(&b[i*MAT_BLK_SZ+2*4]); FMA_MUL_ADD(acc02, avec0, bvec); MUL_ADD(acc12, avec1, bvec); FMA_MUL_ADD(acc22, avec2, bvec);
-+ bvec = _mm256_load_pd(&b[i*MAT_BLK_SZ+3*4]); FMA_MUL_ADD(acc03, avec0, bvec); MUL_ADD(acc13, avec1, bvec); FMA_MUL_ADD(acc23, avec2, bvec);
++ bvec = _mm256_load_pd(&b[i*MAT_BLK_SZ+0*4]); FMA_MUL_ADD(acc00, avec0, bvec); FMA_MUL_ADD(acc10, avec1, bvec); FMA_MUL_ADD(acc20, avec2, bvec);
++ bvec = _mm256_load_pd(&b[i*MAT_BLK_SZ+1*4]); FMA_MUL_ADD(acc01, avec0, bvec); FMA_MUL_ADD(acc11, avec1, bvec); FMA_MUL_ADD(acc21, avec2, bvec);
++ bvec = _mm256_load_pd(&b[i*MAT_BLK_SZ+2*4]); FMA_MUL_ADD(acc02, avec0, bvec); FMA_MUL_ADD(acc12, avec1, bvec); FMA_MUL_ADD(acc22, avec2, bvec);
++ bvec = _mm256_load_pd(&b[i*MAT_BLK_SZ+3*4]); FMA_MUL_ADD(acc03, avec0, bvec); FMA_MUL_ADD(acc13, avec1, bvec); FMA_MUL_ADD(acc23, avec2, bvec);
+ }
+
+
@@ -1447,23 +1180,101 @@
+ (double *x, const double *a, const double *b, long n));
+
+#else
-
- // experiment: process three rows at a time
++
// NOTE: this makes things slower on an AVX1 platform --- not enough registers
-@@ -898,8 +1611,10 @@ void muladd3_by_32(double *x, const doub
+ // it could be faster on AVX2/FMA, where there should be enough registers
+ static
+@@ -1150,6 +1616,75 @@ void muladd3_by_32(double *x, const doub
}
--static
--void muladd2_by_16(double *x, const double *a, const double *b, long n)
+#endif
+
++#ifdef NTL_LOADTIME_CPU
++
++AVX_FUNC(void,muladd1_by_16)
++(double *x, const double *a, const double *b, long n)
++{
++ __m256d avec, bvec;
++
++
++ __m256d acc0=_mm256_load_pd(x + 0*4);
++ __m256d acc1=_mm256_load_pd(x + 1*4);
++ __m256d acc2=_mm256_load_pd(x + 2*4);
++ __m256d acc3=_mm256_load_pd(x + 3*4);
++
++
++ for (long i = 0; i < n; i++) {
++ avec = _mm256_broadcast_sd(a); a++;
++
++
++ bvec = _mm256_load_pd(b); b += 4; AVX_MUL_ADD(acc0, avec, bvec);
++ bvec = _mm256_load_pd(b); b += 4; AVX_MUL_ADD(acc1, avec, bvec);
++ bvec = _mm256_load_pd(b); b += 4; AVX_MUL_ADD(acc2, avec, bvec);
++ bvec = _mm256_load_pd(b); b += 4; AVX_MUL_ADD(acc3, avec, bvec);
++ b += 16;
++ }
++
++
++ _mm256_store_pd(x + 0*4, acc0);
++ _mm256_store_pd(x + 1*4, acc1);
++ _mm256_store_pd(x + 2*4, acc2);
++ _mm256_store_pd(x + 3*4, acc3);
++}
++
++FMA_FUNC(void,muladd1_by_16)
++(double *x, const double *a, const double *b, long n)
++{
++ __m256d avec, bvec;
++
++
++ __m256d acc0=_mm256_load_pd(x + 0*4);
++ __m256d acc1=_mm256_load_pd(x + 1*4);
++ __m256d acc2=_mm256_load_pd(x + 2*4);
++ __m256d acc3=_mm256_load_pd(x + 3*4);
++
++
++ for (long i = 0; i < n; i++) {
++ avec = _mm256_broadcast_sd(a); a++;
++
++
++ bvec = _mm256_load_pd(b); b += 4; FMA_MUL_ADD(acc0, avec, bvec);
++ bvec = _mm256_load_pd(b); b += 4; FMA_MUL_ADD(acc1, avec, bvec);
++ bvec = _mm256_load_pd(b); b += 4; FMA_MUL_ADD(acc2, avec, bvec);
++ bvec = _mm256_load_pd(b); b += 4; FMA_MUL_ADD(acc3, avec, bvec);
++ b += 16;
++ }
++
++
++ _mm256_store_pd(x + 0*4, acc0);
++ _mm256_store_pd(x + 1*4, acc1);
++ _mm256_store_pd(x + 2*4, acc2);
++ _mm256_store_pd(x + 3*4, acc3);
++}
++
++FMA_RESOLVER(static,void,muladd1_by_16,
++ (double *x, const double *a, const double *b, long n));
++
++#else
++
+ static
+ void muladd1_by_16(double *x, const double *a, const double *b, long n)
+ {
+@@ -1180,10 +1715,11 @@ void muladd1_by_16(double *x, const doub
+ _mm256_store_pd(x + 3*4, acc3);
+ }
+
++#endif
+
+
+-static
+-void muladd2_by_16(double *x, const double *a, const double *b, long n)
+static void __attribute__((target ("avx,pclmul")))
+muladd2_by_16(double *x, const double *a, const double *b, long n)
{
__m256d avec0, avec1, bvec;
__m256d acc00, acc01, acc02, acc03;
-@@ -922,10 +1637,10 @@ void muladd2_by_16(double *x, const doub
+@@ -1206,10 +1742,10 @@ void muladd2_by_16(double *x, const doub
avec0 = _mm256_broadcast_sd(&a[i]);
avec1 = _mm256_broadcast_sd(&a[i+MAT_BLK_SZ]);
@@ -1478,18 +1289,18 @@
}
-@@ -941,8 +1656,8 @@ void muladd2_by_16(double *x, const doub
-
+@@ -1226,8 +1762,8 @@ void muladd2_by_16(double *x, const doub
}
+
-static
-void muladd3_by_16(double *x, const double *a, const double *b, long n)
-+static void __attribute__((target ("fma,pclmul")))
++static void __attribute__((target("fma,pclmul")))
+muladd3_by_16(double *x, const double *a, const double *b, long n)
{
__m256d avec0, avec1, avec2, bvec;
__m256d acc00, acc01, acc02, acc03;
-@@ -972,10 +1687,10 @@ void muladd3_by_16(double *x, const doub
+@@ -1257,10 +1793,10 @@ void muladd3_by_16(double *x, const doub
avec1 = _mm256_broadcast_sd(&a[i+MAT_BLK_SZ]);
avec2 = _mm256_broadcast_sd(&a[i+2*MAT_BLK_SZ]);
@@ -1504,19 +1315,18 @@
}
-@@ -996,6 +1711,30 @@ void muladd3_by_16(double *x, const doub
+@@ -1289,6 +1825,29 @@ void muladd3_by_16(double *x, const doub
+
- }
+#ifdef NTL_LOADTIME_CPU
-+
+static inline
+void muladd_all_by_32(long first, long last, double *x, const double *a, const double *b, long n)
+{
+ long i = first;
+
+ if (have_fma) {
-+ // processing three rows at a time is faster
++ // process three rows at a time
+ for (; i <= last-3; i+=3)
+ muladd3_by_32(x + i*MAT_BLK_SZ, a + i*MAT_BLK_SZ, b, n);
+ for (; i < last; i++)
@@ -1535,7 +1345,7 @@
static inline
void muladd_all_by_32(long first, long last, double *x, const double *a, const double *b, long n)
{
-@@ -1015,6 +1754,30 @@ void muladd_all_by_32(long first, long l
+@@ -1308,6 +1867,30 @@ void muladd_all_by_32(long first, long l
#endif
}
@@ -1566,7 +1376,7 @@
static inline
void muladd_all_by_16(long first, long last, double *x, const double *a, const double *b, long n)
-@@ -1035,6 +1798,8 @@ void muladd_all_by_16(long first, long l
+@@ -1328,6 +1911,8 @@ void muladd_all_by_16(long first, long l
#endif
}
@@ -1575,7 +1385,7 @@
static inline
void muladd_all_by_32_width(long first, long last, double *x, const double *a, const double *b, long n, long width)
{
-@@ -1050,6 +1815,72 @@ void muladd_all_by_32_width(long first,
+@@ -1343,6 +1928,74 @@ void muladd_all_by_32_width(long first,
// this assumes n is a multiple of 16
@@ -1644,14 +1454,16 @@
+
+FMA_RESOLVER(static,void,muladd_interval,
+ (double * NTL_RESTRICT x, double * NTL_RESTRICT y, double c, long n));
++
+#else
++
static inline
void muladd_interval(double * NTL_RESTRICT x, double * NTL_RESTRICT y, double c, long n)
{
-@@ -1080,7 +1911,105 @@ void muladd_interval(double * NTL_RESTRI
- _mm256_store_pd(x + 3*4, xvec3);
+@@ -1374,6 +2027,106 @@ void muladd_interval(double * NTL_RESTRI
}
}
+
+#endif
+
+#ifdef NTL_LOADTIME_CPU
@@ -1704,7 +1516,7 @@
+FMA_FUNC(void,muladd_interval1)
+(double * NTL_RESTRICT x, double * NTL_RESTRICT y, double c, long n)
+{
-
++
+ __m256d xvec0, xvec1, xvec2, xvec3;
+ __m256d yvec0, yvec1, yvec2, yvec3;
+ __m256d cvec;
@@ -1751,18 +1563,19 @@
+ (double * NTL_RESTRICT x, double * NTL_RESTRICT y, double c, long n));
+
+#else
++
// this one is more general: does not assume that n is a
// multiple of 16
static inline
-@@ -1131,6 +2060,7 @@ void muladd_interval1(double * NTL_RESTR
-
+@@ -1422,6 +2175,7 @@ void muladd_interval1(double * NTL_RESTR
+ }
+ }
- #endif
+#endif
+ #endif
- //#define DO_MUL(a, b) ((unsigned long) (long(a)*long(b)))
-@@ -2716,10 +3646,10 @@ void alt_mul_LL(const mat_window_zz_p& X
+@@ -3009,10 +3763,10 @@ void alt_mul_LL(const mat_window_zz_p& X
}
@@ -1771,12 +1584,12 @@
-static
-void blk_mul_DD(const mat_window_zz_p& X,
-+static void __attribute__((target ("avx,pclmul")))
++static void __attribute__((target("avx,pclmul")))
+blk_mul_DD(const mat_window_zz_p& X,
const const_mat_window_zz_p& A, const const_mat_window_zz_p& B)
{
long n = A.NumRows();
-@@ -3058,12 +3988,13 @@ void mul_base (const mat_window_zz_p& X,
+@@ -3351,12 +4105,13 @@ void mul_base (const mat_window_zz_p& X,
long p = zz_p::modulus();
long V = MAT_BLK_SZ*4;
@@ -1792,7 +1605,7 @@
p-1 <= MAX_DBL_INT &&
V <= (MAX_DBL_INT-(p-1))/(p-1) &&
V*(p-1) <= (MAX_DBL_INT-(p-1))/(p-1))
-@@ -3158,7 +4089,8 @@ void mul_strassen(const mat_window_zz_p&
+@@ -3451,7 +4206,8 @@ void mul_strassen(const mat_window_zz_p&
// this code determines if mul_base triggers blk_mul_DD,
// in which case a higher crossover is used
@@ -1802,7 +1615,7 @@
{
long V = MAT_BLK_SZ*4;
long p = zz_p::modulus();
-@@ -3658,10 +4590,10 @@ void alt_inv_L(zz_p& d, mat_zz_p& X, con
+@@ -3950,10 +4706,10 @@ void alt_inv_L(zz_p& d, mat_zz_p& X, con
@@ -1811,12 +1624,12 @@
-static
-void alt_inv_DD(zz_p& d, mat_zz_p& X, const mat_zz_p& A, bool relax)
-+static void __attribute__((target ("avx,pclmul")))
++static void __attribute__((target("avx,pclmul")))
+alt_inv_DD(zz_p& d, mat_zz_p& X, const mat_zz_p& A, bool relax)
{
long n = A.NumRows();
-@@ -3827,10 +4759,10 @@ void alt_inv_DD(zz_p& d, mat_zz_p& X, co
+@@ -4118,10 +4874,10 @@ void alt_inv_DD(zz_p& d, mat_zz_p& X, co
@@ -1825,12 +1638,12 @@
-static
-void blk_inv_DD(zz_p& d, mat_zz_p& X, const mat_zz_p& A, bool relax)
-+static void __attribute__((target ("avx,pclmul")))
++static void __attribute__((target("avx,pclmul")))
+blk_inv_DD(zz_p& d, mat_zz_p& X, const mat_zz_p& A, bool relax)
{
long n = A.NumRows();
-@@ -4588,8 +5520,9 @@ void relaxed_inv(zz_p& d, mat_zz_p& X, c
+@@ -4879,8 +5635,9 @@ void relaxed_inv(zz_p& d, mat_zz_p& X, c
else if (n/MAT_BLK_SZ < 4) {
long V = 64;
@@ -1842,7 +1655,7 @@
V <= (MAX_DBL_INT-(p-1))/(p-1) &&
V*(p-1) <= (MAX_DBL_INT-(p-1))/(p-1)) {
-@@ -4614,8 +5547,9 @@ void relaxed_inv(zz_p& d, mat_zz_p& X, c
+@@ -4905,8 +5662,9 @@ void relaxed_inv(zz_p& d, mat_zz_p& X, c
else {
long V = 4*MAT_BLK_SZ;
@@ -1850,11 +1663,11 @@
- if (p-1 <= MAX_DBL_INT &&
+#if defined(NTL_HAVE_AVX) || defined(NTL_LOADTIME_CPU)
+ if (AVX_ACTIVE &&
-+ p-1 <= MAX_DBL_INT &&
++ p-1 <= MAX_DBL_INT &&
V <= (MAX_DBL_INT-(p-1))/(p-1) &&
V*(p-1) <= (MAX_DBL_INT-(p-1))/(p-1)) {
-@@ -5021,10 +5955,10 @@ void alt_tri_L(zz_p& d, const mat_zz_p&
+@@ -5312,10 +6070,10 @@ void alt_tri_L(zz_p& d, const mat_zz_p&
@@ -1863,12 +1676,12 @@
-static
-void alt_tri_DD(zz_p& d, const mat_zz_p& A, const vec_zz_p *bp,
-+static void __attribute__((target ("avx,pclmul")))
++static void __attribute__((target("avx,pclmul")))
+alt_tri_DD(zz_p& d, const mat_zz_p& A, const vec_zz_p *bp,
vec_zz_p *xp, bool trans, bool relax)
{
long n = A.NumRows();
-@@ -5211,10 +6145,10 @@ void alt_tri_DD(zz_p& d, const mat_zz_p&
+@@ -5502,10 +6260,10 @@ void alt_tri_DD(zz_p& d, const mat_zz_p&
@@ -1877,12 +1690,12 @@
-static
-void blk_tri_DD(zz_p& d, const mat_zz_p& A, const vec_zz_p *bp,
-+static void __attribute__((target ("avx,pclmul")))
++static void __attribute__((target("avx,pclmul")))
+blk_tri_DD(zz_p& d, const mat_zz_p& A, const vec_zz_p *bp,
vec_zz_p *xp, bool trans, bool relax)
{
long n = A.NumRows();
-@@ -6025,8 +6959,9 @@ void tri(zz_p& d, const mat_zz_p& A, con
+@@ -6316,8 +7074,9 @@ void tri(zz_p& d, const mat_zz_p& A, con
else if (n/MAT_BLK_SZ < 4) {
long V = 64;
@@ -1894,7 +1707,7 @@
V <= (MAX_DBL_INT-(p-1))/(p-1) &&
V*(p-1) <= (MAX_DBL_INT-(p-1))/(p-1)) {
-@@ -6051,8 +6986,9 @@ void tri(zz_p& d, const mat_zz_p& A, con
+@@ -6342,8 +7101,9 @@ void tri(zz_p& d, const mat_zz_p& A, con
else {
long V = 4*MAT_BLK_SZ;
@@ -1906,7 +1719,7 @@
V <= (MAX_DBL_INT-(p-1))/(p-1) &&
V*(p-1) <= (MAX_DBL_INT-(p-1))/(p-1)) {
-@@ -6298,7 +7234,7 @@ long elim_basic(const mat_zz_p& A, mat_z
+@@ -6589,7 +7349,7 @@ long elim_basic(const mat_zz_p& A, mat_z
#ifdef NTL_HAVE_LL_TYPE
@@ -1915,7 +1728,7 @@
static inline
-@@ -7751,8 +8687,9 @@ long elim(const mat_zz_p& A, mat_zz_p *i
+@@ -8042,8 +8802,9 @@ long elim(const mat_zz_p& A, mat_zz_p *i
else {
long V = 4*MAT_BLK_SZ;
@@ -1927,9 +1740,9 @@
V <= (MAX_DBL_INT-(p-1))/(p-1) &&
V*(p-1) <= (MAX_DBL_INT-(p-1))/(p-1)) {
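Throughout mat_lzz_p.cpp the patch routes MUL_ADD to either an AVX or an FMA
variant, with AVX_ACTIVE guarding the double-based kernels. The two variants
presumably reduce to something like this sketch (illustrative, not the patch's
exact macros):

    #include <immintrin.h>

    __attribute__((target("avx")))
    static __m256d muladd_avx(__m256d acc, __m256d a, __m256d b)
    {
        return _mm256_add_pd(acc, _mm256_mul_pd(a, b));   // vmulpd + vaddpd
    }

    __attribute__((target("fma")))
    static __m256d muladd_fma(__m256d acc, __m256d a, __m256d b)
    {
        return _mm256_fmadd_pd(a, b, acc);                // one fused vfmadd231pd
    }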
---- src/QuickTest.cpp.orig 2018-07-15 06:11:56.000000000 -0600
-+++ src/QuickTest.cpp 2018-07-31 13:58:29.544184995 -0600
-@@ -316,6 +316,9 @@ cerr << "Performance Options:\n";
+--- src/QuickTest.cpp.orig 2018-08-17 12:24:27.000000000 -0600
++++ src/QuickTest.cpp 2018-10-08 16:15:39.290232803 -0600
+@@ -324,6 +324,9 @@ cerr << "Performance Options:\n";
cerr << "NTL_GF2X_NOINLINE\n";
#endif
@@ -1939,8 +1752,8 @@
cerr << "\n\n";
---- src/WizardAux.orig 2018-07-15 06:11:56.000000000 -0600
-+++ src/WizardAux 2018-07-31 13:58:29.544184995 -0600
+--- src/WizardAux.orig 2018-08-17 12:24:27.000000000 -0600
++++ src/WizardAux 2018-10-08 16:16:09.609702059 -0600
@@ -89,6 +89,7 @@ system("$ARGV[0] InitSettings");
'NTL_GF2X_NOINLINE' => 0,
'NTL_FFT_BIGTAB' => 0,
@@ -1949,8 +1762,8 @@
'WIZARD_HACK' => '#define NTL_WIZARD_HACK',
---- src/ZZ.cpp.orig 2018-07-15 06:11:56.000000000 -0600
-+++ src/ZZ.cpp 2018-07-31 13:58:29.545184982 -0600
+--- src/ZZ.cpp.orig 2018-08-17 12:24:26.000000000 -0600
++++ src/ZZ.cpp 2018-10-09 08:32:28.926303337 -0600
@@ -14,6 +14,13 @@
#elif defined(NTL_HAVE_SSSE3)
#include <emmintrin.h>
@@ -1965,9 +1778,9 @@
#endif
-@@ -2344,6 +2351,591 @@ struct RandomStream_impl {
- };
+@@ -2343,6 +2350,590 @@ struct RandomStream_impl {
+ };
+#elif defined(NTL_LOADTIME_CPU)
+
@@ -2098,7 +1911,7 @@
+#define AVX2_RANSTREAM_BUFSZ (AVX2_RANSTREAM_NCHUNKS*AVX2_RANSTREAM_CHUNKSZ)
+
+static void allocate_space(AlignedArray<unsigned char> &state_store,
-+ AlignedArray<unsigned char> &buf_store)
++ AlignedArray<unsigned char> &buf_store)
+{
+ if (have_avx2) {
+ state_store.SetLength(AVX2_RANSTREAM_STATESZ);
@@ -2208,18 +2021,18 @@
+ for (i = 0; i <= n-64; i += 64) {
+ salsa20_apply(state, wdata);
+ for (j = 0; j < 16; j++)
-+ FROMLE(res + i + 4*j, wdata[j]);
++ FROMLE(res + i + 4*j, wdata[j]);
+ }
+
+ if (i < n) {
+ salsa20_apply(state, wdata);
+
+ for (j = 0; j < 16; j++)
-+ FROMLE(buf + 4*j, wdata[j]);
++ FROMLE(buf + 4*j, wdata[j]);
+
+ pos = n-i;
+ for (j = 0; j < pos; j++)
-+ res[i+j] = buf[j];
++ res[i+j] = buf[j];
+ }
+
+ return pos;
@@ -2265,24 +2078,24 @@
+ chunk_count |= SSSE3_RANSTREAM_NCHUNKS; // disable small buffer strategy
+
+ for (long j = 0; j < SSSE3_RANSTREAM_NCHUNKS; j++) {
-+ ssse3_ivec_t v0=d0, v1=d1, v2=d2, v3=d3;
-+ ssse3_ivec_t v4=d0, v5=d1, v6=d2, v7=SSSE3_ADD_VEC_64(d3, SSSE3_DELTA);
-+
-+ for (long k = 0; k < CHACHA_RNDS/2; k++) {
-+ SSSE3_DQROUND_VECTORS_VEC(v0,v1,v2,v3)
-+ SSSE3_DQROUND_VECTORS_VEC(v4,v5,v6,v7)
-+ }
-+
-+ SSSE3_WRITEU_VEC(res+i+j*(8*SSSE3_SZ_VEC), 0, SSSE3_ADD_VEC_32(v0,d0), SSSE3_ADD_VEC_32(v1,d1), SSSE3_ADD_VEC_32(v2,d2), SSSE3_ADD_VEC_32(v3,d3))
-+ d3 = SSSE3_ADD_VEC_64(d3, SSSE3_DELTA);
-+ SSSE3_WRITEU_VEC(res+i+j*(8*SSSE3_SZ_VEC), 4*SSSE3_SZ_VEC, SSSE3_ADD_VEC_32(v4,d0), SSSE3_ADD_VEC_32(v5,d1), SSSE3_ADD_VEC_32(v6,d2), SSSE3_ADD_VEC_32(v7,d3))
-+ d3 = SSSE3_ADD_VEC_64(d3, SSSE3_DELTA);
++ ssse3_ivec_t v0=d0, v1=d1, v2=d2, v3=d3;
++ ssse3_ivec_t v4=d0, v5=d1, v6=d2, v7=SSSE3_ADD_VEC_64(d3, SSSE3_DELTA);
++
++ for (long k = 0; k < CHACHA_RNDS/2; k++) {
++ SSSE3_DQROUND_VECTORS_VEC(v0,v1,v2,v3)
++ SSSE3_DQROUND_VECTORS_VEC(v4,v5,v6,v7)
++ }
++
++ SSSE3_WRITEU_VEC(res+i+j*(8*SSSE3_SZ_VEC), 0, SSSE3_ADD_VEC_32(v0,d0), SSSE3_ADD_VEC_32(v1,d1), SSSE3_ADD_VEC_32(v2,d2), SSSE3_ADD_VEC_32(v3,d3))
++ d3 = SSSE3_ADD_VEC_64(d3, SSSE3_DELTA);
++ SSSE3_WRITEU_VEC(res+i+j*(8*SSSE3_SZ_VEC), 4*SSSE3_SZ_VEC, SSSE3_ADD_VEC_32(v4,d0), SSSE3_ADD_VEC_32(v5,d1), SSSE3_ADD_VEC_32(v6,d2), SSSE3_ADD_VEC_32(v7,d3))
++ d3 = SSSE3_ADD_VEC_64(d3, SSSE3_DELTA);
+ }
+
+ }
+
+ if (i < n) {
-+
++
+ long nchunks;
+
+ if (chunk_count < SSSE3_RANSTREAM_NCHUNKS) {
@@ -2296,18 +2109,18 @@
+ buf += pos_offset;
+
+ for (long j = 0; j < nchunks; j++) {
-+ ssse3_ivec_t v0=d0, v1=d1, v2=d2, v3=d3;
-+ ssse3_ivec_t v4=d0, v5=d1, v6=d2, v7=SSSE3_ADD_VEC_64(d3, SSSE3_DELTA);
-+
-+ for (long k = 0; k < CHACHA_RNDS/2; k++) {
-+ SSSE3_DQROUND_VECTORS_VEC(v0,v1,v2,v3)
-+ SSSE3_DQROUND_VECTORS_VEC(v4,v5,v6,v7)
-+ }
-+
-+ SSSE3_WRITE_VEC(buf+j*(8*SSSE3_SZ_VEC), 0, SSSE3_ADD_VEC_32(v0,d0), SSSE3_ADD_VEC_32(v1,d1), SSSE3_ADD_VEC_32(v2,d2), SSSE3_ADD_VEC_32(v3,d3))
-+ d3 = SSSE3_ADD_VEC_64(d3, SSSE3_DELTA);
-+ SSSE3_WRITE_VEC(buf+j*(8*SSSE3_SZ_VEC), 4*SSSE3_SZ_VEC, SSSE3_ADD_VEC_32(v4,d0), SSSE3_ADD_VEC_32(v5,d1), SSSE3_ADD_VEC_32(v6,d2), SSSE3_ADD_VEC_32(v7,d3))
-+ d3 = SSSE3_ADD_VEC_64(d3, SSSE3_DELTA);
++ ssse3_ivec_t v0=d0, v1=d1, v2=d2, v3=d3;
++ ssse3_ivec_t v4=d0, v5=d1, v6=d2, v7=SSSE3_ADD_VEC_64(d3, SSSE3_DELTA);
++
++ for (long k = 0; k < CHACHA_RNDS/2; k++) {
++ SSSE3_DQROUND_VECTORS_VEC(v0,v1,v2,v3)
++ SSSE3_DQROUND_VECTORS_VEC(v4,v5,v6,v7)
++ }
++
++ SSSE3_WRITE_VEC(buf+j*(8*SSSE3_SZ_VEC), 0, SSSE3_ADD_VEC_32(v0,d0), SSSE3_ADD_VEC_32(v1,d1), SSSE3_ADD_VEC_32(v2,d2), SSSE3_ADD_VEC_32(v3,d3))
++ d3 = SSSE3_ADD_VEC_64(d3, SSSE3_DELTA);
++ SSSE3_WRITE_VEC(buf+j*(8*SSSE3_SZ_VEC), 4*SSSE3_SZ_VEC, SSSE3_ADD_VEC_32(v4,d0), SSSE3_ADD_VEC_32(v5,d1), SSSE3_ADD_VEC_32(v6,d2), SSSE3_ADD_VEC_32(v7,d3))
++ d3 = SSSE3_ADD_VEC_64(d3, SSSE3_DELTA);
+ }
+
+ pos = n-i+pos_offset;
@@ -2359,18 +2172,18 @@
+ chunk_count |= AVX2_RANSTREAM_NCHUNKS; // disable small buffer strategy
+
+ for (long j = 0; j < AVX2_RANSTREAM_NCHUNKS; j++) {
-+ avx2_ivec_t v0=d0, v1=d1, v2=d2, v3=d3;
-+ avx2_ivec_t v4=d0, v5=d1, v6=d2, v7=AVX2_ADD_VEC_64(d3, AVX2_DELTA);
-+
-+ for (long k = 0; k < CHACHA_RNDS/2; k++) {
-+ AVX2_DQROUND_VECTORS_VEC(v0,v1,v2,v3)
-+ AVX2_DQROUND_VECTORS_VEC(v4,v5,v6,v7)
-+ }
-+
-+ AVX2_WRITEU_VEC(res+i+j*(8*AVX2_SZ_VEC), 0, AVX2_ADD_VEC_32(v0,d0),
AVX2_ADD_VEC_32(v1,d1), AVX2_ADD_VEC_32(v2,d2), AVX2_ADD_VEC_32(v3,d3))
-+ d3 = AVX2_ADD_VEC_64(d3, AVX2_DELTA);
-+ AVX2_WRITEU_VEC(res+i+j*(8*AVX2_SZ_VEC), 4*AVX2_SZ_VEC, AVX2_ADD_VEC_32(v4,d0),
AVX2_ADD_VEC_32(v5,d1), AVX2_ADD_VEC_32(v6,d2), AVX2_ADD_VEC_32(v7,d3))
-+ d3 = AVX2_ADD_VEC_64(d3, AVX2_DELTA);
++ avx2_ivec_t v0=d0, v1=d1, v2=d2, v3=d3;
++ avx2_ivec_t v4=d0, v5=d1, v6=d2, v7=AVX2_ADD_VEC_64(d3, AVX2_DELTA);
++
++ for (long k = 0; k < CHACHA_RNDS/2; k++) {
++ AVX2_DQROUND_VECTORS_VEC(v0,v1,v2,v3)
++ AVX2_DQROUND_VECTORS_VEC(v4,v5,v6,v7)
++ }
++
++ AVX2_WRITEU_VEC(res+i+j*(8*AVX2_SZ_VEC), 0, AVX2_ADD_VEC_32(v0,d0), AVX2_ADD_VEC_32(v1,d1), AVX2_ADD_VEC_32(v2,d2), AVX2_ADD_VEC_32(v3,d3))
++ d3 = AVX2_ADD_VEC_64(d3, AVX2_DELTA);
++ AVX2_WRITEU_VEC(res+i+j*(8*AVX2_SZ_VEC), 4*AVX2_SZ_VEC, AVX2_ADD_VEC_32(v4,d0), AVX2_ADD_VEC_32(v5,d1), AVX2_ADD_VEC_32(v6,d2), AVX2_ADD_VEC_32(v7,d3))
++ d3 = AVX2_ADD_VEC_64(d3, AVX2_DELTA);
+ }
+
+ }
@@ -2390,18 +2203,18 @@
+ buf += pos_offset;
+
+ for (long j = 0; j < nchunks; j++) {
-+ avx2_ivec_t v0=d0, v1=d1, v2=d2, v3=d3;
-+ avx2_ivec_t v4=d0, v5=d1, v6=d2, v7=AVX2_ADD_VEC_64(d3, AVX2_DELTA);
-+
-+ for (long k = 0; k < CHACHA_RNDS/2; k++) {
-+ AVX2_DQROUND_VECTORS_VEC(v0,v1,v2,v3)
-+ AVX2_DQROUND_VECTORS_VEC(v4,v5,v6,v7)
-+ }
-+
-+ AVX2_WRITE_VEC(buf+j*(8*AVX2_SZ_VEC), 0, AVX2_ADD_VEC_32(v0,d0), AVX2_ADD_VEC_32(v1,d1), AVX2_ADD_VEC_32(v2,d2), AVX2_ADD_VEC_32(v3,d3))
-+ d3 = AVX2_ADD_VEC_64(d3, AVX2_DELTA);
-+ AVX2_WRITE_VEC(buf+j*(8*AVX2_SZ_VEC), 4*AVX2_SZ_VEC, AVX2_ADD_VEC_32(v4,d0), AVX2_ADD_VEC_32(v5,d1), AVX2_ADD_VEC_32(v6,d2), AVX2_ADD_VEC_32(v7,d3))
-+ d3 = AVX2_ADD_VEC_64(d3, AVX2_DELTA);
++ avx2_ivec_t v0=d0, v1=d1, v2=d2, v3=d3;
++ avx2_ivec_t v4=d0, v5=d1, v6=d2, v7=AVX2_ADD_VEC_64(d3, AVX2_DELTA);
++
++ for (long k = 0; k < CHACHA_RNDS/2; k++) {
++ AVX2_DQROUND_VECTORS_VEC(v0,v1,v2,v3)
++ AVX2_DQROUND_VECTORS_VEC(v4,v5,v6,v7)
++ }
++
++ AVX2_WRITE_VEC(buf+j*(8*AVX2_SZ_VEC), 0, AVX2_ADD_VEC_32(v0,d0), AVX2_ADD_VEC_32(v1,d1), AVX2_ADD_VEC_32(v2,d2), AVX2_ADD_VEC_32(v3,d3))
++ d3 = AVX2_ADD_VEC_64(d3, AVX2_DELTA);
++ AVX2_WRITE_VEC(buf+j*(8*AVX2_SZ_VEC), 4*AVX2_SZ_VEC, AVX2_ADD_VEC_32(v4,d0), AVX2_ADD_VEC_32(v5,d1), AVX2_ADD_VEC_32(v6,d2), AVX2_ADD_VEC_32(v7,d3))
++ d3 = AVX2_ADD_VEC_64(d3, AVX2_DELTA);
+ }
+
+ pos = n-i+pos_offset;
@@ -2527,7 +2340,7 @@
+ get_buf_len() const
+ {
+ if (have_avx2) {
-+ return AVX2_RANSTREAM_BUFSZ;
++ return AVX2_RANSTREAM_BUFSZ;
+ } else if (have_ssse3) {
+ return SSSE3_RANSTREAM_BUFSZ;
+ } else {
@@ -2544,7 +2357,7 @@
+ long n, long pos)
+ {
+ return randomstream_get_bytes(state, buf, state_store, buf_store,
-+ chunk_count, res, n, pos);
++ chunk_count, res, n, pos);
+ }
+
+ void
@@ -2553,7 +2366,6 @@
+ randomstream_set_nonce(state, state_store, chunk_count, nonce);
+ }
+};
-+
+
#else
- struct RandomStream_impl {
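The have_avx2/have_ssse3 flags consulted by this RandomStream_impl are set once at
load time. A minimal model of that detection and of the shape of get_buf_len()
(the buffer sizes below are invented placeholders, not the patch's
AVX2_/SSSE3_RANSTREAM_BUFSZ values):

    static bool have_ssse3 = false;
    static bool have_avx2  = false;

    __attribute__((constructor))
    static void init_cpu_flags()
    {
        __builtin_cpu_init();              // must precede __builtin_cpu_supports
        have_ssse3 = __builtin_cpu_supports("ssse3");
        have_avx2  = __builtin_cpu_supports("avx2");
    }

    static long ranstream_buf_len()        // cf. get_buf_len() in the hunk above
    {
        if (have_avx2)  return 512;        // widest ChaCha path, biggest chunks
        if (have_ssse3) return 256;
        return 64;                         // scalar fallback
    }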
diff --git a/ntl.spec b/ntl.spec
index 5d1c648..d33938f 100644
--- a/ntl.spec
+++ b/ntl.spec
@@ -10,7 +10,7 @@
Summary: High-performance algorithms for vectors, matrices, and polynomials
Name: ntl
-Version: 11.2.1
+Version: 11.3.0
Release: 1%{?dist}
License: LGPLv2+
@@ -64,8 +64,7 @@ Requires: %{name}-devel%{?_isa} = %{version}-%{release}
%prep
-%setup -q
-%patch0
+%autosetup -p0
%build
@@ -96,10 +95,7 @@ make -C src V=1
%check
-# skip on non-x86_64, takes a *long, long, long* (days?) time -- Rex
-%ifarch x86_64
make -C src check
-%endif
%install
@@ -138,7 +134,7 @@ done
%files
%doc README
%license doc/copying.txt
-%{_libdir}/libntl.so.38*
+%{_libdir}/libntl.so.39*
%files devel
%doc doc/*
@@ -152,6 +148,9 @@ done
%changelog
+* Fri Oct 5 2018 Jerry James <loganjerry(a)gmail.com> - 11.3.0-1
+- ntl-11.3.0
+
* Fri Aug 10 2018 Jerry James <loganjerry(a)gmail.com> - 11.2.1-1
- ntl-11.2.1
diff --git a/sources b/sources
index 3890d06..28c827b 100644
--- a/sources
+++ b/sources
@@ -1 +1 @@
-SHA512 (ntl-11.2.1.tar.gz) = edb41309a03f5e151a8c66c5acca164b7d6346ab7b8fd9c8ae6aa05da4155c9e89026efa59b52492713774661edc39012c4d33d6315d2bb287e14c2292e5f0bb
+SHA512 (ntl-11.3.0.tar.gz) = 0c812e7355b7c01dcfc00093a009fa04de92e9c8c2b2431ce45ca3fafda52813fb62a35b4c8e950d63e0278a78b1bc4d9ebe806868978252e15a6dea293bd3f6