NetBSD-Bugs archive

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index][Old Index]

bin/59093: compile errors for userland on amd64 with gcc and -march=native



>Number:         59093
>Category:       bin
>Synopsis:       Subject: compile errors for userland on amd64 with gcc and -march=native
>Confidential:   no
>Severity:       non-critical
>Priority:       low
>Responsible:    bin-bug-people
>State:          open
>Class:          sw-bug
>Submitter-Id:   net
>Arrival-Date:   Sat Feb 22 11:30:01 +0000 2025
>Originator:     Onno van der Linden <o.vd.linden%quicknet.nl@localhost>
>Release:        NetBSD 10.99.12
>Organization:
>Environment:
System: NetBSD sheep 10.99.12 NetBSD 10.99.12 (SHEEP) #0: Sun Feb 16 23:42:11 CET 2025 onno@sheep:/usr/src/sys/arch/amd64/compile/SHEEP amd64
Architecture: x86_64
Machine: amd64
>Description:
compiling userland on amd64 with gcc and the -march=native compiler option
results in compile errors in libm for libm/ld80/e_lgammal_r.c and in zstd
for most of the files in lib.
>How-To-Repeat:
Add CPUFLAGS=-march=native in /etc/mk.conf on an amd64 machine and compile
libm and zstd
>Fix:
Fix for e_lgammal_r.c
--- /usr/src/lib/libm/ld80/e_lgammal_r.c.orig	2025-02-21 19:19:42.227612980 +0100
+++ /usr/src/lib/libm/ld80/e_lgammal_r.c	2025-02-21 19:28:51.125599005 +0100
@@ -242,7 +242,7 @@
 long double
 lgammal_r(long double x, int *signgamp)
 {
-	long double nadj,p,p1,p2,q,r,t,w,y,z;
+	long double nadj = 0, p,p1,p2,q,r,t,w,y,z;
 	uint64_t lx;
 	int i;
 	uint16_t hx,ix;

Fix for zstd:
--- /usr/src/external/bsd/zstd/dist/lib/common/bitstream.h	2025-02-22 11:30:31.613540814 +0100
+++ /usr/src/external/bsd/zstd/dist/lib/common/bitstream.h.new	2025-02-22 11:20:41.986663437 +0100
@@ -38,7 +38,10 @@
 =========================================*/
 #ifndef ZSTD_NO_INTRINSICS
 #  if (defined(__BMI__) || defined(__BMI2__)) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
 #    include <immintrin.h>   /* support for bextr (experimental)/bzhi */
+#pragma GCC diagnostic pop
 #  elif defined(__ICCARM__)
 #    include <intrinsics.h>
 #  endif

Alternative fix for zstd is to fix the gcc headers and make them look like
the llvm headers by adding const to several type casts. This doesn't work
for _mm256_stream_load_si256 because __builtin_ia32_movntdqa256 has
a non-const argument. Possible fixes in that case are #pragma, __UNCONST or
changing the argument of __builtin_ia32_movntdqa256 to a const. I opted for
the first one.

--- /usr/src/external/gpl3/gcc/dist/gcc/config/i386/avx2intrin.h	2023-07-30 10:11:55.590996415 +0200
+++ /usr/src/external/gpl3/gcc/dist/gcc/config/i386/avx2intrin.h.new	2025-02-22 11:07:03.868781092 +0100
@@ -919,7 +919,14 @@
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm256_stream_load_si256 (__m256i const *__X)
 {
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
   return (__m256i) __builtin_ia32_movntdqa256 ((__v4di *) __X);
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
 }
 
 extern __inline __m128
--- /usr/src/external/gpl3/gcc/dist/gcc/config/i386/avx512bwintrin.h	2023-07-30 10:11:55.611241653 +0200
+++ /usr/src/external/gpl3/gcc/dist/gcc/config/i386/avx512bwintrin.h.new	2025-02-22 11:07:03.873160880 +0100
@@ -309,7 +309,7 @@
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm512_loadu_epi16 (void const *__P)
 {
-  return (__m512i) (*(__v32hi_u *) __P);
+  return (__m512i) (*(const __v32hi_u *) __P);
 }
 
 extern __inline __m512i
@@ -402,7 +402,7 @@
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm512_loadu_epi8 (void const *__P)
 {
-  return (__m512i) (*(__v64qi_u *) __P);
+  return (__m512i) (*(const __v64qi_u *) __P);
 }
 
 extern __inline __m512i
--- /usr/src/external/gpl3/gcc/dist/gcc/config/i386/avx512fintrin.h	2023-07-30 10:11:55.722942211 +0200
+++ /usr/src/external/gpl3/gcc/dist/gcc/config/i386/avx512fintrin.h.new	2025-02-22 11:07:03.878562205 +0100
@@ -387,7 +387,7 @@
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm512_load_pd (void const *__P)
 {
-  return *(__m512d *) __P;
+  return *(const __m512d *) __P;
 }
 
 extern __inline __m512d
@@ -428,7 +428,7 @@
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm512_load_ps (void const *__P)
 {
-  return *(__m512 *) __P;
+  return *(const __m512 *) __P;
 }
 
 extern __inline __m512
@@ -488,7 +488,7 @@
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm512_load_epi64 (void const *__P)
 {
-  return *(__m512i *) __P;
+  return *(const __m512i *) __P;
 }
 
 extern __inline __m512i
@@ -548,14 +548,14 @@
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm512_load_si512 (void const *__P)
 {
-  return *(__m512i *) __P;
+  return *(const __m512i *) __P;
 }
 
 extern __inline __m512i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm512_load_epi32 (void const *__P)
 {
-  return *(__m512i *) __P;
+  return *(const __m512i *) __P;
 }
 
 extern __inline __m512i
@@ -6298,7 +6298,7 @@
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm512_loadu_pd (void const *__P)
 {
-  return *(__m512d_u *)__P;
+  return *(const __m512d_u *)__P;
 }
 
 extern __inline __m512d
@@ -6339,7 +6339,7 @@
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm512_loadu_ps (void const *__P)
 {
-  return *(__m512_u *)__P;
+  return *(const __m512_u *)__P;
 }
 
 extern __inline __m512
@@ -6457,7 +6457,7 @@
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm512_loadu_epi64 (void const *__P)
 {
-  return *(__m512i_u *) __P;
+  return *(const __m512i_u *) __P;
 }
 
 extern __inline __m512i
@@ -6498,14 +6498,14 @@
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm512_loadu_si512 (void const *__P)
 {
-  return *(__m512i_u *)__P;
+  return *(const __m512i_u *)__P;
 }
 
 extern __inline __m512i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm512_loadu_epi32 (void const *__P)
 {
-  return *(__m512i_u *) __P;
+  return *(const __m512i_u *) __P;
 }
 
 extern __inline __m512i
--- /usr/src/external/gpl3/gcc/dist/gcc/config/i386/avx512vlbwintrin.h	2023-07-30 10:11:56.697259945 +0200
+++ /usr/src/external/gpl3/gcc/dist/gcc/config/i386/avx512vlbwintrin.h.new	2025-02-22 11:07:03.882921173 +0100
@@ -118,7 +118,7 @@
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm256_loadu_epi16 (void const *__P)
 {
-  return (__m256i) (*(__v16hi_u *) __P);
+  return (__m256i) (*(const __v16hi_u *) __P);
 }
 
 extern __inline __m256i
@@ -144,7 +144,7 @@
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm_loadu_epi16 (void const *__P)
 {
-  return (__m128i) (*(__v8hi_u *) __P);
+  return (__m128i) (*(const __v8hi_u *) __P);
 }
 
 extern __inline __m128i
@@ -209,7 +209,7 @@
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm256_loadu_epi8 (void const *__P)
 {
-  return (__m256i) (*(__v32qi_u *) __P);
+  return (__m256i) (*(const __v32qi_u *) __P);
 }
 
 extern __inline __m256i
@@ -235,7 +235,7 @@
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm_loadu_epi8 (void const *__P)
 {
-  return (__m128i) (*(__v16qi_u *) __P);
+  return (__m128i) (*(const __v16qi_u *) __P);
 }
 
 extern __inline __m128i
--- /usr/src/external/gpl3/gcc/dist/gcc/config/i386/avx512vlintrin.h	2023-07-30 10:11:56.809195380 +0200
+++ /usr/src/external/gpl3/gcc/dist/gcc/config/i386/avx512vlintrin.h.new	2025-02-22 11:07:03.888239581 +0100
@@ -87,7 +87,7 @@
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm256_mask_load_pd (__m256d __W, __mmask8 __U, void const *__P)
 {
-  return (__m256d) __builtin_ia32_loadapd256_mask ((__v4df *) __P,
+  return (__m256d) __builtin_ia32_loadapd256_mask ((const __v4df *) __P,
 						   (__v4df) __W,
 						   (__mmask8) __U);
 }
@@ -96,7 +96,7 @@
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm256_maskz_load_pd (__mmask8 __U, void const *__P)
 {
-  return (__m256d) __builtin_ia32_loadapd256_mask ((__v4df *) __P,
+  return (__m256d) __builtin_ia32_loadapd256_mask ((const __v4df *) __P,
 						   (__v4df)
 						   _mm256_setzero_pd (),
 						   (__mmask8) __U);
@@ -106,7 +106,7 @@
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm_mask_load_pd (__m128d __W, __mmask8 __U, void const *__P)
 {
-  return (__m128d) __builtin_ia32_loadapd128_mask ((__v2df *) __P,
+  return (__m128d) __builtin_ia32_loadapd128_mask ((const __v2df *) __P,
 						   (__v2df) __W,
 						   (__mmask8) __U);
 }
@@ -115,7 +115,7 @@
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm_maskz_load_pd (__mmask8 __U, void const *__P)
 {
-  return (__m128d) __builtin_ia32_loadapd128_mask ((__v2df *) __P,
+  return (__m128d) __builtin_ia32_loadapd128_mask ((const __v2df *) __P,
 						   (__v2df)
 						   _mm_setzero_pd (),
 						   (__mmask8) __U);
@@ -181,7 +181,7 @@
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm256_mask_load_ps (__m256 __W, __mmask8 __U, void const *__P)
 {
-  return (__m256) __builtin_ia32_loadaps256_mask ((__v8sf *) __P,
+  return (__m256) __builtin_ia32_loadaps256_mask ((const __v8sf *) __P,
 						  (__v8sf) __W,
 						  (__mmask8) __U);
 }
@@ -190,7 +190,7 @@
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm256_maskz_load_ps (__mmask8 __U, void const *__P)
 {
-  return (__m256) __builtin_ia32_loadaps256_mask ((__v8sf *) __P,
+  return (__m256) __builtin_ia32_loadaps256_mask ((const __v8sf *) __P,
 						  (__v8sf)
 						  _mm256_setzero_ps (),
 						  (__mmask8) __U);
@@ -200,7 +200,7 @@
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm_mask_load_ps (__m128 __W, __mmask8 __U, void const *__P)
 {
-  return (__m128) __builtin_ia32_loadaps128_mask ((__v4sf *) __P,
+  return (__m128) __builtin_ia32_loadaps128_mask ((const __v4sf *) __P,
 						  (__v4sf) __W,
 						  (__mmask8) __U);
 }
@@ -209,7 +209,7 @@
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm_maskz_load_ps (__mmask8 __U, void const *__P)
 {
-  return (__m128) __builtin_ia32_loadaps128_mask ((__v4sf *) __P,
+  return (__m128) __builtin_ia32_loadaps128_mask ((const __v4sf *) __P,
 						  (__v4sf)
 						  _mm_setzero_ps (),
 						  (__mmask8) __U);
@@ -275,14 +275,14 @@
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm256_load_epi64 (void const *__P)
 {
-  return (__m256i) (*(__v4di *) __P);
+  return (__m256i) (*(const __v4di *) __P);
 }
 
 extern __inline __m256i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm256_mask_load_epi64 (__m256i __W, __mmask8 __U, void const *__P)
 {
-  return (__m256i) __builtin_ia32_movdqa64load256_mask ((__v4di *) __P,
+  return (__m256i) __builtin_ia32_movdqa64load256_mask ((const __v4di *) __P,
 							(__v4di) __W,
 							(__mmask8)
 							__U);
@@ -292,7 +292,7 @@
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm256_maskz_load_epi64 (__mmask8 __U, void const *__P)
 {
-  return (__m256i) __builtin_ia32_movdqa64load256_mask ((__v4di *) __P,
+  return (__m256i) __builtin_ia32_movdqa64load256_mask ((const __v4di *) __P,
 							(__v4di)
 							_mm256_setzero_si256 (),
 							(__mmask8)
@@ -303,14 +303,14 @@
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm_load_epi64 (void const *__P)
 {
-  return (__m128i) (*(__v2di *) __P);
+  return (__m128i) (*(const __v2di *) __P);
 }
 
 extern __inline __m128i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm_mask_load_epi64 (__m128i __W, __mmask8 __U, void const *__P)
 {
-  return (__m128i) __builtin_ia32_movdqa64load128_mask ((__v2di *) __P,
+  return (__m128i) __builtin_ia32_movdqa64load128_mask ((const __v2di *) __P,
 							(__v2di) __W,
 							(__mmask8)
 							__U);
@@ -320,7 +320,7 @@
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm_maskz_load_epi64 (__mmask8 __U, void const *__P)
 {
-  return (__m128i) __builtin_ia32_movdqa64load128_mask ((__v2di *) __P,
+  return (__m128i) __builtin_ia32_movdqa64load128_mask ((const __v2di *) __P,
 							(__v2di)
 							_mm_setzero_si128 (),
 							(__mmask8)
@@ -387,14 +387,14 @@
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm256_load_epi32 (void const *__P)
 {
-  return (__m256i) (*(__v8si *) __P);
+  return (__m256i) (*(const __v8si *) __P);
 }
 
 extern __inline __m256i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm256_mask_load_epi32 (__m256i __W, __mmask8 __U, void const *__P)
 {
-  return (__m256i) __builtin_ia32_movdqa32load256_mask ((__v8si *) __P,
+  return (__m256i) __builtin_ia32_movdqa32load256_mask ((const __v8si *) __P,
 							(__v8si) __W,
 							(__mmask8)
 							__U);
@@ -404,7 +404,7 @@
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm256_maskz_load_epi32 (__mmask8 __U, void const *__P)
 {
-  return (__m256i) __builtin_ia32_movdqa32load256_mask ((__v8si *) __P,
+  return (__m256i) __builtin_ia32_movdqa32load256_mask ((const __v8si *) __P,
 							(__v8si)
 							_mm256_setzero_si256 (),
 							(__mmask8)
@@ -415,14 +415,14 @@
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm_load_epi32 (void const *__P)
 {
-  return (__m128i) (*(__v4si *) __P);
+  return (__m128i) (*(const __v4si *) __P);
 }
 
 extern __inline __m128i
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm_mask_load_epi32 (__m128i __W, __mmask8 __U, void const *__P)
 {
-  return (__m128i) __builtin_ia32_movdqa32load128_mask ((__v4si *) __P,
+  return (__m128i) __builtin_ia32_movdqa32load128_mask ((const __v4si *) __P,
 							(__v4si) __W,
 							(__mmask8)
 							__U);
@@ -432,7 +432,7 @@
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm_maskz_load_epi32 (__mmask8 __U, void const *__P)
 {
-  return (__m128i) __builtin_ia32_movdqa32load128_mask ((__v4si *) __P,
+  return (__m128i) __builtin_ia32_movdqa32load128_mask ((const __v4si *) __P,
 							(__v4si)
 							_mm_setzero_si128 (),
 							(__mmask8)
@@ -771,7 +771,7 @@
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm256_loadu_epi64 (void const *__P)
 {
-  return (__m256i) (*(__v4di_u *) __P);
+  return (__m256i) (*(const __v4di_u *) __P);
 }
 
 extern __inline __m256i
@@ -797,7 +797,7 @@
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm_loadu_epi64 (void const *__P)
 {
-  return (__m128i) (*(__v2di_u *) __P);
+  return (__m128i) (*(const __v2di_u *) __P);
 }
 
 extern __inline __m128i
@@ -855,7 +855,7 @@
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm256_loadu_epi32 (void const *__P)
 {
-  return (__m256i) (*(__v8si_u *) __P);
+  return (__m256i) (*(const __v8si_u *) __P);
 }
 
 extern __inline __m256i
@@ -881,7 +881,7 @@
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm_loadu_epi32 (void const *__P)
 {
-  return (__m128i) (*(__v4si_u *) __P);
+  return (__m128i) (*(const __v4si_u *) __P);
 }
 
 extern __inline __m128i
@@ -6130,7 +6130,7 @@
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm256_mask_expandloadu_pd (__m256d __W, __mmask8 __U, void const *__P)
 {
-  return (__m256d) __builtin_ia32_expandloaddf256_mask ((__v4df *) __P,
+  return (__m256d) __builtin_ia32_expandloaddf256_mask ((const __v4df *) __P,
 							(__v4df) __W,
 							(__mmask8)
 							__U);
@@ -6140,7 +6140,7 @@
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm256_maskz_expandloadu_pd (__mmask8 __U, void const *__P)
 {
-  return (__m256d) __builtin_ia32_expandloaddf256_maskz ((__v4df *) __P,
+  return (__m256d) __builtin_ia32_expandloaddf256_maskz ((const __v4df *) __P,
 							 (__v4df)
 							 _mm256_setzero_pd (),
 							 (__mmask8)
@@ -6170,7 +6170,7 @@
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm_mask_expandloadu_pd (__m128d __W, __mmask8 __U, void const *__P)
 {
-  return (__m128d) __builtin_ia32_expandloaddf128_mask ((__v2df *) __P,
+  return (__m128d) __builtin_ia32_expandloaddf128_mask ((const __v2df *) __P,
 							(__v2df) __W,
 							(__mmask8)
 							__U);
@@ -6180,7 +6180,7 @@
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm_maskz_expandloadu_pd (__mmask8 __U, void const *__P)
 {
-  return (__m128d) __builtin_ia32_expandloaddf128_maskz ((__v2df *) __P,
+  return (__m128d) __builtin_ia32_expandloaddf128_maskz ((const __v2df *) __P,
 							 (__v2df)
 							 _mm_setzero_pd (),
 							 (__mmask8)
@@ -6210,7 +6210,7 @@
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm256_mask_expandloadu_ps (__m256 __W, __mmask8 __U, void const *__P)
 {
-  return (__m256) __builtin_ia32_expandloadsf256_mask ((__v8sf *) __P,
+  return (__m256) __builtin_ia32_expandloadsf256_mask ((const __v8sf *) __P,
 						       (__v8sf) __W,
 						       (__mmask8) __U);
 }
@@ -6219,7 +6219,7 @@
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm256_maskz_expandloadu_ps (__mmask8 __U, void const *__P)
 {
-  return (__m256) __builtin_ia32_expandloadsf256_maskz ((__v8sf *) __P,
+  return (__m256) __builtin_ia32_expandloadsf256_maskz ((const __v8sf *) __P,
 							(__v8sf)
 							_mm256_setzero_ps (),
 							(__mmask8)
@@ -6249,7 +6249,7 @@
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm_mask_expandloadu_ps (__m128 __W, __mmask8 __U, void const *__P)
 {
-  return (__m128) __builtin_ia32_expandloadsf128_mask ((__v4sf *) __P,
+  return (__m128) __builtin_ia32_expandloadsf128_mask ((const __v4sf *) __P,
 						       (__v4sf) __W,
 						       (__mmask8) __U);
 }
@@ -6258,7 +6258,7 @@
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm_maskz_expandloadu_ps (__mmask8 __U, void const *__P)
 {
-  return (__m128) __builtin_ia32_expandloadsf128_maskz ((__v4sf *) __P,
+  return (__m128) __builtin_ia32_expandloadsf128_maskz ((const __v4sf *) __P,
 							(__v4sf)
 							_mm_setzero_ps (),
 							(__mmask8)
@@ -6289,7 +6289,7 @@
 _mm256_mask_expandloadu_epi64 (__m256i __W, __mmask8 __U,
 			       void const *__P)
 {
-  return (__m256i) __builtin_ia32_expandloaddi256_mask ((__v4di *) __P,
+  return (__m256i) __builtin_ia32_expandloaddi256_mask ((const __v4di *) __P,
 							(__v4di) __W,
 							(__mmask8)
 							__U);
@@ -6299,7 +6299,7 @@
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm256_maskz_expandloadu_epi64 (__mmask8 __U, void const *__P)
 {
-  return (__m256i) __builtin_ia32_expandloaddi256_maskz ((__v4di *) __P,
+  return (__m256i) __builtin_ia32_expandloaddi256_maskz ((const __v4di *) __P,
 							 (__v4di)
 							 _mm256_setzero_si256 (),
 							 (__mmask8)
@@ -6329,7 +6329,7 @@
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm_mask_expandloadu_epi64 (__m128i __W, __mmask8 __U, void const *__P)
 {
-  return (__m128i) __builtin_ia32_expandloaddi128_mask ((__v2di *) __P,
+  return (__m128i) __builtin_ia32_expandloaddi128_mask ((const __v2di *) __P,
 							(__v2di) __W,
 							(__mmask8)
 							__U);
@@ -6339,7 +6339,7 @@
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm_maskz_expandloadu_epi64 (__mmask8 __U, void const *__P)
 {
-  return (__m128i) __builtin_ia32_expandloaddi128_maskz ((__v2di *) __P,
+  return (__m128i) __builtin_ia32_expandloaddi128_maskz ((const __v2di *) __P,
 							 (__v2di)
 							 _mm_setzero_si128 (),
 							 (__mmask8)
@@ -6370,7 +6370,7 @@
 _mm256_mask_expandloadu_epi32 (__m256i __W, __mmask8 __U,
 			       void const *__P)
 {
-  return (__m256i) __builtin_ia32_expandloadsi256_mask ((__v8si *) __P,
+  return (__m256i) __builtin_ia32_expandloadsi256_mask ((const __v8si *) __P,
 							(__v8si) __W,
 							(__mmask8)
 							__U);
@@ -6380,7 +6380,7 @@
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm256_maskz_expandloadu_epi32 (__mmask8 __U, void const *__P)
 {
-  return (__m256i) __builtin_ia32_expandloadsi256_maskz ((__v8si *) __P,
+  return (__m256i) __builtin_ia32_expandloadsi256_maskz ((const __v8si *) __P,
 							 (__v8si)
 							 _mm256_setzero_si256 (),
 							 (__mmask8)
@@ -6410,7 +6410,7 @@
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm_mask_expandloadu_epi32 (__m128i __W, __mmask8 __U, void const *__P)
 {
-  return (__m128i) __builtin_ia32_expandloadsi128_mask ((__v4si *) __P,
+  return (__m128i) __builtin_ia32_expandloadsi128_mask ((const __v4si *) __P,
 							(__v4si) __W,
 							(__mmask8)
 							__U);
@@ -6420,7 +6420,7 @@
 __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
 _mm_maskz_expandloadu_epi32 (__mmask8 __U, void const *__P)
 {
-  return (__m128i) __builtin_ia32_expandloadsi128_maskz ((__v4si *) __P,
+  return (__m128i) __builtin_ia32_expandloadsi128_maskz ((const __v4si *) __P,
 							 (__v4si)
 							 _mm_setzero_si128 (),
 							 (__mmask8)
--- /usr/src/external/gpl3/gcc/dist/gcc/config/i386/avxintrin.h	2023-07-30 10:11:56.822854938 +0200
+++ /usr/src/external/gpl3/gcc/dist/gcc/config/i386/avxintrin.h.new	2025-02-22 11:07:03.892404760 +0100
@@ -866,7 +866,7 @@
 extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm256_load_pd (double const *__P)
 {
-  return *(__m256d *)__P;
+  return *(const __m256d *)__P;
 }
 
 extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
@@ -878,7 +878,7 @@
 extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm256_load_ps (float const *__P)
 {
-  return *(__m256 *)__P;
+  return *(const __m256 *)__P;
 }
 
 extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
@@ -890,7 +890,7 @@
 extern __inline __m256d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm256_loadu_pd (double const *__P)
 {
-  return *(__m256d_u *)__P;
+  return *(const __m256d_u *)__P;
 }
 
 extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
@@ -902,7 +902,7 @@
 extern __inline __m256 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm256_loadu_ps (float const *__P)
 {
-  return *(__m256_u *)__P;
+  return *(const __m256_u *)__P;
 }
 
 extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
--- /usr/src/external/gpl3/gcc/dist/gcc/config/i386/keylockerintrin.h	2023-07-30 07:21:03.000000000 +0200
+++ /usr/src/external/gpl3/gcc/dist/gcc/config/i386/keylockerintrin.h.new	2025-02-22 11:07:03.896372098 +0100
@@ -99,28 +99,28 @@
 unsigned char __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_aesdecwide128kl_u8(__m128i __A[8], const __m128i __B[8], const void * __P)
 {
-  return __builtin_ia32_aesdecwide128kl_u8 ((__v2di *) __A, (__v2di *) __B, __P);
+  return __builtin_ia32_aesdecwide128kl_u8 ((__v2di *) __A, (const __v2di *) __B, __P);
 }
 
 extern __inline
 unsigned char __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_aesdecwide256kl_u8(__m128i __A[8], const __m128i __B[8], const void * __P)
 {
-  return __builtin_ia32_aesdecwide256kl_u8 ((__v2di *) __A, (__v2di *) __B, __P);
+  return __builtin_ia32_aesdecwide256kl_u8 ((__v2di *) __A, (const __v2di *) __B, __P);
 }
 
 extern __inline
 unsigned char __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_aesencwide128kl_u8(__m128i __A[8], const __m128i __B[8], const void * __P)
 {
-  return __builtin_ia32_aesencwide128kl_u8 ((__v2di *) __A, (__v2di *) __B, __P);
+  return __builtin_ia32_aesencwide128kl_u8 ((__v2di *) __A, (const __v2di *) __B, __P);
 }
 
 extern __inline
 unsigned char __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_aesencwide256kl_u8(__m128i __A[8], const __m128i __B[8], const void * __P)
 {
-  return __builtin_ia32_aesencwide256kl_u8 ((__v2di *) __A, (__v2di *) __B, __P);
+  return __builtin_ia32_aesencwide256kl_u8 ((__v2di *) __A, (const __v2di *) __B, __P);
 }
 #ifdef __DISABLE_WIDEKL__
 #undef __DISABLE_WIDEKL__



Home | Main Index | Thread Index | Old Index