/* Copyright (C) 2013-2018 Free Software Foundation, Inc.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   GCC is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

#ifndef _IMMINTRIN_H_INCLUDED
#error "Never use <avx512vbmivlintrin.h> directly; include <immintrin.h> instead."
#endif

#ifndef _AVX512VBMIVLINTRIN_H_INCLUDED
#define _AVX512VBMIVLINTRIN_H_INCLUDED

#if !defined(__AVX512VL__) || !defined(__AVX512VBMI__)
#pragma GCC push_options
#pragma GCC target("avx512vbmi,avx512vl")
#define __DISABLE_AVX512VBMIVL__
#endif /* __AVX512VBMIVL__ */

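/* VPMULTISHIFTQB: each destination byte is an 8-bit field extracted from the
   corresponding 64-bit lane of __Y, starting at the bit offset given by the
   matching control byte in __X.  The _mask_ forms merge with __W under the
   write mask __M; the _maskz_ forms zero the masked-off bytes.  */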

extern __inline __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm256_mask_multishift_epi64_epi8 (__m256i __W, __mmask32 __M, __m256i __X, __m256i __Y)
{
  return (__m256i) __builtin_ia32_vpmultishiftqb256_mask ((__v32qi) __X,
							  (__v32qi) __Y,
							  (__v32qi) __W,
							  (__mmask32) __M);
}

extern __inline __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm256_maskz_multishift_epi64_epi8 (__mmask32 __M, __m256i __X, __m256i __Y)
{
  return (__m256i) __builtin_ia32_vpmultishiftqb256_mask ((__v32qi) __X,
							  (__v32qi) __Y,
							  (__v32qi)
							  _mm256_setzero_si256 (),
							  (__mmask32) __M);
}

extern __inline __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm256_multishift_epi64_epi8 (__m256i __X, __m256i __Y)
{
  return (__m256i) __builtin_ia32_vpmultishiftqb256_mask ((__v32qi) __X,
							  (__v32qi) __Y,
							  (__v32qi)
							  _mm256_undefined_si256 (),
							  (__mmask32) -1);
}

extern __inline __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm_mask_multishift_epi64_epi8 (__m128i __W, __mmask16 __M, __m128i __X, __m128i __Y)
{
  return (__m128i) __builtin_ia32_vpmultishiftqb128_mask ((__v16qi) __X,
							  (__v16qi) __Y,
							  (__v16qi) __W,
							  (__mmask16) __M);
}

extern __inline __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm_maskz_multishift_epi64_epi8 (__mmask16 __M, __m128i __X, __m128i __Y)
{
  return (__m128i) __builtin_ia32_vpmultishiftqb128_mask ((__v16qi) __X,
							  (__v16qi) __Y,
							  (__v16qi)
							  _mm_setzero_si128 (),
							  (__mmask16) __M);
}

extern __inline __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm_multishift_epi64_epi8 (__m128i __X, __m128i __Y)
{
  return (__m128i) __builtin_ia32_vpmultishiftqb128_mask ((__v16qi) __X,
							  (__v16qi) __Y,
							  (__v16qi)
							  _mm_undefined_si128 (),
							  (__mmask16) -1);
}

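/* VPERMB: permute the bytes of __B across the whole vector, using the low
   bits of the corresponding index byte in __A to pick each source byte.  */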

extern __inline __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm256_permutexvar_epi8 (__m256i __A, __m256i __B)
{
  return (__m256i) __builtin_ia32_permvarqi256_mask ((__v32qi) __B,
						     (__v32qi) __A,
						     (__v32qi)
						     _mm256_undefined_si256 (),
						     (__mmask32) -1);
}

extern __inline __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm256_maskz_permutexvar_epi8 (__mmask32 __M, __m256i __A,
			       __m256i __B)
{
  return (__m256i) __builtin_ia32_permvarqi256_mask ((__v32qi) __B,
						     (__v32qi) __A,
						     (__v32qi)
						     _mm256_setzero_si256 (),
						     (__mmask32) __M);
}

extern __inline __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm256_mask_permutexvar_epi8 (__m256i __W, __mmask32 __M, __m256i __A,
			      __m256i __B)
{
  return (__m256i) __builtin_ia32_permvarqi256_mask ((__v32qi) __B,
						     (__v32qi) __A,
						     (__v32qi) __W,
						     (__mmask32) __M);
}

extern __inline __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm_permutexvar_epi8 (__m128i __A, __m128i __B)
{
  return (__m128i) __builtin_ia32_permvarqi128_mask ((__v16qi) __B,
						     (__v16qi) __A,
						     (__v16qi)
						     _mm_undefined_si128 (),
						     (__mmask16) -1);
}

extern __inline __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm_maskz_permutexvar_epi8 (__mmask16 __M, __m128i __A, __m128i __B)
{
  return (__m128i) __builtin_ia32_permvarqi128_mask ((__v16qi) __B,
						     (__v16qi) __A,
						     (__v16qi)
						     _mm_setzero_si128 (),
						     (__mmask16) __M);
}

extern __inline __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm_mask_permutexvar_epi8 (__m128i __W, __mmask16 __M, __m128i __A,
			   __m128i __B)
{
  return (__m128i) __builtin_ia32_permvarqi128_mask ((__v16qi) __B,
						     (__v16qi) __A,
						     (__v16qi) __W,
						     (__mmask16) __M);
}

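/* VPERMT2B/VPERMI2B: two-source byte permute.  Each index byte in __I selects
   a byte from the concatenation of __A and __B.  The _mask_ forms keep __A
   (or __I, for the mask2 forms) where the mask bit is clear; the _maskz_
   forms zero those bytes instead.  */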

extern __inline __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm256_permutex2var_epi8 (__m256i __A, __m256i __I, __m256i __B)
{
  return (__m256i) __builtin_ia32_vpermt2varqi256_mask ((__v32qi) __I
							/* idx */ ,
							(__v32qi) __A,
							(__v32qi) __B,
							(__mmask32) -1);
}

extern __inline __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm256_mask_permutex2var_epi8 (__m256i __A, __mmask32 __U,
			       __m256i __I, __m256i __B)
{
  return (__m256i) __builtin_ia32_vpermt2varqi256_mask ((__v32qi) __I
							/* idx */ ,
							(__v32qi) __A,
							(__v32qi) __B,
							(__mmask32) __U);
}

extern __inline __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm256_mask2_permutex2var_epi8 (__m256i __A, __m256i __I,
				__mmask32 __U, __m256i __B)
{
  return (__m256i) __builtin_ia32_vpermi2varqi256_mask ((__v32qi) __A,
							(__v32qi) __I
							/* idx */ ,
							(__v32qi) __B,
							(__mmask32) __U);
}

extern __inline __m256i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm256_maskz_permutex2var_epi8 (__mmask32 __U, __m256i __A,
				__m256i __I, __m256i __B)
{
  return (__m256i) __builtin_ia32_vpermt2varqi256_maskz ((__v32qi) __I
							 /* idx */ ,
							 (__v32qi) __A,
							 (__v32qi) __B,
							 (__mmask32) __U);
}

extern __inline __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm_permutex2var_epi8 (__m128i __A, __m128i __I, __m128i __B)
{
  return (__m128i) __builtin_ia32_vpermt2varqi128_mask ((__v16qi) __I
							/* idx */ ,
							(__v16qi) __A,
							(__v16qi) __B,
							(__mmask16) -1);
}

extern __inline __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm_mask_permutex2var_epi8 (__m128i __A, __mmask16 __U, __m128i __I,
			    __m128i __B)
{
  return (__m128i) __builtin_ia32_vpermt2varqi128_mask ((__v16qi) __I
							/* idx */ ,
							(__v16qi) __A,
							(__v16qi) __B,
							(__mmask16) __U);
}

extern __inline __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm_mask2_permutex2var_epi8 (__m128i __A, __m128i __I, __mmask16 __U,
			     __m128i __B)
{
  return (__m128i) __builtin_ia32_vpermi2varqi128_mask ((__v16qi) __A,
							(__v16qi) __I
							/* idx */ ,
							(__v16qi) __B,
							(__mmask16) __U);
}

extern __inline __m128i
__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
_mm_maskz_permutex2var_epi8 (__mmask16 __U, __m128i __A, __m128i __I,
			     __m128i __B)
{
  return (__m128i) __builtin_ia32_vpermt2varqi128_maskz ((__v16qi) __I
							 /* idx */ ,
							 (__v16qi) __A,
							 (__v16qi) __B,
							 (__mmask16) __U);
}

#ifdef __DISABLE_AVX512VBMIVL__
#undef __DISABLE_AVX512VBMIVL__
#pragma GCC pop_options
#endif /* __DISABLE_AVX512VBMIVL__ */

#endif /* _AVX512VBMIVLINTRIN_H_INCLUDED */