/* shuffle-avx2.c */

/*********************************************************************
  Blosc - Blocked Shuffling and Compression Library

  Author: Francesc Alted <francesc@blosc.org>

  See LICENSES/BLOSC.txt for details about copyright and rights to use.
**********************************************************************/

#include "shuffle-generic.h"
#include "shuffle-avx2.h"

/* Make sure AVX2 is available for the compilation target and compiler. */
#if !defined(__AVX2__)
  #error AVX2 is not supported by the target architecture/platform and/or this compiler.
#endif

#include <immintrin.h>

/* The next is useful for debugging purposes */
#if 0
#include <stdio.h>
#include <string.h>

static void printymm(__m256i ymm0)
{
  uint8_t buf[32];

  ((__m256i *)buf)[0] = ymm0;
  printf("%x,%x,%x,%x,%x,%x,%x,%x,%x,%x,%x,%x,%x,%x,%x,%x,"
         "%x,%x,%x,%x,%x,%x,%x,%x,%x,%x,%x,%x,%x,%x,%x,%x\n",
         buf[0], buf[1], buf[2], buf[3],
         buf[4], buf[5], buf[6], buf[7],
         buf[8], buf[9], buf[10], buf[11],
         buf[12], buf[13], buf[14], buf[15],
         buf[16], buf[17], buf[18], buf[19],
         buf[20], buf[21], buf[22], buf[23],
         buf[24], buf[25], buf[26], buf[27],
         buf[28], buf[29], buf[30], buf[31]);
}
#endif

/* GCC doesn't include the split load/store intrinsics
   needed for the tiled shuffle, so define them here. */
#if defined(__GNUC__) && !defined(__clang__) && !defined(__ICC)
static inline __m256i
__attribute__((__always_inline__))
_mm256_loadu2_m128i(const __m128i* const hiaddr, const __m128i* const loaddr)
{
  return _mm256_inserti128_si256(
    _mm256_castsi128_si256(_mm_loadu_si128(loaddr)), _mm_loadu_si128(hiaddr), 1);
}

static inline void
__attribute__((__always_inline__))
_mm256_storeu2_m128i(__m128i* const hiaddr, __m128i* const loaddr, const __m256i a)
{
  _mm_storeu_si128(loaddr, _mm256_castsi256_si128(a));
  _mm_storeu_si128(hiaddr, _mm256_extracti128_si256(a, 1));
}
#endif  /* defined(__GNUC__) */
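
/* Overview: each shuffleN_avx2 routine below byte-transposes 32 elements at a
   time: the k-th byte of every element is gathered into a contiguous stream of
   total_elements bytes in the destination, and the unshuffleN_avx2 routines
   invert that layout. The transposes are built from in-lane unpack/shuffle
   steps followed by cross-lane permutes, since the AVX2 unpack and byte-shuffle
   instructions operate independently on the two 128-bit lanes of a YMM
   register. */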

/* Routine optimized for shuffling a buffer for a type size of 2 bytes. */
static void
shuffle2_avx2(uint8_t* const dest, const uint8_t* const src,
              const size_t vectorizable_elements, const size_t total_elements)
{
  static const size_t bytesoftype = 2;
  size_t j;
  int k;
  __m256i ymm0[2], ymm1[2];

  /* Create the shuffle mask.
     NOTE: The XMM/YMM 'set' intrinsics require the arguments to be ordered from
     most to least significant (i.e., their order is reversed when compared to
     loading the mask from an array). */
  const __m256i shmask = _mm256_set_epi8(
    0x0f, 0x0d, 0x0b, 0x09, 0x07, 0x05, 0x03, 0x01,
    0x0e, 0x0c, 0x0a, 0x08, 0x06, 0x04, 0x02, 0x00,
    0x0f, 0x0d, 0x0b, 0x09, 0x07, 0x05, 0x03, 0x01,
    0x0e, 0x0c, 0x0a, 0x08, 0x06, 0x04, 0x02, 0x00);
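  /* Within each 128-bit lane, this mask gathers the even-indexed bytes (the
     first byte of each 2-byte element) into the low 8 bytes of the lane and
     the odd-indexed bytes into the high 8 bytes. */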

  for (j = 0; j < vectorizable_elements; j += sizeof(__m256i)) {
    /* Fetch 32 elements (64 bytes) then transpose bytes, words and double words. */
    for (k = 0; k < 2; k++) {
      ymm0[k] = _mm256_loadu_si256((__m256i*)(src + (j * bytesoftype) + (k * sizeof(__m256i))));
      ymm1[k] = _mm256_shuffle_epi8(ymm0[k], shmask);
    }

    ymm0[0] = _mm256_permute4x64_epi64(ymm1[0], 0xd8);
    ymm0[1] = _mm256_permute4x64_epi64(ymm1[1], 0x8d);

    ymm1[0] = _mm256_blend_epi32(ymm0[0], ymm0[1], 0xf0);
    ymm0[1] = _mm256_blend_epi32(ymm0[0], ymm0[1], 0x0f);
    ymm1[1] = _mm256_permute4x64_epi64(ymm0[1], 0x4e);

    /* Store the result vectors */
    uint8_t* const dest_for_jth_element = dest + j;
    for (k = 0; k < 2; k++) {
      _mm256_storeu_si256((__m256i*)(dest_for_jth_element + (k * total_elements)), ymm1[k]);
    }
  }
}

/* Routine optimized for shuffling a buffer for a type size of 4 bytes. */
static void
shuffle4_avx2(uint8_t* const dest, const uint8_t* const src,
              const size_t vectorizable_elements, const size_t total_elements)
{
  static const size_t bytesoftype = 4;
  size_t i;
  int j;
  __m256i ymm0[4], ymm1[4];

  /* Create the shuffle mask.
     NOTE: The XMM/YMM 'set' intrinsics require the arguments to be ordered from
     most to least significant (i.e., their order is reversed when compared to
     loading the mask from an array). */
  const __m256i mask = _mm256_set_epi32(
    0x07, 0x03, 0x06, 0x02, 0x05, 0x01, 0x04, 0x00);
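  /* Read from least to most significant, the permutation is [0, 4, 1, 5, 2, 6, 3, 7]:
     it interleaves double words from the low and high 128-bit halves of a YMM
     register, and is applied after the in-lane transposes below to put the
     shuffled bytes into their final cross-lane order. */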

  for (i = 0; i < vectorizable_elements; i += sizeof(__m256i)) {
    /* Fetch 32 elements (128 bytes) then transpose bytes and words. */
    for (j = 0; j < 4; j++) {
      ymm0[j] = _mm256_loadu_si256((__m256i*)(src + (i * bytesoftype) + (j * sizeof(__m256i))));
      ymm1[j] = _mm256_shuffle_epi32(ymm0[j], 0xd8);
      ymm0[j] = _mm256_shuffle_epi32(ymm0[j], 0x8d);
      ymm0[j] = _mm256_unpacklo_epi8(ymm1[j], ymm0[j]);
      ymm1[j] = _mm256_shuffle_epi32(ymm0[j], 0x04e);
      ymm0[j] = _mm256_unpacklo_epi16(ymm0[j], ymm1[j]);
    }
    /* Transpose double words */
    for (j = 0; j < 2; j++) {
      ymm1[j*2] = _mm256_unpacklo_epi32(ymm0[j*2], ymm0[j*2+1]);
      ymm1[j*2+1] = _mm256_unpackhi_epi32(ymm0[j*2], ymm0[j*2+1]);
    }
    /* Transpose quad words */
    for (j = 0; j < 2; j++) {
      ymm0[j*2] = _mm256_unpacklo_epi64(ymm1[j], ymm1[j+2]);
      ymm0[j*2+1] = _mm256_unpackhi_epi64(ymm1[j], ymm1[j+2]);
    }
    for (j = 0; j < 4; j++) {
      ymm0[j] = _mm256_permutevar8x32_epi32(ymm0[j], mask);
    }
    /* Store the result vectors */
    uint8_t* const dest_for_ith_element = dest + i;
    for (j = 0; j < 4; j++) {
      _mm256_storeu_si256((__m256i*)(dest_for_ith_element + (j * total_elements)), ymm0[j]);
    }
  }
}

/* Routine optimized for shuffling a buffer for a type size of 8 bytes. */
static void
shuffle8_avx2(uint8_t* const dest, const uint8_t* const src,
              const size_t vectorizable_elements, const size_t total_elements)
{
  static const size_t bytesoftype = 8;
  size_t j;
  int k, l;
  __m256i ymm0[8], ymm1[8];

  for (j = 0; j < vectorizable_elements; j += sizeof(__m256i)) {
    /* Fetch 32 elements (256 bytes) then transpose bytes. */
    for (k = 0; k < 8; k++) {
      ymm0[k] = _mm256_loadu_si256((__m256i*)(src + (j * bytesoftype) + (k * sizeof(__m256i))));
      ymm1[k] = _mm256_shuffle_epi32(ymm0[k], 0x4e);
      ymm1[k] = _mm256_unpacklo_epi8(ymm0[k], ymm1[k]);
    }
    /* Transpose words */
    for (k = 0, l = 0; k < 4; k++, l += 2) {
      ymm0[k*2] = _mm256_unpacklo_epi16(ymm1[l], ymm1[l+1]);
      ymm0[k*2+1] = _mm256_unpackhi_epi16(ymm1[l], ymm1[l+1]);
    }
    /* Transpose double words */
    for (k = 0, l = 0; k < 4; k++, l++) {
      if (k == 2) l += 2;
      ymm1[k*2] = _mm256_unpacklo_epi32(ymm0[l], ymm0[l+2]);
      ymm1[k*2+1] = _mm256_unpackhi_epi32(ymm0[l], ymm0[l+2]);
    }
    /* Transpose quad words */
    for (k = 0; k < 4; k++) {
      ymm0[k*2] = _mm256_unpacklo_epi64(ymm1[k], ymm1[k+4]);
      ymm0[k*2+1] = _mm256_unpackhi_epi64(ymm1[k], ymm1[k+4]);
    }
    for (k = 0; k < 8; k++) {
      ymm1[k] = _mm256_permute4x64_epi64(ymm0[k], 0x72);
      ymm0[k] = _mm256_permute4x64_epi64(ymm0[k], 0xD8);
      ymm0[k] = _mm256_unpacklo_epi16(ymm0[k], ymm1[k]);
    }
    /* Store the result vectors */
    uint8_t* const dest_for_jth_element = dest + j;
    for (k = 0; k < 8; k++) {
      _mm256_storeu_si256((__m256i*)(dest_for_jth_element + (k * total_elements)), ymm0[k]);
    }
  }
}

/* Routine optimized for shuffling a buffer for a type size of 16 bytes. */
static void
shuffle16_avx2(uint8_t* const dest, const uint8_t* const src,
               const size_t vectorizable_elements, const size_t total_elements)
{
  static const size_t bytesoftype = 16;
  size_t j;
  int k, l;
  __m256i ymm0[16], ymm1[16];

  /* Create the shuffle mask.
     NOTE: The XMM/YMM 'set' intrinsics require the arguments to be ordered from
     most to least significant (i.e., their order is reversed when compared to
     loading the mask from an array). */
  const __m256i shmask = _mm256_set_epi8(
    0x0f, 0x07, 0x0e, 0x06, 0x0d, 0x05, 0x0c, 0x04,
    0x0b, 0x03, 0x0a, 0x02, 0x09, 0x01, 0x08, 0x00,
    0x0f, 0x07, 0x0e, 0x06, 0x0d, 0x05, 0x0c, 0x04,
    0x0b, 0x03, 0x0a, 0x02, 0x09, 0x01, 0x08, 0x00);
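  /* Within each 128-bit lane, this mask interleaves the low 8 bytes with the
     high 8 bytes (0, 8, 1, 9, ...); it is used in the final fix-up loop below,
     right after a 64-bit permute, to restore the byte order disturbed by the
     lane-wise unpack instructions. */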

  for (j = 0; j < vectorizable_elements; j += sizeof(__m256i)) {
    /* Fetch 32 elements (512 bytes) into 16 YMM registers. */
    for (k = 0; k < 16; k++) {
      ymm0[k] = _mm256_loadu_si256((__m256i*)(src + (j * bytesoftype) + (k * sizeof(__m256i))));
    }
    /* Transpose bytes */
    for (k = 0, l = 0; k < 8; k++, l += 2) {
      ymm1[k*2] = _mm256_unpacklo_epi8(ymm0[l], ymm0[l+1]);
      ymm1[k*2+1] = _mm256_unpackhi_epi8(ymm0[l], ymm0[l+1]);
    }
    /* Transpose words */
    for (k = 0, l = -2; k < 8; k++, l++) {
      if ((k%2) == 0) l += 2;
      ymm0[k*2] = _mm256_unpacklo_epi16(ymm1[l], ymm1[l+2]);
      ymm0[k*2+1] = _mm256_unpackhi_epi16(ymm1[l], ymm1[l+2]);
    }
    /* Transpose double words */
    for (k = 0, l = -4; k < 8; k++, l++) {
      if ((k%4) == 0) l += 4;
      ymm1[k*2] = _mm256_unpacklo_epi32(ymm0[l], ymm0[l+4]);
      ymm1[k*2+1] = _mm256_unpackhi_epi32(ymm0[l], ymm0[l+4]);
    }
    /* Transpose quad words */
    for (k = 0; k < 8; k++) {
      ymm0[k*2] = _mm256_unpacklo_epi64(ymm1[k], ymm1[k+8]);
      ymm0[k*2+1] = _mm256_unpackhi_epi64(ymm1[k], ymm1[k+8]);
    }
    for (k = 0; k < 16; k++) {
      ymm0[k] = _mm256_permute4x64_epi64(ymm0[k], 0xd8);
      ymm0[k] = _mm256_shuffle_epi8(ymm0[k], shmask);
    }
    /* Store the result vectors */
    uint8_t* const dest_for_jth_element = dest + j;
    for (k = 0; k < 16; k++) {
      _mm256_storeu_si256((__m256i*)(dest_for_jth_element + (k * total_elements)), ymm0[k]);
    }
  }
}

/* Routine optimized for shuffling a buffer for a type size larger than 16 bytes. */
static void
shuffle16_tiled_avx2(uint8_t* const dest, const uint8_t* const src,
                     const size_t vectorizable_elements, const size_t total_elements, const size_t bytesoftype)
{
  size_t j;
  int k, l;
  __m256i ymm0[16], ymm1[16];

  const lldiv_t vecs_per_el = lldiv(bytesoftype, sizeof(__m128i));

  /* Create the shuffle mask.
     NOTE: The XMM/YMM 'set' intrinsics require the arguments to be ordered from
     most to least significant (i.e., their order is reversed when compared to
     loading the mask from an array). */
  const __m256i shmask = _mm256_set_epi8(
    0x0f, 0x07, 0x0e, 0x06, 0x0d, 0x05, 0x0c, 0x04,
    0x0b, 0x03, 0x0a, 0x02, 0x09, 0x01, 0x08, 0x00,
    0x0f, 0x07, 0x0e, 0x06, 0x0d, 0x05, 0x0c, 0x04,
    0x0b, 0x03, 0x0a, 0x02, 0x09, 0x01, 0x08, 0x00);

  for (j = 0; j < vectorizable_elements; j += sizeof(__m256i)) {
    /* Advance the offset into the type by the vector size (in bytes), unless this is
       the initial iteration and the type size is not a multiple of the vector size.
       In that case, only advance by the number of bytes necessary so that the number
       of remaining bytes in the type will be a multiple of the vector size. */
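    /* For example (illustrative): if bytesoftype == 20, vecs_per_el.rem == 4,
       so the passes start at byte offsets 0 and 4 within each element. Each
       pass gathers a 16-byte slice of 32 consecutive elements (512 bytes);
       bytes 4-15 of each element are covered by both passes, but they are
       written to the same destination locations, so the result is unchanged. */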
    size_t offset_into_type;
    for (offset_into_type = 0; offset_into_type < bytesoftype;
         offset_into_type += (offset_into_type == 0 && vecs_per_el.rem > 0 ? vecs_per_el.rem : sizeof(__m128i))) {
      /* Fetch elements in groups of 512 bytes */
      const uint8_t* const src_with_offset = src + offset_into_type;
      for (k = 0; k < 16; k++) {
        ymm0[k] = _mm256_loadu2_m128i(
          (__m128i*)(src_with_offset + (j + (2 * k) + 1) * bytesoftype),
          (__m128i*)(src_with_offset + (j + (2 * k)) * bytesoftype));
      }
      /* Transpose bytes */
      for (k = 0, l = 0; k < 8; k++, l += 2) {
        ymm1[k*2] = _mm256_unpacklo_epi8(ymm0[l], ymm0[l+1]);
        ymm1[k*2+1] = _mm256_unpackhi_epi8(ymm0[l], ymm0[l+1]);
      }
      /* Transpose words */
      for (k = 0, l = -2; k < 8; k++, l++) {
        if ((k%2) == 0) l += 2;
        ymm0[k*2] = _mm256_unpacklo_epi16(ymm1[l], ymm1[l+2]);
        ymm0[k*2+1] = _mm256_unpackhi_epi16(ymm1[l], ymm1[l+2]);
      }
      /* Transpose double words */
      for (k = 0, l = -4; k < 8; k++, l++) {
        if ((k%4) == 0) l += 4;
        ymm1[k*2] = _mm256_unpacklo_epi32(ymm0[l], ymm0[l+4]);
        ymm1[k*2+1] = _mm256_unpackhi_epi32(ymm0[l], ymm0[l+4]);
      }
      /* Transpose quad words */
      for (k = 0; k < 8; k++) {
        ymm0[k*2] = _mm256_unpacklo_epi64(ymm1[k], ymm1[k+8]);
        ymm0[k*2+1] = _mm256_unpackhi_epi64(ymm1[k], ymm1[k+8]);
      }
      for (k = 0; k < 16; k++) {
        ymm0[k] = _mm256_permute4x64_epi64(ymm0[k], 0xd8);
        ymm0[k] = _mm256_shuffle_epi8(ymm0[k], shmask);
      }
      /* Store the result vectors */
      uint8_t* const dest_for_jth_element = dest + j;
      for (k = 0; k < 16; k++) {
        _mm256_storeu_si256((__m256i*)(dest_for_jth_element + (total_elements * (offset_into_type + k))), ymm0[k]);
      }
    }
  }
}

/* Routine optimized for unshuffling a buffer for a type size of 2 bytes. */
static void
unshuffle2_avx2(uint8_t* const dest, const uint8_t* const src,
                const size_t vectorizable_elements, const size_t total_elements)
{
  static const size_t bytesoftype = 2;
  size_t i;
  int j;
  __m256i ymm0[2], ymm1[2];

  for (i = 0; i < vectorizable_elements; i += sizeof(__m256i)) {
    /* Load 32 elements (64 bytes) into 2 YMM registers. */
    const uint8_t* const src_for_ith_element = src + i;
    for (j = 0; j < 2; j++) {
      ymm0[j] = _mm256_loadu_si256((__m256i*)(src_for_ith_element + (j * total_elements)));
    }
    /* Shuffle bytes */
    for (j = 0; j < 2; j++) {
      ymm0[j] = _mm256_permute4x64_epi64(ymm0[j], 0xd8);
    }
    /* Compute the low 32 bytes */
    ymm1[0] = _mm256_unpacklo_epi8(ymm0[0], ymm0[1]);
    /* Compute the hi 32 bytes */
    ymm1[1] = _mm256_unpackhi_epi8(ymm0[0], ymm0[1]);
    /* Store the result vectors in proper order */
    _mm256_storeu_si256((__m256i*)(dest + (i * bytesoftype) + (0 * sizeof(__m256i))), ymm1[0]);
    _mm256_storeu_si256((__m256i*)(dest + (i * bytesoftype) + (1 * sizeof(__m256i))), ymm1[1]);
  }
}

/* Routine optimized for unshuffling a buffer for a type size of 4 bytes. */
static void
unshuffle4_avx2(uint8_t* const dest, const uint8_t* const src,
                const size_t vectorizable_elements, const size_t total_elements)
{
  static const size_t bytesoftype = 4;
  size_t i;
  int j;
  __m256i ymm0[4], ymm1[4];

  for (i = 0; i < vectorizable_elements; i += sizeof(__m256i)) {
    /* Load 32 elements (128 bytes) into 4 YMM registers. */
    const uint8_t* const src_for_ith_element = src + i;
    for (j = 0; j < 4; j++) {
      ymm0[j] = _mm256_loadu_si256((__m256i*)(src_for_ith_element + (j * total_elements)));
    }
    /* Shuffle bytes */
    for (j = 0; j < 2; j++) {
      /* Compute the low 32 bytes */
      ymm1[j] = _mm256_unpacklo_epi8(ymm0[j*2], ymm0[j*2+1]);
      /* Compute the hi 32 bytes */
      ymm1[2+j] = _mm256_unpackhi_epi8(ymm0[j*2], ymm0[j*2+1]);
    }
    /* Shuffle 2-byte words */
    for (j = 0; j < 2; j++) {
      /* Compute the low 32 bytes */
      ymm0[j] = _mm256_unpacklo_epi16(ymm1[j*2], ymm1[j*2+1]);
      /* Compute the hi 32 bytes */
      ymm0[2+j] = _mm256_unpackhi_epi16(ymm1[j*2], ymm1[j*2+1]);
    }
    ymm1[0] = _mm256_permute2x128_si256(ymm0[0], ymm0[2], 0x20);
    ymm1[1] = _mm256_permute2x128_si256(ymm0[1], ymm0[3], 0x20);
    ymm1[2] = _mm256_permute2x128_si256(ymm0[0], ymm0[2], 0x31);
    ymm1[3] = _mm256_permute2x128_si256(ymm0[1], ymm0[3], 0x31);
    /* Store the result vectors in proper order */
    for (j = 0; j < 4; j++) {
      _mm256_storeu_si256((__m256i*)(dest + (i * bytesoftype) + (j * sizeof(__m256i))), ymm1[j]);
    }
  }
}

/* Routine optimized for unshuffling a buffer for a type size of 8 bytes. */
static void
unshuffle8_avx2(uint8_t* const dest, const uint8_t* const src,
                const size_t vectorizable_elements, const size_t total_elements)
{
  static const size_t bytesoftype = 8;
  size_t i;
  int j;
  __m256i ymm0[8], ymm1[8];

  for (i = 0; i < vectorizable_elements; i += sizeof(__m256i)) {
    /* Fetch 32 elements (256 bytes) into 8 YMM registers. */
    const uint8_t* const src_for_ith_element = src + i;
    for (j = 0; j < 8; j++) {
      ymm0[j] = _mm256_loadu_si256((__m256i*)(src_for_ith_element + (j * total_elements)));
    }
    /* Shuffle bytes */
    for (j = 0; j < 4; j++) {
      /* Compute the low 32 bytes */
      ymm1[j] = _mm256_unpacklo_epi8(ymm0[j*2], ymm0[j*2+1]);
      /* Compute the hi 32 bytes */
      ymm1[4+j] = _mm256_unpackhi_epi8(ymm0[j*2], ymm0[j*2+1]);
    }
    /* Shuffle words */
    for (j = 0; j < 4; j++) {
      /* Compute the low 32 bytes */
      ymm0[j] = _mm256_unpacklo_epi16(ymm1[j*2], ymm1[j*2+1]);
      /* Compute the hi 32 bytes */
      ymm0[4+j] = _mm256_unpackhi_epi16(ymm1[j*2], ymm1[j*2+1]);
    }
    for (j = 0; j < 8; j++) {
      ymm0[j] = _mm256_permute4x64_epi64(ymm0[j], 0xd8);
    }
    /* Shuffle 4-byte dwords */
    for (j = 0; j < 4; j++) {
      /* Compute the low 32 bytes */
      ymm1[j] = _mm256_unpacklo_epi32(ymm0[j*2], ymm0[j*2+1]);
      /* Compute the hi 32 bytes */
      ymm1[4+j] = _mm256_unpackhi_epi32(ymm0[j*2], ymm0[j*2+1]);
    }
    /* Store the result vectors in proper order */
    _mm256_storeu_si256((__m256i*)(dest + (i * bytesoftype) + (0 * sizeof(__m256i))), ymm1[0]);
    _mm256_storeu_si256((__m256i*)(dest + (i * bytesoftype) + (1 * sizeof(__m256i))), ymm1[2]);
    _mm256_storeu_si256((__m256i*)(dest + (i * bytesoftype) + (2 * sizeof(__m256i))), ymm1[1]);
    _mm256_storeu_si256((__m256i*)(dest + (i * bytesoftype) + (3 * sizeof(__m256i))), ymm1[3]);
    _mm256_storeu_si256((__m256i*)(dest + (i * bytesoftype) + (4 * sizeof(__m256i))), ymm1[4]);
    _mm256_storeu_si256((__m256i*)(dest + (i * bytesoftype) + (5 * sizeof(__m256i))), ymm1[6]);
    _mm256_storeu_si256((__m256i*)(dest + (i * bytesoftype) + (6 * sizeof(__m256i))), ymm1[5]);
    _mm256_storeu_si256((__m256i*)(dest + (i * bytesoftype) + (7 * sizeof(__m256i))), ymm1[7]);
  }
}

/* Routine optimized for unshuffling a buffer for a type size of 16 bytes. */
static void
unshuffle16_avx2(uint8_t* const dest, const uint8_t* const src,
                 const size_t vectorizable_elements, const size_t total_elements)
{
  static const size_t bytesoftype = 16;
  size_t i;
  int j;
  __m256i ymm0[16], ymm1[16];

  for (i = 0; i < vectorizable_elements; i += sizeof(__m256i)) {
    /* Fetch 32 elements (512 bytes) into 16 YMM registers. */
    const uint8_t* const src_for_ith_element = src + i;
    for (j = 0; j < 16; j++) {
      ymm0[j] = _mm256_loadu_si256((__m256i*)(src_for_ith_element + (j * total_elements)));
    }
    /* Shuffle bytes */
    for (j = 0; j < 8; j++) {
      /* Compute the low 32 bytes */
      ymm1[j] = _mm256_unpacklo_epi8(ymm0[j*2], ymm0[j*2+1]);
      /* Compute the hi 32 bytes */
      ymm1[8+j] = _mm256_unpackhi_epi8(ymm0[j*2], ymm0[j*2+1]);
    }
    /* Shuffle 2-byte words */
    for (j = 0; j < 8; j++) {
      /* Compute the low 32 bytes */
      ymm0[j] = _mm256_unpacklo_epi16(ymm1[j*2], ymm1[j*2+1]);
      /* Compute the hi 32 bytes */
      ymm0[8+j] = _mm256_unpackhi_epi16(ymm1[j*2], ymm1[j*2+1]);
    }
    /* Shuffle 4-byte dwords */
    for (j = 0; j < 8; j++) {
      /* Compute the low 32 bytes */
      ymm1[j] = _mm256_unpacklo_epi32(ymm0[j*2], ymm0[j*2+1]);
      /* Compute the hi 32 bytes */
      ymm1[8+j] = _mm256_unpackhi_epi32(ymm0[j*2], ymm0[j*2+1]);
    }
    /* Shuffle 8-byte qwords */
    for (j = 0; j < 8; j++) {
      /* Compute the low 32 bytes */
      ymm0[j] = _mm256_unpacklo_epi64(ymm1[j*2], ymm1[j*2+1]);
      /* Compute the hi 32 bytes */
      ymm0[8+j] = _mm256_unpackhi_epi64(ymm1[j*2], ymm1[j*2+1]);
    }
    for (j = 0; j < 8; j++) {
      ymm1[j] = _mm256_permute2x128_si256(ymm0[j], ymm0[j+8], 0x20);
      ymm1[j+8] = _mm256_permute2x128_si256(ymm0[j], ymm0[j+8], 0x31);
    }
    /* Store the result vectors in proper order */
    _mm256_storeu_si256((__m256i*)(dest + (i * bytesoftype) + (0 * sizeof(__m256i))), ymm1[0]);
    _mm256_storeu_si256((__m256i*)(dest + (i * bytesoftype) + (1 * sizeof(__m256i))), ymm1[4]);
    _mm256_storeu_si256((__m256i*)(dest + (i * bytesoftype) + (2 * sizeof(__m256i))), ymm1[2]);
    _mm256_storeu_si256((__m256i*)(dest + (i * bytesoftype) + (3 * sizeof(__m256i))), ymm1[6]);
    _mm256_storeu_si256((__m256i*)(dest + (i * bytesoftype) + (4 * sizeof(__m256i))), ymm1[1]);
    _mm256_storeu_si256((__m256i*)(dest + (i * bytesoftype) + (5 * sizeof(__m256i))), ymm1[5]);
    _mm256_storeu_si256((__m256i*)(dest + (i * bytesoftype) + (6 * sizeof(__m256i))), ymm1[3]);
    _mm256_storeu_si256((__m256i*)(dest + (i * bytesoftype) + (7 * sizeof(__m256i))), ymm1[7]);
    _mm256_storeu_si256((__m256i*)(dest + (i * bytesoftype) + (8 * sizeof(__m256i))), ymm1[8]);
    _mm256_storeu_si256((__m256i*)(dest + (i * bytesoftype) + (9 * sizeof(__m256i))), ymm1[12]);
    _mm256_storeu_si256((__m256i*)(dest + (i * bytesoftype) + (10 * sizeof(__m256i))), ymm1[10]);
    _mm256_storeu_si256((__m256i*)(dest + (i * bytesoftype) + (11 * sizeof(__m256i))), ymm1[14]);
    _mm256_storeu_si256((__m256i*)(dest + (i * bytesoftype) + (12 * sizeof(__m256i))), ymm1[9]);
    _mm256_storeu_si256((__m256i*)(dest + (i * bytesoftype) + (13 * sizeof(__m256i))), ymm1[13]);
    _mm256_storeu_si256((__m256i*)(dest + (i * bytesoftype) + (14 * sizeof(__m256i))), ymm1[11]);
    _mm256_storeu_si256((__m256i*)(dest + (i * bytesoftype) + (15 * sizeof(__m256i))), ymm1[15]);
  }
}

/* Routine optimized for unshuffling a buffer for a type size larger than 16 bytes. */
static void
unshuffle16_tiled_avx2(uint8_t* const dest, const uint8_t* const src,
                       const size_t vectorizable_elements, const size_t total_elements, const size_t bytesoftype)
{
  size_t i;
  int j;
  __m256i ymm0[16], ymm1[16];

  const lldiv_t vecs_per_el = lldiv(bytesoftype, sizeof(__m128i));

  /* The unshuffle loops are inverted (compared to shuffle16_tiled_avx2)
     to optimize cache utilization. */
  size_t offset_into_type;
  for (offset_into_type = 0; offset_into_type < bytesoftype;
       offset_into_type += (offset_into_type == 0 && vecs_per_el.rem > 0 ? vecs_per_el.rem : sizeof(__m128i))) {
    for (i = 0; i < vectorizable_elements; i += sizeof(__m256i)) {
      /* Load the first 16 bytes of 32 adjacent elements (512 bytes) into 16 YMM registers */
      const uint8_t* const src_for_ith_element = src + i;
      for (j = 0; j < 16; j++) {
        ymm0[j] = _mm256_loadu_si256((__m256i*)(src_for_ith_element + (total_elements * (offset_into_type + j))));
      }
      /* Shuffle bytes */
      for (j = 0; j < 8; j++) {
        /* Compute the low 32 bytes */
        ymm1[j] = _mm256_unpacklo_epi8(ymm0[j*2], ymm0[j*2+1]);
        /* Compute the hi 32 bytes */
        ymm1[8+j] = _mm256_unpackhi_epi8(ymm0[j*2], ymm0[j*2+1]);
      }
      /* Shuffle 2-byte words */
      for (j = 0; j < 8; j++) {
        /* Compute the low 32 bytes */
        ymm0[j] = _mm256_unpacklo_epi16(ymm1[j*2], ymm1[j*2+1]);
        /* Compute the hi 32 bytes */
        ymm0[8+j] = _mm256_unpackhi_epi16(ymm1[j*2], ymm1[j*2+1]);
      }
      /* Shuffle 4-byte dwords */
      for (j = 0; j < 8; j++) {
        /* Compute the low 32 bytes */
        ymm1[j] = _mm256_unpacklo_epi32(ymm0[j*2], ymm0[j*2+1]);
        /* Compute the hi 32 bytes */
        ymm1[8+j] = _mm256_unpackhi_epi32(ymm0[j*2], ymm0[j*2+1]);
      }
      /* Shuffle 8-byte qwords */
      for (j = 0; j < 8; j++) {
        /* Compute the low 32 bytes */
        ymm0[j] = _mm256_unpacklo_epi64(ymm1[j*2], ymm1[j*2+1]);
        /* Compute the hi 32 bytes */
        ymm0[8+j] = _mm256_unpackhi_epi64(ymm1[j*2], ymm1[j*2+1]);
      }
      for (j = 0; j < 8; j++) {
        ymm1[j] = _mm256_permute2x128_si256(ymm0[j], ymm0[j+8], 0x20);
        ymm1[j+8] = _mm256_permute2x128_si256(ymm0[j], ymm0[j+8], 0x31);
      }
      /* Store the result vectors in proper order */
      const uint8_t* const dest_with_offset = dest + offset_into_type;
      _mm256_storeu2_m128i(
        (__m128i*)(dest_with_offset + (i + 0x01) * bytesoftype),
        (__m128i*)(dest_with_offset + (i + 0x00) * bytesoftype), ymm1[0]);
      _mm256_storeu2_m128i(
        (__m128i*)(dest_with_offset + (i + 0x03) * bytesoftype),
        (__m128i*)(dest_with_offset + (i + 0x02) * bytesoftype), ymm1[4]);
      _mm256_storeu2_m128i(
        (__m128i*)(dest_with_offset + (i + 0x05) * bytesoftype),
        (__m128i*)(dest_with_offset + (i + 0x04) * bytesoftype), ymm1[2]);
      _mm256_storeu2_m128i(
        (__m128i*)(dest_with_offset + (i + 0x07) * bytesoftype),
        (__m128i*)(dest_with_offset + (i + 0x06) * bytesoftype), ymm1[6]);
      _mm256_storeu2_m128i(
        (__m128i*)(dest_with_offset + (i + 0x09) * bytesoftype),
        (__m128i*)(dest_with_offset + (i + 0x08) * bytesoftype), ymm1[1]);
      _mm256_storeu2_m128i(
        (__m128i*)(dest_with_offset + (i + 0x0b) * bytesoftype),
        (__m128i*)(dest_with_offset + (i + 0x0a) * bytesoftype), ymm1[5]);
      _mm256_storeu2_m128i(
        (__m128i*)(dest_with_offset + (i + 0x0d) * bytesoftype),
        (__m128i*)(dest_with_offset + (i + 0x0c) * bytesoftype), ymm1[3]);
      _mm256_storeu2_m128i(
        (__m128i*)(dest_with_offset + (i + 0x0f) * bytesoftype),
        (__m128i*)(dest_with_offset + (i + 0x0e) * bytesoftype), ymm1[7]);
      _mm256_storeu2_m128i(
        (__m128i*)(dest_with_offset + (i + 0x11) * bytesoftype),
        (__m128i*)(dest_with_offset + (i + 0x10) * bytesoftype), ymm1[8]);
      _mm256_storeu2_m128i(
        (__m128i*)(dest_with_offset + (i + 0x13) * bytesoftype),
        (__m128i*)(dest_with_offset + (i + 0x12) * bytesoftype), ymm1[12]);
      _mm256_storeu2_m128i(
        (__m128i*)(dest_with_offset + (i + 0x15) * bytesoftype),
        (__m128i*)(dest_with_offset + (i + 0x14) * bytesoftype), ymm1[10]);
      _mm256_storeu2_m128i(
        (__m128i*)(dest_with_offset + (i + 0x17) * bytesoftype),
        (__m128i*)(dest_with_offset + (i + 0x16) * bytesoftype), ymm1[14]);
      _mm256_storeu2_m128i(
        (__m128i*)(dest_with_offset + (i + 0x19) * bytesoftype),
        (__m128i*)(dest_with_offset + (i + 0x18) * bytesoftype), ymm1[9]);
      _mm256_storeu2_m128i(
        (__m128i*)(dest_with_offset + (i + 0x1b) * bytesoftype),
        (__m128i*)(dest_with_offset + (i + 0x1a) * bytesoftype), ymm1[13]);
      _mm256_storeu2_m128i(
        (__m128i*)(dest_with_offset + (i + 0x1d) * bytesoftype),
        (__m128i*)(dest_with_offset + (i + 0x1c) * bytesoftype), ymm1[11]);
      _mm256_storeu2_m128i(
        (__m128i*)(dest_with_offset + (i + 0x1f) * bytesoftype),
        (__m128i*)(dest_with_offset + (i + 0x1e) * bytesoftype), ymm1[15]);
    }
  }
}

/* Shuffle a block. This can never fail. */
void
shuffle_avx2(const size_t bytesoftype, const size_t blocksize,
             const uint8_t* const _src, uint8_t* const _dest) {
  const size_t vectorized_chunk_size = bytesoftype * sizeof(__m256i);

  /* If the block size is too small to be vectorized,
     use the generic implementation. */
  if (blocksize < vectorized_chunk_size) {
    shuffle_generic(bytesoftype, blocksize, _src, _dest);
    return;
  }

  /* If the blocksize is not a multiple of both the typesize and
     the vector size, round the blocksize down to the next value
     which is a multiple of both. The vectorized shuffle can be
     used for that portion of the data, and the naive implementation
     can be used for the remaining portion. */
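  /* For example (illustrative): with bytesoftype == 4 and blocksize == 1000,
     vectorized_chunk_size == 128, so vectorizable_bytes == 896,
     vectorizable_elements == 224 and total_elements == 250; the last
     104 bytes are handled by shuffle_generic_inline() below. */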
  const size_t vectorizable_bytes = blocksize - (blocksize % vectorized_chunk_size);
  const size_t vectorizable_elements = vectorizable_bytes / bytesoftype;
  const size_t total_elements = blocksize / bytesoftype;

  /* Optimized shuffle implementations */
  switch (bytesoftype)
  {
  case 2:
    shuffle2_avx2(_dest, _src, vectorizable_elements, total_elements);
    break;
  case 4:
    shuffle4_avx2(_dest, _src, vectorizable_elements, total_elements);
    break;
  case 8:
    shuffle8_avx2(_dest, _src, vectorizable_elements, total_elements);
    break;
  case 16:
    shuffle16_avx2(_dest, _src, vectorizable_elements, total_elements);
    break;
  default:
    /* For types larger than 16 bytes, use the AVX2 tiled shuffle. */
    if (bytesoftype > sizeof(__m128i)) {
      shuffle16_tiled_avx2(_dest, _src, vectorizable_elements, total_elements, bytesoftype);
    }
    else {
      /* Non-optimized shuffle */
      shuffle_generic(bytesoftype, blocksize, _src, _dest);
      /* The non-optimized function covers the whole buffer,
         so we're done processing here. */
      return;
    }
  }

  /* If the buffer had any bytes at the end which couldn't be handled
     by the vectorized implementations, use the non-optimized version
     to finish them up. */
  if (vectorizable_bytes < blocksize) {
    shuffle_generic_inline(bytesoftype, vectorizable_bytes, blocksize, _src, _dest);
  }
}

/* Unshuffle a block. This can never fail. */
void
unshuffle_avx2(const size_t bytesoftype, const size_t blocksize,
               const uint8_t* const _src, uint8_t* const _dest) {
  const size_t vectorized_chunk_size = bytesoftype * sizeof(__m256i);

  /* If the block size is too small to be vectorized,
     use the generic implementation. */
  if (blocksize < vectorized_chunk_size) {
    unshuffle_generic(bytesoftype, blocksize, _src, _dest);
    return;
  }

  /* If the blocksize is not a multiple of both the typesize and
     the vector size, round the blocksize down to the next value
     which is a multiple of both. The vectorized unshuffle can be
     used for that portion of the data, and the naive implementation
     can be used for the remaining portion. */
  const size_t vectorizable_bytes = blocksize - (blocksize % vectorized_chunk_size);
  const size_t vectorizable_elements = vectorizable_bytes / bytesoftype;
  const size_t total_elements = blocksize / bytesoftype;

  /* Optimized unshuffle implementations */
  switch (bytesoftype)
  {
  case 2:
    unshuffle2_avx2(_dest, _src, vectorizable_elements, total_elements);
    break;
  case 4:
    unshuffle4_avx2(_dest, _src, vectorizable_elements, total_elements);
    break;
  case 8:
    unshuffle8_avx2(_dest, _src, vectorizable_elements, total_elements);
    break;
  case 16:
    unshuffle16_avx2(_dest, _src, vectorizable_elements, total_elements);
    break;
  default:
    /* For types larger than 16 bytes, use the AVX2 tiled unshuffle. */
    if (bytesoftype > sizeof(__m128i)) {
      unshuffle16_tiled_avx2(_dest, _src, vectorizable_elements, total_elements, bytesoftype);
    }
    else {
      /* Non-optimized unshuffle */
      unshuffle_generic(bytesoftype, blocksize, _src, _dest);
      /* The non-optimized function covers the whole buffer,
         so we're done processing here. */
      return;
    }
  }

  /* If the buffer had any bytes at the end which couldn't be handled
     by the vectorized implementations, use the non-optimized version
     to finish them up. */
  if (vectorizable_bytes < blocksize) {
    unshuffle_generic_inline(bytesoftype, vectorizable_bytes, blocksize, _src, _dest);
  }
}
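
/* Illustrative usage sketch (disabled, like the debugging helper above): it is
   not part of the library and assumes the caller has already verified AVX2
   support at runtime. It simply round-trips a buffer through
   shuffle_avx2()/unshuffle_avx2() and checks that the data is recovered. */
#if 0
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void example_roundtrip(void)
{
  const size_t typesize = 4;        /* one of the specialized type sizes */
  const size_t blocksize = 4096;    /* a multiple of typesize */
  uint8_t* src = malloc(blocksize);
  uint8_t* shuffled = malloc(blocksize);
  uint8_t* restored = malloc(blocksize);
  size_t i;

  if (src == NULL || shuffled == NULL || restored == NULL) {
    free(src); free(shuffled); free(restored);
    return;
  }
  for (i = 0; i < blocksize; i++) {
    src[i] = (uint8_t)i;
  }

  /* Gather the k-th byte of every 4-byte element into contiguous streams... */
  shuffle_avx2(typesize, blocksize, src, shuffled);
  /* ...then scatter the bytes back to their original positions. */
  unshuffle_avx2(typesize, blocksize, shuffled, restored);

  printf("roundtrip %s\n", memcmp(src, restored, blocksize) == 0 ? "ok" : "FAILED");

  free(src);
  free(shuffled);
  free(restored);
}
#endif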