x86: better implementations for MSVC and others without SIMDE_STATEMENT_EXPR_

Closes: #1219
mr-c committed Sep 14, 2024
1 parent 6686232 commit 2fd7612
Showing 4 changed files with 116 additions and 40 deletions.
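Note on the approach: every new branch below relies on SIMDe's switch-based "constify" macros (simde/simde-constify.h). Compilers without statement expressions, MSVC in particular, cannot take the SIMDE_STATEMENT_EXPR_ macro paths, so an imm8-style parameter that must be a compile-time constant is instead dispatched through a switch whose cases re-invoke the target function with a literal, assigning into a result variable. The default argument, e.g. (HEDLEY_UNREACHABLE(), simde_mm_undefined_ps()), is a comma expression: it marks the branch unreachable, then yields a dummy value of the right type. A minimal sketch of the pattern, assuming it mirrors the real macros (MY_CONSTIFY_4_ is a hypothetical name; SIMDe's versions go up to 64 cases):

/* Hypothetical 4-case sketch of the switch-based constify pattern;
 * the immediate is appended as the last argument of each call. */
#define MY_CONSTIFY_4_(func_name, result, default_case, imm, ...) \
  do { \
    switch (imm) { \
      case 0: result = func_name(__VA_ARGS__, 0); break; \
      case 1: result = func_name(__VA_ARGS__, 1); break; \
      case 2: result = func_name(__VA_ARGS__, 2); break; \
      case 3: result = func_name(__VA_ARGS__, 3); break; \
      default: result = default_case; break; \
    } \
  } while (0)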
22 changes: 18 additions & 4 deletions simde/x86/avx.h
@@ -2083,7 +2083,11 @@ simde_mm256_round_ps (simde__m256 a, const int rounding) {
simde__m256_private
r_,
a_ = simde__m256_to_private(a);

#if SIMDE_NATURAL_VECTOR_SIZE_LE(128) && !defined(SIMDE_STATEMENT_EXPR_)
for (size_t i = 0 ; i < (sizeof(r_.m128) / sizeof(r_.m128[0])) ; i++) {
SIMDE_CONSTIFY_16_(simde_mm_round_ps, r_.m128[i], (HEDLEY_UNREACHABLE(), simde_mm_undefined_ps()), rounding, a_.m128[i]);
}
#else
switch (rounding & ~SIMDE_MM_FROUND_NO_EXC) {
#if defined(simde_math_nearbyintf)
case SIMDE_MM_FROUND_CUR_DIRECTION:
@@ -2128,7 +2132,7 @@ simde_mm256_round_ps (simde__m256 a, const int rounding) {
default:
HEDLEY_UNREACHABLE_RETURN(simde_mm256_undefined_ps());
}

#endif
return simde__m256_from_private(r_);
}
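With the branch above, a call whose rounding argument reaches simde_mm_round_ps as a plain function parameter still compiles on MSVC, because SIMDE_CONSTIFY_16_ re-materializes the 4-bit masked value as a literal in one of its 16 cases. A usage sketch (x is a hypothetical input vector):

/* Round each lane to nearest-even without raising exceptions. */
simde__m256 y = simde_mm256_round_ps(x, SIMDE_MM_FROUND_TO_NEAREST_INT | SIMDE_MM_FROUND_NO_EXC);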
#if defined(SIMDE_X86_AVX_NATIVE)
@@ -2157,6 +2161,11 @@ simde_mm256_round_pd (simde__m256d a, const int rounding) {
simde__m256d_private
r_,
a_ = simde__m256d_to_private(a);
#if SIMDE_NATURAL_VECTOR_SIZE_LE(128) && !defined(SIMDE_STATEMENT_EXPR_)
for (size_t i = 0 ; i < (sizeof(r_.m128d) / sizeof(r_.m128d[0])) ; i++) {
SIMDE_CONSTIFY_16_(simde_mm_round_pd, r_.m128d[i], (HEDLEY_UNREACHABLE(), simde_mm_undefined_pd()), rounding, a_.m128d[i]);
}
#else

switch (rounding & ~SIMDE_MM_FROUND_NO_EXC) {
#if defined(simde_math_nearbyint)
@@ -2202,7 +2211,7 @@ simde_mm256_round_pd (simde__m256d a, const int rounding) {
default:
HEDLEY_UNREACHABLE_RETURN(simde_mm256_undefined_pd());
}

#endif
return simde__m256d_from_private(r_);
}
#if defined(SIMDE_X86_AVX_NATIVE)
@@ -2894,6 +2903,11 @@ simde_mm256_cmp_ps
a_ = simde__m256_to_private(a),
b_ = simde__m256_to_private(b);

#if !defined(SIMDE_STATEMENT_EXPR_) && SIMDE_NATURAL_VECTOR_SIZE_LE(128)
for (size_t i = 0 ; i < (sizeof(r_.m128) / sizeof(r_.m128[0])) ; i++) {
SIMDE_CONSTIFY_32_(simde_mm_cmp_ps, r_.m128[i], (HEDLEY_UNREACHABLE(), simde_mm_undefined_ps()), imm8, a_.m128[i], b_.m128[i]);
}
#else
switch (imm8) {
case SIMDE_CMP_EQ_OQ:
case SIMDE_CMP_EQ_OS:
@@ -3076,7 +3090,7 @@ simde_mm256_cmp_ps
default:
HEDLEY_UNREACHABLE();
}

#endif
return simde__m256_from_private(r_);
}
#if defined(__clang__) && defined(__AVX512DQ__)
24 changes: 20 additions & 4 deletions simde/x86/avx512/cmp.h
@@ -248,7 +248,15 @@ simde_mm512_cmp_ps_mask (simde__m512 a, simde__m512 b, const int imm8)
r_,
a_ = simde__m512_to_private(a),
b_ = simde__m512_to_private(b);

#if !defined(SIMDE_STATEMENT_EXPR_) && SIMDE_NATURAL_VECTOR_SIZE_LE(128)
for (size_t i = 0 ; i < (sizeof(r_.m128) / sizeof(r_.m128[0])) ; i++) {
SIMDE_CONSTIFY_32_(simde_mm_cmp_ps, r_.m128[i], (HEDLEY_UNREACHABLE(), simde_mm_undefined_ps()), imm8, a_.m128[i], b_.m128[i]);
}
#elif !defined(SIMDE_STATEMENT_EXPR_) && SIMDE_NATURAL_VECTOR_SIZE_LE(256)
for (size_t i = 0 ; i < (sizeof(r_.m256) / sizeof(r_.m256[0])) ; i++) {
SIMDE_CONSTIFY_32_(simde_mm256_cmp_ps, r_.m256[i], (HEDLEY_UNREACHABLE(), simde_mm256_undefined_ps()), imm8, a_.m256[i], b_.m256[i]);
}
#else
switch (imm8) {
case SIMDE_CMP_EQ_OQ:
case SIMDE_CMP_EQ_OS:
@@ -431,7 +439,7 @@ simde_mm512_cmp_ps_mask (simde__m512 a, simde__m512 b, const int imm8)
default:
HEDLEY_UNREACHABLE();
}

#endif
return simde_mm512_movepi32_mask(simde_mm512_castps_si512(simde__m512_from_private(r_)));
}
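All branches above fill r_ with per-lane all-ones/all-zeros comparison results; the final line then compresses that vector into a k-mask by taking each 32-bit lane's sign bit. A scalar sketch of that compression, for intuition only (mask_from_lanes_ is a hypothetical helper, not SIMDe code):

/* Build a 16-bit mask from 16 all-ones/all-zeros int32 lanes;
 * an all-ones lane is negative, so its sign bit selects the mask bit. */
static uint16_t mask_from_lanes_(const int32_t lanes[16]) {
  uint16_t m = 0;
  for (int i = 0 ; i < 16 ; i++) {
    m = HEDLEY_STATIC_CAST(uint16_t, m | ((lanes[i] < 0 ? 1 : 0) << i));
  }
  return m;
}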
#if defined(SIMDE_X86_AVX512F_NATIVE)
@@ -496,7 +504,15 @@ simde_mm512_cmp_pd_mask (simde__m512d a, simde__m512d b, const int imm8)
r_,
a_ = simde__m512d_to_private(a),
b_ = simde__m512d_to_private(b);

#if !defined(SIMDE_STATEMENT_EXPR_) && SIMDE_NATURAL_VECTOR_SIZE_LE(128)
for (size_t i = 0 ; i < (sizeof(r_.m128d) / sizeof(r_.m128d[0])) ; i++) {
SIMDE_CONSTIFY_32_(simde_mm_cmp_pd, r_.m128d[i], (HEDLEY_UNREACHABLE(), simde_mm_undefined_pd()), imm8, a_.m128d[i], b_.m128d[i]);
}
#elif !defined(SIMDE_STATEMENT_EXPR_) && SIMDE_NATURAL_VECTOR_SIZE_LE(256)
for (size_t i = 0 ; i < (sizeof(r_.m256d) / sizeof(r_.m256d[0])) ; i++) {
SIMDE_CONSTIFY_32_(simde_mm256_cmp_pd, r_.m256d[i], (HEDLEY_UNREACHABLE(), simde_mm256_undefined_pd()), imm8, a_.m256d[i], b_.m256d[i]);
}
#else
switch (imm8) {
case SIMDE_CMP_EQ_OQ:
case SIMDE_CMP_EQ_OS:
@@ -679,7 +695,7 @@ simde_mm512_cmp_pd_mask (simde__m512d a, simde__m512d b, const int imm8)
default:
HEDLEY_UNREACHABLE();
}

#endif
return simde_mm512_movepi64_mask(simde_mm512_castpd_si512(simde__m512d_from_private(r_)));
}
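The same dispatch makes the predicate safe to pass through non-macro code paths: SIMDE_CONSTIFY_32_ covers the full 5-bit range of the 32 SIMDE_CMP_* predicates. A usage sketch (a and b are hypothetical inputs):

/* One bit per double lane; bit i is set where a[i] < b[i] (ordered, quiet). */
simde__mmask8 lt = simde_mm512_cmp_pd_mask(a, b, SIMDE_CMP_LT_OQ);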
#if defined(SIMDE_X86_AVX512F_NATIVE)
33 changes: 22 additions & 11 deletions simde/x86/sse.h
@@ -664,7 +664,7 @@ simde_x_mm_round_ps (simde__m128 a, int rounding, int lax_rounding)
r_.f32[i] = simde_math_nearbyintf(a_.f32[i]);
}
#else
- HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_pd());
+ HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_ps());
#endif
break;

@@ -683,7 +683,7 @@ simde_x_mm_round_ps (simde__m128 a, int rounding, int lax_rounding)
r_.f32[i] = simde_math_roundevenf(a_.f32[i]);
}
#else
- HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_pd());
+ HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_ps());
#endif
break;

@@ -702,7 +702,7 @@ simde_x_mm_round_ps (simde__m128 a, int rounding, int lax_rounding)
r_.f32[i] = simde_math_floorf(a_.f32[i]);
}
#else
- HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_pd());
+ HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_ps());
#endif
break;

@@ -721,7 +721,7 @@ simde_x_mm_round_ps (simde__m128 a, int rounding, int lax_rounding)
r_.f32[i] = simde_math_ceilf(a_.f32[i]);
}
#else
- HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_pd());
+ HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_ps());
#endif
break;

@@ -740,12 +740,12 @@ simde_x_mm_round_ps (simde__m128 a, int rounding, int lax_rounding)
r_.f32[i] = simde_math_truncf(a_.f32[i]);
}
#else
- HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_pd());
+ HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_ps());
#endif
break;

default:
- HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_pd());
+ HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_ps());
}

return simde__m128_from_private(r_);
@@ -4122,11 +4122,22 @@ simde_mm_shuffle_ps (simde__m128 a, simde__m128 b, const int imm8)
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);

#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) && !defined(SIMDE_STATEMENT_EXPR_)
simde_float32 temp1, temp2;
SIMDE_CONSTIFY_4_(vgetq_lane_f32, temp1, (HEDLEY_UNREACHABLE(), 0.0f), (imm8) & (0x3), a_.neon_f32);
r_.neon_f32 = vmovq_n_f32(temp1);
SIMDE_CONSTIFY_4_(vgetq_lane_f32, temp1, (HEDLEY_UNREACHABLE(), 0.0f), (((imm8) >> 2) & 0x3), a_.neon_f32);
r_.neon_f32 = vsetq_lane_f32(temp1, r_.neon_f32, 1);
SIMDE_CONSTIFY_4_(vgetq_lane_f32, temp1, (HEDLEY_UNREACHABLE(), 0.0f), (((imm8) >> 4) & 0x3), b_.neon_f32);
r_.neon_f32 = vsetq_lane_f32(temp1, r_.neon_f32, 2);
SIMDE_CONSTIFY_4_(vgetq_lane_f32, temp2, (HEDLEY_UNREACHABLE(), 0.0f), (((imm8) >> 6) & 0x3), b_.neon_f32);
r_.neon_f32 = vsetq_lane_f32(temp2, r_.neon_f32, 3);
#else
r_.f32[0] = a_.f32[(imm8 >> 0) & 3];
r_.f32[1] = a_.f32[(imm8 >> 2) & 3];
r_.f32[2] = b_.f32[(imm8 >> 4) & 3];
r_.f32[3] = b_.f32[(imm8 >> 6) & 3];
#endif

return simde__m128_from_private(r_);
}
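The NEON branch exists because vgetq_lane_f32/vsetq_lane_f32 require their lane index to be a compile-time constant, so each 2-bit field of imm8 is dispatched through SIMDE_CONSTIFY_4_ separately. A worked decode as a sketch, with hypothetical lane values:

/* imm8 = 0x1B selects fields 3, 2, 1, 0 from bit pairs 0-1, 2-3, 4-5, 6-7. */
simde__m128 a = simde_mm_set_ps(3.0f, 2.0f, 1.0f, 0.0f); /* a[i] = i     */
simde__m128 b = simde_mm_set_ps(7.0f, 6.0f, 5.0f, 4.0f); /* b[i] = i + 4 */
simde__m128 r = simde_mm_shuffle_ps(a, b, 0x1B);
/* r[0] = a[3] = 3, r[1] = a[2] = 2, r[2] = b[1] = 5, r[3] = b[0] = 4 */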
77 changes: 56 additions & 21 deletions simde/x86/sse2.h
@@ -5504,10 +5504,21 @@ simde_mm_shuffle_epi32 (simde__m128i a, const int imm8)
simde__m128i_private
r_,
a_ = simde__m128i_to_private(a);

#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) && !defined(SIMDE_STATEMENT_EXPR_)
int32_t temp;
SIMDE_CONSTIFY_4_(vgetq_lane_s32, temp, (HEDLEY_UNREACHABLE(), 0), (imm8) & (0x3), a_.neon_i32);
r_.neon_i32 = vmovq_n_s32(temp);
SIMDE_CONSTIFY_4_(vgetq_lane_s32, temp, (HEDLEY_UNREACHABLE(), 0), (((imm8) >> 2) & 0x3), a_.neon_i32);
r_.neon_i32 = vsetq_lane_s32(temp, r_.neon_i32, 1);
SIMDE_CONSTIFY_4_(vgetq_lane_s32, temp, (HEDLEY_UNREACHABLE(), 0), (((imm8) >> 4) & 0x3), a_.neon_i32);
r_.neon_i32 = vsetq_lane_s32(temp, r_.neon_i32, 2);
SIMDE_CONSTIFY_4_(vgetq_lane_s32, temp, (HEDLEY_UNREACHABLE(), 0), (((imm8) >> 6) & 0x3), a_.neon_i32);
r_.neon_i32 = vsetq_lane_s32(temp, r_.neon_i32, 3);
#else
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = a_.i32[(imm8 >> (i * 2)) & 3];
}
#endif

return simde__m128i_from_private(r_);
}
@@ -5587,15 +5598,26 @@ simde_mm_shufflehi_epi16 (simde__m128i a, const int imm8)
simde__m128i_private
r_,
a_ = simde__m128i_to_private(a);

#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) && !defined(SIMDE_STATEMENT_EXPR_)
r_ = a_;
int16_t temp;
SIMDE_CONSTIFY_8_(vgetq_lane_s16, temp, (HEDLEY_UNREACHABLE(), 0), ((imm8) & 0x3) + 4, a_.neon_i16);
r_.neon_i16 = vsetq_lane_s16(temp, r_.neon_i16, 4);
SIMDE_CONSTIFY_8_(vgetq_lane_s16, temp, (HEDLEY_UNREACHABLE(), 0), (((imm8) >> 2) & 0x3) + 4, a_.neon_i16);
r_.neon_i16 = vsetq_lane_s16(temp, r_.neon_i16, 5);
SIMDE_CONSTIFY_8_(vgetq_lane_s16, temp, (HEDLEY_UNREACHABLE(), 0), (((imm8) >> 4) & 0x3) + 4, a_.neon_i16);
r_.neon_i16 = vsetq_lane_s16(temp, r_.neon_i16, 6);
SIMDE_CONSTIFY_8_(vgetq_lane_s16, temp, (HEDLEY_UNREACHABLE(), 0), (((imm8) >> 6) & 0x3) + 4, a_.neon_i16);
r_.neon_i16 = vsetq_lane_s16(temp, r_.neon_i16, 7);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < ((sizeof(a_.i16) / sizeof(a_.i16[0])) / 2) ; i++) {
r_.i16[i] = a_.i16[i];
}
for (size_t i = ((sizeof(a_.i16) / sizeof(a_.i16[0])) / 2) ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
r_.i16[i] = a_.i16[((imm8 >> ((i - 4) * 2)) & 3) + 4];
}
#endif
return simde__m128i_from_private(r_);
}
#if defined(SIMDE_X86_SSE2_NATIVE)
@@ -5647,14 +5669,27 @@ simde_mm_shufflelo_epi16 (simde__m128i a, const int imm8)
simde__m128i_private
r_,
a_ = simde__m128i_to_private(a);

#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) && !defined(SIMDE_STATEMENT_EXPR_)
r_ = a_;
int16_t temp;
SIMDE_CONSTIFY_8_(vgetq_lane_s16, temp, (HEDLEY_UNREACHABLE(), 0), ((imm8) & 0x3), a_.neon_i16);
r_.neon_i16 = vsetq_lane_s16(temp, r_.neon_i16, 0);
SIMDE_CONSTIFY_8_(vgetq_lane_s16, temp, (HEDLEY_UNREACHABLE(), 0), (((imm8) >> 2) & 0x3), a_.neon_i16);
r_.neon_i16 = vsetq_lane_s16(temp, r_.neon_i16, 1);
SIMDE_CONSTIFY_8_(vgetq_lane_s16, temp, (HEDLEY_UNREACHABLE(), 0), (((imm8) >> 4) & 0x3), a_.neon_i16);
r_.neon_i16 = vsetq_lane_s16(temp, r_.neon_i16, 2);
SIMDE_CONSTIFY_8_(vgetq_lane_s16, temp, (HEDLEY_UNREACHABLE(), 0), (((imm8) >> 6) & 0x3), a_.neon_i16);
r_.neon_i16 = vsetq_lane_s16(temp, r_.neon_i16, 3);
#else
for (size_t i = 0 ; i < ((sizeof(r_.i16) / sizeof(r_.i16[0])) / 2) ; i++) {
r_.i16[i] = a_.i16[((imm8 >> (i * 2)) & 3)];
}
SIMDE_VECTORIZE
for (size_t i = ((sizeof(a_.i16) / sizeof(a_.i16[0])) / 2) ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
r_.i16[i] = a_.i16[i];
}
#endif

return simde__m128i_from_private(r_);
}
