#pragma once

// DO NOT DEFINE STATIC DATA IN THIS HEADER!
// See Note [Do not compile initializers with AVX]

#include <ATen/cpu/vec/intrinsics.h>
#include <ATen/cpu/vec/vec_base.h>
#include <c10/util/irange.h>
#if defined(CPU_CAPABILITY_AVX512) && !defined(_MSC_VER)
#include <sleef.h>
#endif

namespace at {
namespace vec {
// See Note [CPU_CAPABILITY namespace]
inline namespace CPU_CAPABILITY {

#if defined(CPU_CAPABILITY_AVX512) && !defined(_MSC_VER)

template <> class Vectorized<float> {
private:
  static constexpr __m512i zero_vec {0, 0, 0, 0, 0, 0, 0, 0};
public:
  __m512 values;
  using value_type = float;
  using size_type = int;
  static constexpr size_type size() {
    return 16;
  }
  Vectorized() {}
  Vectorized(__m512 v) : values(v) {}
  Vectorized(float val) {
    values = _mm512_set1_ps(val);
  }
  Vectorized(float val1, float val2, float val3, float val4,
             float val5, float val6, float val7, float val8,
             float val9, float val10, float val11, float val12,
             float val13, float val14, float val15, float val16) {
    values = _mm512_setr_ps(val1, val2, val3, val4, val5, val6, val7, val8,
                            val9, val10, val11, val12, val13, val14, val15, val16);
  }
  operator __m512() const {
    return values;
  }
  template <int64_t mask>
  static Vectorized<float> blend(const Vectorized<float>& a, const Vectorized<float>& b) {
    return _mm512_mask_blend_ps(mask, a.values, b.values);
  }
  static Vectorized<float> blendv(const Vectorized<float>& a, const Vectorized<float>& b,
                                  const Vectorized<float>& mask) {
    auto all_ones = _mm512_set1_epi32(0xFFFFFFFF);
    auto mmask = _mm512_cmp_epi32_mask(_mm512_castps_si512(mask.values), all_ones, _MM_CMPINT_EQ);
    return _mm512_mask_blend_ps(mmask, a.values, b.values);
  }
  template <typename step_t>
  static Vectorized<float> arange(float base = 0.f, step_t step = static_cast<step_t>(1)) {
    return Vectorized<float>(
      base,             base +      step, base +  2 * step, base +  3 * step,
      base +  4 * step, base +  5 * step, base +  6 * step, base +  7 * step,
      base +  8 * step, base +  9 * step, base + 10 * step, base + 11 * step,
      base + 12 * step, base + 13 * step, base + 14 * step, base + 15 * step);
  }
  static Vectorized<float> set(const Vectorized<float>& a, const Vectorized<float>& b,
                               int64_t count = size()) {
    switch (count) {
      case 0:
        return a;
      case 1:
        return blend<1>(a, b);
      case 2:
        return blend<3>(a, b);
      case 3:
        return blend<7>(a, b);
      case 4:
        return blend<15>(a, b);
      case 5:
        return blend<31>(a, b);
      case 6:
        return blend<63>(a, b);
      case 7:
        return blend<127>(a, b);
      case 8:
        return blend<255>(a, b);
      case 9:
        return blend<511>(a, b);
      case 10:
        return blend<1023>(a, b);
      case 11:
        return blend<2047>(a, b);
      case 12:
        return blend<4095>(a, b);
      case 13:
        return blend<8191>(a, b);
      case 14:
        return blend<16383>(a, b);
      case 15:
        return blend<32767>(a, b);
    }
    return b;
  }
  static Vectorized<float> loadu(const void* ptr, int64_t count = size()) {
    if (count == size())
      return _mm512_loadu_ps(reinterpret_cast<const float*>(ptr));
    __at_align__ float tmp_values[size()];
    // Ensure uninitialized memory does not change the output value.
    // See https://github.com/pytorch/pytorch/issues/32502 for more details.
    // We do not initialize arrays to zero using "={0}" because gcc would
    // compile it to two instructions, while a loop would be compiled to one.
    for (const auto i : c10::irange(size())) {
      tmp_values[i] = 0.0;
    }
    std::memcpy(
        tmp_values, reinterpret_cast<const float*>(ptr), count * sizeof(float));
    return _mm512_loadu_ps(tmp_values);
  }
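  // Illustrative usage (added for clarity, not part of the original header):
  // a partial load zero-fills the lanes beyond `count`, so, hypothetically,
  //
  //   float buf[3] = {1.f, 2.f, 3.f};
  //   auto v = Vectorized<float>::loadu(buf, 3);
  //
  // yields {1, 2, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}.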
  void store(void* ptr, int64_t count = size()) const {
    if (count == size()) {
      _mm512_storeu_ps(reinterpret_cast<float*>(ptr), values);
    } else if (count > 0) {
      float tmp_values[size()];
      _mm512_storeu_ps(reinterpret_cast<float*>(tmp_values), values);
      std::memcpy(ptr, tmp_values, count * sizeof(float));
    }
  }
  const float& operator[](int idx) const = delete;
  float& operator[](int idx) = delete;
  int zero_mask() const {
    // returns an integer mask where all zero elements are translated to 1-bit
    // and others are translated to 0-bit
    __mmask16 cmp = _mm512_cmp_ps_mask(values, _mm512_set1_ps(0.0), _CMP_EQ_OQ);
    return static_cast<int32_t>(cmp);
  }
  Vectorized<float> isnan() const {
    auto mask = _mm512_cmp_ps_mask(values, _mm512_set1_ps(0.0), _CMP_UNORD_Q);
    return _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, mask,
                                                      0xFFFFFFFF));
  }
  Vectorized<float> map(float (*const f)(float)) const {
    __at_align__ float tmp[size()];
    store(tmp);
    for (const auto i : c10::irange(size())) {
      tmp[i] = f(tmp[i]);
    }
    return loadu(tmp);
  }
  Vectorized<float> abs() const {
    auto mask = _mm512_set1_ps(-0.f);
    return _mm512_andnot_ps(mask, values);
  }
  Vectorized<float> angle() const {
    __m512 zero_vec = _mm512_set1_ps(0.f);
    const auto nan_vec = _mm512_set1_ps(NAN);
    const auto not_nan_mask = _mm512_cmp_ps_mask(values, values, _CMP_EQ_OQ);
    const auto not_nan_vec = _mm512_mask_set1_epi32(_mm512_castps_si512(zero_vec),
                                                    not_nan_mask, 0xFFFFFFFF);
    const auto nan_mask = _mm512_cmp_ps_mask(_mm512_castsi512_ps(not_nan_vec),
                                             zero_vec, _CMP_EQ_OQ);
    const auto pi = _mm512_set1_ps(c10::pi<float>);
    const auto neg_mask = _mm512_cmp_ps_mask(values, zero_vec, _CMP_LT_OQ);
    auto angle = _mm512_mask_blend_ps(neg_mask, zero_vec, pi);
    angle = _mm512_mask_blend_ps(nan_mask, angle, nan_vec);
    return angle;
  }
  Vectorized<float> real() const {
    return *this;
  }
  Vectorized<float> imag() const {
    return _mm512_set1_ps(0);
  }
  Vectorized<float> conj() const {
    return *this;
  }
  Vectorized<float> acos() const {
    return Vectorized<float>(Sleef_acosf16_u10(values));
  }
  Vectorized<float> asin() const {
    return Vectorized<float>(Sleef_asinf16_u10(values));
  }
  Vectorized<float> atan() const {
    return Vectorized<float>(Sleef_atanf16_u10(values));
  }
  Vectorized<float> atan2(const Vectorized<float> &b) const {
    return Vectorized<float>(Sleef_atan2f16_u10(values, b));
  }
  Vectorized<float> copysign(const Vectorized<float> &sign) const {
    return Vectorized<float>(Sleef_copysignf16(values, sign));
  }
  Vectorized<float> erf() const {
    return Vectorized<float>(Sleef_erff16_u10(values));
  }
  Vectorized<float> erfc() const {
    return Vectorized<float>(Sleef_erfcf16_u15(values));
  }
  Vectorized<float> erfinv() const {
    return map(calc_erfinv);
  }
  Vectorized<float> exp() const {
    return Vectorized<float>(Sleef_expf16_u10(values));
  }
  Vectorized<float> expm1() const {
    return Vectorized<float>(Sleef_expm1f16_u10(values));
  }
  Vectorized<float> fmod(const Vectorized<float>& q) const {
    return Vectorized<float>(Sleef_fmodf16(values, q));
  }
  Vectorized<float> log() const {
    return Vectorized<float>(Sleef_logf16_u10(values));
  }
  Vectorized<float> log2() const {
    return Vectorized<float>(Sleef_log2f16_u10(values));
  }
  Vectorized<float> log10() const {
    return Vectorized<float>(Sleef_log10f16_u10(values));
  }
  Vectorized<float> log1p() const {
    return Vectorized<float>(Sleef_log1pf16_u10(values));
  }
  Vectorized<float> frac() const;
  Vectorized<float> sin() const {
    return Vectorized<float>(Sleef_sinf16_u10(values));
  }
  Vectorized<float> sinh() const {
    return Vectorized<float>(Sleef_sinhf16_u10(values));
  }
  Vectorized<float> cos() const {
    return Vectorized<float>(Sleef_cosf16_u10(values));
  }
  Vectorized<float> cosh() const {
    return Vectorized<float>(Sleef_coshf16_u10(values));
  }
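  // Note (added for clarity): in the Sleef entry points used above, "f16"
  // means 16 packed floats, and the "_u05"/"_u10"/"_u15" suffixes are Sleef's
  // documented maximum error bounds of 0.5, 1.0, and 1.5 ULP respectively.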
  Vectorized<float> ceil() const {
    return _mm512_ceil_ps(values);
  }
  Vectorized<float> floor() const {
    return _mm512_floor_ps(values);
  }
  Vectorized<float> hypot(const Vectorized<float> &b) const {
    return Vectorized<float>(Sleef_hypotf16_u05(values, b));
  }
  Vectorized<float> i0() const {
    return map(calc_i0);
  }
  Vectorized<float> i0e() const {
    return map(calc_i0e);
  }
  Vectorized<float> igamma(const Vectorized<float> &x) const {
    __at_align__ float tmp[size()];
    __at_align__ float tmp_x[size()];
    store(tmp);
    x.store(tmp_x);
    for (const auto i : c10::irange(size())) {
      tmp[i] = calc_igamma(tmp[i], tmp_x[i]);
    }
    return loadu(tmp);
  }
  Vectorized<float> igammac(const Vectorized<float> &x) const {
    __at_align__ float tmp[size()];
    __at_align__ float tmp_x[size()];
    store(tmp);
    x.store(tmp_x);
    for (const auto i : c10::irange(size())) {
      tmp[i] = calc_igammac(tmp[i], tmp_x[i]);
    }
    return loadu(tmp);
  }
  Vectorized<float> neg() const {
    return _mm512_xor_ps(_mm512_set1_ps(-0.f), values);
  }
  Vectorized<float> nextafter(const Vectorized<float> &b) const {
    return Vectorized<float>(Sleef_nextafterf16(values, b));
  }
  Vectorized<float> round() const {
    return _mm512_roundscale_ps(values, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));
  }
  Vectorized<float> tan() const {
    return Vectorized<float>(Sleef_tanf16_u10(values));
  }
  Vectorized<float> tanh() const {
    return Vectorized<float>(Sleef_tanhf16_u10(values));
  }
  Vectorized<float> trunc() const {
    return _mm512_roundscale_ps(values, (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC));
  }
  Vectorized<float> lgamma() const {
    return Vectorized<float>(Sleef_lgammaf16_u10(values));
  }
  Vectorized<float> sqrt() const {
    return _mm512_sqrt_ps(values);
  }
  Vectorized<float> reciprocal() const {
    return _mm512_div_ps(_mm512_set1_ps(1), values);
  }
  Vectorized<float> rsqrt() const {
    return _mm512_div_ps(_mm512_set1_ps(1), _mm512_sqrt_ps(values));
  }
  Vectorized<float> pow(const Vectorized<float> &b) const {
    return Vectorized<float>(Sleef_powf16_u10(values, b));
  }
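  // Note (added for clarity): the comparison operators below return lane-wise
  // bitmasks (all-ones, i.e. 0xFFFFFFFF, for true; all-zeros for false),
  // whereas the eq/ne/gt/ge/lt/le members defined after this class return
  // 0.0f/1.0f per lane and are therefore safe to use in arithmetic.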
  // Comparison using the _CMP_**_OQ predicate.
  //   `O`: get false if an operand is NaN
  //   `Q`: do not raise if an operand is NaN
  Vectorized<float> operator==(const Vectorized<float>& other) const {
    auto mask = _mm512_cmp_ps_mask(values, other.values, _CMP_EQ_OQ);
    return _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, mask, 0xFFFFFFFF));
  }
  Vectorized<float> operator!=(const Vectorized<float>& other) const {
    // NEQ uses the unordered predicate so that NaN != x is true.
    auto mask = _mm512_cmp_ps_mask(values, other.values, _CMP_NEQ_UQ);
    return _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, mask, 0xFFFFFFFF));
  }
  Vectorized<float> operator<(const Vectorized<float>& other) const {
    auto mask = _mm512_cmp_ps_mask(values, other.values, _CMP_LT_OQ);
    return _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, mask, 0xFFFFFFFF));
  }
  Vectorized<float> operator<=(const Vectorized<float>& other) const {
    auto mask = _mm512_cmp_ps_mask(values, other.values, _CMP_LE_OQ);
    return _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, mask, 0xFFFFFFFF));
  }
  Vectorized<float> operator>(const Vectorized<float>& other) const {
    auto mask = _mm512_cmp_ps_mask(values, other.values, _CMP_GT_OQ);
    return _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, mask, 0xFFFFFFFF));
  }
  Vectorized<float> operator>=(const Vectorized<float>& other) const {
    auto mask = _mm512_cmp_ps_mask(values, other.values, _CMP_GE_OQ);
    return _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, mask, 0xFFFFFFFF));
  }
  Vectorized<float> eq(const Vectorized<float>& other) const;
  Vectorized<float> ne(const Vectorized<float>& other) const;
  Vectorized<float> gt(const Vectorized<float>& other) const;
  Vectorized<float> ge(const Vectorized<float>& other) const;
  Vectorized<float> lt(const Vectorized<float>& other) const;
  Vectorized<float> le(const Vectorized<float>& other) const;
};

template <>
Vectorized<float> inline operator+(const Vectorized<float>& a, const Vectorized<float>& b) {
  return _mm512_add_ps(a, b);
}

template <>
Vectorized<float> inline operator-(const Vectorized<float>& a, const Vectorized<float>& b) {
  return _mm512_sub_ps(a, b);
}

template <>
Vectorized<float> inline operator*(const Vectorized<float>& a, const Vectorized<float>& b) {
  return _mm512_mul_ps(a, b);
}

template <>
Vectorized<float> inline operator/(const Vectorized<float>& a, const Vectorized<float>& b) {
  return _mm512_div_ps(a, b);
}

// frac. Implement this here so we can use subtraction.
inline Vectorized<float> Vectorized<float>::frac() const {
  return *this - this->trunc();
}

// Implements the IEEE 754 201X `maximum` operation, which propagates NaN if
// either input is a NaN.
template <>
Vectorized<float> inline maximum(const Vectorized<float>& a, const Vectorized<float>& b) {
  auto zero_vec = _mm512_set1_epi32(0);
  auto max = _mm512_max_ps(a, b);
  auto isnan_mask = _mm512_cmp_ps_mask(a, b, _CMP_UNORD_Q);
  auto isnan = _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, isnan_mask,
                                                          0xFFFFFFFF));
  // Exploit the fact that all-ones is a NaN.
  return _mm512_or_ps(max, isnan);
}

// Implements the IEEE 754 201X `minimum` operation, which propagates NaN if
// either input is a NaN.
template <>
Vectorized<float> inline minimum(const Vectorized<float>& a, const Vectorized<float>& b) {
  auto zero_vec = _mm512_set1_epi32(0);
  auto min = _mm512_min_ps(a, b);
  auto isnan_mask = _mm512_cmp_ps_mask(a, b, _CMP_UNORD_Q);
  auto isnan = _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, isnan_mask,
                                                          0xFFFFFFFF));
  // Exploit the fact that all-ones is a NaN.
  return _mm512_or_ps(min, isnan);
}
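// Illustrative example (added for clarity, not part of the original header):
// unlike a bare _mm512_max_ps, which does not reliably propagate NaNs,
// maximum(Vectorized<float>(NAN), Vectorized<float>(1.f)) yields NaN in every
// lane: ORing the all-ones mask into the result produces a NaN bit pattern.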
template <>
Vectorized<float> inline clamp(const Vectorized<float>& a, const Vectorized<float>& min,
                               const Vectorized<float>& max) {
  return _mm512_min_ps(max, _mm512_max_ps(min, a));
}

template <>
Vectorized<float> inline clamp_max(const Vectorized<float>& a, const Vectorized<float>& max) {
  return _mm512_min_ps(max, a);
}

template <>
Vectorized<float> inline clamp_min(const Vectorized<float>& a, const Vectorized<float>& min) {
  return _mm512_max_ps(min, a);
}

template <>
Vectorized<float> inline operator&(const Vectorized<float>& a, const Vectorized<float>& b) {
  return _mm512_and_ps(a, b);
}

template <>
Vectorized<float> inline operator|(const Vectorized<float>& a, const Vectorized<float>& b) {
  return _mm512_or_ps(a, b);
}

template <>
Vectorized<float> inline operator^(const Vectorized<float>& a, const Vectorized<float>& b) {
  return _mm512_xor_ps(a, b);
}

inline Vectorized<float> Vectorized<float>::eq(const Vectorized<float>& other) const {
  return (*this == other) & Vectorized<float>(1.0f);
}

inline Vectorized<float> Vectorized<float>::ne(const Vectorized<float>& other) const {
  return (*this != other) & Vectorized<float>(1.0f);
}

inline Vectorized<float> Vectorized<float>::gt(const Vectorized<float>& other) const {
  return (*this > other) & Vectorized<float>(1.0f);
}

inline Vectorized<float> Vectorized<float>::ge(const Vectorized<float>& other) const {
  return (*this >= other) & Vectorized<float>(1.0f);
}

inline Vectorized<float> Vectorized<float>::lt(const Vectorized<float>& other) const {
  return (*this < other) & Vectorized<float>(1.0f);
}

inline Vectorized<float> Vectorized<float>::le(const Vectorized<float>& other) const {
  return (*this <= other) & Vectorized<float>(1.0f);
}

template <>
inline void convert(const float* src, float* dst, int64_t n) {
  int64_t i;
#pragma unroll
  for (i = 0; i <= (n - Vectorized<float>::size()); i += Vectorized<float>::size()) {
    _mm512_storeu_ps(dst + i, _mm512_loadu_ps(src + i));
  }
#pragma unroll
  for (; i < n; i++) {
    dst[i] = src[i];
  }
}

template <>
Vectorized<float> inline fmadd(const Vectorized<float>& a, const Vectorized<float>& b,
                               const Vectorized<float>& c) {
  return _mm512_fmadd_ps(a, b, c);
}

#endif

}}}