From be7a5be891050330556f950731889d5c352ce58c Mon Sep 17 00:00:00 2001
From: Nathan Bossart <nathandbossart@gmail.com>
Date: Thu, 25 Aug 2022 22:59:12 -0700
Subject: [PATCH v3 2/2] Use ARM Advanced SIMD intrinsic functions where
 available

---
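Reviewer notes (this text sits after the "---" cut line, so git-am drops
it; it is not part of the commit message): SSE2 tests vector predicates
by building a byte mask with _mm_movemask_epi8(), which has no
single-instruction Neon equivalent, so the patch reduces the comparison
result to its maximum lane with vmaxvq_u8() instead.  Below is a minimal
standalone sketch, not part of the patch (the file name and helper names
are invented for illustration), that cross-checks this reduction against
a scalar loop; it assumes an aarch64 compiler:

/* neon_check.c: hypothetical test harness, not part of this patch */
#include <arm_neon.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* scalar reference: does any of the 16 bytes equal c? */
static bool
scalar_has(const uint8_t *s, uint8_t c)
{
	for (int i = 0; i < 16; i++)
	{
		if (s[i] == c)
			return true;
	}
	return false;
}

/* mirrors the patch's Neon branch of vector8_has() */
static bool
neon_has(const uint8_t *s, uint8_t c)
{
	/*
	 * vceqq_u8() sets matching lanes to 0xFF, and vmaxvq_u8() reduces the
	 * vector to its maximum lane, so a nonzero result means at least one
	 * lane matched.  This stands in for SSE2's
	 * _mm_movemask_epi8(_mm_cmpeq_epi8(...)).
	 */
	return vmaxvq_u8(vceqq_u8(vld1q_u8(s), vdupq_n_u8(c))) != 0;
}

int
main(void)
{
	uint8_t		buf[16] = "abcdefghijklmno";	/* 15 chars plus NUL */

	for (int c = 0; c < 256; c++)
	{
		if (neon_has(buf, (uint8_t) c) != scalar_has(buf, (uint8_t) c))
		{
			printf("mismatch for c = %d\n", c);
			return 1;
		}
	}
	printf("ok\n");
	return 0;
}
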
 src/include/port/simd.h | 46 +++++++++++++++++++++++++++++++++++++++--
 1 file changed, 44 insertions(+), 2 deletions(-)

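A second hypothetical sketch covers the two less obvious translations:
vector8_has_le() reuses the SSE2 saturating-subtraction trick via
vqsubq_u8() (a lane saturates to zero exactly when the byte is <= c),
and vector8_is_highbit_set() masks bit 7 of each lane before reducing,
standing in for SSE2's _mm_movemask_epi8(v) != 0.  Again, the file and
helper names are invented for illustration:

/* neon_le_check.c: a second hypothetical sketch, not part of this patch */
#include <arm_neon.h>
#include <stdbool.h>
#include <stdint.h>

/* mirrors the patch's Neon branch of vector8_has_le() */
static bool
neon_has_le(uint8x16_t v, uint8_t c)
{
	/* lanes <= c saturate to zero; then test for a zero lane */
	uint8x16_t	sub = vqsubq_u8(v, vdupq_n_u8(c));

	return vmaxvq_u8(vceqzq_u8(sub)) != 0;
}

/* mirrors the patch's Neon branch of vector8_is_highbit_set() */
static bool
neon_highbit_set(uint8x16_t v)
{
	/* keep only bit 7 of each lane; any surviving lane makes the max nonzero */
	return vmaxvq_u8(vandq_u8(v, vdupq_n_u8(0x80))) != 0;
}

int
main(void)
{
	uint8_t		buf[16] = {1, 2, 3, 0x90};	/* remaining lanes are zero */
	uint8x16_t	v = vld1q_u8(buf);

	/* the zero lanes are <= 2, and 0x90 has its high bit set */
	return (neon_has_le(v, 2) && neon_highbit_set(v)) ? 0 : 1;
}
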
diff --git a/src/include/port/simd.h b/src/include/port/simd.h
index bd4f1a3f39..839c0f25db 100644
--- a/src/include/port/simd.h
+++ b/src/include/port/simd.h
@@ -34,6 +34,19 @@
 typedef __m128i Vector8;
 typedef __m128i Vector32;
 
+#elif defined(__aarch64__)
+/*
+ * On aarch64, use the ARM Advanced SIMD (Neon) intrinsics provided by
+ * arm_neon.h.  While Neon support is technically optional for aarch64, we
+ * assume it is unlikely that anyone will run PostgreSQL on specialized
+ * hardware lacking this feature, and that compilers targeting this
+ * architecture understand Neon intrinsics.
+ */
+#include <arm_neon.h>
+#define USE_NEON
+typedef uint8x16_t Vector8;
+typedef uint32x4_t Vector32;
+
 #else
 /*
  * If no SIMD instructions are available, we can in some cases emulate vector
@@ -70,6 +83,8 @@ vector8_load(Vector8 *v, const uint8 *s)
 {
 #ifdef USE_SSE2
 	*v = _mm_loadu_si128((const __m128i *) s);
+#elif defined(USE_NEON)
+	*v = vld1q_u8(s);
 #else
 	memcpy(v, s, sizeof(Vector8));
 #endif
@@ -80,6 +95,8 @@ vector32_load(Vector32 *v, const uint32 *s)
 {
 #ifdef USE_SSE2
 	*v = _mm_loadu_si128((const __m128i *) s);
+#elif defined(USE_NEON)
+	*v = vld1q_u32(s);
 #else
 	memcpy(v, s, sizeof(Vector32));
 #endif
@@ -95,6 +112,8 @@ vector8_broadcast(const uint8 c)
 {
 #ifdef USE_SSE2
 	return _mm_set1_epi8(c);
+#elif defined(USE_NEON)
+	return vdupq_n_u8(c);
 #else
 	return ~UINT64CONST(0) / 0xFF * c;
 #endif
@@ -105,6 +124,8 @@ vector32_broadcast(const uint32 c)
 {
 #ifdef USE_SSE2
 	return _mm_set1_epi32(c);
+#elif defined(USE_NEON)
+	return vdupq_n_u32(c);
 #else
 	return ~UINT64CONST(0) / 0xFFFFFFFF * c;
 #endif
@@ -120,6 +141,8 @@ vector8_has_zero(const Vector8 v)
 {
 #ifdef USE_SSE2
 	return _mm_movemask_epi8(_mm_cmpeq_epi8(v, _mm_setzero_si128()));
+#elif defined(USE_NEON)
+	return vmaxvq_u8(vceqzq_u8(v)) != 0;
 #else
 	return vector8_has_le(v, 0);
 #endif
@@ -146,6 +169,8 @@ vector8_has(const Vector8 v, const uint8 c)
 
 #ifdef USE_SSE2
 	result = _mm_movemask_epi8(_mm_cmpeq_epi8(v, vector8_broadcast(c)));
+#elif defined(USE_NEON)
+	result = vmaxvq_u8(vceqq_u8(v, vector8_broadcast(c))) != 0;
 #else
 	/* any bytes in v equal to c will evaluate to zero via XOR */
 	result = vector8_has_zero(v ^ vector8_broadcast(c));
@@ -159,8 +184,8 @@ static inline bool
 vector8_has_le(const Vector8 v, const uint8 c)
 {
 	bool		result = false;
-#ifdef USE_SSE2
-	__m128i		sub;
+#ifndef USE_NO_SIMD
+	Vector8		sub;
 #endif
 
 	/* pre-compute the result for assert checking */
@@ -185,6 +210,11 @@ vector8_has_le(const Vector8 v, const uint8 c)
 	 */
 	sub = _mm_subs_epu8(v, vector8_broadcast(c));
 	result = vector8_has_zero(sub);
+#elif defined(USE_NEON)
+
+	/* use the same saturating-subtraction approach as the USE_SSE2 block */
+	sub = vqsubq_u8(v, vector8_broadcast(c));
+	result = vector8_has_zero(sub);
 #else
 
 	/*
@@ -218,6 +248,8 @@ vector8_is_highbit_set(const Vector8 v)
 {
 #ifdef USE_SSE2
 	return _mm_movemask_epi8(v) != 0;
+#elif defined(USE_NEON)
+	return vmaxvq_u8(vandq_u8(v, vector8_broadcast(0x80))) != 0;
 #else
 	return v & vector8_broadcast(0x80);
 #endif
@@ -234,6 +266,8 @@ vector32_any_lane_set(const Vector32 v)
 {
 #ifdef USE_SSE2
 	return _mm_movemask_epi8(v) != 0;
+#elif defined(USE_NEON)
+	return vmaxvq_u32(v) != 0;
 #endif
 }
 #endif
@@ -246,6 +280,8 @@ vector8_eq(const Vector8 v1, const Vector8 v2)
 {
 #ifdef USE_SSE2
 	return _mm_cmpeq_epi8(v1, v2);
+#elif defined(USE_NEON)
+	return vceqq_u8(v1, v2);
 #endif
 }
 
@@ -254,6 +290,8 @@ vector32_eq(const Vector32 v1, const Vector32 v2)
 {
 #ifdef USE_SSE2
 	return _mm_cmpeq_epi32(v1, v2);
+#elif defined(USE_NEON)
+	return vceqq_u32(v1, v2);
 #endif
 }
 #endif
@@ -265,6 +303,8 @@ vector8_or(const Vector8 v1, const Vector8 v2)
 {
 #ifdef USE_SSE2
 	return _mm_or_si128(v1, v2);
+#elif defined(USE_NEON)
+	return vorrq_u8(v1, v2);
 #else
 	return v1 | v2;
 #endif
@@ -275,6 +315,8 @@ vector32_or(const Vector32 v1, const Vector32 v2)
 {
 #ifdef USE_SSE2
 	return _mm_or_si128(v1, v2);
+#elif defined(USE_NEON)
+	return vorrq_u32(v1, v2);
 #else
 	return v1 | v2;
 #endif
-- 
2.25.1

