From 0ac8651bd6adf7d2d7095ca38bf2f9d9a34462ba Mon Sep 17 00:00:00 2001
From: Georgi Gerganov
Date: Mon, 13 Mar 2023 01:30:53 +0200
Subject: [PATCH] 10% performance boost on ARM

---
 ggml.c | 17 +++++++++++++++++
 1 file changed, 17 insertions(+)

diff --git a/ggml.c b/ggml.c
index fbd7b93..e664e71 100644
--- a/ggml.c
+++ b/ggml.c
@@ -1360,6 +1360,22 @@ inline static void ggml_vec_dot_q4_0(const int n, float * restrict s, const void
         const int8x16_t v1_1hs = vsubq_s8(v1_1h, s8b);
 
         // dot product into int16x8_t
+#if defined(__ARM_FEATURE_DOTPROD)
+        int32x4_t p_0 = vdotq_s32(vdupq_n_s32(0), v0_0ls, v1_0ls);
+        int32x4_t p_1 = vdotq_s32(vdupq_n_s32(0), v0_1ls, v1_1ls);
+
+        p_0 = vdotq_s32(p_0, v0_0hs, v1_0hs);
+        p_1 = vdotq_s32(p_1, v0_1hs, v1_1hs);
+
+        // scalar
+#if defined(__ARM_FEATURE_QRDMX)
+        sum0 += d0_0*d1_0*vaddvq_s32(p_0);
+        sum1 += d0_1*d1_1*vaddvq_s32(p_1);
+#else
+        sum0 += d0_0*d1_0*(vgetq_lane_s32(p_0, 0) + vgetq_lane_s32(p_0, 1) + vgetq_lane_s32(p_0, 2) + vgetq_lane_s32(p_0, 3));
+        sum1 += d0_1*d1_1*(vgetq_lane_s32(p_1, 0) + vgetq_lane_s32(p_1, 1) + vgetq_lane_s32(p_1, 2) + vgetq_lane_s32(p_1, 3));
+#endif
+#else
         const int16x8_t pl0l = vmull_s8(vget_low_s8 (v0_0ls), vget_low_s8 (v1_0ls));
         const int16x8_t pl0h = vmull_s8(vget_high_s8(v0_0ls), vget_high_s8(v1_0ls));
@@ -1388,6 +1404,7 @@ inline static void ggml_vec_dot_q4_0(const int n, float * restrict s, const void
 #else
         sum0 += d0_0*d1_0*(vgetq_lane_s16(p_0, 0) + vgetq_lane_s16(p_0, 1) + vgetq_lane_s16(p_0, 2) + vgetq_lane_s16(p_0, 3) + vgetq_lane_s16(p_0, 4) + vgetq_lane_s16(p_0, 5) + vgetq_lane_s16(p_0, 6) + vgetq_lane_s16(p_0, 7));
         sum1 += d0_1*d1_1*(vgetq_lane_s16(p_1, 0) + vgetq_lane_s16(p_1, 1) + vgetq_lane_s16(p_1, 2) + vgetq_lane_s16(p_1, 3) + vgetq_lane_s16(p_1, 4) + vgetq_lane_s16(p_1, 5) + vgetq_lane_s16(p_1, 6) + vgetq_lane_s16(p_1, 7));
+#endif
 #endif
     }
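
For context, a minimal sketch of the technique the patch applies, reduced to a single 16-byte dot product: on CPUs with the ARMv8.2 dotprod extension, vdotq_s32 multiplies four int8 pairs per int32 lane and accumulates in one instruction, replacing the widening vmull_s8 sequence used in the pre-patch path. The helper name dot16_i8 and the test values are illustrative only (not from ggml.c); the sketch assumes an AArch64 toolchain (for vaddvq_s32) built with e.g. -march=armv8.2-a+dotprod.

#include <arm_neon.h>
#include <stdint.h>
#include <stdio.h>

// Dot product of two 16-byte signed vectors, reduced to one int32 scalar.
static int32_t dot16_i8(const int8_t *a, const int8_t *b) {
    const int8x16_t va = vld1q_s8(a);
    const int8x16_t vb = vld1q_s8(b);
#if defined(__ARM_FEATURE_DOTPROD)
    // One instruction: multiply groups of four int8 pairs and accumulate
    // each group into the corresponding int32 lane of the accumulator.
    const int32x4_t p = vdotq_s32(vdupq_n_s32(0), va, vb);
#else
    // Fallback, as in the pre-patch code: widen int8 products to int16x8_t
    // with vmull_s8, then pairwise-widen and add into int32x4_t.
    const int16x8_t pl = vmull_s8(vget_low_s8(va),  vget_low_s8(vb));
    const int16x8_t ph = vmull_s8(vget_high_s8(va), vget_high_s8(vb));
    const int32x4_t p  = vaddq_s32(vpaddlq_s16(pl), vpaddlq_s16(ph));
#endif
    return vaddvq_s32(p); // horizontal add of the four int32 lanes
}

int main(void) {
    int8_t a[16], b[16];
    for (int i = 0; i < 16; ++i) { a[i] = (int8_t)(i - 8); b[i] = (int8_t)(2*i); }
    printf("dot = %d\n", dot16_i8(a, b)); // both paths print 560
    return 0;
}

Note that the patch itself keeps the vaddvq_s32 horizontal add behind __ARM_FEATURE_QRDMX and otherwise falls back to summing vgetq_lane_s32 extractions; the sketch takes the vaddvq_s32 path unconditionally for brevity.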