diff --git a/ggml/src/ggml-cpu/llamafile/sgemm.cpp b/ggml/src/ggml-cpu/llamafile/sgemm.cpp
index 00f7f11704ead..0b484ffd4c437 100644
--- a/ggml/src/ggml-cpu/llamafile/sgemm.cpp
+++ b/ggml/src/ggml-cpu/llamafile/sgemm.cpp
@@ -54,6 +54,7 @@
 #include "ggml-quants.h"
 
 #include
+#include <array>
 
 #ifdef _MSC_VER
 #define NOINLINE __declspec(noinline)
@@ -1049,6 +1050,704 @@ class tinyBLAS_Q0_AVX {
         } \
     } \
 
+template <typename TA, typename TB, typename TC>
+class tinyBLAS_Q0_PPC {
+  public:
+    tinyBLAS_Q0_PPC(int64_t k,
+                    const TA *A, int64_t lda,
+                    const TB *B, int64_t ldb,
+                    TC *C, int64_t ldc,
+                    int ith, int nth)
+        : A(A), B(B), C(C), k(k), lda(lda), ldb(ldb), ldc(ldc), ith(ith), nth(nth) {
+    }
+
+    void matmul(int64_t m, int64_t n) {
+        mnpack(0, m, 0, n);
+    }
+
+  private:
+
+    template<int RM, int RN>
+    void save_res(int ii, int jj, int idx, vector float* fin_res) {
+        for (int I = 0; I < RM; I++) {
+            for (int J = 0; J < RN; J++) {
+                *((float*)(C+ii+((jj+J)*ldc)+I)) = *((float*)&fin_res[idx+I]+J);
+            }
+        }
+    }
+
+    template<int size>
+    void compute(acc_t* ACC, int c_idx, int s_idx, std::array<int, size>& comparray, vector float* vs, vector float* fin_res) {
+        vector signed int vec_C[4];
+        vector float CA[4] = {0};
+        vector float res[4] = {0};
+        __builtin_mma_disassemble_acc(vec_C, ACC);
+        for (int i = 0; i < 4; i++) {
+            CA[i] = vec_splats((float)(((double)comparray[c_idx+i]) * -128.0));
+            res[i] = vec_add(vec_ctf(vec_C[i], 0), CA[i]);
+            fin_res[s_idx+i] = vec_madd(res[i], vs[s_idx+i], fin_res[s_idx+i]);
+        }
+    }
+
+    template<typename VA, typename VB>
+    void packNormal(const TA* a, int64_t lda, int rows, int cols, VA* vec, bool flip) {
+        int64_t i, j;
+        TA *aoffset = NULL;
+        VA *vecOffset = NULL;
+        TA *aoffset1 = NULL, *aoffset2 = NULL, *aoffset3 = NULL, *aoffset4 = NULL;
+        TA *aoffset5 = NULL, *aoffset6 = NULL, *aoffset7 = NULL, *aoffset8 = NULL;
+        __vector_pair C1, C2, C3, C4, C5, C6, C7, C8;
+        VB c1[2] = {0}, c2[2] = {0}, c3[2] = {0}, c4[2] = {0};
+        VB c5[2] = {0}, c6[2] = {0}, c7[2] = {0}, c8[2] = {0};
+        VB t1, t2, t3, t4, t5, t6, t7, t8;
+        vector unsigned char xor_vector;
+        uint8_t flip_vec = 0x80;
+        xor_vector = vec_splats(flip_vec);
+        vector unsigned char swiz1 = {0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 23};
+        vector unsigned char swiz2 = {8, 9, 10, 11, 12, 13, 14, 15, 24, 25, 26, 27, 28, 29, 30, 31};
+        vector unsigned char swiz3 = {0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27};
+        vector unsigned char swiz4 = {4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31};
+
+        aoffset = const_cast<TA*>(a);
+        vecOffset = vec;
+        j = (rows >> 3);
+        if (j > 0) {
+            do {
+                aoffset1 = aoffset;
+                aoffset2 = aoffset1 + lda;
+                aoffset3 = aoffset2 + lda;
+                aoffset4 = aoffset3 + lda;
+                aoffset5 = aoffset4 + lda;
+                aoffset6 = aoffset5 + lda;
+                aoffset7 = aoffset6 + lda;
+                aoffset8 = aoffset7 + lda;
+                aoffset += 8 * lda;
+
+                i = (cols >> 3);
+                if (i > 0) {
+                    do {
+                        C1 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset1->qs);
+                        C2 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset2->qs);
+                        C3 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset3->qs);
+                        C4 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset4->qs);
+                        C5 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset5->qs);
+                        C6 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset6->qs);
+                        C7 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset7->qs);
+                        C8 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset8->qs);
+
+                        __builtin_vsx_disassemble_pair(c1, &C1);
+                        __builtin_vsx_disassemble_pair(c2, &C2);
+                        __builtin_vsx_disassemble_pair(c3, &C3);
+                        __builtin_vsx_disassemble_pair(c4, &C4);
+                        __builtin_vsx_disassemble_pair(c5, &C5);
+                        __builtin_vsx_disassemble_pair(c6, &C6);
+                        __builtin_vsx_disassemble_pair(c7, &C7);
+                        __builtin_vsx_disassemble_pair(c8, &C8);
+
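+                        // Interleave the rows so that each 16-byte output vector holds one
+                        // 4-byte group (4 consecutive quants) from each of 4 rows, which is the
+                        // operand layout consumed by the xvi8ger4pp int8 rank-4 updates below.
+                        // When flip is set the bytes are XORed with 0x80 (x ^ 0x80 == x + 128)
+                        // so they can be fed in as unsigned values; the +128 bias is cancelled
+                        // later through the row-sum compensation term (see compute()).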
+                        t1 = vec_perm(c1[0], c2[0], swiz1);
+                        t2 = vec_perm(c1[0], c2[0], swiz2);
+                        t3 = vec_perm(c3[0], c4[0], swiz1);
+                        t4 = vec_perm(c3[0], c4[0], swiz2);
+                        t5 = vec_perm(t1, t3, swiz3);
+                        t6 = vec_perm(t1, t3, swiz4);
+                        t7 = vec_perm(t2, t4, swiz3);
+                        t8 = vec_perm(t2, t4, swiz4);
+                        if (flip == true) {
+                            t5 = vec_xor(t5, xor_vector);
+                            t6 = vec_xor(t6, xor_vector);
+                            t7 = vec_xor(t7, xor_vector);
+                            t8 = vec_xor(t8, xor_vector);
+                        }
+                        vec_xst(t5, 0, vecOffset);
+                        vec_xst(t6, 0, vecOffset+16);
+                        vec_xst(t7, 0, vecOffset+32);
+                        vec_xst(t8, 0, vecOffset+48);
+
+                        t1 = vec_perm(c1[1], c2[1], swiz1);
+                        t2 = vec_perm(c1[1], c2[1], swiz2);
+                        t3 = vec_perm(c3[1], c4[1], swiz1);
+                        t4 = vec_perm(c3[1], c4[1], swiz2);
+                        t5 = vec_perm(t1, t3, swiz3);
+                        t6 = vec_perm(t1, t3, swiz4);
+                        t7 = vec_perm(t2, t4, swiz3);
+                        t8 = vec_perm(t2, t4, swiz4);
+                        if (flip == true) {
+                            t5 = vec_xor(t5, xor_vector);
+                            t6 = vec_xor(t6, xor_vector);
+                            t7 = vec_xor(t7, xor_vector);
+                            t8 = vec_xor(t8, xor_vector);
+                        }
+                        vec_xst(t5, 0, vecOffset+64);
+                        vec_xst(t6, 0, vecOffset+80);
+                        vec_xst(t7, 0, vecOffset+96);
+                        vec_xst(t8, 0, vecOffset+112);
+
+                        t1 = vec_perm(c5[0], c6[0], swiz1);
+                        t2 = vec_perm(c5[0], c6[0], swiz2);
+                        t3 = vec_perm(c7[0], c8[0], swiz1);
+                        t4 = vec_perm(c7[0], c8[0], swiz2);
+                        t5 = vec_perm(t1, t3, swiz3);
+                        t6 = vec_perm(t1, t3, swiz4);
+                        t7 = vec_perm(t2, t4, swiz3);
+                        t8 = vec_perm(t2, t4, swiz4);
+                        if (flip == true) {
+                            t5 = vec_xor(t5, xor_vector);
+                            t6 = vec_xor(t6, xor_vector);
+                            t7 = vec_xor(t7, xor_vector);
+                            t8 = vec_xor(t8, xor_vector);
+                        }
+                        vec_xst(t5, 0, vecOffset+128);
+                        vec_xst(t6, 0, vecOffset+144);
+                        vec_xst(t7, 0, vecOffset+160);
+                        vec_xst(t8, 0, vecOffset+176);
+
+                        t1 = vec_perm(c5[1], c6[1], swiz1);
+                        t2 = vec_perm(c5[1], c6[1], swiz2);
+                        t3 = vec_perm(c7[1], c8[1], swiz1);
+                        t4 = vec_perm(c7[1], c8[1], swiz2);
+                        t5 = vec_perm(t1, t3, swiz3);
+                        t6 = vec_perm(t1, t3, swiz4);
+                        t7 = vec_perm(t2, t4, swiz3);
+                        t8 = vec_perm(t2, t4, swiz4);
+                        if (flip == true) {
+                            t5 = vec_xor(t5, xor_vector);
+                            t6 = vec_xor(t6, xor_vector);
+                            t7 = vec_xor(t7, xor_vector);
+                            t8 = vec_xor(t8, xor_vector);
+                        }
+                        vec_xst(t5, 0, vecOffset+192);
+                        vec_xst(t6, 0, vecOffset+208);
+                        vec_xst(t7, 0, vecOffset+224);
+                        vec_xst(t8, 0, vecOffset+240);
+
+                        aoffset1 += lda;
+                        aoffset2 += lda;
+                        aoffset3 += lda;
+                        aoffset4 += lda;
+                        aoffset5 += lda;
+                        aoffset6 += lda;
+                        aoffset7 += lda;
+                        aoffset8 += lda;
+                        vecOffset += 256;
+                        i--;
+                    } while(i > 0);
+                }
+                j--;
+            } while(j > 0);
+        }
+
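+        // fewer than 8 rows remain: repeat the same interleave for a block of 4 rows,
+        // then for the final 1-3 rows below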
+        if (rows & 4) {
+            aoffset1 = aoffset;
+            aoffset2 = aoffset1 + lda;
+            aoffset3 = aoffset2 + lda;
+            aoffset4 = aoffset3 + lda;
+            aoffset += 4 * lda;
+
+            i = (cols >> 3);
+            if (i > 0) {
+                do {
+                    C1 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset1->qs);
+                    C2 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset2->qs);
+                    C3 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset3->qs);
+                    C4 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset4->qs);
+
+                    __builtin_vsx_disassemble_pair(c1, &C1);
+                    __builtin_vsx_disassemble_pair(c2, &C2);
+                    __builtin_vsx_disassemble_pair(c3, &C3);
+                    __builtin_vsx_disassemble_pair(c4, &C4);
+
+                    t1 = vec_perm(c1[0], c2[0], swiz1);
+                    t2 = vec_perm(c1[0], c2[0], swiz2);
+                    t3 = vec_perm(c3[0], c4[0], swiz1);
+                    t4 = vec_perm(c3[0], c4[0], swiz2);
+                    t5 = vec_perm(t1, t3, swiz3);
+                    t6 = vec_perm(t1, t3, swiz4);
+                    t7 = vec_perm(t2, t4, swiz3);
+                    t8 = vec_perm(t2, t4, swiz4);
+                    if (flip == true) {
+                        t5 = vec_xor(t5, xor_vector);
+                        t6 = vec_xor(t6, xor_vector);
+                        t7 = vec_xor(t7, xor_vector);
+                        t8 = vec_xor(t8, xor_vector);
+                    }
+                    vec_xst(t5, 0, vecOffset);
+                    vec_xst(t6, 0, vecOffset+16);
+                    vec_xst(t7, 0, vecOffset+32);
+                    vec_xst(t8, 0, vecOffset+48);
+
+                    t1 = vec_perm(c1[1], c2[1], swiz1);
+                    t2 = vec_perm(c1[1], c2[1], swiz2);
+                    t3 = vec_perm(c3[1], c4[1], swiz1);
+                    t4 = vec_perm(c3[1], c4[1], swiz2);
+                    t5 = vec_perm(t1, t3, swiz3);
+                    t6 = vec_perm(t1, t3, swiz4);
+                    t7 = vec_perm(t2, t4, swiz3);
+                    t8 = vec_perm(t2, t4, swiz4);
+                    if (flip == true) {
+                        t5 = vec_xor(t5, xor_vector);
+                        t6 = vec_xor(t6, xor_vector);
+                        t7 = vec_xor(t7, xor_vector);
+                        t8 = vec_xor(t8, xor_vector);
+                    }
+                    vec_xst(t5, 0, vecOffset+64);
+                    vec_xst(t6, 0, vecOffset+80);
+                    vec_xst(t7, 0, vecOffset+96);
+                    vec_xst(t8, 0, vecOffset+112);
+
+                    aoffset1 += lda;
+                    aoffset2 += lda;
+                    aoffset3 += lda;
+                    aoffset4 += lda;
+                    vecOffset += 128;
+                    i--;
+                } while(i > 0);
+            }
+        }
+        if (rows & 3) {
+            aoffset1 = aoffset;
+            aoffset2 = aoffset1 + lda;
+            aoffset3 = aoffset2 + lda;
+            i = (cols >> 3);
+            if (i > 0) {
+                do {
+                    switch(rows) {
+                        case 3: C3 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset3->qs);
+                                __builtin_vsx_disassemble_pair(c3, &C3);
+                        case 2: C2 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset2->qs);
+                                __builtin_vsx_disassemble_pair(c2, &C2);
+                        case 1: C1 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset1->qs);
+                                __builtin_vsx_disassemble_pair(c1, &C1);
+                                break;
+                    }
+                    t1 = vec_perm(c1[0], c2[0], swiz1);
+                    t2 = vec_perm(c1[0], c2[0], swiz2);
+                    t3 = vec_perm(c3[0], c4[0], swiz1);
+                    t4 = vec_perm(c3[0], c4[0], swiz2);
+                    t5 = vec_perm(t1, t3, swiz3);
+                    t6 = vec_perm(t1, t3, swiz4);
+                    t7 = vec_perm(t2, t4, swiz3);
+                    t8 = vec_perm(t2, t4, swiz4);
+                    if (flip == true) {
+                        t5 = vec_xor(t5, xor_vector);
+                        t6 = vec_xor(t6, xor_vector);
+                        t7 = vec_xor(t7, xor_vector);
+                        t8 = vec_xor(t8, xor_vector);
+                    }
+                    vec_xst(t5, 0, vecOffset);
+                    vec_xst(t6, 0, vecOffset+16);
+                    vec_xst(t7, 0, vecOffset+32);
+                    vec_xst(t8, 0, vecOffset+48);
+
+                    t1 = vec_perm(c1[1], c2[1], swiz1);
+                    t2 = vec_perm(c1[1], c2[1], swiz2);
+                    t3 = vec_perm(c3[1], c4[1], swiz1);
+                    t4 = vec_perm(c3[1], c4[1], swiz2);
+                    t5 = vec_perm(t1, t3, swiz3);
+                    t6 = vec_perm(t1, t3, swiz4);
+                    t7 = vec_perm(t2, t4, swiz3);
+                    t8 = vec_perm(t2, t4, swiz4);
+                    if (flip == true) {
+                        t5 = vec_xor(t5, xor_vector);
+                        t6 = vec_xor(t6, xor_vector);
+                        t7 = vec_xor(t7, xor_vector);
+                        t8 = vec_xor(t8, xor_vector);
+                    }
+                    vec_xst(t5, 0, vecOffset+64);
+                    vec_xst(t6, 0, vecOffset+80);
+                    vec_xst(t7, 0, vecOffset+96);
+                    vec_xst(t8, 0, vecOffset+112);
+
+                    aoffset1 += lda;
+                    aoffset2 += lda;
+                    aoffset3 += lda;
+                    vecOffset += 128;
+                    i--;
+                } while(i > 0);
+            }
+        }
+    }
+
+    void mnpack(int64_t m0, int64_t m, int64_t n0, int64_t n) {
+        int64_t mc, nc, mp, np;
+        int m_rem = MIN(m - m0, 8);
+        int n_rem = MIN(n - n0, 8);
+        // TO-DO: KERNEL_16x8 and KERNEL_8x16 have some performance
+        // issues. Once these are resolved, the code below will be enabled.
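+        // Dispatch on the largest tile the remaining rows/cols can fill: the MMA kernels
+        // cover the 8x8, 8x4 and 4x8 cases, gemm_small handles the narrower edge tiles,
+        // and the two trailing mnpack calls recurse over whatever strip is left.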
+        /*if (m_rem >= 16 && n_rem >= 8) {
+            mc = 16;
+            nc = 8;
+            gemm<16,8>(m0, m, n0, n);
+        } else if(m_rem >= 8 && n_rem >= 16) {
+            mc = 8;
+            nc = 16;
+            gemm<8,16>(m0, m, n0, n);
+        }*/
+        if (m_rem >= 8 && n_rem >= 8) {
+            mc = 8;
+            nc = 8;
+            gemm<8,8>(m0, m, n0, n);
+        } else if (m_rem >= 4 && n_rem >= 8) {
+            mc = 4;
+            nc = 8;
+            gemm<4,8>(m0, m, n0, n);
+        } else if (m_rem >= 8 && n_rem >= 4) {
+            mc = 8;
+            nc = 4;
+            gemm<8,4>(m0, m, n0, n);
+        } else if (m_rem >= 4 && n_rem >= 4) {
+            mc = 4;
+            nc = 4;
+            gemm_small<4, 4>(m0, m, n0, n);
+        } else if ((m_rem < 4) && (n_rem > 4)) {
+            nc = 4;
+            switch(m_rem) {
+                case 1:
+                    mc = 1;
+                    gemm_small<1, 4>(m0, m, n0, n);
+                    break;
+                case 2:
+                    mc = 2;
+                    gemm_small<2, 4>(m0, m, n0, n);
+                    break;
+                case 3:
+                    mc = 3;
+                    gemm_small<3, 4>(m0, m, n0, n);
+                    break;
+                default:
+                    return;
+            }
+        } else if ((m_rem > 4) && (n_rem < 4)) {
+            mc = 4;
+            switch(n_rem) {
+                case 1:
+                    nc = 1;
+                    gemm_small<4, 1>(m0, m, n0, n);
+                    break;
+                case 2:
+                    nc = 2;
+                    gemm_small<4, 2>(m0, m, n0, n);
+                    break;
+                case 3:
+                    nc = 3;
+                    gemm_small<4, 3>(m0, m, n0, n);
+                    break;
+                default:
+                    return;
+            }
+        } else {
+            switch((m_rem << 4) | n_rem) {
+                case 0x43:
+                    mc = 4;
+                    nc = 3;
+                    gemm_small<4, 3>(m0, m, n0, n);
+                    break;
+                case 0x42:
+                    mc = 4;
+                    nc = 2;
+                    gemm_small<4, 2>(m0, m, n0, n);
+                    break;
+                case 0x41:
+                    mc = 4;
+                    nc = 1;
+                    gemm_small<4, 1>(m0, m, n0, n);
+                    break;
+                case 0x34:
+                    mc = 3;
+                    nc = 4;
+                    gemm_small<3, 4>(m0, m, n0, n);
+                    break;
+                case 0x33:
+                    mc = 3;
+                    nc = 3;
+                    gemm_small<3, 3>(m0, m, n0, n);
+                    break;
+                case 0x32:
+                    mc = 3;
+                    nc = 2;
+                    gemm_small<3, 2>(m0, m, n0, n);
+                    break;
+                case 0x31:
+                    mc = 3;
+                    nc = 1;
+                    gemm_small<3, 1>(m0, m, n0, n);
+                    break;
+                case 0x24:
+                    mc = 2;
+                    nc = 4;
+                    gemm_small<2, 4>(m0, m, n0, n);
+                    break;
+                case 0x23:
+                    mc = 2;
+                    nc = 3;
+                    gemm_small<2, 3>(m0, m, n0, n);
+                    break;
+                case 0x22:
+                    mc = 2;
+                    nc = 2;
+                    gemm_small<2, 2>(m0, m, n0, n);
+                    break;
+                case 0x21:
+                    mc = 2;
+                    nc = 1;
+                    gemm_small<2, 1>(m0, m, n0, n);
+                    break;
+                case 0x14:
+                    mc = 1;
+                    nc = 4;
+                    gemm_small<1, 4>(m0, m, n0, n);
+                    break;
+                case 0x13:
+                    mc = 1;
+                    nc = 3;
+                    gemm_small<1, 3>(m0, m, n0, n);
+                    break;
+                case 0x12:
+                    mc = 1;
+                    nc = 2;
+                    gemm_small<1, 2>(m0, m, n0, n);
+                    break;
+                case 0x11:
+                    mc = 1;
+                    nc = 1;
+                    gemm_small<1, 1>(m0, m, n0, n);
+                    break;
+                default:
+                    return;
+            }
+        }
+        mp = m0 + (m - m0) / mc * mc;
+        np = n0 + (n - n0) / nc * nc;
+        mnpack(mp, m, n0, np);
+        mnpack(m0, m, np, n);
+    }
+
+    void KERNEL_4x8(int64_t ii, int64_t jj) {
+        vec_t vec_A[8], vec_B[16] = {0};
+        acc_t acc_0, acc_1;
+        std::array<int, 4> comparray;
+        vector float fin_res[8] = {0};
+        vector float vs[8] = {0};
+        for (int l = 0; l < k; l++) {
+            __builtin_mma_xxsetaccz(&acc_0);
+            __builtin_mma_xxsetaccz(&acc_1);
+            packNormal<int8_t, vector signed char>((A+(ii*lda)+l), lda, 4, 8, (int8_t*)vec_A, false);
+            packNormal<uint8_t, vector unsigned char>((B+(jj*ldb)+l), ldb, 8, 8, (uint8_t*)vec_B, true);
+            for(int x = 0; x < 8; x++) {
+                __builtin_mma_xvi8ger4pp(&acc_0, vec_A[x], vec_B[x]);
+                __builtin_mma_xvi8ger4pp(&acc_1, vec_A[x], vec_B[x+8]);
+            }
+            for (int I = 0; I<4; I++) {
+                for (int J = 0; J<4; J++) {
+                    *((float*)&vs[I]+J) = (unhalf((A+((ii+I)*lda)+l)->d) * unhalf((B+((jj+J)*ldb)+l)->d));
+                    *((float*)&vs[I+4]+J) = (unhalf((A+((ii+I)*lda)+l)->d) * unhalf((B+((jj+J+4)*ldb)+l)->d));
+                }
+            }
+            auto aoffset = A+(ii*lda)+l;
+            for (int i = 0; i < 4; i++) {
+                comparray[i] = 0;
+                int ca = 0;
+                const int8_t *at = aoffset->qs;
+                for (int j = 0; j < 32; j++)
+                    ca += (int)*at++;
+                comparray[i] = ca;
+                aoffset += lda;
+            }
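+            // comparray[i] is the plain sum of the 32 quants in row i of this A block;
+            // compute() scales it by -128 to cancel the +128 bias that packNormal applied
+            // to the flipped B bytes before scaling by the d_A * d_B factors in vs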
+            compute<4>(&acc_0, 0, 0, comparray, vs, fin_res);
+            compute<4>(&acc_1, 0, 4, comparray, vs, fin_res);
+        }
+        save_res<4, 4>(ii, jj, 0, fin_res);
+        save_res<4, 4>(ii, jj+4, 4, fin_res);
+    }
+
+    void KERNEL_8x4(int64_t ii, int64_t jj) {
+        vec_t vec_A[16], vec_B[8] = {0};
+        acc_t acc_0, acc_1;
+        std::array<int, 8> comparray;
+        vector float fin_res[8] = {0};
+        vector float vs[8] = {0};
+        for (int l = 0; l < k; l++) {
+            __builtin_mma_xxsetaccz(&acc_0);
+            __builtin_mma_xxsetaccz(&acc_1);
+            packNormal<int8_t, vector signed char>((A+(ii*lda)+l), lda, 8, 8, (int8_t*)vec_A, false);
+            packNormal<uint8_t, vector unsigned char>((B+(jj*ldb)+l), ldb, 4, 8, (uint8_t*)vec_B, true);
+            for(int x = 0; x < 8; x++) {
+                __builtin_mma_xvi8ger4pp(&acc_0, vec_A[x], vec_B[x]);
+                __builtin_mma_xvi8ger4pp(&acc_1, vec_A[x+8], vec_B[x]);
+            }
+            for (int I = 0; I<8; I++) {
+                for (int J = 0; J<4; J++) {
+                    *((float*)&vs[I]+J) = (unhalf((A+((ii+I)*lda)+l)->d) * unhalf((B+((jj+J)*ldb)+l)->d));
+                }
+            }
+            auto aoffset = A+(ii*lda)+l;
+            for (int i = 0; i < 8; i++) {
+                comparray[i] = 0;
+                int ca = 0;
+                const int8_t *at = aoffset->qs;
+                for (int j = 0; j < 32; j++)
+                    ca += (int)*at++;
+                comparray[i] = ca;
+                aoffset += lda;
+            }
+            compute<8>(&acc_0, 0, 0, comparray, vs, fin_res);
+            compute<8>(&acc_1, 4, 4, comparray, vs, fin_res);
+        }
+        save_res<4, 4>(ii, jj, 0, fin_res);
+        save_res<4, 4>(ii+4, jj, 4, fin_res);
+    }
+
+    void KERNEL_8x8(int64_t ii, int64_t jj) {
+        vec_t vec_A[16], vec_B[16] = {0};
+        acc_t acc_0, acc_1, acc_2, acc_3;
+        std::array<int, 8> comparray;
+        vector float fin_res[16] = {0};
+        vector float vs[16] = {0};
+        for (int l = 0; l < k; l++) {
+            __builtin_mma_xxsetaccz(&acc_0);
+            __builtin_mma_xxsetaccz(&acc_1);
+            __builtin_mma_xxsetaccz(&acc_2);
+            __builtin_mma_xxsetaccz(&acc_3);
+            packNormal<int8_t, vector signed char>((A+(ii*lda)+l), lda, 8, 8, (int8_t*)vec_A, false);
+            packNormal<uint8_t, vector unsigned char>((B+(jj*ldb)+l), ldb, 8, 8, (uint8_t*)vec_B, true);
+            for(int x = 0; x < 8; x++) {
+                __builtin_mma_xvi8ger4pp(&acc_0, vec_A[x], vec_B[x]);
+                __builtin_mma_xvi8ger4pp(&acc_1, vec_A[x+8], vec_B[x]);
+                __builtin_mma_xvi8ger4pp(&acc_2, vec_A[x], vec_B[x+8]);
+                __builtin_mma_xvi8ger4pp(&acc_3, vec_A[x+8], vec_B[x+8]);
+            }
+            for (int I = 0; I<8; I++) {
+                for (int J = 0; J<4; J++) {
+                    *((float*)&vs[I]+J) = (unhalf((A+((ii+I)*lda)+l)->d) * unhalf((B+((jj+J)*ldb)+l)->d));
+                    *((float*)&vs[I+8]+J) = (unhalf((A+((ii+I)*lda)+l)->d) * unhalf((B+((jj+J+4)*ldb)+l)->d));
+                }
+            }
+            auto aoffset = A+(ii*lda)+l;
+            for (int i = 0; i < 8; i++) {
+                comparray[i] = 0;
+                int ca = 0;
+                const int8_t *at = aoffset->qs;
+                for (int j = 0; j < 32; j++)
+                    ca += (int)*at++;
+                comparray[i] = ca;
+                aoffset += lda;
+            }
+            compute<8>(&acc_0, 0, 0, comparray, vs, fin_res);
+            compute<8>(&acc_1, 4, 4, comparray, vs, fin_res);
+            compute<8>(&acc_2, 0, 8, comparray, vs, fin_res);
+            compute<8>(&acc_3, 4, 12, comparray, vs, fin_res);
+        }
+        save_res<4, 4>(ii, jj, 0, fin_res);
+        save_res<4, 4>(ii+4, jj, 4, fin_res);
+        save_res<4, 4>(ii, jj+4, 8, fin_res);
+        save_res<4, 4>(ii+4, jj+4, 12, fin_res);
+    }
+
+    template<int RM, int RN>
+    void gemm_small(int64_t m0, int64_t m, int64_t n0, int64_t n) {
+        int64_t ytiles = (m - m0) / RM;
+        int64_t xtiles = (n - n0) / RN;
+        int64_t tiles = xtiles * ytiles;
+        int64_t duty = (tiles + nth - 1) / nth;
+        int64_t start = duty * ith;
+        int64_t end = start + duty;
+        vec_t vec_A[8], vec_B[8] = {0};
+        vector signed int vec_C[4];
+        acc_t acc_0;
+
+        if (end > tiles)
+            end = tiles;
+        for (int64_t job = start; job < end; ++job) {
+            int64_t ii = m0 + job / xtiles * RM;
+            int64_t jj = n0 + job % xtiles * RN;
+            std::array<int, RM> comparray;
+            vector float res[4] = {0};
+            vector float fin_res[4] = {0};
+            vector float vs[4] = {0};
+            vector float CA[4] = {0};
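+            // edge tiles handled here are at most 4x4, so a single accumulator suffices;
+            // the -128 * row-sum compensation for the flipped B operand is applied inline
+            // at the end of each k iteration instead of going through compute()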
+            __builtin_prefetch((A+(ii*lda)+0)->qs, 0, 1); // prefetch first value
+            __builtin_prefetch((B+(jj*ldb)+0)->qs, 0, 1); // prefetch first value
+            for (int l = 0; l < k; l++) {
+                __builtin_prefetch((A+(ii*lda)+(l+1))->qs, 0, 1); // prefetch one loop ahead
+                __builtin_prefetch((B+(jj*ldb)+(l+1))->qs, 0, 1); // prefetch one loop ahead
+                __builtin_mma_xxsetaccz(&acc_0);
+                packNormal<int8_t, vector signed char>((A+(ii*lda)+l), lda, RM, 8, (int8_t*)vec_A, false);
+                packNormal<uint8_t, vector unsigned char>((B+(jj*ldb)+l), ldb, RN, 8, (uint8_t*)vec_B, true);
+                for(int x = 0; x < 8; x+=4) {
+                    __builtin_mma_xvi8ger4pp(&acc_0, vec_A[x], vec_B[x]);
+                    __builtin_mma_xvi8ger4pp(&acc_0, vec_A[x+1], vec_B[x+1]);
+                    __builtin_mma_xvi8ger4pp(&acc_0, vec_A[x+2], vec_B[x+2]);
+                    __builtin_mma_xvi8ger4pp(&acc_0, vec_A[x+3], vec_B[x+3]);
+                }
+                for (int I = 0; I<RM; I++) {
+                    for (int J = 0; J<RN; J++) {
+                        *((float*)&vs[I]+J) = (unhalf((A+((ii+I)*lda)+l)->d) * unhalf((B+((jj+J)*ldb)+l)->d));
+                    }
+                }
+                __builtin_mma_disassemble_acc(vec_C, &acc_0);
+                auto aoffset = A+(ii*lda)+l;
+                for (int i = 0; i < RM; i++) {
+                    comparray[i] = 0;
+                    int ca = 0;
+                    const int8_t *at = aoffset->qs;
+                    for (int j = 0; j < 32; j++)
+                        ca += (int)*at++;
+                    comparray[i] = ca;
+                    aoffset += lda;
+                }
+
+                for (int i = 0; i < RM; i++) {
+                    CA[i] = vec_splats((float)(((double)comparray[i]) * -128.0));
+                    res[i] = vec_add(vec_ctf(vec_C[i], 0), CA[i]);
+                    fin_res[i] = vec_madd(res[i], vs[i], fin_res[i]);
+                }
+            }
+            save_res<RM, RN>(ii, jj, 0, fin_res);
+        }
+    }
+
+    template<int RM, int RN>
+    inline void kernel(int64_t ii, int64_t jj) {
+        if constexpr(RM == 4 && RN == 8) {
+            KERNEL_4x8(ii,jj);
+        } else if constexpr(RM == 8 && RN == 4) {
+            KERNEL_8x4(ii,jj);
+        } else if constexpr(RM == 8 && RN == 8) {
+            KERNEL_8x8(ii,jj);
+        } else {
+            static_assert(false, "RN/RM values not supported");
+        }
+    }
+
+    template <int RM, int RN>
+    NOINLINE void gemm(int64_t m0, int64_t m, int64_t n0, int64_t n) {
+        int64_t ytiles = (m - m0) / RM;
+        int64_t xtiles = (n - n0) / RN;
+        int64_t tiles = xtiles * ytiles;
+        int64_t duty = (tiles + nth - 1) / nth;
+        int64_t start = duty * ith;
+        int64_t end = start + duty;
+        if (end > tiles)
+            end = tiles;
+        for (int64_t job = start; job < end; ++job) {
+            int64_t ii = m0 + job / xtiles * RM;
+            int64_t jj = n0 + job % xtiles * RN;
+            kernel<RM, RN>(ii, jj);
+        }
+    }
+
+    const TA *const A;
+    const TB *const B;
+    TC *C;
+    TA *At;
+    TB *Bt;
+    const int64_t k;
+    const int64_t lda;
+    const int64_t ldb;
+    const int64_t ldc;
+    const int ith;
+    const int nth;
+};
+
 template <typename TA, typename TB, typename TC>
 class tinyBLAS_PPC {
   public:
@@ -1068,13 +1767,17 @@ class tinyBLAS_PPC {
 
     void (tinyBLAS_PPC::*kernel)(int64_t, int64_t);
 
-    void READ_BLOCK(const float* a, int64_t lda, int rows, int cols, float* vec) {
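+    // packTranspose replaces READ_BLOCK: it packs a rows x cols tile of `a` (leading
+    // dimension lda) into `vec`, transposed so that each 4-float vector holds the values
+    // sharing one column (k) index, which is the layout the xvf32gerpp kernels consume;
+    // VA is the VSX vector type used for the shuffles.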
+    template<typename VA>
+    void packTranspose(const TA* a, int64_t lda, int rows, int cols, TA* vec) {
         int64_t i, j;
-        float *aoffset = NULL, *boffset = NULL;
-        float *aoffset1 = NULL, *aoffset2 = NULL, *aoffset3 = NULL, *aoffset4 = NULL;
-        float *aoffset5 = NULL, *aoffset6 = NULL, *aoffset7 = NULL, *aoffset8 = NULL;
-
-        aoffset = const_cast<float*>(a);
+        TA *aoffset = NULL, *boffset = NULL;
+        TA *aoffset1 = NULL, *aoffset2 = NULL, *aoffset3 = NULL, *aoffset4 = NULL;
+        TA *aoffset5 = NULL, *aoffset6 = NULL, *aoffset7 = NULL, *aoffset8 = NULL;
+        __vector_pair C1, C2, C3, C4, C5, C6, C7, C8;
+        VA c1[2] = {0}, c2[2] = {0}, c3[2] = {0}, c4[2] = {0};
+        VA c5[2] = {0}, c6[2] = {0}, c7[2] = {0}, c8[2] = {0};
+        VA t1, t2, t3, t4, t5, t6, t7, t8;
+        aoffset = const_cast<TA*>(a);
         boffset = vec;
         j = (rows >> 3);
         if (j > 0) {
@@ -1090,9 +1793,6 @@ class tinyBLAS_PPC {
                 aoffset += 8 * lda;
                 i = (cols >> 3);
                 if (i > 0) {
-                    __vector_pair C1, C2, C3, C4, C5, C6, C7, C8;
-                    vector float c1[2], c2[2], c3[2], c4[2], c5[2], c6[2], c7[2], c8[2];
-                    vector float t1, t2, t3, t4, t5, t6, t7, t8;
                     do {
                         C1 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset1);
                         C2 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset2);
@@ -1172,21 +1872,19 @@ class tinyBLAS_PPC {
                    } while(i > 0);
                }
                if (cols & 4) {
-                   vector float c1, c2, c3, c4, c5, c6, c7, c8;
-                   vector float t1, t2, t3, t4, t5, t6, t7, t8;
-                   c1 = vec_xl(0, aoffset1);
-                   c2 = vec_xl(0, aoffset2);
-                   c3 = vec_xl(0, aoffset3);
-                   c4 = vec_xl(0, aoffset4);
-                   c5 = vec_xl(0, aoffset5);
-                   c6 = vec_xl(0, aoffset6);
-                   c7 = vec_xl(0, aoffset7);
-                   c8 = vec_xl(0, aoffset8);
-
-                   t1 = vec_mergeh(c1, c2);
-                   t2 = vec_mergeh(c3, c4);
-                   t3 = vec_mergeh(c5, c6);
-                   t4 = vec_mergeh(c7, c8);
+                   c1[0] = vec_xl(0, aoffset1);
+                   c2[0] = vec_xl(0, aoffset2);
+                   c3[0] = vec_xl(0, aoffset3);
+                   c4[0] = vec_xl(0, aoffset4);
+                   c5[0] = vec_xl(0, aoffset5);
+                   c6[0] = vec_xl(0, aoffset6);
+                   c7[0] = vec_xl(0, aoffset7);
+                   c8[0] = vec_xl(0, aoffset8);
+
+                   t1 = vec_mergeh(c1[0], c2[0]);
+                   t2 = vec_mergeh(c3[0], c4[0]);
+                   t3 = vec_mergeh(c5[0], c6[0]);
+                   t4 = vec_mergeh(c7[0], c8[0]);
                    t5 = vec_xxpermdi(t1, t2, 0);
                    t6 = vec_xxpermdi(t3, t4, 0);
                    t7 = vec_xxpermdi(t1, t2, 3);
@@ -1196,10 +1894,10 @@
                    vec_xst(t7, 0, boffset+8);
                    vec_xst(t8, 0, boffset+12);
 
-                   t1 = vec_mergel(c1, c2);
-                   t2 = vec_mergel(c3, c4);
-                   t3 = vec_mergel(c5, c6);
-                   t4 = vec_mergel(c7, c8);
+                   t1 = vec_mergel(c1[0], c2[0]);
+                   t2 = vec_mergel(c3[0], c4[0]);
+                   t3 = vec_mergel(c5[0], c6[0]);
+                   t4 = vec_mergel(c7[0], c8[0]);
                    t5 = vec_xxpermdi(t1, t2, 0);
                    t6 = vec_xxpermdi(t3, t4, 0);
                    t7 = vec_xxpermdi(t1, t2, 3);
@@ -1221,9 +1919,6 @@ class tinyBLAS_PPC {
            aoffset += 4 * lda;
            i = (cols >> 3);
            if (i > 0) {
-               __vector_pair C1, C2, C3, C4;
-               vector float c1[2], c2[2], c3[2], c4[2];
-               vector float t1, t2, t3, t4, t5, t6, t7, t8;
                do {
                    C1 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset1);
                    C2 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset2);
@@ -1270,22 +1965,20 @@
            }
 
            if (cols & 4) {
-               vector float c1, c2, c3, c4;
-               vector float t1, t2, t3, t4;
-               c1 = vec_xl(0, aoffset1);
-               c2 = vec_xl(0, aoffset2);
-               c3 = vec_xl(0, aoffset3);
-               c4 = vec_xl(0, aoffset4);
-
-               t1 = vec_mergeh(c1, c2);
-               t2 = vec_mergeh(c3, c4);
+               c1[0] = vec_xl(0, aoffset1);
+               c2[0] = vec_xl(0, aoffset2);
+               c3[0] = vec_xl(0, aoffset3);
+               c4[0] = vec_xl(0, aoffset4);
+
+               t1 = vec_mergeh(c1[0], c2[0]);
+               t2 = vec_mergeh(c3[0], c4[0]);
                t3 = vec_xxpermdi(t1, t2, 0);
                t4 = vec_xxpermdi(t1, t2, 3);
                vec_xst(t3, 0, boffset);
                vec_xst(t4, 0, boffset+4);
-               t1 = vec_mergel(c1, c2);
-               t2 = vec_mergel(c3, c4);
+               t1 = vec_mergel(c1[0], c2[0]);
+               t2 = vec_mergel(c3[0], c4[0]);
                t3 = vec_xxpermdi(t1, t2, 0);
                t4 = vec_xxpermdi(t1, t2, 3);
                vec_xst(t3, 0, boffset+8);
@@ -1297,21 +1990,19 @@
            aoffset2 = aoffset1 + lda;
            aoffset3 = aoffset2 + lda;
            if (cols & 4) {
-               vector float c1, c2, c3, c4 = {0};
-               vector float t1, t2, t3, t4;
-               c1 = vec_xl(0, aoffset1);
-               c2 = vec_xl(0, aoffset2);
-               c3 = vec_xl(0, aoffset3);
-
-               t1 = vec_mergeh(c1, c2);
-               t2 = vec_mergeh(c3, c4);
+               c1[0] = vec_xl(0, aoffset1);
+               c2[0] = vec_xl(0, aoffset2);
+               c3[0] = vec_xl(0, aoffset3);
+
+               t1 = vec_mergeh(c1[0], c2[0]);
+               t2 = vec_mergeh(c3[0], c4[0]);
                t3 = vec_xxpermdi(t1, t2, 0);
                t4 = vec_xxpermdi(t1, t2, 3);
                vec_xst(t3, 0, boffset);
                vec_xst(t4, 0, boffset+4);
-               t1 = vec_mergel(c1, c2);
-               t2 = vec_mergel(c3, c4);
+               t1 = vec_mergel(c1[0], c2[0]);
+               t2 = vec_mergel(c3[0], c4[0]);
                t3 = vec_xxpermdi(t1, t2, 0);
                t4 = vec_xxpermdi(t1, t2, 3);
                vec_xst(t3, 0, boffset+8);
@@ -1319,14 +2010,13 @@ class tinyBLAS_PPC {
            }
        }
    }
-
    void KERNEL_4x4(int64_t ii, int64_t jj) {
        vec_t vec_A[4], vec_B[4], vec_C[4];
        acc_t acc_0;
        __builtin_mma_xxsetaccz(&acc_0);
        for (int l = 0; l < k; l+=4) {
-           READ_BLOCK(A+(ii*lda)+l, lda, 4, 4, (float*)vec_A);
-           READ_BLOCK(B+(jj*ldb)+l, ldb, 4, 4, (float*)vec_B);
+           packTranspose<vector float>(A+(ii*lda)+l, lda, 4, 4, (TA*)vec_A);
+           packTranspose<vector float>(B+(jj*ldb)+l, ldb, 4, 4, (TA*)vec_B);
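+           // each xvf32gerpp below accumulates one rank-1 (outer-product) update: the four
+           // A values and four B values that share a k index multiply into the 4x4 tile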
            __builtin_mma_xvf32gerpp(&acc_0, vec_A[0], vec_B[0]);
            __builtin_mma_xvf32gerpp(&acc_0, vec_A[1], vec_B[1]);
            __builtin_mma_xvf32gerpp(&acc_0, vec_A[2], vec_B[2]);
@@ -1341,8 +2031,8 @@ class tinyBLAS_PPC {
        __builtin_mma_xxsetaccz(&acc_0);
        __builtin_mma_xxsetaccz(&acc_1);
        for (int64_t l = 0; l < k; l+=4) {
-           READ_BLOCK(A+(ii*lda)+l, lda, 4, 4, (float*)vec_A);
-           READ_BLOCK(B+(jj*ldb)+l, ldb, 8, 4, (float*)vec_B);
+           packTranspose<vector float>(A+(ii*lda)+l, lda, 4, 4, (TA*)vec_A);
+           packTranspose<vector float>(B+(jj*ldb)+l, ldb, 8, 4, (TA*)vec_B);
            __builtin_mma_xvf32gerpp(&acc_0, vec_A[0], (vec_t)vec_B[0]);
            __builtin_mma_xvf32gerpp(&acc_1, vec_A[0], (vec_t)vec_B[1]);
            __builtin_mma_xvf32gerpp(&acc_0, vec_A[1], (vec_t)vec_B[2]);
@@ -1362,8 +2052,8 @@ class tinyBLAS_PPC {
        __builtin_mma_xxsetaccz(&acc_0);
        __builtin_mma_xxsetaccz(&acc_1);
        for (int64_t l = 0; l < k; l+=4) {
-           READ_BLOCK(A+(ii*lda)+l, lda, 8, 4, (float*)vec_A);
-           READ_BLOCK(B+(jj*ldb)+l, ldb, 4, 4, (float*)vec_B);
+           packTranspose<vector float>(A+(ii*lda)+l, lda, 8, 4, (TA*)vec_A);
+           packTranspose<vector float>(B+(jj*ldb)+l, ldb, 4, 4, (TA*)vec_B);
            __builtin_mma_xvf32gerpp(&acc_0, (vec_t)vec_A[0], vec_B[0]);
            __builtin_mma_xvf32gerpp(&acc_1, (vec_t)vec_A[1], vec_B[0]);
            __builtin_mma_xvf32gerpp(&acc_0, (vec_t)vec_A[2], vec_B[1]);
@@ -1385,8 +2075,8 @@ class tinyBLAS_PPC {
        __builtin_mma_xxsetaccz(&acc_2);
        __builtin_mma_xxsetaccz(&acc_3);
        for (int l = 0; l < k; l+=8) {
-           READ_BLOCK(A+(ii*lda)+l, lda, 8, 8, (float*)vec_A);
-           READ_BLOCK(B+(jj*ldb)+l, ldb, 8, 8, (float*)vec_B);
+           packTranspose<vector float>(A+(ii*lda)+l, lda, 8, 8, (TA*)vec_A);
+           packTranspose<vector float>(B+(jj*ldb)+l, ldb, 8, 8, (TA*)vec_B);
            for(int x = 0; x < 16; x+=2) {
                __builtin_mma_xvf32gerpp(&acc_0, (vec_t)vec_A[x], vec_B[x]);
                __builtin_mma_xvf32gerpp(&acc_1, (vec_t)vec_A[x], vec_B[x+1]);
@@ -1569,15 +2259,15 @@ class tinyBLAS_PPC {
            vec_t vec_A[4], vec_B[4];
            for (int l=0; l<k; l+=4) {
                if (RN >= 4 && RM == 1) {
-                   float* a = const_cast<float*>(A+(ii)*lda+l);
-                   READ_BLOCK(B+(jj*ldb)+l, ldb, 4, 4, (float*)vec_B);
+                   TA* a = const_cast<TA*>(A+(ii)*lda+l);
+                   packTranspose<vector float>(B+(jj*ldb)+l, ldb, 4, 4, (TA*)vec_B);
                    vec_A[0] = (vec_t)vec_xl(0,a);
-                   vec_A[1] = (vec_t)vec_splats(*((float*)&vec_A+1));
-                   vec_A[2] = (vec_t)vec_splats(*((float*)&vec_A+2));
-                   vec_A[3] = (vec_t)vec_splats(*((float*)&vec_A+3));
+                   vec_A[1] = (vec_t)vec_splats(*((TA*)&vec_A+1));
+                   vec_A[2] = (vec_t)vec_splats(*((TA*)&vec_A+2));
+                   vec_A[3] = (vec_t)vec_splats(*((TA*)&vec_A+3));
                } else {
-                   READ_BLOCK(A+(ii*lda)+l, lda, RM, 4, (float*)vec_A);
-                   READ_BLOCK(B+(jj*ldb)+l, ldb, RN, 4, (float*)vec_B);
+                   packTranspose<vector float>(A+(ii*lda)+l, lda, RM, 4, (TA*)vec_A);
+                   packTranspose<vector float>(B+(jj*ldb)+l, ldb, RN, 4, (TA*)vec_B);
                }
                __builtin_mma_xvf32gerpp(&acc_0, vec_A[0], vec_B[0]);
                __builtin_mma_xvf32gerpp(&acc_0, vec_A[1], vec_B[1]);
@@ -1587,7 +2277,7 @@ class tinyBLAS_PPC {
            __builtin_mma_disassemble_acc(vec_C, &acc_0);
            for (int I = 0; I < RM; I++) {
                for (int J = 0; J < RN; J++) {
-                   *((float*)(C+ii+((jj+J)*ldc)+I)) = *((float*)&vec_C[I]+J);
+                   *((TC*)(C+ii+((jj+J)*ldc)+I)) = *((TC*)&vec_C[I]+J);
                }
            }
        }
@@ -1810,6 +2500,20 @@ bool llamafile_sgemm(const struct ggml_compute_params * params, int64_t m, int64
            params->ith, params->nth};
        tb.matmul(m, n);
        return true;
+
+#elif defined(__MMA__)
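+        // the PPC Q8_0 kernels only cover tile widths of 4 or >= 8; for any other shape
+        // return false so ggml falls back to its generic matmul path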
+        if (n < 8 && n != 4)
+            return false;
+        if (m < 8 && m != 4)
+            return false;
+        tinyBLAS_Q0_PPC<block_q8_0, block_q8_0, float> tb{
+            k, (const block_q8_0 *)A, lda,
+            (const block_q8_0 *)B, ldb,
+            (float *)C, ldc,
+            params->ith, params->nth};
+        tb.matmul(m, n);
+        return true;
+
 #else
     return false;
 #endif