diff --git a/CMakeLists.txt b/CMakeLists.txt index d88c5b1..54d18b0 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -31,7 +31,7 @@ option(GGML_NO_ACCELERATE "ggml: disable Accelerate framework" OFF) # sanitizers if (GGML_SANITIZE_THREAD) - set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fsanitize=thread") + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fsanitize=thread") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsanitize=thread") endif() diff --git a/examples/gpt-2/CMakeLists.txt b/examples/gpt-2/CMakeLists.txt index 9960cfe..3b7ab5e 100644 --- a/examples/gpt-2/CMakeLists.txt +++ b/examples/gpt-2/CMakeLists.txt @@ -4,3 +4,10 @@ set(TEST_TARGET gpt-2) add_executable(${TEST_TARGET} main.cpp) target_link_libraries(${TEST_TARGET} PRIVATE ggml ggml_utils) + +# +# gpt-2-quantize + +set(TEST_TARGET gpt-2-quantize) +add_executable(${TEST_TARGET} quantize.cpp) +target_link_libraries(${TEST_TARGET} PRIVATE ggml ggml_utils) diff --git a/examples/gpt-2/README.md b/examples/gpt-2/README.md index 60fea55..ed61f86 100644 --- a/examples/gpt-2/README.md +++ b/examples/gpt-2/README.md @@ -94,7 +94,7 @@ Done! Model '117M' saved in 'models/gpt-2-117M/' Run the convert-ckpt-to-ggml.py script to convert the model to ggml format. - python /Users/john/ggml/examples/gpt-2/convert-ckpt-to-ggml.py models/gpt-2-117M/ + python /Users/john/ggml/examples/gpt-2/convert-ckpt-to-ggml.py models/gpt-2-117M/ 1 ``` diff --git a/examples/gpt-2/convert-ckpt-to-ggml.py b/examples/gpt-2/convert-ckpt-to-ggml.py index 7ae4380..60cd963 100644 --- a/examples/gpt-2/convert-ckpt-to-ggml.py +++ b/examples/gpt-2/convert-ckpt-to-ggml.py @@ -45,8 +45,18 @@ def bytes_to_unicode(): cs = [chr(n) for n in cs] return dict(zip(bs, cs)) -if len(sys.argv) < 2: - print("Usage: convert-ckpt-to-ggml.py dir-model [use-f32]\n") +# helper method to convert a numpy array to different float types +def convert_to_ftype(data, ftype): + # fp16 + if ftype == 1: + return data.astype(np.float16) + + assert False, "Invalid ftype: " + str(ftype) + +if len(sys.argv) < 3: + print("Usage: convert-ckpt-to-ggml.py dir-model ftype\n") + print(" ftype == 0 -> float32") + print(" ftype == 1 -> float16") sys.exit(1) # output in the same directory as the model @@ -59,11 +69,20 @@ with open(dir_model + "/encoder.json", "r") as f: with open(dir_model + "/hparams.json", "r") as f: hparams = json.load(f) -# use 16-bit or 32-bit floats -use_f16 = True +# possible data types +# ftype == 0 -> float32 +# ftype == 1 -> float16 +# +# map from ftype to string +ftype_str = ["f32", "f16"] + +ftype = 1 if len(sys.argv) > 2: - use_f16 = False - fname_out = sys.argv[1] + "/ggml-model-f32.bin" + ftype = int(sys.argv[2]) + if ftype < 0 or ftype > 1: + print("Invalid ftype: " + str(ftype)) + sys.exit(1) + fname_out = sys.argv[1] + "/ggml-model-" + ftype_str[ftype] + ".bin" list_vars = tf.train.list_variables(dir_model) @@ -75,7 +94,7 @@ fout.write(struct.pack("i", hparams["n_ctx"])) fout.write(struct.pack("i", hparams["n_embd"])) fout.write(struct.pack("i", hparams["n_head"])) fout.write(struct.pack("i", hparams["n_layer"])) -fout.write(struct.pack("i", use_f16)) +fout.write(struct.pack("i", ftype)) byte_encoder = bytes_to_unicode() byte_decoder = {v:k for k, v in byte_encoder.items()} @@ -93,9 +112,22 @@ for name, shape in list_vars: data = tf.train.load_variable(dir_model, name).squeeze() n_dims = len(data.shape); - # ftype == 0 -> float32, ftype == 1 -> float16 - ftype = 0; - if use_f16: + # for efficiency - transpose the projection matrices + # "model/h.*/attn/c_attn/w" + # "model/h.*/attn/c_proj/w" + # 
"model/h.*/mlp/c_fc/w" + # "model/h.*/mlp/c_proj/w" + if name[-14:] == "/attn/c_attn/w" or \ + name[-14:] == "/attn/c_proj/w" or \ + name[-11:] == "/mlp/c_fc/w" or \ + name[-13:] == "/mlp/c_proj/w": + print(" Transposing") + data = data.transpose() + + dshape = data.shape + + ftype_cur = 0 + if ftype != 0: # match name: # "model/wte" # "model/h.*/attn/c_attn/w" @@ -103,24 +135,19 @@ for name, shape in list_vars: # "model/h.*/mlp/c_fc/w" # "model/h.*/mlp/c_proj/w" if name == "model/wte" or name[-2:] == "/w": - print(" Converting to float16") - data = data.astype(np.float16) - ftype = 1 + print(" Converting to " + ftype_str[ftype]) + data = convert_to_ftype(data, ftype) + ftype_cur = ftype else: print(" Converting to float32") data = data.astype(np.float32) - ftype = 0 - - # for efficiency - transpose the projection matrices - if name[-13:] == "/mlp/c_proj/w": - print(" Transposing") - data = data.transpose() + ftype_cur = 0 # header str = name.encode('utf-8') - fout.write(struct.pack("iii", n_dims, len(str), ftype)) + fout.write(struct.pack("iii", n_dims, len(str), ftype_cur)) for i in range(n_dims): - fout.write(struct.pack("i", data.shape[n_dims - 1 - i])) + fout.write(struct.pack("i", dshape[n_dims - 1 - i])) fout.write(str); # data diff --git a/examples/gpt-2/main.cpp b/examples/gpt-2/main.cpp index 134a930..6173ed3 100644 --- a/examples/gpt-2/main.cpp +++ b/examples/gpt-2/main.cpp @@ -128,9 +128,23 @@ bool gpt2_model_load(const std::string & fname, gpt2_model & model, gpt_vocab & } } - // for the big tensors, we have the option to store the data in 16-bit floats + // for the big tensors, we have the option to store the data in 16-bit floats or quantized // in order to save memory and also to speed up the computation - const ggml_type wtype = model.hparams.f16 ? 
GGML_TYPE_F16 : GGML_TYPE_F32; + ggml_type wtype = GGML_TYPE_COUNT; + switch (model.hparams.f16) { + case 0: wtype = GGML_TYPE_F32; break; + case 1: wtype = GGML_TYPE_F16; break; + case 2: wtype = GGML_TYPE_Q4_0; break; + case 3: wtype = GGML_TYPE_Q4_1; break; + default: + { + fprintf(stderr, "%s: invalid model file '%s' (bad f16 value %d)\n", + __func__, fname.c_str(), model.hparams.f16); + return false; + } + } + + const ggml_type wtype2 = GGML_TYPE_F32; auto & ctx = model.ctx; @@ -144,32 +158,32 @@ bool gpt2_model_load(const std::string & fname, gpt2_model & model, gpt_vocab & const int n_ctx = hparams.n_ctx; const int n_vocab = hparams.n_vocab; - ctx_size += n_embd*ggml_type_size(GGML_TYPE_F32); // ln_f_g - ctx_size += n_embd*ggml_type_size(GGML_TYPE_F32); // ln_f_b + ctx_size += n_embd*ggml_type_sizef(GGML_TYPE_F32); // ln_f_g + ctx_size += n_embd*ggml_type_sizef(GGML_TYPE_F32); // ln_f_b - ctx_size += n_vocab*n_embd*ggml_type_size(wtype); // wte - ctx_size += n_ctx*n_embd*ggml_type_size(GGML_TYPE_F32); // wpe + ctx_size += n_vocab*n_embd*ggml_type_sizef(wtype); // wte + ctx_size += n_ctx*n_embd*ggml_type_sizef(GGML_TYPE_F32); // wpe - ctx_size += n_layer*(n_embd*ggml_type_size(GGML_TYPE_F32)); // ln_1_g - ctx_size += n_layer*(n_embd*ggml_type_size(GGML_TYPE_F32)); // ln_1_b + ctx_size += n_layer*(n_embd*ggml_type_sizef(GGML_TYPE_F32)); // ln_1_g + ctx_size += n_layer*(n_embd*ggml_type_sizef(GGML_TYPE_F32)); // ln_1_b - ctx_size += n_layer*(n_embd*ggml_type_size(GGML_TYPE_F32)); // ln_2_g - ctx_size += n_layer*(n_embd*ggml_type_size(GGML_TYPE_F32)); // ln_2_b + ctx_size += n_layer*(n_embd*ggml_type_sizef(GGML_TYPE_F32)); // ln_2_g + ctx_size += n_layer*(n_embd*ggml_type_sizef(GGML_TYPE_F32)); // ln_2_b - ctx_size += n_layer*(3*n_embd*n_embd*ggml_type_size(wtype)); // c_attn_attn_w - ctx_size += n_layer*( 3*n_embd*ggml_type_size(GGML_TYPE_F32)); // c_attn_attn_b + ctx_size += n_layer*(3*n_embd*n_embd*ggml_type_sizef(wtype)); // c_attn_attn_w + ctx_size += n_layer*( 3*n_embd*ggml_type_sizef(GGML_TYPE_F32)); // c_attn_attn_b - ctx_size += n_layer*(n_embd*n_embd*ggml_type_size(wtype)); // c_attn_proj_w - ctx_size += n_layer*( n_embd*ggml_type_size(GGML_TYPE_F32)); // c_attn_proj_b + ctx_size += n_layer*(n_embd*n_embd*ggml_type_sizef(wtype)); // c_attn_proj_w + ctx_size += n_layer*( n_embd*ggml_type_sizef(GGML_TYPE_F32)); // c_attn_proj_b - ctx_size += n_layer*(4*n_embd*n_embd*ggml_type_size(wtype)); // c_mlp_fc_w - ctx_size += n_layer*( 4*n_embd*ggml_type_size(GGML_TYPE_F32)); // c_mlp_fc_b + ctx_size += n_layer*(4*n_embd*n_embd*ggml_type_sizef(wtype)); // c_mlp_fc_w + ctx_size += n_layer*( 4*n_embd*ggml_type_sizef(GGML_TYPE_F32)); // c_mlp_fc_b - ctx_size += n_layer*(4*n_embd*n_embd*ggml_type_size(wtype)); // c_mlp_proj_w - ctx_size += n_layer*( n_embd*ggml_type_size(GGML_TYPE_F32)); // c_mlp_proj_b + ctx_size += n_layer*(4*n_embd*n_embd*ggml_type_sizef(wtype)); // c_mlp_proj_w + ctx_size += n_layer*( n_embd*ggml_type_sizef(GGML_TYPE_F32)); // c_mlp_proj_b - ctx_size += n_ctx*n_layer*n_embd*ggml_type_size(GGML_TYPE_F32); // memory_k - ctx_size += n_ctx*n_layer*n_embd*ggml_type_size(GGML_TYPE_F32); // memory_v + ctx_size += n_ctx*n_layer*n_embd*ggml_type_sizef(GGML_TYPE_F32); // memory_k + ctx_size += n_ctx*n_layer*n_embd*ggml_type_sizef(GGML_TYPE_F32); // memory_v ctx_size += (6 + 12*n_layer)*256; // object overhead @@ -223,13 +237,13 @@ bool gpt2_model_load(const std::string & fname, gpt2_model & model, gpt_vocab & layer.ln_2_g = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd); 
layer.ln_2_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd); - layer.c_attn_attn_w = ggml_new_tensor_2d(ctx, wtype, 3*n_embd, n_embd); + layer.c_attn_attn_w = ggml_new_tensor_2d(ctx, wtype, n_embd, 3*n_embd); layer.c_attn_attn_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 3*n_embd); layer.c_attn_proj_w = ggml_new_tensor_2d(ctx, wtype, n_embd, n_embd); layer.c_attn_proj_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd); - layer.c_mlp_fc_w = ggml_new_tensor_2d(ctx, wtype, 4*n_embd, n_embd); + layer.c_mlp_fc_w = ggml_new_tensor_2d(ctx, wtype, n_embd, 4*n_embd); layer.c_mlp_fc_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 4*n_embd); layer.c_mlp_proj_w_trans = ggml_new_tensor_2d(ctx, wtype, 4*n_embd, n_embd); @@ -319,9 +333,26 @@ bool gpt2_model_load(const std::string & fname, gpt2_model & model, gpt_vocab & return false; } - const size_t bpe = (ftype == 0) ? sizeof(float) : sizeof(ggml_fp16_t); + if (0) { + static const char * ftype_str[] = { "f32", "f16", "q4_0", "q4_1", }; + printf("%24s - [%5d, %5d], type = %6s, %6.2f MB, %9zu bytes\n", name.data(), ne[0], ne[1], ftype_str[ftype], ggml_nbytes(tensor)/1024.0/1024.0, ggml_nbytes(tensor)); + } - if (nelements*bpe != ggml_nbytes(tensor)) { + size_t bpe = 0; + + switch (ftype) { + case 0: bpe = ggml_type_size(GGML_TYPE_F32); break; + case 1: bpe = ggml_type_size(GGML_TYPE_F16); break; + case 2: bpe = ggml_type_size(GGML_TYPE_Q4_0); assert(ne[0] % 64 == 0); break; + case 3: bpe = ggml_type_size(GGML_TYPE_Q4_1); assert(ne[0] % 64 == 0); break; + default: + { + fprintf(stderr, "%s: unknown ftype %d in model file\n", __func__, ftype); + return false; + } + }; + + if ((nelements*bpe)/ggml_blck_size(tensor->type) != ggml_nbytes(tensor)) { fprintf(stderr, "%s: tensor '%s' has wrong size in model file: got %zu, expected %zu\n", __func__, name.data(), ggml_nbytes(tensor), nelements*bpe); return false; @@ -329,7 +360,6 @@ bool gpt2_model_load(const std::string & fname, gpt2_model & model, gpt_vocab & fin.read(reinterpret_cast(tensor->data), ggml_nbytes(tensor)); - //printf("%24s - [%5d, %5d], type = %6s, %6.2f MB\n", name.data(), ne[0], ne[1], ftype == 0 ? 
"float" : "f16", ggml_nbytes(tensor)/1024.0/1024.0); total_size += ggml_nbytes(tensor); } @@ -431,7 +461,7 @@ bool gpt2_eval( // [2304, N] { cur = ggml_mul_mat(ctx0, - ggml_transpose(ctx0, model.layers[il].c_attn_attn_w), + model.layers[il].c_attn_attn_w, cur); cur = ggml_add(ctx0, @@ -538,7 +568,7 @@ bool gpt2_eval( // [768, N] { cur = ggml_mul_mat(ctx0, - ggml_transpose(ctx0, model.layers[il].c_attn_proj_w), + model.layers[il].c_attn_proj_w, cur); cur = ggml_add(ctx0, @@ -575,7 +605,7 @@ bool gpt2_eval( // cur = fc_w*cur + fc_b // [3072, N] cur = ggml_mul_mat(ctx0, - ggml_transpose(ctx0, model.layers[il].c_mlp_fc_w), + model.layers[il].c_mlp_fc_w, cur); cur = ggml_add(ctx0, @@ -705,8 +735,12 @@ int main(int argc, char ** argv) { params.n_predict = std::min(params.n_predict, model.hparams.n_ctx - (int) embd_inp.size()); - printf("%s: number of tokens in prompt = %zu\n", __func__, embd_inp.size()); - printf("\n"); + printf("%s: prompt: '%s'\n", __func__, params.prompt.c_str()); + printf("%s: number of tokens in prompt = %zu, first 8 tokens: ", __func__, embd_inp.size()); + for (int i = 0; i < std::min(8, (int) embd_inp.size()); i++) { + printf("%d ", embd_inp[i]); + } + printf("\n\n"); // submit the input prompt token-by-token // this reduces the memory usage during inference, at the cost of a bit of speed at the beginning diff --git a/examples/gpt-2/quantize.cpp b/examples/gpt-2/quantize.cpp new file mode 100644 index 0000000..ee9b493 --- /dev/null +++ b/examples/gpt-2/quantize.cpp @@ -0,0 +1,325 @@ +#include "ggml/ggml.h" + +#include "utils.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +// TODO: move somewhere else +#define QK 32 + +// default hparams (GPT-2 117M) +struct gpt2_hparams { + int32_t n_vocab = 50257; + int32_t n_ctx = 1024; + int32_t n_embd = 768; + int32_t n_head = 12; + int32_t n_layer = 12; + int32_t f16 = 1; +}; + +// quantize a model +bool gpt2_model_quantize(const std::string & fname_inp, const std::string & fname_out, int itype) { + ggml_type type = GGML_TYPE_Q4_1; + + switch (itype) { + case 2: type = GGML_TYPE_Q4_0; break; + case 3: type = GGML_TYPE_Q4_1; break; + default: fprintf(stderr, "%s: invalid quantization type %d\n", __func__, itype); return 1; + }; + + if (type != GGML_TYPE_Q4_0 && type != GGML_TYPE_Q4_1) { + fprintf(stderr, "%s: invalid quantization type %d\n", __func__, type); + return false; + } + + gpt_vocab vocab; + + printf("%s: loading model from '%s'\n", __func__, fname_inp.c_str()); + + auto finp = std::ifstream(fname_inp, std::ios::binary); + if (!finp) { + fprintf(stderr, "%s: failed to open '%s' for reading\n", __func__, fname_inp.c_str()); + return false; + } + + auto fout = std::ofstream(fname_out, std::ios::binary); + if (!fout) { + fprintf(stderr, "%s: failed to open '%s' for writing\n", __func__, fname_out.c_str()); + return false; + } + + // verify magic + { + uint32_t magic; + finp.read((char *) &magic, sizeof(magic)); + if (magic != 0x67676d6c) { + fprintf(stderr, "%s: invalid model file '%s' (bad magic)\n", __func__, fname_inp.c_str()); + return false; + } + + fout.write((char *) &magic, sizeof(magic)); + } + + gpt2_hparams hparams; + + // load hparams + { + finp.read((char *) &hparams.n_vocab, sizeof(hparams.n_vocab)); + finp.read((char *) &hparams.n_ctx, sizeof(hparams.n_ctx)); + finp.read((char *) &hparams.n_embd, sizeof(hparams.n_embd)); + finp.read((char *) &hparams.n_head, sizeof(hparams.n_head)); + finp.read((char *) &hparams.n_layer, sizeof(hparams.n_layer)); + finp.read((char 
*) &hparams.f16, sizeof(hparams.f16)); + + printf("%s: n_vocab = %d\n", __func__, hparams.n_vocab); + printf("%s: n_ctx = %d\n", __func__, hparams.n_ctx); + printf("%s: n_embd = %d\n", __func__, hparams.n_embd); + printf("%s: n_head = %d\n", __func__, hparams.n_head); + printf("%s: n_layer = %d\n", __func__, hparams.n_layer); + printf("%s: f16 = %d\n", __func__, hparams.f16); + + fout.write((char *) &hparams.n_vocab, sizeof(hparams.n_vocab)); + fout.write((char *) &hparams.n_ctx, sizeof(hparams.n_ctx)); + fout.write((char *) &hparams.n_embd, sizeof(hparams.n_embd)); + fout.write((char *) &hparams.n_head, sizeof(hparams.n_head)); + fout.write((char *) &hparams.n_layer, sizeof(hparams.n_layer)); + fout.write((char *) &itype, sizeof(hparams.f16)); + } + + // load vocab + { + int32_t n_vocab = 0; + finp.read ((char *) &n_vocab, sizeof(n_vocab)); + fout.write((char *) &n_vocab, sizeof(n_vocab)); + + if (n_vocab != hparams.n_vocab) { + fprintf(stderr, "%s: invalid model file '%s' (bad vocab size %d != %d)\n", + __func__, fname_inp.c_str(), n_vocab, hparams.n_vocab); + return false; + } + + std::string word; + for (int i = 0; i < n_vocab; i++) { + uint32_t len; + finp.read ((char *) &len, sizeof(len)); + fout.write((char *) &len, sizeof(len)); + + word.resize(len); + finp.read ((char *) word.data(), len); + fout.write((char *) word.data(), len); + + vocab.token_to_id[word] = i; + vocab.id_to_token[i] = word; + } + } + + // load weights + { + size_t total_size_org = 0; + size_t total_size_new = 0; + + std::vector work; + + std::vector data_u8; + std::vector data_f16; + std::vector data_f32; + + std::vector hist_all(1 << 4, 0); + + while (true) { + int32_t n_dims; + int32_t length; + int32_t ftype; + + finp.read(reinterpret_cast(&n_dims), sizeof(n_dims)); + finp.read(reinterpret_cast(&length), sizeof(length)); + finp.read(reinterpret_cast(&ftype), sizeof(ftype)); + + if (finp.eof()) { + break; + } + + int32_t nelements = 1; + int32_t ne[2] = { 1, 1 }; + for (int i = 0; i < n_dims; ++i) { + finp.read (reinterpret_cast(&ne[i]), sizeof(ne[i])); + nelements *= ne[i]; + } + + std::string name(length, 0); + finp.read (&name[0], length); + + { + static const char * ftype_str[] = { "f32", "f16", "q4_0", "q4_1", }; + printf("%24s - [%5d, %5d], type = %6s ", name.data(), ne[0], ne[1], ftype_str[ftype]); + } + + // regexes of tensor names to be quantized + const std::vector k_names = { + "model/wte", + "model/h.*/attn/c_attn/w", + "model/h.*/attn/c_proj/w", + "model/h.*/mlp/c_fc/w", + "model/h.*/mlp/c_proj/w", + }; + + bool quantize = false; + for (const auto & s : k_names) { + if (std::regex_match(name, std::regex(s))) { + quantize = true; + break; + } + } + + if (quantize) { + if (ftype != 0 && ftype != 1) { + fprintf(stderr, "%s: unsupported ftype %d for integer quantization\n", __func__, ftype); + return false; + } + + if (ftype == 1) { + data_f16.resize(nelements); + finp.read(reinterpret_cast(data_f16.data()), nelements * sizeof(ggml_fp16_t)); + data_f32.resize(nelements); + for (int i = 0; i < nelements; ++i) { + data_f32[i] = ggml_fp16_to_fp32(data_f16[i]); + } + } else { + data_f32.resize(nelements); + finp.read(reinterpret_cast(data_f32.data()), nelements * sizeof(float)); + } + + ftype = itype; + } else { + const int bpe = (ftype == 0) ? 
sizeof(float) : sizeof(uint16_t); + + data_u8.resize(nelements*bpe); + finp.read(reinterpret_cast(data_u8.data()), nelements * bpe); + } + + fout.write(reinterpret_cast(&n_dims), sizeof(n_dims)); + fout.write(reinterpret_cast(&length), sizeof(length)); + fout.write(reinterpret_cast(&ftype), sizeof(ftype)); + for (int i = 0; i < n_dims; ++i) { + fout.write(reinterpret_cast(&ne[i]), sizeof(ne[i])); + } + fout.write(&name[0], length); + + if (quantize) { + printf("quantizing .. "); + work.resize(nelements); // for quantization + + size_t cur_size = 0; + std::vector hist_cur(1 << 4, 0); + + switch (type) { + case GGML_TYPE_Q4_0: + { + cur_size = ggml_quantize_q4_0(data_f32.data(), work.data(), nelements, ne[0], QK, hist_cur.data()); + } break; + case GGML_TYPE_Q4_1: + { + cur_size = ggml_quantize_q4_1(data_f32.data(), work.data(), nelements, ne[0], QK, hist_cur.data()); + } break; + default: + { + fprintf(stderr, "%s: unsupported quantization type %d\n", __func__, type); + return false; + } + } + + fout.write(reinterpret_cast(work.data()), cur_size); + total_size_new += cur_size; + + printf("size = %8.2f MB -> %8.2f MB | hist: ", nelements * sizeof(float)/1024.0/1024.0, cur_size/1024.0/1024.0); + for (int i = 0; i < hist_cur.size(); ++i) { + hist_all[i] += hist_cur[i]; + } + + for (int i = 0; i < hist_cur.size(); ++i) { + printf("%5.3f ", hist_cur[i] / (float)nelements); + } + printf("\n"); + } else { + printf("size = %8.3f MB\n", data_u8.size()/1024.0/1024.0); + fout.write(reinterpret_cast(data_u8.data()), data_u8.size()); + total_size_new += data_u8.size(); + } + + total_size_org += nelements * sizeof(float); + } + + printf("%s: model size = %8.2f MB\n", __func__, total_size_org/1024.0/1024.0); + printf("%s: quant size = %8.2f MB\n", __func__, total_size_new/1024.0/1024.0); + + { + int64_t sum_all = 0; + for (int i = 0; i < hist_all.size(); ++i) { + sum_all += hist_all[i]; + } + + printf("%s: hist: ", __func__); + for (int i = 0; i < hist_all.size(); ++i) { + printf("%5.3f ", hist_all[i] / (float)sum_all); + } + printf("\n"); + } + } + + finp.close(); + fout.close(); + + return true; +} + +// usage: +// ./gpt-2-quantize models/gpt-2-117M/ggml-model.bin models/gpt-2-117M/ggml-model-quant.bin type +// +int main(int argc, char ** argv) { + if (argc != 4) { + fprintf(stderr, "usage: %s model-f32.bin model-quant.bin type\n", argv[0]); + fprintf(stderr, " type = 2 - q4_0\n"); + fprintf(stderr, " type = 3 - q4_1\n"); + return 1; + } + + const std::string fname_inp = argv[1]; + const std::string fname_out = argv[2]; + + const int itype = atoi(argv[3]); + + const int64_t t_main_start_us = ggml_time_us(); + + int64_t t_quantize_us = 0; + + // load the model + { + const int64_t t_start_us = ggml_time_us(); + + if (!gpt2_model_quantize(fname_inp, fname_out, itype)) { + fprintf(stderr, "%s: failed to quantize model from '%s'\n", __func__, fname_inp.c_str()); + return 1; + } + + t_quantize_us = ggml_time_us() - t_start_us; + } + + // report timing + { + const int64_t t_main_end_us = ggml_time_us(); + + printf("\n"); + printf("%s: quantize time = %8.2f ms\n", __func__, t_quantize_us/1000.0f); + printf("%s: total time = %8.2f ms\n", __func__, (t_main_end_us - t_main_start_us)/1000.0f); + } + + return 0; +} diff --git a/examples/gpt-j/CMakeLists.txt b/examples/gpt-j/CMakeLists.txt index 4199a3f..390746d 100644 --- a/examples/gpt-j/CMakeLists.txt +++ b/examples/gpt-j/CMakeLists.txt @@ -4,3 +4,10 @@ set(TEST_TARGET gpt-j) add_executable(${TEST_TARGET} main.cpp) target_link_libraries(${TEST_TARGET} PRIVATE ggml 
ggml_utils) + +# +# gpt-j-quantize + +set(TEST_TARGET gpt-j-quantize) +add_executable(${TEST_TARGET} quantize.cpp) +target_link_libraries(${TEST_TARGET} PRIVATE ggml ggml_utils) diff --git a/examples/gpt-j/convert-h5-to-ggml.py b/examples/gpt-j/convert-h5-to-ggml.py index 310e60e..e254f2c 100644 --- a/examples/gpt-j/convert-h5-to-ggml.py +++ b/examples/gpt-j/convert-h5-to-ggml.py @@ -47,8 +47,10 @@ def bytes_to_unicode(): cs = [chr(n) for n in cs] return dict(zip(bs, cs)) -if len(sys.argv) < 2: +if len(sys.argv) < 3: print("Usage: convert-h5-to-ggml.py dir-model [use-f32]\n") + print(" ftype == 0 -> float32") + print(" ftype == 1 -> float16") sys.exit(1) # output in the same directory as the model @@ -64,11 +66,21 @@ with open(dir_model + "/added_tokens.json", "r") as f: with open(dir_model + "/config.json", "r") as f: hparams = json.load(f) -# use 16-bit or 32-bit floats -use_f16 = True +# possible data types +# ftype == 0 -> float32 +# ftype == 1 -> float16 +# +# map from ftype to string +ftype_str = ["f32", "f16"] + +ftype = 1 if len(sys.argv) > 2: - use_f16 = False - fname_out = sys.argv[1] + "/ggml-model-f32.bin" + ftype = int(sys.argv[2]) + if ftype < 0 or ftype > 1: + print("Invalid ftype: " + str(ftype)) + sys.exit(1) + fname_out = sys.argv[1] + "/ggml-model-" + ftype_str[ftype] + ".bin" + model = GPTJForCausalLM.from_pretrained(dir_model, low_cpu_mem_usage=True) #print (model) @@ -85,7 +97,7 @@ fout.write(struct.pack("i", hparams["n_embd"])) fout.write(struct.pack("i", hparams["n_head"])) fout.write(struct.pack("i", hparams["n_layer"])) fout.write(struct.pack("i", hparams["rotary_dim"])) -fout.write(struct.pack("i", use_f16)) +fout.write(struct.pack("i", ftype)) byte_encoder = bytes_to_unicode() byte_decoder = {v:k for k, v in byte_encoder.items()} @@ -114,34 +126,40 @@ for name in list_vars.keys(): n_dims = len(data.shape); # ftype == 0 -> float32, ftype == 1 -> float16 - ftype = 0; - if use_f16: + ftype_cur = 0; + if ftype != 0: if name[-7:] == ".weight" and n_dims == 2: print(" Converting to float16") data = data.astype(np.float16) - ftype = 1 + ftype_cur = 1 else: print(" Converting to float32") data = data.astype(np.float32) - ftype = 0 + ftype_cur = 0 + else: + if data.dtype != np.float32: + print(" Converting to float32") + data = data.astype(np.float32) + ftype_cur = 0 # for efficiency - transpose these matrices: - # "transformer.h.*.mlp.fc_in.weight - # "transformer.h.*.attn.out_proj.weight + # (note - with latest ggml this is no longer more efficient, so disabling it) + # "transformer.h.*.mlp.fc_in.weight" + # "transformer.h.*.attn.out_proj.weight" # "transformer.h.*.attn.q_proj.weight" # "transformer.h.*.attn.k_proj.weight" # "transformer.h.*.attn.v_proj.weight" - if name.endswith(".mlp.fc_in.weight") or \ - name.endswith(".attn.out_proj.weight") or \ - name.endswith(".attn.q_proj.weight") or \ - name.endswith(".attn.k_proj.weight") or \ - name.endswith(".attn.v_proj.weight"): - print(" Transposing") - data = data.transpose() + #if name.endswith(".mlp.fc_in.weight") or \ + # name.endswith(".attn.out_proj.weight") or \ + # name.endswith(".attn.q_proj.weight") or \ + # name.endswith(".attn.k_proj.weight") or \ + # name.endswith(".attn.v_proj.weight"): + # print(" Transposing") + # data = data.transpose() # header str = name.encode('utf-8') - fout.write(struct.pack("iii", n_dims, len(str), ftype)) + fout.write(struct.pack("iii", n_dims, len(str), ftype_cur)) for i in range(n_dims): fout.write(struct.pack("i", data.shape[n_dims - 1 - i])) fout.write(str); diff --git 
a/examples/gpt-j/main.cpp b/examples/gpt-j/main.cpp index 63248d7..b71b907 100644 --- a/examples/gpt-j/main.cpp +++ b/examples/gpt-j/main.cpp @@ -130,9 +130,23 @@ bool gptj_model_load(const std::string & fname, gptj_model & model, gpt_vocab & } } - // for the big tensors, we have the option to store the data in 16-bit floats + // for the big tensors, we have the option to store the data in 16-bit floats or quantized // in order to save memory and also to speed up the computation - const ggml_type wtype = model.hparams.f16 ? GGML_TYPE_F16 : GGML_TYPE_F32; + ggml_type wtype = GGML_TYPE_COUNT; + switch (model.hparams.f16) { + case 0: wtype = GGML_TYPE_F32; break; + case 1: wtype = GGML_TYPE_F16; break; + case 2: wtype = GGML_TYPE_Q4_0; break; + case 3: wtype = GGML_TYPE_Q4_1; break; + default: + { + fprintf(stderr, "%s: invalid model file '%s' (bad f16 value %d)\n", + __func__, fname.c_str(), model.hparams.f16); + return false; + } + } + + const ggml_type wtype2 = GGML_TYPE_F32; auto & ctx = model.ctx; @@ -146,31 +160,31 @@ bool gptj_model_load(const std::string & fname, gptj_model & model, gpt_vocab & const int n_ctx = hparams.n_ctx; const int n_vocab = hparams.n_vocab; - ctx_size += n_embd*ggml_type_size(GGML_TYPE_F32); // ln_f_g - ctx_size += n_embd*ggml_type_size(GGML_TYPE_F32); // ln_f_b + ctx_size += n_embd*ggml_type_sizef(GGML_TYPE_F32); // ln_f_g + ctx_size += n_embd*ggml_type_sizef(GGML_TYPE_F32); // ln_f_b - ctx_size += n_embd*n_vocab*ggml_type_size(wtype); // wte + ctx_size += n_embd*n_vocab*ggml_type_sizef(wtype); // wte - ctx_size += n_embd*n_vocab*ggml_type_size(wtype); // lmh_g - ctx_size += n_vocab*ggml_type_size(GGML_TYPE_F32); // lmh_b + ctx_size += n_embd*n_vocab*ggml_type_sizef(wtype); // lmh_g + ctx_size += n_vocab*ggml_type_sizef(GGML_TYPE_F32); // lmh_b - ctx_size += n_layer*(n_embd*ggml_type_size(GGML_TYPE_F32)); // ln_1_g - ctx_size += n_layer*(n_embd*ggml_type_size(GGML_TYPE_F32)); // ln_1_b + ctx_size += n_layer*(n_embd*ggml_type_sizef(GGML_TYPE_F32)); // ln_1_g + ctx_size += n_layer*(n_embd*ggml_type_sizef(GGML_TYPE_F32)); // ln_1_b - ctx_size += n_layer*(n_embd*n_embd*ggml_type_size(wtype)); // c_attn_q_proj_w - ctx_size += n_layer*(n_embd*n_embd*ggml_type_size(wtype)); // c_attn_k_proj_w - ctx_size += n_layer*(n_embd*n_embd*ggml_type_size(wtype)); // c_attn_v_proj_w + ctx_size += n_layer*(n_embd*n_embd*ggml_type_sizef(wtype)); // c_attn_q_proj_w + ctx_size += n_layer*(n_embd*n_embd*ggml_type_sizef(wtype)); // c_attn_k_proj_w + ctx_size += n_layer*(n_embd*n_embd*ggml_type_sizef(wtype)); // c_attn_v_proj_w - ctx_size += n_layer*(n_embd*n_embd*ggml_type_size(wtype)); // c_attn_proj_w + ctx_size += n_layer*(n_embd*n_embd*ggml_type_sizef(wtype)); // c_attn_proj_w - ctx_size += n_layer*(4*n_embd*n_embd*ggml_type_size(wtype)); // c_mlp_fc_w - ctx_size += n_layer*( 4*n_embd*ggml_type_size(GGML_TYPE_F32)); // c_mlp_fc_b + ctx_size += n_layer*(4*n_embd*n_embd*ggml_type_sizef(wtype)); // c_mlp_fc_w + ctx_size += n_layer*( 4*n_embd*ggml_type_sizef(GGML_TYPE_F32)); // c_mlp_fc_b - ctx_size += n_layer*(4*n_embd*n_embd*ggml_type_size(wtype)); // c_mlp_proj_w_trans - ctx_size += n_layer*( n_embd*ggml_type_size(GGML_TYPE_F32)); // c_mlp_proj_b + ctx_size += n_layer*(4*n_embd*n_embd*ggml_type_sizef(wtype)); // c_mlp_proj_w_trans + ctx_size += n_layer*( n_embd*ggml_type_sizef(GGML_TYPE_F32)); // c_mlp_proj_b - ctx_size += n_ctx*n_layer*n_embd*ggml_type_size(GGML_TYPE_F32); // memory_k - ctx_size += n_ctx*n_layer*n_embd*ggml_type_size(GGML_TYPE_F32); // memory_v + ctx_size += 
n_ctx*n_layer*n_embd*ggml_type_sizef(GGML_TYPE_F32); // memory_k + ctx_size += n_ctx*n_layer*n_embd*ggml_type_sizef(GGML_TYPE_F32); // memory_v ctx_size += (5 + 10*n_layer)*256; // object overhead @@ -231,7 +245,7 @@ bool gptj_model_load(const std::string & fname, gptj_model & model, gpt_vocab & layer.c_attn_proj_w = ggml_new_tensor_2d(ctx, wtype, n_embd, n_embd); - layer.c_mlp_fc_w = ggml_new_tensor_2d(ctx, wtype, 4*n_embd, n_embd); + layer.c_mlp_fc_w = ggml_new_tensor_2d(ctx, wtype, n_embd, 4*n_embd); layer.c_mlp_fc_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 4*n_embd); layer.c_mlp_proj_w_trans = ggml_new_tensor_2d(ctx, wtype, 4*n_embd, n_embd); @@ -321,9 +335,26 @@ bool gptj_model_load(const std::string & fname, gptj_model & model, gpt_vocab & return false; } - const size_t bpe = tensor->type == GGML_TYPE_I8 ? 1 : (ftype == 0) ? sizeof(float) : sizeof(ggml_fp16_t); + if (0) { + static const char * ftype_str[] = { "f32", "f16", "q4_0", "q4_1", }; + printf("%24s - [%5d, %5d], type = %6s, %6.2f MB, %9zu bytes\n", name.data(), ne[0], ne[1], ftype_str[ftype], ggml_nbytes(tensor)/1024.0/1024.0, ggml_nbytes(tensor)); + } - if (nelements*bpe != ggml_nbytes(tensor)) { + size_t bpe = 0; + + switch (ftype) { + case 0: bpe = ggml_type_size(GGML_TYPE_F32); break; + case 1: bpe = ggml_type_size(GGML_TYPE_F16); break; + case 2: bpe = ggml_type_size(GGML_TYPE_Q4_0); assert(ne[0] % 64 == 0); break; + case 3: bpe = ggml_type_size(GGML_TYPE_Q4_1); assert(ne[0] % 64 == 0); break; + default: + { + fprintf(stderr, "%s: unknown ftype %d in model file\n", __func__, ftype); + return false; + } + }; + + if ((nelements*bpe)/ggml_blck_size(tensor->type) != ggml_nbytes(tensor)) { fprintf(stderr, "%s: tensor '%s' has wrong size in model file: got %zu, expected %zu\n", __func__, name.data(), ggml_nbytes(tensor), nelements*bpe); return false; @@ -428,9 +459,9 @@ bool gptj_eval( // self-attention { - struct ggml_tensor * Qcur = ggml_mul_mat(ctx0, ggml_transpose(ctx0, model.layers[il].c_attn_q_proj_w), cur); - struct ggml_tensor * Kcur = ggml_mul_mat(ctx0, ggml_transpose(ctx0, model.layers[il].c_attn_k_proj_w), cur); - struct ggml_tensor * Vcur = ggml_mul_mat(ctx0, ggml_transpose(ctx0, model.layers[il].c_attn_v_proj_w), cur); + struct ggml_tensor * Qcur = ggml_mul_mat(ctx0, model.layers[il].c_attn_q_proj_w, cur); + struct ggml_tensor * Kcur = ggml_mul_mat(ctx0, model.layers[il].c_attn_k_proj_w, cur); + struct ggml_tensor * Vcur = ggml_mul_mat(ctx0, model.layers[il].c_attn_v_proj_w, cur); // store key and value to memory if (N >= 1) { @@ -498,7 +529,7 @@ bool gptj_eval( // projection (no bias) cur = ggml_mul_mat(ctx0, - ggml_transpose(ctx0, model.layers[il].c_attn_proj_w), + model.layers[il].c_attn_proj_w, cur); } @@ -509,7 +540,7 @@ bool gptj_eval( { // note here we pass inpSA instead of cur cur = ggml_mul_mat(ctx0, - ggml_transpose(ctx0, model.layers[il].c_mlp_fc_w), + model.layers[il].c_mlp_fc_w, inpSA); cur = ggml_add(ctx0, diff --git a/examples/gpt-j/quantize.cpp b/examples/gpt-j/quantize.cpp new file mode 100644 index 0000000..ff54fea --- /dev/null +++ b/examples/gpt-j/quantize.cpp @@ -0,0 +1,327 @@ +#include "ggml/ggml.h" + +#include "utils.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +// TODO: move somewhere else +#define QK 32 + +// default hparams (GPT-J 6B) +struct gptj_hparams { + int32_t n_vocab = 50400; + int32_t n_ctx = 2048; + int32_t n_embd = 4096; + int32_t n_head = 16; + int32_t n_layer = 28; + int32_t n_rot = 64; + int32_t f16 = 1; +}; + +// quantize 
a model +bool gptj_model_quantize(const std::string & fname_inp, const std::string & fname_out, int itype) { + ggml_type type = GGML_TYPE_Q4_1; + + switch (itype) { + case 2: type = GGML_TYPE_Q4_0; break; + case 3: type = GGML_TYPE_Q4_1; break; + default: fprintf(stderr, "%s: invalid quantization type %d\n", __func__, itype); return 1; + }; + + if (type != GGML_TYPE_Q4_0 && type != GGML_TYPE_Q4_1) { + fprintf(stderr, "%s: invalid quantization type %d\n", __func__, type); + return false; + } + + gpt_vocab vocab; + + printf("%s: loading model from '%s'\n", __func__, fname_inp.c_str()); + + auto finp = std::ifstream(fname_inp, std::ios::binary); + if (!finp) { + fprintf(stderr, "%s: failed to open '%s' for reading\n", __func__, fname_inp.c_str()); + return false; + } + + auto fout = std::ofstream(fname_out, std::ios::binary); + if (!fout) { + fprintf(stderr, "%s: failed to open '%s' for writing\n", __func__, fname_out.c_str()); + return false; + } + + // verify magic + { + uint32_t magic; + finp.read((char *) &magic, sizeof(magic)); + if (magic != 0x67676d6c) { + fprintf(stderr, "%s: invalid model file '%s' (bad magic)\n", __func__, fname_inp.c_str()); + return false; + } + + fout.write((char *) &magic, sizeof(magic)); + } + + gptj_hparams hparams; + + // load hparams + { + finp.read((char *) &hparams.n_vocab, sizeof(hparams.n_vocab)); + finp.read((char *) &hparams.n_ctx, sizeof(hparams.n_ctx)); + finp.read((char *) &hparams.n_embd, sizeof(hparams.n_embd)); + finp.read((char *) &hparams.n_head, sizeof(hparams.n_head)); + finp.read((char *) &hparams.n_layer, sizeof(hparams.n_layer)); + finp.read((char *) &hparams.n_rot, sizeof(hparams.n_rot)); + finp.read((char *) &hparams.f16, sizeof(hparams.f16)); + + printf("%s: n_vocab = %d\n", __func__, hparams.n_vocab); + printf("%s: n_ctx = %d\n", __func__, hparams.n_ctx); + printf("%s: n_embd = %d\n", __func__, hparams.n_embd); + printf("%s: n_head = %d\n", __func__, hparams.n_head); + printf("%s: n_layer = %d\n", __func__, hparams.n_layer); + printf("%s: f16 = %d\n", __func__, hparams.f16); + + fout.write((char *) &hparams.n_vocab, sizeof(hparams.n_vocab)); + fout.write((char *) &hparams.n_ctx, sizeof(hparams.n_ctx)); + fout.write((char *) &hparams.n_embd, sizeof(hparams.n_embd)); + fout.write((char *) &hparams.n_head, sizeof(hparams.n_head)); + fout.write((char *) &hparams.n_layer, sizeof(hparams.n_layer)); + fout.write((char *) &hparams.n_rot, sizeof(hparams.n_rot)); + fout.write((char *) &itype, sizeof(hparams.f16)); + } + + // load vocab + { + int32_t n_vocab = 0; + finp.read ((char *) &n_vocab, sizeof(n_vocab)); + fout.write((char *) &n_vocab, sizeof(n_vocab)); + + if (n_vocab != hparams.n_vocab) { + fprintf(stderr, "%s: invalid model file '%s' (bad vocab size %d != %d)\n", + __func__, fname_inp.c_str(), n_vocab, hparams.n_vocab); + return false; + } + + std::string word; + for (int i = 0; i < n_vocab; i++) { + uint32_t len; + finp.read ((char *) &len, sizeof(len)); + fout.write((char *) &len, sizeof(len)); + + word.resize(len); + finp.read ((char *) word.data(), len); + fout.write((char *) word.data(), len); + + vocab.token_to_id[word] = i; + vocab.id_to_token[i] = word; + } + } + + // load weights + { + size_t total_size_org = 0; + size_t total_size_new = 0; + + std::vector work; + + std::vector data_u8; + std::vector data_f16; + std::vector data_f32; + + std::vector hist_all(1 << 4, 0); + + while (true) { + int32_t n_dims; + int32_t length; + int32_t ftype; + + finp.read(reinterpret_cast(&n_dims), sizeof(n_dims)); + 
finp.read(reinterpret_cast(&length), sizeof(length)); + finp.read(reinterpret_cast(&ftype), sizeof(ftype)); + + if (finp.eof()) { + break; + } + + int32_t nelements = 1; + int32_t ne[2] = { 1, 1 }; + for (int i = 0; i < n_dims; ++i) { + finp.read (reinterpret_cast(&ne[i]), sizeof(ne[i])); + nelements *= ne[i]; + } + + std::string name(length, 0); + finp.read (&name[0], length); + + { + static const char * ftype_str[] = { "f32", "f16", "q4_0", "q4_1", }; + printf("%48s - [%5d, %5d], type = %6s ", name.data(), ne[0], ne[1], ftype_str[ftype]); + } + + // regexes of tensor names to be quantized + const std::vector k_names = { + ".*weight", + }; + + bool quantize = false; + for (const auto & s : k_names) { + if (std::regex_match(name, std::regex(s))) { + quantize = true; + break; + } + } + + // quantize only 2D tensors + quantize &= (n_dims == 2); + + if (quantize) { + if (ftype != 0 && ftype != 1) { + fprintf(stderr, "%s: unsupported ftype %d for integer quantization\n", __func__, ftype); + return false; + } + + if (ftype == 1) { + data_f16.resize(nelements); + finp.read(reinterpret_cast(data_f16.data()), nelements * sizeof(ggml_fp16_t)); + data_f32.resize(nelements); + for (int i = 0; i < nelements; ++i) { + data_f32[i] = ggml_fp16_to_fp32(data_f16[i]); + } + } else { + data_f32.resize(nelements); + finp.read(reinterpret_cast(data_f32.data()), nelements * sizeof(float)); + } + + ftype = itype; + } else { + const int bpe = (ftype == 0) ? sizeof(float) : sizeof(uint16_t); + + data_u8.resize(nelements*bpe); + finp.read(reinterpret_cast(data_u8.data()), nelements * bpe); + } + + fout.write(reinterpret_cast(&n_dims), sizeof(n_dims)); + fout.write(reinterpret_cast(&length), sizeof(length)); + fout.write(reinterpret_cast(&ftype), sizeof(ftype)); + for (int i = 0; i < n_dims; ++i) { + fout.write(reinterpret_cast(&ne[i]), sizeof(ne[i])); + } + fout.write(&name[0], length); + + if (quantize) { + printf("quantizing .. 
"); + work.resize(nelements); // for quantization + + size_t cur_size = 0; + std::vector hist_cur(1 << 4, 0); + + switch (type) { + case GGML_TYPE_Q4_0: + { + cur_size = ggml_quantize_q4_0(data_f32.data(), work.data(), nelements, ne[0], QK, hist_cur.data()); + } break; + case GGML_TYPE_Q4_1: + { + cur_size = ggml_quantize_q4_1(data_f32.data(), work.data(), nelements, ne[0], QK, hist_cur.data()); + } break; + default: + { + fprintf(stderr, "%s: unsupported quantization type %d\n", __func__, type); + return false; + } + } + + fout.write(reinterpret_cast(work.data()), cur_size); + total_size_new += cur_size; + + printf("size = %8.2f MB -> %8.2f MB | hist: ", nelements * sizeof(float)/1024.0/1024.0, cur_size/1024.0/1024.0); + for (int i = 0; i < hist_cur.size(); ++i) { + hist_all[i] += hist_cur[i]; + } + + for (int i = 0; i < hist_cur.size(); ++i) { + printf("%5.3f ", hist_cur[i] / (float)nelements); + } + printf("\n"); + } else { + printf("size = %8.3f MB\n", data_u8.size()/1024.0/1024.0); + fout.write(reinterpret_cast(data_u8.data()), data_u8.size()); + total_size_new += data_u8.size(); + } + + total_size_org += nelements * sizeof(float); + } + + printf("%s: model size = %8.2f MB\n", __func__, total_size_org/1024.0/1024.0); + printf("%s: quant size = %8.2f MB\n", __func__, total_size_new/1024.0/1024.0); + + { + int64_t sum_all = 0; + for (int i = 0; i < hist_all.size(); ++i) { + sum_all += hist_all[i]; + } + + printf("%s: hist: ", __func__); + for (int i = 0; i < hist_all.size(); ++i) { + printf("%5.3f ", hist_all[i] / (float)sum_all); + } + printf("\n"); + } + } + + finp.close(); + fout.close(); + + return true; +} + +// usage: +// ./gpt-2-quantize models/gpt-2-117M/ggml-model.bin models/gpt-2-117M/ggml-model-quant.bin type +// +int main(int argc, char ** argv) { + if (argc != 4) { + fprintf(stderr, "usage: %s model-f32.bin model-quant.bin type\n", argv[0]); + fprintf(stderr, " type = 2 - q4_0\n"); + fprintf(stderr, " type = 3 - q4_1\n"); + return 1; + } + + const std::string fname_inp = argv[1]; + const std::string fname_out = argv[2]; + + const int itype = atoi(argv[3]); + + const int64_t t_main_start_us = ggml_time_us(); + + int64_t t_quantize_us = 0; + + // load the model + { + const int64_t t_start_us = ggml_time_us(); + + if (!gptj_model_quantize(fname_inp, fname_out, itype)) { + fprintf(stderr, "%s: failed to quantize model from '%s'\n", __func__, fname_inp.c_str()); + return 1; + } + + t_quantize_us = ggml_time_us() - t_start_us; + } + + // report timing + { + const int64_t t_main_end_us = ggml_time_us(); + + printf("\n"); + printf("%s: quantize time = %8.2f ms\n", __func__, t_quantize_us/1000.0f); + printf("%s: total time = %8.2f ms\n", __func__, (t_main_end_us - t_main_start_us)/1000.0f); + } + + return 0; +} diff --git a/examples/utils.cpp b/examples/utils.cpp index 30057b7..402a1fd 100644 --- a/examples/utils.cpp +++ b/examples/utils.cpp @@ -328,3 +328,113 @@ gpt_vocab::id gpt_sample_top_k_top_p( return logits_id[idx].second; } + +size_t ggml_quantize_q4_0(float * src, void * dst, int n, int k, int qk, int64_t * hist) { + const int nb = k / qk; + const size_t row_size = nb*(sizeof(float) + sizeof(uint8_t)*qk/2); + + assert(k % qk == 0); + + uint8_t pp[qk/2]; + + char * pdst = (char *) dst; + + for (int j = 0; j < n; j += k) { + float * pd = (float *) (pdst + (j/k)*row_size); + uint8_t * pb = (uint8_t *) (pd + nb); + + for (int i = 0; i < nb; i++) { + float amax = 0.0f; // absolute max + + { + for (int l = 0; l < qk; l++) { + const float v = src[j + i*qk + l]; + amax = 
std::max(amax, fabsf(v)); + } + + const float d = amax / ((1 << 3) - 1); + const float id = d ? 1.0f/d : 0.0f; + + pd[i] = d; + + for (int l = 0; l < qk; l += 2) { + const float v0 = (src[j + i*qk + l + 0])*id; + const float v1 = (src[j + i*qk + l + 1])*id; + + const uint8_t vi0 = ((int8_t) (round(v0))) + 8; + const uint8_t vi1 = ((int8_t) (round(v1))) + 8; + + assert(vi0 >= 0 && vi0 < 16); + assert(vi1 >= 0 && vi1 < 16); + + hist[vi0]++; + hist[vi1]++; + + pp[l/2] = vi0 | (vi1 << 4); + } + + memcpy(pb + i*qk/2, pp, sizeof(pp)); + } + } + } + + return (n/k)*row_size; +} + +size_t ggml_quantize_q4_1(float * src, void * dst, int n, int k, int qk, int64_t * hist) { + const int nb = k / qk; + const size_t row_size = nb*(2*sizeof(float) + sizeof(uint8_t)*qk/2); + + assert(k % qk == 0); + + uint8_t pp[qk/2]; + + char * pdst = (char *) dst; + + for (int j = 0; j < n; j += k) { + float * pm = (float *) (pdst + (j/k)*row_size); + float * pd = (float *) (pm + nb); + uint8_t * pb = (uint8_t *) (pd + nb); + + //printf("n = %d, k = %d, nb = %d, row_size = %d, j = %d, pm = %p, pd = %p, pb = %p\n", n, k, nb, row_size, j, pm, pd, pb); + + for (int i = 0; i < nb; i++) { + float min = std::numeric_limits::max(); + float max = std::numeric_limits::min(); + + { + for (int l = 0; l < qk; l++) { + const float v = src[j + i*qk + l]; + if (v < min) min = v; + if (v > max) max = v; + } + + const float d = (max - min) / ((1 << 4) - 1); + const float id = d ? 1.0f/d : 0.0f; + + pm[i] = min; + pd[i] = d; + + for (int l = 0; l < qk; l += 2) { + const float v0 = (src[j + i*qk + l + 0] - min)*id; + const float v1 = (src[j + i*qk + l + 1] - min)*id; + + const uint8_t vi0 = round(v0); + const uint8_t vi1 = round(v1); + + assert(vi0 >= 0 && vi0 < 16); + assert(vi1 >= 0 && vi1 < 16); + + hist[vi0]++; + hist[vi1]++; + + pp[l/2] = vi0 | (vi1 << 4); + } + + memcpy(pb + i*qk/2, pp, sizeof(pp)); + } + } + } + + return (n/k)*row_size; +} diff --git a/examples/utils.h b/examples/utils.h index d091d3d..f7d0dbc 100644 --- a/examples/utils.h +++ b/examples/utils.h @@ -82,3 +82,9 @@ gpt_vocab::id gpt_sample_top_k_top_p( double temp, std::mt19937 & rng); +// +// Quantization +// + +size_t ggml_quantize_q4_0(float * src, void * dst, int n, int k, int qk, int64_t * hist); +size_t ggml_quantize_q4_1(float * src, void * dst, int n, int k, int qk, int64_t * hist); diff --git a/examples/whisper/CMakeLists.txt b/examples/whisper/CMakeLists.txt index c8fa83a..c7f5ff5 100644 --- a/examples/whisper/CMakeLists.txt +++ b/examples/whisper/CMakeLists.txt @@ -13,3 +13,10 @@ set(TEST_TARGET whisper) add_executable(${TEST_TARGET} main.cpp common.cpp) target_link_libraries(${TEST_TARGET} PRIVATE whisper-cpp) target_include_directories(${TEST_TARGET} PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/..) 
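
For reference, a self-contained sketch of the Q4_0 scheme implemented by ggml_quantize_q4_0() above, assuming the same QK = 32 block size: each block is reduced to one fp32 scale plus 16 bytes of packed 4-bit values (Q4_1 additionally stores the block minimum). The sample data and the round-trip error check are illustrative only, not part of the patch.

```
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <cstdio>

int main() {
    const int QK = 32;                 // block size, as in the quantizers above
    float x[QK], y[QK];
    for (int i = 0; i < QK; ++i) x[i] = std::sin(0.1f*i);  // arbitrary sample data

    // per-block scale: 4 bits give signed levels -7..7 around zero
    float amax = 0.0f;
    for (int i = 0; i < QK; ++i) amax = std::max(amax, std::fabs(x[i]));
    const float d  = amax / ((1 << 3) - 1);
    const float id = d ? 1.0f/d : 0.0f;

    // pack two 4-bit values per byte, offset by +8 so they are unsigned
    uint8_t q[QK/2];
    for (int i = 0; i < QK; i += 2) {
        const uint8_t v0 = (uint8_t)((int8_t) std::round(x[i + 0]*id) + 8);
        const uint8_t v1 = (uint8_t)((int8_t) std::round(x[i + 1]*id) + 8);
        q[i/2] = v0 | (v1 << 4);
    }

    // dequantize and measure the round-trip error
    float max_err = 0.0f;
    for (int i = 0; i < QK; i += 2) {
        y[i + 0] = d * (int(q[i/2] & 0x0F) - 8);
        y[i + 1] = d * (int(q[i/2] >>   4) - 8);
        max_err = std::max(max_err, std::fabs(y[i + 0] - x[i + 0]));
        max_err = std::max(max_err, std::fabs(y[i + 1] - x[i + 1]));
    }
    std::printf("d = %f, max reconstruction error = %f\n", d, max_err);
    return 0;
}
```

Per 32-value block this comes to 4 + 16 = 20 bytes for Q4_0 and 24 bytes for Q4_1, i.e. a fraction of a byte per element, which is presumably why the context-size estimates elsewhere in the patch switch from ggml_type_size() to ggml_type_sizef().
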
+ +# +# whisper-quantize + +set(TEST_TARGET whisper-quantize) +add_executable(${TEST_TARGET} quantize.cpp) +target_link_libraries(${TEST_TARGET} PRIVATE ggml ggml_utils) diff --git a/examples/whisper/convert-pt-to-ggml.py b/examples/whisper/convert-pt-to-ggml.py index 9e9b2dc..749f99c 100644 --- a/examples/whisper/convert-pt-to-ggml.py +++ b/examples/whisper/convert-pt-to-ggml.py @@ -303,8 +303,9 @@ for name in list_vars.keys(): data = data.astype(np.float32) ftype = 0 else: - data = data.astype(np.float32) - ftype = 0 + if n_dims < 3 and data.dtype != np.float32: + data = data.astype(np.float32) + ftype = 0 #if name.startswith("encoder"): # if name.endswith("mlp.0.weight") or \ diff --git a/examples/whisper/quantize.cpp b/examples/whisper/quantize.cpp new file mode 100644 index 0000000..8fc292f --- /dev/null +++ b/examples/whisper/quantize.cpp @@ -0,0 +1,376 @@ +#include "ggml/ggml.h" + +#include "utils.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +// TODO: move somewhere else +#define QK 32 + +// default hparams (Whisper tiny) +struct whisper_hparams { + int32_t n_vocab = 51864; + int32_t n_audio_ctx = 1500; + int32_t n_audio_state = 384; + int32_t n_audio_head = 6; + int32_t n_audio_layer = 4; + int32_t n_text_ctx = 448; + int32_t n_text_state = 384; + int32_t n_text_head = 6; + int32_t n_text_layer = 4; + int32_t n_mels = 80; + int32_t f16 = 1; +}; + +struct whisper_filters { + int32_t n_mel; + int32_t n_fft; + + std::vector data; +}; + +// quantize a model +bool whisper_model_quantize(const std::string & fname_inp, const std::string & fname_out, int itype) { + ggml_type type = GGML_TYPE_Q4_1; + + switch (itype) { + case 2: type = GGML_TYPE_Q4_0; break; + case 3: type = GGML_TYPE_Q4_1; break; + default: fprintf(stderr, "%s: invalid quantization type %d\n", __func__, itype); return 1; + }; + + if (type != GGML_TYPE_Q4_0 && type != GGML_TYPE_Q4_1) { + fprintf(stderr, "%s: invalid quantization type %d\n", __func__, type); + return false; + } + + gpt_vocab vocab; + + printf("%s: loading model from '%s'\n", __func__, fname_inp.c_str()); + + auto finp = std::ifstream(fname_inp, std::ios::binary); + if (!finp) { + fprintf(stderr, "%s: failed to open '%s' for reading\n", __func__, fname_inp.c_str()); + return false; + } + + auto fout = std::ofstream(fname_out, std::ios::binary); + if (!fout) { + fprintf(stderr, "%s: failed to open '%s' for writing\n", __func__, fname_out.c_str()); + return false; + } + + // verify magic + { + uint32_t magic; + finp.read((char *) &magic, sizeof(magic)); + if (magic != 0x67676d6c) { + fprintf(stderr, "%s: invalid model file '%s' (bad magic)\n", __func__, fname_inp.c_str()); + return false; + } + + fout.write((char *) &magic, sizeof(magic)); + } + + whisper_hparams hparams; + + // load hparams + { + finp.read((char *) &hparams.n_vocab, sizeof(hparams.n_vocab)); + finp.read((char *) &hparams.n_audio_ctx, sizeof(hparams.n_audio_ctx)); + finp.read((char *) &hparams.n_audio_state, sizeof(hparams.n_audio_state)); + finp.read((char *) &hparams.n_audio_head, sizeof(hparams.n_audio_head)); + finp.read((char *) &hparams.n_audio_layer, sizeof(hparams.n_audio_layer)); + finp.read((char *) &hparams.n_text_ctx, sizeof(hparams.n_text_ctx)); + finp.read((char *) &hparams.n_text_state, sizeof(hparams.n_text_state)); + finp.read((char *) &hparams.n_text_head, sizeof(hparams.n_text_head)); + finp.read((char *) &hparams.n_text_layer, sizeof(hparams.n_text_layer)); + finp.read((char *) &hparams.n_mels, sizeof(hparams.n_mels)); + 
finp.read((char *) &hparams.f16, sizeof(hparams.f16)); + + fprintf(stderr, "%s: n_vocab = %d\n", __func__, hparams.n_vocab); + fprintf(stderr, "%s: n_audio_ctx = %d\n", __func__, hparams.n_audio_ctx); + fprintf(stderr, "%s: n_audio_state = %d\n", __func__, hparams.n_audio_state); + fprintf(stderr, "%s: n_audio_head = %d\n", __func__, hparams.n_audio_head); + fprintf(stderr, "%s: n_audio_layer = %d\n", __func__, hparams.n_audio_layer); + fprintf(stderr, "%s: n_text_ctx = %d\n", __func__, hparams.n_text_ctx); + fprintf(stderr, "%s: n_text_state = %d\n", __func__, hparams.n_text_state); + fprintf(stderr, "%s: n_text_head = %d\n", __func__, hparams.n_text_head); + fprintf(stderr, "%s: n_text_layer = %d\n", __func__, hparams.n_text_layer); + fprintf(stderr, "%s: n_mels = %d\n", __func__, hparams.n_mels); + fprintf(stderr, "%s: f16 = %d\n", __func__, hparams.f16); + + fout.write((char *) &hparams.n_vocab, sizeof(hparams.n_vocab)); + fout.write((char *) &hparams.n_audio_ctx, sizeof(hparams.n_audio_ctx)); + fout.write((char *) &hparams.n_audio_state, sizeof(hparams.n_audio_state)); + fout.write((char *) &hparams.n_audio_head, sizeof(hparams.n_audio_head)); + fout.write((char *) &hparams.n_audio_layer, sizeof(hparams.n_audio_layer)); + fout.write((char *) &hparams.n_text_ctx, sizeof(hparams.n_text_ctx)); + fout.write((char *) &hparams.n_text_state, sizeof(hparams.n_text_state)); + fout.write((char *) &hparams.n_text_head, sizeof(hparams.n_text_head)); + fout.write((char *) &hparams.n_text_layer, sizeof(hparams.n_text_layer)); + fout.write((char *) &hparams.n_mels, sizeof(hparams.n_mels)); + fout.write((char *) &itype, sizeof(hparams.f16)); + } + + // load mel filters + { + whisper_filters filters; + + finp.read ((char *) &filters.n_mel, sizeof(filters.n_mel)); + fout.write((char *) &filters.n_mel, sizeof(filters.n_mel)); + finp.read ((char *) &filters.n_fft, sizeof(filters.n_fft)); + fout.write((char *) &filters.n_fft, sizeof(filters.n_fft)); + + filters.data.resize(filters.n_mel * filters.n_fft); + finp.read ((char *) filters.data.data(), filters.data.size() * sizeof(float)); + fout.write((char *) filters.data.data(), filters.data.size() * sizeof(float)); + } + + // load vocab + { + int32_t n_vocab = 0; + finp.read ((char *) &n_vocab, sizeof(n_vocab)); + fout.write((char *) &n_vocab, sizeof(n_vocab)); + + //if (n_vocab != hparams.n_vocab) { + // fprintf(stderr, "%s: invalid model file '%s' (bad vocab size %d != %d)\n", + // __func__, fname_inp.c_str(), n_vocab, hparams.n_vocab); + // return false; + //} + + std::string word; + for (int i = 0; i < n_vocab; i++) { + uint32_t len; + finp.read ((char *) &len, sizeof(len)); + fout.write((char *) &len, sizeof(len)); + + word.resize(len); + finp.read ((char *) word.data(), len); + fout.write((char *) word.data(), len); + + vocab.token_to_id[word] = i; + vocab.id_to_token[i] = word; + } + } + + // load weights + { + size_t total_size_org = 0; + size_t total_size_new = 0; + + std::vector work; + + std::vector data_u8; + std::vector data_f16; + std::vector data_f32; + + std::vector hist_all(1 << 4, 0); + + while (true) { + int32_t n_dims; + int32_t length; + int32_t ftype; + + finp.read(reinterpret_cast(&n_dims), sizeof(n_dims)); + finp.read(reinterpret_cast(&length), sizeof(length)); + finp.read(reinterpret_cast(&ftype), sizeof(ftype)); + + if (finp.eof()) { + break; + } + + int32_t nelements = 1; + int32_t ne[3] = { 1, 1, 1 }; + for (int i = 0; i < n_dims; ++i) { + finp.read (reinterpret_cast(&ne[i]), sizeof(ne[i])); + nelements *= ne[i]; + } + + 
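            // each tensor record in the ggml file follows the same layout:
            //   int32 n_dims, int32 name_length, int32 ftype,
            //   then n_dims int32 dimensions, the name bytes, and the raw data
            //   (f32, f16, or - after this tool runs - q4_0/q4_1 blocks)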
std::string name(length, 0); + finp.read (&name[0], length); + + { + static const char * ftype_str[] = { "f32", "f16", "q4_0", "q4_1", }; + printf("%48s - [%5d, %5d, %5d], type = %6s ", name.data(), ne[0], ne[1], ne[2], ftype_str[ftype]); + } + + // regexes of tensor names to not be quantized + const std::vector k_names = { + //"encoder.*", + "encoder.conv1.bias", + "encoder.conv2.bias", + "encoder.positional_embedding", + "decoder.positional_embedding", + }; + + bool quantize = true; + for (const auto & s : k_names) { + if (std::regex_match(name, std::regex(s))) { + quantize = false; + break; + } + } + + // quantize only 2D and 3D tensors + quantize &= (n_dims == 2); + + if (quantize) { + if (ftype != 0 && ftype != 1) { + fprintf(stderr, "%s: unsupported ftype %d for integer quantization\n", __func__, ftype); + return false; + } + + if (ftype == 1) { + data_f16.resize(nelements); + finp.read(reinterpret_cast(data_f16.data()), nelements * sizeof(ggml_fp16_t)); + data_f32.resize(nelements); + for (int i = 0; i < nelements; ++i) { + data_f32[i] = ggml_fp16_to_fp32(data_f16[i]); + } + } else { + data_f32.resize(nelements); + finp.read(reinterpret_cast(data_f32.data()), nelements * sizeof(float)); + } + + ftype = itype; + } else { + const int bpe = (ftype == 0) ? sizeof(float) : sizeof(uint16_t); + + data_u8.resize(nelements*bpe); + finp.read(reinterpret_cast(data_u8.data()), nelements * bpe); + } + + fout.write(reinterpret_cast(&n_dims), sizeof(n_dims)); + fout.write(reinterpret_cast(&length), sizeof(length)); + fout.write(reinterpret_cast(&ftype), sizeof(ftype)); + for (int i = 0; i < n_dims; ++i) { + fout.write(reinterpret_cast(&ne[i]), sizeof(ne[i])); + } + fout.write(&name[0], length); + + if (quantize) { + printf("quantizing .. "); + work.resize(nelements); // for quantization + + size_t cur_size = 0; + std::vector hist_cur(1 << 4, 0); + + switch (type) { + case GGML_TYPE_Q4_0: + { + cur_size = ggml_quantize_q4_0(data_f32.data(), work.data(), nelements, ne[0], QK, hist_cur.data()); + } break; + case GGML_TYPE_Q4_1: + { + cur_size = ggml_quantize_q4_1(data_f32.data(), work.data(), nelements, ne[0], QK, hist_cur.data()); + } break; + default: + { + fprintf(stderr, "%s: unsupported quantization type %d\n", __func__, type); + return false; + } + } + + fout.write(reinterpret_cast(work.data()), cur_size); + total_size_new += cur_size; + + printf("size = %8.3f MB -> %8.3f MB | hist: ", nelements * sizeof(float)/1024.0/1024.0, cur_size/1024.0/1024.0); + for (int i = 0; i < hist_cur.size(); ++i) { + hist_all[i] += hist_cur[i]; + } + + for (int i = 0; i < hist_cur.size(); ++i) { + printf("%5.3f ", hist_cur[i] / (float)nelements); + } + printf("\n"); + } else { + printf("size = %8.3f MB\n", data_u8.size()/1024.0/1024.0); + fout.write(reinterpret_cast(data_u8.data()), data_u8.size()); + total_size_new += data_u8.size(); + } + + total_size_org += nelements * sizeof(float); + } + + printf("%s: model size = %8.2f MB\n", __func__, total_size_org/1024.0/1024.0); + printf("%s: quant size = %8.2f MB\n", __func__, total_size_new/1024.0/1024.0); + + { + int64_t sum_all = 0; + for (int i = 0; i < hist_all.size(); ++i) { + sum_all += hist_all[i]; + } + + printf("%s: hist: ", __func__); + for (int i = 0; i < hist_all.size(); ++i) { + printf("%5.3f ", hist_all[i] / (float)sum_all); + } + printf("\n"); + } + } + + finp.close(); + fout.close(); + + return true; +} + +// usage: +// ./gpt-2-quantize models/gpt-2-117M/ggml-model.bin models/gpt-2-117M/ggml-model-quant.bin type +// +int main(int argc, char ** argv) { + 
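    // main flow: check the arguments, create and free a dummy ggml context so
    // the f16 conversion tables get initialized, quantize the model file, and
    // report how long the quantization took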
if (argc != 4) { + fprintf(stderr, "usage: %s model-f32.bin model-quant.bin type\n", argv[0]); + fprintf(stderr, " type = 2 - q4_0\n"); + fprintf(stderr, " type = 3 - q4_1\n"); + return 1; + } + + // needed to initialize f16 tables + { + struct ggml_init_params params = { 0, NULL }; + struct ggml_context * ctx = ggml_init(params); + ggml_free(ctx); + } + + const std::string fname_inp = argv[1]; + const std::string fname_out = argv[2]; + + const int itype = atoi(argv[3]); + + const int64_t t_main_start_us = ggml_time_us(); + + int64_t t_quantize_us = 0; + + // load the model + { + const int64_t t_start_us = ggml_time_us(); + + if (!whisper_model_quantize(fname_inp, fname_out, itype)) { + fprintf(stderr, "%s: failed to quantize model from '%s'\n", __func__, fname_inp.c_str()); + return 1; + } + + t_quantize_us = ggml_time_us() - t_start_us; + } + + // report timing + { + const int64_t t_main_end_us = ggml_time_us(); + + printf("\n"); + printf("%s: quantize time = %8.2f ms\n", __func__, t_quantize_us/1000.0f); + printf("%s: total time = %8.2f ms\n", __func__, (t_main_end_us - t_main_start_us)/1000.0f); + } + + return 0; +} diff --git a/examples/whisper/whisper.cpp b/examples/whisper/whisper.cpp index 3a21581..027175e 100644 --- a/examples/whisper/whisper.cpp +++ b/examples/whisper/whisper.cpp @@ -252,12 +252,34 @@ static const std::map MEM_REQ_SCRATCH3 = { { MODEL_LARGE, 9ull*MB }, }; -static const std::map MEM_REQ_MODEL = { - { MODEL_TINY, 74ull*MB }, - { MODEL_BASE, 142ull*MB }, - { MODEL_SMALL, 466ull*MB }, - { MODEL_MEDIUM, 1464ull*MB }, - { MODEL_LARGE, 2952ull*MB }, +static const std::map> MEM_REQ_MODEL = { + { GGML_TYPE_F16, + { + { MODEL_TINY, 74ull*MB }, + { MODEL_BASE, 142ull*MB }, + { MODEL_SMALL, 466ull*MB }, + { MODEL_MEDIUM, 1464ull*MB }, + { MODEL_LARGE, 2952ull*MB }, + }, + }, + { GGML_TYPE_Q4_0, + { + { MODEL_TINY, 26ull*MB }, + { MODEL_BASE, 50ull*MB }, + { MODEL_SMALL, 154ull*MB }, + { MODEL_MEDIUM, 470ull*MB }, + { MODEL_LARGE, 940ull*MB }, + }, + }, + { GGML_TYPE_Q4_1, + { + { MODEL_TINY, 31ull*MB }, + { MODEL_BASE, 57ull*MB }, + { MODEL_SMALL, 181ull*MB }, + { MODEL_MEDIUM, 559ull*MB }, + { MODEL_LARGE, 1122ull*MB }, + }, + }, }; static const std::map MEM_REQ_KV_SELF = { @@ -681,7 +703,7 @@ static bool kv_cache_reinit(struct whisper_kv_cache & cache) { const ggml_type wtype = cache.k->type; WHISPER_ASSERT(wtype == cache.v->type); - WHISPER_ASSERT(cache.buf.size() >= 2*n_elements*ggml_type_size(wtype)); + WHISPER_ASSERT(cache.buf.size() >= 2*n_elements*ggml_type_sizef(wtype)); struct ggml_init_params params; params.mem_size = cache.buf.size(); @@ -776,12 +798,25 @@ static bool whisper_model_load(struct whisper_model_loader * loader, whisper_con model.type = e_model::MODEL_LARGE; } - // for the big tensors, we have the option to store the data in 16-bit floats + // for the big tensors, we have the option to store the data in 16-bit floats or quantized // in order to save memory and also to speed up the computation - wctx.wtype = model.hparams.f16 ? GGML_TYPE_F16 : GGML_TYPE_F32; + wctx.wtype = GGML_TYPE_COUNT; + switch (model.hparams.f16) { + case 0: wctx.wtype = GGML_TYPE_F32; break; + case 1: wctx.wtype = GGML_TYPE_F16; break; + case 2: wctx.wtype = GGML_TYPE_Q4_0; break; + case 3: wctx.wtype = GGML_TYPE_Q4_1; break; + default: + { + fprintf(stderr, "%s: invalid model (bad f16 value %d)\n", __func__, model.hparams.f16); + return false; + } + } const size_t scale = model.hparams.f16 ? 
1 : 2; + static const char * ftype_str[] = { "f32", "f16", "q4_0", "q4_1", }; + fprintf(stderr, "%s: n_vocab = %d\n", __func__, hparams.n_vocab); fprintf(stderr, "%s: n_audio_ctx = %d\n", __func__, hparams.n_audio_ctx); fprintf(stderr, "%s: n_audio_state = %d\n", __func__, hparams.n_audio_state); @@ -792,7 +827,7 @@ static bool whisper_model_load(struct whisper_model_loader * loader, whisper_con fprintf(stderr, "%s: n_text_head = %d\n", __func__, hparams.n_text_head); fprintf(stderr, "%s: n_text_layer = %d\n", __func__, hparams.n_text_layer); fprintf(stderr, "%s: n_mels = %d\n", __func__, hparams.n_mels); - fprintf(stderr, "%s: f16 = %d\n", __func__, hparams.f16); + fprintf(stderr, "%s: ftype = %s\n", __func__, ftype_str[model.hparams.f16]); fprintf(stderr, "%s: type = %d\n", __func__, model.type); // print memory requirements @@ -803,7 +838,7 @@ static bool whisper_model_load(struct whisper_model_loader * loader, whisper_con MEM_REQ_SCRATCH1.at (model.type) + MEM_REQ_SCRATCH2.at (model.type) + MEM_REQ_SCRATCH3.at (model.type) + - scale*MEM_REQ_MODEL.at (model.type) + + scale*MEM_REQ_MODEL.at(wctx.wtype).at(model.type) + scale*MEM_REQ_KV_CROSS.at(model.type) + scale*std::max(MEM_REQ_ENCODE.at(model.type), MEM_REQ_DECODE.at(model.type)); @@ -819,9 +854,9 @@ static bool whisper_model_load(struct whisper_model_loader * loader, whisper_con // always have at least one decoder wctx.model.buf = new std::vector(); - wctx.model.buf->resize(scale*MEM_REQ_MODEL.at(model.type)); + wctx.model.buf->resize(scale*MEM_REQ_MODEL.at(wctx.wtype).at(model.type)); - if (!kv_cache_init(model.hparams, scale*MEM_REQ_KV_SELF.at(model.type), wctx.decoders[0].kv_self, wctx.wtype, model.hparams.n_text_ctx)) { + if (!kv_cache_init(model.hparams, scale*MEM_REQ_KV_SELF.at(model.type), wctx.decoders[0].kv_self, GGML_TYPE_F16, model.hparams.n_text_ctx)) { fprintf(stderr, "%s: kv_cache_init() failed for self-attention cache\n", __func__); return false; } @@ -831,7 +866,7 @@ static bool whisper_model_load(struct whisper_model_loader * loader, whisper_con fprintf(stderr, "%s: kv self size = %7.2f MB\n", __func__, memory_size/1024.0/1024.0); } - if (!kv_cache_init(model.hparams, scale*MEM_REQ_KV_CROSS.at(model.type), wctx.kv_cross, wctx.wtype, model.hparams.n_audio_ctx)) { + if (!kv_cache_init(model.hparams, scale*MEM_REQ_KV_CROSS.at(model.type), wctx.kv_cross, GGML_TYPE_F16, model.hparams.n_audio_ctx)) { fprintf(stderr, "%s: kv_cache_init() failed for cross-attention cache\n", __func__); return false; } @@ -963,92 +998,92 @@ static bool whisper_model_load(struct whisper_model_loader * loader, whisper_con // encoder { - ctx_size += n_audio_ctx*n_audio_state*ggml_type_size(GGML_TYPE_F32); // e_pe; + ctx_size += n_audio_ctx*n_audio_state*ggml_type_sizef(GGML_TYPE_F32); // e_pe; - ctx_size += 3*n_mels*n_audio_state*ggml_type_size(wtype); // e_conv_1_w - ctx_size += n_audio_state*ggml_type_size(GGML_TYPE_F32); // e_conv_1_b + ctx_size += 3*n_mels*n_audio_state*ggml_type_sizef(GGML_TYPE_F16); // e_conv_1_w + ctx_size += n_audio_state*ggml_type_sizef(GGML_TYPE_F32); // e_conv_1_b - ctx_size += 3*n_audio_state*n_audio_state*ggml_type_size(wtype); // e_conv_2_w - ctx_size += n_audio_state*ggml_type_size(GGML_TYPE_F32); // e_conv_2_b + ctx_size += 3*n_audio_state*n_audio_state*ggml_type_sizef(GGML_TYPE_F16); // e_conv_2_w + ctx_size += n_audio_state*ggml_type_sizef(GGML_TYPE_F32); // e_conv_2_b - ctx_size += n_audio_state*ggml_type_size(GGML_TYPE_F32); // e_ln_w; - ctx_size += n_audio_state*ggml_type_size(GGML_TYPE_F32); // e_ln_b; + 
ctx_size += n_audio_state*ggml_type_sizef(GGML_TYPE_F32); // e_ln_w; + ctx_size += n_audio_state*ggml_type_sizef(GGML_TYPE_F32); // e_ln_b; } // decoder { - ctx_size += n_text_ctx*n_text_state*ggml_type_size(GGML_TYPE_F32); // d_pe; + ctx_size += n_text_ctx*n_text_state*ggml_type_sizef(GGML_TYPE_F32); // d_pe; - ctx_size += n_vocab*n_text_state*ggml_type_size(wtype); // d_te; + ctx_size += n_vocab*n_text_state*ggml_type_sizef(wtype); // d_te; - ctx_size += n_text_state*ggml_type_size(GGML_TYPE_F32); // d_ln_w; - ctx_size += n_text_state*ggml_type_size(GGML_TYPE_F32); // d_ln_b; + ctx_size += n_text_state*ggml_type_sizef(GGML_TYPE_F32); // d_ln_w; + ctx_size += n_text_state*ggml_type_sizef(GGML_TYPE_F32); // d_ln_b; } // encoder layers { - ctx_size += n_audio_layer*(n_audio_state*ggml_type_size(GGML_TYPE_F32)); // mlp_ln_w - ctx_size += n_audio_layer*(n_audio_state*ggml_type_size(GGML_TYPE_F32)); // mlp_ln_b + ctx_size += n_audio_layer*(n_audio_state*ggml_type_sizef(GGML_TYPE_F32)); // mlp_ln_w + ctx_size += n_audio_layer*(n_audio_state*ggml_type_sizef(GGML_TYPE_F32)); // mlp_ln_b - ctx_size += n_audio_layer*(4*n_audio_state*n_audio_state*ggml_type_size(wtype)); // mlp_0_w - ctx_size += n_audio_layer*( 4*n_audio_state*ggml_type_size(GGML_TYPE_F32)); // mlp_0_b + ctx_size += n_audio_layer*(4*n_audio_state*n_audio_state*ggml_type_sizef(wtype)); // mlp_0_w + ctx_size += n_audio_layer*( 4*n_audio_state*ggml_type_sizef(GGML_TYPE_F32)); // mlp_0_b - ctx_size += n_audio_layer*(4*n_audio_state*n_audio_state*ggml_type_size(wtype)); // mlp_1_w - ctx_size += n_audio_layer*( n_audio_state*ggml_type_size(GGML_TYPE_F32)); // mlp_1_b + ctx_size += n_audio_layer*(4*n_audio_state*n_audio_state*ggml_type_sizef(wtype)); // mlp_1_w + ctx_size += n_audio_layer*( n_audio_state*ggml_type_sizef(GGML_TYPE_F32)); // mlp_1_b - ctx_size += n_audio_layer*(n_audio_state*ggml_type_size(GGML_TYPE_F32)); // attn_ln_0_w - ctx_size += n_audio_layer*(n_audio_state*ggml_type_size(GGML_TYPE_F32)); // attn_ln_0_b + ctx_size += n_audio_layer*(n_audio_state*ggml_type_sizef(GGML_TYPE_F32)); // attn_ln_0_w + ctx_size += n_audio_layer*(n_audio_state*ggml_type_sizef(GGML_TYPE_F32)); // attn_ln_0_b - ctx_size += n_audio_layer*(n_audio_state*n_audio_state*ggml_type_size(wtype)); // attn_q_w - ctx_size += n_audio_layer*( n_audio_state*ggml_type_size(GGML_TYPE_F32)); // attn_q_b + ctx_size += n_audio_layer*(n_audio_state*n_audio_state*ggml_type_sizef(wtype)); // attn_q_w + ctx_size += n_audio_layer*( n_audio_state*ggml_type_sizef(GGML_TYPE_F32)); // attn_q_b - ctx_size += n_audio_layer*(n_audio_state*n_audio_state*ggml_type_size(wtype)); // attn_k_w + ctx_size += n_audio_layer*(n_audio_state*n_audio_state*ggml_type_sizef(wtype)); // attn_k_w - ctx_size += n_audio_layer*(n_audio_state*n_audio_state*ggml_type_size(wtype)); // attn_v_w - ctx_size += n_audio_layer*( n_audio_state*ggml_type_size(GGML_TYPE_F32)); // attn_v_b + ctx_size += n_audio_layer*(n_audio_state*n_audio_state*ggml_type_sizef(wtype)); // attn_v_w + ctx_size += n_audio_layer*( n_audio_state*ggml_type_sizef(GGML_TYPE_F32)); // attn_v_b - ctx_size += n_audio_layer*(n_audio_state*n_audio_state*ggml_type_size(wtype)); // attn_ln_1_w - ctx_size += n_audio_layer*( n_audio_state*ggml_type_size(GGML_TYPE_F32)); // attn_ln_1_b + ctx_size += n_audio_layer*(n_audio_state*n_audio_state*ggml_type_sizef(wtype)); // attn_ln_1_w + ctx_size += n_audio_layer*( n_audio_state*ggml_type_sizef(GGML_TYPE_F32)); // attn_ln_1_b } // decoder layers { - ctx_size += 
n_text_layer*(n_text_state*ggml_type_size(GGML_TYPE_F32)); // mlp_ln_w - ctx_size += n_text_layer*(n_text_state*ggml_type_size(GGML_TYPE_F32)); // mlp_ln_b + ctx_size += n_text_layer*(n_text_state*ggml_type_sizef(GGML_TYPE_F32)); // mlp_ln_w + ctx_size += n_text_layer*(n_text_state*ggml_type_sizef(GGML_TYPE_F32)); // mlp_ln_b - ctx_size += n_text_layer*(4*n_text_state*n_text_state*ggml_type_size(wtype)); // mlp_0_w - ctx_size += n_text_layer*( 4*n_text_state*ggml_type_size(GGML_TYPE_F32)); // mlp_0_b + ctx_size += n_text_layer*(4*n_text_state*n_text_state*ggml_type_sizef(wtype)); // mlp_0_w + ctx_size += n_text_layer*( 4*n_text_state*ggml_type_sizef(GGML_TYPE_F32)); // mlp_0_b - ctx_size += n_text_layer*(4*n_text_state*n_text_state*ggml_type_size(wtype)); // mlp_1_w - ctx_size += n_text_layer*( n_text_state*ggml_type_size(GGML_TYPE_F32)); // mlp_1_b + ctx_size += n_text_layer*(4*n_text_state*n_text_state*ggml_type_sizef(wtype)); // mlp_1_w + ctx_size += n_text_layer*( n_text_state*ggml_type_sizef(GGML_TYPE_F32)); // mlp_1_b - ctx_size += n_text_layer*(n_text_state*ggml_type_size(GGML_TYPE_F32)); // attn_ln_0_w - ctx_size += n_text_layer*(n_text_state*ggml_type_size(GGML_TYPE_F32)); // attn_ln_0_b + ctx_size += n_text_layer*(n_text_state*ggml_type_sizef(GGML_TYPE_F32)); // attn_ln_0_w + ctx_size += n_text_layer*(n_text_state*ggml_type_sizef(GGML_TYPE_F32)); // attn_ln_0_b - ctx_size += n_text_layer*(n_text_state*n_text_state*ggml_type_size(wtype)); // attn_q_w - ctx_size += n_text_layer*( n_text_state*ggml_type_size(GGML_TYPE_F32)); // attn_q_b + ctx_size += n_text_layer*(n_text_state*n_text_state*ggml_type_sizef(wtype)); // attn_q_w + ctx_size += n_text_layer*( n_text_state*ggml_type_sizef(GGML_TYPE_F32)); // attn_q_b - ctx_size += n_text_layer*(n_text_state*n_text_state*ggml_type_size(wtype)); // attn_k_w + ctx_size += n_text_layer*(n_text_state*n_text_state*ggml_type_sizef(wtype)); // attn_k_w - ctx_size += n_text_layer*(n_text_state*n_text_state*ggml_type_size(wtype)); // attn_v_w - ctx_size += n_text_layer*( n_text_state*ggml_type_size(GGML_TYPE_F32)); // attn_v_b + ctx_size += n_text_layer*(n_text_state*n_text_state*ggml_type_sizef(wtype)); // attn_v_w + ctx_size += n_text_layer*( n_text_state*ggml_type_sizef(GGML_TYPE_F32)); // attn_v_b - ctx_size += n_text_layer*(n_text_state*n_text_state*ggml_type_size(wtype)); // attn_ln_1_w - ctx_size += n_text_layer*( n_text_state*ggml_type_size(GGML_TYPE_F32)); // attn_ln_1_b + ctx_size += n_text_layer*(n_text_state*n_text_state*ggml_type_sizef(wtype)); // attn_ln_1_w + ctx_size += n_text_layer*( n_text_state*ggml_type_sizef(GGML_TYPE_F32)); // attn_ln_1_b // - ctx_size += n_text_layer*(n_text_state*ggml_type_size(GGML_TYPE_F32)); // cross_attn_ln_0_w - ctx_size += n_text_layer*(n_text_state*ggml_type_size(GGML_TYPE_F32)); // cross_attn_ln_0_b + ctx_size += n_text_layer*(n_text_state*ggml_type_sizef(GGML_TYPE_F32)); // cross_attn_ln_0_w + ctx_size += n_text_layer*(n_text_state*ggml_type_sizef(GGML_TYPE_F32)); // cross_attn_ln_0_b - ctx_size += n_text_layer*(n_text_state*n_text_state*ggml_type_size(wtype)); // cross_attn_q_w - ctx_size += n_text_layer*( n_text_state*ggml_type_size(GGML_TYPE_F32)); // cross_attn_q_b + ctx_size += n_text_layer*(n_text_state*n_text_state*ggml_type_sizef(wtype)); // cross_attn_q_w + ctx_size += n_text_layer*( n_text_state*ggml_type_sizef(GGML_TYPE_F32)); // cross_attn_q_b - ctx_size += n_text_layer*(n_text_state*n_text_state*ggml_type_size(wtype)); // cross_attn_k_w + ctx_size += 
n_text_layer*(n_text_state*n_text_state*ggml_type_sizef(wtype)); // cross_attn_k_w - ctx_size += n_text_layer*(n_text_state*n_text_state*ggml_type_size(wtype)); // cross_attn_v_w - ctx_size += n_text_layer*( n_text_state*ggml_type_size(GGML_TYPE_F32)); // cross_attn_v_b + ctx_size += n_text_layer*(n_text_state*n_text_state*ggml_type_sizef(wtype)); // cross_attn_v_w + ctx_size += n_text_layer*( n_text_state*ggml_type_sizef(GGML_TYPE_F32)); // cross_attn_v_b - ctx_size += n_text_layer*(n_text_state*n_text_state*ggml_type_size(wtype)); // cross_attn_ln_1_w - ctx_size += n_text_layer*( n_text_state*ggml_type_size(GGML_TYPE_F32)); // cross_attn_ln_1_b + ctx_size += n_text_layer*(n_text_state*n_text_state*ggml_type_sizef(wtype)); // cross_attn_ln_1_w + ctx_size += n_text_layer*( n_text_state*ggml_type_sizef(GGML_TYPE_F32)); // cross_attn_ln_1_b } ctx_size += (15 + 15*n_audio_layer + 24*n_text_layer)*256; // object overhead @@ -1094,10 +1129,10 @@ static bool whisper_model_load(struct whisper_model_loader * loader, whisper_con { model.e_pe = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_audio_state, n_audio_ctx); - model.e_conv_1_w = ggml_new_tensor_3d(ctx, wtype, 3, n_mels, n_audio_state); + model.e_conv_1_w = ggml_new_tensor_3d(ctx, GGML_TYPE_F16, 3, n_mels, n_audio_state); model.e_conv_1_b = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 1, n_audio_state); - model.e_conv_2_w = ggml_new_tensor_3d(ctx, wtype, 3, n_audio_state, n_audio_state); + model.e_conv_2_w = ggml_new_tensor_3d(ctx, GGML_TYPE_F16, 3, n_audio_state, n_audio_state); model.e_conv_2_b = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 1, n_audio_state); model.e_ln_w = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_audio_state); @@ -1313,9 +1348,21 @@ static bool whisper_model_load(struct whisper_model_loader * loader, whisper_con return false; } - const size_t bpe = (ftype == 0) ? 
sizeof(float) : sizeof(ggml_fp16_t); + size_t bpe = 0; + + switch (ftype) { + case 0: bpe = ggml_type_size(GGML_TYPE_F32); break; + case 1: bpe = ggml_type_size(GGML_TYPE_F16); break; + case 2: bpe = ggml_type_size(GGML_TYPE_Q4_0); assert(ne[0] % 64 == 0); break; + case 3: bpe = ggml_type_size(GGML_TYPE_Q4_1); assert(ne[0] % 64 == 0); break; + default: + { + fprintf(stderr, "%s: unknown ftype %d in model file\n", __func__, ftype); + return false; + } + }; - if (nelements*bpe != ggml_nbytes(tensor)) { + if ((nelements*bpe)/ggml_blck_size(tensor->type) != ggml_nbytes(tensor)) { fprintf(stderr, "%s: tensor '%s' has wrong size in model file: got %zu, expected %zu\n", __func__, name.data(), ggml_nbytes(tensor), nelements*bpe); return false; @@ -1513,14 +1560,14 @@ static bool whisper_encode( ggml_permute(ctx0, ggml_cpy(ctx0, Qcur, - ggml_new_tensor_3d(ctx0, wctx.wtype, n_state/n_head, n_head, n_ctx)), + ggml_new_tensor_3d(ctx0, GGML_TYPE_F16, n_state/n_head, n_head, n_ctx)), 0, 2, 1, 3); struct ggml_tensor * K = ggml_permute(ctx0, ggml_cpy(ctx0, Kcur, - ggml_new_tensor_3d(ctx0, wctx.wtype, n_state/n_head, n_head, n_ctx)), + ggml_new_tensor_3d(ctx0, GGML_TYPE_F16, n_state/n_head, n_head, n_ctx)), 0, 2, 1, 3); struct ggml_tensor * V = @@ -1530,7 +1577,7 @@ static bool whisper_encode( Vcur, n_state/n_head, n_head, n_ctx), 1, 2, 0, 3), - ggml_new_tensor_3d(ctx0, wctx.wtype, n_ctx, n_state/n_head, n_head) + ggml_new_tensor_3d(ctx0, GGML_TYPE_F16, n_ctx, n_state/n_head, n_head) ); struct ggml_tensor * KQV = ggml_flash_attn(ctx0, Q, K, V, false); @@ -1546,7 +1593,7 @@ static bool whisper_encode( ggml_permute(ctx0, ggml_cpy(ctx0, Kcur, - ggml_new_tensor_3d(ctx0, wctx.wtype, n_state/n_head, n_head, n_ctx)), + ggml_new_tensor_3d(ctx0, GGML_TYPE_F16, n_state/n_head, n_head, n_ctx)), 0, 2, 1, 3); // K * Q @@ -1564,7 +1611,7 @@ static bool whisper_encode( // ggml_permute(ctx0, // ggml_cpy(ctx0, // Vcur, - // ggml_new_tensor_3d(ctx0, wctx.wtype, n_state/n_head, n_head, n_ctx)), + // ggml_new_tensor_3d(ctx0, GGML_TYPE_F16, n_state/n_head, n_head, n_ctx)), // 1, 2, 0, 3); //struct ggml_tensor * KQV = ggml_mul_mat(ctx0, V_trans, KQ_soft_max); @@ -1576,7 +1623,7 @@ static bool whisper_encode( Vcur, n_state/n_head, n_head, n_ctx), 0, 2, 1, 3), - ggml_new_tensor_3d(ctx0, wctx.wtype, n_state/n_head, n_ctx, n_head) + ggml_new_tensor_3d(ctx0, GGML_TYPE_F16, n_state/n_head, n_ctx, n_head) ); struct ggml_tensor * KQV = ggml_mul_mat(ctx0, ggml_transpose(ctx0, V), KQ_soft_max); @@ -1634,7 +1681,7 @@ static bool whisper_encode( wctx.use_buf(ctx0, 0); cur = ggml_flash_ff(ctx0, - ggml_cpy(ctx0, cur, ggml_new_tensor_2d(ctx0, wctx.wtype, n_state, n_ctx)), + ggml_cpy(ctx0, cur, ggml_new_tensor_2d(ctx0, GGML_TYPE_F16, n_state, n_ctx)), layer.mlp_0_w, layer.mlp_0_b, layer.mlp_1_w, layer.mlp_1_b); #else wctx.use_buf(ctx0, 0); diff --git a/include/ggml/ggml.h b/include/ggml/ggml.h index 18f317b..cdd8553 100644 --- a/include/ggml/ggml.h +++ b/include/ggml/ggml.h @@ -198,6 +198,8 @@ struct ggml_object; struct ggml_context; enum ggml_type { + GGML_TYPE_Q4_0, + GGML_TYPE_Q4_1, GGML_TYPE_I8, GGML_TYPE_I16, GGML_TYPE_I32, @@ -326,7 +328,10 @@ void ggml_print_objects(const struct ggml_context * ctx); int ggml_nelements(const struct ggml_tensor * tensor); size_t ggml_nbytes (const struct ggml_tensor * tensor); -size_t ggml_type_size (enum ggml_type type); +int ggml_blck_size (enum ggml_type type); +size_t ggml_type_size (enum ggml_type type); // size in bytes for all elements in a block +float ggml_type_sizef(enum ggml_type type); // 
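// Illustrative note, not part of the original patch: with ggml_blck_size() and
// ggml_type_size() declared above, the byte size of a block-quantized tensor is
// nelements*ggml_type_size(type)/ggml_blck_size(type), which is what ggml_nbytes()
// computes further down in ggml.c. For example, a 384x384 Q4_0 matrix has
// 147456 elements = 4608 blocks of 32, and each block takes sizeof(float) + 32/2
// = 20 bytes, i.e. 92160 bytes versus 294912 bytes in F16.

#include <assert.h>
#include <stddef.h>

// stand-alone check of that arithmetic (constants mirror the Q4_0 layout, QK == 32)
static void q4_0_nbytes_example(void) {
    const size_t blck       = 32;                      // ggml_blck_size(GGML_TYPE_Q4_0)
    const size_t block_size = sizeof(float) + blck/2;  // ggml_type_size(GGML_TYPE_Q4_0)

    const size_t nelements = (size_t) 384*384;
    const size_t nbytes    = nelements*block_size/blck;

    assert(nelements == 147456);
    assert(nbytes    == 92160);
}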
ggml_type_size()/ggml_blck_size() as float + size_t ggml_element_size(const struct ggml_tensor * tensor); struct ggml_context * ggml_init(struct ggml_init_params params); diff --git a/src/ggml.c b/src/ggml.c index d67612c..2c60942 100644 --- a/src/ggml.c +++ b/src/ggml.c @@ -13,6 +13,7 @@ #include #include #include +#include // if C99 - static_assert is noop // ref: https://stackoverflow.com/a/53923785/4039976 @@ -348,6 +349,249 @@ int64_t ggml_cycles_per_ms(void) { static const size_t CACHE_LINE_SIZE_F32 = CACHE_LINE_SIZE/sizeof(float); +// +// quantization +// + +#define QK 32 + +// method 5 +// blocks of QK elements +// represented with a single float (delta) and QK/2 8-bit ints (i.e QK 4-bit signed integer factors) +void quantize_row_q4_0(const float * restrict x, void * restrict y, int k) { + assert(k % QK == 0); + + const int nb = k / QK; + + float * restrict pd = (float *) (y); + uint8_t * restrict pb = (uint8_t *) (pd + nb); + + uint8_t pp[QK/2]; + +#if __ARM_NEON +#if QK == 32 + for (int i = 0; i < nb; i++) { + float amax = 0.0f; // absolute max + + float32x4_t srcv [8]; + float32x4_t asrcv[8]; + float32x4_t amaxv[8]; + + for (int l = 0; l < 8; l++) srcv[l] = vld1q_f32(x + i*32 + 4*l); + for (int l = 0; l < 8; l++) asrcv[l] = vabsq_f32(srcv[l]); + + for (int l = 0; l < 4; l++) amaxv[2*l] = vmaxq_f32(asrcv[2*l], asrcv[2*l+1]); + for (int l = 0; l < 2; l++) amaxv[4*l] = vmaxq_f32(amaxv[4*l], amaxv[4*l+2]); + for (int l = 0; l < 1; l++) amaxv[8*l] = vmaxq_f32(amaxv[8*l], amaxv[8*l+4]); + + amax = MAX( + MAX(vgetq_lane_f32(amaxv[0], 0), vgetq_lane_f32(amaxv[0], 1)), + MAX(vgetq_lane_f32(amaxv[0], 2), vgetq_lane_f32(amaxv[0], 3))); + + const float d = amax / ((1 << 3) - 1); + const float id = d ? 1.0/d : 0.0; + + pd[i] = d; + + for (int l = 0; l < 8; l++) { + const float32x4_t v = vmulq_n_f32(srcv[l], id); + const float32x4_t vf = vaddq_f32(v, vdupq_n_f32(8.5f)); + const int32x4_t vi = vcvtq_s32_f32(vf); + + pp[2*l + 0] = vgetq_lane_s32(vi, 0) | (vgetq_lane_s32(vi, 1) << 4); + pp[2*l + 1] = vgetq_lane_s32(vi, 2) | (vgetq_lane_s32(vi, 3) << 4); + } + + memcpy(pb + i*16, pp, sizeof(pp)); + } +#else +#error "not implemented for QK" +#endif +#elif defined(__wasm_simd128__) +#if QK == 32 + for (int i = 0; i < nb; i++) { + float amax = 0.0f; // absolute max + + v128_t srcv [8]; + v128_t asrcv[8]; + v128_t amaxv[8]; + + for (int l = 0; l < 8; l++) srcv[l] = wasm_v128_load(x + i*32 + 4*l); + for (int l = 0; l < 8; l++) asrcv[l] = wasm_f32x4_abs(srcv[l]); + + for (int l = 0; l < 4; l++) amaxv[2*l] = wasm_f32x4_max(asrcv[2*l], asrcv[2*l+1]); + for (int l = 0; l < 2; l++) amaxv[4*l] = wasm_f32x4_max(amaxv[4*l], amaxv[4*l+2]); + for (int l = 0; l < 1; l++) amaxv[8*l] = wasm_f32x4_max(amaxv[8*l], amaxv[8*l+4]); + + amax = MAX( + MAX(wasm_f32x4_extract_lane(amaxv[0], 0), wasm_f32x4_extract_lane(amaxv[0], 1)), + MAX(wasm_f32x4_extract_lane(amaxv[0], 2), wasm_f32x4_extract_lane(amaxv[0], 3))); + + const float d = amax / ((1 << 3) - 1); + const float id = d ? 
1.0/d : 0.0; + + pd[i] = d; + + for (int l = 0; l < 8; l++) { + const v128_t v = wasm_f32x4_mul(srcv[l], wasm_f32x4_splat(id)); + const v128_t vf = wasm_f32x4_add(v, wasm_f32x4_splat(8.5f)); + const v128_t vi = wasm_i32x4_trunc_sat_f32x4(vf); + + pp[2*l + 0] = wasm_i32x4_extract_lane(vi, 0) | (wasm_i32x4_extract_lane(vi, 1) << 4); + pp[2*l + 1] = wasm_i32x4_extract_lane(vi, 2) | (wasm_i32x4_extract_lane(vi, 3) << 4); + } + + memcpy(pb + i*16, pp, sizeof(pp)); + } +#else +#error "not implemented for QK" +#endif +#else + // scalar + for (int i = 0; i < nb; i++) { + float amax = 0.0f; // absolute max + + for (int l = 0; l < QK; l++) { + const float v = x[i*QK + l]; + amax = MAX(amax, fabsf(v)); + } + + const float d = amax / ((1 << 3) - 1); + const float id = d ? 1.0f/d : 0.0f; + + pd[i] = d; + + for (int l = 0; l < QK; l += 2) { + const float v0 = x[i*QK + l + 0]*id; + const float v1 = x[i*QK + l + 1]*id; + + const uint8_t vi0 = ((int8_t) (round(v0))) + 8; + const uint8_t vi1 = ((int8_t) (round(v1))) + 8; + + assert(vi0 >= 0 && vi0 < 16); + assert(vi1 >= 0 && vi1 < 16); + + pp[l/2] = vi0 | (vi1 << 4); + } + + memcpy(pb + i*QK/2, pp, sizeof(pp)); + } +#endif +} + +// method 4 +// blocks of QK elements +// represented with 2 floats (min + delta) and QK/2 8-bit ints (i.e QK 4-bit unsigned integer factors) +void quantize_row_q4_1(const float * restrict x, void * restrict y, int k) { + assert(k % QK == 0); + + const int nb = k / QK; + + float * restrict pm = (float *) (y); + float * restrict pd = (float *) (pm + nb); + uint8_t * restrict pb = (uint8_t *) (pd + nb); + + uint8_t pp[QK/2]; + + for (int i = 0; i < nb; i++) { + float min = FLT_MAX; + float max = -FLT_MAX; + + for (int l = 0; l < QK; l++) { + const float v = x[i*QK + l]; + if (v < min) min = v; + if (v > max) max = v; + } + + const float d = (max - min) / ((1 << 4) - 1); + const float id = d ? 
1.0f/d : 0.0f; + + pm[i] = min; + pd[i] = d; + + for (int l = 0; l < QK; l += 2) { + const float v0 = (x[i*QK + l + 0] - min)*id; + const float v1 = (x[i*QK + l + 1] - min)*id; + + const uint8_t vi0 = round(v0); + const uint8_t vi1 = round(v1); + + assert(vi0 >= 0 && vi0 < 16); + assert(vi1 >= 0 && vi1 < 16); + + pp[l/2] = vi0 | (vi1 << 4); + } + + memcpy(pb + i*QK/2, pp, sizeof(pp)); + } +} + +// TODO: vectorize +void dequantize_row_q4_0(const void * restrict x, float * restrict y, int k) { + assert(k % QK == 0); + + const int nb = k / QK; + + const float * restrict pd = (const float *) (x); + const uint8_t * restrict pb = (const uint8_t *) (pd + nb); + + // scalar + for (int i = 0; i < nb; i++) { + const float d = pd[i]; + + const uint8_t * restrict pp = pb + i*QK/2; + + for (int l = 0; l < QK; l += 2) { + const uint8_t vi = pp[l/2]; + + const int8_t vi0 = vi & 0xf; + const int8_t vi1 = vi >> 4; + + const float v0 = (vi0 - 8)*d; + const float v1 = (vi1 - 8)*d; + + y[i*QK + l + 0] = v0; + y[i*QK + l + 1] = v1; + + assert(!isnan(y[i*QK + l + 0])); + assert(!isnan(y[i*QK + l + 1])); + } + } +} + +void dequantize_row_q4_1(const void * restrict x, float * restrict y, int k) { + assert(k % QK == 0); + + const int nb = k / QK; + + const float * restrict pm = (const float *) (x); + const float * restrict pd = (const float *) (pm + nb); + const uint8_t * restrict pb = (const uint8_t *) (pd + nb); + + for (int i = 0; i < nb; i++) { + const float m = pm[i]; + const float d = pd[i]; + + const uint8_t * restrict pp = pb + i*QK/2; + + for (int l = 0; l < QK; l += 2) { + const uint8_t vi = pp[l/2]; + + const int8_t vi0 = vi & 0xf; + const int8_t vi1 = vi >> 4; + + const float v0 = vi0*d + m; + const float v1 = vi1*d + m; + + y[i*QK + l + 0] = v0; + y[i*QK + l + 1] = v1; + + assert(!isnan(y[i*QK + l + 0])); + assert(!isnan(y[i*QK + l + 1])); + } + } +} + // // simd mappings // @@ -925,6 +1169,264 @@ inline static void ggml_vec_dot_f16(const int n, float * restrict s, ggml_fp16_t *s = sumf; } +inline static void ggml_vec_dot_q4_0(const int n, float * restrict s, const void * restrict x, const void * restrict y) { + const int nb = n / QK; + + assert(n % QK == 0); + assert(nb % 2 == 0); + + const float * restrict pd0 = (const float *) x; + const float * restrict pd1 = (const float *) y; + + const uint8_t * restrict pb0 = (const uint8_t *) (pd0 + nb); + const uint8_t * restrict pb1 = (const uint8_t *) (pd1 + nb); + + float sumf = 0.0; + +#ifdef __ARM_NEON +#if QK == 32 + float sum0 = 0.0f; + float sum1 = 0.0f; + + for (int i = 0; i < nb; i += 2) { + const float d0_0 = pd0[i + 0]; + const float d1_0 = pd1[i + 0]; + const float d0_1 = pd0[i + 1]; + const float d1_1 = pd1[i + 1]; + + //printf("d0_0: %f, d1_0: %f, d0_1: %f, d1_1: %f\n", d0_0, d1_0, d0_1, d1_1); + + const uint8_t * restrict p0 = pb0 + i*16; + const uint8_t * restrict p1 = pb1 + i*16; + + const uint8x16_t m4b = vdupq_n_u8(0xf); + const int8x16_t s8b = vdupq_n_s8(0x8); + + const uint8x16_t v0_0 = vld1q_u8(p0); + const uint8x16_t v1_0 = vld1q_u8(p1); + const uint8x16_t v0_1 = vld1q_u8(p0 + 16); + const uint8x16_t v1_1 = vld1q_u8(p1 + 16); + + // 4-bit -> 8-bit + const int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8(v0_0, m4b)); + const int8x16_t v1_0l = vreinterpretq_s8_u8(vandq_u8(v1_0, m4b)); + + const int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4)); + const int8x16_t v1_0h = vreinterpretq_s8_u8(vshrq_n_u8(v1_0, 4)); + + const int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8(v0_1, m4b)); + const int8x16_t v1_1l = 
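// Worked example, illustrative only (mirrors the scalar Q4_0 path above): for a
// block with amax = 1.0 the delta is d = 1.0/7 ≈ 0.1429, so x = -1.0 maps to
// round(-7.0) + 8 = 1 and x = 0.25 maps to round(1.75) + 8 = 10; dequantizing
// gives (1 - 8)*d = -1.0 and (10 - 8)*d ≈ 0.2857. The round-trip error is thus
// bounded by d/2 per element, and two quantized values share one byte with the
// low nibble stored first (vi0 | (vi1 << 4)).

#include <assert.h>
#include <math.h>
#include <stdint.h>

// hypothetical helpers, not part of ggml: pack/unpack a single pair of Q4_0 values
static uint8_t q4_0_pack_pair(float v0, float v1, float d) {
    const uint8_t i0 = (uint8_t) ((int8_t) roundf(v0/d) + 8);
    const uint8_t i1 = (uint8_t) ((int8_t) roundf(v1/d) + 8);
    return i0 | (i1 << 4);
}

static void q4_0_unpack_pair(uint8_t b, float d, float * v0, float * v1) {
    *v0 = ((int8_t) (b & 0xf) - 8)*d;
    *v1 = ((int8_t) (b >> 4)  - 8)*d;
}

static void q4_0_roundtrip_example(void) {
    const float d = 1.0f/7.0f; // amax = 1.0 over the block
    float v0, v1;
    q4_0_unpack_pair(q4_0_pack_pair(-1.0f, 0.25f, d), d, &v0, &v1);
    assert(fabsf(v0 + 1.0f ) <= d/2 + 1e-6f);
    assert(fabsf(v1 - 0.25f) <= d/2 + 1e-6f);
}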
vreinterpretq_s8_u8(vandq_u8(v1_1, m4b)); + + const int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4)); + const int8x16_t v1_1h = vreinterpretq_s8_u8(vshrq_n_u8(v1_1, 4)); + + // sub 8 + const int8x16_t v0_0ls = vsubq_s8(v0_0l, s8b); + const int8x16_t v1_0ls = vsubq_s8(v1_0l, s8b); + + const int8x16_t v0_0hs = vsubq_s8(v0_0h, s8b); + const int8x16_t v1_0hs = vsubq_s8(v1_0h, s8b); + + const int8x16_t v0_1ls = vsubq_s8(v0_1l, s8b); + const int8x16_t v1_1ls = vsubq_s8(v1_1l, s8b); + + const int8x16_t v0_1hs = vsubq_s8(v0_1h, s8b); + const int8x16_t v1_1hs = vsubq_s8(v1_1h, s8b); + + // dot product into int16x8_t + const int16x8_t pl0l = vmull_s8(vget_low_s8 (v0_0ls), vget_low_s8 (v1_0ls)); + const int16x8_t pl0h = vmull_s8(vget_high_s8(v0_0ls), vget_high_s8(v1_0ls)); + + const int16x8_t ph0l = vmull_s8(vget_low_s8 (v0_0hs), vget_low_s8 (v1_0hs)); + const int16x8_t ph0h = vmull_s8(vget_high_s8(v0_0hs), vget_high_s8(v1_0hs)); + + const int16x8_t pl1l = vmull_s8(vget_low_s8 (v0_1ls), vget_low_s8 (v1_1ls)); + const int16x8_t pl1h = vmull_s8(vget_high_s8(v0_1ls), vget_high_s8(v1_1ls)); + + const int16x8_t ph1l = vmull_s8(vget_low_s8 (v0_1hs), vget_low_s8 (v1_1hs)); + const int16x8_t ph1h = vmull_s8(vget_high_s8(v0_1hs), vget_high_s8(v1_1hs)); + + const int16x8_t pl_0 = vaddq_s16(pl0l, pl0h); + const int16x8_t ph_0 = vaddq_s16(ph0l, ph0h); + + const int16x8_t pl_1 = vaddq_s16(pl1l, pl1h); + const int16x8_t ph_1 = vaddq_s16(ph1l, ph1h); + + const int16x8_t p_0 = vaddq_s16(pl_0, ph_0); + const int16x8_t p_1 = vaddq_s16(pl_1, ph_1); + + // scalar +#if defined(__ARM_FEATURE_QRDMX) + sum0 += d0_0*d1_0*vaddvq_s16(p_0); + sum1 += d0_1*d1_1*vaddvq_s16(p_1); +#else + sum0 += d0_0*d1_0*(vgetq_lane_s16(p_0, 0) + vgetq_lane_s16(p_0, 1) + vgetq_lane_s16(p_0, 2) + vgetq_lane_s16(p_0, 3) + vgetq_lane_s16(p_0, 4) + vgetq_lane_s16(p_0, 5) + vgetq_lane_s16(p_0, 6) + vgetq_lane_s16(p_0, 7)); + sum1 += d0_1*d1_1*(vgetq_lane_s16(p_1, 0) + vgetq_lane_s16(p_1, 1) + vgetq_lane_s16(p_1, 2) + vgetq_lane_s16(p_1, 3) + vgetq_lane_s16(p_1, 4) + vgetq_lane_s16(p_1, 5) + vgetq_lane_s16(p_1, 6) + vgetq_lane_s16(p_1, 7)); +#endif + } + + sumf = sum0 + sum1; +#else +#error "not implemented for QK" +#endif +#elif defined(__wasm_simd128__) +#if QK == 32 + // wasm simd + float sum0 = 0.0f; + float sum1 = 0.0f; + + for (int i = 0; i < nb; i += 2) { + const float d0_0 = pd0[i + 0]; + const float d0_1 = pd0[i + 1]; + const float d1_0 = pd1[i + 0]; + const float d1_1 = pd1[i + 1]; + + const uint8_t * restrict p0 = pb0 + i*16; + const uint8_t * restrict p1 = pb1 + i*16; + + const v128_t m4b = wasm_u8x16_splat(0xf); + const v128_t s8b = wasm_i8x16_splat(0x8); + + const v128_t v0_0 = wasm_v128_load(p0); + const v128_t v0_1 = wasm_v128_load(p0 + 16); + const v128_t v1_0 = wasm_v128_load(p1); + const v128_t v1_1 = wasm_v128_load(p1 + 16); + + // 4-bit -> 8-bit + const v128_t v0_0l = wasm_v128_and(v0_0, m4b); + const v128_t v1_0l = wasm_v128_and(v1_0, m4b); + + const v128_t v0_0h = wasm_u8x16_shr(v0_0, 4); + const v128_t v1_0h = wasm_u8x16_shr(v1_0, 4); + + const v128_t v0_1l = wasm_v128_and(v0_1, m4b); + const v128_t v1_1l = wasm_v128_and(v1_1, m4b); + + const v128_t v0_1h = wasm_u8x16_shr(v0_1, 4); + const v128_t v1_1h = wasm_u8x16_shr(v1_1, 4); + + // sub 8 + const v128_t v0_0ls = wasm_i8x16_sub(v0_0l, s8b); + const v128_t v1_0ls = wasm_i8x16_sub(v1_0l, s8b); + + const v128_t v0_0hs = wasm_i8x16_sub(v0_0h, s8b); + const v128_t v1_0hs = wasm_i8x16_sub(v1_0h, s8b); + + const v128_t v0_1ls = wasm_i8x16_sub(v0_1l, s8b); + const v128_t 
v1_1ls = wasm_i8x16_sub(v1_1l, s8b); + + const v128_t v0_1hs = wasm_i8x16_sub(v0_1h, s8b); + const v128_t v1_1hs = wasm_i8x16_sub(v1_1h, s8b); + + // dot product into int16x8_t + const v128_t pl0l = wasm_i16x8_mul(wasm_i16x8_extend_low_i8x16(v0_0ls), wasm_i16x8_extend_low_i8x16(v1_0ls)); + const v128_t pl0h = wasm_i16x8_mul(wasm_i16x8_extend_high_i8x16(v0_0ls), wasm_i16x8_extend_high_i8x16(v1_0ls)); + + const v128_t ph0l = wasm_i16x8_mul(wasm_i16x8_extend_low_i8x16(v0_0hs), wasm_i16x8_extend_low_i8x16(v1_0hs)); + const v128_t ph0h = wasm_i16x8_mul(wasm_i16x8_extend_high_i8x16(v0_0hs), wasm_i16x8_extend_high_i8x16(v1_0hs)); + + const v128_t pl1l = wasm_i16x8_mul(wasm_i16x8_extend_low_i8x16(v0_1ls), wasm_i16x8_extend_low_i8x16(v1_1ls)); + const v128_t pl1h = wasm_i16x8_mul(wasm_i16x8_extend_high_i8x16(v0_1ls), wasm_i16x8_extend_high_i8x16(v1_1ls)); + + const v128_t ph1l = wasm_i16x8_mul(wasm_i16x8_extend_low_i8x16(v0_1hs), wasm_i16x8_extend_low_i8x16(v1_1hs)); + const v128_t ph1h = wasm_i16x8_mul(wasm_i16x8_extend_high_i8x16(v0_1hs), wasm_i16x8_extend_high_i8x16(v1_1hs)); + + const v128_t pl_0 = wasm_i16x8_add(pl0l, pl0h); + const v128_t ph_0 = wasm_i16x8_add(ph0l, ph0h); + + const v128_t pl_1 = wasm_i16x8_add(pl1l, pl1h); + const v128_t ph_1 = wasm_i16x8_add(ph1l, ph1h); + + const v128_t p_0 = wasm_i16x8_add(pl_0, ph_0); + const v128_t p_1 = wasm_i16x8_add(pl_1, ph_1); + + sum0 += d0_0*d1_0*( + wasm_i16x8_extract_lane(p_0, 0) + wasm_i16x8_extract_lane(p_0, 1) + + wasm_i16x8_extract_lane(p_0, 2) + wasm_i16x8_extract_lane(p_0, 3) + + wasm_i16x8_extract_lane(p_0, 4) + wasm_i16x8_extract_lane(p_0, 5) + + wasm_i16x8_extract_lane(p_0, 6) + wasm_i16x8_extract_lane(p_0, 7)); + sum1 += d0_1*d1_1*( + wasm_i16x8_extract_lane(p_1, 0) + wasm_i16x8_extract_lane(p_1, 1) + + wasm_i16x8_extract_lane(p_1, 2) + wasm_i16x8_extract_lane(p_1, 3) + + wasm_i16x8_extract_lane(p_1, 4) + wasm_i16x8_extract_lane(p_1, 5) + + wasm_i16x8_extract_lane(p_1, 6) + wasm_i16x8_extract_lane(p_1, 7)); + } + + sumf = sum0 + sum1; +#else +#error "not implemented for QK" +#endif +#else + // scalar + for (int i = 0; i < nb; i++) { + const float d0 = pd0[i]; + const float d1 = pd1[i]; + + const uint8_t * restrict p0 = pb0 + i*QK/2; + const uint8_t * restrict p1 = pb1 + i*QK/2; + + for (int j = 0; j < QK/2; j++) { + const uint8_t v0 = p0[j]; + const uint8_t v1 = p1[j]; + + const float f0 = d0*((int8_t) (v0 & 0xf) - 8); + const float f1 = d0*((int8_t) (v0 >> 4) - 8); + + const float f2 = d1*((int8_t) (v1 & 0xf) - 8); + const float f3 = d1*((int8_t) (v1 >> 4) - 8); + + sumf += f0*f2 + f1*f3; + } + } +#endif + + *s = sumf; +} + +inline static void ggml_vec_dot_q4_1(const int n, float * restrict s, const void * restrict x, const void * restrict y) { + const int nb = n / QK; + + const float * restrict pm0 = (const float *) x; + const float * restrict pm1 = (const float *) y; + + const float * restrict pd0 = (const float *) (pm0 + nb); + const float * restrict pd1 = (const float *) (pm1 + nb); + + const uint8_t * restrict pb0 = (const uint8_t *) (pd0 + nb); + const uint8_t * restrict pb1 = (const uint8_t *) (pd1 + nb); + + float sumf = 0.0; + +#if 1 + // scalar + for (int i = 0; i < nb; i++) { + const float m0 = pm0[i]; + const float m1 = pm1[i]; + + const float d0 = pd0[i]; + const float d1 = pd1[i]; + + const uint8_t * restrict p0 = pb0 + i*QK/2; + const uint8_t * restrict p1 = pb1 + i*QK/2; + + for (int j = 0; j < QK/2; j++) { + const uint8_t v0 = p0[j]; + const uint8_t v1 = p1[j]; + + const float f0 = d0*(v0 & 0xf) + m0; + const float 
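// Illustrative identity, not part of the original patch: within one Q4_0 block
// pair, sum_l x0[l]*x1[l] = d0*d1 * sum_l (q0[l] - 8)*(q1[l] - 8), which is why
// the SIMD paths above can accumulate the integer products and apply the two
// deltas only once per block instead of dequantizing to floats first.

#include <stdint.h>

// hypothetical scalar reference for a single pair of Q4_0 blocks (QK == 32)
static float q4_0_block_dot_example(float d0, const uint8_t * q0, float d1, const uint8_t * q1) {
    int sumi = 0;
    for (int j = 0; j < 32/2; ++j) {
        sumi += ((q0[j] & 0xf) - 8)*((q1[j] & 0xf) - 8)
              + ((q0[j] >>  4) - 8)*((q1[j] >>  4) - 8);
    }
    return d0*d1*(float) sumi;
}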
f1 = d0*(v0 >> 4) + m0; + + const float f2 = d1*(v1 & 0xf) + m1; + const float f3 = d1*(v1 >> 4) + m1; + + sumf += f0*f2 + f1*f3; + } + } +#endif + + *s = sumf; +} + // compute GGML_VEC_DOT_UNROLL dot products at once // xs - x row stride in bytes inline static void ggml_vec_dot_f16_unroll(const int n, const int xs, float * restrict s, void * restrict xv, ggml_fp16_t * restrict y) { @@ -1042,6 +1544,134 @@ inline static void ggml_vec_mad_f16(const int n, ggml_fp16_t * restrict y, ggml_ #endif } +inline static void ggml_vec_mad_q4_0(const int n, float * restrict y, void * restrict x, const float v) { + assert(n % QK == 0); + + const int nb = n / QK; + + const float * restrict pd = (const float *) (x); + const uint8_t * restrict pb = (const uint8_t *) (pd + nb); + +#if __ARM_NEON +#if QK == 32 + for (int i = 0; i < nb; ++i) { + const float d0 = pd[i]*v; + + const uint8_t * restrict pp = pb + i*16; + + const uint8x8_t m4b = vdup_n_u8(0xf); + const int8x8_t s8b = vdup_n_s8(0x8); + + const float32x4_t vd = vdupq_n_f32(d0); + + for (int j = 0; j < 2; j++) { + const uint8x8_t vx = vld1_u8(pp + j*8); + + const int8x8_t vxl = vreinterpret_s8_u8(vand_u8(vx, m4b)); + const int8x8_t vxh = vreinterpret_s8_u8(vshr_n_u8(vx, 4)); + + // sub 8 + const int8x8_t vxls = vsub_s8(vxl, s8b); + const int8x8_t vxhs = vsub_s8(vxh, s8b); + + //const int8x8_t vxlt = vzip_s8(vxls, vxhs)[0]; + //const int8x8_t vxht = vzip_s8(vxls, vxhs)[1]; + const int8x8_t vxlt = vzip1_s8(vxls, vxhs); + const int8x8_t vxht = vzip2_s8(vxls, vxhs); + + const int8x16_t vxq = vcombine_s8(vxlt, vxht); + + // convert to 2x int16x8_t + const int16x8_t vxq0 = vmovl_s8(vget_low_s8 (vxq)); + const int16x8_t vxq1 = vmovl_s8(vget_high_s8(vxq)); + + // convert to 4x float32x4_t + const float32x4_t vx0 = vcvtq_f32_s32(vmovl_s16(vget_low_s16 (vxq0))); + const float32x4_t vx1 = vcvtq_f32_s32(vmovl_s16(vget_high_s16(vxq0))); + const float32x4_t vx2 = vcvtq_f32_s32(vmovl_s16(vget_low_s16 (vxq1))); + const float32x4_t vx3 = vcvtq_f32_s32(vmovl_s16(vget_high_s16(vxq1))); + + const float32x4_t vy0 = vld1q_f32(y + i*32 + j*16 + 0); + const float32x4_t vy1 = vld1q_f32(y + i*32 + j*16 + 4); + const float32x4_t vy2 = vld1q_f32(y + i*32 + j*16 + 8); + const float32x4_t vy3 = vld1q_f32(y + i*32 + j*16 + 12); + + const float32x4_t vr0 = vfmaq_f32(vy0, vx0, vd); + const float32x4_t vr1 = vfmaq_f32(vy1, vx1, vd); + const float32x4_t vr2 = vfmaq_f32(vy2, vx2, vd); + const float32x4_t vr3 = vfmaq_f32(vy3, vx3, vd); + + vst1q_f32(y + i*32 + j*16 + 0, vr0); + vst1q_f32(y + i*32 + j*16 + 4, vr1); + vst1q_f32(y + i*32 + j*16 + 8, vr2); + vst1q_f32(y + i*32 + j*16 + 12, vr3); + } + } +#endif +#else + // scalar + for (int i = 0; i < nb; i++) { + const float d = pd[i]; + + const uint8_t * restrict pp = pb + i*QK/2; + + for (int l = 0; l < QK; l += 2) { + const uint8_t vi = pp[l/2]; + + const int8_t vi0 = vi & 0xf; + const int8_t vi1 = vi >> 4; + + const float v0 = (vi0 - 8)*d; + const float v1 = (vi1 - 8)*d; + + y[i*QK + l + 0] += v0*v; + y[i*QK + l + 1] += v1*v; + + assert(!isnan(y[i*QK + l + 0])); + assert(!isnan(y[i*QK + l + 1])); + assert(!isinf(y[i*QK + l + 0])); + assert(!isinf(y[i*QK + l + 1])); + } + } +#endif +} + +inline static void ggml_vec_mad_q4_1(const int n, float * restrict y, void * restrict x, const float v) { + assert(n % QK == 0); + + const int nb = n / QK; + + const float * restrict pm = (const float *) (x); + const float * restrict pd = (const float *) (pm + nb); + const uint8_t * restrict pb = (const uint8_t *) (pd + nb); + + for (int i = 0; i < nb; 
i++) { + const float m = pm[i]; + const float d = pd[i]; + + const uint8_t * restrict pp = pb + i*QK/2; + + for (int l = 0; l < QK; l += 2) { + const uint8_t vi = pp[l/2]; + + const uint8_t vi0 = vi & 0xf; + const uint8_t vi1 = vi >> 4; + + const float v0 = d*vi0 + m; + const float v1 = d*vi1 + m; + + y[i*QK + l + 0] += v0*v; + y[i*QK + l + 1] += v1*v; + + assert(!isnan(y[i*QK + l + 0])); + assert(!isnan(y[i*QK + l + 1])); + assert(!isinf(y[i*QK + l + 0])); + assert(!isinf(y[i*QK + l + 1])); + //printf("mad: v0 %f v1 %f, i = %d, l = %d, d = %f, vi = %d, vi0 = %d, vi1 = %d\n", v0, v1, i, l, d, vi, vi0, vi1); + } + } +} + //inline static void ggml_vec_scale_f32(const int n, float * y, const float v) { for (int i = 0; i < n; ++i) y[i] *= v; } inline static void ggml_vec_scale_f32(const int n, float * y, const float v) { #if defined(GGML_SIMD) @@ -1165,7 +1795,21 @@ inline static void ggml_vec_norm_inv_f32(const int n, float * s, const float * x // data types // +static const int GGML_BLCK_SIZE[GGML_TYPE_COUNT] = { + QK, + QK, + 1, + 1, + 1, + 1, + 1, +}; + +static_assert(GGML_TYPE_COUNT == 7, "GGML_TYPE_COUNT != 5"); + static const size_t GGML_TYPE_SIZE[GGML_TYPE_COUNT] = { + sizeof(float ) + QK/2, + sizeof(float )*2 + QK/2, sizeof(int8_t ), sizeof(int16_t), sizeof(int32_t), @@ -1173,6 +1817,9 @@ static const size_t GGML_TYPE_SIZE[GGML_TYPE_COUNT] = { sizeof(float ), }; +// don't forget to update the array above when adding new types +static_assert(GGML_TYPE_COUNT == 7, "GGML_TYPE_COUNT != 5"); + static const char * GGML_OP_LABEL[GGML_OP_COUNT] = { "NONE", @@ -1213,6 +1860,8 @@ static const char * GGML_OP_LABEL[GGML_OP_COUNT] = { "FLASH_FF", }; +static_assert(GGML_OP_COUNT == 33, "GGML_OP_COUNT != 33"); + static const char * GGML_OP_SYMBOL[GGML_OP_COUNT] = { "none", @@ -1253,6 +1902,8 @@ static const char * GGML_OP_SYMBOL[GGML_OP_COUNT] = { "flash_ff(x)", }; +static_assert(GGML_OP_COUNT == 33, "GGML_OP_COUNT != 33"); + // // ggml object // @@ -1380,13 +2031,21 @@ int ggml_nrows(const struct ggml_tensor * tensor) { size_t ggml_nbytes(const struct ggml_tensor * tensor) { static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function"); - return ggml_nelements(tensor)*GGML_TYPE_SIZE[tensor->type]; + return (ggml_nelements(tensor)*GGML_TYPE_SIZE[tensor->type])/GGML_BLCK_SIZE[tensor->type]; +} + +int ggml_blck_size(enum ggml_type type) { + return GGML_BLCK_SIZE[type]; } size_t ggml_type_size(enum ggml_type type) { return GGML_TYPE_SIZE[type]; } +float ggml_type_sizef(enum ggml_type type) { + return ((float)(GGML_TYPE_SIZE[type]))/GGML_BLCK_SIZE[type]; +} + size_t ggml_element_size(const struct ggml_tensor * tensor) { return GGML_TYPE_SIZE[tensor->type]; } @@ -1423,7 +2082,7 @@ static inline bool ggml_is_contiguous(const struct ggml_tensor * tensor) { return tensor->nb[0] == GGML_TYPE_SIZE[tensor->type] && - tensor->nb[1] == tensor->nb[0]*tensor->ne[0] && + tensor->nb[1] == (tensor->nb[0]*tensor->ne[0])/GGML_BLCK_SIZE[tensor->type] && tensor->nb[2] == tensor->nb[1]*tensor->ne[1] && tensor->nb[3] == tensor->nb[2]*tensor->ne[2]; } @@ -1623,8 +2282,8 @@ struct ggml_tensor * ggml_new_tensor_impl( size_t size_needed = 0; if (data == NULL) { - size_needed += GGML_TYPE_SIZE[type]; - for (int i = 0; i < n_dims; i++) { + size_needed += GGML_TYPE_SIZE[type]*(ne[0]/GGML_BLCK_SIZE[type]); + for (int i = 1; i < n_dims; i++) { size_needed *= ne[i]; } // align to GGML_MEM_ALIGN @@ -1717,7 +2376,8 @@ struct ggml_tensor * ggml_new_tensor_impl( } result->nb[0] = GGML_TYPE_SIZE[type]; - for (int i = 
1; i < GGML_MAX_DIMS; i++) { + result->nb[1] = result->nb[0]*(result->ne[0]/GGML_BLCK_SIZE[type]); + for (int i = 2; i < GGML_MAX_DIMS; i++) { result->nb[i] = result->nb[i - 1]*result->ne[i - 1]; } @@ -1814,6 +2474,14 @@ struct ggml_tensor * ggml_set_i32 (struct ggml_tensor * tensor, int32_t value) { char * const data = tensor->data; switch (tensor->type) { + case GGML_TYPE_Q4_0: + { + GGML_ASSERT(false); + } break; + case GGML_TYPE_Q4_1: + { + GGML_ASSERT(false); + } break; case GGML_TYPE_I8: { assert(tensor->nb[0] == sizeof(int8_t)); @@ -1851,7 +2519,7 @@ struct ggml_tensor * ggml_set_i32 (struct ggml_tensor * tensor, int32_t value) { } break; case GGML_TYPE_COUNT: { - assert(false); + GGML_ASSERT(false); } break; } @@ -1866,6 +2534,14 @@ struct ggml_tensor * ggml_set_f32(struct ggml_tensor * tensor, float value) { char * const data = tensor->data; switch (tensor->type) { + case GGML_TYPE_Q4_0: + { + GGML_ASSERT(false); + } break; + case GGML_TYPE_Q4_1: + { + GGML_ASSERT(false); + } break; case GGML_TYPE_I8: { assert(tensor->nb[0] == sizeof(int8_t)); @@ -1903,7 +2579,7 @@ struct ggml_tensor * ggml_set_f32(struct ggml_tensor * tensor, float value) { } break; case GGML_TYPE_COUNT: { - assert(false); + GGML_ASSERT(false); } break; } @@ -1912,6 +2588,14 @@ struct ggml_tensor * ggml_set_f32(struct ggml_tensor * tensor, float value) { int32_t ggml_get_i32_1d(const struct ggml_tensor * tensor, int i) { switch (tensor->type) { + case GGML_TYPE_Q4_0: + { + GGML_ASSERT(false); + } break; + case GGML_TYPE_Q4_1: + { + GGML_ASSERT(false); + } break; case GGML_TYPE_I8: { GGML_ASSERT(tensor->nb[0] == sizeof(int8_t)); @@ -1948,6 +2632,14 @@ int32_t ggml_get_i32_1d(const struct ggml_tensor * tensor, int i) { void ggml_set_i32_1d(const struct ggml_tensor * tensor, int i, int32_t value) { switch (tensor->type) { + case GGML_TYPE_Q4_0: + { + GGML_ASSERT(false); + } break; + case GGML_TYPE_Q4_1: + { + GGML_ASSERT(false); + } break; case GGML_TYPE_I8: { GGML_ASSERT(tensor->nb[0] == sizeof(int8_t)); @@ -1982,6 +2674,14 @@ void ggml_set_i32_1d(const struct ggml_tensor * tensor, int i, int32_t value) { float ggml_get_f32_1d(const struct ggml_tensor * tensor, int i) { switch (tensor->type) { + case GGML_TYPE_Q4_0: + { + GGML_ASSERT(false); + } break; + case GGML_TYPE_Q4_1: + { + GGML_ASSERT(false); + } break; case GGML_TYPE_I8: { GGML_ASSERT(tensor->nb[0] == sizeof(int8_t)); @@ -2018,6 +2718,14 @@ float ggml_get_f32_1d(const struct ggml_tensor * tensor, int i) { void ggml_set_f32_1d(const struct ggml_tensor * tensor, int i, float value) { switch (tensor->type) { + case GGML_TYPE_Q4_0: + { + GGML_ASSERT(false); + } break; + case GGML_TYPE_Q4_1: + { + GGML_ASSERT(false); + } break; case GGML_TYPE_I8: { GGML_ASSERT(tensor->nb[0] == sizeof(int8_t)); @@ -2108,7 +2816,7 @@ struct ggml_tensor * ggml_add_impl( struct ggml_tensor * a, struct ggml_tensor * b, bool inplace) { - assert(ggml_are_same_shape(a, b)); + GGML_ASSERT(ggml_are_same_shape(a, b)); bool is_node = false; @@ -2147,7 +2855,7 @@ struct ggml_tensor * ggml_sub_impl( struct ggml_tensor * a, struct ggml_tensor * b, bool inplace) { - assert(ggml_are_same_shape(a, b)); + GGML_ASSERT(ggml_are_same_shape(a, b)); bool is_node = false; @@ -2186,7 +2894,7 @@ struct ggml_tensor * ggml_mul_impl( struct ggml_tensor * a, struct ggml_tensor * b, bool inplace) { - assert(ggml_are_same_shape(a, b)); + GGML_ASSERT(ggml_are_same_shape(a, b)); bool is_node = false; @@ -2195,7 +2903,7 @@ struct ggml_tensor * ggml_mul_impl( } if (inplace) { - assert(is_node == false); + 
GGML_ASSERT(is_node == false); } struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a); @@ -2229,7 +2937,7 @@ struct ggml_tensor * ggml_div_impl( struct ggml_tensor * a, struct ggml_tensor * b, bool inplace) { - assert(ggml_are_same_shape(a, b)); + GGML_ASSERT(ggml_are_same_shape(a, b)); bool is_node = false; @@ -2238,7 +2946,7 @@ struct ggml_tensor * ggml_div_impl( } if (inplace) { - assert(is_node == false); + GGML_ASSERT(is_node == false); } struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a); @@ -2362,7 +3070,7 @@ struct ggml_tensor * ggml_mean( bool is_node = false; if (a->grad) { - assert(false); // TODO: implement + GGML_ASSERT(false); // TODO: implement is_node = true; } @@ -2383,7 +3091,7 @@ struct ggml_tensor * ggml_repeat( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b) { - assert(ggml_can_repeat(a, b)); + GGML_ASSERT(ggml_can_repeat(a, b)); bool is_node = false; @@ -2619,7 +3327,7 @@ struct ggml_tensor * ggml_norm_impl( bool is_node = false; if (!inplace && (a->grad)) { - assert(false); // TODO: implement backward + GGML_ASSERT(false); // TODO: implement backward is_node = true; } @@ -2651,7 +3359,7 @@ struct ggml_tensor * ggml_mul_mat( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b) { - assert(ggml_can_mul_mat(a, b)); + GGML_ASSERT(ggml_can_mul_mat(a, b)); bool is_node = false; @@ -2677,13 +3385,13 @@ struct ggml_tensor * ggml_scale_impl( struct ggml_tensor * a, struct ggml_tensor * b, bool inplace) { - assert(ggml_is_scalar(b)); - assert(ggml_is_padded_1d(a)); + GGML_ASSERT(ggml_is_scalar(b)); + GGML_ASSERT(ggml_is_padded_1d(a)); bool is_node = false; if (!inplace && (a->grad || b->grad)) { - assert(false); // TODO: implement backward + GGML_ASSERT(false); // TODO: implement backward is_node = true; } @@ -2720,12 +3428,12 @@ struct ggml_tensor * ggml_cpy_impl( struct ggml_tensor * a, struct ggml_tensor * b, bool inplace) { - assert(ggml_nelements(a) == ggml_nelements(b)); + GGML_ASSERT(ggml_nelements(a) == ggml_nelements(b)); bool is_node = false; if (!inplace && (a->grad || b->grad)) { - assert(false); // TODO: implement backward + GGML_ASSERT(false); // TODO: implement backward is_node = true; } @@ -2760,14 +3468,14 @@ struct ggml_tensor * ggml_reshape( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b) { - assert(ggml_is_contiguous(a)); - assert(ggml_is_contiguous(b)); - assert(ggml_nelements(a) == ggml_nelements(b)); + GGML_ASSERT(ggml_is_contiguous(a)); + GGML_ASSERT(ggml_is_contiguous(b)); + GGML_ASSERT(ggml_nelements(a) == ggml_nelements(b)); bool is_node = false; if (a->grad || b->grad) { - assert(false); // TODO: implement backward + GGML_ASSERT(false); // TODO: implement backward is_node = true; } @@ -2786,13 +3494,13 @@ struct ggml_tensor * ggml_reshape_2d( struct ggml_tensor * a, int ne0, int ne1) { - assert(ggml_is_contiguous(a)); - assert(ggml_nelements(a) == ne0*ne1); + GGML_ASSERT(ggml_is_contiguous(a)); + GGML_ASSERT(ggml_nelements(a) == ne0*ne1); bool is_node = false; if (a->grad) { - assert(false); // TODO: implement backward + GGML_ASSERT(false); // TODO: implement backward is_node = true; } @@ -2813,13 +3521,13 @@ struct ggml_tensor * ggml_reshape_3d( int ne0, int ne1, int ne2) { - assert(ggml_is_contiguous(a)); - assert(ggml_nelements(a) == ne0*ne1*ne2); + GGML_ASSERT(ggml_is_contiguous(a)); + GGML_ASSERT(ggml_nelements(a) == ne0*ne1*ne2); bool is_node = false; if (a->grad) { - assert(false); // TODO: 
implement backward + GGML_ASSERT(false); // TODO: implement backward is_node = true; } @@ -2842,7 +3550,7 @@ struct ggml_tensor * ggml_view_1d( int ne0, size_t offset) { if (a->grad) { - assert(false); // gradient propagation is not supported + GGML_ASSERT(false); // gradient propagation is not supported } struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 1, &ne0, (char *) a->data + offset); @@ -2865,7 +3573,7 @@ struct ggml_tensor * ggml_view_2d( size_t nb1, size_t offset) { if (a->grad) { - assert(false); // gradient propagation is not supported + GGML_ASSERT(false); // gradient propagation is not supported } const int ne[GGML_MAX_DIMS] = { ne0, ne1, 1, 1 }; @@ -2893,22 +3601,22 @@ struct ggml_tensor * ggml_permute( int axis1, int axis2, int axis3) { - assert(axis0 >= 0 && axis0 < GGML_MAX_DIMS); - assert(axis1 >= 0 && axis1 < GGML_MAX_DIMS); - assert(axis2 >= 0 && axis2 < GGML_MAX_DIMS); - assert(axis3 >= 0 && axis3 < GGML_MAX_DIMS); - - assert(axis0 != axis1); - assert(axis0 != axis2); - assert(axis0 != axis3); - assert(axis1 != axis2); - assert(axis1 != axis3); - assert(axis2 != axis3); + GGML_ASSERT(axis0 >= 0 && axis0 < GGML_MAX_DIMS); + GGML_ASSERT(axis1 >= 0 && axis1 < GGML_MAX_DIMS); + GGML_ASSERT(axis2 >= 0 && axis2 < GGML_MAX_DIMS); + GGML_ASSERT(axis3 >= 0 && axis3 < GGML_MAX_DIMS); + + GGML_ASSERT(axis0 != axis1); + GGML_ASSERT(axis0 != axis2); + GGML_ASSERT(axis0 != axis3); + GGML_ASSERT(axis1 != axis2); + GGML_ASSERT(axis1 != axis3); + GGML_ASSERT(axis2 != axis3); bool is_node = false; if (a->grad) { - assert(false); // TODO: implement backward + GGML_ASSERT(false); // TODO: implement backward is_node = true; } @@ -2953,7 +3661,7 @@ struct ggml_tensor * ggml_transpose( bool is_node = false; if (a->grad) { - assert(false); // TODO: implement backward + GGML_ASSERT(false); // TODO: implement backward is_node = true; } @@ -2979,12 +3687,12 @@ struct ggml_tensor * ggml_get_rows( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b) { - assert(ggml_is_matrix(a) && ggml_is_vector(b) && b->type == GGML_TYPE_I32); + GGML_ASSERT(ggml_is_matrix(a) && ggml_is_vector(b) && b->type == GGML_TYPE_I32); bool is_node = false; if (a->grad || b->grad) { - assert(false); // TODO: implement backward + GGML_ASSERT(false); // TODO: implement backward is_node = true; } @@ -3009,7 +3717,7 @@ struct ggml_tensor * ggml_diag_mask_inf( bool is_node = false; if (a->grad) { - assert(false); // TODO: implement backward + GGML_ASSERT(false); // TODO: implement backward is_node = true; } @@ -3034,7 +3742,7 @@ struct ggml_tensor * ggml_soft_max( bool is_node = false; if (a->grad) { - assert(false); // TODO: implement backward + GGML_ASSERT(false); // TODO: implement backward is_node = true; } @@ -3058,11 +3766,11 @@ struct ggml_tensor * ggml_rope( int n_past, int n_dims, int mode) { - assert(n_past >= 0); + GGML_ASSERT(n_past >= 0); bool is_node = false; if (a->grad) { - assert(false); // TODO: implement backward + GGML_ASSERT(false); // TODO: implement backward is_node = true; } @@ -3089,13 +3797,13 @@ struct ggml_tensor * ggml_conv_1d_1s( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b) { - assert(ggml_is_matrix(b)); - assert(a->ne[1] == b->ne[1]); - assert(a->ne[3] == 1); + GGML_ASSERT(ggml_is_matrix(b)); + GGML_ASSERT(a->ne[1] == b->ne[1]); + GGML_ASSERT(a->ne[3] == 1); bool is_node = false; if (a->grad || b->grad) { - assert(false); // TODO: implement backward + GGML_ASSERT(false); // TODO: implement backward is_node = true; } @@ -3116,13 
+3824,13 @@ struct ggml_tensor * ggml_conv_1d_2s( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b) { - assert(ggml_is_matrix(b)); - assert(a->ne[1] == b->ne[1]); - assert(a->ne[3] == 1); + GGML_ASSERT(ggml_is_matrix(b)); + GGML_ASSERT(a->ne[1] == b->ne[1]); + GGML_ASSERT(a->ne[3] == 1); bool is_node = false; if (a->grad || b->grad) { - assert(false); // TODO: implement backward + GGML_ASSERT(false); // TODO: implement backward is_node = true; } @@ -3145,7 +3853,7 @@ struct ggml_tensor * ggml_flash_attn( struct ggml_tensor * k, struct ggml_tensor * v, bool masked) { - assert(ggml_can_mul_mat(k, q)); + GGML_ASSERT(ggml_can_mul_mat(k, q)); // TODO: check if vT can be multiplied by (k*qT) bool is_node = false; @@ -3177,7 +3885,7 @@ struct ggml_tensor * ggml_flash_ff( struct ggml_tensor * b1, struct ggml_tensor * c0, struct ggml_tensor * c1) { - assert(ggml_can_mul_mat(b0, a)); + GGML_ASSERT(ggml_can_mul_mat(b0, a)); // TODO: more checks bool is_node = false; @@ -3208,7 +3916,7 @@ void ggml_set_param( struct ggml_tensor * tensor) { tensor->is_param = true; - assert(tensor->grad == NULL); + GGML_ASSERT(tensor->grad == NULL); tensor->grad = ggml_dup_tensor(ctx, tensor); } @@ -3218,9 +3926,9 @@ static void ggml_compute_forward_dup_f16( const struct ggml_compute_params * params, const struct ggml_tensor * src0, struct ggml_tensor * dst) { - assert(params->ith == 0); - assert(ggml_is_contiguous(dst)); - assert(ggml_nelements(dst) == ggml_nelements(src0)); + GGML_ASSERT(params->ith == 0); + GGML_ASSERT(ggml_is_contiguous(dst)); + GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0)); if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { return; @@ -3435,6 +4143,8 @@ static void ggml_compute_forward_dup( { ggml_compute_forward_dup_f32(params, src0, dst); } break; + case GGML_TYPE_Q4_0: + case GGML_TYPE_Q4_1: case GGML_TYPE_I8: case GGML_TYPE_I16: case GGML_TYPE_I32: @@ -3510,13 +4220,15 @@ static void ggml_compute_forward_add( { ggml_compute_forward_add_f32(params, src0, src1, dst); } break; + case GGML_TYPE_Q4_0: + case GGML_TYPE_Q4_1: case GGML_TYPE_I8: case GGML_TYPE_I16: case GGML_TYPE_I32: case GGML_TYPE_F16: case GGML_TYPE_COUNT: { - assert(false); + GGML_ASSERT(false); } break; } } @@ -3560,13 +4272,15 @@ static void ggml_compute_forward_sub( { ggml_compute_forward_sub_f32(params, src0, src1, dst); } break; + case GGML_TYPE_Q4_0: + case GGML_TYPE_Q4_1: case GGML_TYPE_I8: case GGML_TYPE_I16: case GGML_TYPE_I32: case GGML_TYPE_F16: case GGML_TYPE_COUNT: { - assert(false); + GGML_ASSERT(false); } break; } } @@ -3610,13 +4324,15 @@ static void ggml_compute_forward_mul( { ggml_compute_forward_mul_f32(params, src0, src1, dst); } break; + case GGML_TYPE_Q4_0: + case GGML_TYPE_Q4_1: case GGML_TYPE_I8: case GGML_TYPE_I16: case GGML_TYPE_I32: case GGML_TYPE_F16: case GGML_TYPE_COUNT: { - assert(false); + GGML_ASSERT(false); } break; } } @@ -3660,13 +4376,15 @@ static void ggml_compute_forward_div( { ggml_compute_forward_div_f32(params, src0, src1, dst); } break; + case GGML_TYPE_Q4_0: + case GGML_TYPE_Q4_1: case GGML_TYPE_I8: case GGML_TYPE_I16: case GGML_TYPE_I32: case GGML_TYPE_F16: case GGML_TYPE_COUNT: { - assert(false); + GGML_ASSERT(false); } break; } } @@ -3706,13 +4424,15 @@ static void ggml_compute_forward_sqr( { ggml_compute_forward_sqr_f32(params, src0, dst); } break; + case GGML_TYPE_Q4_0: + case GGML_TYPE_Q4_1: case GGML_TYPE_I8: case GGML_TYPE_I16: case GGML_TYPE_I32: case GGML_TYPE_F16: case GGML_TYPE_COUNT: { - assert(false); + 
GGML_ASSERT(false); } break; } } @@ -3752,13 +4472,15 @@ static void ggml_compute_forward_sqrt( { ggml_compute_forward_sqrt_f32(params, src0, dst); } break; + case GGML_TYPE_Q4_0: + case GGML_TYPE_Q4_1: case GGML_TYPE_I8: case GGML_TYPE_I16: case GGML_TYPE_I32: case GGML_TYPE_F16: case GGML_TYPE_COUNT: { - assert(false); + GGML_ASSERT(false); } break; } } @@ -3808,13 +4530,15 @@ static void ggml_compute_forward_sum( { ggml_compute_forward_sum_f32(params, src0, dst); } break; + case GGML_TYPE_Q4_0: + case GGML_TYPE_Q4_1: case GGML_TYPE_I8: case GGML_TYPE_I16: case GGML_TYPE_I32: case GGML_TYPE_F16: case GGML_TYPE_COUNT: { - assert(false); + GGML_ASSERT(false); } break; } } @@ -3883,13 +4607,15 @@ static void ggml_compute_forward_mean( { ggml_compute_forward_mean_f32(params, src0, dst); } break; + case GGML_TYPE_Q4_0: + case GGML_TYPE_Q4_1: case GGML_TYPE_I8: case GGML_TYPE_I16: case GGML_TYPE_I32: case GGML_TYPE_F16: case GGML_TYPE_COUNT: { - assert(false); + GGML_ASSERT(false); } break; } } @@ -3945,13 +4671,15 @@ static void ggml_compute_forward_repeat( { ggml_compute_forward_repeat_f32(params, src0, dst); } break; + case GGML_TYPE_Q4_0: + case GGML_TYPE_Q4_1: case GGML_TYPE_I8: case GGML_TYPE_I16: case GGML_TYPE_I32: case GGML_TYPE_F16: case GGML_TYPE_COUNT: { - assert(false); + GGML_ASSERT(false); } break; } } @@ -3991,13 +4719,15 @@ static void ggml_compute_forward_abs( { ggml_compute_forward_abs_f32(params, src0, dst); } break; + case GGML_TYPE_Q4_0: + case GGML_TYPE_Q4_1: case GGML_TYPE_I8: case GGML_TYPE_I16: case GGML_TYPE_I32: case GGML_TYPE_F16: case GGML_TYPE_COUNT: { - assert(false); + GGML_ASSERT(false); } break; } } @@ -4037,13 +4767,15 @@ static void ggml_compute_forward_sgn( { ggml_compute_forward_sgn_f32(params, src0, dst); } break; + case GGML_TYPE_Q4_0: + case GGML_TYPE_Q4_1: case GGML_TYPE_I8: case GGML_TYPE_I16: case GGML_TYPE_I32: case GGML_TYPE_F16: case GGML_TYPE_COUNT: { - assert(false); + GGML_ASSERT(false); } break; } } @@ -4083,13 +4815,15 @@ static void ggml_compute_forward_neg( { ggml_compute_forward_neg_f32(params, src0, dst); } break; + case GGML_TYPE_Q4_0: + case GGML_TYPE_Q4_1: case GGML_TYPE_I8: case GGML_TYPE_I16: case GGML_TYPE_I32: case GGML_TYPE_F16: case GGML_TYPE_COUNT: { - assert(false); + GGML_ASSERT(false); } break; } } @@ -4129,13 +4863,15 @@ static void ggml_compute_forward_step( { ggml_compute_forward_step_f32(params, src0, dst); } break; + case GGML_TYPE_Q4_0: + case GGML_TYPE_Q4_1: case GGML_TYPE_I8: case GGML_TYPE_I16: case GGML_TYPE_I32: case GGML_TYPE_F16: case GGML_TYPE_COUNT: { - assert(false); + GGML_ASSERT(false); } break; } } @@ -4175,13 +4911,15 @@ static void ggml_compute_forward_relu( { ggml_compute_forward_relu_f32(params, src0, dst); } break; + case GGML_TYPE_Q4_0: + case GGML_TYPE_Q4_1: case GGML_TYPE_I8: case GGML_TYPE_I16: case GGML_TYPE_I32: case GGML_TYPE_F16: case GGML_TYPE_COUNT: { - assert(false); + GGML_ASSERT(false); } break; } } @@ -4238,15 +4976,19 @@ static void ggml_compute_forward_gelu( { ggml_compute_forward_gelu_f32(params, src0, dst); } break; + case GGML_TYPE_Q4_0: + case GGML_TYPE_Q4_1: case GGML_TYPE_I8: case GGML_TYPE_I16: case GGML_TYPE_I32: case GGML_TYPE_F16: case GGML_TYPE_COUNT: { - assert(false); + GGML_ASSERT(false); } break; } + + //printf("XXXXXXXX gelu\n"); } // ggml_compute_forward_norm @@ -4320,13 +5062,15 @@ static void ggml_compute_forward_norm( { ggml_compute_forward_norm_f32(params, src0, dst); } break; + case GGML_TYPE_Q4_0: + case GGML_TYPE_Q4_1: case GGML_TYPE_I8: case GGML_TYPE_I16: case 
GGML_TYPE_I32: case GGML_TYPE_F16: case GGML_TYPE_COUNT: { - assert(false); + GGML_ASSERT(false); } break; } } @@ -4348,9 +5092,8 @@ static bool ggml_compute_forward_mul_mat_use_blas( const int ne1 = dst->ne[1]; // TODO: find the optimal values for these - if (ggml_is_contiguous(src0) && ggml_is_contiguous(src1) && ( - (ne0 >= 32 && ne1 >= 32 && ne10 >= 32) - )) { + if (ggml_is_contiguous(src0) && + ggml_is_contiguous(src1) && ((ne0 >= 32 && ne1 >= 32 && ne10 >= 32))) { //printf("BLAS: %d %d %d\n", ne0, ne1, ne10); return true; } @@ -4449,25 +5192,631 @@ static void ggml_compute_forward_mul_mat_f32( float * d = (float *) ((char *) dst->data + i02*nb2 + i03*nb3); - // zT = y * xT + // zT = y * xT + { + cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasTrans, + ne11, ne01, ne10, + 1.0f, y, ne10, + x, ne10, + 0.0f, d, ne01); + } + } + } + + //printf("CBLAS F32 = %f ms, %d x %d x %d x %d\n", (ggml_perf_time_us() - t0)/1000.0, ne0, ne1, ne2, ne3); + + return; + } +#endif + + if (params->type == GGML_TASK_INIT) { + if (nb01 >= nb00) { + return; + } + + // TODO: fix this memset (wsize is overestimated) + memset(params->wdata, 0, params->wsize); + return; + } + + if (params->type == GGML_TASK_FINALIZE) { + if (nb01 >= nb00) { + return; + } + + // TODO: fix this memset (wsize is overestimated) + //assert(params->wsize == (ggml_nbytes(dst) + CACHE_LINE_SIZE)*nth); + + float * const wdata = params->wdata; + + // cols per thread + const int dc = (ne + nth - 1)/nth; + + // col range for this thread + const int ic0 = dc*ith; + const int ic1 = MIN(ic0 + dc, ne); + + ggml_vec_cpy_f32(ic1 - ic0, (float *) dst->data + ic0, wdata + ic0); + + for (int k = 1; k < nth; k++) { + ggml_vec_acc_f32(ic1 - ic0, (float *) dst->data + ic0, wdata + (ne + CACHE_LINE_SIZE_F32)*k + ic0); + } + + return; + } + + if (nb01 >= nb00) { + // TODO: do not support transposed src1 + assert(nb10 == sizeof(float)); + + // parallelize by src0 rows using ggml_vec_dot_f32 + + // total rows in src0 + const int nr = ne01*ne02*ne03; + + // rows per thread + const int dr = (nr + nth - 1)/nth; + + // row range for this thread + const int ir0 = dr*ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int ir = ir0; ir < ir1; ++ir) { + // src0 indices + const int i03 = ir/(ne02*ne01); + const int i02 = (ir - i03*ne02*ne01)/ne01; + const int i01 = (ir - i03*ne02*ne01 - i02*ne01); + + for (int ic = 0; ic < ne11; ++ic) { + // src1 indices + const int i13 = i03; + const int i12 = i02; + const int i11 = ic; + + // dst indices + const int i0 = i01; + const int i1 = i11; + const int i2 = i02; + const int i3 = i03; + + ggml_vec_dot_f32(ne00, + (float *) ((char *) dst->data + (i0*nb0 + i1*nb1 + i2*nb2 + i3*nb3)), + (float *) ((char *) src0->data + (i01*nb01 + i02*nb02 + i03*nb03)), + (float *) ((char *) src1->data + (i11*nb11 + i12*nb12 + i13*nb13))); + } + } + } else { + // parallelize by src1 columns using ggml_vec_mad_f32 + // each thread has its own work data + // during FINALIZE we accumulate all work data into dst + + // total columns in src1 + const int nc = ne10; + + // columns per thread + const int dc = (nc + nth - 1)/nth; + + // column range for this thread + const int ic0 = dc*ith; + const int ic1 = MIN(ic0 + dc, nc); + + // work data for thread + const int wo = (ne + CACHE_LINE_SIZE_F32)*ith; + float * const wdata = params->wdata; + + for (int i13 = 0; i13 < ne13; ++i13) { + for (int i12 = 0; i12 < ne12; ++i12) { + for (int i11 = 0; i11 < ne11; ++i11) { + for (int ic = ic0; ic < ic1; ++ic) { + // src1 indices + const int i10 = ic; + + // src0 
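// Illustrative note, not part of the original patch: the work split used here is
// a plain ceil-division over src0 rows, e.g. nr = 1000 rows on nth = 7 threads
// gives dr = (1000 + 6)/7 = 143 rows per thread, and the last thread is clamped
// by MIN(ir0 + dr, nr) so it only processes rows 858..999.

static void row_partition_example(void) {
    const int nr  = 1000;
    const int nth = 7;
    const int dr  = (nr + nth - 1)/nth;             // 143
    for (int ith = 0; ith < nth; ++ith) {
        const int ir0 = dr*ith;
        const int ir1 = ir0 + dr < nr ? ir0 + dr : nr;
        (void) ir0;                                 // thread ith handles rows [ir0, ir1)
        (void) ir1;
    }
}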
indices + const int i03 = i13; + const int i02 = i12; + const int i00 = ic; + + // dst indices + const int i1 = i11; + const int i2 = i12; + const int i3 = i13; + + assert(sizeof(float)*(wo + i3*ne2*ne1*ne0 + i2*ne1*ne0 + i1*ne0 + ne01) <= params->wsize); + + ggml_vec_mad_f32(ne01, + (float *) (wdata + wo + i3*ne2*ne1*ne0 + i2*ne1*ne0 + i1*ne0), + (float *) ((char *) src0->data + (i00*nb00 + i02*nb02 + i03*nb03)), + *(float *) ((char *) src1->data + (i10*nb10 + i11*nb11 + i12*nb12 + i13*nb13))); + } + } + } + } + } + + //int64_t t1 = ggml_perf_time_us(); + //static int64_t acc = 0; + //acc += t1 - t0; + //if (t1 - t0 > 10) { + // printf("\n"); + // printf("ne00 = %5d, ne01 = %5d, ne02 = %5d, ne03 = %5d\n", ne00, ne01, ne02, ne03); + // printf("nb00 = %5d, nb01 = %5d, nb02 = %5d, nb03 = %5d\n", nb00, nb01, nb02, nb03); + // printf("ne10 = %5d, ne11 = %5d, ne12 = %5d, ne13 = %5d\n", ne10, ne11, ne12, ne13); + // printf("nb10 = %5d, nb11 = %5d, nb12 = %5d, nb13 = %5d\n", nb10, nb11, nb12, nb13); + + // printf("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX task %d/%d: %d us, acc = %d\n", ith, nth, (int) (t1 - t0), (int) acc); + //} +} + +static void ggml_compute_forward_mul_mat_f16_f32( + const struct ggml_compute_params * params, + const struct ggml_tensor * src0, + const struct ggml_tensor * src1, + struct ggml_tensor * dst) { + int64_t t0 = ggml_perf_time_us(); + UNUSED(t0); + + const int ne00 = src0->ne[0]; + const int ne01 = src0->ne[1]; + const int ne02 = src0->ne[2]; + const int ne03 = src0->ne[3]; + + const int ne10 = src1->ne[0]; + const int ne11 = src1->ne[1]; + const int ne12 = src1->ne[2]; + const int ne13 = src1->ne[3]; + + const int ne0 = dst->ne[0]; + const int ne1 = dst->ne[1]; + const int ne2 = dst->ne[2]; + const int ne3 = dst->ne[3]; + const int ne = ne0*ne1*ne2*ne3; + + const int nb00 = src0->nb[0]; + const int nb01 = src0->nb[1]; + const int nb02 = src0->nb[2]; + const int nb03 = src0->nb[3]; + + const int nb10 = src1->nb[0]; + const int nb11 = src1->nb[1]; + const int nb12 = src1->nb[2]; + const int nb13 = src1->nb[3]; + + const int nb0 = dst->nb[0]; + const int nb1 = dst->nb[1]; + const int nb2 = dst->nb[2]; + const int nb3 = dst->nb[3]; + + const int ith = params->ith; + const int nth = params->nth; + + GGML_ASSERT(ne02 == ne12); + GGML_ASSERT(ne03 == ne13); + GGML_ASSERT(ne2 == ne12); + GGML_ASSERT(ne3 == ne13); + + // TODO: we don't support permuted src0 + GGML_ASSERT(nb00 == sizeof(ggml_fp16_t) || nb01 == sizeof(ggml_fp16_t)); + + // dst cannot be transposed or permuted + GGML_ASSERT(nb0 == sizeof(float)); + GGML_ASSERT(nb0 <= nb1); + GGML_ASSERT(nb1 <= nb2); + GGML_ASSERT(nb2 <= nb3); + + GGML_ASSERT(ne0 == ne01); + GGML_ASSERT(ne1 == ne11); + GGML_ASSERT(ne2 == ne02); + GGML_ASSERT(ne3 == ne03); + + // nb01 >= nb00 - src0 is not transposed + // compute by src0 rows + // + // nb00 < nb01 - src0 is transposed + // compute by src0 columns + +#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS) + if (ggml_compute_forward_mul_mat_use_blas(src0, src1, dst)) { + GGML_ASSERT(nb10 == sizeof(float)); + + if (params->ith != 0) { + return; + } + + if (params->type == GGML_TASK_INIT) { + return; + } + + if (params->type == GGML_TASK_FINALIZE) { + return; + } + + float * const wdata = params->wdata; + + for (int i03 = 0; i03 < ne03; i03++) { + for (int i02 = 0; i02 < ne02; i02++) { + { + int id = 0; + for (int i01 = 0; i01 < ne01; ++i01) { + for (int i00 = 0; i00 < ne00; ++i00) { + wdata[id++] = GGML_FP16_TO_FP32(*(ggml_fp16_t *) ((char *) src0->data + i03*nb03 
+ i02*nb02 + i01*nb01 + i00*nb00)); + } + } + } + + const float * x = wdata; + const float * y = (float *) ((char *) src1->data + i02*nb12 + i03*nb13); + + // float * z = wdata + ne00*ne01; + + // z = x * yT + //{ + // cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasTrans, + // ne01, ne11, ne00, + // 1.0f, x, ne00, + // y, ne00, + // 0.0f, z, ne11); + //} + + float * d = (float *) ((char *) dst->data + i02*nb2 + i03*nb3); + + // transpose z + //for (int j = 0; j < ne11; ++j) { + // for (int i = 0; i < ne01; ++i) { + // d[j*ne01 + i] = z[i*ne11 + j]; + // } + //} + + { +#if 1 + // zT = y * xT + cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasTrans, + ne11, ne01, ne10, + 1.0f, y, ne00, + x, ne00, + 0.0f, d, ne01); +#else + // zT = (xT * y)T + cblas_sgemm(CblasColMajor, CblasTrans, CblasNoTrans, + ne01, ne11, ne10, + 1.0f, x, ne00, + y, ne00, + 0.0f, d, ne01); +#endif + } + } + } + + /*printf("CBLAS F16 = %f ms, %d x %d x %d x %d\n", (ggml_perf_time_us() - t0)/1000.0, ne0, ne1, ne2, ne3);*/ + + return; + } +#endif + + if (params->type == GGML_TASK_INIT) { + if (nb01 >= nb00) { + ggml_fp16_t * const wdata = params->wdata; + + int id = 0; + for (int i13 = 0; i13 < ne13; ++i13) { + for (int i12 = 0; i12 < ne12; ++i12) { + for (int i11 = 0; i11 < ne11; ++i11) { + for (int i10 = 0; i10 < ne10; ++i10) { + wdata[id++] = GGML_FP32_TO_FP16(*(float *)((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11 + i10*nb10)); + } + } + } + } + + GGML_ASSERT(id*sizeof(ggml_fp16_t) <= params->wsize); + + return; + } + + // TODO: fix this memset (wsize is overestimated) + memset(params->wdata, 0, params->wsize); + return; + } + + if (params->type == GGML_TASK_FINALIZE) { + if (nb01 >= nb00) { + return; + } + + // TODO: fix this memset (wsize is overestimated) + //assert(params->wsize == (ggml_nbytes(dst) + CACHE_LINE_SIZE)*nth); + + ggml_fp16_t * const wdata = params->wdata; + + // cols per thread + const int dc = (ne + nth - 1)/nth; + + // col range for this thread + const int ic0 = dc*ith; + const int ic1 = MIN(ic0 + dc, ne); + + for (int i = ic0; i < ic1; ++i) { + ((float *) dst->data)[i] = GGML_FP16_TO_FP32(wdata[i]); + } + + for (int k = 1; k < nth; k++) { + for (int i = ic0; i < ic1; ++i) { + ((float *) dst->data)[i] += GGML_FP16_TO_FP32(wdata[(ne + CACHE_LINE_SIZE_F32)*k + i]); + } + } + + return; + } + + if (nb01 >= nb00) { + // fp16 -> half the size, so divide by 2 + // TODO: do not support transposed src1 + assert(nb10/2 == sizeof(ggml_fp16_t)); + + // parallelize by src0 rows using ggml_vec_dot_f16 + + // total rows in src0 + const int nr = ne01*ne02*ne03; + + // rows per thread + const int dr = (nr + nth - 1)/nth; + + // row range for this thread + const int ir0 = dr*ith; + const int ir1 = MIN(ir0 + dr, nr); + + ggml_fp16_t * wdata = params->wdata; + + for (int ir = ir0; ir < ir1; ++ir) { + // src0 indices + const int i03 = ir/(ne02*ne01); + const int i02 = (ir - i03*ne02*ne01)/ne01; + const int i01 = (ir - i03*ne02*ne01 - i02*ne01); + + const int i13 = i03; + const int i12 = i02; + + const int i0 = i01; + const int i2 = i02; + const int i3 = i03; + + ggml_fp16_t * src0_row = (ggml_fp16_t *) ((char *) src0->data + (i01*nb01 + i02*nb02 + i03*nb03)); + ggml_fp16_t * src1_col = wdata + ( 0 + i12*ne11 + i13*ne12*ne11)*ne00; + + float * dst_col = (float *) ((char *) dst->data + (i0*nb0 + 0*nb1 + i2*nb2 + i3*nb3)); + + assert(ne00 % 32 == 0); + + for (int ic = 0; ic < ne11; ++ic) { + ggml_vec_dot_f16(ne00, &dst_col[ic*ne0], src0_row, src1_col + ic*ne00); + } + } + } else { + // parallelize by src1 columns using 
ggml_vec_mad_f16 + // each thread has its own work data + // during FINALIZE we accumulate all work data into dst + + // total columns in src1 + const int nc = ne10; + + // columns per thread + const int dc = (nc + nth - 1)/nth; + + // column range for this thread + const int ic0 = dc*ith; + const int ic1 = MIN(ic0 + dc, nc); + + // work data for thread + const int wo = (ne + CACHE_LINE_SIZE_F32)*ith; + ggml_fp16_t * const wdata = params->wdata; + + for (int i13 = 0; i13 < ne13; ++i13) { + for (int i12 = 0; i12 < ne12; ++i12) { + for (int i11 = 0; i11 < ne11; ++i11) { + // dst indices + const int i1 = i11; + const int i2 = i12; + const int i3 = i13; + + ggml_fp16_t * dst_row = wdata + wo + i3*ne2*ne1*ne0 + i2*ne1*ne0 + i1*ne0; + + for (int ic = ic0; ic < ic1; ++ic) { + // src1 indices + const int i10 = ic; + + // src0 indices + const int i03 = i13; + const int i02 = i12; + const int i00 = ic; + + assert(sizeof(ggml_fp16_t)*(wo + i3*ne2*ne1*ne0 + i2*ne1*ne0 + i1*ne0 + ne01) <= params->wsize); + + ggml_fp16_t * src0_col = (ggml_fp16_t *) ((char *) src0->data + (i00*nb00 + i02*nb02 + i03*nb03)); + float src1_val = * (float *) ((char *) src1->data + (i10*nb10 + i11*nb11 + i12*nb12 + i13*nb13)); + + ggml_vec_mad_f16(ne01, dst_row, src0_col, src1_val); + } + } + } + } + } + + //int64_t t1 = ggml_time_us(); + //static int64_t acc = 0; + //acc += t1 - t0; + //if (t1 - t0 > 10) { + // printf("\n"); + // printf("ne00 = %5d, ne01 = %5d, ne02 = %5d, ne03 = %5d\n", ne00, ne01, ne02, ne03); + // printf("nb00 = %5d, nb01 = %5d, nb02 = %5d, nb03 = %5d\n", nb00, nb01, nb02, nb03); + // printf("ne10 = %5d, ne11 = %5d, ne12 = %5d, ne13 = %5d\n", ne10, ne11, ne12, ne13); + + // printf("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX task %d/%d: %d us, acc = %d\n", ith, nth, (int) (t1 - t0), (int) acc); + //} +} + +static void ggml_compute_forward_mul_mat_q4_0_f32( + const struct ggml_compute_params * params, + const struct ggml_tensor * src0, + const struct ggml_tensor * src1, + struct ggml_tensor * dst) { + int64_t t0 = ggml_perf_time_us(); + UNUSED(t0); + + const int ne00 = src0->ne[0]; + const int ne01 = src0->ne[1]; + const int ne02 = src0->ne[2]; + const int ne03 = src0->ne[3]; + + const int ne10 = src1->ne[0]; + const int ne11 = src1->ne[1]; + const int ne12 = src1->ne[2]; + const int ne13 = src1->ne[3]; + + const int ne0 = dst->ne[0]; + const int ne1 = dst->ne[1]; + const int ne2 = dst->ne[2]; + const int ne3 = dst->ne[3]; + const int ne = ne0*ne1*ne2*ne3; + + const int nb00 = src0->nb[0]; + const int nb01 = src0->nb[1]; + const int nb02 = src0->nb[2]; + const int nb03 = src0->nb[3]; + + const int nb10 = src1->nb[0]; + const int nb11 = src1->nb[1]; + const int nb12 = src1->nb[2]; + const int nb13 = src1->nb[3]; + + const int nb0 = dst->nb[0]; + const int nb1 = dst->nb[1]; + const int nb2 = dst->nb[2]; + const int nb3 = dst->nb[3]; + + const int ith = params->ith; + const int nth = params->nth; + + GGML_ASSERT(ne02 == ne12); + GGML_ASSERT(ne03 == ne13); + GGML_ASSERT(ne2 == ne12); + GGML_ASSERT(ne3 == ne13); + + // TODO: we don't support permuted src0 + GGML_ASSERT(nb00 == (int) GGML_TYPE_SIZE[GGML_TYPE_Q4_0] || nb01 == (int) GGML_TYPE_SIZE[GGML_TYPE_Q4_0]); + + // dst cannot be transposed or permuted + GGML_ASSERT(nb0 == sizeof(float)); + GGML_ASSERT(nb0 <= nb1); + GGML_ASSERT(nb1 <= nb2); + GGML_ASSERT(nb2 <= nb3); + + GGML_ASSERT(ne0 == ne01); + GGML_ASSERT(ne1 == ne11); + GGML_ASSERT(ne2 == ne02); + GGML_ASSERT(ne3 == ne03); + + // nb01 >= nb00 - src0 is not transposed + // compute by src0 rows 
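Unlike the f32/f16 paths, the q4_0 path addresses src0 rows and the quantized copy of src1 through GGML_TYPE_SIZE[GGML_TYPE_Q4_0] and GGML_BLCK_SIZE[GGML_TYPE_Q4_0] instead of a plain element size. Below is a rough standalone sketch of how a quantized row's byte size falls out of those two constants, assuming the block layout used elsewhere in the patch is one fp32 scale plus 32 packed 4-bit quants (20 bytes per 32 values); the struct and helper names are made up.

/*
 * Hypothetical sketch: Q4_0 row size in bytes, assuming a block of 32 values
 * stored as one fp32 scale plus 16 bytes of packed nibbles (20 bytes total).
 */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define QK4_0 32

typedef struct {
    float   d;            // per-block scale
    uint8_t qs[QK4_0/2];  // 32 quants packed two per byte
} block_q4_0;

static size_t q4_0_row_size(int ne) {
    assert(ne % QK4_0 == 0);
    return (ne/QK4_0)*sizeof(block_q4_0);
}

int main(void) {
    // e.g. a 4096-wide row: 128 blocks * 20 bytes = 2560 bytes, vs 16384 bytes as f32
    printf("row size = %zu bytes\n", q4_0_row_size(4096));
    return 0;
}

Under that assumption, the per-row stride the INIT stage uses for the quantized src1 copy, (ne10*GGML_TYPE_SIZE[GGML_TYPE_Q4_0])/GGML_BLCK_SIZE[GGML_TYPE_Q4_0], is exactly this row size for ne10 columns.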
+ // + // nb00 < nb01 - src0 is transposed + // compute by src0 columns + +#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS) + if (ggml_compute_forward_mul_mat_use_blas(src0, src1, dst)) { + GGML_ASSERT(nb10 == sizeof(float)); + + if (params->ith != 0) { + return; + } + + if (params->type == GGML_TASK_INIT) { + return; + } + + if (params->type == GGML_TASK_FINALIZE) { + return; + } + + float * const wdata = params->wdata; + + for (int i03 = 0; i03 < ne03; i03++) { + for (int i02 = 0; i02 < ne02; i02++) { + { + int id = 0; + for (int i01 = 0; i01 < ne01; ++i01) { + //for (int i00 = 0; i00 < ne00; ++i00) { + // wdata[id++] = GGML_FP16_TO_FP32(*(ggml_fp16_t *) ((char *) src0->data + i03*nb03 + i02*nb02 + i01*nb01 + i00*nb00)); + //} + dequantize_row_q4_0((char *) src0->data + i03*nb03 + i02*nb02 + i01*nb01, wdata + id, ne00); + id += ne00; + } + } + + const float * x = wdata; + const float * y = (float *) ((char *) src1->data + i02*nb12 + i03*nb13); + + // float * z = wdata + ne00*ne01; + + // z = x * yT + //{ + // cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasTrans, + // ne01, ne11, ne00, + // 1.0f, x, ne00, + // y, ne00, + // 0.0f, z, ne11); + //} + + float * d = (float *) ((char *) dst->data + i02*nb2 + i03*nb3); + + // transpose z + //for (int j = 0; j < ne11; ++j) { + // for (int i = 0; i < ne01; ++i) { + // d[j*ne01 + i] = z[i*ne11 + j]; + // } + //} + { +#if 1 + // zT = y * xT cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasTrans, ne11, ne01, ne10, - 1.0f, y, ne10, - x, ne10, + 1.0f, y, ne00, + x, ne00, + 0.0f, d, ne01); +#else + // zT = (xT * y)T + cblas_sgemm(CblasColMajor, CblasTrans, CblasNoTrans, + ne01, ne11, ne10, + 1.0f, x, ne00, + y, ne00, 0.0f, d, ne01); +#endif } } } - //printf("CBLAS F32 = %f ms, %d x %d x %d x %d\n", (ggml_perf_time_us() - t0)/1000.0, ne0, ne1, ne2, ne3); + /*printf("CBLAS Q4_0 = %f ms, %d x %d x %d x %d\n", (ggml_perf_time_us() - t0)/1000.0, ne0, ne1, ne2, ne3);*/ return; } #endif if (params->type == GGML_TASK_INIT) { + //printf("HHHHHHHHH ith = %d, nth = %d\n", ith, nth); if (nb01 >= nb00) { + char * wdata = params->wdata; + + for (int i13 = 0; i13 < ne13; ++i13) { + for (int i12 = 0; i12 < ne12; ++i12) { + for (int i11 = 0; i11 < ne11; ++i11) { + //for (int i10 = 0; i10 < ne10; ++i10) { + // wdata[id++] = GGML_FP32_TO_FP16(*(float *)((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11 + i10*nb10)); + //} + quantize_row_q4_0((float *)((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11), (void *) wdata, ne10); + wdata += (ne10*GGML_TYPE_SIZE[GGML_TYPE_Q4_0])/GGML_BLCK_SIZE[GGML_TYPE_Q4_0]; + } + } + } + return; } @@ -4481,9 +5830,6 @@ static void ggml_compute_forward_mul_mat_f32( return; } - // TODO: fix this memset (wsize is overestimated) - //assert(params->wsize == (ggml_nbytes(dst) + CACHE_LINE_SIZE)*nth); - float * const wdata = params->wdata; // cols per thread @@ -4504,9 +5850,8 @@ static void ggml_compute_forward_mul_mat_f32( if (nb01 >= nb00) { // TODO: do not support transposed src1 - assert(nb10 == sizeof(float)); - // parallelize by src0 rows using ggml_vec_dot_f32 + // parallelize by src0 rows using ggml_vec_dot_q4_0 // total rows in src0 const int nr = ne01*ne02*ne03; @@ -4518,32 +5863,35 @@ static void ggml_compute_forward_mul_mat_f32( const int ir0 = dr*ith; const int ir1 = MIN(ir0 + dr, nr); + void * wdata = params->wdata; + for (int ir = ir0; ir < ir1; ++ir) { // src0 indices const int i03 = ir/(ne02*ne01); const int i02 = (ir - i03*ne02*ne01)/ne01; const int i01 = (ir - i03*ne02*ne01 - i02*ne01); - for (int ic = 0; ic < 
ne11; ++ic) { - // src1 indices - const int i13 = i03; - const int i12 = i02; - const int i11 = ic; + const int i13 = i03; + const int i12 = i02; - // dst indices - const int i0 = i01; - const int i1 = i11; - const int i2 = i02; - const int i3 = i03; + const int i0 = i01; + const int i2 = i02; + const int i3 = i03; - ggml_vec_dot_f32(ne00, - (float *) ((char *) dst->data + (i0*nb0 + i1*nb1 + i2*nb2 + i3*nb3)), - (float *) ((char *) src0->data + (i01*nb01 + i02*nb02 + i03*nb03)), - (float *) ((char *) src1->data + (i11*nb11 + i12*nb12 + i13*nb13))); + void * src0_row = (void *) ((char *) src0->data + (i01*nb01 + i02*nb02 + i03*nb03)); + char * src1_col = ((char *) wdata + ( (0 + i12*ne11 + i13*ne12*ne11)*ne00*GGML_TYPE_SIZE[GGML_TYPE_Q4_0])/GGML_BLCK_SIZE[GGML_TYPE_Q4_0]); + + float * dst_col = (float *) ((char *) dst->data + (i0*nb0 + 0*nb1 + i2*nb2 + i3*nb3)); + + assert(ne00 % 32 == 0); + + for (int ic = 0; ic < ne11; ++ic) { + ggml_vec_dot_q4_0(ne00, &dst_col[ic*ne0], src0_row, ((void *) (src1_col + (ic*ne00*GGML_TYPE_SIZE[GGML_TYPE_Q4_0])/GGML_BLCK_SIZE[GGML_TYPE_Q4_0]))); } } } else { - // parallelize by src1 columns using ggml_vec_mad_f32 + //printf("AAAAA ith = %d, nth = %d\n", ith, nth); + // parallelize by src1 columns using ggml_vec_mad_q4_0 // each thread has its own work data // during FINALIZE we accumulate all work data into dst @@ -4564,6 +5912,13 @@ static void ggml_compute_forward_mul_mat_f32( for (int i13 = 0; i13 < ne13; ++i13) { for (int i12 = 0; i12 < ne12; ++i12) { for (int i11 = 0; i11 < ne11; ++i11) { + // dst indices + const int i1 = i11; + const int i2 = i12; + const int i3 = i13; + + float * dst_row = wdata + wo + i3*ne2*ne1*ne0 + i2*ne1*ne0 + i1*ne0; + for (int ic = ic0; ic < ic1; ++ic) { // src1 indices const int i10 = ic; @@ -4573,24 +5928,19 @@ static void ggml_compute_forward_mul_mat_f32( const int i02 = i12; const int i00 = ic; - // dst indices - const int i1 = i11; - const int i2 = i12; - const int i3 = i13; - assert(sizeof(float)*(wo + i3*ne2*ne1*ne0 + i2*ne1*ne0 + i1*ne0 + ne01) <= params->wsize); - ggml_vec_mad_f32(ne01, - (float *) (wdata + wo + i3*ne2*ne1*ne0 + i2*ne1*ne0 + i1*ne0), - (float *) ((char *) src0->data + (i00*nb00 + i02*nb02 + i03*nb03)), - *(float *) ((char *) src1->data + (i10*nb10 + i11*nb11 + i12*nb12 + i13*nb13))); + void * src0_col = (void *) ((char *) src0->data + (i00*nb00 + i02*nb02 + i03*nb03)); + float src1_val = *(float *) ((char *) src1->data + (i10*nb10 + i11*nb11 + i12*nb12 + i13*nb13)); + + ggml_vec_mad_q4_0(ne01, dst_row, src0_col, src1_val); } } } } } - //int64_t t1 = ggml_perf_time_us(); + //int64_t t1 = ggml_time_us(); //static int64_t acc = 0; //acc += t1 - t0; //if (t1 - t0 > 10) { @@ -4598,13 +5948,12 @@ static void ggml_compute_forward_mul_mat_f32( // printf("ne00 = %5d, ne01 = %5d, ne02 = %5d, ne03 = %5d\n", ne00, ne01, ne02, ne03); // printf("nb00 = %5d, nb01 = %5d, nb02 = %5d, nb03 = %5d\n", nb00, nb01, nb02, nb03); // printf("ne10 = %5d, ne11 = %5d, ne12 = %5d, ne13 = %5d\n", ne10, ne11, ne12, ne13); - // printf("nb10 = %5d, nb11 = %5d, nb12 = %5d, nb13 = %5d\n", nb10, nb11, nb12, nb13); // printf("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX task %d/%d: %d us, acc = %d\n", ith, nth, (int) (t1 - t0), (int) acc); //} } -static void ggml_compute_forward_mul_mat_f16_f32( +static void ggml_compute_forward_mul_mat_q4_1_f32( const struct ggml_compute_params * params, const struct ggml_tensor * src0, const struct ggml_tensor * src1, @@ -4652,7 +6001,7 @@ static void ggml_compute_forward_mul_mat_f16_f32( 
GGML_ASSERT(ne3 == ne13); // TODO: we don't support permuted src0 - GGML_ASSERT(nb00 == sizeof(ggml_fp16_t) || nb01 == sizeof(ggml_fp16_t)); + GGML_ASSERT(nb00 == (int) GGML_TYPE_SIZE[GGML_TYPE_Q4_1] || nb01 == (int) GGML_TYPE_SIZE[GGML_TYPE_Q4_1]); // dst cannot be transposed or permuted GGML_ASSERT(nb0 == sizeof(float)); @@ -4694,9 +6043,11 @@ static void ggml_compute_forward_mul_mat_f16_f32( { int id = 0; for (int i01 = 0; i01 < ne01; ++i01) { - for (int i00 = 0; i00 < ne00; ++i00) { - wdata[id++] = GGML_FP16_TO_FP32(*(ggml_fp16_t *) ((char *) src0->data + i03*nb03 + i02*nb02 + i01*nb01 + i00*nb00)); - } + //for (int i00 = 0; i00 < ne00; ++i00) { + // wdata[id++] = GGML_FP16_TO_FP32(*(ggml_fp16_t *) ((char *) src0->data + i03*nb03 + i02*nb02 + i01*nb01 + i00*nb00)); + //} + dequantize_row_q4_1((char *) src0->data + i03*nb03 + i02*nb02 + i01*nb01, wdata + id, ne00); + id += ne00; } } @@ -4750,22 +6101,22 @@ static void ggml_compute_forward_mul_mat_f16_f32( #endif if (params->type == GGML_TASK_INIT) { + //printf("HHHHHHHHH ith = %d, nth = %d\n", ith, nth); if (nb01 >= nb00) { - ggml_fp16_t * const wdata = params->wdata; + char * wdata = params->wdata; - int id = 0; for (int i13 = 0; i13 < ne13; ++i13) { for (int i12 = 0; i12 < ne12; ++i12) { for (int i11 = 0; i11 < ne11; ++i11) { - for (int i10 = 0; i10 < ne10; ++i10) { - wdata[id++] = GGML_FP32_TO_FP16(*(float *)((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11 + i10*nb10)); - } + //for (int i10 = 0; i10 < ne10; ++i10) { + // wdata[id++] = GGML_FP32_TO_FP16(*(float *)((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11 + i10*nb10)); + //} + quantize_row_q4_1((float *)((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11), (void *) wdata, ne10); + wdata += (ne10*GGML_TYPE_SIZE[GGML_TYPE_Q4_1])/GGML_BLCK_SIZE[GGML_TYPE_Q4_1]; } } } - GGML_ASSERT(id*sizeof(ggml_fp16_t) <= params->wsize); - return; } @@ -4779,10 +6130,7 @@ static void ggml_compute_forward_mul_mat_f16_f32( return; } - // TODO: fix this memset (wsize is overestimated) - //assert(params->wsize == (ggml_nbytes(dst) + CACHE_LINE_SIZE)*nth); - - ggml_fp16_t * const wdata = params->wdata; + float * const wdata = params->wdata; // cols per thread const int dc = (ne + nth - 1)/nth; @@ -4791,25 +6139,19 @@ static void ggml_compute_forward_mul_mat_f16_f32( const int ic0 = dc*ith; const int ic1 = MIN(ic0 + dc, ne); - for (int i = ic0; i < ic1; ++i) { - ((float *) dst->data)[i] = GGML_FP16_TO_FP32(wdata[i]); - } + ggml_vec_cpy_f32(ic1 - ic0, (float *) dst->data + ic0, wdata + ic0); for (int k = 1; k < nth; k++) { - for (int i = ic0; i < ic1; ++i) { - ((float *) dst->data)[i] += GGML_FP16_TO_FP32(wdata[(ne + CACHE_LINE_SIZE_F32)*k + i]); - } + ggml_vec_acc_f32(ic1 - ic0, (float *) dst->data + ic0, wdata + (ne + CACHE_LINE_SIZE_F32)*k + ic0); } return; } if (nb01 >= nb00) { - // fp16 -> half the size, so divide by 2 // TODO: do not support transposed src1 - assert(nb10/2 == sizeof(ggml_fp16_t)); - // parallelize by src0 rows using ggml_vec_dot_f16 + // parallelize by src0 rows using ggml_vec_dot_q4_1 // total rows in src0 const int nr = ne01*ne02*ne03; @@ -4821,7 +6163,7 @@ static void ggml_compute_forward_mul_mat_f16_f32( const int ir0 = dr*ith; const int ir1 = MIN(ir0 + dr, nr); - ggml_fp16_t * wdata = params->wdata; + void * wdata = params->wdata; for (int ir = ir0; ir < ir1; ++ir) { // src0 indices @@ -4836,19 +6178,20 @@ static void ggml_compute_forward_mul_mat_f16_f32( const int i2 = i02; const int i3 = i03; - ggml_fp16_t * src0_row = (ggml_fp16_t *) ((char *) src0->data + 
(i01*nb01 + i02*nb02 + i03*nb03)); - ggml_fp16_t * src1_col = wdata + ( 0 + i12*ne11 + i13*ne12*ne11)*ne00; + void * src0_row = (void *) ((char *) src0->data + (i01*nb01 + i02*nb02 + i03*nb03)); + char * src1_col = ((char *) wdata + ( (0 + i12*ne11 + i13*ne12*ne11)*ne00*GGML_TYPE_SIZE[GGML_TYPE_Q4_1])/GGML_BLCK_SIZE[GGML_TYPE_Q4_1]); float * dst_col = (float *) ((char *) dst->data + (i0*nb0 + 0*nb1 + i2*nb2 + i3*nb3)); assert(ne00 % 32 == 0); for (int ic = 0; ic < ne11; ++ic) { - ggml_vec_dot_f16(ne00, &dst_col[ic*ne0], src0_row, src1_col + ic*ne00); + ggml_vec_dot_q4_1(ne00, &dst_col[ic*ne0], src0_row, ((void *) (src1_col + (ic*ne00*GGML_TYPE_SIZE[GGML_TYPE_Q4_1])/GGML_BLCK_SIZE[GGML_TYPE_Q4_1]))); } } } else { - // parallelize by src1 columns using ggml_vec_mad_f16 + //printf("AAAAA ith = %d, nth = %d\n", ith, nth); + // parallelize by src1 columns using ggml_vec_mad_q4_1 // each thread has its own work data // during FINALIZE we accumulate all work data into dst @@ -4864,7 +6207,7 @@ static void ggml_compute_forward_mul_mat_f16_f32( // work data for thread const int wo = (ne + CACHE_LINE_SIZE_F32)*ith; - ggml_fp16_t * const wdata = params->wdata; + float * const wdata = params->wdata; for (int i13 = 0; i13 < ne13; ++i13) { for (int i12 = 0; i12 < ne12; ++i12) { @@ -4874,7 +6217,7 @@ static void ggml_compute_forward_mul_mat_f16_f32( const int i2 = i12; const int i3 = i13; - ggml_fp16_t * dst_row = wdata + wo + i3*ne2*ne1*ne0 + i2*ne1*ne0 + i1*ne0; + float * dst_row = wdata + wo + i3*ne2*ne1*ne0 + i2*ne1*ne0 + i1*ne0; for (int ic = ic0; ic < ic1; ++ic) { // src1 indices @@ -4885,12 +6228,12 @@ static void ggml_compute_forward_mul_mat_f16_f32( const int i02 = i12; const int i00 = ic; - assert(sizeof(ggml_fp16_t)*(wo + i3*ne2*ne1*ne0 + i2*ne1*ne0 + i1*ne0 + ne01) <= params->wsize); + assert(sizeof(float)*(wo + i3*ne2*ne1*ne0 + i2*ne1*ne0 + i1*ne0 + ne01) <= params->wsize); - ggml_fp16_t * src0_col = (ggml_fp16_t *) ((char *) src0->data + (i00*nb00 + i02*nb02 + i03*nb03)); - float src1_val = * (float *) ((char *) src1->data + (i10*nb10 + i11*nb11 + i12*nb12 + i13*nb13)); + void * src0_col = (void *) ((char *) src0->data + (i00*nb00 + i02*nb02 + i03*nb03)); + float src1_val = *(float *) ((char *) src1->data + (i10*nb10 + i11*nb11 + i12*nb12 + i13*nb13)); - ggml_vec_mad_f16(ne01, dst_row, src0_col, src1_val); + ggml_vec_mad_q4_1(ne01, dst_row, src0_col, src1_val); } } } @@ -4916,6 +6259,14 @@ static void ggml_compute_forward_mul_mat( const struct ggml_tensor * src1, struct ggml_tensor * dst) { switch (src0->type) { + case GGML_TYPE_Q4_0: + { + ggml_compute_forward_mul_mat_q4_0_f32(params, src0, src1, dst); + } break; + case GGML_TYPE_Q4_1: + { + ggml_compute_forward_mul_mat_q4_1_f32(params, src0, src1, dst); + } break; case GGML_TYPE_F16: { ggml_compute_forward_mul_mat_f16_f32(params, src0, src1, dst); @@ -4929,9 +6280,37 @@ static void ggml_compute_forward_mul_mat( case GGML_TYPE_I32: case GGML_TYPE_COUNT: { - assert(false); + GGML_ASSERT(false); } break; } + +#if 0 + if (src0->type == GGML_TYPE_F16 || src0->type == GGML_TYPE_Q4_1) { + static int first = 8; + printf("src0: ne0 = %5d, ne1 = %5d, ne2 = %5d\n", src0->ne[0], src0->ne[1], src0->ne[2]); + printf("src1: ne0 = %5d, ne1 = %5d, ne2 = %5d\n", src1->ne[0], src1->ne[1], src1->ne[2]); + printf("dst: ne0 = %5d, ne1 = %5d, ne2 = %5d\n", dst->ne[0], dst->ne[1], dst->ne[2]); + if (first) { + --first; + } else { + for (int k = 0; k < dst->ne[1]; ++k) { + for (int j = 0; j < dst->ne[0]/16; ++j) { + for (int i = 0; i < 16; ++i) { + printf("%8.4f 
", ((float *) dst->data)[k*dst->ne[0] + j*16 + i]); + } + printf("\n"); + } + printf("\n"); + } + printf("\n"); + exit(0); + } + } else { + printf("aaaa src0: ne0 = %5d, ne1 = %5d, ne2 = %5d\n", src0->ne[0], src0->ne[1], src0->ne[2]); + printf("aaaa src1: ne0 = %5d, ne1 = %5d, ne2 = %5d\n", src1->ne[0], src1->ne[1], src1->ne[2]); + printf("aaaa dst: ne0 = %5d, ne1 = %5d, ne2 = %5d\n", dst->ne[0], dst->ne[1], dst->ne[2]); + } +#endif } // ggml_compute_forward_scale @@ -4981,13 +6360,15 @@ static void ggml_compute_forward_scale( { ggml_compute_forward_scale_f32(params, src0, src1, dst); } break; + case GGML_TYPE_Q4_0: + case GGML_TYPE_Q4_1: case GGML_TYPE_I8: case GGML_TYPE_I16: case GGML_TYPE_I32: case GGML_TYPE_F16: case GGML_TYPE_COUNT: { - assert(false); + GGML_ASSERT(false); } break; } } @@ -5045,6 +6426,60 @@ static void ggml_compute_forward_transpose( // ggml_compute_forward_get_rows +static void ggml_compute_forward_get_rows_q4_0( + const struct ggml_compute_params * params, + const struct ggml_tensor * src0, + const struct ggml_tensor * src1, + struct ggml_tensor * dst) { + assert(params->ith == 0); + + if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { + return; + } + + const int nc = src0->ne[0]; + const int nr = ggml_nelements(src1); + + assert( dst->ne[0] == nc); + assert( dst->ne[1] == nr); + assert(src0->nb[0] == GGML_TYPE_SIZE[GGML_TYPE_Q4_0]); + + for (int i = 0; i < nr; ++i) { + const int r = ((int32_t *) src1->data)[i]; + + dequantize_row_q4_0( + (const void *) ((char *) src0->data + r*src0->nb[1]), + (float *) ((char *) dst->data + i*dst->nb[1]), nc); + } +} + +static void ggml_compute_forward_get_rows_q4_1( + const struct ggml_compute_params * params, + const struct ggml_tensor * src0, + const struct ggml_tensor * src1, + struct ggml_tensor * dst) { + assert(params->ith == 0); + + if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { + return; + } + + const int nc = src0->ne[0]; + const int nr = ggml_nelements(src1); + + assert( dst->ne[0] == nc); + assert( dst->ne[1] == nr); + assert(src0->nb[0] == GGML_TYPE_SIZE[GGML_TYPE_Q4_1]); + + for (int i = 0; i < nr; ++i) { + const int r = ((int32_t *) src1->data)[i]; + + dequantize_row_q4_1( + (const void *) ((char *) src0->data + r*src0->nb[1]), + (float *) ((char *) dst->data + i*dst->nb[1]), nc); + } +} + static void ggml_compute_forward_get_rows_f16( const struct ggml_compute_params * params, const struct ggml_tensor * src0, @@ -5106,6 +6541,14 @@ static void ggml_compute_forward_get_rows( const struct ggml_tensor * src1, struct ggml_tensor * dst) { switch (src0->type) { + case GGML_TYPE_Q4_0: + { + ggml_compute_forward_get_rows_q4_0(params, src0, src1, dst); + } break; + case GGML_TYPE_Q4_1: + { + ggml_compute_forward_get_rows_q4_1(params, src0, src1, dst); + } break; case GGML_TYPE_F16: { ggml_compute_forward_get_rows_f16(params, src0, src1, dst); @@ -5119,9 +6562,27 @@ static void ggml_compute_forward_get_rows( case GGML_TYPE_I32: case GGML_TYPE_COUNT: { - assert(false); + GGML_ASSERT(false); } break; } + + //static bool first = true; + //printf("ne0 = %d, ne1 = %d, ne2 = %d\n", dst->ne[0], dst->ne[1], dst->ne[2]); + //if (first) { + // first = false; + //} else { + // for (int k = 0; k < dst->ne[1]; ++k) { + // for (int j = 0; j < dst->ne[0]/16; ++j) { + // for (int i = 0; i < 16; ++i) { + // printf("%8.4f ", ((float *) dst->data)[k*dst->ne[0] + j*16 + i]); + // } + // printf("\n"); + // } + // printf("\n"); + // } + // printf("\n"); + // exit(0); + //} } // 
ggml_compute_forward_diag_mask_inf @@ -5172,13 +6633,15 @@ static void ggml_compute_forward_diag_mask_inf( { ggml_compute_forward_diag_mask_inf_f32(params, src0, src1, dst); } break; + case GGML_TYPE_Q4_0: + case GGML_TYPE_Q4_1: case GGML_TYPE_I8: case GGML_TYPE_I16: case GGML_TYPE_I32: case GGML_TYPE_F16: case GGML_TYPE_COUNT: { - assert(false); + GGML_ASSERT(false); } break; } } @@ -5217,6 +6680,7 @@ static void ggml_compute_forward_soft_max_f32( #ifndef NDEBUG for (int i = 0; i < nc; ++i) { + //printf("p[%d] = %f\n", i, p[i]); assert(!isnan(p[i])); } #endif @@ -5263,13 +6727,15 @@ static void ggml_compute_forward_soft_max( { ggml_compute_forward_soft_max_f32(params, src0, dst); } break; + case GGML_TYPE_Q4_0: + case GGML_TYPE_Q4_1: case GGML_TYPE_I8: case GGML_TYPE_I16: case GGML_TYPE_I32: case GGML_TYPE_F16: case GGML_TYPE_COUNT: { - assert(false); + GGML_ASSERT(false); } break; } } @@ -5333,23 +6799,84 @@ static void ggml_compute_forward_rope_f32( } } +static void ggml_compute_forward_rope_f16( + const struct ggml_compute_params * params, + const struct ggml_tensor * src0, + const struct ggml_tensor * src1, + struct ggml_tensor * dst) { + assert(params->ith == 0); + assert(src1->type == GGML_TYPE_I32); + assert(ggml_nelements(src1) == 3); + + if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { + return; + } + + const int n_past = ((int32_t *) src1->data)[0]; + const int n_dims = ((int32_t *) src1->data)[1]; + const int mode = ((int32_t *) src1->data)[2]; + + //const int ne0 = src0->ne[0]; + const int ne1 = src0->ne[1]; + const int ne2 = src0->ne[2]; + const int ne3 = src0->ne[3]; + + const int nb0 = src0->nb[0]; + const int nb1 = src0->nb[1]; + const int nb2 = src0->nb[2]; + const int nb3 = src0->nb[3]; + + //printf("ne0: %d, ne1: %d, ne2: %d, ne3: %d\n", ne0, ne1, ne2, ne3); + //printf("n_past = %d, ne2 = %d\n", n_past, ne2); + + assert(nb0 == sizeof(ggml_fp16_t)); + + for (int i3 = 0; i3 < ne3; i3++) { + for (int i2 = (mode == 0 ? 0 : n_past); i2 < ne2; i2++) { + const int p = (mode == 0 ? 
n_past + i2 : i2); + for (int i1 = 0; i1 < ne1; i1++) { + for (int i0 = 0; i0 < n_dims; i0 += 2) { + const double theta = pow(10000.0, ((double)-i0)/n_dims); + + const double cos_theta = cos(p*theta); + const double sin_theta = sin(p*theta); + + const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0); + ggml_fp16_t * dst_data = (ggml_fp16_t *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0); + + double x0 = ggml_fp16_to_fp32(src[0]); + double x1 = ggml_fp16_to_fp32(src[1]); + + dst_data[0] = ggml_fp32_to_fp16(x0*cos_theta - x1*sin_theta); + dst_data[1] = ggml_fp32_to_fp16(x0*sin_theta + x1*cos_theta); + } + } + } + } +} + static void ggml_compute_forward_rope( const struct ggml_compute_params * params, const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst) { switch (src0->type) { + case GGML_TYPE_F16: + { + ggml_compute_forward_rope_f16(params, src0, src1, dst); + } break; case GGML_TYPE_F32: { ggml_compute_forward_rope_f32(params, src0, src1, dst); } break; + case GGML_TYPE_Q4_0: + case GGML_TYPE_Q4_1: case GGML_TYPE_I8: case GGML_TYPE_I16: case GGML_TYPE_I32: - case GGML_TYPE_F16: case GGML_TYPE_COUNT: { - assert(false); + GGML_ASSERT(false); } break; } } @@ -5610,6 +7137,8 @@ static void ggml_compute_forward_conv_1d_1s( { ggml_compute_forward_conv_1d_1s_f32(params, src0, src1, dst); } break; + case GGML_TYPE_Q4_0: + case GGML_TYPE_Q4_1: case GGML_TYPE_I8: case GGML_TYPE_I16: case GGML_TYPE_I32: @@ -5876,6 +7405,8 @@ static void ggml_compute_forward_conv_1d_2s( { ggml_compute_forward_conv_1d_2s_f32(params, src0, src1, dst); } break; + case GGML_TYPE_Q4_0: + case GGML_TYPE_Q4_1: case GGML_TYPE_I8: case GGML_TYPE_I16: case GGML_TYPE_I32: @@ -6359,12 +7890,14 @@ static void ggml_compute_forward_flash_attn( { ggml_compute_forward_flash_attn_f32(params, q, k, v, masked, dst); } break; + case GGML_TYPE_Q4_0: + case GGML_TYPE_Q4_1: case GGML_TYPE_I8: case GGML_TYPE_I16: case GGML_TYPE_I32: case GGML_TYPE_COUNT: { - assert(false); + GGML_ASSERT(false); } break; } } @@ -6568,12 +8101,14 @@ static void ggml_compute_forward_flash_ff( { GGML_ASSERT(false); // TODO } break; + case GGML_TYPE_Q4_0: + case GGML_TYPE_Q4_1: case GGML_TYPE_I8: case GGML_TYPE_I16: case GGML_TYPE_I32: case GGML_TYPE_COUNT: { - assert(false); + GGML_ASSERT(false); } break; } } @@ -6581,7 +8116,7 @@ static void ggml_compute_forward_flash_ff( ///////////////////////////////// static void ggml_compute_forward(struct ggml_compute_params * params, struct ggml_tensor * tensor) { - assert(params); + GGML_ASSERT(params); switch (tensor->op) { case GGML_OP_DUP: @@ -6829,7 +8364,7 @@ static void ggml_compute_backward(struct ggml_context * ctx, struct ggml_tensor } break; case GGML_OP_MEAN: { - assert(false); // TODO: implement + GGML_ASSERT(false); // TODO: implement } break; case GGML_OP_REPEAT: { @@ -6884,17 +8419,17 @@ static void ggml_compute_backward(struct ggml_context * ctx, struct ggml_tensor } break; case GGML_OP_GELU: { - assert(false); // TODO: not implemented + GGML_ASSERT(false); // TODO: not implemented } break; case GGML_OP_NORM: { - assert(false); // TODO: not implemented + GGML_ASSERT(false); // TODO: not implemented } break; case GGML_OP_MUL_MAT: { if (src0->grad) { // TODO: this requires outer product - ggml_out_prod(ctx, src1, tensor->grad); - assert(false); + GGML_ASSERT(false); } if (src1->grad) { src1->grad = @@ -7010,12 +8545,12 @@ static void ggml_visit_parents(struct ggml_cgraph * cgraph, struct ggml_tensor * if 
(node->op == GGML_OP_NONE && node->grad == NULL) { // reached a leaf node, not part of the gradient graph (e.g. a constant) - assert(cgraph->n_leafs < GGML_MAX_NODES); + GGML_ASSERT(cgraph->n_leafs < GGML_MAX_NODES); cgraph->leafs[cgraph->n_leafs] = node; cgraph->n_leafs++; } else { - assert(cgraph->n_nodes < GGML_MAX_NODES); + GGML_ASSERT(cgraph->n_nodes < GGML_MAX_NODES); cgraph->nodes[cgraph->n_nodes] = node; cgraph->grads[cgraph->n_nodes] = node->grad; @@ -7039,7 +8574,7 @@ static void ggml_build_forward_impl(struct ggml_cgraph * cgraph, struct ggml_ten if (n_new > 0) { // the last added node should always be starting point - assert(cgraph->nodes[cgraph->n_nodes - 1] == tensor); + GGML_ASSERT(cgraph->nodes[cgraph->n_nodes - 1] == tensor); } } @@ -7070,7 +8605,7 @@ struct ggml_cgraph ggml_build_forward(struct ggml_tensor * tensor) { struct ggml_cgraph ggml_build_backward(struct ggml_context * ctx, struct ggml_cgraph * gf, bool keep) { struct ggml_cgraph result = *gf; - assert(gf->n_nodes > 0); + GGML_ASSERT(gf->n_nodes > 0); // if we are keeping the gradient graph, we have to detach the gradient nodes from the original graph if (keep) { @@ -7269,7 +8804,7 @@ void ggml_graph_compute(struct ggml_context * ctx, struct ggml_cgraph * cgraph) }; int rc = ggml_thread_create(&workers[j].thrd, NULL, ggml_graph_compute_thread, &workers[j]); - assert(rc == 0); + GGML_ASSERT(rc == 0); UNUSED(rc); } } @@ -7331,6 +8866,7 @@ void ggml_graph_compute(struct ggml_context * ctx, struct ggml_cgraph * cgraph) // TODO: better way to determine if the matrix is transposed if (node->src0->nb[1] < node->src0->nb[0]) { cur = ggml_nbytes(node)*node->n_tasks; // TODO: this can become (n_tasks-1) + // TODO: overestimated by factor of x2 for FP16 } else { if (node->src0->type == GGML_TYPE_F16 && node->src1->type == GGML_TYPE_F32) { @@ -7338,19 +8874,43 @@ void ggml_graph_compute(struct ggml_context * ctx, struct ggml_cgraph * cgraph) if (ggml_compute_forward_mul_mat_use_blas(node->src0, node->src1, node)) { node->n_tasks = 1; // TODO: this actually is doing nothing // the threads are still spinning - cur = sizeof(float)*(node->src0->ne[0]*node->src0->ne[1]); + cur = GGML_TYPE_SIZE[GGML_TYPE_F32]*(node->src0->ne[0]*node->src0->ne[1]); //printf("src0: ne0 = %d, ne1 = %d, ne = %d\n", node->src0->ne[0], node->src0->ne[1], node->src0->ne[0]*node->src0->ne[1]); //printf("src1: ne0 = %d, ne1 = %d, ne = %d\n", node->src1->ne[0], node->src1->ne[1], node->src1->ne[0]*node->src1->ne[1]); //printf("cur = %zu\n", cur); } else { - cur = sizeof(ggml_fp16_t)*ggml_nelements(node->src1); + cur = GGML_TYPE_SIZE[GGML_TYPE_F16]*ggml_nelements(node->src1); } #else - cur = sizeof(ggml_fp16_t)*ggml_nelements(node->src1); + cur = GGML_TYPE_SIZE[GGML_TYPE_F16]*ggml_nelements(node->src1); #endif } else if (node->src0->type == GGML_TYPE_F32 && node->src1->type == GGML_TYPE_F32) { cur = 0; + } else if (node->src0->type == GGML_TYPE_Q4_0 && + node->src1->type == GGML_TYPE_F32) { +#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS) + if (ggml_compute_forward_mul_mat_use_blas(node->src0, node->src1, node)) { + node->n_tasks = 1; + cur = GGML_TYPE_SIZE[GGML_TYPE_F32]*(node->src0->ne[0]*node->src0->ne[1]); + } else { + cur = (GGML_TYPE_SIZE[GGML_TYPE_Q4_0]*ggml_nelements(node->src1))/GGML_BLCK_SIZE[GGML_TYPE_Q4_0]; + } +#else + cur = (GGML_TYPE_SIZE[GGML_TYPE_Q4_0]*ggml_nelements(node->src1))/GGML_BLCK_SIZE[GGML_TYPE_Q4_0]; +#endif + } else if (node->src0->type == GGML_TYPE_Q4_1 && + node->src1->type == GGML_TYPE_F32) { +#if 
defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS) + if (ggml_compute_forward_mul_mat_use_blas(node->src0, node->src1, node)) { + node->n_tasks = 1; + cur = GGML_TYPE_SIZE[GGML_TYPE_F32]*(node->src0->ne[0]*node->src0->ne[1]); + } else { + cur = (GGML_TYPE_SIZE[GGML_TYPE_Q4_1]*ggml_nelements(node->src1))/GGML_BLCK_SIZE[GGML_TYPE_Q4_1]; + } +#else + cur = (GGML_TYPE_SIZE[GGML_TYPE_Q4_1]*ggml_nelements(node->src1))/GGML_BLCK_SIZE[GGML_TYPE_Q4_1]; +#endif } else { GGML_ASSERT(false); } @@ -7454,13 +9014,13 @@ void ggml_graph_compute(struct ggml_context * ctx, struct ggml_cgraph * cgraph) } break; case GGML_OP_COUNT: { - assert(false); + GGML_ASSERT(false); } break; } } if (cgraph->work != NULL && work_size > cgraph->work_size) { - assert(false); // TODO: better handling + GGML_ASSERT(false); // TODO: better handling } if (work_size > 0 && cgraph->work == NULL) { @@ -7626,7 +9186,7 @@ void ggml_graph_compute(struct ggml_context * ctx, struct ggml_cgraph * cgraph) for (int j = 0; j < n_threads - 1; j++) { int rc = ggml_thread_join(workers[j].thrd, NULL); - assert(rc == 0); + GGML_ASSERT(rc == 0); UNUSED(rc); } @@ -7733,7 +9293,7 @@ void ggml_graph_dump_dot(const struct ggml_cgraph * gb, const struct ggml_cgraph char color[16]; FILE * fp = fopen(filename, "w"); - assert(fp); + GGML_ASSERT(fp); fprintf(fp, "digraph G {\n"); fprintf(fp, " newrank = true;\n"); @@ -7891,7 +9451,7 @@ static enum ggml_opt_result ggml_opt_adam( struct ggml_tensor * f, struct ggml_cgraph * gf, struct ggml_cgraph * gb) { - assert(ggml_is_scalar(f)); + GGML_ASSERT(ggml_is_scalar(f)); gf->n_threads = params.n_threads; gb->n_threads = params.n_threads; @@ -7905,7 +9465,7 @@ static enum ggml_opt_result ggml_opt_adam( if (gf->nodes[i]->is_param) { GGML_PRINT_DEBUG("found param %d: grad->op = %d\n", np, gf->nodes[i]->grad->op); - assert(np < GGML_MAX_PARAMS); + GGML_ASSERT(np < GGML_MAX_PARAMS); ps[np++] = gf->nodes[i]; nx += ggml_nelements(gf->nodes[i]); @@ -8205,7 +9765,7 @@ static enum ggml_opt_result ggml_opt_lbfgs( if (gf->nodes[i]->is_param) { GGML_PRINT_DEBUG("found param %d: grad->op = %d\n", np, gf->nodes[i]->grad->op); - assert(np < GGML_MAX_PARAMS); + GGML_ASSERT(np < GGML_MAX_PARAMS); ps[np++] = gf->nodes[i]; nx += ggml_nelements(gf->nodes[i]); diff --git a/tests/test-mul-mat2.c b/tests/test-mul-mat2.c index bb7dd8d..be7b038 100644 --- a/tests/test-mul-mat2.c +++ b/tests/test-mul-mat2.c @@ -13,8 +13,10 @@ #include -#ifdef __ARM_NEON +#if defined(__ARM_NEON) #include "arm_neon.h" +#elif defined(__AVX__) || defined(__AVX2__) +#include "immintrin.h" #endif #ifndef MIN @@ -26,8 +28,12 @@ const int M = 1280; const int N = 1536; const int K = 1280; -const int QK = 64; -#define QB 7 +//const int M = 64; +//const int N = 64; +//const int K = 64; + +#define QK 64 +#define QB 4 //#define GGML_GQ_USE_FP16_SCALE @@ -41,8 +47,12 @@ const int QK = 64; #define GGML_GQ_TO_FP32(x) (x) #endif -#define gq_quant_t uint64_t #define gq_t_bits 64 +#define gq_quant_t uint64_t + +float frand() { + return (float) rand() / (float) RAND_MAX; +} uint64_t get_time_us() { struct timeval tv; @@ -50,6 +60,47 @@ uint64_t get_time_us() { return tv.tv_sec * 1000000 + tv.tv_usec; } +#if defined(__AVX2__) +// horizontally reduce 8 32-bit integers +static inline uint32_t _mm256_hadd_epi32_gg(__m256i v) { + __m128i v0 = _mm256_extractf128_si256(v, 0); + __m128i v1 = _mm256_extractf128_si256(v, 1); + + v0 = _mm_add_epi32(v0, v1); + + v1 = _mm_shuffle_epi32(v0, 0x0e); + v0 = _mm_add_epi32(v0, v1); + + v1 = _mm_shuffle_epi32(v0, 0x01); + v0 = 
_mm_add_epi32(v0, v1); + + return _mm_cvtsi128_si32(v0); +} + +//static inline float _mm256_hadd_epi32_gg(__m256i v) { +// const __m256 v0 = _mm256_cvtepi32_ps(v); +// const __m128 t0 = _mm_add_ps(_mm256_castps256_ps128(v0), _mm256_extractf128_ps(v0, 1)); +// const __m128 t1 = _mm_hadd_ps(t0, t0); +// +// return _mm_cvtss_f32(_mm_hadd_ps(t1, t1)); +//} + +// horizontally reduce 32 8-bit integers +static inline int32_t _mm256_hadd_epi8_gg(__m256i v0) { + __m256i v1 = _mm256_maddubs_epi16(v0, _mm256_set1_epi8(1)); + __m256i v2 = _mm256_madd_epi16 (v1, _mm256_set1_epi16(1)); + + return _mm256_hadd_epi32_gg(v2); +} + +static inline float _mm256_hadd_ps_gg(__m256 v) { + const __m128 t0 = _mm_add_ps(_mm256_castps256_ps128(v), _mm256_extractf128_ps(v, 1)); + const __m128 t1 = _mm_hadd_ps(t0, t0); + + return _mm_cvtss_f32(_mm_hadd_ps(t1, t1)); +} +#endif + // // naive implementation // @@ -74,6 +125,21 @@ void mul_mat_f32_naive( // method 1 // +static inline int quantize_1_blocks_per_row(int k) { + return k/QK; +} + +static inline int quantize_1_quants_per_block() { + return QK/gq_t_bits; +} + +static inline int quantize_1_row_size(int k) { + const int nb = quantize_1_blocks_per_row(k); + const int nq = quantize_1_quants_per_block(); + + return nb*(2*sizeof(gq_scale_t) + nq*QB*sizeof(gq_quant_t)); +} + void quantize_1(const float * src, void * dst, int n, int k) { char * p0 = dst; @@ -215,6 +281,7 @@ void mul_mat_gq_1( // // method 2 +// n-bit quantization (2nd attempt) // static inline int quantize_2_blocks_per_row(int k) { @@ -244,15 +311,41 @@ void quantize_2_row(const float * restrict src, void * restrict dst, int k) { gq_quant_t pp[QB]; + static const int32_t sh[32] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, + }; + for (int i = 0; i < nb; i++) { float min = FLT_MAX; float max = -FLT_MAX; - for (int l = 0; l < QK; l++) { - const float v = src[i*QK + l]; - if (v < min) min = v; - if (v > max) max = v; +#ifdef __ARM_NEON + { + float32x4_t minv = vdupq_n_f32(FLT_MAX); + float32x4_t maxv = vdupq_n_f32(-FLT_MAX); + + for (int l = 0; l < QK; l += 4) { + float32x4_t v = vld1q_f32(src + i*QK + l); + minv = vminq_f32(minv, v); + maxv = vmaxq_f32(maxv, v); + } + + float32x2_t minv32 = vpmin_f32(vget_low_f32(minv), vget_high_f32(minv)); + float32x2_t maxv32 = vpmax_f32(vget_low_f32(maxv), vget_high_f32(maxv)); + + min = MIN(vget_lane_f32(minv32, 0), vget_lane_f32(minv32, 1)); + max = MAX(vget_lane_f32(maxv32, 0), vget_lane_f32(maxv32, 1)); } +#else + { + for (int l = 0; l < QK; l++) { + const float v = src[i*QK + l]; + if (v < min) min = v; + if (v > max) max = v; + } + } +#endif const float d = (max - min) / ((1 << QB) - 1); const float id = d ? 1.0/d : 0.0; @@ -263,18 +356,150 @@ void quantize_2_row(const float * restrict src, void * restrict dst, int k) { for (int s = 0; s < nq; ++s) { memset(pp, 0, sizeof(pp)); +#if 1 for (int l = 0; l < gq_t_bits; l++) { const float v = src[i*QK + s*gq_t_bits + l]; - const uint8_t q = (v - min)*id; + const uint8_t q = (v - min)*id + frand(); for (int b = 0; b < QB; b++) { pp[b] |= q & (1 << b) ? 
(1ULL << l) : 0; } } +#elif defined(__ARM_NEON) +#if 1 + { + uint32_t ppt[2*4*QB]; + + float32x4_t minv = vdupq_n_f32(min); + float32x4_t idv = vdupq_n_f32(id); + + assert(gq_t_bits % 16 == 0); + + uint32x4_t p0[QB] = { vdupq_n_u32(0) }; + uint32x4_t p1[QB] = { vdupq_n_u32(0) }; + + for (int l = 0; l < gq_t_bits; l += 16) { + float32x4_t v0 = vld1q_f32(src + i*QK + s*gq_t_bits + l + 0); + float32x4_t v1 = vld1q_f32(src + i*QK + s*gq_t_bits + l + 4); + float32x4_t v2 = vld1q_f32(src + i*QK + s*gq_t_bits + l + 8); + float32x4_t v3 = vld1q_f32(src + i*QK + s*gq_t_bits + l + 12); + + v0 = vsubq_f32(v0, minv); + v1 = vsubq_f32(v1, minv); + v2 = vsubq_f32(v2, minv); + v3 = vsubq_f32(v3, minv); + + v0 = vmulq_f32(v0, idv); + v1 = vmulq_f32(v1, idv); + v2 = vmulq_f32(v2, idv); + v3 = vmulq_f32(v3, idv); + +#if 1 + v0[0] += frand(); v0[1] += frand(); v0[2] += frand(); v0[3] += frand(); + v1[0] += frand(); v1[1] += frand(); v1[2] += frand(); v1[3] += frand(); + v2[0] += frand(); v2[1] += frand(); v2[2] += frand(); v2[3] += frand(); + v3[0] += frand(); v3[1] += frand(); v3[2] += frand(); v3[3] += frand(); +#endif + + uint32x4_t q0 = vcvtq_u32_f32(v0); + uint32x4_t q1 = vcvtq_u32_f32(v1); + uint32x4_t q2 = vcvtq_u32_f32(v2); + uint32x4_t q3 = vcvtq_u32_f32(v3); + + for (int b = 0; b < QB; ++b) { + uint32x4_t m = vdupq_n_u32(1 << b); + uint32x4_t r = vdupq_n_u32(-b); + + if (l < 32) { + p0[b] = vorrq_u32(p0[b], vshlq_u32(vshlq_u32(vandq_u32(q0, m), r), vld1q_s32(sh + l + 0))); + p0[b] = vorrq_u32(p0[b], vshlq_u32(vshlq_u32(vandq_u32(q1, m), r), vld1q_s32(sh + l + 4))); + p0[b] = vorrq_u32(p0[b], vshlq_u32(vshlq_u32(vandq_u32(q2, m), r), vld1q_s32(sh + l + 8))); + p0[b] = vorrq_u32(p0[b], vshlq_u32(vshlq_u32(vandq_u32(q3, m), r), vld1q_s32(sh + l + 12))); + } else { + p1[b] = vorrq_u32(p1[b], vshlq_u32(vshlq_u32(vandq_u32(q0, m), r), vld1q_s32(sh + l - 32))); + p1[b] = vorrq_u32(p1[b], vshlq_u32(vshlq_u32(vandq_u32(q1, m), r), vld1q_s32(sh + l - 28))); + p1[b] = vorrq_u32(p1[b], vshlq_u32(vshlq_u32(vandq_u32(q2, m), r), vld1q_s32(sh + l - 24))); + p1[b] = vorrq_u32(p1[b], vshlq_u32(vshlq_u32(vandq_u32(q3, m), r), vld1q_s32(sh + l - 20))); + } + } + } + +#if QB == 4 + vst1q_u32((uint32_t *) ppt + 0, p0[0]); + vst1q_u32((uint32_t *) ppt + 4, p1[0]); + vst1q_u32((uint32_t *) ppt + 8, p0[1]); + vst1q_u32((uint32_t *) ppt + 12, p1[1]); + vst1q_u32((uint32_t *) ppt + 16, p0[2]); + vst1q_u32((uint32_t *) ppt + 20, p1[2]); + vst1q_u32((uint32_t *) ppt + 24, p0[3]); + vst1q_u32((uint32_t *) ppt + 28, p1[3]); + + pp[0] = (ppt[0] | ppt[1] | ppt[2] | ppt[3] ) | ((uint64_t) (ppt[4] | ppt[5] | ppt[6] | ppt[7]) ) << 32; + pp[1] = (ppt[8] | ppt[9] | ppt[10] | ppt[11]) | ((uint64_t) (ppt[12] | ppt[13] | ppt[14] | ppt[15])) << 32; + pp[2] = (ppt[16] | ppt[17] | ppt[18] | ppt[19]) | ((uint64_t) (ppt[20] | ppt[21] | ppt[22] | ppt[23])) << 32; + pp[3] = (ppt[24] | ppt[25] | ppt[26] | ppt[27]) | ((uint64_t) (ppt[28] | ppt[29] | ppt[30] | ppt[31])) << 32; +#else + for (int b = 0; b < QB; ++b) { + vst1q_u32((uint32_t *) ppt + 0, p0[b]); + vst1q_u32((uint32_t *) ppt + 4, p1[b]); + + pp[b] = (ppt[0] | ppt[1] | ppt[2] | ppt[3]) | ((uint64_t) (ppt[4] | ppt[5] | ppt[6] | ppt[7])) << 32; + } +#endif + } +#else + // less optimal SIMD + { + float32x4_t minv = vdupq_n_f32(min); + float32x4_t idv = vdupq_n_f32(id); + + assert(gq_t_bits == 64); + uint8_t qq[gq_t_bits]; + + for (int l = 0; l < gq_t_bits; l += 16) { + float32x4_t v0 = vld1q_f32(src + i*QK + s*gq_t_bits + l + 0); + float32x4_t v1 = vld1q_f32(src + i*QK + s*gq_t_bits + l + 
4); + float32x4_t v2 = vld1q_f32(src + i*QK + s*gq_t_bits + l + 8); + float32x4_t v3 = vld1q_f32(src + i*QK + s*gq_t_bits + l + 12); + + v0 = vsubq_f32(v0, minv); + v1 = vsubq_f32(v1, minv); + v2 = vsubq_f32(v2, minv); + v3 = vsubq_f32(v3, minv); + + v0 = vmulq_f32(v0, idv); + v1 = vmulq_f32(v1, idv); + v2 = vmulq_f32(v2, idv); + v3 = vmulq_f32(v3, idv); + +#if 0 + v0[0] += frand(); v0[1] += frand(); v0[2] += frand(); v0[3] += frand(); + v1[0] += frand(); v1[1] += frand(); v1[2] += frand(); v1[3] += frand(); + v2[0] += frand(); v2[1] += frand(); v2[2] += frand(); v2[3] += frand(); + v3[0] += frand(); v3[1] += frand(); v3[2] += frand(); v3[3] += frand(); +#endif + + uint32x4_t q0 = vcvtq_u32_f32(v0); + uint32x4_t q1 = vcvtq_u32_f32(v1); + uint32x4_t q2 = vcvtq_u32_f32(v2); + uint32x4_t q3 = vcvtq_u32_f32(v3); + + // store in qq as uint8_t + vst1_u8(qq + l + 0, vmovn_u16(vcombine_u16(vmovn_u32(q0), vmovn_u32(q1)))); + vst1_u8(qq + l + 8, vmovn_u16(vcombine_u16(vmovn_u32(q2), vmovn_u32(q3)))); + } - for (int b = 0; b < QB; b++) { - pb[i*nq*QB + s*QB + b] = pp[b]; + for (int l = 0; l < gq_t_bits; l++) { + for (int b = 0; b < QB; b++) { + const uint64_t ql = qq[l]; + /*pp[b] |= qq[l] & (1 << b) ? (1ULL << l) : 0;*/ + pp[b] |= ((ql & (1 << b)) >> b) << l; + } + } } +#endif +#endif + memcpy(pb + i*nq*QB + s*QB, pp, sizeof(pp)); } } } @@ -290,9 +515,6 @@ void quantize_2(const float * restrict src, char * restrict dst, int n, int k) { } void vec_dot_gq_2(const int n, float * restrict s, const void * restrict x, const void * restrict y) { - float sumf[(QB + 1)*(QB + 1)]; - memset(sumf, 0, sizeof(sumf)); - const int nb = quantize_2_blocks_per_row(n); const int nq = quantize_2_quants_per_block(); @@ -305,10 +527,9 @@ void vec_dot_gq_2(const int n, float * restrict s, const void * restrict x, cons const gq_quant_t * restrict pb0 = (const gq_quant_t *) (pd0 + nb); const gq_quant_t * restrict pb1 = (const gq_quant_t *) (pd1 + nb); -#if 1 - float s0[QB + 1]; - float s1[QB + 1]; + float sumf = 0.0; +#if 1 for (int i = 0; i < nb; i++) { const float m0 = GGML_GQ_TO_FP32(pm0[i]); const float d0 = GGML_GQ_TO_FP32(pd0[i]); @@ -316,6 +537,99 @@ void vec_dot_gq_2(const int n, float * restrict s, const void * restrict x, cons const float m1 = GGML_GQ_TO_FP32(pm1[i]); const float d1 = GGML_GQ_TO_FP32(pd1[i]); +#if QB == 4 + int isum01 = 0; + int isum10 = 0; + int isum11 = 0; + + for (int s = 0; s < nq; ++s) { + const gq_quant_t * restrict mm0 = pb0 + i*nq*QB + s*QB; + const gq_quant_t * restrict mm1 = pb1 + i*nq*QB + s*QB; + +#define bpcnt(x) __builtin_popcountll(x) + isum01 += (1 << 0)*(bpcnt(mm1[0])); + isum01 += (1 << 1)*(bpcnt(mm1[1])); + isum01 += (1 << 2)*(bpcnt(mm1[2])); + isum01 += (1 << 3)*(bpcnt(mm1[3])); + + isum10 += (1 << 0)*(bpcnt(mm0[0])); + isum10 += (1 << 1)*(bpcnt(mm0[1])); + isum10 += (1 << 2)*(bpcnt(mm0[2])); + isum10 += (1 << 3)*(bpcnt(mm0[3])); + + isum11 += (1 << 0)*(bpcnt(mm0[0] & mm1[0])); + isum11 += (1 << 1)*(bpcnt(mm0[0] & mm1[1]) + bpcnt(mm0[1] & mm1[0])); + isum11 += (1 << 2)*(bpcnt(mm0[0] & mm1[2]) + bpcnt(mm0[1] & mm1[1]) + bpcnt(mm0[2] & mm1[0])); + isum11 += (1 << 3)*(bpcnt(mm0[0] & mm1[3]) + bpcnt(mm0[1] & mm1[2]) + bpcnt(mm0[2] & mm1[1]) + bpcnt(mm0[3] & mm1[0])); + isum11 += (1 << 4)*(bpcnt(mm0[1] & mm1[3]) + bpcnt(mm0[2] & mm1[2]) + bpcnt(mm0[3] & mm1[1])); + isum11 += (1 << 5)*(bpcnt(mm0[2] & mm1[3]) + bpcnt(mm0[3] & mm1[2])); + isum11 += (1 << 6)*(bpcnt(mm0[3] & mm1[3])); +#undef bpcnt + } + + sumf += nq*gq_t_bits*(m0*m1) + isum01*(m0*d1) + isum10*(m1*d0) + isum11*(d0*d1); 
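In this scheme each value of a block is reconstructed as roughly m + d*q with a QB-bit quant q, so the dot product of two blocks expands into nq*gq_t_bits*m0*m1 + isum01*m0*d1 + isum10*m1*d0 + isum11*d0*d1, which is exactly what the three integer accumulators above feed into sumf. Because the quants are stored as bit-planes of gq_t_bits values, each integer sum reduces to weighted popcounts of ANDed planes. Below is a small self-contained check of the cross-term identity sum_l q0[l]*q1[l] = sum_{b0,b1} 2^(b0+b1) * popcount(p0[b0] & p1[b1]); it assumes GCC/Clang for __builtin_popcountll and is not code from the patch.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define QB 4
#define NL 64   // quants per bit-plane, same as gq_t_bits

int main(void) {
    uint8_t  q0[NL], q1[NL];
    uint64_t p0[QB] = {0}, p1[QB] = {0};

    // random QB-bit quants, packed into bit-planes
    for (int l = 0; l < NL; ++l) {
        q0[l] = rand() & ((1 << QB) - 1);
        q1[l] = rand() & ((1 << QB) - 1);
        for (int b = 0; b < QB; ++b) {
            p0[b] |= (uint64_t)((q0[l] >> b) & 1) << l;
            p1[b] |= (uint64_t)((q1[l] >> b) & 1) << l;
        }
    }

    // reference: plain sum of products
    int64_t ref = 0;
    for (int l = 0; l < NL; ++l) ref += (int64_t)q0[l]*q1[l];

    // bit-plane form: weighted popcounts of ANDed planes
    int64_t isum = 0;
    for (int b0 = 0; b0 < QB; ++b0) {
        for (int b1 = 0; b1 < QB; ++b1) {
            isum += ((int64_t)1 << (b0 + b1)) * __builtin_popcountll(p0[b0] & p1[b1]);
        }
    }

    assert(ref == isum);
    printf("ok: %lld\n", (long long)isum);
    return 0;
}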
+#elif QB == 3 + int isum01 = 0; + int isum10 = 0; + int isum11 = 0; + + for (int s = 0; s < nq; ++s) { + const gq_quant_t * restrict mm0 = pb0 + i*nq*QB + s*QB; + const gq_quant_t * restrict mm1 = pb1 + i*nq*QB + s*QB; + +#if gq_t_bits == 32 +#define bpcnt(x) __builtin_popcount(x) +#else +#define bpcnt(x) __builtin_popcountll(x) +#endif + isum01 += (1 << 0)*(bpcnt(mm1[0])); + isum01 += (1 << 1)*(bpcnt(mm1[1])); + isum01 += (1 << 2)*(bpcnt(mm1[2])); + + isum10 += (1 << 0)*(bpcnt(mm0[0])); + isum10 += (1 << 1)*(bpcnt(mm0[1])); + isum10 += (1 << 2)*(bpcnt(mm0[2])); + + isum11 += (1 << 0)*(bpcnt(mm0[0] & mm1[0])); + isum11 += (1 << 1)*(bpcnt(mm0[0] & mm1[1]) + bpcnt(mm0[1] & mm1[0])); + isum11 += (1 << 2)*(bpcnt(mm0[0] & mm1[2]) + bpcnt(mm0[1] & mm1[1]) + bpcnt(mm0[2] & mm1[0])); + isum11 += (1 << 3)*(bpcnt(mm0[1] & mm1[2]) + bpcnt(mm0[2] & mm1[1])); + isum11 += (1 << 4)*(bpcnt(mm0[2] & mm1[2])); +#undef bpcnt + } + + sumf += nq*gq_t_bits*(m0*m1) + isum01*(m0*d1) + isum10*(m1*d0) + isum11*(d0*d1); +#elif QB == 2 + int isum01 = 0; + int isum10 = 0; + int isum11 = 0; + + for (int s = 0; s < nq; ++s) { + const gq_quant_t * restrict mm0 = pb0 + i*nq*QB + s*QB; + const gq_quant_t * restrict mm1 = pb1 + i*nq*QB + s*QB; + +#if gq_t_bits == 32 +#define bpcnt(x) __builtin_popcount(x) +#else +#define bpcnt(x) __builtin_popcountll(x) +#endif + isum01 += (1 << 0)*(bpcnt(mm1[0])); + isum01 += (1 << 1)*(bpcnt(mm1[1])); + + isum10 += (1 << 0)*(bpcnt(mm0[0])); + isum10 += (1 << 1)*(bpcnt(mm0[1])); + + isum11 += (1 << 0)*(bpcnt(mm0[0] & mm1[0])); + isum11 += (1 << 1)*(bpcnt(mm0[0] & mm1[1]) + bpcnt(mm0[1] & mm1[0])); + isum11 += (1 << 2)*(bpcnt(mm0[1] & mm1[1])); +#undef bpcnt + } + + sumf += nq*gq_t_bits*(m0*m1) + isum01*(m0*d1) + isum10*(m1*d0) + isum11*(d0*d1); +#else + float s0[QB + 1]; + float s1[QB + 1]; + s0[0] = m0; s1[0] = m1; @@ -329,36 +643,17 @@ void vec_dot_gq_2(const int n, float * restrict s, const void * restrict x, cons const gq_quant_t mm0 = q0 ? pb0[i*nq*QB + s*QB + q0 - 1] : -1ULL; for (int q1 = 0; q1 < QB + 1; q1++) { const gq_quant_t mm1 = q1 ? 
pb1[i*nq*QB + s*QB + q1 - 1] : -1ULL; - sumf[q0*(QB + 1) + q1] += s0[q0]*s1[q1]*__builtin_popcountll(mm0 & mm1); + sumf += s0[q0]*s1[q1]*__builtin_popcountll(mm0 & mm1); } } } +#endif } #else - // SIMD-ify with the assumptions: - // - nb is a multiple of 4 - // - gq_scale_t is float - // - gq_quant_t is uint64_t - // - QB == 7 - assert(nb % 4 == 0); - -#ifdef __ARM_NEON -#else - // TODO -#endif - +#error "not implemented" #endif - for (int q0 = 0; q0 < QB + 1; q0++) { - for (int q1 = 1; q1 < QB + 1; q1++) { - sumf[q0*(QB + 1)] += sumf[q0*(QB + 1) + q1]; - } - } - - *s = sumf[0]; - for (int q0 = 1; q0 < QB + 1; q0++) { - *s += sumf[q0*(QB + 1)]; - } + *s = sumf; } // use vec_dot_gq_2 to compute the dot product of two rows @@ -384,83 +679,1904 @@ void mul_mat_gq_2( } } -int main(int argc, const char ** argv) { - assert(sizeof(gq_quant_t)*8 == gq_t_bits); +// +// method 3 +// (does not work) +// - float * src0 = (float *)malloc(sizeof(float)*M*K); - float * src1 = (float *)malloc(sizeof(float)*N*K); - float * dst = (float *)malloc(sizeof(float)*M*N); +static inline int quantize_3_blocks_per_row(int k) { + return k/QK; +} - for (int i = 0; i < M*K; i++) { - src0[i] = rand() / (float)RAND_MAX; - } +static inline int quantize_3_quants_per_block() { + return QK/gq_t_bits; +} - for (int i = 0; i < N*K; i++) { - src1[i] = rand() / (float)RAND_MAX; - } +static inline int quantize_3_row_size(int k) { + const int nb = quantize_3_blocks_per_row(k); + const int nq = quantize_3_quants_per_block(); + + return nb*(sizeof(gq_scale_t) + nq*QB*sizeof(gq_quant_t)); +} - void * src0_gq = calloc(1, quantize_2_row_size(K)*M); - void * src1_gq = calloc(1, quantize_2_row_size(K)*N); +void quantize_3_row(const float * restrict src, void * restrict dst, int k) { + assert(k % QK == 0); - const size_t sizef16 = sizeof(ggml_fp16_t)*M*K + sizeof(ggml_fp16_t)*N*K; - const size_t sizegq = quantize_2_row_size(K)*M + quantize_2_row_size(K)*N; + const int nb = quantize_3_blocks_per_row(k); + const int nq = quantize_3_quants_per_block(); - printf("compression: %f\n", (float)sizegq/sizef16); + gq_scale_t * restrict pd = (gq_scale_t *) (dst); + gq_quant_t * restrict pb = (gq_quant_t *) (pd + nb); - int method = 0; - if (argc > 1) { - method = atoi(argv[1]); - } + gq_quant_t pp[QB]; - // convert fp32 -> gq - { - const uint64_t t_start = get_time_us(); + static const int32_t sh[32] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, + }; - if (method == 1) { - quantize_1(src0, src0_gq, M, K); - quantize_1(src1, src1_gq, N, K); + for (int i = 0; i < nb; i++) { + float amax = 0.0f; // abs max + +#ifdef __ARM_NEON + { + // min / max + //float32x4_t minv = vdupq_n_f32(FLT_MAX); + //float32x4_t maxv = vdupq_n_f32(-FLT_MAX); + + //for (int l = 0; l < QK; l += 4) { + // float32x4_t v = vld1q_f32(src + i*QK + l); + // minv = vminq_f32(minv, v); + // maxv = vmaxq_f32(maxv, v); + //} + + //float32x2_t minv32 = vpmin_f32(vget_low_f32(minv), vget_high_f32(minv)); + //float32x2_t maxv32 = vpmax_f32(vget_low_f32(maxv), vget_high_f32(maxv)); + + //min = MIN(vget_lane_f32(minv32, 0), vget_lane_f32(minv32, 1)); + //max = MAX(vget_lane_f32(maxv32, 0), vget_lane_f32(maxv32, 1)); + + // abs max + float32x4_t amaxv = vdupq_n_f32(0.0f); + + for (int l = 0; l < QK; l += 4) { + float32x4_t v = vld1q_f32(src + i*QK + l); + amaxv = vmaxq_f32(amaxv, vabsq_f32(v)); + } + + float32x2_t amaxv32 = vpmax_f32(vget_low_f32(amaxv), vget_high_f32(amaxv)); + + amax = MAX(vget_lane_f32(amaxv32, 
0), vget_lane_f32(amaxv32, 1)); + } +#else + { + for (int l = 0; l < QK; l++) { + const float v = src[i*QK + l]; + amax = MAX(amax, fabsf(v)); + } } +#endif - if (method == 2) { - quantize_2(src0, src0_gq, M, K); - quantize_2(src1, src1_gq, N, K); + const float d = amax / ((1 << (QB - 1)) - 1); + const float id = d ? 1.0/d : 0.0; + + pd[i] = GGML_FP32_TO_GQ(d); + + for (int s = 0; s < nq; ++s) { + memset(pp, 0, sizeof(pp)); + +#if 0 + for (int l = 0; l < gq_t_bits; l++) { + const float v = src[i*QK + s*gq_t_bits + l]; + const uint8_t q = v*id + frand(); + + for (int b = 0; b < QB; b++) { + pp[b] |= q & (1 << b) ? (1ULL << l) : 0; + } + } +#elif defined(__ARM_NEON) + { + uint32_t ppt[2*4*QB]; + + float32x4_t idv = vdupq_n_f32(id); + + assert(gq_t_bits == 64); + + uint32x4_t p0[QB] = { vdupq_n_u32(0) }; + uint32x4_t p1[QB] = { vdupq_n_u32(0) }; + + for (int l = 0; l < gq_t_bits; l += 16) { + float32x4_t v0 = vld1q_f32(src + i*QK + s*gq_t_bits + l + 0); + float32x4_t v1 = vld1q_f32(src + i*QK + s*gq_t_bits + l + 4); + float32x4_t v2 = vld1q_f32(src + i*QK + s*gq_t_bits + l + 8); + float32x4_t v3 = vld1q_f32(src + i*QK + s*gq_t_bits + l + 12); + + v0 = vmulq_f32(v0, idv); + v1 = vmulq_f32(v1, idv); + v2 = vmulq_f32(v2, idv); + v3 = vmulq_f32(v3, idv); + +#if 1 + v0[0] += frand(); v0[1] += frand(); v0[2] += frand(); v0[3] += frand(); + v1[0] += frand(); v1[1] += frand(); v1[2] += frand(); v1[3] += frand(); + v2[0] += frand(); v2[1] += frand(); v2[2] += frand(); v2[3] += frand(); + v3[0] += frand(); v3[1] += frand(); v3[2] += frand(); v3[3] += frand(); +#endif + + uint32x4_t q0 = vcvtq_u32_f32(v0); + uint32x4_t q1 = vcvtq_u32_f32(v1); + uint32x4_t q2 = vcvtq_u32_f32(v2); + uint32x4_t q3 = vcvtq_u32_f32(v3); + + for (int b = 0; b < QB; ++b) { + uint32x4_t m = vdupq_n_u32(1 << b); + uint32x4_t r = vdupq_n_u32(-b); + + if (l < 32) { + p0[b] = vorrq_u32(p0[b], vshlq_u32(vshlq_u32(vandq_u32(q0, m), r), vld1q_s32(sh + l + 0))); + p0[b] = vorrq_u32(p0[b], vshlq_u32(vshlq_u32(vandq_u32(q1, m), r), vld1q_s32(sh + l + 4))); + p0[b] = vorrq_u32(p0[b], vshlq_u32(vshlq_u32(vandq_u32(q2, m), r), vld1q_s32(sh + l + 8))); + p0[b] = vorrq_u32(p0[b], vshlq_u32(vshlq_u32(vandq_u32(q3, m), r), vld1q_s32(sh + l + 12))); + } else { + p1[b] = vorrq_u32(p1[b], vshlq_u32(vshlq_u32(vandq_u32(q0, m), r), vld1q_s32(sh + l - 32))); + p1[b] = vorrq_u32(p1[b], vshlq_u32(vshlq_u32(vandq_u32(q1, m), r), vld1q_s32(sh + l - 28))); + p1[b] = vorrq_u32(p1[b], vshlq_u32(vshlq_u32(vandq_u32(q2, m), r), vld1q_s32(sh + l - 24))); + p1[b] = vorrq_u32(p1[b], vshlq_u32(vshlq_u32(vandq_u32(q3, m), r), vld1q_s32(sh + l - 20))); + } + } + } + +#if QB == 4 + vst1q_u32((uint32_t *) ppt + 0, p0[0]); + vst1q_u32((uint32_t *) ppt + 4, p1[0]); + vst1q_u32((uint32_t *) ppt + 8, p0[1]); + vst1q_u32((uint32_t *) ppt + 12, p1[1]); + vst1q_u32((uint32_t *) ppt + 16, p0[2]); + vst1q_u32((uint32_t *) ppt + 20, p1[2]); + vst1q_u32((uint32_t *) ppt + 24, p0[3]); + vst1q_u32((uint32_t *) ppt + 28, p1[3]); + + pp[0] = (ppt[0] | ppt[1] | ppt[2] | ppt[3] ) | ((uint64_t) (ppt[4] | ppt[5] | ppt[6] | ppt[7]) ) << 32; + pp[1] = (ppt[8] | ppt[9] | ppt[10] | ppt[11]) | ((uint64_t) (ppt[12] | ppt[13] | ppt[14] | ppt[15])) << 32; + pp[2] = (ppt[16] | ppt[17] | ppt[18] | ppt[19]) | ((uint64_t) (ppt[20] | ppt[21] | ppt[22] | ppt[23])) << 32; + pp[3] = (ppt[24] | ppt[25] | ppt[26] | ppt[27]) | ((uint64_t) (ppt[28] | ppt[29] | ppt[30] | ppt[31])) << 32; +#else + for (int q = 0; q < QB; ++q) { + vst1q_u32((uint32_t *) ppt + 0, p0[q]); + vst1q_u32((uint32_t *) ppt + 4, 
p1[q]); + + pp[q] = (ppt[0] | ppt[1] | ppt[2] | ppt[3]) | ((uint64_t) (ppt[4] | ppt[5] | ppt[6] | ppt[7])) << 32; + } +#endif + } +#endif + memcpy(pb + i*nq*QB + s*QB, pp, sizeof(pp)); } + } +} - const uint64_t t_end = get_time_us(); - printf("convert time: %f ms / method = %d\n", (t_end - t_start) / 1000.0, method); +// reimplementation of quantize_3 using quantize_3_row +void quantize_3(const float * restrict src, char * restrict dst, int n, int k) { + assert(k % QK == 0); + + for (int j = 0; j < n; j++) { + quantize_3_row(src + j*k, dst, k); + dst = (char *) dst + quantize_3_row_size(k); } +} - const int nIter = 1; +void vec_dot_gq_3(const int n, float * restrict s, const void * restrict x, const void * restrict y) { + float sumf = 0.0f; - const clock_t start = clock(); - const uint64_t start_us = get_time_us(); + const int nb = quantize_3_blocks_per_row(n); + const int nq = quantize_3_quants_per_block(); - double iM = 1.0/M; - double sum = 0.0f; - for (int i = 0; i < nIter; i++) { - if (method == 0) { - mul_mat_f32_naive(src0, src1, dst, M, N, K); - } + const gq_scale_t * restrict pd0 = (const gq_scale_t *) x; + const gq_scale_t * restrict pd1 = (const gq_scale_t *) y; - if (method == 1) { - mul_mat_gq_1(src0_gq, src1_gq, dst, M, N, K); - } + const gq_quant_t * restrict pb0 = (const gq_quant_t *) (pd0 + nb); + const gq_quant_t * restrict pb1 = (const gq_quant_t *) (pd1 + nb); - if (method == 2) { - mul_mat_gq_2(src0_gq, src1_gq, dst, M, N, K); +#if 1 + for (int i = 0; i < nb; i++) { + int isum = 0; + +#if QB == 4 + for (int s = 0; s < nq; ++s) { + const gq_quant_t * restrict m0 = pb0 + i*nq*QB + s*QB; + const gq_quant_t * restrict m1 = pb1 + i*nq*QB + s*QB; + + isum += (1 << 0)*(__builtin_popcountll(m0[0] & m1[0])); + isum += (1 << 1)*(__builtin_popcountll(m0[0] & m1[1]) + __builtin_popcountll(m0[1] & m1[0])); + isum += (1 << 2)*(__builtin_popcountll(m0[0] & m1[2]) + __builtin_popcountll(m0[1] & m1[1]) + __builtin_popcountll(m0[2] & m1[0])); + isum += (1 << 3)*(__builtin_popcountll(m0[0] & m1[3]) + __builtin_popcountll(m0[1] & m1[2]) + __builtin_popcountll(m0[2] & m1[1]) + __builtin_popcountll(m0[3] & m1[0])); + isum += (1 << 4)*(__builtin_popcountll(m0[1] & m1[3]) + __builtin_popcountll(m0[2] & m1[2]) + __builtin_popcountll(m0[3] & m1[1])); + isum += (1 << 5)*(__builtin_popcountll(m0[2] & m1[3]) + __builtin_popcountll(m0[3] & m1[2])); + isum += (1 << 6)*(__builtin_popcountll(m0[3] & m1[3])); } - } +#else + for (int s = 0; s < nq; ++s) { + for (int q0 = 0; q0 < QB; q0++) { + const gq_quant_t mm0 = pb0[i*nq*QB + s*QB + q0]; + for (int q1 = 0; q1 < QB; q1++) { + const gq_quant_t mm1 = pb1[i*nq*QB + s*QB + q1]; + isum += (1 << (q0 + q1))*(__builtin_popcountll(mm0 & mm1)); + } + } + } +#endif - for (int i = 0; i < N; i++) { - sum += dst[i]*iM; + const float d0 = GGML_GQ_TO_FP32(pd0[i]); + const float d1 = GGML_GQ_TO_FP32(pd1[i]); + + sumf += d0*d1*isum; } +#else +#ifdef __ARM_NEON + // gq_quant_t == uint64_t + for (int i = 0; i < nb; i += 4) { + int isum[4] = {0, 0, 0, 0}; + + for (int k = 0; k < 4; ++k) { + for (int s = 0; s < nq; ++s) { + const gq_quant_t * restrict m0 = pb0 + (i+k)*nq*QB + s*QB; + const gq_quant_t * restrict m1 = pb1 + (i+k)*nq*QB + s*QB; + +#if QB == 4 +#define bpcnt(x) __builtin_popcountll(x) + //isum[k] += (1ULL << 0)*(bpcnt(m0[0] & m1[0])) + + // (1ULL << 1)*(bpcnt(m0[0] & m1[1]) + bpcnt(m0[1] & m1[0])) + + // (1ULL << 2)*(bpcnt(m0[0] & m1[2]) + bpcnt(m0[1] & m1[1]) + bpcnt(m0[2] & m1[0])) + + // (1ULL << 3)*(bpcnt(m0[0] & m1[3]) + bpcnt(m0[1] & m1[2]) + bpcnt(m0[2] 
& m1[1]) + bpcnt(m0[3] & m1[0])) + + // (1ULL << 4)*(bpcnt(m0[1] & m1[3]) + bpcnt(m0[2] & m1[2]) + bpcnt(m0[3] & m1[1])) + + // (1ULL << 5)*(bpcnt(m0[2] & m1[3]) + bpcnt(m0[3] & m1[2])) + + // (1ULL << 6)*(bpcnt(m0[3] & m1[3])); +#undef bpcnt + + const uint8x8_t m00 = vld1_u8((const uint8_t *) (m0 + 0)); + const uint8x8_t m01 = vld1_u8((const uint8_t *) (m0 + 1)); + const uint8x8_t m02 = vld1_u8((const uint8_t *) (m0 + 2)); + const uint8x8_t m03 = vld1_u8((const uint8_t *) (m0 + 3)); + + const uint8x8_t m10 = vld1_u8((const uint8_t *) (m1 + 0)); + const uint8x8_t m11 = vld1_u8((const uint8_t *) (m1 + 1)); + const uint8x8_t m12 = vld1_u8((const uint8_t *) (m1 + 2)); + const uint8x8_t m13 = vld1_u8((const uint8_t *) (m1 + 3)); + + const uint8x8_t m00m10 = vand_u8(m00, m10); + + const uint8x8_t m00m11 = vand_u8(m00, m11); + const uint8x8_t m01m10 = vand_u8(m01, m10); + + const uint8x8_t m00m12 = vand_u8(m00, m12); + const uint8x8_t m01m11 = vand_u8(m01, m11); + const uint8x8_t m02m10 = vand_u8(m02, m10); + + const uint8x8_t m00m13 = vand_u8(m00, m13); + const uint8x8_t m01m12 = vand_u8(m01, m12); + const uint8x8_t m02m11 = vand_u8(m02, m11); + const uint8x8_t m03m10 = vand_u8(m03, m10); + + const uint8x8_t m01m13 = vand_u8(m01, m13); + const uint8x8_t m02m12 = vand_u8(m02, m12); + const uint8x8_t m03m11 = vand_u8(m03, m11); + + const uint8x8_t m02m13 = vand_u8(m02, m13); + const uint8x8_t m03m12 = vand_u8(m03, m12); + + const uint8x8_t m03m13 = vand_u8(m03, m13); + +#define bpcnt(x) vaddv_u8(vcnt_u8(x)) + isum[k] += (1ULL << 0)*(bpcnt(m00m10)) + + (1ULL << 1)*(bpcnt(m00m11) + bpcnt(m01m10)) + + (1ULL << 2)*(bpcnt(m00m12) + bpcnt(m01m11) + bpcnt(m02m10)) + + (1ULL << 3)*(bpcnt(m00m13) + bpcnt(m01m12) + bpcnt(m02m11) + bpcnt(m03m10)) + + (1ULL << 4)*(bpcnt(m01m13) + bpcnt(m02m12) + bpcnt(m03m11)) + + (1ULL << 5)*(bpcnt(m02m13) + bpcnt(m03m12)) + + (1ULL << 6)*(bpcnt(m03m13)); +#undef bpcnt +#else + for (int q0 = 0; q0 < QB; q0++) { + const gq_quant_t mm0 = m0[q0]; + for (int q1 = 0; q1 < QB; q1++) { + const gq_quant_t mm1 = m1[q1]; + isum[k] += (1ULL << (q0 + q1))*(__builtin_popcountll(mm0 & mm1)); + } + } +#endif + } + } - { - const clock_t end = clock(); - const uint64_t end_us = get_time_us(); - printf("%s: elapsed ticks: %ld\n", __func__, end - start); - printf("%s: elapsed us: %d / %f ms\n", __func__, (int)(end_us - start_us), (end_us - start_us) / 1000.0 / nIter); + int32x4_t isumv = vld1q_s32(isum); + + float32x4_t d0v = vld1q_f32(pd0 + i); + float32x4_t d1v = vld1q_f32(pd1 + i); + + float32x4_t sumfv = vmulq_f32(d0v, d1v); + + sumfv = vmulq_f32(sumfv, vcvtq_f32_s32(isumv)); + sumf += vaddvq_f32(sumfv); } +#else +#error "not implemented" +#endif + +#endif + *s = sumf; +} + +// use vec_dot_gq_3 to compute the dot product of two rows +void mul_mat_gq_3( + const void * src0, + const void * src1, // transposed + float * dst, + int m, int n, int k) { + assert(k % QK == 0); + + const int nb = quantize_3_blocks_per_row(k); + const int nq = quantize_3_quants_per_block(); + + for (int ir0 = 0; ir0 < m; ir0++) { + for (int ir1 = 0; ir1 < n; ir1++) { + vec_dot_gq_3(k, dst + ir1, src0, src1); + src1 = (const char *) src1 + quantize_3_row_size(k); + } + src0 = (const char *) src0 + quantize_3_row_size(k); + src1 = (const char *) src1 - n*quantize_3_row_size(k); + + dst = (float *) dst + n; + } +} + +// +// method 4 +// 4-bit quantization +// + +static inline int quantize_4_blocks_per_row(int k) { + return k/QK; +} + +static inline int quantize_4_row_size(int k) { + const int nb = 
quantize_4_blocks_per_row(k); + + return nb*(2*sizeof(gq_scale_t) + QK/2); +} + +void quantize_4_row(const float * restrict src, void * restrict dst, int k) { + assert(k % QK == 0); + assert(QB == 4); + + const int nb = quantize_4_blocks_per_row(k); + + gq_scale_t * restrict pm = (gq_scale_t *) (dst); + gq_scale_t * restrict pd = (gq_scale_t *) (pm + nb); + uint8_t * restrict pb = (uint8_t *) (pd + nb); + + uint8_t pp[QK/2]; + + for (int i = 0; i < nb; i++) { + memset(pp, 0, sizeof(pp)); + + float min = FLT_MAX; + float max = -FLT_MAX; + +#if defined(__AVX2__) + { + assert(QK == 64); + const int QK8 = QK/8; + + __m256 srcv[QK8]; + __m256 minv[QK8]; + __m256 maxv[QK8]; + + for (int l = 0; l < QK8; l++) { + srcv[l] = _mm256_loadu_ps(src + i*QK + 8*l); + } + + for (int l = 0; l < QK8/2; l++) { + minv[2*l] = _mm256_min_ps(srcv[2*l], srcv[2*l+1]); + maxv[2*l] = _mm256_max_ps(srcv[2*l], srcv[2*l+1]); + } + + for (int l = 0; l < QK8/4; l++) { + minv[4*l] = _mm256_min_ps(minv[4*l], minv[4*l+2]); + maxv[4*l] = _mm256_max_ps(maxv[4*l], maxv[4*l+2]); + } + + for (int l = 0; l < QK8/8; l++) { + minv[8*l] = _mm256_min_ps(minv[8*l], minv[8*l+4]); + maxv[8*l] = _mm256_max_ps(maxv[8*l], maxv[8*l+4]); + } + + //min = MIN(minv[0][0], MIN(minv[0][1], MIN(minv[0][2], MIN(minv[0][3], MIN(minv[0][4], MIN(minv[0][5], MIN(minv[0][6], minv[0][7]))))))); + //max = MAX(maxv[0][0], MAX(maxv[0][1], MAX(maxv[0][2], MAX(maxv[0][3], MAX(maxv[0][4], MAX(maxv[0][5], MAX(maxv[0][6], maxv[0][7]))))))); + + const __m256 minv0_0 = _mm256_permute2f128_ps(minv[0], minv[0], 3); + const __m256 minv0_1 = _mm256_min_ps(minv[0], minv0_0); + const __m256 minv0_2 = _mm256_permute_ps(minv0_1, 0x4e); + const __m256 minv0_3 = _mm256_min_ps(minv0_1, minv0_2); + const __m256 minv0_4 = _mm256_permute_ps(minv0_3, 0xb1); + const __m256 minv0_5 = _mm256_min_ps(minv0_3, minv0_4); + + const __m256 maxv0_0 = _mm256_permute2f128_ps(maxv[0], maxv[0], 3); + const __m256 maxv0_1 = _mm256_max_ps(maxv[0], maxv0_0); + const __m256 maxv0_2 = _mm256_permute_ps(maxv0_1, 0x4e); + const __m256 maxv0_3 = _mm256_max_ps(maxv0_1, maxv0_2); + const __m256 maxv0_4 = _mm256_permute_ps(maxv0_3, 0xb1); + const __m256 maxv0_5 = _mm256_max_ps(maxv0_3, maxv0_4); + + min = _mm256_cvtss_f32(minv0_5); + max = _mm256_cvtss_f32(maxv0_5); + + const float d = (max - min) / ((1 << QB) - 2); + const float id = d ? 
1.0/d : 0.0; + + pm[i] = GGML_FP32_TO_GQ(min); + pd[i] = GGML_FP32_TO_GQ(d); + + const __m256 idv = _mm256_set1_ps(id); + + for (int l = 0; l < QK/8; l++) { + __m256 v = _mm256_mul_ps(_mm256_sub_ps(srcv[l], _mm256_set1_ps(min)), idv); +#if 0 + v[0] += frand(); v[1] += frand(); v[2] += frand(); v[3] += frand(); + v[4] += frand(); v[5] += frand(); v[6] += frand(); v[7] += frand(); +#endif + + // convert to uint8 + __m256i vi = _mm256_cvtps_epi32(v); + + uint32_t vi_0 = _mm256_extract_epi32(vi, 0); + uint32_t vi_1 = _mm256_extract_epi32(vi, 1); + uint32_t vi_2 = _mm256_extract_epi32(vi, 2); + uint32_t vi_3 = _mm256_extract_epi32(vi, 3); + + uint32_t vi_4 = _mm256_extract_epi32(vi, 4); + uint32_t vi_5 = _mm256_extract_epi32(vi, 5); + uint32_t vi_6 = _mm256_extract_epi32(vi, 6); + uint32_t vi_7 = _mm256_extract_epi32(vi, 7); + + // convert to 4-bit, 2 consecutive packed into 1 byte + pp[4*l + 0] = vi_0 | (vi_1 << 4); + pp[4*l + 1] = vi_2 | (vi_3 << 4); + pp[4*l + 2] = vi_4 | (vi_5 << 4); + pp[4*l + 3] = vi_6 | (vi_7 << 4); + + //printf("vi: %7d %7d %7d %7d %7d %7d %7d %7d\n", vi_0, vi_1, vi_2, vi_3, vi_4, vi_5, vi_6, vi_7); + //printf("v : %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f\n", v[0], v[1], v[2], v[3], v[4], v[5], v[6], v[7]); + } + + memcpy(pb + i*QK/2, pp, sizeof(pp)); + } +#elif defined(__ARM_NEON) && 0 + { + // TODO + } +#else + { + for (int l = 0; l < QK; l++) { + const float v = src[i*QK + l]; + if (v < min) min = v; + if (v > max) max = v; + } + + const float d = (max - min) / ((1 << QB) - 1); + const float id = d ? 1.0/d : 0.0; + + pm[i] = GGML_FP32_TO_GQ(min); + pd[i] = GGML_FP32_TO_GQ(d); + + for (int l = 0; l < QK; l++) { + const float v = (src[i*QK + l] - min) * id; + const uint8_t vi = (uint8_t) (v + frand()); + pp[l/2] |= (vi & 0xf) << (4*(l & 1)); + } + + memcpy(pb + i*QK/2, pp, sizeof(pp)); + } +#endif + //printf("min %f max %f\n", min, max); + } +} + +// reimplementation of quantize_4 using quantize_4_row +void quantize_4(const float * restrict src, char * restrict dst, int n, int k) { + assert(k % QK == 0); + + for (int j = 0; j < n; j++) { + quantize_4_row(src + j*k, dst, k); + dst = (char *) dst + quantize_4_row_size(k); + } +} + +void vec_dot_gq_4(const int n, float * restrict s, const void * restrict x, const void * restrict y) { + const int nb = quantize_4_blocks_per_row(n); + + const gq_scale_t * restrict pm0 = (const gq_scale_t *) x; + const gq_scale_t * restrict pm1 = (const gq_scale_t *) y; + + const gq_scale_t * restrict pd0 = pm0 + nb; + const gq_scale_t * restrict pd1 = pm1 + nb; + + const uint8_t * restrict pb0 = (const uint8_t *) (pd0 + nb); + const uint8_t * restrict pb1 = (const uint8_t *) (pd1 + nb); + + float sumf = 0.0; + +#if 0 + // scalar + for (int i = 0; i < nb; i++) { + const float m0 = GGML_GQ_TO_FP32(pm0[i]); + const float d0 = GGML_GQ_TO_FP32(pd0[i]); + + const float m1 = GGML_GQ_TO_FP32(pm1[i]); + const float d1 = GGML_GQ_TO_FP32(pd1[i]); + + const uint8_t * restrict p0 = pb0 + i*QK/2; + const uint8_t * restrict p1 = pb1 + i*QK/2; + + for (int j = 0; j < QK/2; j++) { + const uint8_t v0 = p0[j]; + const uint8_t v1 = p1[j]; + + const float f0 = d0*(v0 & 0xf) + m0; + const float f1 = d0*(v0 >> 4) + m0; + + const float f2 = d1*(v1 & 0xf) + m1; + const float f3 = d1*(v1 >> 4) + m1; + + sumf += f0*f2 + f1*f3; + } + } +#else +#if defined(__AVX2__) +#if QK == 64 && 0 + __m256 sumv0 = _mm256_setzero_ps(); + __m256 sumv1 = _mm256_setzero_ps(); + + for (int i = 0; i < nb; i++) { + const float m0 = GGML_GQ_TO_FP32(pm0[i]); + const float d0 = 
GGML_GQ_TO_FP32(pd0[i]); + + const float m1 = GGML_GQ_TO_FP32(pm1[i]); + const float d1 = GGML_GQ_TO_FP32(pd1[i]); + + const uint8_t * restrict p0 = pb0 + i*QK/2; + const uint8_t * restrict p1 = pb1 + i*QK/2; + + const __m256 m0v = _mm256_set1_ps(m0); + const __m256 d0v = _mm256_set1_ps(d0); + + const __m256 m1v = _mm256_set1_ps(m1); + const __m256 d1v = _mm256_set1_ps(d1); + + const __m256i m4b = _mm256_set1_epi8(0xf); + + __m256i v0 = _mm256_loadu_si256((__m256i *) p0); + + //_mm_prefetch((const char *) (p0 + 32), _MM_HINT_T0); + //_mm_prefetch((const char *) (p1 + 32), _MM_HINT_T0); + //_mm_prefetch((const char *) (pm0 + i + 1), _MM_HINT_T0); + //_mm_prefetch((const char *) (pm1 + i + 1), _MM_HINT_T0); + //_mm_prefetch((const char *) (pd0 + i + 1), _MM_HINT_T0); + //_mm_prefetch((const char *) (pd1 + i + 1), _MM_HINT_T0); + + __m256i v00 = _mm256_and_si256(v0, _mm256_set1_epi32(0x000000FF)); + __m256i v01 = _mm256_srli_epi32(_mm256_and_si256(v0, _mm256_set1_epi32(0x0000FFFF)), 8); + __m256i v02 = _mm256_srli_epi32(_mm256_and_si256(v0, _mm256_set1_epi32(0x00FFFFFF)), 16); + __m256i v03 = _mm256_srli_epi32(v0, 24); + + ////////////////////// + + //{ + // uint32_t vi_0 = _mm256_extract_epi32(v00, 0); + // uint32_t vi_1 = _mm256_extract_epi32(v00, 1); + // uint32_t vi_2 = _mm256_extract_epi32(v00, 2); + // uint32_t vi_3 = _mm256_extract_epi32(v00, 3); + // uint32_t vi_4 = _mm256_extract_epi32(v00, 4); + // uint32_t vi_5 = _mm256_extract_epi32(v00, 5); + // uint32_t vi_6 = _mm256_extract_epi32(v00, 6); + // uint32_t vi_7 = _mm256_extract_epi32(v00, 7); + // printf("v0: %7d %7d %7d %7d %7d %7d %7d %7d\n", vi_0, vi_1, vi_2, vi_3, vi_4, vi_5, vi_6, vi_7); + // printf("p0: %7d %7d %7d %7d %7d %7d %7d %7d\n", p0[0], p0[4], p0[8], p0[12], p0[16], p0[20], p0[24], p0[28]); + // printf("p1: %7d %7d %7d %7d %7d %7d %7d %7d\n", p0[1], p0[5], p0[9], p0[13], p0[17], p0[21], p0[25], p0[29]); + // printf("p2: %7d %7d %7d %7d %7d %7d %7d %7d\n", p0[2], p0[6], p0[10], p0[14], p0[18], p0[22], p0[26], p0[30]); + // printf("p3: %7d %7d %7d %7d %7d %7d %7d %7d\n", p0[3], p0[7], p0[11], p0[15], p0[19], p0[23], p0[27], p0[31]); + //} + + // compute 32 x 4-bit values (low and high) + __m256i v00l = _mm256_and_si256(v00, m4b); + __m256i v01l = _mm256_and_si256(v01, m4b); + __m256i v02l = _mm256_and_si256(v02, m4b); + __m256i v03l = _mm256_and_si256(v03, m4b); + + __m256i v00h = _mm256_srli_epi32(v00, 4); + __m256i v01h = _mm256_srli_epi32(v01, 4); + __m256i v02h = _mm256_srli_epi32(v02, 4); + __m256i v03h = _mm256_srli_epi32(v03, 4); + + //{ + // uint32_t vi_0 = _mm256_extract_epi32(v00l, 0); + // uint32_t vi_1 = _mm256_extract_epi32(v00l, 1); + // uint32_t vi_2 = _mm256_extract_epi32(v00l, 2); + // uint32_t vi_3 = _mm256_extract_epi32(v00l, 3); + // uint32_t vi_4 = _mm256_extract_epi32(v00l, 4); + // uint32_t vi_5 = _mm256_extract_epi32(v00l, 5); + // uint32_t vi_6 = _mm256_extract_epi32(v00l, 6); + // uint32_t vi_7 = _mm256_extract_epi32(v00l, 7); + + // printf("v0l: %7d %7d %7d %7d %7d %7d %7d %7d\n", vi_0, vi_1, vi_2, vi_3, vi_4, vi_5, vi_6, vi_7); + + // vi_0 = _mm256_extract_epi32(v00h, 0); + // vi_1 = _mm256_extract_epi32(v00h, 1); + // vi_2 = _mm256_extract_epi32(v00h, 2); + // vi_3 = _mm256_extract_epi32(v00h, 3); + // vi_4 = _mm256_extract_epi32(v00h, 4); + // vi_5 = _mm256_extract_epi32(v00h, 5); + // vi_6 = _mm256_extract_epi32(v00h, 6); + // vi_7 = _mm256_extract_epi32(v00h, 7); + + // printf("v0h: %7d %7d %7d %7d %7d %7d %7d %7d\n", vi_0, vi_1, vi_2, vi_3, vi_4, vi_5, vi_6, vi_7); + //} + + // convert 
to float + __m256 vf00l = _mm256_cvtepi32_ps(v00l); + __m256 vf01l = _mm256_cvtepi32_ps(v01l); + __m256 vf02l = _mm256_cvtepi32_ps(v02l); + __m256 vf03l = _mm256_cvtepi32_ps(v03l); + + __m256 vf00h = _mm256_cvtepi32_ps(v00h); + __m256 vf01h = _mm256_cvtepi32_ps(v01h); + __m256 vf02h = _mm256_cvtepi32_ps(v02h); + __m256 vf03h = _mm256_cvtepi32_ps(v03h); + + //{ + // printf("vf00l: %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f\n", vf00l[0], vf00l[1], vf00l[2], vf00l[3], vf00l[4], vf00l[5], vf00l[6], vf00l[7]); + // printf("vf01l: %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f\n", vf01l[0], vf01l[1], vf01l[2], vf01l[3], vf01l[4], vf01l[5], vf01l[6], vf01l[7]); + // printf("vf02l: %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f\n", vf02l[0], vf02l[1], vf02l[2], vf02l[3], vf02l[4], vf02l[5], vf02l[6], vf02l[7]); + // printf("vf03l: %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f\n", vf03l[0], vf03l[1], vf03l[2], vf03l[3], vf03l[4], vf03l[5], vf03l[6], vf03l[7]); + //} + + // multiply by scale and add offset + vf00l = _mm256_fmadd_ps(vf00l, d0v, m0v); + vf01l = _mm256_fmadd_ps(vf01l, d0v, m0v); + vf02l = _mm256_fmadd_ps(vf02l, d0v, m0v); + vf03l = _mm256_fmadd_ps(vf03l, d0v, m0v); + + vf00h = _mm256_fmadd_ps(vf00h, d0v, m0v); + vf01h = _mm256_fmadd_ps(vf01h, d0v, m0v); + vf02h = _mm256_fmadd_ps(vf02h, d0v, m0v); + vf03h = _mm256_fmadd_ps(vf03h, d0v, m0v); + + __m256i v1 = _mm256_loadu_si256((__m256i *) p1); + + __m256i v10 = _mm256_and_si256(v1, _mm256_set1_epi32(0x000000FF)); + __m256i v11 = _mm256_srli_epi32(_mm256_and_si256(v1, _mm256_set1_epi32(0x0000FFFF)), 8); + __m256i v12 = _mm256_srli_epi32(_mm256_and_si256(v1, _mm256_set1_epi32(0x00FFFFFF)), 16); + __m256i v13 = _mm256_srli_epi32(v1, 24); + + __m256i v10l = _mm256_and_si256(v10, m4b); + __m256i v11l = _mm256_and_si256(v11, m4b); + __m256i v12l = _mm256_and_si256(v12, m4b); + __m256i v13l = _mm256_and_si256(v13, m4b); + + __m256i v10h = _mm256_srli_epi32(v10, 4); + __m256i v11h = _mm256_srli_epi32(v11, 4); + __m256i v12h = _mm256_srli_epi32(v12, 4); + __m256i v13h = _mm256_srli_epi32(v13, 4); + + __m256 vf10l = _mm256_cvtepi32_ps(v10l); + __m256 vf11l = _mm256_cvtepi32_ps(v11l); + __m256 vf12l = _mm256_cvtepi32_ps(v12l); + __m256 vf13l = _mm256_cvtepi32_ps(v13l); + + __m256 vf10h = _mm256_cvtepi32_ps(v10h); + __m256 vf11h = _mm256_cvtepi32_ps(v11h); + __m256 vf12h = _mm256_cvtepi32_ps(v12h); + __m256 vf13h = _mm256_cvtepi32_ps(v13h); + + vf10l = _mm256_fmadd_ps(vf10l, d1v, m1v); + vf11l = _mm256_fmadd_ps(vf11l, d1v, m1v); + vf12l = _mm256_fmadd_ps(vf12l, d1v, m1v); + vf13l = _mm256_fmadd_ps(vf13l, d1v, m1v); + + vf10h = _mm256_fmadd_ps(vf10h, d1v, m1v); + vf11h = _mm256_fmadd_ps(vf11h, d1v, m1v); + vf12h = _mm256_fmadd_ps(vf12h, d1v, m1v); + vf13h = _mm256_fmadd_ps(vf13h, d1v, m1v); + + // compute dot product + sumv0 = _mm256_fmadd_ps(vf00l, vf10l, sumv0); + sumv0 = _mm256_fmadd_ps(vf01l, vf11l, sumv0); + sumv0 = _mm256_fmadd_ps(vf02l, vf12l, sumv0); + sumv0 = _mm256_fmadd_ps(vf03l, vf13l, sumv0); + + sumv1 = _mm256_fmadd_ps(vf00h, vf10h, sumv1); + sumv1 = _mm256_fmadd_ps(vf01h, vf11h, sumv1); + sumv1 = _mm256_fmadd_ps(vf02h, vf12h, sumv1); + sumv1 = _mm256_fmadd_ps(vf03h, vf13h, sumv1); + } + + // accumulate (horizontal sum) + const __m256 vdot = _mm256_add_ps(sumv0, sumv1); + const __m128 t0 = _mm_add_ps(_mm256_castps256_ps128(vdot), _mm256_extractf128_ps(vdot, 1)); + const __m128 t1 = _mm_hadd_ps(t0, t0); + + sumf += _mm_cvtss_f32(_mm_hadd_ps(t1, t1)); +#elif QK == 64 && 0 + float sum00 = 0.0f; + float sum01 = 0.0f; + float sum10 
= 0.0f; + float sum11 = 0.0f; + + const __m256i m4b = _mm256_set1_epi8(0xf); + + for (int i = 0; i < nb; i++) { + const float m0 = GGML_GQ_TO_FP32(pm0[i]); + const float d0 = GGML_GQ_TO_FP32(pd0[i]); + + const float m1 = GGML_GQ_TO_FP32(pm1[i]); + const float d1 = GGML_GQ_TO_FP32(pd1[i]); + + const uint8_t * restrict p0 = pb0 + i*QK/2; + const uint8_t * restrict p1 = pb1 + i*QK/2; + + // 64 x 4 + const __m256i v0 = _mm256_loadu_si256((__m256i *) p0); + const __m256i v1 = _mm256_loadu_si256((__m256i *) p1); + + // 32 x 8 + const __m256i v0l = _mm256_and_si256(v0, m4b); + const __m256i v1l = _mm256_and_si256(v1, m4b); + + const __m256i v0h = _mm256_and_si256(_mm256_srli_epi16(v0, 4), m4b); + const __m256i v1h = _mm256_and_si256(_mm256_srli_epi16(v1, 4), m4b); + + const __m256i pl = _mm256_maddubs_epi16(v0l, v1l); + const __m256i ph = _mm256_maddubs_epi16(v0h, v1h); + + const __m256i p16 = _mm256_add_epi16(ph, pl); + const __m256i p = _mm256_madd_epi16(_mm256_set1_epi16(1), p16); + + sum00 += m0*m1; + sum01 += m1*d0*(_mm256_hadd_epi8_gg(_mm256_add_epi8(v0l, v0h))); + sum10 += m0*d1*(_mm256_hadd_epi8_gg(_mm256_add_epi8(v1l, v1h))); + sum11 += d0*d1*(_mm256_hadd_epi32_gg(p)); + } + + sumf = 64.0*sum00 + sum01 + sum10 + sum11; +#elif QK == 64 && 1 // this is the best when using min + d + float sum00 = 0.0f; + + __m256 sum01 = _mm256_setzero_ps(); + __m256 sum10 = _mm256_setzero_ps(); + __m256 sum11 = _mm256_setzero_ps(); + + for (int i = 0; i < nb; i++) { + const float m0 = GGML_GQ_TO_FP32(pm0[i]); + const float d0 = GGML_GQ_TO_FP32(pd0[i]); + + const float m1 = GGML_GQ_TO_FP32(pm1[i]); + const float d1 = GGML_GQ_TO_FP32(pd1[i]); + + const uint8_t * restrict p0 = pb0 + i*QK/2; + const uint8_t * restrict p1 = pb1 + i*QK/2; + + const __m256 m0v = _mm256_set1_ps(m0); + const __m256 d0v = _mm256_set1_ps(d0); + + const __m256 m1v = _mm256_set1_ps(m1); + const __m256 d1v = _mm256_set1_ps(d1); + + const __m256 m1d0v = _mm256_mul_ps(m1v, d0v); + const __m256 m0d1v = _mm256_mul_ps(m0v, d1v); + const __m256 d0d1v = _mm256_mul_ps(d0v, d1v); + + const __m256i m4b = _mm256_set1_epi8(0xf); + + // 64 x 4 + const __m256i v0 = _mm256_loadu_si256((__m256i *) p0); + const __m256i v1 = _mm256_loadu_si256((__m256i *) p1); + + // 32 x 8 + const __m256i v0l = _mm256_and_si256(v0, m4b); + const __m256i v1l = _mm256_and_si256(v1, m4b); + + const __m256i v0h = _mm256_and_si256(_mm256_srli_epi16(v0, 4), m4b); + const __m256i v1h = _mm256_and_si256(_mm256_srli_epi16(v1, 4), m4b); + + const __m256i v0a = _mm256_add_epi8(v0l, v0h); + const __m256i v1a = _mm256_add_epi8(v1l, v1h); + + const __m128i v0al = _mm256_extracti128_si256(v0a, 0); + const __m128i v0ah = _mm256_extracti128_si256(v0a, 1); + + const __m128i v1al = _mm256_extracti128_si256(v1a, 0); + const __m128i v1ah = _mm256_extracti128_si256(v1a, 1); + + const __m128i v0as = _mm_add_epi8(v0al, v0ah); + const __m128i v1as = _mm_add_epi8(v1al, v1ah); + + const __m256i v0as_0 = _mm256_cvtepu8_epi32(v0as); + const __m256i v0as_1 = _mm256_cvtepu8_epi32(_mm_srli_si128(v0as, 8)); + + const __m256i v1as_0 = _mm256_cvtepu8_epi32(v1as); + const __m256i v1as_1 = _mm256_cvtepu8_epi32(_mm_srli_si128(v1as, 8)); + + const __m256i v0ass = _mm256_add_epi32(v0as_0, v0as_1); + const __m256i v1ass = _mm256_add_epi32(v1as_0, v1as_1); + + const __m256 v0f = _mm256_cvtepi32_ps(v0ass); + const __m256 v1f = _mm256_cvtepi32_ps(v1ass); + + const __m256i pl = _mm256_maddubs_epi16(v0l, v1l); + const __m256i ph = _mm256_maddubs_epi16(v0h, v1h); + + const __m256i p16 = _mm256_add_epi16(ph, pl); + 
const __m256i p = _mm256_madd_epi16(_mm256_set1_epi16(1), p16); + + sum00 += m0*m1; + sum01 = _mm256_fmadd_ps(m1d0v, v0f, sum01); + sum10 = _mm256_fmadd_ps(m0d1v, v1f, sum10); + sum11 = _mm256_fmadd_ps(d0d1v, _mm256_cvtepi32_ps(p), sum11); + } + + sumf = 64.0*sum00 + _mm256_hadd_ps_gg(sum01) + _mm256_hadd_ps_gg(sum10) + _mm256_hadd_ps_gg(sum11); +#endif +#elif defined (__ARM_NEON) + float sum00 = 0.0f; + float sum01 = 0.0f; + float sum10 = 0.0f; + float sum11 = 0.0f; + + for (int i = 0; i < nb; i++) { + const float m0 = GGML_GQ_TO_FP32(pm0[i]); + const float d0 = GGML_GQ_TO_FP32(pd0[i]); + + const float m1 = GGML_GQ_TO_FP32(pm1[i]); + const float d1 = GGML_GQ_TO_FP32(pd1[i]); + + const uint8_t * restrict p0 = pb0 + i*QK/2; + const uint8_t * restrict p1 = pb1 + i*QK/2; + + const uint8x16_t m4b = vdupq_n_u8(0xf); + + const uint8x16_t v0_0 = vld1q_u8(p0); + const uint8x16_t v0_1 = vld1q_u8(p0 + 16); + const uint8x16_t v1_0 = vld1q_u8(p1); + const uint8x16_t v1_1 = vld1q_u8(p1 + 16); + + // and with 0xf + const uint8x16_t v0_0l = vandq_u8(v0_0, m4b); + const uint8x16_t v0_1l = vandq_u8(v0_1, m4b); + const uint8x16_t v1_0l = vandq_u8(v1_0, m4b); + const uint8x16_t v1_1l = vandq_u8(v1_1, m4b); + + const uint8x16_t v0_0h = vshrq_n_u8(v0_0, 4); + const uint8x16_t v0_1h = vshrq_n_u8(v0_1, 4); + const uint8x16_t v1_0h = vshrq_n_u8(v1_0, 4); + const uint8x16_t v1_1h = vshrq_n_u8(v1_1, 4); + + // dot product into uint16x8_t + const uint16x8_t pl0l = vmull_u8(vget_low_u8 (v0_0l), vget_low_u8 (v1_0l)); + const uint16x8_t pl0h = vmull_u8(vget_high_u8(v0_0l), vget_high_u8(v1_0l)); + const uint16x8_t pl1l = vmull_u8(vget_low_u8 (v0_1l), vget_low_u8 (v1_1l)); + const uint16x8_t pl1h = vmull_u8(vget_high_u8(v0_1l), vget_high_u8(v1_1l)); + + const uint16x8_t ph0l = vmull_u8(vget_low_u8 (v0_0h), vget_low_u8 (v1_0h)); + const uint16x8_t ph0h = vmull_u8(vget_high_u8(v0_0h), vget_high_u8(v1_0h)); + const uint16x8_t ph1l = vmull_u8(vget_low_u8 (v0_1h), vget_low_u8 (v1_1h)); + const uint16x8_t ph1h = vmull_u8(vget_high_u8(v0_1h), vget_high_u8(v1_1h)); + + const uint16x8_t pl0 = vaddq_u16(pl0l, pl0h); + const uint16x8_t pl1 = vaddq_u16(pl1l, pl1h); + const uint16x8_t ph0 = vaddq_u16(ph0l, ph0h); + const uint16x8_t ph1 = vaddq_u16(ph1l, ph1h); + + const uint16x8_t pl = vaddq_u16(pl0, pl1); + const uint16x8_t ph = vaddq_u16(ph0, ph1); + + sum00 += m0*m1; + sum01 += m1*d0*(vaddvq_u8(v0_0l) + vaddvq_u8(v0_0h) + vaddvq_u8(v0_1l) + vaddvq_u8(v0_1h)); + sum10 += m0*d1*(vaddvq_u8(v1_0l) + vaddvq_u8(v1_0h) + vaddvq_u8(v1_1l) + vaddvq_u8(v1_1h)); + //sum11 += d0*d1*( + // vaddvq_u16(vaddq_u16(vaddq_u16(pl0l, pl0h), vaddq_u16(pl1l, pl1h))) + + // vaddvq_u16(vaddq_u16(vaddq_u16(ph0l, ph0h), vaddq_u16(ph1l, ph1h)))); + sum11 += d0*d1*vaddvq_u16(vaddq_u16(pl, ph)); + } + + sumf = 64.0*sum00 + sum01 + sum10 + sum11; +#endif +#endif + + *s = sumf; +} + +// use vec_dot_gq_4 to compute the dot product of two rows +void mul_mat_gq_4( + const void * src0, + const void * src1, // transposed + float * dst, + int m, int n, int k) { + assert(k % QK == 0); + + const int nb = quantize_4_blocks_per_row(k); + + for (int ir0 = 0; ir0 < m; ir0++) { + for (int ir1 = 0; ir1 < n; ir1++) { + vec_dot_gq_4(k, dst + ir1, src0, src1); + src1 = (const char *) src1 + quantize_4_row_size(k); + } + src0 = (const char *) src0 + quantize_4_row_size(k); + src1 = (const char *) src1 - n*quantize_4_row_size(k); + + dst = (float *) dst + n; + } +} + +// +// method 5 +// 4-bit quantization (without min, only delta) +// + +static inline int 
quantize_5_blocks_per_row(int k) { + return k/QK; +} + +static inline int quantize_5_row_size(int k) { + const int nb = quantize_5_blocks_per_row(k); + + return nb*(sizeof(gq_scale_t) + QK/2); +} + +void quantize_5_row(const float * restrict src, void * restrict dst, int k) { + assert(k % QK == 0); + assert(QB == 4); + + const int nb = quantize_5_blocks_per_row(k); + + gq_scale_t * restrict pd = (gq_scale_t *) (dst); + uint8_t * restrict pb = (uint8_t *) (pd + nb); + + uint8_t pp[QK/2]; + + for (int i = 0; i < nb; i++) { + memset(pp, 0, sizeof(pp)); + + float amax = 0.0f; // absolute max + +#if defined(__AVX2__) + { + assert(QK == 64); + const int QK8 = QK/8; + + __m256 srcv [QK8]; + __m256 asrcv[QK8]; + __m256 amaxv[QK8]; + + for (int l = 0; l < QK8; l++) { + srcv[l] = _mm256_loadu_ps(src + i*QK + 8*l); + } + + for (int l = 0; l < QK8; l++) { + asrcv[l] = _mm256_and_ps(srcv[l], (__m256) _mm256_set1_epi32(0x7fffffff)); + } + + + for (int l = 0; l < QK8/2; l++) { + amaxv[2*l] = _mm256_max_ps(asrcv[2*l], asrcv[2*l+1]); + } + + for (int l = 0; l < QK8/4; l++) { + amaxv[4*l] = _mm256_max_ps(amaxv[4*l], amaxv[4*l+2]); + } + + for (int l = 0; l < QK8/8; l++) { + amaxv[8*l] = _mm256_max_ps(amaxv[8*l], amaxv[8*l+4]); + } + + //amax = MAX(amaxv[0][0], MAX(amaxv[0][1], MAX(amaxv[0][2], MAX(amaxv[0][3], MAX(amaxv[0][4], MAX(amaxv[0][5], MAX(amaxv[0][6], amaxv[0][7]))))))); + + const __m256 amaxv0_0 = _mm256_permute2f128_ps(amaxv[0], amaxv[0], 3); + const __m256 amaxv0_1 = _mm256_max_ps(amaxv[0], amaxv0_0); + const __m256 amaxv0_2 = _mm256_permute_ps(amaxv0_1, 0x4e); + const __m256 amaxv0_3 = _mm256_max_ps(amaxv0_1, amaxv0_2); + const __m256 amaxv0_4 = _mm256_permute_ps(amaxv0_3, 0xb1); + const __m256 amaxv0_5 = _mm256_max_ps(amaxv0_3, amaxv0_4); + + amax = _mm256_cvtss_f32(amaxv0_5); + + //printf("amax = %f\n", amax); + + const float d = amax / ((1 << (QB - 1)) - 1); + const float id = d ? 
1.0/d : 0.0; + + pd[i] = GGML_FP32_TO_GQ(d); + + const __m256 idv = _mm256_set1_ps(id); + + for (int l = 0; l < QK/8; l++) { + __m256 v = _mm256_mul_ps(srcv[l], idv); +#if 0 + v[0] += frand(); v[1] += frand(); v[2] += frand(); v[3] += frand(); + v[4] += frand(); v[5] += frand(); v[6] += frand(); v[7] += frand(); +#endif + + // convert to int8 + __m256i vi = _mm256_cvtps_epi32(v); + vi = _mm256_add_epi32(vi, _mm256_set1_epi32(8)); + + int32_t vi_0 = _mm256_extract_epi32(vi, 0); + int32_t vi_1 = _mm256_extract_epi32(vi, 1); + int32_t vi_2 = _mm256_extract_epi32(vi, 2); + int32_t vi_3 = _mm256_extract_epi32(vi, 3); + + int32_t vi_4 = _mm256_extract_epi32(vi, 4); + int32_t vi_5 = _mm256_extract_epi32(vi, 5); + int32_t vi_6 = _mm256_extract_epi32(vi, 6); + int32_t vi_7 = _mm256_extract_epi32(vi, 7); + + // convert to 4-bit, 2 consecutive packed into 1 byte + pp[4*l + 0] = vi_0 | (vi_1 << 4); + pp[4*l + 1] = vi_2 | (vi_3 << 4); + pp[4*l + 2] = vi_4 | (vi_5 << 4); + pp[4*l + 3] = vi_6 | (vi_7 << 4); + + //printf("vi: %7d %7d %7d %7d %7d %7d %7d %7d\n", vi_0, vi_1, vi_2, vi_3, vi_4, vi_5, vi_6, vi_7); + ////printf("v : %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f\n", v[0], v[1], v[2], v[3], v[4], v[5], v[6], v[7]); + + assert(vi_0 >= 0 && vi_0 < 16); + assert(vi_1 >= 0 && vi_1 < 16); + assert(vi_2 >= 0 && vi_2 < 16); + assert(vi_3 >= 0 && vi_3 < 16); + + assert(vi_4 >= 0 && vi_4 < 16); + assert(vi_5 >= 0 && vi_5 < 16); + assert(vi_6 >= 0 && vi_6 < 16); + assert(vi_7 >= 0 && vi_7 < 16); + } + + memcpy(pb + i*QK/2, pp, sizeof(pp)); + } +#elif defined(__ARM_NEON) && 0 + { + // TODO + } +#else + { + for (int l = 0; l < QK; l++) { + const float v = src[i*QK + l]; + amax = MAX(amax, fabsf(v)); + } + + const float d = amax / ((1 << (QB - 1)) - 1); + const float id = d ? 
1.0/d : 0.0; + + pd[i] = GGML_FP32_TO_GQ(d); + + for (int l = 0; l < QK; l++) { + const float v = src[i*QK + l]*id; + const int8_t vi = ((int8_t) (round(v))) + 8; + assert(vi >= 0 && vi < 16); + pp[l/2] |= (vi & 0xf) << (4*(l & 1)); + } + + memcpy(pb + i*QK/2, pp, sizeof(pp)); + } +#endif + //printf("min %f max %f\n", min, max); + } +} + +// reimplementation of quantize_5 using quantize_5_row +void quantize_5(const float * restrict src, char * restrict dst, int n, int k) { + assert(k % QK == 0); + + for (int j = 0; j < n; j++) { + quantize_5_row(src + j*k, dst, k); + dst = (char *) dst + quantize_5_row_size(k); + } +} + +void vec_dot_gq_5(const int n, float * restrict s, const void * restrict x, const void * restrict y) { + const int nb = quantize_5_blocks_per_row(n); + + const gq_scale_t * restrict pd0 = (const gq_scale_t *) x; + const gq_scale_t * restrict pd1 = (const gq_scale_t *) y; + + const uint8_t * restrict pb0 = (const uint8_t *) (pd0 + nb); + const uint8_t * restrict pb1 = (const uint8_t *) (pd1 + nb); + + float sumf = 0.0; + +#if 0 + // scalar + for (int i = 0; i < nb; i++) { + const float d0 = GGML_GQ_TO_FP32(pd0[i]); + const float d1 = GGML_GQ_TO_FP32(pd1[i]); + + const uint8_t * restrict p0 = pb0 + i*QK/2; + const uint8_t * restrict p1 = pb1 + i*QK/2; + + for (int j = 0; j < QK/2; j++) { + const uint8_t v0 = p0[j]; + const uint8_t v1 = p1[j]; + + const float f0 = d0*((int8_t) (v0 & 0xf) - 8); + const float f1 = d0*((int8_t) (v0 >> 4) - 8); + + const float f2 = d1*((int8_t) (v1 & 0xf) - 8); + const float f3 = d1*((int8_t) (v1 >> 4) - 8); + + sumf += f0*f2 + f1*f3; + } + } +#else +#if defined(__AVX2__) +#if QK == 64 && 1 + __m256 sum11 = _mm256_setzero_ps(); + + for (int i = 0; i < nb; i++) { + const float d0 = GGML_GQ_TO_FP32(pd0[i]); + const float d1 = GGML_GQ_TO_FP32(pd1[i]); + + const uint8_t * restrict p0 = pb0 + i*QK/2; + const uint8_t * restrict p1 = pb1 + i*QK/2; + + const __m256 d0v = _mm256_set1_ps(d0); + const __m256 d1v = _mm256_set1_ps(d1); + + const __m256 d0d1v = _mm256_mul_ps(d0v, d1v); + + const __m256i m4b = _mm256_set1_epi8(0xf); + + // 64 x 4 + const __m256i v0 = _mm256_loadu_si256((__m256i *) p0); + const __m256i v1 = _mm256_loadu_si256((__m256i *) p1); + + // 32 x 8 + __m256i v0l = _mm256_and_si256(v0, m4b); + __m256i v1l = _mm256_and_si256(v1, m4b); + + __m256i v0h = _mm256_and_si256(_mm256_srli_epi16(v0, 4), m4b); + __m256i v1h = _mm256_and_si256(_mm256_srli_epi16(v1, 4), m4b); + + // sub 8 + v0l = _mm256_sub_epi8(v0l, _mm256_set1_epi8(8)); + v0h = _mm256_sub_epi8(v0h, _mm256_set1_epi8(8)); + + v1l = _mm256_sub_epi8(v1l, _mm256_set1_epi8(8)); + v1h = _mm256_sub_epi8(v1h, _mm256_set1_epi8(8)); + + // abs + const __m256i v0la = _mm256_sign_epi8(v0l, v0l); + const __m256i v0ha = _mm256_sign_epi8(v0h, v0h); + + // sign + const __m256i v1ls = _mm256_sign_epi8(v1l, v0l); + const __m256i v1hs = _mm256_sign_epi8(v1h, v0h); + + const __m256i pl = _mm256_maddubs_epi16(v0la, v1ls); + const __m256i ph = _mm256_maddubs_epi16(v0ha, v1hs); + + const __m256i p16 = _mm256_add_epi16(ph, pl); + const __m256i p = _mm256_madd_epi16(_mm256_set1_epi16(1), p16); + + sum11 = _mm256_fmadd_ps(d0d1v, _mm256_cvtepi32_ps(p), sum11); + } + + sumf = _mm256_hadd_ps_gg(sum11); +#endif +#elif defined (__ARM_NEON) + float sum11 = 0.0f; + + //float32x4_t sum_0 = vdupq_n_f32(0.0f); + //float32x4_t sum_1 = vdupq_n_f32(0.0f); + + //float16x8_t sum_0 = vdupq_n_f16(0.0f); + //float16x8_t sum_1 = vdupq_n_f16(0.0f); + + for (int i = 0; i < nb; i++) { + const float d0 = GGML_GQ_TO_FP32(pd0[i]); + 
const float d1 = GGML_GQ_TO_FP32(pd1[i]); + + //float32x4_t d0d1v = vdupq_n_f32(d0*d1); + //float16x8_t d0d1v = vdupq_n_f16(d0*d1); + + const uint8_t * restrict p0 = pb0 + i*QK/2; + const uint8_t * restrict p1 = pb1 + i*QK/2; + + const uint8x16_t m4b = vdupq_n_u8(0xf); + const int8x16_t s8b = vdupq_n_s8(0x8); + + const uint8x16_t v0_0 = vld1q_u8(p0); + const uint8x16_t v0_1 = vld1q_u8(p0 + 16); + const uint8x16_t v1_0 = vld1q_u8(p1); + const uint8x16_t v1_1 = vld1q_u8(p1 + 16); + + // 4-bit -> 8-bit + const uint8x16_t v0_0l = vandq_u8(v0_0, m4b); + const uint8x16_t v0_1l = vandq_u8(v0_1, m4b); + const uint8x16_t v1_0l = vandq_u8(v1_0, m4b); + const uint8x16_t v1_1l = vandq_u8(v1_1, m4b); + + const uint8x16_t v0_0h = vshrq_n_u8(v0_0, 4); + const uint8x16_t v0_1h = vshrq_n_u8(v0_1, 4); + const uint8x16_t v1_0h = vshrq_n_u8(v1_0, 4); + const uint8x16_t v1_1h = vshrq_n_u8(v1_1, 4); + + // sub 8 + const int8x16_t v0_0ls = vsubq_s8(v0_0l, s8b); + const int8x16_t v0_1ls = vsubq_s8(v0_1l, s8b); + const int8x16_t v1_0ls = vsubq_s8(v1_0l, s8b); + const int8x16_t v1_1ls = vsubq_s8(v1_1l, s8b); + + const int8x16_t v0_0hs = vsubq_s8(v0_0h, s8b); + const int8x16_t v0_1hs = vsubq_s8(v0_1h, s8b); + const int8x16_t v1_0hs = vsubq_s8(v1_0h, s8b); + const int8x16_t v1_1hs = vsubq_s8(v1_1h, s8b); + + // dot product into int16x8_t + const int16x8_t pl0l = vmull_s8(vget_low_s8 (v0_0ls), vget_low_s8 (v1_0ls)); + const int16x8_t pl0h = vmull_s8(vget_high_s8(v0_0ls), vget_high_s8(v1_0ls)); + const int16x8_t pl1l = vmull_s8(vget_low_s8 (v0_1ls), vget_low_s8 (v1_1ls)); + const int16x8_t pl1h = vmull_s8(vget_high_s8(v0_1ls), vget_high_s8(v1_1ls)); + + const int16x8_t ph0l = vmull_s8(vget_low_s8 (v0_0hs), vget_low_s8 (v1_0hs)); + const int16x8_t ph0h = vmull_s8(vget_high_s8(v0_0hs), vget_high_s8(v1_0hs)); + const int16x8_t ph1l = vmull_s8(vget_low_s8 (v0_1hs), vget_low_s8 (v1_1hs)); + const int16x8_t ph1h = vmull_s8(vget_high_s8(v0_1hs), vget_high_s8(v1_1hs)); + + const int16x8_t pl0 = vaddq_s16(pl0l, pl0h); + const int16x8_t pl1 = vaddq_s16(pl1l, pl1h); + const int16x8_t ph0 = vaddq_s16(ph0l, ph0h); + const int16x8_t ph1 = vaddq_s16(ph1l, ph1h); + + const int16x8_t pl = vaddq_s16(pl0, pl1); + const int16x8_t ph = vaddq_s16(ph0, ph1); + + //const int8x16_t pl0 = vmulq_s8(v0_0ls, v1_0ls); + //const int8x16_t pl1 = vmulq_s8(v0_1ls, v1_1ls); + //const int8x16_t ph0 = vmulq_s8(v0_0hs, v1_0hs); + //const int8x16_t ph1 = vmulq_s8(v0_1hs, v1_1hs); + + //const int16x8_t pll = vaddl_s8(vget_low_s8(pl0), vget_low_s8(pl1)); + //const int16x8_t plh = vaddl_s8(vget_high_s8(pl0), vget_high_s8(pl1)); + //const int16x8_t phl = vaddl_s8(vget_low_s8(ph0), vget_low_s8(ph1)); + //const int16x8_t phh = vaddl_s8(vget_high_s8(ph0), vget_high_s8(ph1)); + + //const int16x8_t pl = vaddq_s16(pll, plh); + //const int16x8_t ph = vaddq_s16(phl, phh); + + const int16x8_t p = vaddq_s16(pl, ph); + + // convert to float + //const float32x4_t pf0 = vcvtq_f32_s32(vmovl_s16(vget_low_s16 (p))); + //const float32x4_t pf1 = vcvtq_f32_s32(vmovl_s16(vget_high_s16(p))); + + // scalar + sum11 += d0*d1*vaddvq_s16(p); + //sum11 += d0*d1*(vaddvq_s16(pl) + vaddvq_s16(ph)); + //sum11 += d0*d1*vaddvq_s16(vaddq_s16(pl, ph)); + //sum11 += d0*d1*(vaddvq_s8(pl0) + vaddvq_s8(pl1) + vaddvq_s8(ph0) + vaddvq_s8(ph1)); + //sum11 += d0*d1*(vaddvq_s16(pll) + vaddvq_s16(plh) + vaddvq_s16(phl) + vaddvq_s16(phh)); + + //sum_0 = vfmaq_f16(sum_0, d0d1v, vcvtq_f16_s16(p)); + //sum_0 = vfmaq_f16(sum_0, d0d1v, vcvtq_f16_s16(pl)); + //sum_1 = vfmaq_f16(sum_1, d0d1v, vcvtq_f16_s16(ph)); 
+ + // vectorize + //sum_0 = vmlaq_f32(sum_0, d0d1v, pf0); + //sum_1 = vmlaq_f32(sum_1, d0d1v, pf1); + } + + sumf = sum11; + //sumf = vaddvq_f32(sum_0) + vaddvq_f32(sum_1); + //sumf = sum_0[0] + sum_0[1] + sum_0[2] + sum_0[3] + sum_0[4] + sum_0[5] + sum_0[6] + sum_0[7]; + //sum_0 = vaddq_f16(sum_0, sum_1); + //sumf = sum_0[0] + sum_0[1] + sum_0[2] + sum_0[3] + sum_0[4] + sum_0[5] + sum_0[6] + sum_0[7]; +#endif +#endif + + *s = sumf; +} + +// use vec_dot_gq_5 to compute the dot product of two rows +void mul_mat_gq_5( + const void * src0, + const void * src1, // transposed + float * dst, + int m, int n, int k) { + assert(k % QK == 0); + + const int nb = quantize_5_blocks_per_row(k); + + for (int ir0 = 0; ir0 < m; ir0++) { + for (int ir1 = 0; ir1 < n; ir1++) { + vec_dot_gq_5(k, dst + ir1, src0, src1); + src1 = (const char *) src1 + quantize_5_row_size(k); + } + src0 = (const char *) src0 + quantize_5_row_size(k); + src1 = (const char *) src1 - n*quantize_5_row_size(k); + + dst = (float *) dst + n; + } +} + +// +// method 6 +// same as 5 but with 32 element blocks +// + +static inline int quantize_6_blocks_per_row(int k) { + return k/32; +} + +static inline int quantize_6_row_size(int k) { + const int nb = quantize_6_blocks_per_row(k); + + return nb*(sizeof(gq_scale_t) + 16); +} + +void quantize_6_row(const float * restrict src, void * restrict dst, int k) { + assert(k % 32 == 0); + assert(QB == 4); + + const int nb = quantize_6_blocks_per_row(k); + + gq_scale_t * restrict pd = (gq_scale_t *) (dst); + uint8_t * restrict pb = (uint8_t *) (pd + nb); + + uint8_t pp[16]; + + for (int i = 0; i < nb; i++) { + memset(pp, 0, sizeof(pp)); + + float amax = 0.0f; // absolute max + +#if defined(__AVX2__) + { + const int QK8 = 4; + + __m256 srcv [QK8]; + __m256 asrcv[QK8]; + __m256 amaxv[QK8]; + + for (int l = 0; l < QK8; l++) { + srcv[l] = _mm256_loadu_ps(src + i*32 + 8*l); + } + + for (int l = 0; l < QK8; l++) { + asrcv[l] = _mm256_and_ps(srcv[l], (__m256) _mm256_set1_epi32(0x7fffffff)); + } + + for (int l = 0; l < QK8/2; l++) { + amaxv[2*l] = _mm256_max_ps(asrcv[2*l], asrcv[2*l+1]); + } + + for (int l = 0; l < QK8/4; l++) { + amaxv[4*l] = _mm256_max_ps(amaxv[4*l], amaxv[4*l+2]); + } + + const __m256 amaxv0_0 = _mm256_permute2f128_ps(amaxv[0], amaxv[0], 3); + const __m256 amaxv0_1 = _mm256_max_ps(amaxv[0], amaxv0_0); + const __m256 amaxv0_2 = _mm256_permute_ps(amaxv0_1, 0x4e); + const __m256 amaxv0_3 = _mm256_max_ps(amaxv0_1, amaxv0_2); + const __m256 amaxv0_4 = _mm256_permute_ps(amaxv0_3, 0xb1); + const __m256 amaxv0_5 = _mm256_max_ps(amaxv0_3, amaxv0_4); + + amax = _mm256_cvtss_f32(amaxv0_5); + + const float d = amax / ((1 << (QB - 1)) - 1); + const float id = d ? 
1.0/d : 0.0; + + pd[i] = GGML_FP32_TO_GQ(d); + + const __m256 idv = _mm256_set1_ps(id); + + for (int l = 0; l < 4; l++) { + __m256 v = _mm256_mul_ps(srcv[l], idv); + + // convert to int8 + __m256i vi = _mm256_cvtps_epi32(v); + vi = _mm256_add_epi32(vi, _mm256_set1_epi32(8)); + + int32_t vi_0 = _mm256_extract_epi32(vi, 0); + int32_t vi_1 = _mm256_extract_epi32(vi, 1); + int32_t vi_2 = _mm256_extract_epi32(vi, 2); + int32_t vi_3 = _mm256_extract_epi32(vi, 3); + + int32_t vi_4 = _mm256_extract_epi32(vi, 4); + int32_t vi_5 = _mm256_extract_epi32(vi, 5); + int32_t vi_6 = _mm256_extract_epi32(vi, 6); + int32_t vi_7 = _mm256_extract_epi32(vi, 7); + + // convert to 4-bit, 2 consecutive packed into 1 byte + pp[4*l + 0] = vi_0 | (vi_1 << 4); + pp[4*l + 1] = vi_2 | (vi_3 << 4); + pp[4*l + 2] = vi_4 | (vi_5 << 4); + pp[4*l + 3] = vi_6 | (vi_7 << 4); + + assert(vi_0 >= 0 && vi_0 < 16); + assert(vi_1 >= 0 && vi_1 < 16); + assert(vi_2 >= 0 && vi_2 < 16); + assert(vi_3 >= 0 && vi_3 < 16); + + assert(vi_4 >= 0 && vi_4 < 16); + assert(vi_5 >= 0 && vi_5 < 16); + assert(vi_6 >= 0 && vi_6 < 16); + assert(vi_7 >= 0 && vi_7 < 16); + } + + memcpy(pb + i*16, pp, sizeof(pp)); + } +#elif defined(__ARM_NEON) + { + float32x4_t srcv [8]; + float32x4_t asrcv[8]; + float32x4_t amaxv[8]; + + for (int l = 0; l < 8; l++) srcv[l] = vld1q_f32(src + i*32 + 4*l); + for (int l = 0; l < 8; l++) asrcv[l] = vabsq_f32(srcv[l]); + + for (int l = 0; l < 4; l++) amaxv[2*l] = vmaxq_f32(asrcv[2*l], asrcv[2*l+1]); + for (int l = 0; l < 2; l++) amaxv[4*l] = vmaxq_f32(amaxv[4*l], amaxv[4*l+2]); + for (int l = 0; l < 1; l++) amaxv[8*l] = vmaxq_f32(amaxv[8*l], amaxv[8*l+4]); + + amax = MAX( + MAX(vgetq_lane_f32(amaxv[0], 0), vgetq_lane_f32(amaxv[0], 1)), + MAX(vgetq_lane_f32(amaxv[0], 2), vgetq_lane_f32(amaxv[0], 3))); + + const float d = amax / ((1 << 3) - 1); + const float id = d ? 1.0/d : 0.0; + + pd[i] = GGML_FP32_TO_GQ(d); + + for (int l = 0; l < 8; l++) { + const float32x4_t v = vmulq_n_f32(srcv[l], id); + const float32x4_t vf = vaddq_f32(v, vdupq_n_f32(8.5f)); + const int32x4_t vi = vcvtq_s32_f32(vf); + + pp[2*l + 0] = vgetq_lane_s32(vi, 0) | (vgetq_lane_s32(vi, 1) << 4); + pp[2*l + 1] = vgetq_lane_s32(vi, 2) | (vgetq_lane_s32(vi, 3) << 4); + } + + memcpy(pb + i*16, pp, sizeof(pp)); + } +#else + { + for (int l = 0; l < 32; l++) { + const float v = src[i*32 + l]; + amax = MAX(amax, fabsf(v)); + } + + const float d = amax / ((1 << (QB - 1)) - 1); + const float id = d ? 
1.0/d : 0.0; + + pd[i] = GGML_FP32_TO_GQ(d); + + for (int l = 0; l < 32; l++) { + const float v = src[i*32 + l]*id; + const int8_t vi = ((int8_t) (round(v))) + 8; + assert(vi >= 0 && vi < 16); + pp[l/2] |= (vi & 0xf) << (4*(l & 1)); + } + + memcpy(pb + i*16, pp, sizeof(pp)); + } +#endif + //printf("amax = %f\n", amax); + } +} + +// reimplementation of quantize__6using quantize_6_row +void quantize_6(const float * restrict src, char * restrict dst, int n, int k) { + assert(k % 32 == 0); + + for (int j = 0; j < n; j++) { + quantize_6_row(src + j*k, dst, k); + dst = (char *) dst + quantize_6_row_size(k); + } +} + +void vec_dot_gq_6(const int n, float * restrict s, const void * restrict x, const void * restrict y) { + const int nb = quantize_6_blocks_per_row(n); + + const gq_scale_t * restrict pd0 = (const gq_scale_t *) x; + const gq_scale_t * restrict pd1 = (const gq_scale_t *) y; + + const uint8_t * restrict pb0 = (const uint8_t *) (pd0 + nb); + const uint8_t * restrict pb1 = (const uint8_t *) (pd1 + nb); + + float sumf = 0.0; + +#if 0 + // scalar + for (int i = 0; i < nb; i++) { + const float d0 = GGML_GQ_TO_FP32(pd0[i]); + const float d1 = GGML_GQ_TO_FP32(pd1[i]); + + const uint8_t * restrict p0 = pb0 + i*16; + const uint8_t * restrict p1 = pb1 + i*16; + + for (int j = 0; j < 16; j++) { + const uint8_t v0 = p0[j]; + const uint8_t v1 = p1[j]; + + const float f0 = d0*((int8_t) (v0 & 0xf) - 8); + const float f1 = d0*((int8_t) (v0 >> 4) - 8); + + const float f2 = d1*((int8_t) (v1 & 0xf) - 8); + const float f3 = d1*((int8_t) (v1 >> 4) - 8); + + sumf += f0*f2 + f1*f3; + } + } +#else +#if defined(__AVX2__) + // TODO +#elif defined (__ARM_NEON) +#if 0 + float sum0 = 0.0f; + + for (int i = 0; i < nb; i++) { + const float d0 = GGML_GQ_TO_FP32(pd0[i]); + const float d1 = GGML_GQ_TO_FP32(pd1[i]); + + //float32x4_t d0d1v = vdupq_n_f32(d0*d1); + //float16x8_t d0d1v = vdupq_n_f16(d0*d1); + + const uint8_t * restrict p0 = pb0 + i*16; + const uint8_t * restrict p1 = pb1 + i*16; + + const uint8x16_t m4b = vdupq_n_u8(0xf); + const int8x16_t s8b = vdupq_n_s8(0x8); + + const uint8x16_t v0_0 = vld1q_u8(p0); + const uint8x16_t v1_0 = vld1q_u8(p1); + + // 4-bit -> 8-bit + const uint8x16_t v0_0l = vandq_u8(v0_0, m4b); + const uint8x16_t v1_0l = vandq_u8(v1_0, m4b); + + const uint8x16_t v0_0h = vshrq_n_u8(v0_0, 4); + const uint8x16_t v1_0h = vshrq_n_u8(v1_0, 4); + + // sub 8 + const int8x16_t v0_0ls = vsubq_s8(v0_0l, s8b); + const int8x16_t v1_0ls = vsubq_s8(v1_0l, s8b); + + const int8x16_t v0_0hs = vsubq_s8(v0_0h, s8b); + const int8x16_t v1_0hs = vsubq_s8(v1_0h, s8b); + + // dot product into int16x8_t + const int16x8_t pl0l = vmull_s8(vget_low_s8 (v0_0ls), vget_low_s8 (v1_0ls)); + const int16x8_t pl0h = vmull_s8(vget_high_s8(v0_0ls), vget_high_s8(v1_0ls)); + + const int16x8_t ph0l = vmull_s8(vget_low_s8 (v0_0hs), vget_low_s8 (v1_0hs)); + const int16x8_t ph0h = vmull_s8(vget_high_s8(v0_0hs), vget_high_s8(v1_0hs)); + + const int16x8_t pl = vaddq_s16(pl0l, pl0h); + const int16x8_t ph = vaddq_s16(ph0l, ph0h); + + const int16x8_t p = vaddq_s16(pl, ph); + + // scalar + sum0 += d0*d1*vaddvq_s16(p); + } + + sumf = sum0; +#elif 1 // this is a bit faster than the above + float sum0 = 0.0f; + float sum1 = 0.0f; + + for (int i = 0; i < nb; i += 2) { + const float d0_0 = GGML_GQ_TO_FP32(pd0[i + 0]); + const float d1_0 = GGML_GQ_TO_FP32(pd1[i + 0]); + const float d0_1 = GGML_GQ_TO_FP32(pd0[i + 1]); + const float d1_1 = GGML_GQ_TO_FP32(pd1[i + 1]); + + const uint8_t * restrict p0 = pb0 + i*16; + const uint8_t * restrict p1 = pb1 
+ i*16; + + const uint8x16_t m4b = vdupq_n_u8(0xf); + const int8x16_t s8b = vdupq_n_s8(0x8); + + const uint8x16_t v0_0 = vld1q_u8(p0); + const uint8x16_t v0_1 = vld1q_u8(p0 + 16); + const uint8x16_t v1_0 = vld1q_u8(p1); + const uint8x16_t v1_1 = vld1q_u8(p1 + 16); + + // 4-bit -> 8-bit + const uint8x16_t v0_0l = vandq_u8(v0_0, m4b); + const uint8x16_t v1_0l = vandq_u8(v1_0, m4b); + + const uint8x16_t v0_0h = vshrq_n_u8(v0_0, 4); + const uint8x16_t v1_0h = vshrq_n_u8(v1_0, 4); + + const uint8x16_t v0_1l = vandq_u8(v0_1, m4b); + const uint8x16_t v1_1l = vandq_u8(v1_1, m4b); + + const uint8x16_t v0_1h = vshrq_n_u8(v0_1, 4); + const uint8x16_t v1_1h = vshrq_n_u8(v1_1, 4); + + // sub 8 + const int8x16_t v0_0ls = vsubq_s8(v0_0l, s8b); + const int8x16_t v1_0ls = vsubq_s8(v1_0l, s8b); + + const int8x16_t v0_0hs = vsubq_s8(v0_0h, s8b); + const int8x16_t v1_0hs = vsubq_s8(v1_0h, s8b); + + const int8x16_t v0_1ls = vsubq_s8(v0_1l, s8b); + const int8x16_t v1_1ls = vsubq_s8(v1_1l, s8b); + + const int8x16_t v0_1hs = vsubq_s8(v0_1h, s8b); + const int8x16_t v1_1hs = vsubq_s8(v1_1h, s8b); + + // dot product into int16x8_t + const int16x8_t pl0l = vmull_s8(vget_low_s8 (v0_0ls), vget_low_s8 (v1_0ls)); + const int16x8_t pl0h = vmull_s8(vget_high_s8(v0_0ls), vget_high_s8(v1_0ls)); + + const int16x8_t ph0l = vmull_s8(vget_low_s8 (v0_0hs), vget_low_s8 (v1_0hs)); + const int16x8_t ph0h = vmull_s8(vget_high_s8(v0_0hs), vget_high_s8(v1_0hs)); + + const int16x8_t pl1l = vmull_s8(vget_low_s8 (v0_1ls), vget_low_s8 (v1_1ls)); + const int16x8_t pl1h = vmull_s8(vget_high_s8(v0_1ls), vget_high_s8(v1_1ls)); + + const int16x8_t ph1l = vmull_s8(vget_low_s8 (v0_1hs), vget_low_s8 (v1_1hs)); + const int16x8_t ph1h = vmull_s8(vget_high_s8(v0_1hs), vget_high_s8(v1_1hs)); + + const int16x8_t pl_0 = vaddq_s16(pl0l, pl0h); + const int16x8_t ph_0 = vaddq_s16(ph0l, ph0h); + + const int16x8_t pl_1 = vaddq_s16(pl1l, pl1h); + const int16x8_t ph_1 = vaddq_s16(ph1l, ph1h); + + const int16x8_t p_0 = vaddq_s16(pl_0, ph_0); + const int16x8_t p_1 = vaddq_s16(pl_1, ph_1); + + // scalar + sum0 += d0_0*d1_0*vaddvq_s16(p_0); + sum1 += d0_1*d1_1*vaddvq_s16(p_1); + } + + sumf = sum0 + sum1; +#endif +#endif +#endif + + *s = sumf; +} + +// use vec_dot_gq_6 to compute the dot product of two rows +void mul_mat_gq_6( + const void * src0, + const void * src1, // transposed + float * dst, + int m, int n, int k) { + assert(k % 32 == 0); + + const int nb = quantize_6_blocks_per_row(k); + + for (int ir0 = 0; ir0 < m; ir0++) { + for (int ir1 = 0; ir1 < n; ir1++) { + vec_dot_gq_6(k, dst + ir1, src0, src1); + src1 = (const char *) src1 + quantize_6_row_size(k); + } + src0 = (const char *) src0 + quantize_6_row_size(k); + src1 = (const char *) src1 - n*quantize_6_row_size(k); + + dst = (float *) dst + n; + } +} + +int main(int argc, const char ** argv) { + assert(sizeof(gq_quant_t)*8 == gq_t_bits); + + // needed to initialize f16 tables + { + struct ggml_init_params params = { 0, NULL }; + struct ggml_context * ctx = ggml_init(params); + ggml_free(ctx); + } + + int method = 0; + if (argc > 1) { + method = atoi(argv[1]); + } + + float * src0 = (float *)malloc(sizeof(float)*M*K); + float * src1 = (float *)malloc(sizeof(float)*N*K); + float * dst = (float *)malloc(sizeof(float)*M*N); + + // allocate aligned memory + //float * src0 = (float *)aligned_alloc(32, sizeof(float)*M*K); + //float * src1 = (float *)aligned_alloc(32, sizeof(float)*N*K); + //float * dst = (float *)aligned_alloc(32, sizeof(float)*M*N); + + for (int i = 0; i < M*K; i++) { + src0[i] = 0.8 - rand() 
/ (float)RAND_MAX; + /*src0[i] = rand() / (float)RAND_MAX;*/ + /*src0[i] = i % 2;*/ + } + + for (int i = 0; i < N*K; i++) { + src1[i] = 0.8 - rand() / (float)RAND_MAX; + /*src1[i] = rand() / (float)RAND_MAX;*/ + /*src1[i] = i % 3;*/ + } + + void * src0_gq = NULL; + void * src1_gq = NULL; + + size_t sizegq = 0; + + { + if (method == 1) { + src0_gq = calloc(1, quantize_1_row_size(K)*M); + src1_gq = calloc(1, quantize_1_row_size(K)*N); + + sizegq = quantize_1_row_size(K)*M + quantize_1_row_size(K)*N; + } + + if (method == 2) { + src0_gq = calloc(1, quantize_2_row_size(K)*M); + src1_gq = calloc(1, quantize_2_row_size(K)*N); + + sizegq = quantize_2_row_size(K)*M + quantize_2_row_size(K)*N; + } + + if (method == 3) { + src0_gq = calloc(1, quantize_3_row_size(K)*M); + src1_gq = calloc(1, quantize_3_row_size(K)*N); + + sizegq = quantize_3_row_size(K)*M + quantize_3_row_size(K)*N; + } + + if (method == 4) { + src0_gq = calloc(1, quantize_4_row_size(K)*M); + src1_gq = calloc(1, quantize_4_row_size(K)*N); + + sizegq = quantize_4_row_size(K)*M + quantize_4_row_size(K)*N; + } + + if (method == 5) { + src0_gq = calloc(1, quantize_5_row_size(K)*M); + src1_gq = calloc(1, quantize_5_row_size(K)*N); + + sizegq = quantize_5_row_size(K)*M + quantize_5_row_size(K)*N; + } + + if (method == 6) { + src0_gq = calloc(1, quantize_6_row_size(K)*M); + src1_gq = calloc(1, quantize_6_row_size(K)*N); + + sizegq = quantize_6_row_size(K)*M + quantize_6_row_size(K)*N; + } + } + + const size_t sizef16 = sizeof(ggml_fp16_t)*M*K + sizeof(ggml_fp16_t)*N*K; + + printf("compression: %f\n", (float)sizegq/sizef16); + + // convert fp32 -> gq + { + const uint64_t t_start = get_time_us(); + + if (method == 1) { + quantize_1(src0, src0_gq, M, K); + quantize_1(src1, src1_gq, N, K); + } + + if (method == 2) { + quantize_2(src0, src0_gq, M, K); + quantize_2(src1, src1_gq, N, K); + } + + if (method == 3) { + quantize_3(src0, src0_gq, M, K); + quantize_3(src1, src1_gq, N, K); + } + + if (method == 4) { + quantize_4(src0, src0_gq, M, K); + quantize_4(src1, src1_gq, N, K); + } + + if (method == 5) { + quantize_5(src0, src0_gq, M, K); + quantize_5(src1, src1_gq, N, K); + } + + if (method == 6) { + quantize_6(src0, src0_gq, M, K); + quantize_6(src1, src1_gq, N, K); + } + + const uint64_t t_end = get_time_us(); + printf("convert time: %f ms / method = %d\n", (t_end - t_start) / 1000.0, method); + } + + for (int i = 0; i < 16; ++i) { + printf("%f %f\n", src0[i], src1[i]); + } + + const int nIter = 1; + + const clock_t start = clock(); + const uint64_t start_us = get_time_us(); + + double iM = 1.0/M; + double sum = 0.0f; + for (int i = 0; i < nIter; i++) { + if (method == 0) { + mul_mat_f32_naive(src0, src1, dst, M, N, K); + } + + if (method == 1) { + mul_mat_gq_1(src0_gq, src1_gq, dst, M, N, K); + } + + if (method == 2) { + mul_mat_gq_2(src0_gq, src1_gq, dst, M, N, K); + } + + if (method == 3) { + mul_mat_gq_3(src0_gq, src1_gq, dst, M, N, K); + } + + if (method == 4) { + mul_mat_gq_4(src0_gq, src1_gq, dst, M, N, K); + } + + if (method == 5) { + mul_mat_gq_5(src0_gq, src1_gq, dst, M, N, K); + } + + if (method == 6) { + mul_mat_gq_6(src0_gq, src1_gq, dst, M, N, K); + } + } + + for (int i = 0; i < N; i++) { + sum += dst[i]*iM; + } + + { + const clock_t end = clock(); + const uint64_t end_us = get_time_us(); + printf("%s: elapsed ticks: %ld\n", __func__, end - start); + printf("%s: elapsed us: %d / %f ms\n", __func__, (int)(end_us - start_us), (end_us - start_us) / 1000.0 / nIter); + } + +#if 0 + // print src0 + printf("src0:\n"); + for (int i = 0; i 
< M; i++) { + for (int j = 0; j < K; j++) { + printf("%4.1f ", src0[i*K+j]); + } + printf("\n"); + } + + // print src1 + printf("src1:\n"); + for (int i = 0; i < N; i++) { + for (int j = 0; j < K; j++) { + printf("%4.1f ", src1[i*K+j]); + } + printf("\n"); + } + + printf("dst:\n"); + for (int i = 0; i < M; i++) { + for (int j = 0; j < N; j++) { + printf("%4.1f ", dst[i*N+j]); + } + printf("\n"); + } +#endif printf("%f\n", sum); @@ -468,8 +2584,8 @@ int main(int argc, const char ** argv) { free(src1); free(dst); - free(src0_gq); - free(src1_gq); + if (src0_gq) free(src0_gq); + if (src1_gq) free(src1_gq); return 0; }
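// For reference — a minimal, standalone scalar sketch of the "method 5" scheme
// exercised above (delta-only 4-bit blocks with QB == 4): each block of SQK
// floats collapses to one float scale plus SQK/2 packed bytes, and the block
// dot product is reconstructed as d0*d1*sum((q0 - 8)*(q1 - 8)), mirroring the
// scalar path of vec_dot_gq_5. All names below (SQK, quantize_block_q4,
// dot_block_q4) and the use of a plain float scale instead of gq_scale_t are
// illustrative only and are not part of this patch.

#include <assert.h>
#include <math.h>
#include <stdint.h>
#include <stdio.h>

#define SQK 64  // block size, mirrors QK in the test above

// quantize one block of SQK floats into a scale d and SQK/2 bytes of 4-bit codes
static void quantize_block_q4(const float *src, float *d_out, uint8_t qs[SQK/2]) {
    float amax = 0.0f;  // absolute max of the block
    for (int l = 0; l < SQK; l++) {
        amax = fmaxf(amax, fabsf(src[l]));
    }

    const float d  = amax / ((1 << 3) - 1);  // QB == 4 -> signed levels in [-7, 7]
    const float id = d ? 1.0f/d : 0.0f;

    *d_out = d;

    for (int l = 0; l < SQK/2; l++) {
        qs[l] = 0;
    }

    for (int l = 0; l < SQK; l++) {
        const int vi = (int) roundf(src[l]*id) + 8;        // shift into [1, 15]
        assert(vi >= 0 && vi < 16);
        qs[l/2] |= (uint8_t) ((vi & 0xf) << (4*(l & 1)));  // two codes per byte
    }
}

// dot product of two quantized blocks (scalar reference)
static float dot_block_q4(float d0, const uint8_t *q0, float d1, const uint8_t *q1) {
    int isum = 0;
    for (int j = 0; j < SQK/2; j++) {
        isum += ((q0[j] & 0xf) - 8)*((q1[j] & 0xf) - 8);  // low nibbles
        isum += ((q0[j] >>  4) - 8)*((q1[j] >>  4) - 8);  // high nibbles
    }
    return d0*d1*(float) isum;
}

int main(void) {
    float x[SQK], y[SQK];
    for (int l = 0; l < SQK; l++) {
        x[l] = 0.8f - (float) l/SQK;
        y[l] = (float) l/SQK - 0.3f;
    }

    float dx, dy;
    uint8_t qx[SQK/2], qy[SQK/2];
    quantize_block_q4(x, &dx, qx);
    quantize_block_q4(y, &dy, qy);

    // compare against the fp32 dot product of the same block
    float ref = 0.0f;
    for (int l = 0; l < SQK; l++) {
        ref += x[l]*y[l];
    }

    printf("fp32 dot = %f, q4 dot = %f\n", ref, dot_block_q4(dx, qx, dy, qy));

    return 0;
}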