ggml : add ggml_compute_forward_rope_f16()

Georgi Gerganov 2 years ago
parent 39265de79f
commit fde29bd005

@@ -182,8 +182,8 @@ bool gpt2_model_load(const std::string & fname, gpt2_model & model, gpt_vocab &
         ctx_size += n_layer*(4*n_embd*n_embd*ggml_type_size(wtype)); // c_mlp_proj_w
         ctx_size += n_layer*(       n_embd*ggml_type_size(GGML_TYPE_F32)); // c_mlp_proj_b
 
-        ctx_size += n_ctx*n_layer*n_embd*ggml_type_size(GGML_TYPE_F32); // memory_k
-        ctx_size += n_ctx*n_layer*n_embd*ggml_type_size(GGML_TYPE_F32); // memory_v
+        ctx_size += n_ctx*n_layer*n_embd*ggml_type_size(GGML_TYPE_F16); // memory_k
+        ctx_size += n_ctx*n_layer*n_embd*ggml_type_size(GGML_TYPE_F16); // memory_v
 
         ctx_size += (6 + 12*n_layer)*256; // object overhead
@@ -281,8 +281,8 @@ bool gpt2_model_load(const std::string & fname, gpt2_model & model, gpt_vocab &
         const int n_mem      = n_layer*n_ctx;
         const int n_elements = n_embd*n_mem;
 
-        model.memory_k = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_elements);
-        model.memory_v = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_elements);
+        model.memory_k = ggml_new_tensor_1d(ctx, GGML_TYPE_F16, n_elements);
+        model.memory_v = ggml_new_tensor_1d(ctx, GGML_TYPE_F16, n_elements);
 
         const size_t memory_size = ggml_nbytes(model.memory_k) + ggml_nbytes(model.memory_v);

@@ -183,8 +183,8 @@ bool gptj_model_load(const std::string & fname, gptj_model & model, gpt_vocab &
         ctx_size += n_layer*(4*n_embd*n_embd*ggml_type_size(wtype)); // c_mlp_proj_w_trans
         ctx_size += n_layer*(       n_embd*ggml_type_size(GGML_TYPE_F32)); // c_mlp_proj_b
 
-        ctx_size += n_ctx*n_layer*n_embd*ggml_type_size(GGML_TYPE_F32); // memory_k
-        ctx_size += n_ctx*n_layer*n_embd*ggml_type_size(GGML_TYPE_F32); // memory_v
+        ctx_size += n_ctx*n_layer*n_embd*ggml_type_size(GGML_TYPE_F16); // memory_k
+        ctx_size += n_ctx*n_layer*n_embd*ggml_type_size(GGML_TYPE_F16); // memory_v
 
         ctx_size += (5 + 10*n_layer)*256; // object overhead
@@ -280,8 +280,8 @@ bool gptj_model_load(const std::string & fname, gptj_model & model, gpt_vocab &
         const int n_mem      = n_layer*n_ctx;
         const int n_elements = n_embd*n_mem;
 
-        model.memory_k = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_elements);
-        model.memory_v = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_elements);
+        model.memory_k = ggml_new_tensor_1d(ctx, GGML_TYPE_F16, n_elements);
+        model.memory_v = ggml_new_tensor_1d(ctx, GGML_TYPE_F16, n_elements);
 
         const size_t memory_size = ggml_nbytes(model.memory_k) + ggml_nbytes(model.memory_v);
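
The four hunks above store the gpt-2 and gpt-j key/value memory tensors as F16 instead of F32, which halves the KV cache footprint; the ctx_size accounting changes for the same reason. A back-of-the-envelope sketch of the saving, assuming illustrative GPT-2-small dimensions (n_embd = 768, n_layer = 12, n_ctx = 1024; these numbers are not part of this commit):

#include <stdio.h>
#include <stdint.h>

int main(void) {
    // Assumed, illustrative model dimensions (roughly GPT-2 small).
    const int64_t n_embd  = 768;
    const int64_t n_layer = 12;
    const int64_t n_ctx   = 1024;

    // memory_k and memory_v each hold n_ctx*n_layer*n_embd elements.
    const int64_t n_elements = 2*n_ctx*n_layer*n_embd;

    printf("KV cache at F32: %.1f MiB\n", n_elements*4.0/(1024.0*1024.0));
    printf("KV cache at F16: %.1f MiB\n", n_elements*2.0/(1024.0*1024.0));
    return 0;
}

With these assumed dimensions the cache drops from 72 MiB to 36 MiB.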

@@ -6693,12 +6693,72 @@ static void ggml_compute_forward_rope_f32(
     }
 }
 
+static void ggml_compute_forward_rope_f16(
+        const struct ggml_compute_params * params,
+        const struct ggml_tensor * src0,
+        const struct ggml_tensor * src1,
+        struct ggml_tensor * dst) {
+    assert(params->ith == 0);
+    assert(src1->type == GGML_TYPE_I32);
+    assert(ggml_nelements(src1) == 3);
+
+    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
+        return;
+    }
+
+    const int n_past = ((int32_t *) src1->data)[0];
+    const int n_dims = ((int32_t *) src1->data)[1];
+    const int mode   = ((int32_t *) src1->data)[2];
+
+    //const int ne0 = src0->ne[0];
+    const int ne1 = src0->ne[1];
+    const int ne2 = src0->ne[2];
+    const int ne3 = src0->ne[3];
+
+    const int nb0 = src0->nb[0];
+    const int nb1 = src0->nb[1];
+    const int nb2 = src0->nb[2];
+    const int nb3 = src0->nb[3];
+
+    //printf("ne0: %d, ne1: %d, ne2: %d, ne3: %d\n", ne0, ne1, ne2, ne3);
+    //printf("n_past = %d, ne2 = %d\n", n_past, ne2);
+
+    assert(nb0 == sizeof(ggml_fp16_t));
+
+    for (int i3 = 0; i3 < ne3; i3++) {
+        for (int i2 = (mode == 0 ? 0 : n_past); i2 < ne2; i2++) {
+            const int p = (mode == 0 ? n_past + i2 : i2);
+            for (int i1 = 0; i1 < ne1; i1++) {
+                for (int i0 = 0; i0 < n_dims; i0 += 2) {
+                    const double theta = pow(10000.0, ((double)-i0)/n_dims);
+
+                    const double cos_theta = cos(p*theta);
+                    const double sin_theta = sin(p*theta);
+
+                    const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
+                          ggml_fp16_t * dst_data  = (ggml_fp16_t *)((char *)  dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
+
+                    double x0 = ggml_fp16_to_fp32(src[0]);
+                    double x1 = ggml_fp16_to_fp32(src[1]);
+
+                    dst_data[0] = ggml_fp32_to_fp16(x0*cos_theta - x1*sin_theta);
+                    dst_data[1] = ggml_fp32_to_fp16(x0*sin_theta + x1*cos_theta);
+                }
+            }
+        }
+    }
+}
+
 static void ggml_compute_forward_rope(
         const struct ggml_compute_params * params,
         const struct ggml_tensor * src0,
         const struct ggml_tensor * src1,
         struct ggml_tensor * dst) {
     switch (src0->type) {
+        case GGML_TYPE_F16:
+            {
+                ggml_compute_forward_rope_f16(params, src0, src1, dst);
+            } break;
         case GGML_TYPE_F32:
             {
                 ggml_compute_forward_rope_f32(params, src0, src1, dst);
@@ -6708,7 +6768,6 @@ static void ggml_compute_forward_rope(
         case GGML_TYPE_I8:
         case GGML_TYPE_I16:
         case GGML_TYPE_I32:
-        case GGML_TYPE_F16:
        case GGML_TYPE_COUNT:
            {
                GGML_ASSERT(false);
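
ggml_compute_forward_rope_f16() applies the same rotation as the F32 kernel, only converting each half-precision pair to float and back around the multiply: the pair (x0, x1) at even dimension i0 of position p is rotated by the angle p * 10000^(-i0/n_dims). A self-contained sketch of that per-pair math (plain float instead of ggml_fp16_t so it builds without the ggml headers; illustrative only):

#include <math.h>
#include <stdio.h>

// Rotate one (x0, x1) pair the way the rope kernels do, in plain float.
static void rope_pair(float * x0, float * x1, int p, int i0, int n_dims) {
    const double theta     = pow(10000.0, ((double) -i0)/n_dims);
    const double cos_theta = cos(p*theta);
    const double sin_theta = sin(p*theta);

    const double v0 = *x0;
    const double v1 = *x1;

    *x0 = (float)(v0*cos_theta - v1*sin_theta);
    *x1 = (float)(v0*sin_theta + v1*cos_theta);
}

int main(void) {
    float x0 = 1.0f, x1 = 0.0f;
    rope_pair(&x0, &x1, /*p =*/ 5, /*i0 =*/ 0, /*n_dims =*/ 64);
    printf("rotated pair: %f %f\n", x0, x1); // cos(5), sin(5) for i0 == 0
    return 0;
}

The dispatcher change moves GGML_TYPE_F16 out of the GGML_ASSERT(false) fall-through into its own case, so a rope op on an F16 tensor now reaches the new kernel instead of aborting.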
