From a0f2f68cdbac091e8ae88951dd6bb8fda7a0f545 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Sun, 8 Jan 2023 20:28:38 +0200 Subject: [PATCH] gpt-2 : fix broken prompt due to recent experiments No idea why I committed that!? --- examples/gpt-2/main.cpp | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/examples/gpt-2/main.cpp b/examples/gpt-2/main.cpp index 6507ec2..333d93b 100644 --- a/examples/gpt-2/main.cpp +++ b/examples/gpt-2/main.cpp @@ -496,7 +496,6 @@ bool gpt2_eval( ggml_new_f32(ctx0, 1.0f/sqrt(float(n_embd)/n_head)) ); -#if 0 // KQ_masked = mask_past(KQ_scaled) // [n_past + N, N, 12] struct ggml_tensor * KQ_masked = ggml_diag_mask_inf(ctx0, KQ_scaled, n_past); @@ -504,15 +503,6 @@ bool gpt2_eval( // KQ = soft_max(KQ_masked) // [n_past + N, N, 12] struct ggml_tensor * KQ_soft_max = ggml_soft_max(ctx0, KQ_masked); -#else - // KQ_masked = mask_past(KQ_scaled) - // [n_past + N, N, 12] - //struct ggml_tensor * KQ_masked = ggml_diag_mask_inf(ctx0, KQ_scaled, n_past); - - // KQ = soft_max(KQ_masked) - // [n_past + N, N, 12] - struct ggml_tensor * KQ_soft_max = ggml_soft_max(ctx0, KQ_scaled); -#endif // V_trans = Vmem.view(n_embd/n_head, n_head, n_past + N).permute(1, 2, 0, 3).contiguous() // [n_past + N, 64, 12]