gpt-2 : fix broken prompt due to recent experiments

No idea why I committed that!?
Georgi Gerganov 2 years ago
parent dee3684fec
commit a0f2f68cdb

@@ -496,7 +496,6 @@ bool gpt2_eval(
                    ggml_new_f32(ctx0, 1.0f/sqrt(float(n_embd)/n_head))
                    );

-#if 0
        // KQ_masked = mask_past(KQ_scaled)
        // [n_past + N, N, 12]
        struct ggml_tensor * KQ_masked = ggml_diag_mask_inf(ctx0, KQ_scaled, n_past);
@@ -504,15 +503,6 @@ bool gpt2_eval(
        // KQ = soft_max(KQ_masked)
        // [n_past + N, N, 12]
        struct ggml_tensor * KQ_soft_max = ggml_soft_max(ctx0, KQ_masked);
-#else
-        // KQ_masked = mask_past(KQ_scaled)
-        // [n_past + N, N, 12]
-        //struct ggml_tensor * KQ_masked = ggml_diag_mask_inf(ctx0, KQ_scaled, n_past);
-
-        // KQ = soft_max(KQ_masked)
-        // [n_past + N, N, 12]
-        struct ggml_tensor * KQ_soft_max = ggml_soft_max(ctx0, KQ_scaled);
-#endif

        // V_trans = Vmem.view(n_embd/n_head, n_head, n_past + N).permute(1, 2, 0, 3).contiguous()
        // [n_past + N, 64, 12]
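For context: the removed #else branch skipped the causal mask and fed KQ_scaled straight into the softmax, which is presumably what broke prompt evaluation, since every token could then attend to later positions. A minimal sketch of the difference, using the identifiers from the hunk (shapes and surrounding tensors assumed from the diff context):

        // experimental path removed by this commit: no causal mask before the softmax
        //struct ggml_tensor * KQ_soft_max = ggml_soft_max(ctx0, KQ_scaled);

        // restored path: set entries for "future" positions to -inf, then normalize,
        // so each of the N query tokens only attends to the n_past + its own position
        struct ggml_tensor * KQ_masked   = ggml_diag_mask_inf(ctx0, KQ_scaled, n_past);
        struct ggml_tensor * KQ_soft_max = ggml_soft_max(ctx0, KQ_masked);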
