From f385f8dee83d1baf59896b2eb09f1524dc9cde45 Mon Sep 17 00:00:00 2001
From: Ben Garney
Date: Sun, 12 Mar 2023 13:28:36 -0700
Subject: [PATCH] Allow using prompt files (#59)

---
 utils.cpp | 14 ++++++++++++++
 1 file changed, 14 insertions(+)

diff --git a/utils.cpp b/utils.cpp
index 5435d47..13d4aa0 100644
--- a/utils.cpp
+++ b/utils.cpp
@@ -4,6 +4,10 @@
 #include <cstring>
 #include <fstream>
 #include <string>
+#include <iterator>
+#include <algorithm>
+#include <sstream>
+#include <iostream>
 
 #if defined(_MSC_VER) || defined(__MINGW32__)
 #include <malloc.h> // using malloc.h with MSC/MINGW
@@ -21,6 +25,14 @@ bool gpt_params_parse(int argc, char ** argv, gpt_params & params) {
             params.n_threads = std::stoi(argv[++i]);
         } else if (arg == "-p" || arg == "--prompt") {
             params.prompt = argv[++i];
+        } else if (arg == "-f" || arg == "--file") {
+
+            std::ifstream file(argv[++i]);
+
+            std::copy(std::istreambuf_iterator<char>(file),
+                      std::istreambuf_iterator<char>(),
+                      back_inserter(params.prompt));
+
         } else if (arg == "-n" || arg == "--n_predict") {
             params.n_predict = std::stoi(argv[++i]);
         } else if (arg == "--top_k") {
@@ -59,6 +71,8 @@ void gpt_print_usage(int argc, char ** argv, const gpt_params & params) {
     fprintf(stderr, "  -t N, --threads N     number of threads to use during computation (default: %d)\n", params.n_threads);
     fprintf(stderr, "  -p PROMPT, --prompt PROMPT\n");
     fprintf(stderr, "                        prompt to start generation with (default: random)\n");
+    fprintf(stderr, "  -f FNAME, --file FNAME\n");
+    fprintf(stderr, "                        prompt file to start generation.\n");
     fprintf(stderr, "  -n N, --n_predict N   number of tokens to predict (default: %d)\n", params.n_predict);
     fprintf(stderr, "  --top_k N             top-k sampling (default: %d)\n", params.top_k);
     fprintf(stderr, "  --top_p N             top-p sampling (default: %.1f)\n", params.top_p);
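
Note (not part of the patch): the new -f/--file branch reads the whole file
into params.prompt with the std::istreambuf_iterator / std::copy idiom. Below
is a minimal standalone sketch of that same idiom, for illustration only; the
file name "prompt.txt" is hypothetical, and the open-failure check is an
addition that the patch itself does not perform.

    #include <algorithm>
    #include <cstdio>
    #include <fstream>
    #include <iterator>
    #include <string>

    int main() {
        std::string prompt;

        std::ifstream file("prompt.txt"); // hypothetical example file
        if (!file) {                      // the patch does not check for open failure
            std::fprintf(stderr, "failed to open prompt file\n");
            return 1;
        }

        // copy every character of the stream, including newlines, into the string
        std::copy(std::istreambuf_iterator<char>(file),
                  std::istreambuf_iterator<char>(),
                  std::back_inserter(prompt));

        std::fprintf(stderr, "read %zu bytes\n", prompt.size());
        return 0;
    }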