Use buffering

pull/43/head
maekawatoshiki 2 years ago
parent 129c7d1ea8
commit 640fd77a25

@@ -73,7 +73,11 @@ struct llama_model {
 bool llama_model_load(const std::string & fname, llama_model & model, gpt_vocab & vocab, int n_ctx) {
     printf("%s: loading model from '%s' - please wait ...\n", __func__, fname.c_str());
 
+    const size_t f_buf_size = 1024*1024;
+    char *f_buf = (char *)malloc(f_buf_size);
+
     auto fin = std::ifstream(fname, std::ios::binary);
+    fin.rdbuf()->pubsetbuf(f_buf, f_buf_size);
     if (!fin) {
         fprintf(stderr, "%s: failed to open '%s'\n", __func__, fname.c_str());
         return false;
@@ -311,6 +315,7 @@ bool llama_model_load(const std::string & fname, llama_model & model, gpt_vocab
         printf("%s: loading model part %d/%d from '%s'\n", __func__, i+1, n_parts, fname_part.c_str());
 
         fin = std::ifstream(fname_part, std::ios::binary);
+        fin.rdbuf()->pubsetbuf(f_buf, f_buf_size);
         fin.seekg(file_offset);
 
         // load weights
@@ -496,6 +501,8 @@ bool llama_model_load(const std::string & fname, llama_model & model, gpt_vocab
         fin.close();
     }
 
+    free(f_buf);
+
     return true;
 }

Loading…
Cancel
Save