parent bd9f710a45
commit ed683187cb
@@ -0,0 +1,6 @@
#
# t5

set(TEST_TARGET t5)
add_executable(${TEST_TARGET} main.cpp)
target_link_libraries(${TEST_TARGET} PRIVATE ggml ggml_utils)
@@ -0,0 +1,3 @@
# t5

ref: https://github.com/huggingface/transformers/blob/main/src/transformers/models/t5/modeling_t5.py
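The referenced modeling_t5.py is the upstream PyTorch implementation this example converts from. For orientation, a minimal sketch of loading the same checkpoint through the transformers helpers that the conversion script below already imports; the "google/flan-t5-small" model name is an assumption for illustration, not part of this commit:

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

# Assumed checkpoint name, used only to illustrate the reference implementation.
tokenizer = AutoTokenizer.from_pretrained("google/flan-t5-small")
model = AutoModelForSeq2SeqLM.from_pretrained("google/flan-t5-small")

# Run one generation to confirm the PyTorch model works before converting it.
inputs = tokenizer("translate English to German: Hello, world!", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=20)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))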
@@ -0,0 +1,25 @@
import io
import sys
import torch

import code

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

if len(sys.argv) < 3:
    print("Usage: convert-flan-t5-pt-to-ggml.py path-to-pt-model dir-output [use-f32]\n")
    sys.exit(1)

fname_inp=sys.argv[1] + "/pytorch_model.bin"

try:
    model_bytes = open(fname_inp, "rb").read()
    with io.BytesIO(model_bytes) as fp:
        checkpoint = torch.load(fp, map_location="cpu")
except:
    print("Error: failed to load PyTorch model file: %s" % fname_inp)
    sys.exit(1)

# list all keys
for k in checkpoint.keys():
    print(k)
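Per the usage string above, the script expects the path to a downloaded FLAN-T5 checkpoint directory and an output directory; the concrete paths in this invocation are assumptions for illustration:

python convert-flan-t5-pt-to-ggml.py ./models/flan-t5-small ./models/flan-t5-small-ggml

At this stage the script only loads pytorch_model.bin with torch.load and prints every tensor key it contains.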
@@ -0,0 +1,3 @@
int main() {
    return 0;
}