diff --git a/examples/whisper.wasm/index-tmpl.html b/examples/whisper.wasm/index-tmpl.html
index 182527f..5511f69 100644
--- a/examples/whisper.wasm/index-tmpl.html
+++ b/examples/whisper.wasm/index-tmpl.html
@@ -297,6 +297,11 @@
storeFS(fname, buf);
}
reader.readAsArrayBuffer(file);
+
+ document.getElementById('fetch-whisper-tiny-en').style.display = 'none';
+ document.getElementById('fetch-whisper-base-en').style.display = 'none';
+ document.getElementById('fetch-whisper-tiny').style.display = 'none';
+ document.getElementById('fetch-whisper-base').style.display = 'none';
}
// fetch a remote file from remote URL using the Fetch API
diff --git a/whisper.cpp b/whisper.cpp
index 6c2e0e0..4f23cde 100644
--- a/whisper.cpp
+++ b/whisper.cpp
@@ -1042,7 +1042,7 @@ static bool whisper_model_load(const std::string & fname, whisper_context & wctx
fin.read(reinterpret_cast<char *>(tensor->data), ggml_nbytes(tensor));
- //printf("%24s - [%5d, %5d], type = %6s, %6.2f MB\n", name.data(), ne[0], ne[1], ftype == 0 ? "float" : "f16", ggml_nbytes(tensor)/1024.0/1024.0);
+ //printf("%48s - [%5d, %5d, %5d], type = %6s, %6.2f MB\n", name.data(), ne[0], ne[1], ne[2], ftype == 0 ? "float" : "f16", ggml_nbytes(tensor)/1024.0/1024.0);
total_size += ggml_nbytes(tensor);
model.n_loaded++;
}
@@ -2708,7 +2708,7 @@ int whisper_full(
//{
// const auto tt = token.pt > 0.10 ? ctx->vocab.id_to_token[token.tid] : "[?]";
- // printf("%s: %10s %6.3f '%s'\n", __func__, tt.c_str(), token.pt, ctx->vocab.id_to_token[token.id].c_str());
+ // printf("%s: %10s %6d %6.3f '%s'\n", __func__, tt.c_str(), token.id, token.pt, ctx->vocab.id_to_token[token.id].c_str());
//}
// end of text token