Merge branch 'master' of https://github.com/jafri/whisper.cpp into HEAD

commit 2ef576379a (pull/121/head), jafri, 3 years ago

@@ -168,6 +168,8 @@ target_include_directories(${TARGET} PUBLIC

 if (MSVC)
     target_link_libraries(${TARGET} PRIVATE ${WHISPER_EXTRA_LIBS} ${CMAKE_THREAD_LIBS_INIT})
+    set(WHISPER_EXTRA_FLAGS ${WHISPER_EXTRA_FLAGS} -D_CRT_SECURE_NO_WARNINGS)
 else()
     target_link_libraries(${TARGET} PRIVATE m ${WHISPER_EXTRA_LIBS} ${CMAKE_THREAD_LIBS_INIT})
 endif()

@@ -180,10 +182,6 @@ if (BUILD_SHARED_LIBS)
     target_compile_definitions(${TARGET} PUBLIC
         WHISPER_SHARED
     )
-
-    if (MSVC)
-        target_compile_definitions(${TARGET} PUBLIC __AVX2__ _CRT_SECURE_NO_WARNINGS)
-    endif()
 endif()

 target_compile_definitions(${TARGET} PUBLIC

@@ -273,6 +273,45 @@ to highlight words with high or low confidence:

 <img width="965" alt="image" src="https://user-images.githubusercontent.com/1991296/197356445-311c8643-9397-4e5e-b46e-0b4b4daa2530.png">

+## Word-level timestamps (experimental)
+
+The [main](examples/main) example has experimental support for word-level timestamp generation. The accuracy
+is not great, but it might be improved in the future.
+
+To use it, simply add the `-owts` command-line argument. There is a free parameter `-wt` (the word timestamp
+probability threshold) that should be set to around `0.01`.
+
+Here are a few *"typical"* examples:
+
+```
+./main -m ./models/ggml-base.en.bin -f ./samples/jfk.wav -owts
+source ./samples/jfk.wav.wts
+ffplay ./samples/jfk.wav.mp4
+```
+
+https://user-images.githubusercontent.com/1991296/199337465-dbee4b5e-9aeb-48a3-b1c6-323ac4db5b2c.mp4
+
+---
+
+```
+./main -m ./models/ggml-base.en.bin -f ./samples/mm0.wav -owts
+source ./samples/mm0.wav.wts
+ffplay ./samples/mm0.wav.mp4
+```
+
+https://user-images.githubusercontent.com/1991296/199337504-cc8fd233-0cb7-4920-95f9-4227de3570aa.mp4
+
+---
+
+```
+./main -m ./models/ggml-base.en.bin -f ./samples/gb0.wav -owts
+source ./samples/gb0.wav.wts
+ffplay ./samples/gb0.wav.mp4
+```
+
+https://user-images.githubusercontent.com/1991296/199337538-b7b0c7a3-2753-4a88-a0cd-f28a317987ba.mp4
+
+---
+
 ## Implementation details

 - The core tensor operations are implemented in C ([ggml.h](ggml.h) / [ggml.c](ggml.c))

@@ -46,8 +46,6 @@ EMSCRIPTEN_BINDINGS(whisper) {
         struct whisper_full_params params = whisper_full_default_params(whisper_sampling_strategy::WHISPER_SAMPLING_GREEDY);

-        printf("full_default: available threads %d\n", std::thread::hardware_concurrency());
-
         params.print_realtime   = true;
         params.print_progress   = false;
         params.print_timestamps = true;
@@ -57,9 +55,6 @@ EMSCRIPTEN_BINDINGS(whisper) {
         params.n_threads = std::min(8, (int) std::thread::hardware_concurrency());
         params.offset_ms = 0;

-        printf("full_default: using %d threads\n", params.n_threads);
-        printf("full_default: language '%s'\n", params.language);
-
         std::vector<float> pcmf32;
         const int n = audio["length"].as<int>();
@@ -71,6 +66,20 @@ EMSCRIPTEN_BINDINGS(whisper) {
         emscripten::val memoryView = audio["constructor"].new_(memory, reinterpret_cast<uintptr_t>(pcmf32.data()), n);
         memoryView.call<void>("set", audio);

+        // print system information
+        {
+            printf("system_info: n_threads = %d / %d | %s\n",
+                    params.n_threads, std::thread::hardware_concurrency(), whisper_print_system_info());
+
+            printf("%s: processing %d samples, %.1f sec, %d threads, %d processors, lang = %s, task = %s ...\n",
+                    __func__, int(pcmf32.size()), float(pcmf32.size())/WHISPER_SAMPLE_RATE,
+                    params.n_threads, 1,
+                    params.language,
+                    params.translate ? "translate" : "transcribe");
+
+            printf("\n");
+        }
+
         int ret = whisper_full(g_contexts[index], params, pcmf32.data(), pcmf32.size());

         whisper_print_timings(g_contexts[index]);

File diff suppressed because one or more lines are too long

@@ -6,21 +6,28 @@ It can be used as a reference for using the `whisper.cpp` library in other projects.
 ```
 ./main -h

-usage: ./main [options] file0.wav file1.wav ...
+usage: ./bin/main [options] file0.wav file1.wav ...

 options:
   -h,       --help           show this help message and exit
   -s SEED,  --seed SEED      RNG seed (default: -1)
   -t N,     --threads N      number of threads to use during computation (default: 4)
-  -o N,     --offset N       offset in milliseconds (default: 0)
+  -p N,     --processors N   number of processors to use during computation (default: 1)
+  -ot N,    --offset-t N     time offset in milliseconds (default: 0)
+  -on N,    --offset-n N     segment index offset (default: 0)
+  -mc N,    --max-context N  maximum number of text context tokens to store (default: max)
+  -wt N,    --word-thold N   word timestamp probability threshold (default: 0.010000)
   -v,       --verbose        verbose output
             --translate      translate from source language to english
   -otxt,    --output-txt     output result in a text file
   -ovtt,    --output-vtt     output result in a vtt file
   -osrt,    --output-srt     output result in a srt file
+  -owts,    --output-words   output word-level timestamps to a text file
   -ps,      --print_special  print special tokens
+  -pc,      --print_colors   print colors
   -nt,      --no_timestamps  do not print timestamps
   -l LANG,  --language LANG  spoken language (default: en)
   -m FNAME, --model FNAME    model path (default: models/ggml-base.en.bin)
   -f FNAME, --file FNAME     input WAV file path
 ```
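
Combining the new flags, a typical invocation that exercises both the multi-processor and the word-timestamp code paths might look like this (a sketch, using the default model path and the bundled jfk.wav sample referenced elsewhere in this commit):

```
./main -m models/ggml-base.en.bin -f samples/jfk.wav -p 2 -wt 0.01 -owts
```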

@@ -36,18 +36,57 @@ std::string to_timestamp(int64_t t, bool comma = false) {
     return std::string(buf);
 }

+void replace_all(std::string & s, const std::string & search, const std::string & replace) {
+    for (size_t pos = 0; ; pos += replace.length()) {
+        pos = s.find(search, pos);
+        if (pos == std::string::npos) break;
+        s.erase(pos, search.length());
+        s.insert(pos, replace);
+    }
+}
+
+// a cost-function that is high for text that takes longer to pronounce
+float voice_length(const std::string & text) {
+    float res = 0.0f;
+
+    for (size_t i = 0; i < text.size(); ++i) {
+        if (text[i] == ' ') {
+            res += 0.01f;
+        } else if (text[i] == ',') {
+            res += 2.00f;
+        } else if (text[i] == '.') {
+            res += 3.00f;
+        } else if (text[i] == '!') {
+            res += 3.00f;
+        } else if (text[i] == '?') {
+            res += 3.00f;
+        } else if (text[i] >= '0' && text[i] <= '9') {
+            res += 3.00f;
+        } else {
+            res += 1.00f;
+        }
+    }
+
+    return res;
+}
+
 // command-line parameters
 struct whisper_params {
     int32_t seed      = -1; // RNG seed, not used currently
     int32_t n_threads = std::min(4, (int32_t) std::thread::hardware_concurrency());
+    int32_t n_processors = 1;
     int32_t offset_t_ms = 0;
     int32_t offset_n    = 0;
+    int32_t max_context = -1;
+
+    float word_thold = 0.01f;

     bool verbose              = false;
     bool translate            = false;
     bool output_txt           = false;
     bool output_vtt           = false;
     bool output_srt           = false;
+    bool output_wts           = false;
     bool print_special_tokens = false;
     bool print_colors         = false;
     bool no_timestamps        = false;
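
To make the voice_length() heuristic above concrete: for the token text ` Hello,` the cost is 0.01 (the space) + 5*1.00 (the letters) + 2.00 (the comma) = 7.01, while a digit-heavy token such as ` 1961` costs 0.01 + 4*3.00 = 12.01. These relative weights are what the proportional timestamp splitting in output_wts() further down relies on.
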
@@ -73,10 +112,16 @@ bool whisper_params_parse(int argc, char ** argv, whisper_params & params) {
             params.seed = std::stoi(argv[++i]);
         } else if (arg == "-t" || arg == "--threads") {
             params.n_threads = std::stoi(argv[++i]);
+        } else if (arg == "-p" || arg == "--processors") {
+            params.n_processors = std::stoi(argv[++i]);
         } else if (arg == "-ot" || arg == "--offset-t") {
             params.offset_t_ms = std::stoi(argv[++i]);
         } else if (arg == "-on" || arg == "--offset-n") {
             params.offset_n = std::stoi(argv[++i]);
+        } else if (arg == "-mc" || arg == "--max-context") {
+            params.max_context = std::stoi(argv[++i]);
+        } else if (arg == "-wt" || arg == "--word-thold") {
+            params.word_thold = std::stof(argv[++i]);
         } else if (arg == "-v" || arg == "--verbose") {
             params.verbose = true;
         } else if (arg == "--translate") {
@@ -94,6 +139,8 @@ bool whisper_params_parse(int argc, char ** argv, whisper_params & params) {
             params.output_vtt = true;
         } else if (arg == "-osrt" || arg == "--output-srt") {
             params.output_srt = true;
+        } else if (arg == "-owts" || arg == "--output-words") {
+            params.output_wts = true;
         } else if (arg == "-ps" || arg == "--print_special") {
             params.print_special_tokens = true;
         } else if (arg == "-pc" || arg == "--print_colors") {
@@ -125,13 +172,17 @@ void whisper_print_usage(int argc, char ** argv, const whisper_params & params)
     fprintf(stderr, "  -h,       --help           show this help message and exit\n");
     fprintf(stderr, "  -s SEED,  --seed SEED      RNG seed (default: -1)\n");
     fprintf(stderr, "  -t N,     --threads N      number of threads to use during computation (default: %d)\n", params.n_threads);
+    fprintf(stderr, "  -p N,     --processors N   number of processors to use during computation (default: %d)\n", params.n_processors);
     fprintf(stderr, "  -ot N,    --offset-t N     time offset in milliseconds (default: %d)\n", params.offset_t_ms);
     fprintf(stderr, "  -on N,    --offset-n N     segment index offset (default: %d)\n", params.offset_n);
+    fprintf(stderr, "  -mc N,    --max-context N  maximum number of text context tokens to store (default: max)\n");
+    fprintf(stderr, "  -wt N,    --word-thold N   word timestamp probability threshold (default: %f)\n", params.word_thold);
     fprintf(stderr, "  -v,       --verbose        verbose output\n");
     fprintf(stderr, "            --translate      translate from source language to english\n");
     fprintf(stderr, "  -otxt,    --output-txt     output result in a text file\n");
     fprintf(stderr, "  -ovtt,    --output-vtt     output result in a vtt file\n");
     fprintf(stderr, "  -osrt,    --output-srt     output result in a srt file\n");
+    fprintf(stderr, "  -owts,    --output-words   output word-level timestamps to a text file\n");
     fprintf(stderr, "  -ps,      --print_special  print special tokens\n");
     fprintf(stderr, "  -pc,      --print_colors   print colors\n");
     fprintf(stderr, "  -nt,      --no_timestamps  do not print timestamps\n");
@@ -269,6 +320,385 @@ bool output_srt(struct whisper_context * ctx, const char * fname, const whisper_params & params) {
     return true;
 }

+// word-level timestamps (experimental)
+// TODO: make ffmpeg output optional
+// TODO: extra pass to detect unused speech and assign to tokens
+// TODO: font parameter adjustments
+// TODO: move to whisper.h/whisper.cpp and add parameter to select max line-length of subtitles
+bool output_wts(struct whisper_context * ctx, const char * fname, const char * fname_inp, const whisper_params & params, const std::vector<float> & pcmf32) {
+    std::vector<float> pcm_avg(pcmf32.size(), 0);
+
+    // average the fabs of the signal
+    {
+        const int hw = 32;
+
+        for (int i = 0; i < pcmf32.size(); i++) {
+            float sum = 0;
+            for (int j = -hw; j <= hw; j++) {
+                if (i + j >= 0 && i + j < pcmf32.size()) {
+                    sum += fabs(pcmf32[i + j]);
+                }
+            }
+            pcm_avg[i] = sum/(2*hw + 1);
+        }
+    }
+
+    struct token_info {
+        int64_t t0 = -1;
+        int64_t t1 = -1;
+
+        int64_t tt0 = -1;
+        int64_t tt1 = -1;
+
+        whisper_token id;
+        whisper_token tid;
+
+        float p     = 0.0f;
+        float pt    = 0.0f;
+        float ptsum = 0.0f;
+
+        std::string text;
+        float vlen = 0.0f; // voice length of this token
+    };
+
+    int64_t t_beg  = 0;
+    int64_t t_last = 0;
+
+    whisper_token tid_last = 0;
+
+    std::ofstream fout(fname);
+
+    fprintf(stderr, "%s: saving output to '%s'\n", __func__, fname);
+
+    fout << "#!/bin/bash" << "\n";
+    fout << "\n";
+    fout << "ffmpeg -i " << fname_inp << " -f lavfi -i color=size=1200x120:duration=" << float(pcmf32.size() + 1000)/WHISPER_SAMPLE_RATE << ":rate=25:color=black -vf \"";
+
+    bool is_first = true;
+
+    for (int i = 0; i < whisper_full_n_segments(ctx); i++) {
+        const int64_t t0 = whisper_full_get_segment_t0(ctx, i);
+        const int64_t t1 = whisper_full_get_segment_t1(ctx, i);
+
+        const char * text = whisper_full_get_segment_text(ctx, i);
+
+        const int s0 = std::max(0, (int) (t0*WHISPER_SAMPLE_RATE/100));
+        const int s1 = std::min((int) pcmf32.size(), (int) (t1*WHISPER_SAMPLE_RATE/100));
+
+        const int n = whisper_full_n_tokens(ctx, i);
+
+        std::vector<token_info> tokens(n);
+
+        if (n <= 1) {
+            continue;
+        }
+
+        for (int j = 0; j < n; ++j) {
+            struct whisper_token_data token = whisper_full_get_token_data(ctx, i, j);
+
+            if (j == 0) {
+                if (token.id == whisper_token_beg(ctx)) {
+                    tokens[j    ].t0 = t0;
+                    tokens[j    ].t1 = t0;
+                    tokens[j + 1].t0 = t0;
+
+                    t_beg    = t0;
+                    t_last   = t0;
+                    tid_last = whisper_token_beg(ctx);
+                } else {
+                    tokens[j    ].t0 = t_last;
+                }
+            }
+
+            const int64_t tt = t_beg + 2*(token.tid - whisper_token_beg(ctx));
+
+            tokens[j].id    = token.id;
+            tokens[j].tid   = token.tid;
+            tokens[j].p     = token.p;
+            tokens[j].pt    = token.pt;
+            tokens[j].ptsum = token.ptsum;
+
+            tokens[j].text = whisper_token_to_str(ctx, token.id);
+            tokens[j].vlen = voice_length(tokens[j].text);
+
+            if (token.pt > params.word_thold && token.ptsum > 0.01 && token.tid > tid_last && tt <= t1) {
+                if (j > 0) {
+                    tokens[j - 1].t1 = tt;
+                }
+                tokens[j].t0 = tt;
+                tid_last = token.tid;
+            }
+        }
+
+        tokens[n - 2].t1 = t1;
+        tokens[n - 1].t0 = t1;
+        tokens[n - 1].t1 = t1;
+
+        t_last = t1;
+
+        // find intervals of tokens with unknown timestamps
+        // fill the timestamps by proportionally splitting the interval based on the token voice lengths
+        {
+            int p0 = 0;
+            int p1 = 0;
+
+            while (true) {
+                while (p1 < n && tokens[p1].t1 < 0) {
+                    p1++;
+                }
+
+                if (p1 >= n) {
+                    p1--;
+                }
+
+                if (p1 > p0) {
+                    double psum = 0.0;
+                    for (int j = p0; j <= p1; j++) {
+                        psum += tokens[j].vlen;
+                    }
+
+                    //printf("analyzing %d - %d, psum = %f\n", p0, p1, psum);
+
+                    const double dt = tokens[p1].t1 - tokens[p0].t0;
+
+                    // split the time proportionally to the voice length
+                    for (int j = p0 + 1; j <= p1; j++) {
+                        const double ct = tokens[j - 1].t0 + dt*tokens[j - 1].vlen/psum;
+
+                        tokens[j - 1].t1 = ct;
+                        tokens[j    ].t0 = ct;
+                    }
+                }
+
+                p1++;
+                p0 = p1;
+                if (p1 >= n) {
+                    break;
+                }
+            }
+        }
+
+        // fix up (just in case)
+        for (int j = 0; j < n - 1; j++) {
+            if (tokens[j].t1 < 0) {
+                tokens[j + 1].t0 = tokens[j].t1;
+            }
+
+            if (j > 0) {
+                if (tokens[j - 1].t1 > tokens[j].t0) {
+                    tokens[j].t0 = tokens[j - 1].t1;
+                    tokens[j].t1 = std::max(tokens[j].t0, tokens[j].t1);
+                }
+            }
+
+            tokens[j].tt0 = tokens[j].t0;
+            tokens[j].tt1 = tokens[j].t1;
+        }
+
+        // VAD
+        // expand or contract tokens based on voice activity
+        {
+            const int hw = WHISPER_SAMPLE_RATE/8;
+
+            for (int j = 0; j < n; j++) {
+                if (tokens[j].id >= whisper_token_eot(ctx)) {
+                    continue;
+                }
+
+                const int64_t t0 = tokens[j].t0;
+                const int64_t t1 = tokens[j].t1;
+
+                int s0 = std::max(0, (int) (t0*WHISPER_SAMPLE_RATE/100));
+                int s1 = std::min((int) pcmf32.size() - 1, (int) (t1*WHISPER_SAMPLE_RATE/100));
+
+                const int ss0 = std::max(0, (int) (t0*WHISPER_SAMPLE_RATE/100) - hw);
+                const int ss1 = std::min((int) pcmf32.size() - 1, (int) (t1*WHISPER_SAMPLE_RATE/100) + hw);
+
+                const int n = ss1 - ss0;
+
+                float sum = 0.0f;
+
+                for (int k = ss0; k < ss1; k++) {
+                    sum += pcm_avg[k];
+                }
+
+                const float thold = 0.5*sum/n;
+
+                {
+                    int k = s0;
+                    if (pcm_avg[k] > thold && j > 0) {
+                        while (k > 0 && pcm_avg[k] > thold) {
+                            k--;
+                        }
+                        tokens[j].t0 = (int64_t) (100*k/WHISPER_SAMPLE_RATE);
+                        if (tokens[j].t0 < tokens[j - 1].t1) {
+                            tokens[j].t0 = tokens[j - 1].t1;
+                        } else {
+                            s0 = k;
+                        }
+                    } else {
+                        while (pcm_avg[k] < thold && k < s1) {
+                            k++;
+                        }
+                        s0 = k;
+                        tokens[j].t0 = 100*k/WHISPER_SAMPLE_RATE;
+                    }
+                }
+
+                {
+                    int k = s1;
+                    if (pcm_avg[k] > thold) {
+                        while (k < (int) pcmf32.size() - 1 && pcm_avg[k] > thold) {
+                            k++;
+                        }
+                        tokens[j].t1 = 100*k/WHISPER_SAMPLE_RATE;
+                        if (j < n - 1 && tokens[j].t1 > tokens[j + 1].t0) {
+                            tokens[j].t1 = tokens[j + 1].t0;
+                        } else {
+                            s1 = k;
+                        }
+                    } else {
+                        while (pcm_avg[k] < thold && k > s0) {
+                            k--;
+                        }
+                        s1 = k;
+                        tokens[j].t1 = 100*k/WHISPER_SAMPLE_RATE;
+                    }
+                }
+            }
+        }
+
+        // fixed token expand (optional)
+        {
+            const int t_expand = 0;
+
+            for (int j = 0; j < n; j++) {
+                if (j > 0) {
+                    tokens[j].t0 = std::max(0, (int) (tokens[j].t0 - t_expand));
+                }
+                if (j < n - 1) {
+                    tokens[j].t1 = tokens[j].t1 + t_expand;
+                }
+            }
+        }
+
+        // debug info
+        // TODO: toggle via parameter
+        for (int j = 0; j < n; ++j) {
+            const auto & token = tokens[j];
+            const auto tt = token.pt > params.word_thold && token.ptsum > 0.01 ? whisper_token_to_str(ctx, token.tid) : "[?]";
+            printf("%s: %10s %6.3f %6.3f %6.3f %6.3f %5d %5d '%s'\n", __func__,
+                    tt, token.p, token.pt, token.ptsum, token.vlen, (int) token.t0, (int) token.t1, token.text.c_str());
+
+            if (tokens[j].id >= whisper_token_eot(ctx)) {
+                continue;
+            }
+
+            //printf("[%s --> %s] %s\n", to_timestamp(token.t0).c_str(), to_timestamp(token.t1).c_str(), whisper_token_to_str(ctx, token.id));
+            //fout << "# " << to_timestamp(token.t0) << " --> " << to_timestamp(token.t1) << " " << whisper_token_to_str(ctx, token.id) << "\n";
+        }
+
+        // TODO: become parameters
+        static const int line_wrap = 60;
+        static const char * font = "/System/Library/Fonts/Supplemental/Courier New Bold.ttf";
+
+        if (!is_first) {
+            fout << ",";
+        }
+
+        // background text
+        fout << "drawtext=fontfile='" << font << "':fontsize=24:fontcolor=gray:x=(w-text_w)/2:y=h/2:text='':enable='between(t," << t0/100.0 << "," << t0/100.0 << ")'";
+
+        is_first = false;
+
+        for (int j = 0; j < n; ++j) {
+            const auto & token = tokens[j];
+
+            if (tokens[j].id >= whisper_token_eot(ctx)) {
+                continue;
+            }
+
+            std::string txt_bg;
+            std::string txt_fg; // highlight token
+            std::string txt_ul; // underline
+
+            txt_bg = "> ";
+            txt_fg = "> ";
+            txt_ul = "\\ \\ ";
+
+            {
+                int ncnt = 0;
+                for (int k = 0; k < n; ++k) {
+                    const auto & token2 = tokens[k];
+
+                    if (tokens[k].id >= whisper_token_eot(ctx)) {
+                        continue;
+                    }
+
+                    const std::string txt = whisper_token_to_str(ctx, token2.id);
+
+                    txt_bg += txt;
+
+                    if (k == j) {
+                        for (int l = 0; l < (int) txt.size(); ++l) {
+                            txt_fg += txt[l];
+                            txt_ul += "_";
+                        }
+                        txt_fg += "|";
+                    } else {
+                        for (int l = 0; l < (int) txt.size(); ++l) {
+                            txt_fg += "\\ ";
+                            txt_ul += "\\ ";
+                        }
+                    }
+
+                    ncnt += txt.size();
+
+                    if (ncnt > line_wrap) {
+                        if (k < j) {
+                            txt_bg = "> ";
+                            txt_fg = "> ";
+                            txt_ul = "\\ \\ ";
+                            ncnt = 0;
+                        } else {
+                            break;
+                        }
+                    }
+                }
+
+                ::replace_all(txt_bg, "'", "");
+                ::replace_all(txt_bg, "\"", "\\\"");
+                ::replace_all(txt_fg, "'", "");
+                ::replace_all(txt_fg, "\"", "\\\"");
+            }
+
+            // background text
+            fout << ",drawtext=fontfile='" << font << "':fontsize=24:fontcolor=gray:x=(w-text_w)/2:y=h/2:text='" << txt_bg << "':enable='between(t," << token.tt0/100.0 << "," << token.tt1/100.0 << ")'";
+
+            // foreground text
+            fout << ",drawtext=fontfile='" << font << "':fontsize=24:fontcolor=lightgreen:x=(w-text_w)/2+8:y=h/2:text='" << txt_fg << "':enable='between(t," << token.t0/100.0 << "," << token.t1/100.0 << ")'";
+
+            // underline
+            fout << ",drawtext=fontfile='" << font << "':fontsize=24:fontcolor=lightgreen:x=(w-text_w)/2+8:y=h/2+16:text='" << txt_ul << "':enable='between(t," << token.t0/100.0 << "," << token.t1/100.0 << ")'";
+        }
+    }
+
+    fout << "\" -c:v libx264 -pix_fmt yuv420p -y " << fname_inp << ".mp4" << "\n";
+
+    fout << "\n\n";
+    fout << "echo \"Your video has been saved to " << fname_inp << ".mp4\"" << "\n";
+    fout << "\n";
+    fout << "echo \" ffplay " << fname_inp << ".mp4\"\n";
+    fout << "\n";
+
+    fout.close();
+
+    fprintf(stderr, "%s: run 'source %s' to generate karaoke video\n", __func__, fname);
+
+    return true;
+}
+
 int main(int argc, char ** argv) {
     whisper_params params;
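
The interval-filling step inside output_wts() is easiest to follow with numbers (a hypothetical case): if three consecutive tokens with unknown timestamps have voice lengths 2.0, 1.0 and 1.0 and the enclosing interval spans dt = 80 centiseconds, then psum = 4.0 and the running boundary ct advances first by 80*2.0/4.0 = 40 and then by 80*1.0/4.0 = 20, placing the inner token boundaries at t0 + 40 and t0 + 60, with the last token ending at the already-known t1 = t0 + 80.
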
@@ -346,7 +776,8 @@ int main(int argc, char ** argv) {
     // print system information
     {
         fprintf(stderr, "\n");
-        fprintf(stderr, "system_info: n_threads = %d / %d | %s\n", params.n_threads, std::thread::hardware_concurrency(), whisper_print_system_info());
+        fprintf(stderr, "system_info: n_threads = %d / %d | %s\n",
+                params.n_threads*params.n_processors, std::thread::hardware_concurrency(), whisper_print_system_info());
     }

     // print some info about the processing
@@ -359,8 +790,9 @@ int main(int argc, char ** argv) {
             fprintf(stderr, "%s: WARNING: model is not multilingual, ignoring language and translation options\n", __func__);
         }
     }
-    fprintf(stderr, "%s: processing '%s' (%d samples, %.1f sec), %d threads, lang = %s, task = %s, timestamps = %d ...\n",
-            __func__, fname_inp.c_str(), int(pcmf32.size()), float(pcmf32.size())/WHISPER_SAMPLE_RATE, params.n_threads,
+    fprintf(stderr, "%s: processing '%s' (%d samples, %.1f sec), %d threads, %d processors, lang = %s, task = %s, timestamps = %d ...\n",
+            __func__, fname_inp.c_str(), int(pcmf32.size()), float(pcmf32.size())/WHISPER_SAMPLE_RATE,
+            params.n_threads, params.n_processors,
             params.language.c_str(),
             params.translate ? "translate" : "transcribe",
             params.no_timestamps ? 0 : 1);
@@ -380,6 +812,7 @@ int main(int argc, char ** argv) {
         wparams.translate            = params.translate;
         wparams.language             = params.language.c_str();
         wparams.n_threads            = params.n_threads;
+        wparams.n_max_text_ctx       = params.max_context >= 0 ? params.max_context : wparams.n_max_text_ctx;
         wparams.offset_ms            = params.offset_t_ms;

         // this callback is called on each new segment
@@ -388,11 +821,14 @@ int main(int argc, char ** argv) {
             wparams.new_segment_callback_user_data = &params;
         }

-        if (whisper_full(ctx, wparams, pcmf32.data(), pcmf32.size()) != 0) {
+        if (whisper_full_parallel(ctx, wparams, pcmf32.data(), pcmf32.size(), params.n_processors) != 0) {
             fprintf(stderr, "%s: failed to process audio\n", argv[0]);
             return 8;
         }
+    }
+
+    // output stuff
+    {
         printf("\n");

         // output to text file
@@ -412,6 +848,12 @@ int main(int argc, char ** argv) {
             const auto fname_srt = fname_inp + ".srt";
             output_srt(ctx, fname_srt.c_str(), params);
         }
+
+        // output to WTS file
+        if (params.output_wts) {
+            const auto fname_wts = fname_inp + ".wts";
+            output_wts(ctx, fname_wts.c_str(), fname_inp.c_str(), params, pcmf32);
+        }
     }
 }

@@ -39,6 +39,7 @@ struct whisper_params {
     int32_t n_threads  = std::min(4, (int32_t) std::thread::hardware_concurrency());
     int32_t step_ms    = 3000;
     int32_t length_ms  = 10000;
+    int32_t capture_id = -1;

     bool verbose              = false;
     bool translate            = false;
@@ -65,6 +66,8 @@ bool whisper_params_parse(int argc, char ** argv, whisper_params & params) {
             params.step_ms = std::stoi(argv[++i]);
         } else if (arg == "--length") {
             params.length_ms = std::stoi(argv[++i]);
+        } else if (arg == "-c" || arg == "--capture") {
+            params.capture_id = std::stoi(argv[++i]);
         } else if (arg == "-v" || arg == "--verbose") {
             params.verbose = true;
         } else if (arg == "--translate") {
@@ -109,6 +112,7 @@ void whisper_print_usage(int argc, char ** argv, const whisper_params & params)
     fprintf(stderr, "  -t N,     --threads N      number of threads to use during computation (default: %d)\n", params.n_threads);
     fprintf(stderr, "            --step N         audio step size in milliseconds (default: %d)\n", params.step_ms);
     fprintf(stderr, "            --length N       audio length in milliseconds (default: %d)\n", params.length_ms);
+    fprintf(stderr, "  -c ID,    --capture ID     capture device ID (default: -1)\n");
     fprintf(stderr, "  -v,       --verbose        verbose output\n");
     fprintf(stderr, "            --translate      translate from source language to english\n");
     fprintf(stderr, "  -kc,      --keep-context   keep text context from earlier audio (default: false)\n");
@@ -201,7 +205,7 @@ int main(int argc, char ** argv) {
     // init audio

-    if (!audio_sdl_init(-1)) {
+    if (!audio_sdl_init(params.capture_id)) {
        fprintf(stderr, "%s: audio_sdl_init() failed!\n", __func__);
        return 1;
    }
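
With the new option, selecting a specific input device presumably looks like this (assuming the example builds to a stream binary, as in upstream whisper.cpp):

```
./stream -c 1 --step 3000 --length 10000
```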

@@ -79,6 +79,12 @@ There are a lot of ways to improve this idea and I don't have much experience wi

 The plugin would then make an appropriate query using the selected text and code context to Copilot or GPT-3 and return the result.

+Here is a proof-of-concept:
+
+https://user-images.githubusercontent.com/1991296/199078847-0278fcde-5667-4748-ba0d-7d55381d6047.mp4
+
+For an explanation of how this works, see: https://twitter.com/ggerganov/status/1587168771789258756
+
 ## Discussion

 If you find this idea interesting, you can join the discussion here: https://github.com/ggerganov/whisper.cpp/discussions/108

@@ -469,7 +469,6 @@
                 printTextarea('js: processing - this might take a while ...');
                 printTextarea('js: the page will be unresponsive until the processing is completed');
                 printTextarea('');
-                printTextarea('');

                 setTimeout(function() {
                     var ret = Module.full_default(instance, audio, document.getElementById('language').value, translate);

@@ -44,6 +44,11 @@ static int pthread_create(pthread_t* out, void* unused, thread_ret_t(*func)(void
 static int pthread_join(pthread_t thread, void* unused) {
     return (int) WaitForSingleObject(thread, INFINITE);
 }
+
+static int sched_yield (void) {
+    Sleep (0);
+    return 0;
+}
 #else
 #include <pthread.h>
 #include <stdatomic.h>
@@ -1136,6 +1141,7 @@ struct ggml_state {
 // global state
 struct ggml_state g_state;
+atomic_int g_state_barrier = 0;

 ////////////////////////////////////////////////////////////////////////////////
@@ -1265,6 +1271,17 @@ int ggml_up64(int n) {
 ////////////////////////////////////////////////////////////////////////////////

 struct ggml_context * ggml_init(struct ggml_init_params params) {
+    // make this function thread safe
+    {
+        int processing = atomic_fetch_add(&g_state_barrier, 1);
+
+        while (processing > 0) {
+            // wait for other threads to finish
+            atomic_fetch_sub(&g_state_barrier, 1);
+            sched_yield();
+            processing = atomic_fetch_add(&g_state_barrier, 1);
+        }
+    }
+
     static bool is_first_call = true;
     if (is_first_call) {
         const uint64_t t_start = ggml_time_us(); UNUSED(t_start);
@@ -1308,6 +1325,9 @@ struct ggml_context * ggml_init(struct ggml_init_params params) {
     if (ctx == NULL) {
         GGML_PRINT_DEBUG("%s: no unused context found\n", __func__);
+
+        atomic_fetch_sub(&g_state_barrier, 1);
+
         return NULL;
     }
@@ -1322,10 +1342,25 @@ struct ggml_context * ggml_init(struct ggml_init_params params) {
     ggml_assert_aligned(ctx->mem_buffer);

+    GGML_PRINT_DEBUG("%s: context initialized\n", __func__);
+
+    atomic_fetch_sub(&g_state_barrier, 1);
+
     return ctx;
 }

 void ggml_free(struct ggml_context * ctx) {
+    // make this function thread safe
+    {
+        int processing = atomic_fetch_add(&g_state_barrier, 1);
+
+        while (processing > 0) {
+            // wait for other threads to finish
+            atomic_fetch_sub(&g_state_barrier, 1);
+            sched_yield();
+            processing = atomic_fetch_add(&g_state_barrier, 1);
+        }
+    }
+
     for (int i = 0; i < GGML_MAX_CONTEXTS; i++) {
         if (&g_state.contexts[i].context == ctx) {
             g_state.contexts[i].used = false;
@@ -1337,11 +1372,15 @@ void ggml_free(struct ggml_context * ctx) {
                 free(ctx->mem_buffer);
             }

+            atomic_fetch_sub(&g_state_barrier, 1);
+
             return;
         }
     }

     GGML_PRINT_DEBUG("%s: context not found\n", __func__);
+
+    atomic_fetch_sub(&g_state_barrier, 1);
 }

 size_t ggml_used_mem(const struct ggml_context * ctx) {
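
The same acquire/release dance guards both ggml_init() and ggml_free() above: the counter acts as a spin lock, and only the thread that raises it from 0 to 1 may proceed. Factored out, the pattern looks like the following stand-alone sketch (hypothetical helper names, written in C++ for brevity, not part of the commit):

```
#include <atomic>
#include <cstdio>
#include <thread>
#include <vector>

// plays the role of g_state_barrier above
std::atomic<int> barrier{0};

void state_lock() {
    int processing = barrier.fetch_add(1);
    while (processing > 0) {
        // another thread is inside the critical section - back off and retry
        barrier.fetch_sub(1);
        std::this_thread::yield();
        processing = barrier.fetch_add(1);
    }
}

void state_unlock() {
    barrier.fetch_sub(1);
}

int counter = 0;

int main() {
    std::vector<std::thread> workers;
    for (int i = 0; i < 4; ++i) {
        workers.emplace_back([] {
            for (int j = 0; j < 100000; ++j) {
                state_lock();
                ++counter; // protected: no data race despite 4 writers
                state_unlock();
            }
        });
    }
    for (auto & w : workers) w.join();
    printf("counter = %d\n", counter); // expect 400000
}
```
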

@@ -11,7 +11,7 @@ extern "C" {
 #define GGML_MAX_DIMS     4
 #define GGML_MAX_NODES    4096
 #define GGML_MAX_PARAMS   16
-#define GGML_MAX_CONTEXTS 16
+#define GGML_MAX_CONTEXTS 64
 #define GGML_MAX_OPT      4

 #ifdef __ARM_NEON

@@ -0,0 +1,63 @@
+@echo off
+
+pushd %~dp0
+set models_path=%CD%
+popd
+
+set argc=0
+for %%x in (%*) do set /A argc+=1
+
+set models=tiny.en tiny base.en base small.en small medium.en medium large
+
+if %argc% neq 1 (
+  echo.
+  echo Usage: download-ggml-model.cmd model
+  CALL :list_models
+  goto :eof
+)
+
+set model=%1
+
+for %%b in (%models%) do (
+  if "%%b"=="%model%" (
+    CALL :download_model
+    goto :eof
+  )
+)
+
+echo Invalid model: %model%
+CALL :list_models
+goto :eof
+
+:download_model
+echo Downloading ggml model %model%...
+
+cd %models_path%
+
+if exist "ggml-%model%.bin" (
+  echo Model %model% already exists. Skipping download.
+  goto :eof
+)
+
+PowerShell -NoProfile -ExecutionPolicy Bypass -Command "Invoke-WebRequest -Uri https://ggml.ggerganov.com/ggml-model-whisper-%model%.bin -OutFile ggml-%model%.bin"
+
+if %ERRORLEVEL% neq 0 (
+  echo Failed to download ggml model %model%
+  echo Please try again later or download the original Whisper model files and convert them yourself.
+  goto :eof
+)
+
+echo Done! Model %model% saved in %models_path%\models\ggml-%model%.bin
+echo You can now use it like this:
+echo   main.exe -m %models_path%\models\ggml-%model%.bin -f %models_path%\samples\jfk.wav
+goto :eof
+
+:list_models
+  echo.
+  echo Available models:
+  (for %%a in (%models%) do (
+    echo %%a
+  ))
+  echo.
+exit /b
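
For reference, a typical invocation of the new script (presumably from a Windows command prompt, matching the usage text it prints) would be:

```
download-ggml-model.cmd base.en
```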

@@ -1,3 +1,4 @@
+#define WHISPER_BUILD
 #include "whisper.h"

 #include "ggml.h"
@@ -210,14 +211,6 @@ struct whisper_vocab {
     }
 };

-struct whisper_token_data {
-    whisper_token id;  // token id
-    whisper_token tid; // forced timestamp token id
-
-    float p;  // probability of the token
-    float pt; // probability of the timestamp token
-};
-
 struct whisper_segment {
     int64_t t0;
     int64_t t1;
@@ -386,6 +379,7 @@ struct whisper_model {
     // context
     struct ggml_context * ctx;
+    struct ggml_context * ctx_mem;

     // tensors
     int n_loaded;
@@ -400,7 +394,8 @@ struct whisper_context {
     int64_t t_decode_us = 0;
     int64_t t_start_us  = 0;

-    std::vector<uint8_t> buf_model;
+    std::vector<uint8_t> * buf_model; // the model buffer is read-only and can be shared between processors
+    std::vector<uint8_t>   buf_memory;
     std::vector<uint8_t>   buf_compute;
     std::vector<uint8_t>   buf_compute_layer;
@@ -412,7 +407,6 @@ struct whisper_context {
     std::vector<float> probs;
     std::vector<float> logits;

-    std::vector<whisper_token_data> tokens_cur;
     std::vector<whisper_segment> result_all;

     std::vector<whisper_token> prompt_past;
@@ -502,13 +496,16 @@ bool whisper_model_load(const std::string & fname, whisper_context & wctx) {
         fprintf(stderr, "%s: f16  = %d\n", __func__, hparams.f16);
         fprintf(stderr, "%s: type = %d\n", __func__, model.type);

-        wctx.buf_model.resize(MEM_REQ_MODEL.at(model.type));
+        wctx.buf_model = new std::vector<uint8_t>();
+        wctx.buf_model->resize(MEM_REQ_MODEL.at(model.type));
+        wctx.buf_memory.resize(std::max(MEM_REQ_MODEL.at(model.type), MEM_REQ_MODEL.at(model.type))); // TODO: TMP !!!
         wctx.buf_compute.resize(std::max(MEM_REQ_ENCODE.at(model.type), MEM_REQ_DECODE.at(model.type)));
         wctx.buf_compute_layer.resize(std::max(MEM_REQ_ENCODE_LAYER.at(model.type), MEM_REQ_DECODE_LAYER.at(model.type)));

         // this is the total memory required to run the inference
         const size_t mem_required =
-                  wctx.buf_model.size() +
+                  wctx.buf_model->size() +
+                  wctx.buf_memory.size() +
                   wctx.buf_compute.size() +
                   wctx.buf_compute_layer.size();
@@ -591,6 +588,7 @@ bool whisper_model_load(const std::string & fname, whisper_context & wctx) {
     size_t ctx_size = 0;
+    size_t ctx_mem_size = 0;

     {
         const auto & hparams = model.hparams;
@@ -699,11 +697,11 @@ bool whisper_model_load(const std::string & fname, whisper_context & wctx) {
             ctx_size += n_text_layer*( n_text_state*ggml_type_size(GGML_TYPE_F32)); // cross_attn_ln_1_b
         }

-        ctx_size += n_text_layer*n_text_ctx*n_text_state*ggml_type_size(GGML_TYPE_F16); // memory_k
-        ctx_size += n_text_layer*n_text_ctx*n_text_state*ggml_type_size(GGML_TYPE_F16); // memory_v
+        ctx_mem_size += n_text_layer*n_text_ctx*n_text_state*ggml_type_size(GGML_TYPE_F16); // memory_k
+        ctx_mem_size += n_text_layer*n_text_ctx*n_text_state*ggml_type_size(GGML_TYPE_F16); // memory_v

-        ctx_size += n_text_layer*n_audio_ctx*n_text_state*ggml_type_size(GGML_TYPE_F16); // memory_cross_k
-        ctx_size += n_text_layer*n_audio_ctx*n_text_state*ggml_type_size(GGML_TYPE_F16); // memory_cross_v
+        ctx_mem_size += n_text_layer*n_audio_ctx*n_text_state*ggml_type_size(GGML_TYPE_F16); // memory_cross_k
+        ctx_mem_size += n_text_layer*n_audio_ctx*n_text_state*ggml_type_size(GGML_TYPE_F16); // memory_cross_v

         ctx_size += (15 + 15*n_audio_layer + 24*n_text_layer)*256; // object overhead
@@ -713,8 +711,8 @@ bool whisper_model_load(const std::string & fname, whisper_context & wctx) {
     // create the ggml context
     {
         struct ggml_init_params params = {
-            .mem_size   = wctx.buf_model.size(),
-            .mem_buffer = wctx.buf_model.data(),
+            .mem_size   = wctx.buf_model->size(),
+            .mem_buffer = wctx.buf_model->data(),
         };

         model.ctx = ggml_init(params);
@@ -724,6 +722,20 @@ bool whisper_model_load(const std::string & fname, whisper_context & wctx) {
         }
     }

+    // create the ggml memory context
+    {
+        struct ggml_init_params params = {
+            .mem_size   = wctx.buf_memory.size(),
+            .mem_buffer = wctx.buf_memory.data(),
+        };
+
+        model.ctx_mem = ggml_init(params);
+        if (!model.ctx_mem) {
+            fprintf(stderr, "%s: ggml_init() failed\n", __func__);
+            return false;
+        }
+    }
+
     // prepare memory for the weights
     {
         auto & ctx = model.ctx;
@@ -922,7 +934,7 @@ bool whisper_model_load(const std::string & fname, whisper_context & wctx) {
     // key + value memory
     {
-        auto & ctx = model.ctx;
+        auto & ctx = model.ctx_mem;

         const auto & hparams = model.hparams;
@@ -1831,7 +1843,8 @@ whisper_token_data whisper_sample_best(
         }
    }

-    result.pt = max_ts/(sum_ts + 1e-6);
+    result.pt    = max_ts/(sum_ts + 1e-10);
+    result.ptsum = sum_ts;
 }

 // find the top K tokens
@@ -1898,14 +1911,19 @@ whisper_vocab::id whisper_sample_timestamp(
     return probs_id[0].second;
 }

-static std::string to_timestamp(int64_t t) {
-    int64_t sec = t/100;
-    int64_t msec = t - sec*100;
-    int64_t min = sec/60;
-    sec = sec - min*60;
+//  500 -> 00:05.000
+// 6000 -> 01:00.000
+static std::string to_timestamp(int64_t t, bool comma = false) {
+    int64_t msec = t * 10;
+    int64_t hr = msec / (1000 * 60 * 60);
+    msec = msec - hr * (1000 * 60 * 60);
+    int64_t min = msec / (1000 * 60);
+    msec = msec - min * (1000 * 60);
+    int64_t sec = msec / 1000;
+    msec = msec - sec * 1000;

     char buf[32];
-    snprintf(buf, sizeof(buf), "%02d:%02d.%03d", (int) min, (int) sec, (int) msec);
+    snprintf(buf, sizeof(buf), "%02d:%02d:%02d%s%03d", (int) hr, (int) min, (int) sec, comma ? "," : ".", (int) msec);

     return std::string(buf);
 }
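
Since whisper.cpp timestamps are expressed in centiseconds, the rewritten to_timestamp() first converts to milliseconds with t * 10 and then peels off hours, minutes and seconds: for example, t = 6100 yields msec = 61000 and formats as 00:01:01.000, or 00:01:01,000 with comma = true (the separator SRT subtitles expect).
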
@@ -2127,6 +2145,9 @@ struct whisper_context * whisper_init(const char * path_model) {
 void whisper_free(struct whisper_context * ctx) {
     if (ctx) {
+        if (ctx->buf_model) {
+            delete ctx->buf_model;
+        }
         delete ctx;
     }
 }
@@ -2189,7 +2210,7 @@ int whisper_decode(struct whisper_context * ctx, const whisper_token * tokens, i
     return 0;
 }

-whisper_token whisper_sample_best(struct whisper_context * ctx) {
+struct whisper_token_data whisper_sample_best(struct whisper_context * ctx) {
     const int64_t t_start_sample_us = ggml_time_us();

     // TODO: simplify
@@ -2197,7 +2218,7 @@ whisper_token whisper_sample_best(struct whisper_context * ctx) {
     ctx->t_sample_us += ggml_time_us() - t_start_sample_us;

-    return res.id;
+    return res;
 }

 whisper_token whisper_sample_timestamp(struct whisper_context * ctx) {
@@ -2300,6 +2321,7 @@ struct whisper_full_params whisper_full_default_params(enum whisper_sampling_str
             /*.strategy        =*/ WHISPER_SAMPLING_GREEDY,

             /*.n_threads       =*/ std::min(4, (int32_t) std::thread::hardware_concurrency()),
+            /*.n_max_text_ctx  =*/ 16384,
             /*.offset_ms       =*/ 0,

             /*.translate       =*/ false,
@@ -2331,6 +2353,7 @@ struct whisper_full_params whisper_full_default_params(enum whisper_sampling_str
             /*.strategy        =*/ WHISPER_SAMPLING_BEAM_SEARCH,

             /*.n_threads       =*/ std::min(4, (int32_t) std::thread::hardware_concurrency()),
+            /*.n_max_text_ctx  =*/ 16384,
             /*.offset_ms       =*/ 0,

             /*.translate       =*/ false,
@@ -2368,7 +2391,6 @@ int whisper_full(
         int n_samples) {
     // clear old results
     auto & result_all = ctx->result_all;
-    auto & tokens_cur = ctx->tokens_cur;

     result_all.clear();
@@ -2378,10 +2400,12 @@ int whisper_full(
         return -1;
     }

+    const int seek_start = params.offset_ms/10;
+
     // if length of spectrogram is less than 1s (100 samples), then return
     // basically don't process anything that is less than 1s
     // see issue #39: https://github.com/ggerganov/whisper.cpp/issues/39
-    if (whisper_n_len(ctx) < 100) {
+    if (whisper_n_len(ctx) < 100 + seek_start) {
         return 0;
     }
@@ -2405,8 +2429,14 @@ int whisper_full(
     int progress_prev = 0;
     int progress_step = 5;

+    std::vector<whisper_token_data> tokens_cur;
+    tokens_cur.reserve(whisper_n_text_ctx(ctx));
+
+    std::vector<whisper_token> prompt;
+    prompt.reserve(whisper_n_text_ctx(ctx));
+
     // main loop
-    int seek = params.offset_ms/10;
+    int seek = seek_start;
     while (true) {
         int progress_cur = (100*seek)/whisper_n_len(ctx);
         while (progress_cur >= progress_prev + progress_step) {
@@ -2426,13 +2456,12 @@ int whisper_full(
             return 7;
         }

-        std::vector<whisper_token> prompt;
-
         int n_past = 0;
+        prompt.clear();

         // if we have already generated some text, use it as a prompt to condition the next generation
         if (prompt_past.size() > 0) {
-            int n_take = std::min(whisper_n_text_ctx(ctx)/2, int(prompt_past.size()));
+            int n_take = std::min(std::min(params.n_max_text_ctx, whisper_n_text_ctx(ctx)/2), int(prompt_past.size()));

             prompt = { whisper_token_prev(ctx) };
             prompt.insert(prompt.begin() + 1, prompt_past.end() - n_take, prompt_past.end());
@@ -2474,7 +2503,7 @@ int whisper_full(
             // feel free to experiment!
             //
             {
-                auto token = whisper_sample_best(ctx->vocab, ctx->probs.data() + (ctx->probs.size() - ctx->vocab.n_vocab));
+                auto token = whisper_sample_best(ctx);

                 if (i == 0) {
                     token.tid = whisper_token_beg(ctx);
@@ -2490,7 +2519,10 @@ int whisper_full(
                 prompt.push_back(token.id);
                 tokens_cur.push_back(token);

-                //printf("%s: %s\n", __func__, ctx->vocab.id_to_token[id].c_str());
+                //{
+                //    const auto tt = token.pt > 0.10 ? ctx->vocab.id_to_token[token.tid] : "[?]";
+                //    printf("%s: %10s %6.3f '%s'\n", __func__, tt.c_str(), token.pt, ctx->vocab.id_to_token[token.id].c_str());
+                //}

                 // end of text token
                 if (token.id == whisper_token_eot(ctx)) {
@@ -2597,6 +2629,156 @@ int whisper_full(
     return 0;
 }

+int whisper_full_parallel(
+        struct whisper_context * ctx,
+        struct whisper_full_params params,
+        const float * samples,
+        int n_samples,
+        const int n_processors) {
+    if (n_processors == 1) {
+        return whisper_full(ctx, params, samples, n_samples);
+    }
+
+    int ret = 0;
+
+    // prepare separate contexts for each thread
+    std::vector<struct whisper_context> ctxs(n_processors - 1);
+
+    for (int i = 0; i < n_processors - 1; ++i) {
+        ctxs[i] = *ctx;
+
+        auto & model = ctxs[i].model;
+
+        // create the ggml memory context
+        {
+            struct ggml_init_params params = {
+                .mem_size   = ctxs[i].buf_memory.size(),
+                .mem_buffer = ctxs[i].buf_memory.data(),
+            };
+
+            model.ctx_mem = ggml_init(params);
+            if (!model.ctx_mem) {
+                fprintf(stderr, "%s: ggml_init() failed\n", __func__);
+                return false;
+            }
+        }
+
+        // separate key + value memory for each processor
+        {
+            auto & ctx = model.ctx_mem;
+
+            const auto & hparams = model.hparams;
+
+            const int n_text_state = hparams.n_text_state;
+            const int n_text_layer = hparams.n_text_layer;
+            const int n_text_ctx   = hparams.n_text_ctx;
+
+            // key/value memory for the self-attention layer
+            {
+                const int n_mem      = n_text_layer*n_text_ctx;
+                const int n_elements = n_text_state*n_mem;
+
+                model.memory_k = ggml_new_tensor_1d(ctx, GGML_TYPE_F16, n_elements);
+                model.memory_v = ggml_new_tensor_1d(ctx, GGML_TYPE_F16, n_elements);
+            }
+
+            // key/value memory for the cross-attention layer
+            {
+                const int n_audio_ctx = hparams.n_audio_ctx;
+
+                const int n_mem      = n_text_layer*n_audio_ctx;
+                const int n_elements = n_text_state*n_mem;
+
+                model.memory_cross_k = ggml_new_tensor_1d(ctx, GGML_TYPE_F16, n_elements);
+                model.memory_cross_v = ggml_new_tensor_1d(ctx, GGML_TYPE_F16, n_elements);
+            }
+
+            const size_t memory_size =
+                ggml_nbytes(model.memory_k)       + ggml_nbytes(model.memory_v) +
+                ggml_nbytes(model.memory_cross_k) + ggml_nbytes(model.memory_cross_v);
+        }
+    }
+
+    const int offset_samples = (WHISPER_SAMPLE_RATE*params.offset_ms)/1000;
+    const int n_samples_per_processor = (n_samples - offset_samples)/n_processors;
+
+    // the calling thread will process the first chunk
+    // while the other threads will process the remaining chunks
+
+    std::vector<std::thread> workers(n_processors - 1);
+    for (int i = 0; i < n_processors - 1; ++i) {
+        const int start_samples = offset_samples + (i + 1)*n_samples_per_processor;
+        const int n_samples_cur = (i == n_processors - 2) ? n_samples - start_samples : n_samples_per_processor;
+
+        auto params_cur = params;
+
+        params_cur.offset_ms = 0;
+        params_cur.print_progress = false;
+        params_cur.print_realtime = false;
+
+        params_cur.new_segment_callback = nullptr;
+        params_cur.new_segment_callback_user_data = nullptr;
+
+        workers[i] = std::thread(whisper_full, &ctxs[i], std::move(params_cur), samples + start_samples, n_samples_cur);
+    }
+
+    {
+        auto params_cur = params;
+
+        ret = whisper_full(ctx, std::move(params_cur), samples, offset_samples + n_samples_per_processor);
+    }
+
+    for (int i = 0; i < n_processors - 1; ++i) {
+        workers[i].join();
+    }
+
+    const int64_t offset_t = (int64_t) params.offset_ms/10.0;
+
+    // combine results into ctx->result_all
+    for (int i = 0; i < n_processors - 1; ++i) {
+        auto & results_i = ctxs[i].result_all;
+
+        for (int j = 0; j < (int) results_i.size(); ++j) {
+            // correct the segment timestamp taking into account the offset
+            results_i[j].t0 += 100*((i + 1)*n_samples_per_processor)/WHISPER_SAMPLE_RATE + offset_t;
+            results_i[j].t1 += 100*((i + 1)*n_samples_per_processor)/WHISPER_SAMPLE_RATE + offset_t;
+
+            // make sure that segments are not overlapping
+            if (ctx->result_all.size() > 0) {
+                results_i[j].t0 = std::max(results_i[j].t0, ctx->result_all.back().t1);
+            }
+
+            ctx->result_all.push_back(std::move(results_i[j]));
+
+            // call the new_segment_callback for each segment
+            if (params.new_segment_callback) {
+                params.new_segment_callback(ctx, params.new_segment_callback_user_data);
+            }
+        }
+
+        ctx->t_mel_us    += ctxs[i].t_mel_us;
+        ctx->t_sample_us += ctxs[i].t_sample_us;
+        ctx->t_encode_us += ctxs[i].t_encode_us;
+        ctx->t_decode_us += ctxs[i].t_decode_us;
+    }
+
+    // average the timings
+    ctx->t_mel_us    /= n_processors;
+    ctx->t_sample_us /= n_processors;
+    ctx->t_encode_us /= n_processors;
+    ctx->t_decode_us /= n_processors;
+
+    // print information about the audio boundaries
+    fprintf(stderr, "\n");
+    fprintf(stderr, "%s: the audio has been split into %d chunks at the following times:\n", __func__, n_processors);
+    for (int i = 0; i < n_processors - 1; ++i) {
+        fprintf(stderr, "%s: split %d - %s\n", __func__, (i + 1), to_timestamp(100*((i + 1)*n_samples_per_processor)/WHISPER_SAMPLE_RATE + offset_t).c_str());
+    }
+    fprintf(stderr, "%s: the transcription quality may be degraded near these boundaries\n", __func__);
+
+    return ret;
+}
+
 int whisper_full_n_segments(struct whisper_context * ctx) {
     return ctx->result_all.size();
 }
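
To see how the merge arithmetic in whisper_full_parallel() works out, take a hypothetical run: 60 seconds of 16 kHz audio (960000 samples), offset_ms = 0 and n_processors = 2 give n_samples_per_processor = 480000, so the segments produced by the second processor are shifted by 100*480000/WHISPER_SAMPLE_RATE = 3000 centiseconds (00:00:30.000) before being appended to ctx->result_all, and the reported split point is exactly that boundary.
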
@@ -2625,6 +2807,10 @@ whisper_token whisper_full_get_token_id(struct whisper_context * ctx, int i_segm
     return ctx->result_all[i_segment].tokens[i_token].id;
 }

+struct whisper_token_data whisper_full_get_token_data(struct whisper_context * ctx, int i_segment, int i_token) {
+    return ctx->result_all[i_segment].tokens[i_token];
+}
+
 float whisper_full_get_token_p(struct whisper_context * ctx, int i_segment, int i_token) {
     return ctx->result_all[i_segment].tokens[i_token].p;
 }

@@ -68,6 +68,15 @@ extern "C" {

     typedef int whisper_token;

+    struct whisper_token_data {
+        whisper_token id;  // token id
+        whisper_token tid; // forced timestamp token id
+
+        float p;     // probability of the token
+        float pt;    // probability of the timestamp token
+        float ptsum; // sum of probabilities of all timestamp tokens
+    };
+
     // Allocates all memory needed for the model and loads the model from the given file.
     // Returns NULL on failure.
     WHISPER_API struct whisper_context * whisper_init(const char * path_model);
@@ -120,7 +129,7 @@ extern "C" {
     // You can also implement your own sampling method using the whisper_get_probs() function.
     // whisper_sample_best() returns the token with the highest probability
     // whisper_sample_timestamp() returns the most probable timestamp token
-    WHISPER_API whisper_token whisper_sample_best(struct whisper_context * ctx);
+    WHISPER_API struct whisper_token_data whisper_sample_best(struct whisper_context * ctx);
     WHISPER_API whisper_token whisper_sample_timestamp(struct whisper_context * ctx);

     // Return the id of the specified language, returns -1 if not found
@@ -169,6 +178,7 @@ extern "C" {
         enum whisper_sampling_strategy strategy;

         int n_threads;
+        int n_max_text_ctx;
         int offset_ms;

         bool translate;
@@ -204,6 +214,16 @@ extern "C" {
             const float * samples,
             int n_samples);

+    // Split the input audio into chunks and process each chunk separately using whisper_full()
+    // It seems this approach can offer some speedup in some cases.
+    // However, the transcription accuracy can be worse at the beginning and end of each chunk.
+    WHISPER_API int whisper_full_parallel(
+            struct whisper_context * ctx,
+            struct whisper_full_params params,
+            const float * samples,
+            int n_samples,
+            const int n_processors);
+
     // Number of generated text segments.
     // A segment can be a few words, a sentence, or even a paragraph.
     WHISPER_API int whisper_full_n_segments(struct whisper_context * ctx);
@@ -222,6 +242,10 @@ extern "C" {
     WHISPER_API const char * whisper_full_get_token_text(struct whisper_context * ctx, int i_segment, int i_token);
     WHISPER_API whisper_token whisper_full_get_token_id (struct whisper_context * ctx, int i_segment, int i_token);

+    // Get token data for the specified token in the specified segment.
+    // This contains probabilities, timestamps, etc.
+    WHISPER_API struct whisper_token_data whisper_full_get_token_data(struct whisper_context * ctx, int i_segment, int i_token);
+
     // Get the probability of the specified token in the specified segment.
     WHISPER_API float whisper_full_get_token_p(struct whisper_context * ctx, int i_segment, int i_token);
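
Putting the new API surface together, a minimal consumer of whisper_full_parallel() and whisper_full_get_token_data() could look like the following sketch (WAV loading is elided; pcmf32 is assumed to already hold 16 kHz mono float samples):

```
#include "whisper.h"

#include <cstdio>
#include <vector>

int main() {
    struct whisper_context * ctx = whisper_init("models/ggml-base.en.bin");
    if (!ctx) return 1;

    std::vector<float> pcmf32; // assumed: 16 kHz mono samples, loaded elsewhere

    struct whisper_full_params params = whisper_full_default_params(WHISPER_SAMPLING_GREEDY);
    params.n_threads      = 4;
    params.n_max_text_ctx = 64; // the new field added by this commit

    // process with 2 processors - each one transcribes its own chunk of the audio
    if (whisper_full_parallel(ctx, params, pcmf32.data(), (int) pcmf32.size(), 2) != 0) {
        whisper_free(ctx);
        return 1;
    }

    for (int i = 0; i < whisper_full_n_segments(ctx); ++i) {
        printf("%s\n", whisper_full_get_segment_text(ctx, i));
        for (int j = 0; j < whisper_full_n_tokens(ctx, i); ++j) {
            // per-token probability and timestamp data - also new in this commit
            const struct whisper_token_data data = whisper_full_get_token_data(ctx, i, j);
            printf("  p = %.3f, pt = %.3f\n", data.p, data.pt);
        }
    }

    whisper_free(ctx);
    return 0;
}
```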
