diff --git a/examples/command.wasm/README.md b/examples/command.wasm/README.md index a6e0cf1..d317f3a 100644 --- a/examples/command.wasm/README.md +++ b/examples/command.wasm/README.md @@ -5,7 +5,7 @@ It runs fully in the browser via WebAssembly. Online demo: https://whisper.ggerganov.com/command/ -Terminal version: https://github.com/ggerganov/whisper.cpp/examples/command +Terminal version: [examples/command](/examples/command) ## Build instructions diff --git a/examples/command/README.md b/examples/command/README.md index de8b61c..3a521fb 100644 --- a/examples/command/README.md +++ b/examples/command/README.md @@ -13,7 +13,7 @@ More info is available in [issue #171](https://github.com/ggerganov/whisper.cpp/ https://user-images.githubusercontent.com/1991296/204038393-2f846eae-c255-4099-a76d-5735c25c49da.mp4 -Web version: https://github.com/ggerganov/whisper.cpp/examples/command.wasm +Web version: [examples/command.wasm](/examples/command.wasm) ## Building diff --git a/examples/whisper.objc/README.md b/examples/whisper.objc/README.md index 05c6613..5317282 100644 --- a/examples/whisper.objc/README.md +++ b/examples/whisper.objc/README.md @@ -5,6 +5,10 @@ The inference runs locally, on-device. 
https://user-images.githubusercontent.com/1991296/197385372-962a6dea-bca1-4d50-bf96-1d8c27b98c81.mp4 +Real-time transcription demo: + +https://user-images.githubusercontent.com/1991296/204126266-ce4177c6-6eca-4bd9-bca8-0e46d9da2364.mp4 + ## Usage ```java diff --git a/examples/whisper.objc/whisper.objc/ViewController.m b/examples/whisper.objc/whisper.objc/ViewController.m index d294178..d6aef36 100644 --- a/examples/whisper.objc/whisper.objc/ViewController.m +++ b/examples/whisper.objc/whisper.objc/ViewController.m @@ -198,6 +198,7 @@ void AudioInputCallback(void * inUserData, params.language = "en"; params.n_threads = max_threads; params.offset_ms = 0; + params.no_context = true; params.single_segment = self->stateInp.isRealtime; CFTimeInterval startTime = CACurrentMediaTime(); @@ -228,8 +229,11 @@ void AudioInputCallback(void * inUserData, result = [result stringByAppendingString:[NSString stringWithUTF8String:text_cur]]; } + const float tRecording = (float)self->stateInp.n_samples / (float)self->stateInp.dataFormat.mSampleRate; + // append processing time - result = [result stringByAppendingString:[NSString stringWithFormat:@"\n\n[processing time: %5.3f s]", endTime - startTime]]; + result = [result stringByAppendingString:[NSString stringWithFormat:@"\n\n[recording time: %5.3f s]", tRecording]]; + result = [result stringByAppendingString:[NSString stringWithFormat:@" \n[processing time: %5.3f s]", endTime - startTime]]; // dispatch the result to the main thread dispatch_async(dispatch_get_main_queue(), ^{