Add .clang-format

pull/359/head
Andy Maloney 2 years ago
parent 0be6a1afd9
commit 71ad534079

@ -0,0 +1,92 @@
# Options are listed here:
# https://clang.llvm.org/docs/ClangFormatStyleOptions.html
---
AccessModifierOffset: -3
AlignAfterOpenBracket: BlockIndent
AlignArrayOfStructures: Left
# AlwaysBreakTemplateDeclarations: Yes  # duplicate key disabled — overridden by the later "MultiLine" setting, which clang-format actually applies
AlignConsecutiveAssignments: true
AlignConsecutiveDeclarations: false
AlignConsecutiveMacros: true
AlignEscapedNewlines: Right
AlignOperands: true
AlignTrailingComments: true
AllowAllArgumentsOnNextLine: false
AllowAllConstructorInitializersOnNextLine: false
AllowAllParametersOfDeclarationOnNextLine: false
AllowShortBlocksOnASingleLine: false
AllowShortCaseLabelsOnASingleLine: false
AllowShortFunctionsOnASingleLine: None
AllowShortLambdasOnASingleLine: All
AllowShortIfStatementsOnASingleLine: Never
AllowShortLoopsOnASingleLine: false
AlwaysBreakAfterDefinitionReturnType: None
AlwaysBreakAfterReturnType: None
AlwaysBreakBeforeMultilineStrings: false
AlwaysBreakTemplateDeclarations: MultiLine
BinPackArguments: false
BinPackParameters: false
BitFieldColonSpacing: Both
BreakBeforeBinaryOperators: None
BreakBeforeBraces: Attach
BreakBeforeConceptDeclarations: Always
BreakBeforeInheritanceComma: false
BreakInheritanceList: BeforeColon
BreakBeforeTernaryOperators: true
BreakConstructorInitializersBeforeComma: true
BreakConstructorInitializers: AfterColon
BreakStringLiterals: true
ColumnLimit: 120
CompactNamespaces: false
ConstructorInitializerIndentWidth: 4
ContinuationIndentWidth: 4
Cpp11BracedListStyle: false
DeriveLineEnding: false
DerivePointerAlignment: false
EmptyLineBeforeAccessModifier: LogicalBlock
EmptyLineAfterAccessModifier: Never
FixNamespaceComments: false
IncludeBlocks: Preserve
IncludeIsMainRegex: '(Test)?$'
IndentCaseLabels: true
IndentPPDirectives: AfterHash
IndentWidth: 4
IndentWrappedFunctionNames: true
InsertBraces: true
KeepEmptyLinesAtTheStartOfBlocks: true
Language: Cpp
MaxEmptyLinesToKeep: 1
NamespaceIndentation: All
PackConstructorInitializers: Never
PenaltyBreakAssignment: 2
PenaltyBreakBeforeFirstCallParameter: 19
PenaltyBreakComment: 300
PenaltyBreakFirstLessLess: 120
PenaltyBreakString: 1000
PenaltyExcessCharacter: 1000000
PenaltyReturnTypeOnItsOwnLine: 1000
PointerAlignment: Middle
ReflowComments: true
SortIncludes: true
SortUsingDeclarations: true
SpaceAfterCStyleCast: true
SpaceAfterLogicalNot: false
SpaceAfterTemplateKeyword: true
SpaceBeforeAssignmentOperators: true
SpaceBeforeCaseColon: false
SpaceBeforeCpp11BracedList: false
SpaceBeforeCtorInitializerColon: true
SpaceBeforeInheritanceColon: true
SpaceBeforeParens: ControlStatements
SpaceBeforeRangeBasedForLoopColon: true
SpaceInEmptyParentheses: false
SpacesBeforeTrailingComments: 1
SpacesInAngles: false
SpacesInContainerLiterals: true
SpacesInCStyleCastParentheses: false
SpacesInParentheses: false
SpacesInSquareBrackets: false
Standard: c++11
TabWidth: 4
UseCRLF: false
UseTab: Never

217
ggml.h

@ -173,9 +173,9 @@
extern "C" { extern "C" {
#endif #endif
#include <stdint.h>
#include <stddef.h>
#include <stdbool.h> #include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#define GGML_MAX_DIMS 4 #define GGML_MAX_DIMS 4
#define GGML_MAX_NODES 4096 #define GGML_MAX_NODES 4096
@ -313,13 +313,13 @@ int64_t ggml_time_us(void);
int64_t ggml_cycles(void); int64_t ggml_cycles(void);
int64_t ggml_cycles_per_ms(void); int64_t ggml_cycles_per_ms(void);
void ggml_print_object (const struct ggml_object * obj); void ggml_print_object(const struct ggml_object * obj);
void ggml_print_objects(const struct ggml_context * ctx); void ggml_print_objects(const struct ggml_context * ctx);
int ggml_nelements(const struct ggml_tensor * tensor); int ggml_nelements(const struct ggml_tensor * tensor);
size_t ggml_nbytes (const struct ggml_tensor * tensor); size_t ggml_nbytes(const struct ggml_tensor * tensor);
size_t ggml_type_size (enum ggml_type type); size_t ggml_type_size(enum ggml_type type);
size_t ggml_element_size(const struct ggml_tensor * tensor); size_t ggml_element_size(const struct ggml_tensor * tensor);
struct ggml_context * ggml_init(struct ggml_init_params params); struct ggml_context * ggml_init(struct ggml_init_params params);
@ -327,29 +327,13 @@ void ggml_free(struct ggml_context * ctx);
size_t ggml_used_mem(const struct ggml_context * ctx); size_t ggml_used_mem(const struct ggml_context * ctx);
struct ggml_tensor * ggml_new_tensor( struct ggml_tensor * ggml_new_tensor(struct ggml_context * ctx, enum ggml_type type, int n_dims, const int * ne);
struct ggml_context * ctx,
enum ggml_type type,
int n_dims,
const int *ne);
struct ggml_tensor * ggml_new_tensor_1d( struct ggml_tensor * ggml_new_tensor_1d(struct ggml_context * ctx, enum ggml_type type, int ne0);
struct ggml_context * ctx,
enum ggml_type type,
int ne0);
struct ggml_tensor * ggml_new_tensor_2d( struct ggml_tensor * ggml_new_tensor_2d(struct ggml_context * ctx, enum ggml_type type, int ne0, int ne1);
struct ggml_context * ctx,
enum ggml_type type,
int ne0,
int ne1);
struct ggml_tensor * ggml_new_tensor_3d( struct ggml_tensor * ggml_new_tensor_3d(struct ggml_context * ctx, enum ggml_type type, int ne0, int ne1, int ne2);
struct ggml_context * ctx,
enum ggml_type type,
int ne0,
int ne1,
int ne2);
struct ggml_tensor * ggml_new_tensor_4d( struct ggml_tensor * ggml_new_tensor_4d(
struct ggml_context * ctx, struct ggml_context * ctx,
@ -357,17 +341,18 @@ struct ggml_tensor * ggml_new_tensor_4d(
int ne0, int ne0,
int ne1, int ne1,
int ne2, int ne2,
int ne3); int ne3
);
struct ggml_tensor * ggml_new_i32(struct ggml_context * ctx, int32_t value); struct ggml_tensor * ggml_new_i32(struct ggml_context * ctx, int32_t value);
struct ggml_tensor * ggml_new_f32(struct ggml_context * ctx, float value); struct ggml_tensor * ggml_new_f32(struct ggml_context * ctx, float value);
struct ggml_tensor * ggml_dup_tensor (struct ggml_context * ctx, const struct ggml_tensor * src); struct ggml_tensor * ggml_dup_tensor(struct ggml_context * ctx, const struct ggml_tensor * src);
struct ggml_tensor * ggml_view_tensor(struct ggml_context * ctx, const struct ggml_tensor * src); struct ggml_tensor * ggml_view_tensor(struct ggml_context * ctx, const struct ggml_tensor * src);
struct ggml_tensor * ggml_set_zero(struct ggml_tensor * tensor); struct ggml_tensor * ggml_set_zero(struct ggml_tensor * tensor);
struct ggml_tensor * ggml_set_i32 (struct ggml_tensor * tensor, int32_t value); struct ggml_tensor * ggml_set_i32(struct ggml_tensor * tensor, int32_t value);
struct ggml_tensor * ggml_set_f32 (struct ggml_tensor * tensor, float value); struct ggml_tensor * ggml_set_f32(struct ggml_tensor * tensor, float value);
int32_t ggml_get_i32_1d(const struct ggml_tensor * tensor, int i); int32_t ggml_get_i32_1d(const struct ggml_tensor * tensor, int i);
void ggml_set_i32_1d(const struct ggml_tensor * tensor, int i, int32_t value); void ggml_set_i32_1d(const struct ggml_tensor * tensor, int i, int32_t value);
@ -375,148 +360,84 @@ void ggml_set_i32_1d(const struct ggml_tensor * tensor, int i, int32_t value)
float ggml_get_f32_1d(const struct ggml_tensor * tensor, int i); float ggml_get_f32_1d(const struct ggml_tensor * tensor, int i);
void ggml_set_f32_1d(const struct ggml_tensor * tensor, int i, float value); void ggml_set_f32_1d(const struct ggml_tensor * tensor, int i, float value);
void * ggml_get_data (const struct ggml_tensor * tensor); void * ggml_get_data(const struct ggml_tensor * tensor);
float * ggml_get_data_f32(const struct ggml_tensor * tensor); float * ggml_get_data_f32(const struct ggml_tensor * tensor);
// //
// operations on tensors with backpropagation // operations on tensors with backpropagation
// //
struct ggml_tensor * ggml_dup( struct ggml_tensor * ggml_dup(struct ggml_context * ctx, struct ggml_tensor * a);
struct ggml_context * ctx,
struct ggml_tensor * a);
struct ggml_tensor * ggml_add( struct ggml_tensor * ggml_add(struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b);
struct ggml_context * ctx,
struct ggml_tensor * a,
struct ggml_tensor * b);
struct ggml_tensor * ggml_sub( struct ggml_tensor * ggml_sub(struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b);
struct ggml_context * ctx,
struct ggml_tensor * a,
struct ggml_tensor * b);
struct ggml_tensor * ggml_mul( struct ggml_tensor * ggml_mul(struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b);
struct ggml_context * ctx,
struct ggml_tensor * a,
struct ggml_tensor * b);
struct ggml_tensor * ggml_div( struct ggml_tensor * ggml_div(struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b);
struct ggml_context * ctx,
struct ggml_tensor * a,
struct ggml_tensor * b);
struct ggml_tensor * ggml_sqr( struct ggml_tensor * ggml_sqr(struct ggml_context * ctx, struct ggml_tensor * a);
struct ggml_context * ctx,
struct ggml_tensor * a);
struct ggml_tensor * ggml_sqrt( struct ggml_tensor * ggml_sqrt(struct ggml_context * ctx, struct ggml_tensor * a);
struct ggml_context * ctx,
struct ggml_tensor * a);
// return scalar // return scalar
// TODO: compute sum along rows // TODO: compute sum along rows
struct ggml_tensor * ggml_sum( struct ggml_tensor * ggml_sum(struct ggml_context * ctx, struct ggml_tensor * a);
struct ggml_context * ctx,
struct ggml_tensor * a);
// mean along rows // mean along rows
struct ggml_tensor * ggml_mean( struct ggml_tensor * ggml_mean(struct ggml_context * ctx, struct ggml_tensor * a);
struct ggml_context * ctx,
struct ggml_tensor * a);
// if a is the same shape as b, and a is not parameter, return a // if a is the same shape as b, and a is not parameter, return a
// otherwise, return a new tensor: repeat(a) to fit in b // otherwise, return a new tensor: repeat(a) to fit in b
struct ggml_tensor * ggml_repeat( struct ggml_tensor * ggml_repeat(struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b);
struct ggml_context * ctx,
struct ggml_tensor * a,
struct ggml_tensor * b);
struct ggml_tensor * ggml_abs( struct ggml_tensor * ggml_abs(struct ggml_context * ctx, struct ggml_tensor * a);
struct ggml_context * ctx,
struct ggml_tensor * a);
struct ggml_tensor * ggml_sgn( struct ggml_tensor * ggml_sgn(struct ggml_context * ctx, struct ggml_tensor * a);
struct ggml_context * ctx,
struct ggml_tensor * a);
struct ggml_tensor * ggml_neg( struct ggml_tensor * ggml_neg(struct ggml_context * ctx, struct ggml_tensor * a);
struct ggml_context * ctx,
struct ggml_tensor * a);
struct ggml_tensor * ggml_step( struct ggml_tensor * ggml_step(struct ggml_context * ctx, struct ggml_tensor * a);
struct ggml_context * ctx,
struct ggml_tensor * a);
struct ggml_tensor * ggml_relu( struct ggml_tensor * ggml_relu(struct ggml_context * ctx, struct ggml_tensor * a);
struct ggml_context * ctx,
struct ggml_tensor * a);
// TODO: double-check this computation is correct // TODO: double-check this computation is correct
struct ggml_tensor * ggml_gelu( struct ggml_tensor * ggml_gelu(struct ggml_context * ctx, struct ggml_tensor * a);
struct ggml_context * ctx,
struct ggml_tensor * a);
// normalize along rows // normalize along rows
// TODO: eps is hardcoded to 1e-5 for now // TODO: eps is hardcoded to 1e-5 for now
struct ggml_tensor * ggml_norm( struct ggml_tensor * ggml_norm(struct ggml_context * ctx, struct ggml_tensor * a);
struct ggml_context * ctx,
struct ggml_tensor * a);
// A: m rows, n columns // A: m rows, n columns
// B: p rows, n columns (i.e. we transpose it internally) // B: p rows, n columns (i.e. we transpose it internally)
// result is m columns, p rows // result is m columns, p rows
struct ggml_tensor * ggml_mul_mat( struct ggml_tensor * ggml_mul_mat(struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b);
struct ggml_context * ctx,
struct ggml_tensor * a,
struct ggml_tensor * b);
// //
// operations on tensors without backpropagation // operations on tensors without backpropagation
// //
// in-place, returns view(a) // in-place, returns view(a)
struct ggml_tensor * ggml_scale( struct ggml_tensor * ggml_scale(struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b);
struct ggml_context * ctx,
struct ggml_tensor * a,
struct ggml_tensor * b);
// a -> b, return view(b) // a -> b, return view(b)
struct ggml_tensor * ggml_cpy( struct ggml_tensor * ggml_cpy(struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b);
struct ggml_context * ctx,
struct ggml_tensor * a,
struct ggml_tensor * b);
// return view(a), b specifies the new shape // return view(a), b specifies the new shape
// TODO: when we start computing gradient, make a copy instead of view // TODO: when we start computing gradient, make a copy instead of view
struct ggml_tensor * ggml_reshape( struct ggml_tensor * ggml_reshape(struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b);
struct ggml_context * ctx,
struct ggml_tensor * a,
struct ggml_tensor * b);
// return view(a) // return view(a)
// TODO: when we start computing gradient, make a copy instead of view // TODO: when we start computing gradient, make a copy instead of view
struct ggml_tensor * ggml_reshape_2d( struct ggml_tensor * ggml_reshape_2d(struct ggml_context * ctx, struct ggml_tensor * a, int ne0, int ne1);
struct ggml_context * ctx,
struct ggml_tensor * a,
int ne0,
int ne1);
// return view(a) // return view(a)
// TODO: when we start computing gradient, make a copy instead of view // TODO: when we start computing gradient, make a copy instead of view
struct ggml_tensor * ggml_reshape_3d( struct ggml_tensor * ggml_reshape_3d(struct ggml_context * ctx, struct ggml_tensor * a, int ne0, int ne1, int ne2);
struct ggml_context * ctx,
struct ggml_tensor * a,
int ne0,
int ne1,
int ne2);
// offset in bytes // offset in bytes
struct ggml_tensor * ggml_view_1d( struct ggml_tensor * ggml_view_1d(struct ggml_context * ctx, struct ggml_tensor * a, int ne0, size_t offset);
struct ggml_context * ctx,
struct ggml_tensor * a,
int ne0,
size_t offset);
struct ggml_tensor * ggml_view_2d( struct ggml_tensor * ggml_view_2d(
struct ggml_context * ctx, struct ggml_context * ctx,
@ -524,7 +445,8 @@ struct ggml_tensor * ggml_view_2d(
int ne0, int ne0,
int ne1, int ne1,
size_t nb1, // row stride in bytes size_t nb1, // row stride in bytes
size_t offset); size_t offset
);
struct ggml_tensor * ggml_permute( struct ggml_tensor * ggml_permute(
struct ggml_context * ctx, struct ggml_context * ctx,
@ -532,61 +454,42 @@ struct ggml_tensor * ggml_permute(
int axis0, int axis0,
int axis1, int axis1,
int axis2, int axis2,
int axis3); int axis3
);
// alias for ggml_permute(ctx, a, 1, 0, 2, 3) // alias for ggml_permute(ctx, a, 1, 0, 2, 3)
struct ggml_tensor * ggml_transpose( struct ggml_tensor * ggml_transpose(struct ggml_context * ctx, struct ggml_tensor * a);
struct ggml_context * ctx,
struct ggml_tensor * a);
struct ggml_tensor * ggml_get_rows( struct ggml_tensor * ggml_get_rows(struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b);
struct ggml_context * ctx,
struct ggml_tensor * a,
struct ggml_tensor * b);
// set elements above the diagonal to -INF // set elements above the diagonal to -INF
// in-place, returns view(a) // in-place, returns view(a)
struct ggml_tensor * ggml_diag_mask_inf( struct ggml_tensor * ggml_diag_mask_inf(struct ggml_context * ctx, struct ggml_tensor * a, int n_past);
struct ggml_context * ctx,
struct ggml_tensor * a,
int n_past);
// in-place, returns view(a) // in-place, returns view(a)
struct ggml_tensor * ggml_soft_max( struct ggml_tensor * ggml_soft_max(struct ggml_context * ctx, struct ggml_tensor * a);
struct ggml_context * ctx,
struct ggml_tensor * a);
// rotary position embedding // rotary position embedding
// in-place, returns view(a) // in-place, returns view(a)
// if mode == 1, skip n_past elements // if mode == 1, skip n_past elements
// TODO: avoid creating a new tensor every time // TODO: avoid creating a new tensor every time
struct ggml_tensor * ggml_rope( struct ggml_tensor * ggml_rope(struct ggml_context * ctx, struct ggml_tensor * a, int n_past, int n_dims, int mode);
struct ggml_context * ctx,
struct ggml_tensor * a,
int n_past,
int n_dims,
int mode);
// padding = 1 // padding = 1
// TODO: we don't support extra parameters for now // TODO: we don't support extra parameters for now
// that's why we are hard-coding the stride, padding, and dilation // that's why we are hard-coding the stride, padding, and dilation
// not great .. // not great ..
struct ggml_tensor * ggml_conv_1d_1s( struct ggml_tensor * ggml_conv_1d_1s(struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b);
struct ggml_context * ctx,
struct ggml_tensor * a,
struct ggml_tensor * b);
struct ggml_tensor * ggml_conv_1d_2s( struct ggml_tensor * ggml_conv_1d_2s(struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b);
struct ggml_context * ctx,
struct ggml_tensor * a,
struct ggml_tensor * b);
struct ggml_tensor * ggml_flash_attn( struct ggml_tensor * ggml_flash_attn(
struct ggml_context * ctx, struct ggml_context * ctx,
struct ggml_tensor * q, struct ggml_tensor * q,
struct ggml_tensor * k, struct ggml_tensor * k,
struct ggml_tensor * v, struct ggml_tensor * v,
bool masked); bool masked
);
struct ggml_tensor * ggml_flash_ff( struct ggml_tensor * ggml_flash_ff(
struct ggml_context * ctx, struct ggml_context * ctx,
@ -594,23 +497,22 @@ struct ggml_tensor * ggml_flash_ff(
struct ggml_tensor * b0, struct ggml_tensor * b0,
struct ggml_tensor * b1, struct ggml_tensor * b1,
struct ggml_tensor * c0, struct ggml_tensor * c0,
struct ggml_tensor * c1); struct ggml_tensor * c1
);
// //
// automatic differentiation // automatic differentiation
// //
void ggml_set_param( void ggml_set_param(struct ggml_context * ctx, struct ggml_tensor * tensor);
struct ggml_context * ctx,
struct ggml_tensor * tensor);
void ggml_build_forward_expand(struct ggml_cgraph * cgraph, struct ggml_tensor * tensor); void ggml_build_forward_expand(struct ggml_cgraph * cgraph, struct ggml_tensor * tensor);
struct ggml_cgraph ggml_build_forward (struct ggml_tensor * tensor); struct ggml_cgraph ggml_build_forward(struct ggml_tensor * tensor);
struct ggml_cgraph ggml_build_backward(struct ggml_context * ctx, struct ggml_cgraph * gf, bool keep); struct ggml_cgraph ggml_build_backward(struct ggml_context * ctx, struct ggml_cgraph * gf, bool keep);
void ggml_graph_compute(struct ggml_context * ctx, struct ggml_cgraph * cgraph); void ggml_graph_compute(struct ggml_context * ctx, struct ggml_cgraph * cgraph);
void ggml_graph_reset (struct ggml_cgraph * cgraph); void ggml_graph_reset(struct ggml_cgraph * cgraph);
// print info and performance information for the graph // print info and performance information for the graph
void ggml_graph_print(const struct ggml_cgraph * cgraph); void ggml_graph_print(const struct ggml_cgraph * cgraph);
@ -712,10 +614,7 @@ struct ggml_opt_params {
struct ggml_opt_params ggml_opt_default_params(enum ggml_opt_type type); struct ggml_opt_params ggml_opt_default_params(enum ggml_opt_type type);
// optimize the function defined by the tensor f // optimize the function defined by the tensor f
enum ggml_opt_result ggml_opt( enum ggml_opt_result ggml_opt(struct ggml_context * ctx, struct ggml_opt_params params, struct ggml_tensor * f);
struct ggml_context * ctx,
struct ggml_opt_params params,
struct ggml_tensor * f);
// //
// system info // system info

File diff suppressed because it is too large Load Diff

@ -1,8 +1,8 @@
#ifndef WHISPER_H #ifndef WHISPER_H
#define WHISPER_H #define WHISPER_H
#include <stdint.h>
#include <stdbool.h> #include <stdbool.h>
#include <stdint.h>
#ifdef WHISPER_SHARED #ifdef WHISPER_SHARED
# ifdef _WIN32 # ifdef _WIN32
@ -12,7 +12,7 @@
# define WHISPER_API __declspec(dllimport) # define WHISPER_API __declspec(dllimport)
# endif # endif
# else # else
# define WHISPER_API __attribute__ ((visibility ("default"))) # define WHISPER_API __attribute__((visibility("default")))
# endif # endif
#else #else
# define WHISPER_API # define WHISPER_API
@ -28,47 +28,47 @@
extern "C" { extern "C" {
#endif #endif
// //
// C interface // C interface
// //
// The following interface is thread-safe as long as the sample whisper_context is not used by multiple threads // The following interface is thread-safe as long as the sample whisper_context is not used by multiple threads
// concurrently. // concurrently.
// //
// Basic usage: // Basic usage:
// //
// #include "whisper.h" // #include "whisper.h"
// //
// ... // ...
// //
// struct whisper_context * ctx = whisper_init("/path/to/ggml-base.en.bin"); // struct whisper_context * ctx = whisper_init("/path/to/ggml-base.en.bin");
// //
// if (whisper_full(ctx, wparams, pcmf32.data(), pcmf32.size()) != 0) { // if (whisper_full(ctx, wparams, pcmf32.data(), pcmf32.size()) != 0) {
// fprintf(stderr, "failed to process audio\n"); // fprintf(stderr, "failed to process audio\n");
// return 7; // return 7;
// } // }
// //
// const int n_segments = whisper_full_n_segments(ctx); // const int n_segments = whisper_full_n_segments(ctx);
// for (int i = 0; i < n_segments; ++i) { // for (int i = 0; i < n_segments; ++i) {
// const char * text = whisper_full_get_segment_text(ctx, i); // const char * text = whisper_full_get_segment_text(ctx, i);
// printf("%s", text); // printf("%s", text);
// } // }
// //
// whisper_free(ctx); // whisper_free(ctx);
// //
// ... // ...
// //
// This is a demonstration of the most straightforward usage of the library. // This is a demonstration of the most straightforward usage of the library.
// "pcmf32" contains the RAW audio data in 32-bit floating point format. // "pcmf32" contains the RAW audio data in 32-bit floating point format.
// //
// The interface also allows for more fine-grained control over the computation, but it requires a deeper // The interface also allows for more fine-grained control over the computation, but it requires a deeper
// understanding of how the model works. // understanding of how the model works.
// //
struct whisper_context; struct whisper_context;
typedef int whisper_token; typedef int whisper_token;
typedef struct whisper_token_data { typedef struct whisper_token_data {
whisper_token id; // token id whisper_token id; // token id
whisper_token tid; // forced timestamp token id whisper_token tid; // forced timestamp token id
@ -82,152 +82,144 @@ extern "C" {
int64_t t1; // end time of the token int64_t t1; // end time of the token
float vlen; // voice length of the token float vlen; // voice length of the token
} whisper_token_data; } whisper_token_data;
// Allocates all memory needed for the model and loads the model from the given file. // Allocates all memory needed for the model and loads the model from the given file.
// Returns NULL on failure. // Returns NULL on failure.
WHISPER_API struct whisper_context * whisper_init(const char * path_model); WHISPER_API struct whisper_context * whisper_init(const char * path_model);
// Frees all memory allocated by the model. // Frees all memory allocated by the model.
WHISPER_API void whisper_free(struct whisper_context * ctx); WHISPER_API void whisper_free(struct whisper_context * ctx);
// Convert RAW PCM audio to log mel spectrogram. // Convert RAW PCM audio to log mel spectrogram.
// The resulting spectrogram is stored inside the provided whisper context. // The resulting spectrogram is stored inside the provided whisper context.
// Returns 0 on success // Returns 0 on success
WHISPER_API int whisper_pcm_to_mel( WHISPER_API int whisper_pcm_to_mel(struct whisper_context * ctx, const float * samples, int n_samples, int n_threads);
struct whisper_context * ctx,
const float * samples, // This can be used to set a custom log mel spectrogram inside the provided whisper context.
int n_samples, // Use this instead of whisper_pcm_to_mel() if you want to provide your own log mel spectrogram.
int n_threads); // n_mel must be 80
// Returns 0 on success
// This can be used to set a custom log mel spectrogram inside the provided whisper context. WHISPER_API int whisper_set_mel(struct whisper_context * ctx, const float * data, int n_len, int n_mel);
// Use this instead of whisper_pcm_to_mel() if you want to provide your own log mel spectrogram.
// n_mel must be 80 // Run the Whisper encoder on the log mel spectrogram stored inside the provided whisper context.
// Returns 0 on success // Make sure to call whisper_pcm_to_mel() or whisper_set_mel() first.
WHISPER_API int whisper_set_mel( // offset can be used to specify the offset of the first frame in the spectrogram.
struct whisper_context * ctx, // Returns 0 on success
const float * data, WHISPER_API int whisper_encode(struct whisper_context * ctx, int offset, int n_threads);
int n_len,
int n_mel); // Run the Whisper decoder to obtain the logits and probabilities for the next token.
// Make sure to call whisper_encode() first.
// Run the Whisper encoder on the log mel spectrogram stored inside the provided whisper context. // tokens + n_tokens is the provided context for the decoder.
// Make sure to call whisper_pcm_to_mel() or whisper_set_mel() first. // n_past is the number of tokens to use from previous decoder calls.
// offset can be used to specify the offset of the first frame in the spectrogram. // Returns 0 on success
// Returns 0 on success WHISPER_API int whisper_decode(
WHISPER_API int whisper_encode(
struct whisper_context * ctx,
int offset,
int n_threads);
// Run the Whisper decoder to obtain the logits and probabilities for the next token.
// Make sure to call whisper_encode() first.
// tokens + n_tokens is the provided context for the decoder.
// n_past is the number of tokens to use from previous decoder calls.
// Returns 0 on success
WHISPER_API int whisper_decode(
struct whisper_context * ctx, struct whisper_context * ctx,
const whisper_token * tokens, const whisper_token * tokens,
int n_tokens, int n_tokens,
int n_past, int n_past,
int n_threads); int n_threads
);
// Token sampling methods.
// These are provided for convenience and can be used after each call to whisper_decode(). // Token sampling methods.
// You can also implement your own sampling method using the whisper_get_probs() function. // These are provided for convenience and can be used after each call to whisper_decode().
// whisper_sample_best() returns the token with the highest probability // You can also implement your own sampling method using the whisper_get_probs() function.
// whisper_sample_timestamp() returns the most probable timestamp token // whisper_sample_best() returns the token with the highest probability
WHISPER_API whisper_token_data whisper_sample_best(struct whisper_context * ctx); // whisper_sample_timestamp() returns the most probable timestamp token
WHISPER_API whisper_token_data whisper_sample_timestamp(struct whisper_context * ctx, bool is_initial); WHISPER_API whisper_token_data whisper_sample_best(struct whisper_context * ctx);
WHISPER_API whisper_token_data whisper_sample_timestamp(struct whisper_context * ctx, bool is_initial);
// Convert the provided text into tokens.
// The tokens pointer must be large enough to hold the resulting tokens. // Convert the provided text into tokens.
// Returns the number of tokens on success, no more than n_max_tokens // The tokens pointer must be large enough to hold the resulting tokens.
// Returns -1 on failure // Returns the number of tokens on success, no more than n_max_tokens
// TODO: not sure if correct // Returns -1 on failure
WHISPER_API int whisper_tokenize( // TODO: not sure if correct
WHISPER_API int whisper_tokenize(
struct whisper_context * ctx, struct whisper_context * ctx,
const char * text, const char * text,
whisper_token * tokens, whisper_token * tokens,
int n_max_tokens); int n_max_tokens
);
// Largest language id (i.e. number of available languages - 1)
WHISPER_API int whisper_lang_max_id(); // Largest language id (i.e. number of available languages - 1)
WHISPER_API int whisper_lang_max_id();
// Return the id of the specified language, returns -1 if not found
// Examples: // Return the id of the specified language, returns -1 if not found
// "de" -> 2 // Examples:
// "german" -> 2 // "de" -> 2
WHISPER_API int whisper_lang_id(const char * lang); // "german" -> 2
WHISPER_API int whisper_lang_id(const char * lang);
// Return the short string of the specified language id (e.g. 2 -> "de"), returns nullptr if not found
WHISPER_API const char * whisper_lang_str(int id); // Return the short string of the specified language id (e.g. 2 -> "de"), returns nullptr if not found
WHISPER_API const char * whisper_lang_str(int id);
// Use mel data at offset_ms to try and auto-detect the spoken language
// Make sure to call whisper_pcm_to_mel() or whisper_set_mel() first // Use mel data at offset_ms to try and auto-detect the spoken language
// Returns the top language id or negative on failure // Make sure to call whisper_pcm_to_mel() or whisper_set_mel() first
// If not null, fills the lang_probs array with the probabilities of all languages // Returns the top language id or negative on failure
// The array must be whisper_lang_max_id() + 1 in size // If not null, fills the lang_probs array with the probabilities of all languages
// ref: https://github.com/openai/whisper/blob/main/whisper/decoding.py#L18-L69 // The array must be whisper_lang_max_id() + 1 in size
WHISPER_API int whisper_lang_auto_detect( // ref: https://github.com/openai/whisper/blob/main/whisper/decoding.py#L18-L69
WHISPER_API int whisper_lang_auto_detect(
struct whisper_context * ctx, struct whisper_context * ctx,
int offset_ms, int offset_ms,
int n_threads, int n_threads,
float * lang_probs); float * lang_probs
);
WHISPER_API int whisper_n_len (struct whisper_context * ctx); // mel length WHISPER_API int whisper_n_len(struct whisper_context * ctx); // mel length
WHISPER_API int whisper_n_vocab (struct whisper_context * ctx); WHISPER_API int whisper_n_vocab(struct whisper_context * ctx);
WHISPER_API int whisper_n_text_ctx (struct whisper_context * ctx); WHISPER_API int whisper_n_text_ctx(struct whisper_context * ctx);
WHISPER_API int whisper_n_audio_ctx (struct whisper_context * ctx); WHISPER_API int whisper_n_audio_ctx(struct whisper_context * ctx);
WHISPER_API int whisper_is_multilingual(struct whisper_context * ctx); WHISPER_API int whisper_is_multilingual(struct whisper_context * ctx);
// The probabilities for the next token // The probabilities for the next token
WHISPER_API float * whisper_get_probs(struct whisper_context * ctx); WHISPER_API float * whisper_get_probs(struct whisper_context * ctx);
// Token Id -> String. Uses the vocabulary in the provided context // Token Id -> String. Uses the vocabulary in the provided context
WHISPER_API const char * whisper_token_to_str(struct whisper_context * ctx, whisper_token token); WHISPER_API const char * whisper_token_to_str(struct whisper_context * ctx, whisper_token token);
// Special tokens // Special tokens
WHISPER_API whisper_token whisper_token_eot (struct whisper_context * ctx); WHISPER_API whisper_token whisper_token_eot(struct whisper_context * ctx);
WHISPER_API whisper_token whisper_token_sot (struct whisper_context * ctx); WHISPER_API whisper_token whisper_token_sot(struct whisper_context * ctx);
WHISPER_API whisper_token whisper_token_prev(struct whisper_context * ctx); WHISPER_API whisper_token whisper_token_prev(struct whisper_context * ctx);
WHISPER_API whisper_token whisper_token_solm(struct whisper_context * ctx); WHISPER_API whisper_token whisper_token_solm(struct whisper_context * ctx);
WHISPER_API whisper_token whisper_token_not (struct whisper_context * ctx); WHISPER_API whisper_token whisper_token_not(struct whisper_context * ctx);
WHISPER_API whisper_token whisper_token_beg (struct whisper_context * ctx); WHISPER_API whisper_token whisper_token_beg(struct whisper_context * ctx);
WHISPER_API whisper_token whisper_token_lang(struct whisper_context * ctx, int lang_id); WHISPER_API whisper_token whisper_token_lang(struct whisper_context * ctx, int lang_id);
// Task tokens
WHISPER_API whisper_token whisper_token_translate(void);
WHISPER_API whisper_token whisper_token_transcribe(void);
// Performance information
WHISPER_API void whisper_print_timings(struct whisper_context * ctx);
WHISPER_API void whisper_reset_timings(struct whisper_context * ctx);
// Print system information
WHISPER_API const char * whisper_print_system_info(void);
////////////////////////////////////////////////////////////////////////////

// Available sampling strategies
enum whisper_sampling_strategy {
    WHISPER_SAMPLING_GREEDY,      // Always select the most probable token
    WHISPER_SAMPLING_BEAM_SEARCH, // TODO: not implemented yet!
};
// Text segment callback
// Called on every newly generated text segment
// Use the whisper_full_...() functions to obtain the text segments
typedef void (*whisper_new_segment_callback)(struct whisper_context * ctx, int n_new, void * user_data);
// Encoder begin callback
// If not NULL, called before the encoder starts
// If it returns false, the computation is aborted
typedef bool (*whisper_encoder_begin_callback)(struct whisper_context * ctx, void * user_data);
// Parameters for the whisper_full() function
// If you change the order or add new parameters, make sure to update the default values in whisper.cpp:
// whisper_full_default_params()
struct whisper_full_params { struct whisper_full_params {
enum whisper_sampling_strategy strategy; enum whisper_sampling_strategy strategy;
int n_threads; int n_threads;
@ -277,52 +269,54 @@ extern "C" {
whisper_encoder_begin_callback encoder_begin_callback; whisper_encoder_begin_callback encoder_begin_callback;
void * encoder_begin_callback_user_data; void * encoder_begin_callback_user_data;
}; };
WHISPER_API struct whisper_full_params whisper_full_default_params(enum whisper_sampling_strategy strategy); WHISPER_API struct whisper_full_params whisper_full_default_params(enum whisper_sampling_strategy strategy);
// Run the entire model: PCM -> log mel spectrogram -> encoder -> decoder -> text
// Uses the specified decoding strategy to obtain the text.
WHISPER_API int whisper_full(
    struct whisper_context * ctx,
    struct whisper_full_params params,
    const float * samples,
    int n_samples
);
// Split the input audio in chunks and process each chunk separately using whisper_full()
// It seems this approach can offer some speedup in some cases.
// However, the transcription accuracy can be worse at the beginning and end of each chunk.
WHISPER_API int whisper_full_parallel(
    struct whisper_context * ctx,
    struct whisper_full_params params,
    const float * samples,
    int n_samples,
    int n_processors
);
// Number of generated text segments.
// A segment can be a few words, a sentence, or even a paragraph.
WHISPER_API int whisper_full_n_segments(struct whisper_context * ctx);
// Get the start and end time of the specified segment.
WHISPER_API int64_t whisper_full_get_segment_t0(struct whisper_context * ctx, int i_segment);
WHISPER_API int64_t whisper_full_get_segment_t1(struct whisper_context * ctx, int i_segment);
// Get the text of the specified segment.
WHISPER_API const char * whisper_full_get_segment_text(struct whisper_context * ctx, int i_segment);
// Get number of tokens in the specified segment.
WHISPER_API int whisper_full_n_tokens(struct whisper_context * ctx, int i_segment);
// Get the token text of the specified token in the specified segment.
WHISPER_API const char * whisper_full_get_token_text(struct whisper_context * ctx, int i_segment, int i_token);
WHISPER_API whisper_token whisper_full_get_token_id(struct whisper_context * ctx, int i_segment, int i_token);
// Get token data for the specified token in the specified segment.
// This contains probabilities, timestamps, etc.
WHISPER_API whisper_token_data whisper_full_get_token_data(struct whisper_context * ctx, int i_segment, int i_token);
// Get the probability of the specified token in the specified segment.
WHISPER_API float whisper_full_get_token_p(struct whisper_context * ctx, int i_segment, int i_token);
#ifdef __cplusplus #ifdef __cplusplus
} }
