Add .clang-format

pull/359/head
Andy Maloney 2 years ago
parent 0be6a1afd9
commit 71ad534079

@ -0,0 +1,92 @@
# Options are listed here:
# https://clang.llvm.org/docs/ClangFormatStyleOptions.html
---
AccessModifierOffset: -3
AlignAfterOpenBracket: BlockIndent
AlignArrayOfStructures: Left
AlignConsecutiveAssignments: true
AlignConsecutiveDeclarations: false
AlignConsecutiveMacros: true
AlignEscapedNewlines: Right
AlignOperands: true
AlignTrailingComments: true
AllowAllArgumentsOnNextLine: false
AllowAllConstructorInitializersOnNextLine: false
AllowAllParametersOfDeclarationOnNextLine: false
AllowShortBlocksOnASingleLine: false
AllowShortCaseLabelsOnASingleLine: false
AllowShortFunctionsOnASingleLine: None
AllowShortLambdasOnASingleLine: All
AllowShortIfStatementsOnASingleLine: Never
AllowShortLoopsOnASingleLine: false
AlwaysBreakAfterDefinitionReturnType: None
AlwaysBreakAfterReturnType: None
AlwaysBreakBeforeMultilineStrings: false
AlwaysBreakTemplateDeclarations: MultiLine
BinPackArguments: false
BinPackParameters: false
BitFieldColonSpacing: Both
BreakBeforeBinaryOperators: None
BreakBeforeBraces: Attach
BreakBeforeConceptDeclarations: Always
BreakBeforeInheritanceComma: false
BreakInheritanceList: BeforeColon
BreakBeforeTernaryOperators: true
# BreakConstructorInitializersBeforeComma is deprecated and contradicted the
# modern BreakConstructorInitializers key, so only the latter is kept.
BreakConstructorInitializers: AfterColon
BreakStringLiterals: true
ColumnLimit: 120
CompactNamespaces: false
ConstructorInitializerIndentWidth: 4
ContinuationIndentWidth: 4
Cpp11BracedListStyle: false
DeriveLineEnding: false
DerivePointerAlignment: false
EmptyLineBeforeAccessModifier: LogicalBlock
EmptyLineAfterAccessModifier: Never
FixNamespaceComments: false
IncludeBlocks: Preserve
IncludeIsMainRegex: '(Test)?$'
IndentCaseLabels: true
IndentPPDirectives: AfterHash
IndentWidth: 4
IndentWrappedFunctionNames: true
InsertBraces: true
KeepEmptyLinesAtTheStartOfBlocks: true
Language: Cpp
MaxEmptyLinesToKeep: 1
NamespaceIndentation: All
PackConstructorInitializers: Never
PenaltyBreakAssignment: 2
PenaltyBreakBeforeFirstCallParameter: 19
PenaltyBreakComment: 300
PenaltyBreakFirstLessLess: 120
PenaltyBreakString: 1000
PenaltyExcessCharacter: 1000000
PenaltyReturnTypeOnItsOwnLine: 1000
PointerAlignment: Middle
ReflowComments: true
SortIncludes: true
SortUsingDeclarations: true
SpaceAfterCStyleCast: true
SpaceAfterLogicalNot: false
SpaceAfterTemplateKeyword: true
SpaceBeforeAssignmentOperators: true
SpaceBeforeCaseColon: false
SpaceBeforeCpp11BracedList: false
SpaceBeforeCtorInitializerColon: true
SpaceBeforeInheritanceColon: true
SpaceBeforeParens: ControlStatements
SpaceBeforeRangeBasedForLoopColon: true
SpaceInEmptyParentheses: false
SpacesBeforeTrailingComments: 1
SpacesInAngles: false
SpacesInContainerLiterals: true
SpacesInCStyleCastParentheses: false
SpacesInParentheses: false
SpacesInSquareBrackets: false
Standard: c++11
TabWidth: 4
UseCRLF: false
UseTab: Never

@ -173,12 +173,12 @@ static inline float fp32_from_bits(uint32_t w) {
} }
static inline uint32_t fp32_to_bits(float f) { static inline uint32_t fp32_to_bits(float f) {
union { union {
float as_value; float as_value;
uint32_t as_bits; uint32_t as_bits;
} fp32; } fp32;
fp32.as_value = f; fp32.as_value = f;
return fp32.as_bits; return fp32.as_bits;
} }
float ggml_fp16_to_fp32(ggml_fp16_t h) { float ggml_fp16_to_fp32(ggml_fp16_t h) {

297
ggml.h

@ -169,13 +169,13 @@
// //
// //
#ifdef __cplusplus #ifdef __cplusplus
extern "C" { extern "C" {
#endif #endif
#include <stdint.h>
#include <stddef.h>
#include <stdbool.h> #include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#define GGML_MAX_DIMS 4 #define GGML_MAX_DIMS 4
#define GGML_MAX_NODES 4096 #define GGML_MAX_NODES 4096
@ -191,7 +191,7 @@ typedef uint16_t ggml_fp16_t;
#endif #endif
// convert FP16 <-> FP32 // convert FP16 <-> FP32
float ggml_fp16_to_fp32(ggml_fp16_t x); float ggml_fp16_to_fp32(ggml_fp16_t x);
ggml_fp16_t ggml_fp32_to_fp16(float x); ggml_fp16_t ggml_fp32_to_fp16(float x);
struct ggml_object; struct ggml_object;
@ -253,8 +253,8 @@ enum ggml_op {
struct ggml_tensor { struct ggml_tensor {
enum ggml_type type; enum ggml_type type;
int n_dims; int n_dims;
int ne[GGML_MAX_DIMS]; // number of elements int ne[GGML_MAX_DIMS]; // number of elements
size_t nb[GGML_MAX_DIMS]; // stride in bytes: size_t nb[GGML_MAX_DIMS]; // stride in bytes:
// nb[0] = sizeof(type) // nb[0] = sizeof(type)
// nb[1] = nb[0] * ne[0] + padding // nb[1] = nb[0] * ne[0] + padding
@ -274,7 +274,7 @@ struct ggml_tensor {
int n_tasks; int n_tasks;
// performance // performance
int perf_runs; int perf_runs;
int64_t perf_cycles; int64_t perf_cycles;
int64_t perf_time_us; int64_t perf_time_us;
@ -296,7 +296,7 @@ struct ggml_cgraph {
struct ggml_tensor * leafs[GGML_MAX_NODES]; struct ggml_tensor * leafs[GGML_MAX_NODES];
// performance // performance
int perf_runs; int perf_runs;
int64_t perf_cycles; int64_t perf_cycles;
int64_t perf_time_us; int64_t perf_time_us;
}; };
@ -307,19 +307,19 @@ struct ggml_init_params {
void * mem_buffer; // if NULL, memory will be allocated internally void * mem_buffer; // if NULL, memory will be allocated internally
}; };
void ggml_time_init(void); // call this once at the beginning of the program void ggml_time_init(void); // call this once at the beginning of the program
int64_t ggml_time_ms(void); int64_t ggml_time_ms(void);
int64_t ggml_time_us(void); int64_t ggml_time_us(void);
int64_t ggml_cycles(void); int64_t ggml_cycles(void);
int64_t ggml_cycles_per_ms(void); int64_t ggml_cycles_per_ms(void);
void ggml_print_object (const struct ggml_object * obj); void ggml_print_object(const struct ggml_object * obj);
void ggml_print_objects(const struct ggml_context * ctx); void ggml_print_objects(const struct ggml_context * ctx);
int ggml_nelements(const struct ggml_tensor * tensor); int ggml_nelements(const struct ggml_tensor * tensor);
size_t ggml_nbytes (const struct ggml_tensor * tensor); size_t ggml_nbytes(const struct ggml_tensor * tensor);
size_t ggml_type_size (enum ggml_type type); size_t ggml_type_size(enum ggml_type type);
size_t ggml_element_size(const struct ggml_tensor * tensor); size_t ggml_element_size(const struct ggml_tensor * tensor);
struct ggml_context * ggml_init(struct ggml_init_params params); struct ggml_context * ggml_init(struct ggml_init_params params);
@ -327,290 +327,192 @@ void ggml_free(struct ggml_context * ctx);
size_t ggml_used_mem(const struct ggml_context * ctx); size_t ggml_used_mem(const struct ggml_context * ctx);
struct ggml_tensor * ggml_new_tensor( struct ggml_tensor * ggml_new_tensor(struct ggml_context * ctx, enum ggml_type type, int n_dims, const int * ne);
struct ggml_context * ctx,
enum ggml_type type, struct ggml_tensor * ggml_new_tensor_1d(struct ggml_context * ctx, enum ggml_type type, int ne0);
int n_dims,
const int *ne); struct ggml_tensor * ggml_new_tensor_2d(struct ggml_context * ctx, enum ggml_type type, int ne0, int ne1);
struct ggml_tensor * ggml_new_tensor_1d( struct ggml_tensor * ggml_new_tensor_3d(struct ggml_context * ctx, enum ggml_type type, int ne0, int ne1, int ne2);
struct ggml_context * ctx,
enum ggml_type type,
int ne0);
struct ggml_tensor * ggml_new_tensor_2d(
struct ggml_context * ctx,
enum ggml_type type,
int ne0,
int ne1);
struct ggml_tensor * ggml_new_tensor_3d(
struct ggml_context * ctx,
enum ggml_type type,
int ne0,
int ne1,
int ne2);
struct ggml_tensor * ggml_new_tensor_4d( struct ggml_tensor * ggml_new_tensor_4d(
struct ggml_context * ctx, struct ggml_context * ctx,
enum ggml_type type, enum ggml_type type,
int ne0, int ne0,
int ne1, int ne1,
int ne2, int ne2,
int ne3); int ne3
);
struct ggml_tensor * ggml_new_i32(struct ggml_context * ctx, int32_t value); struct ggml_tensor * ggml_new_i32(struct ggml_context * ctx, int32_t value);
struct ggml_tensor * ggml_new_f32(struct ggml_context * ctx, float value); struct ggml_tensor * ggml_new_f32(struct ggml_context * ctx, float value);
struct ggml_tensor * ggml_dup_tensor (struct ggml_context * ctx, const struct ggml_tensor * src); struct ggml_tensor * ggml_dup_tensor(struct ggml_context * ctx, const struct ggml_tensor * src);
struct ggml_tensor * ggml_view_tensor(struct ggml_context * ctx, const struct ggml_tensor * src); struct ggml_tensor * ggml_view_tensor(struct ggml_context * ctx, const struct ggml_tensor * src);
struct ggml_tensor * ggml_set_zero(struct ggml_tensor * tensor); struct ggml_tensor * ggml_set_zero(struct ggml_tensor * tensor);
struct ggml_tensor * ggml_set_i32 (struct ggml_tensor * tensor, int32_t value); struct ggml_tensor * ggml_set_i32(struct ggml_tensor * tensor, int32_t value);
struct ggml_tensor * ggml_set_f32 (struct ggml_tensor * tensor, float value); struct ggml_tensor * ggml_set_f32(struct ggml_tensor * tensor, float value);
int32_t ggml_get_i32_1d(const struct ggml_tensor * tensor, int i); int32_t ggml_get_i32_1d(const struct ggml_tensor * tensor, int i);
void ggml_set_i32_1d(const struct ggml_tensor * tensor, int i, int32_t value); void ggml_set_i32_1d(const struct ggml_tensor * tensor, int i, int32_t value);
float ggml_get_f32_1d(const struct ggml_tensor * tensor, int i); float ggml_get_f32_1d(const struct ggml_tensor * tensor, int i);
void ggml_set_f32_1d(const struct ggml_tensor * tensor, int i, float value); void ggml_set_f32_1d(const struct ggml_tensor * tensor, int i, float value);
void * ggml_get_data (const struct ggml_tensor * tensor); void * ggml_get_data(const struct ggml_tensor * tensor);
float * ggml_get_data_f32(const struct ggml_tensor * tensor); float * ggml_get_data_f32(const struct ggml_tensor * tensor);
// //
// operations on tensors with backpropagation // operations on tensors with backpropagation
// //
struct ggml_tensor * ggml_dup( struct ggml_tensor * ggml_dup(struct ggml_context * ctx, struct ggml_tensor * a);
struct ggml_context * ctx,
struct ggml_tensor * a);
struct ggml_tensor * ggml_add( struct ggml_tensor * ggml_add(struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b);
struct ggml_context * ctx,
struct ggml_tensor * a,
struct ggml_tensor * b);
struct ggml_tensor * ggml_sub( struct ggml_tensor * ggml_sub(struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b);
struct ggml_context * ctx,
struct ggml_tensor * a,
struct ggml_tensor * b);
struct ggml_tensor * ggml_mul( struct ggml_tensor * ggml_mul(struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b);
struct ggml_context * ctx,
struct ggml_tensor * a,
struct ggml_tensor * b);
struct ggml_tensor * ggml_div( struct ggml_tensor * ggml_div(struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b);
struct ggml_context * ctx,
struct ggml_tensor * a,
struct ggml_tensor * b);
struct ggml_tensor * ggml_sqr( struct ggml_tensor * ggml_sqr(struct ggml_context * ctx, struct ggml_tensor * a);
struct ggml_context * ctx,
struct ggml_tensor * a);
struct ggml_tensor * ggml_sqrt( struct ggml_tensor * ggml_sqrt(struct ggml_context * ctx, struct ggml_tensor * a);
struct ggml_context * ctx,
struct ggml_tensor * a);
// return scalar // return scalar
// TODO: compute sum along rows // TODO: compute sum along rows
struct ggml_tensor * ggml_sum( struct ggml_tensor * ggml_sum(struct ggml_context * ctx, struct ggml_tensor * a);
struct ggml_context * ctx,
struct ggml_tensor * a);
// mean along rows // mean along rows
struct ggml_tensor * ggml_mean( struct ggml_tensor * ggml_mean(struct ggml_context * ctx, struct ggml_tensor * a);
struct ggml_context * ctx,
struct ggml_tensor * a);
// if a is the same shape as b, and a is not parameter, return a // if a is the same shape as b, and a is not parameter, return a
// otherwise, return a new tensor: repeat(a) to fit in b // otherwise, return a new tensor: repeat(a) to fit in b
struct ggml_tensor * ggml_repeat( struct ggml_tensor * ggml_repeat(struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b);
struct ggml_context * ctx,
struct ggml_tensor * a,
struct ggml_tensor * b);
struct ggml_tensor * ggml_abs( struct ggml_tensor * ggml_abs(struct ggml_context * ctx, struct ggml_tensor * a);
struct ggml_context * ctx,
struct ggml_tensor * a);
struct ggml_tensor * ggml_sgn( struct ggml_tensor * ggml_sgn(struct ggml_context * ctx, struct ggml_tensor * a);
struct ggml_context * ctx,
struct ggml_tensor * a);
struct ggml_tensor * ggml_neg( struct ggml_tensor * ggml_neg(struct ggml_context * ctx, struct ggml_tensor * a);
struct ggml_context * ctx,
struct ggml_tensor * a);
struct ggml_tensor * ggml_step( struct ggml_tensor * ggml_step(struct ggml_context * ctx, struct ggml_tensor * a);
struct ggml_context * ctx,
struct ggml_tensor * a);
struct ggml_tensor * ggml_relu( struct ggml_tensor * ggml_relu(struct ggml_context * ctx, struct ggml_tensor * a);
struct ggml_context * ctx,
struct ggml_tensor * a);
// TODO: double-check this computation is correct // TODO: double-check this computation is correct
struct ggml_tensor * ggml_gelu( struct ggml_tensor * ggml_gelu(struct ggml_context * ctx, struct ggml_tensor * a);
struct ggml_context * ctx,
struct ggml_tensor * a);
// normalize along rows // normalize along rows
// TODO: eps is hardcoded to 1e-5 for now // TODO: eps is hardcoded to 1e-5 for now
struct ggml_tensor * ggml_norm( struct ggml_tensor * ggml_norm(struct ggml_context * ctx, struct ggml_tensor * a);
struct ggml_context * ctx,
struct ggml_tensor * a);
// A: m rows, n columns // A: m rows, n columns
// B: p rows, n columns (i.e. we transpose it internally) // B: p rows, n columns (i.e. we transpose it internally)
// result is m columns, p rows // result is m columns, p rows
struct ggml_tensor * ggml_mul_mat( struct ggml_tensor * ggml_mul_mat(struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b);
struct ggml_context * ctx,
struct ggml_tensor * a,
struct ggml_tensor * b);
// //
// operations on tensors without backpropagation // operations on tensors without backpropagation
// //
// in-place, returns view(a) // in-place, returns view(a)
struct ggml_tensor * ggml_scale( struct ggml_tensor * ggml_scale(struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b);
struct ggml_context * ctx,
struct ggml_tensor * a,
struct ggml_tensor * b);
// a -> b, return view(b) // a -> b, return view(b)
struct ggml_tensor * ggml_cpy( struct ggml_tensor * ggml_cpy(struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b);
struct ggml_context * ctx,
struct ggml_tensor * a,
struct ggml_tensor * b);
// return view(a), b specifies the new shape // return view(a), b specifies the new shape
// TODO: when we start computing gradient, make a copy instead of view // TODO: when we start computing gradient, make a copy instead of view
struct ggml_tensor * ggml_reshape( struct ggml_tensor * ggml_reshape(struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b);
struct ggml_context * ctx,
struct ggml_tensor * a,
struct ggml_tensor * b);
// return view(a) // return view(a)
// TODO: when we start computing gradient, make a copy instead of view // TODO: when we start computing gradient, make a copy instead of view
struct ggml_tensor * ggml_reshape_2d( struct ggml_tensor * ggml_reshape_2d(struct ggml_context * ctx, struct ggml_tensor * a, int ne0, int ne1);
struct ggml_context * ctx,
struct ggml_tensor * a,
int ne0,
int ne1);
// return view(a) // return view(a)
// TODO: when we start computing gradient, make a copy instead of view // TODO: when we start computing gradient, make a copy instead of view
struct ggml_tensor * ggml_reshape_3d( struct ggml_tensor * ggml_reshape_3d(struct ggml_context * ctx, struct ggml_tensor * a, int ne0, int ne1, int ne2);
struct ggml_context * ctx,
struct ggml_tensor * a,
int ne0,
int ne1,
int ne2);
// offset in bytes // offset in bytes
struct ggml_tensor * ggml_view_1d( struct ggml_tensor * ggml_view_1d(struct ggml_context * ctx, struct ggml_tensor * a, int ne0, size_t offset);
struct ggml_context * ctx,
struct ggml_tensor * a,
int ne0,
size_t offset);
struct ggml_tensor * ggml_view_2d( struct ggml_tensor * ggml_view_2d(
struct ggml_context * ctx, struct ggml_context * ctx,
struct ggml_tensor * a, struct ggml_tensor * a,
int ne0, int ne0,
int ne1, int ne1,
size_t nb1, // row stride in bytes size_t nb1, // row stride in bytes
size_t offset); size_t offset
);
struct ggml_tensor * ggml_permute( struct ggml_tensor * ggml_permute(
struct ggml_context * ctx, struct ggml_context * ctx,
struct ggml_tensor * a, struct ggml_tensor * a,
int axis0, int axis0,
int axis1, int axis1,
int axis2, int axis2,
int axis3); int axis3
);
// alias for ggml_permute(ctx, a, 1, 0, 2, 3) // alias for ggml_permute(ctx, a, 1, 0, 2, 3)
struct ggml_tensor * ggml_transpose( struct ggml_tensor * ggml_transpose(struct ggml_context * ctx, struct ggml_tensor * a);
struct ggml_context * ctx,
struct ggml_tensor * a);
struct ggml_tensor * ggml_get_rows( struct ggml_tensor * ggml_get_rows(struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b);
struct ggml_context * ctx,
struct ggml_tensor * a,
struct ggml_tensor * b);
// set elements above the diagonal to -INF // set elements above the diagonal to -INF
// in-place, returns view(a) // in-place, returns view(a)
struct ggml_tensor * ggml_diag_mask_inf( struct ggml_tensor * ggml_diag_mask_inf(struct ggml_context * ctx, struct ggml_tensor * a, int n_past);
struct ggml_context * ctx,
struct ggml_tensor * a,
int n_past);
// in-place, returns view(a) // in-place, returns view(a)
struct ggml_tensor * ggml_soft_max( struct ggml_tensor * ggml_soft_max(struct ggml_context * ctx, struct ggml_tensor * a);
struct ggml_context * ctx,
struct ggml_tensor * a);
// rotary position embedding // rotary position embedding
// in-place, returns view(a) // in-place, returns view(a)
// if mode == 1, skip n_past elements // if mode == 1, skip n_past elements
// TODO: avoid creating a new tensor every time // TODO: avoid creating a new tensor every time
struct ggml_tensor * ggml_rope( struct ggml_tensor * ggml_rope(struct ggml_context * ctx, struct ggml_tensor * a, int n_past, int n_dims, int mode);
struct ggml_context * ctx,
struct ggml_tensor * a,
int n_past,
int n_dims,
int mode);
// padding = 1 // padding = 1
// TODO: we don't support extra parameters for now // TODO: we don't support extra parameters for now
// that's why we are hard-coding the stride, padding, and dilation // that's why we are hard-coding the stride, padding, and dilation
// not great .. // not great ..
struct ggml_tensor * ggml_conv_1d_1s( struct ggml_tensor * ggml_conv_1d_1s(struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b);
struct ggml_context * ctx,
struct ggml_tensor * a,
struct ggml_tensor * b);
struct ggml_tensor * ggml_conv_1d_2s( struct ggml_tensor * ggml_conv_1d_2s(struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b);
struct ggml_context * ctx,
struct ggml_tensor * a,
struct ggml_tensor * b);
struct ggml_tensor * ggml_flash_attn( struct ggml_tensor * ggml_flash_attn(
struct ggml_context * ctx, struct ggml_context * ctx,
struct ggml_tensor * q, struct ggml_tensor * q,
struct ggml_tensor * k, struct ggml_tensor * k,
struct ggml_tensor * v, struct ggml_tensor * v,
bool masked); bool masked
);
struct ggml_tensor * ggml_flash_ff( struct ggml_tensor * ggml_flash_ff(
struct ggml_context * ctx, struct ggml_context * ctx,
struct ggml_tensor * a, struct ggml_tensor * a,
struct ggml_tensor * b0, struct ggml_tensor * b0,
struct ggml_tensor * b1, struct ggml_tensor * b1,
struct ggml_tensor * c0, struct ggml_tensor * c0,
struct ggml_tensor * c1); struct ggml_tensor * c1
);
// //
// automatic differentiation // automatic differentiation
// //
void ggml_set_param( void ggml_set_param(struct ggml_context * ctx, struct ggml_tensor * tensor);
struct ggml_context * ctx,
struct ggml_tensor * tensor);
void ggml_build_forward_expand(struct ggml_cgraph * cgraph, struct ggml_tensor * tensor); void ggml_build_forward_expand(struct ggml_cgraph * cgraph, struct ggml_tensor * tensor);
struct ggml_cgraph ggml_build_forward (struct ggml_tensor * tensor); struct ggml_cgraph ggml_build_forward(struct ggml_tensor * tensor);
struct ggml_cgraph ggml_build_backward(struct ggml_context * ctx, struct ggml_cgraph * gf, bool keep); struct ggml_cgraph ggml_build_backward(struct ggml_context * ctx, struct ggml_cgraph * gf, bool keep);
void ggml_graph_compute(struct ggml_context * ctx, struct ggml_cgraph * cgraph); void ggml_graph_compute(struct ggml_context * ctx, struct ggml_cgraph * cgraph);
void ggml_graph_reset (struct ggml_cgraph * cgraph); void ggml_graph_reset(struct ggml_cgraph * cgraph);
// print info and performance information for the graph // print info and performance information for the graph
void ggml_graph_print(const struct ggml_cgraph * cgraph); void ggml_graph_print(const struct ggml_cgraph * cgraph);
@ -699,8 +601,8 @@ struct ggml_opt_params {
int n_iter; int n_iter;
int max_linesearch; int max_linesearch;
float eps; // convergence tolerance float eps; // convergence tolerance
float ftol; // line search tolerance float ftol; // line search tolerance
float wolfe; float wolfe;
float min_step; float min_step;
float max_step; float max_step;
@ -712,10 +614,7 @@ struct ggml_opt_params {
struct ggml_opt_params ggml_opt_default_params(enum ggml_opt_type type); struct ggml_opt_params ggml_opt_default_params(enum ggml_opt_type type);
// optimize the function defined by the tensor f // optimize the function defined by the tensor f
enum ggml_opt_result ggml_opt( enum ggml_opt_result ggml_opt(struct ggml_context * ctx, struct ggml_opt_params params, struct ggml_tensor * f);
struct ggml_context * ctx,
struct ggml_opt_params params,
struct ggml_tensor * f);
// //
// system info // system info
@ -732,6 +631,6 @@ int ggml_cpu_has_fp16_va(void);
int ggml_cpu_has_wasm_simd(void); int ggml_cpu_has_wasm_simd(void);
int ggml_cpu_has_blas(void); int ggml_cpu_has_blas(void);
#ifdef __cplusplus #ifdef __cplusplus
} }
#endif #endif

File diff suppressed because it is too large Load Diff

@ -1,8 +1,8 @@
#ifndef WHISPER_H #ifndef WHISPER_H
#define WHISPER_H #define WHISPER_H
#include <stdint.h>
#include <stdbool.h> #include <stdbool.h>
#include <stdint.h>
#ifdef WHISPER_SHARED #ifdef WHISPER_SHARED
# ifdef _WIN32 # ifdef _WIN32
@ -12,7 +12,7 @@
# define WHISPER_API __declspec(dllimport) # define WHISPER_API __declspec(dllimport)
# endif # endif
# else # else
# define WHISPER_API __attribute__ ((visibility ("default"))) # define WHISPER_API __attribute__((visibility("default")))
# endif # endif
#else #else
# define WHISPER_API # define WHISPER_API
@ -28,301 +28,295 @@
extern "C" { extern "C" {
#endif #endif
// //
// C interface // C interface
// //
// The following interface is thread-safe as long as the sample whisper_context is not used by multiple threads // The following interface is thread-safe as long as the sample whisper_context is not used by multiple threads
// concurrently. // concurrently.
// //
// Basic usage: // Basic usage:
// //
// #include "whisper.h" // #include "whisper.h"
// //
// ... // ...
// //
// struct whisper_context * ctx = whisper_init("/path/to/ggml-base.en.bin"); // struct whisper_context * ctx = whisper_init("/path/to/ggml-base.en.bin");
// //
// if (whisper_full(ctx, wparams, pcmf32.data(), pcmf32.size()) != 0) { // if (whisper_full(ctx, wparams, pcmf32.data(), pcmf32.size()) != 0) {
// fprintf(stderr, "failed to process audio\n"); // fprintf(stderr, "failed to process audio\n");
// return 7; // return 7;
// } // }
// //
// const int n_segments = whisper_full_n_segments(ctx); // const int n_segments = whisper_full_n_segments(ctx);
// for (int i = 0; i < n_segments; ++i) { // for (int i = 0; i < n_segments; ++i) {
// const char * text = whisper_full_get_segment_text(ctx, i); // const char * text = whisper_full_get_segment_text(ctx, i);
// printf("%s", text); // printf("%s", text);
// } // }
// //
// whisper_free(ctx); // whisper_free(ctx);
// //
// ... // ...
// //
// This is a demonstration of the most straightforward usage of the library. // This is a demonstration of the most straightforward usage of the library.
// "pcmf32" contains the RAW audio data in 32-bit floating point format. // "pcmf32" contains the RAW audio data in 32-bit floating point format.
// //
// The interface also allows for more fine-grained control over the computation, but it requires a deeper // The interface also allows for more fine-grained control over the computation, but it requires a deeper
// understanding of how the model works. // understanding of how the model works.
// //
struct whisper_context; struct whisper_context;
typedef int whisper_token; typedef int whisper_token;
typedef struct whisper_token_data { typedef struct whisper_token_data {
whisper_token id; // token id whisper_token id; // token id
whisper_token tid; // forced timestamp token id whisper_token tid; // forced timestamp token id
float p; // probability of the token float p; // probability of the token
float pt; // probability of the timestamp token float pt; // probability of the timestamp token
float ptsum; // sum of probabilities of all timestamp tokens float ptsum; // sum of probabilities of all timestamp tokens
// token-level timestamp data // token-level timestamp data
// do not use if you haven't computed token-level timestamps // do not use if you haven't computed token-level timestamps
int64_t t0; // start time of the token int64_t t0; // start time of the token
int64_t t1; // end time of the token int64_t t1; // end time of the token
float vlen; // voice length of the token float vlen; // voice length of the token
} whisper_token_data; } whisper_token_data;
// Allocates all memory needed for the model and loads the model from the given file. // Allocates all memory needed for the model and loads the model from the given file.
// Returns NULL on failure. // Returns NULL on failure.
WHISPER_API struct whisper_context * whisper_init(const char * path_model); WHISPER_API struct whisper_context * whisper_init(const char * path_model);
// Frees all memory allocated by the model. // Frees all memory allocated by the model.
WHISPER_API void whisper_free(struct whisper_context * ctx); WHISPER_API void whisper_free(struct whisper_context * ctx);
// Convert RAW PCM audio to log mel spectrogram. // Convert RAW PCM audio to log mel spectrogram.
// The resulting spectrogram is stored inside the provided whisper context. // The resulting spectrogram is stored inside the provided whisper context.
// Returns 0 on success // Returns 0 on success
WHISPER_API int whisper_pcm_to_mel( WHISPER_API int whisper_pcm_to_mel(struct whisper_context * ctx, const float * samples, int n_samples, int n_threads);
struct whisper_context * ctx,
const float * samples, // This can be used to set a custom log mel spectrogram inside the provided whisper context.
int n_samples, // Use this instead of whisper_pcm_to_mel() if you want to provide your own log mel spectrogram.
int n_threads); // n_mel must be 80
// Returns 0 on success
// This can be used to set a custom log mel spectrogram inside the provided whisper context. WHISPER_API int whisper_set_mel(struct whisper_context * ctx, const float * data, int n_len, int n_mel);
// Use this instead of whisper_pcm_to_mel() if you want to provide your own log mel spectrogram.
// n_mel must be 80 // Run the Whisper encoder on the log mel spectrogram stored inside the provided whisper context.
// Returns 0 on success // Make sure to call whisper_pcm_to_mel() or whisper_set_mel() first.
WHISPER_API int whisper_set_mel( // offset can be used to specify the offset of the first frame in the spectrogram.
struct whisper_context * ctx, // Returns 0 on success
const float * data, WHISPER_API int whisper_encode(struct whisper_context * ctx, int offset, int n_threads);
int n_len,
int n_mel); // Run the Whisper decoder to obtain the logits and probabilities for the next token.
// Make sure to call whisper_encode() first.
// Run the Whisper encoder on the log mel spectrogram stored inside the provided whisper context. // tokens + n_tokens is the provided context for the decoder.
// Make sure to call whisper_pcm_to_mel() or whisper_set_mel() first. // n_past is the number of tokens to use from previous decoder calls.
// offset can be used to specify the offset of the first frame in the spectrogram. // Returns 0 on success
// Returns 0 on success WHISPER_API int whisper_decode(
WHISPER_API int whisper_encode( struct whisper_context * ctx,
struct whisper_context * ctx, const whisper_token * tokens,
int offset, int n_tokens,
int n_threads); int n_past,
int n_threads
// Run the Whisper decoder to obtain the logits and probabilities for the next token. );
// Make sure to call whisper_encode() first.
// tokens + n_tokens is the provided context for the decoder. // Token sampling methods.
// n_past is the number of tokens to use from previous decoder calls. // These are provided for convenience and can be used after each call to whisper_decode().
// Returns 0 on success // You can also implement your own sampling method using the whisper_get_probs() function.
WHISPER_API int whisper_decode( // whisper_sample_best() returns the token with the highest probability
struct whisper_context * ctx, // whisper_sample_timestamp() returns the most probable timestamp token
const whisper_token * tokens, WHISPER_API whisper_token_data whisper_sample_best(struct whisper_context * ctx);
int n_tokens, WHISPER_API whisper_token_data whisper_sample_timestamp(struct whisper_context * ctx, bool is_initial);
int n_past,
int n_threads); // Convert the provided text into tokens.
// The tokens pointer must be large enough to hold the resulting tokens.
// Token sampling methods. // Returns the number of tokens on success, no more than n_max_tokens
// These are provided for convenience and can be used after each call to whisper_decode(). // Returns -1 on failure
// You can also implement your own sampling method using the whisper_get_probs() function. // TODO: not sure if correct
// whisper_sample_best() returns the token with the highest probability WHISPER_API int whisper_tokenize(
// whisper_sample_timestamp() returns the most probable timestamp token struct whisper_context * ctx,
WHISPER_API whisper_token_data whisper_sample_best(struct whisper_context * ctx); const char * text,
WHISPER_API whisper_token_data whisper_sample_timestamp(struct whisper_context * ctx, bool is_initial); whisper_token * tokens,
int n_max_tokens
// Convert the provided text into tokens. );
// The tokens pointer must be large enough to hold the resulting tokens.
// Returns the number of tokens on success, no more than n_max_tokens // Largest language id (i.e. number of available languages - 1)
// Returns -1 on failure WHISPER_API int whisper_lang_max_id();
// TODO: not sure if correct
WHISPER_API int whisper_tokenize( // Return the id of the specified language, returns -1 if not found
struct whisper_context * ctx, // Examples:
const char * text, // "de" -> 2
whisper_token * tokens, // "german" -> 2
int n_max_tokens); WHISPER_API int whisper_lang_id(const char * lang);
// Largest language id (i.e. number of available languages - 1) // Return the short string of the specified language id (e.g. 2 -> "de"), returns nullptr if not found
WHISPER_API int whisper_lang_max_id(); WHISPER_API const char * whisper_lang_str(int id);
// Return the id of the specified language, returns -1 if not found // Use mel data at offset_ms to try and auto-detect the spoken language
// Examples: // Make sure to call whisper_pcm_to_mel() or whisper_set_mel() first
// "de" -> 2 // Returns the top language id or negative on failure
// "german" -> 2 // If not null, fills the lang_probs array with the probabilities of all languages
WHISPER_API int whisper_lang_id(const char * lang); // The array must be whispe_lang_max_id() + 1 in size
// ref: https://github.com/openai/whisper/blob/main/whisper/decoding.py#L18-L69
// Return the short string of the specified language id (e.g. 2 -> "de"), returns nullptr if not found WHISPER_API int whisper_lang_auto_detect(
WHISPER_API const char * whisper_lang_str(int id); struct whisper_context * ctx,
int offset_ms,
// Use mel data at offset_ms to try and auto-detect the spoken language int n_threads,
// Make sure to call whisper_pcm_to_mel() or whisper_set_mel() first float * lang_probs
// Returns the top language id or negative on failure );
// If not null, fills the lang_probs array with the probabilities of all languages
// The array must be whispe_lang_max_id() + 1 in size WHISPER_API int whisper_n_len(struct whisper_context * ctx); // mel length
// ref: https://github.com/openai/whisper/blob/main/whisper/decoding.py#L18-L69 WHISPER_API int whisper_n_vocab(struct whisper_context * ctx);
WHISPER_API int whisper_lang_auto_detect( WHISPER_API int whisper_n_text_ctx(struct whisper_context * ctx);
struct whisper_context * ctx, WHISPER_API int whisper_n_audio_ctx(struct whisper_context * ctx);
int offset_ms, WHISPER_API int whisper_is_multilingual(struct whisper_context * ctx);
int n_threads,
float * lang_probs); // The probabilities for the next token
WHISPER_API float * whisper_get_probs(struct whisper_context * ctx);
WHISPER_API int whisper_n_len (struct whisper_context * ctx); // mel length
WHISPER_API int whisper_n_vocab (struct whisper_context * ctx); // Token Id -> String. Uses the vocabulary in the provided context
WHISPER_API int whisper_n_text_ctx (struct whisper_context * ctx); WHISPER_API const char * whisper_token_to_str(struct whisper_context * ctx, whisper_token token);
WHISPER_API int whisper_n_audio_ctx (struct whisper_context * ctx);
WHISPER_API int whisper_is_multilingual(struct whisper_context * ctx); // Special tokens
WHISPER_API whisper_token whisper_token_eot(struct whisper_context * ctx);
// The probabilities for the next token WHISPER_API whisper_token whisper_token_sot(struct whisper_context * ctx);
WHISPER_API float * whisper_get_probs(struct whisper_context * ctx); WHISPER_API whisper_token whisper_token_prev(struct whisper_context * ctx);
WHISPER_API whisper_token whisper_token_solm(struct whisper_context * ctx);
// Token Id -> String. Uses the vocabulary in the provided context WHISPER_API whisper_token whisper_token_not(struct whisper_context * ctx);
WHISPER_API const char * whisper_token_to_str(struct whisper_context * ctx, whisper_token token); WHISPER_API whisper_token whisper_token_beg(struct whisper_context * ctx);
WHISPER_API whisper_token whisper_token_lang(struct whisper_context * ctx, int lang_id);
// Special tokens
WHISPER_API whisper_token whisper_token_eot (struct whisper_context * ctx); // Task tokens
WHISPER_API whisper_token whisper_token_sot (struct whisper_context * ctx); WHISPER_API whisper_token whisper_token_translate(void);
WHISPER_API whisper_token whisper_token_prev(struct whisper_context * ctx); WHISPER_API whisper_token whisper_token_transcribe(void);
WHISPER_API whisper_token whisper_token_solm(struct whisper_context * ctx);
WHISPER_API whisper_token whisper_token_not (struct whisper_context * ctx); // Performance information
WHISPER_API whisper_token whisper_token_beg (struct whisper_context * ctx); WHISPER_API void whisper_print_timings(struct whisper_context * ctx);
WHISPER_API whisper_token whisper_token_lang(struct whisper_context * ctx, int lang_id); WHISPER_API void whisper_reset_timings(struct whisper_context * ctx);
// Task tokens // Print system information
WHISPER_API whisper_token whisper_token_translate (void); WHISPER_API const char * whisper_print_system_info(void);
WHISPER_API whisper_token whisper_token_transcribe(void);
////////////////////////////////////////////////////////////////////////////
// Performance information
WHISPER_API void whisper_print_timings(struct whisper_context * ctx); // Available sampling strategies
WHISPER_API void whisper_reset_timings(struct whisper_context * ctx); enum whisper_sampling_strategy {
WHISPER_SAMPLING_GREEDY, // Always select the most probable token
// Print system information WHISPER_SAMPLING_BEAM_SEARCH, // TODO: not implemented yet!
WHISPER_API const char * whisper_print_system_info(void); };
//////////////////////////////////////////////////////////////////////////// // Text segment callback
// Called on every newly generated text segment
// Available sampling strategies // Use the whisper_full_...() functions to obtain the text segments
enum whisper_sampling_strategy { typedef void (*whisper_new_segment_callback)(struct whisper_context * ctx, int n_new, void * user_data);
WHISPER_SAMPLING_GREEDY, // Always select the most probable token
WHISPER_SAMPLING_BEAM_SEARCH, // TODO: not implemented yet! // Encoder begin callback
}; // If not NULL, called before the encoder starts
// If it returns false, the computation is aborted
// Text segment callback typedef bool (*whisper_encoder_begin_callback)(struct whisper_context * ctx, void * user_data);
// Called on every newly generated text segment
// Use the whisper_full_...() functions to obtain the text segments // Parameters for the whisper_full() function
typedef void (*whisper_new_segment_callback)(struct whisper_context * ctx, int n_new, void * user_data); // If you chnage the order or add new parameters, make sure to update the default values in whisper.cpp:
// whisper_full_default_params()
// Encoder begin callback struct whisper_full_params {
// If not NULL, called before the encoder starts enum whisper_sampling_strategy strategy;
// If it returns false, the computation is aborted
typedef bool (*whisper_encoder_begin_callback)(struct whisper_context * ctx, void * user_data); int n_threads;
int n_max_text_ctx;
// Parameters for the whisper_full() function int offset_ms; // start offset in ms
// If you chnage the order or add new parameters, make sure to update the default values in whisper.cpp: int duration_ms; // audio duration to process in ms
// whisper_full_default_params()
struct whisper_full_params { bool translate;
enum whisper_sampling_strategy strategy; bool no_context;
bool single_segment; // force single segment output (useful for streaming)
int n_threads; bool print_special;
int n_max_text_ctx; bool print_progress;
int offset_ms; // start offset in ms bool print_realtime;
int duration_ms; // audio duration to process in ms bool print_timestamps;
bool translate; // [EXPERIMENTAL] token-level timestamps
bool no_context; bool token_timestamps; // enable token-level timestamps
bool single_segment; // force single segment output (useful for streaming) float thold_pt; // timestamp token probability threshold (~0.01)
bool print_special; float thold_ptsum; // timestamp token sum probability threshold (~0.01)
bool print_progress; int max_len; // max segment length in characters
bool print_realtime; int max_tokens; // max tokens per segment (0 = no limit)
bool print_timestamps;
// [EXPERIMENTAL] speed-up techniques
// [EXPERIMENTAL] token-level timestamps bool speed_up; // speed-up the audio by 2x using Phase Vocoder
bool token_timestamps; // enable token-level timestamps int audio_ctx; // overwrite the audio context size (0 = use default)
float thold_pt; // timestamp token probability threshold (~0.01)
float thold_ptsum; // timestamp token sum probability threshold (~0.01) // tokens to provide the whisper model as initial prompt
int max_len; // max segment length in characters // these are prepended to any existing text context from a previous call
int max_tokens; // max tokens per segment (0 = no limit) const whisper_token * prompt_tokens;
int prompt_n_tokens;
// [EXPERIMENTAL] speed-up techniques
bool speed_up; // speed-up the audio by 2x using Phase Vocoder // for auto-detection, set to nullptr, "" or "auto"
int audio_ctx; // overwrite the audio context size (0 = use default) const char * language;
// tokens to provide the whisper model as initial prompt struct {
// these are prepended to any existing text context from a previous call int n_past;
const whisper_token * prompt_tokens; } greedy;
int prompt_n_tokens;
struct {
// for auto-detection, set to nullptr, "" or "auto" int n_past;
const char * language; int beam_width;
int n_best;
struct { } beam_search;
int n_past;
} greedy; whisper_new_segment_callback new_segment_callback;
void * new_segment_callback_user_data;
struct {
int n_past; whisper_encoder_begin_callback encoder_begin_callback;
int beam_width; void * encoder_begin_callback_user_data;
int n_best; };
} beam_search;
WHISPER_API struct whisper_full_params whisper_full_default_params(enum whisper_sampling_strategy strategy);
whisper_new_segment_callback new_segment_callback;
void * new_segment_callback_user_data; // Run the entire model: PCM -> log mel spectrogram -> encoder -> decoder -> text
// Uses the specified decoding strategy to obtain the text.
whisper_encoder_begin_callback encoder_begin_callback; WHISPER_API int whisper_full(
void * encoder_begin_callback_user_data; struct whisper_context * ctx,
}; struct whisper_full_params params,
const float * samples,
WHISPER_API struct whisper_full_params whisper_full_default_params(enum whisper_sampling_strategy strategy); int n_samples
);
// Run the entire model: PCM -> log mel spectrogram -> encoder -> decoder -> text
// Uses the specified decoding strategy to obtain the text. // Split the input audio in chunks and process each chunk separately using whisper_full()
WHISPER_API int whisper_full( // It seems this approach can offer some speedup in some cases.
struct whisper_context * ctx, // However, the transcription accuracy can be worse at the beginning and end of each chunk.
struct whisper_full_params params, WHISPER_API int whisper_full_parallel(
const float * samples, struct whisper_context * ctx,
int n_samples); struct whisper_full_params params,
const float * samples,
// Split the input audio in chunks and process each chunk separately using whisper_full() int n_samples,
// It seems this approach can offer some speedup in some cases. int n_processors
// However, the transcription accuracy can be worse at the beginning and end of each chunk. );
WHISPER_API int whisper_full_parallel(
struct whisper_context * ctx, // Number of generated text segments.
struct whisper_full_params params, // A segment can be a few words, a sentence, or even a paragraph.
const float * samples, WHISPER_API int whisper_full_n_segments(struct whisper_context * ctx);
int n_samples,
int n_processors); // Get the start and end time of the specified segment.
WHISPER_API int64_t whisper_full_get_segment_t0(struct whisper_context * ctx, int i_segment);
// Number of generated text segments. WHISPER_API int64_t whisper_full_get_segment_t1(struct whisper_context * ctx, int i_segment);
// A segment can be a few words, a sentence, or even a paragraph.
WHISPER_API int whisper_full_n_segments(struct whisper_context * ctx); // Get the text of the specified segment.
WHISPER_API const char * whisper_full_get_segment_text(struct whisper_context * ctx, int i_segment);
// Get the start and end time of the specified segment.
WHISPER_API int64_t whisper_full_get_segment_t0(struct whisper_context * ctx, int i_segment); // Get number of tokens in the specified segment.
WHISPER_API int64_t whisper_full_get_segment_t1(struct whisper_context * ctx, int i_segment); WHISPER_API int whisper_full_n_tokens(struct whisper_context * ctx, int i_segment);
// Get the text of the specified segment. // Get the token text of the specified token in the specified segment.
WHISPER_API const char * whisper_full_get_segment_text(struct whisper_context * ctx, int i_segment); WHISPER_API const char * whisper_full_get_token_text(struct whisper_context * ctx, int i_segment, int i_token);
WHISPER_API whisper_token whisper_full_get_token_id(struct whisper_context * ctx, int i_segment, int i_token);
// Get number of tokens in the specified segment.
WHISPER_API int whisper_full_n_tokens(struct whisper_context * ctx, int i_segment); // Get token data for the specified token in the specified segment.
// This contains probabilities, timestamps, etc.
// Get the token text of the specified token in the specified segment. WHISPER_API whisper_token_data whisper_full_get_token_data(struct whisper_context * ctx, int i_segment, int i_token);
WHISPER_API const char * whisper_full_get_token_text(struct whisper_context * ctx, int i_segment, int i_token);
WHISPER_API whisper_token whisper_full_get_token_id (struct whisper_context * ctx, int i_segment, int i_token); // Get the probability of the specified token in the specified segment.
WHISPER_API float whisper_full_get_token_p(struct whisper_context * ctx, int i_segment, int i_token);
// Get token data for the specified token in the specified segment.
// This contains probabilities, timestamps, etc.
WHISPER_API whisper_token_data whisper_full_get_token_data(struct whisper_context * ctx, int i_segment, int i_token);
// Get the probability of the specified token in the specified segment.
WHISPER_API float whisper_full_get_token_p(struct whisper_context * ctx, int i_segment, int i_token);
#ifdef __cplusplus #ifdef __cplusplus
} }

Loading…
Cancel
Save