Add .clang-format

pull/359/head
Andy Maloney 2 years ago
parent 0be6a1afd9
commit 71ad534079

.clang-format

@@ -0,0 +1,91 @@
# Options are listed here:
# https://clang.llvm.org/docs/ClangFormatStyleOptions.html
---
AccessModifierOffset: -3
AlignAfterOpenBracket: BlockIndent
AlignArrayOfStructures: Left
AlignConsecutiveAssignments: true
AlignConsecutiveDeclarations: false
AlignConsecutiveMacros: true
AlignEscapedNewlines: Right
AlignOperands: true
AlignTrailingComments: true
AllowAllArgumentsOnNextLine: false
AllowAllConstructorInitializersOnNextLine: false
AllowAllParametersOfDeclarationOnNextLine: false
AllowShortBlocksOnASingleLine: false
AllowShortCaseLabelsOnASingleLine: false
AllowShortFunctionsOnASingleLine: None
AllowShortLambdasOnASingleLine: All
AllowShortIfStatementsOnASingleLine: Never
AllowShortLoopsOnASingleLine: false
AlwaysBreakAfterDefinitionReturnType: None
AlwaysBreakAfterReturnType: None
AlwaysBreakBeforeMultilineStrings: false
AlwaysBreakTemplateDeclarations: MultiLine
BinPackArguments: false
BinPackParameters: false
BitFieldColonSpacing: Both
BreakBeforeBinaryOperators: None
BreakBeforeBraces: Attach
BreakBeforeConceptDeclarations: Always
BreakBeforeInheritanceComma: false
BreakInheritanceList: BeforeColon
BreakBeforeTernaryOperators: true
BreakConstructorInitializersBeforeComma: true
BreakConstructorInitializers: AfterColon
BreakStringLiterals: true
ColumnLimit: 120
CompactNamespaces: false
ConstructorInitializerIndentWidth: 4
ContinuationIndentWidth: 4
Cpp11BracedListStyle: false
DeriveLineEnding: false
DerivePointerAlignment: false
EmptyLineBeforeAccessModifier: LogicalBlock
EmptyLineAfterAccessModifier: Never
FixNamespaceComments: false
IncludeBlocks: Preserve
IncludeIsMainRegex: '(Test)?$'
IndentCaseLabels: true
IndentPPDirectives: AfterHash
IndentWidth: 4
IndentWrappedFunctionNames: true
InsertBraces: true
KeepEmptyLinesAtTheStartOfBlocks: true
Language: Cpp
MaxEmptyLinesToKeep: 1
NamespaceIndentation: All
PackConstructorInitializers: Never
PenaltyBreakAssignment: 2
PenaltyBreakBeforeFirstCallParameter: 19
PenaltyBreakComment: 300
PenaltyBreakFirstLessLess: 120
PenaltyBreakString: 1000
PenaltyExcessCharacter: 1000000
PenaltyReturnTypeOnItsOwnLine: 1000
PointerAlignment: Middle
ReflowComments: true
SortIncludes: true
SortUsingDeclarations: true
SpaceAfterCStyleCast: true
SpaceAfterLogicalNot: false
SpaceAfterTemplateKeyword: true
SpaceBeforeAssignmentOperators: true
SpaceBeforeCaseColon: false
SpaceBeforeCpp11BracedList: false
SpaceBeforeCtorInitializerColon: true
SpaceBeforeInheritanceColon: true
SpaceBeforeParens: ControlStatements
SpaceBeforeRangeBasedForLoopColon: true
SpaceInEmptyParentheses: false
SpacesBeforeTrailingComments: 1
SpacesInAngles: false
SpacesInContainerLiterals: true
SpacesInCStyleCastParentheses: false
SpacesInParentheses: false
SpacesInSquareBrackets: false
Standard: c++11
TabWidth: 4
UseCRLF: false
UseTab: Never
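
For reference, the options that most visibly shape the header diffs below are ColumnLimit: 120, BinPackParameters: false, and AlignAfterOpenBracket: BlockIndent: a declaration that fits in 120 columns collapses onto one line, while anything longer gets one parameter per line with the closing parenthesis unindented on its own line. A minimal sketch with made-up declarations (not from this commit):

// Under 120 columns: parameters stay on one line.
struct ggml_tensor * example_short(struct ggml_context * ctx, struct ggml_tensor * a);

// Over 120 columns: one parameter per line, closing parenthesis on its
// own line (AlignAfterOpenBracket: BlockIndent, ContinuationIndentWidth: 4).
struct ggml_tensor * example_long(
    struct ggml_context * ctx,
    struct ggml_tensor * a_tensor_with_a_long_name,
    struct ggml_tensor * another_tensor_with_a_long_name,
    int n_past,
    int n_dims,
    int mode
);

Applying the style is the usual clang-format -i <files>. Note that BlockIndent and InsertBraces are relatively recent options (added around clang-format 14 and 15, respectively), so older clang-format releases should be expected to reject this configuration file.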

ggml.h (199 changed lines)

@@ -173,9 +173,9 @@
 extern "C" {
 #endif

-#include <stdint.h>
-#include <stddef.h>
 #include <stdbool.h>
+#include <stddef.h>
+#include <stdint.h>

 #define GGML_MAX_DIMS 4
 #define GGML_MAX_NODES 4096
@@ -327,29 +327,13 @@ void ggml_free(struct ggml_context * ctx);

 size_t ggml_used_mem(const struct ggml_context * ctx);

-struct ggml_tensor * ggml_new_tensor(
-        struct ggml_context * ctx,
-        enum ggml_type type,
-        int n_dims,
-        const int *ne);
+struct ggml_tensor * ggml_new_tensor(struct ggml_context * ctx, enum ggml_type type, int n_dims, const int * ne);

-struct ggml_tensor * ggml_new_tensor_1d(
-        struct ggml_context * ctx,
-        enum ggml_type type,
-        int ne0);
+struct ggml_tensor * ggml_new_tensor_1d(struct ggml_context * ctx, enum ggml_type type, int ne0);

-struct ggml_tensor * ggml_new_tensor_2d(
-        struct ggml_context * ctx,
-        enum ggml_type type,
-        int ne0,
-        int ne1);
+struct ggml_tensor * ggml_new_tensor_2d(struct ggml_context * ctx, enum ggml_type type, int ne0, int ne1);

-struct ggml_tensor * ggml_new_tensor_3d(
-        struct ggml_context * ctx,
-        enum ggml_type type,
-        int ne0,
-        int ne1,
-        int ne2);
+struct ggml_tensor * ggml_new_tensor_3d(struct ggml_context * ctx, enum ggml_type type, int ne0, int ne1, int ne2);

 struct ggml_tensor * ggml_new_tensor_4d(
         struct ggml_context * ctx,
@@ -357,7 +341,8 @@ struct ggml_tensor * ggml_new_tensor_4d(
         int ne0,
         int ne1,
         int ne2,
-        int ne3);
+        int ne3
+);

 struct ggml_tensor * ggml_new_i32(struct ggml_context * ctx, int32_t value);
 struct ggml_tensor * ggml_new_f32(struct ggml_context * ctx, float value);
@@ -382,141 +367,77 @@ float * ggml_get_data_f32(const struct ggml_tensor * tensor);

 //
 // operations on tensors with backpropagation
 //

-struct ggml_tensor * ggml_dup(
-        struct ggml_context * ctx,
-        struct ggml_tensor * a);
+struct ggml_tensor * ggml_dup(struct ggml_context * ctx, struct ggml_tensor * a);

-struct ggml_tensor * ggml_add(
-        struct ggml_context * ctx,
-        struct ggml_tensor * a,
-        struct ggml_tensor * b);
+struct ggml_tensor * ggml_add(struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b);

-struct ggml_tensor * ggml_sub(
-        struct ggml_context * ctx,
-        struct ggml_tensor * a,
-        struct ggml_tensor * b);
+struct ggml_tensor * ggml_sub(struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b);

-struct ggml_tensor * ggml_mul(
-        struct ggml_context * ctx,
-        struct ggml_tensor * a,
-        struct ggml_tensor * b);
+struct ggml_tensor * ggml_mul(struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b);

-struct ggml_tensor * ggml_div(
-        struct ggml_context * ctx,
-        struct ggml_tensor * a,
-        struct ggml_tensor * b);
+struct ggml_tensor * ggml_div(struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b);

-struct ggml_tensor * ggml_sqr(
-        struct ggml_context * ctx,
-        struct ggml_tensor * a);
+struct ggml_tensor * ggml_sqr(struct ggml_context * ctx, struct ggml_tensor * a);

-struct ggml_tensor * ggml_sqrt(
-        struct ggml_context * ctx,
-        struct ggml_tensor * a);
+struct ggml_tensor * ggml_sqrt(struct ggml_context * ctx, struct ggml_tensor * a);

 // return scalar
 // TODO: compute sum along rows
-struct ggml_tensor * ggml_sum(
-        struct ggml_context * ctx,
-        struct ggml_tensor * a);
+struct ggml_tensor * ggml_sum(struct ggml_context * ctx, struct ggml_tensor * a);

 // mean along rows
-struct ggml_tensor * ggml_mean(
-        struct ggml_context * ctx,
-        struct ggml_tensor * a);
+struct ggml_tensor * ggml_mean(struct ggml_context * ctx, struct ggml_tensor * a);

 // if a is the same shape as b, and a is not parameter, return a
 // otherwise, return a new tensor: repeat(a) to fit in b
-struct ggml_tensor * ggml_repeat(
-        struct ggml_context * ctx,
-        struct ggml_tensor * a,
-        struct ggml_tensor * b);
+struct ggml_tensor * ggml_repeat(struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b);

-struct ggml_tensor * ggml_abs(
-        struct ggml_context * ctx,
-        struct ggml_tensor * a);
+struct ggml_tensor * ggml_abs(struct ggml_context * ctx, struct ggml_tensor * a);

-struct ggml_tensor * ggml_sgn(
-        struct ggml_context * ctx,
-        struct ggml_tensor * a);
+struct ggml_tensor * ggml_sgn(struct ggml_context * ctx, struct ggml_tensor * a);

-struct ggml_tensor * ggml_neg(
-        struct ggml_context * ctx,
-        struct ggml_tensor * a);
+struct ggml_tensor * ggml_neg(struct ggml_context * ctx, struct ggml_tensor * a);

-struct ggml_tensor * ggml_step(
-        struct ggml_context * ctx,
-        struct ggml_tensor * a);
+struct ggml_tensor * ggml_step(struct ggml_context * ctx, struct ggml_tensor * a);

-struct ggml_tensor * ggml_relu(
-        struct ggml_context * ctx,
-        struct ggml_tensor * a);
+struct ggml_tensor * ggml_relu(struct ggml_context * ctx, struct ggml_tensor * a);

 // TODO: double-check this computation is correct
-struct ggml_tensor * ggml_gelu(
-        struct ggml_context * ctx,
-        struct ggml_tensor * a);
+struct ggml_tensor * ggml_gelu(struct ggml_context * ctx, struct ggml_tensor * a);

 // normalize along rows
 // TODO: eps is hardcoded to 1e-5 for now
-struct ggml_tensor * ggml_norm(
-        struct ggml_context * ctx,
-        struct ggml_tensor * a);
+struct ggml_tensor * ggml_norm(struct ggml_context * ctx, struct ggml_tensor * a);

 // A: m rows, n columns
 // B: p rows, n columns (i.e. we transpose it internally)
 // result is m columns, p rows
-struct ggml_tensor * ggml_mul_mat(
-        struct ggml_context * ctx,
-        struct ggml_tensor * a,
-        struct ggml_tensor * b);
+struct ggml_tensor * ggml_mul_mat(struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b);

 //
 // operations on tensors without backpropagation
 //

 // in-place, returns view(a)
-struct ggml_tensor * ggml_scale(
-        struct ggml_context * ctx,
-        struct ggml_tensor * a,
-        struct ggml_tensor * b);
+struct ggml_tensor * ggml_scale(struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b);

 // a -> b, return view(b)
-struct ggml_tensor * ggml_cpy(
-        struct ggml_context * ctx,
-        struct ggml_tensor * a,
-        struct ggml_tensor * b);
+struct ggml_tensor * ggml_cpy(struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b);

 // return view(a), b specifies the new shape
 // TODO: when we start computing gradient, make a copy instead of view
-struct ggml_tensor * ggml_reshape(
-        struct ggml_context * ctx,
-        struct ggml_tensor * a,
-        struct ggml_tensor * b);
+struct ggml_tensor * ggml_reshape(struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b);

 // return view(a)
 // TODO: when we start computing gradient, make a copy instead of view
-struct ggml_tensor * ggml_reshape_2d(
-        struct ggml_context * ctx,
-        struct ggml_tensor * a,
-        int ne0,
-        int ne1);
+struct ggml_tensor * ggml_reshape_2d(struct ggml_context * ctx, struct ggml_tensor * a, int ne0, int ne1);

 // return view(a)
 // TODO: when we start computing gradient, make a copy instead of view
-struct ggml_tensor * ggml_reshape_3d(
-        struct ggml_context * ctx,
-        struct ggml_tensor * a,
-        int ne0,
-        int ne1,
-        int ne2);
+struct ggml_tensor * ggml_reshape_3d(struct ggml_context * ctx, struct ggml_tensor * a, int ne0, int ne1, int ne2);

 // offset in bytes
-struct ggml_tensor * ggml_view_1d(
-        struct ggml_context * ctx,
-        struct ggml_tensor * a,
-        int ne0,
-        size_t offset);
+struct ggml_tensor * ggml_view_1d(struct ggml_context * ctx, struct ggml_tensor * a, int ne0, size_t offset);

 struct ggml_tensor * ggml_view_2d(
         struct ggml_context * ctx,
@@ -524,7 +445,8 @@ struct ggml_tensor * ggml_view_2d(
         int ne0,
         int ne1,
         size_t nb1, // row stride in bytes
-        size_t offset);
+        size_t offset
+);

 struct ggml_tensor * ggml_permute(
         struct ggml_context * ctx,
@@ -532,61 +454,42 @@ struct ggml_tensor * ggml_permute(
         int axis0,
         int axis1,
         int axis2,
-        int axis3);
+        int axis3
+);

 // alias for ggml_permute(ctx, a, 1, 0, 2, 3)
-struct ggml_tensor * ggml_transpose(
-        struct ggml_context * ctx,
-        struct ggml_tensor * a);
+struct ggml_tensor * ggml_transpose(struct ggml_context * ctx, struct ggml_tensor * a);

-struct ggml_tensor * ggml_get_rows(
-        struct ggml_context * ctx,
-        struct ggml_tensor * a,
-        struct ggml_tensor * b);
+struct ggml_tensor * ggml_get_rows(struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b);

 // set elements above the diagonal to -INF
 // in-place, returns view(a)
-struct ggml_tensor * ggml_diag_mask_inf(
-        struct ggml_context * ctx,
-        struct ggml_tensor * a,
-        int n_past);
+struct ggml_tensor * ggml_diag_mask_inf(struct ggml_context * ctx, struct ggml_tensor * a, int n_past);

 // in-place, returns view(a)
-struct ggml_tensor * ggml_soft_max(
-        struct ggml_context * ctx,
-        struct ggml_tensor * a);
+struct ggml_tensor * ggml_soft_max(struct ggml_context * ctx, struct ggml_tensor * a);

 // rotary position embedding
 // in-place, returns view(a)
 // if mode == 1, skip n_past elements
 // TODO: avoid creating a new tensor every time
-struct ggml_tensor * ggml_rope(
-        struct ggml_context * ctx,
-        struct ggml_tensor * a,
-        int n_past,
-        int n_dims,
-        int mode);
+struct ggml_tensor * ggml_rope(struct ggml_context * ctx, struct ggml_tensor * a, int n_past, int n_dims, int mode);

 // padding = 1
 // TODO: we don't support extra parameters for now
 //       that's why we are hard-coding the stride, padding, and dilation
 //       not great ..
-struct ggml_tensor * ggml_conv_1d_1s(
-        struct ggml_context * ctx,
-        struct ggml_tensor * a,
-        struct ggml_tensor * b);
+struct ggml_tensor * ggml_conv_1d_1s(struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b);

-struct ggml_tensor * ggml_conv_1d_2s(
-        struct ggml_context * ctx,
-        struct ggml_tensor * a,
-        struct ggml_tensor * b);
+struct ggml_tensor * ggml_conv_1d_2s(struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b);

 struct ggml_tensor * ggml_flash_attn(
         struct ggml_context * ctx,
         struct ggml_tensor * q,
         struct ggml_tensor * k,
         struct ggml_tensor * v,
-        bool masked);
+        bool masked
+);

 struct ggml_tensor * ggml_flash_ff(
         struct ggml_context * ctx,
@@ -594,15 +497,14 @@ struct ggml_tensor * ggml_flash_ff(
         struct ggml_tensor * b0,
         struct ggml_tensor * b1,
         struct ggml_tensor * c0,
-        struct ggml_tensor * c1);
+        struct ggml_tensor * c1
+);

 //
 // automatic differentiation
 //

-void ggml_set_param(
-        struct ggml_context * ctx,
-        struct ggml_tensor * tensor);
+void ggml_set_param(struct ggml_context * ctx, struct ggml_tensor * tensor);

 void ggml_build_forward_expand(struct ggml_cgraph * cgraph, struct ggml_tensor * tensor);
@@ -712,10 +614,7 @@ struct ggml_opt_params {
 struct ggml_opt_params ggml_opt_default_params(enum ggml_opt_type type);

 // optimize the function defined by the tensor f
-enum ggml_opt_result ggml_opt(
-        struct ggml_context * ctx,
-        struct ggml_opt_params params,
-        struct ggml_tensor * f);
+enum ggml_opt_result ggml_opt(struct ggml_context * ctx, struct ggml_opt_params params, struct ggml_tensor * f);

 //
 // system info
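
The comment block above ggml_mul_mat encodes the library's transposed-B convention, which is easy to misread. A minimal sketch (not part of this commit, using only the API declared in this header) of how the shapes line up:

#include "ggml.h"

int main(void) {
    // A small arena for the context; the size is an arbitrary choice for this sketch.
    struct ggml_init_params params = { /* mem_size   = */ 16*1024*1024,
                                       /* mem_buffer = */ NULL };
    struct ggml_context * ctx = ggml_init(params);

    // ne0 is the row length (number of columns), ne1 the number of rows.
    struct ggml_tensor * a = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 4, 3); // A: 3 rows, 4 columns
    struct ggml_tensor * b = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 4, 2); // B: 2 rows, 4 columns

    // Per the comment: B is transposed internally, so this is effectively A * B^T,
    // and the result has m columns and p rows (here 3 columns, 2 rows). The values
    // are only computed once the node is put in a ggml_cgraph and evaluated.
    struct ggml_tensor * c = ggml_mul_mat(ctx, a, b);
    (void) c;

    ggml_free(ctx);
    return 0;
}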

File diff suppressed because it is too large.

whisper.h

@@ -1,8 +1,8 @@
 #ifndef WHISPER_H
 #define WHISPER_H

-#include <stdint.h>
 #include <stdbool.h>
+#include <stdint.h>

 #ifdef WHISPER_SHARED
 #    ifdef _WIN32
@@ -94,30 +94,19 @@ extern "C" {
 // Convert RAW PCM audio to log mel spectrogram.
 // The resulting spectrogram is stored inside the provided whisper context.
 // Returns 0 on success
-WHISPER_API int whisper_pcm_to_mel(
-        struct whisper_context * ctx,
-        const float * samples,
-        int n_samples,
-        int n_threads);
+WHISPER_API int whisper_pcm_to_mel(struct whisper_context * ctx, const float * samples, int n_samples, int n_threads);

 // This can be used to set a custom log mel spectrogram inside the provided whisper context.
 // Use this instead of whisper_pcm_to_mel() if you want to provide your own log mel spectrogram.
 // n_mel must be 80
 // Returns 0 on success
-WHISPER_API int whisper_set_mel(
-        struct whisper_context * ctx,
-        const float * data,
-        int n_len,
-        int n_mel);
+WHISPER_API int whisper_set_mel(struct whisper_context * ctx, const float * data, int n_len, int n_mel);

 // Run the Whisper encoder on the log mel spectrogram stored inside the provided whisper context.
 // Make sure to call whisper_pcm_to_mel() or whisper_set_mel() first.
 // offset can be used to specify the offset of the first frame in the spectrogram.
 // Returns 0 on success
-WHISPER_API int whisper_encode(
-        struct whisper_context * ctx,
-        int offset,
-        int n_threads);
+WHISPER_API int whisper_encode(struct whisper_context * ctx, int offset, int n_threads);

 // Run the Whisper decoder to obtain the logits and probabilities for the next token.
 // Make sure to call whisper_encode() first.
@@ -129,7 +118,8 @@ extern "C" {
         const whisper_token * tokens,
         int n_tokens,
         int n_past,
-        int n_threads);
+        int n_threads
+);

 // Token sampling methods.
 // These are provided for convenience and can be used after each call to whisper_decode().
@@ -148,7 +138,8 @@ extern "C" {
         struct whisper_context * ctx,
         const char * text,
         whisper_token * tokens,
-        int n_max_tokens);
+        int n_max_tokens
+);

 // Largest language id (i.e. number of available languages - 1)
 WHISPER_API int whisper_lang_max_id();
@@ -172,7 +163,8 @@ extern "C" {
         struct whisper_context * ctx,
         int offset_ms,
         int n_threads,
-        float * lang_probs);
+        float * lang_probs
+);

 WHISPER_API int whisper_n_len(struct whisper_context * ctx); // mel length
 WHISPER_API int whisper_n_vocab(struct whisper_context * ctx);
@@ -287,7 +279,8 @@ extern "C" {
         struct whisper_context * ctx,
         struct whisper_full_params params,
         const float * samples,
-        int n_samples);
+        int n_samples
+);

 // Split the input audio in chunks and process each chunk separately using whisper_full()
 // It seems this approach can offer some speedup in some cases.
@@ -297,7 +290,8 @@ extern "C" {
         struct whisper_full_params params,
         const float * samples,
         int n_samples,
-        int n_processors);
+        int n_processors
+);

 // Number of generated text segments.
 // A segment can be a few words, a sentence, or even a paragraph.
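
The comments in this header spell out the intended call order: whisper_pcm_to_mel(), then whisper_encode()/whisper_decode(), or whisper_full() to drive the whole pipeline and then read back segments. A hedged usage sketch, not part of this commit, written against the whisper.h API of this era (whisper_init() taking a model path) with the model path and PCM buffer supplied by the caller:

#include <stdio.h>
#include "whisper.h"

int transcribe(const char * model_path, const float * pcm, int n_samples) {
    struct whisper_context * ctx = whisper_init(model_path);
    if (!ctx) {
        return 1;
    }

    // whisper_full() runs PCM-to-mel conversion, the encoder, and the decoder.
    struct whisper_full_params params = whisper_full_default_params(WHISPER_SAMPLING_GREEDY);
    if (whisper_full(ctx, params, pcm, n_samples) != 0) {
        whisper_free(ctx);
        return 1;
    }

    // Print each generated segment (a few words up to a paragraph, per the comment above).
    for (int i = 0; i < whisper_full_n_segments(ctx); ++i) {
        printf("%s\n", whisper_full_get_segment_text(ctx, i));
    }

    whisper_free(ctx);
    return 0;
}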
