parent a169bb889c
commit 34af8a97e8
@@ -0,0 +1,324 @@
// SOURCE: https://github.com/google/sentencepiece/blob/9ffb33a14c97c512103be0ee74740099660b39aa/src/sentencepiece_model.proto#L282

// Copyright 2016 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

syntax = "proto2";

// TODO(taku): Needs to use LITE RUNTIME in OSS release.
option optimize_for = LITE_RUNTIME;

package sentencepiece;

// TrainerSpec encodes various parameters for SentencePiece training.
// Next id: 53
message TrainerSpec {
  ///////////////////////////////////////////////////////////////////
  // General parameters
  //
  // Input corpus files.
  // Trainer accepts the following two formats:
  // A) Monolingual: plain text, one sentence per line.
  // B) Bilingual:   TSV, source sentence <tab> target sentence
  // When bilingual data is passed, a shared vocabulary model is built.
  // Note that the input file must be a raw corpus, not a preprocessed corpus.
  // Trainer only loads the first `input_sentence_size` sentences specified
  // with this parameter.
  repeated string input = 1;

  // Input corpus format:
  // "text": one-sentence-per-line text format (default)
  // "tsv":  sentence <tab> freq
  optional string input_format = 7;

  // Output model file prefix.
  // <model_prefix>.model and <model_prefix>.vocab are generated.
  optional string model_prefix = 2;

  // Model type. UNIGRAM is the default.
  enum ModelType {
    UNIGRAM = 1;  // Unigram language model with dynamic algorithm
    BPE = 2;      // Byte Pair Encoding
    WORD = 3;     // Delimited by whitespace.
    CHAR = 4;     // Tokenizes into character sequence.
  }
  optional ModelType model_type = 3 [default = UNIGRAM];

  // Vocabulary size. 8k is the default size.
  optional int32 vocab_size = 4 [default = 8000];

  // List of the languages this model can accept.
  // Since the model is language-agnostic, this field is used as a reference.
  repeated string accept_language = 5;

  // Size of self-test samples, which are encoded in the model file.
  optional int32 self_test_sample_size = 6 [default = 0];

  // Whether to use the DP (differentially private) version of sentencepiece.
  // Use it with the TSV input format (requires precomputed word <tab> count
  // data to work).
  optional bool enable_differential_privacy = 50 [default = false];
  // Set these parameters if you need the DP version of sentencepiece.
  // Standard deviation of the noise to add.
  optional float differential_privacy_noise_level = 51 [default = 0.0];
  // Clipping threshold to apply after adding noise. All the words with
  // frequency less than this value are dropped.
  optional uint64 differential_privacy_clipping_threshold = 52 [default = 0];

  ///////////////////////////////////////////////////////////////////
  // Training parameters.
  //
  // Uses characters which cover the corpus with the ratio of
  // `character_coverage`. This parameter determines the set of basic
  // alphabet of sentence pieces.
  // 1.0 - `character_coverage` characters are treated as UNK.
  // See also the required_chars field.
  optional float character_coverage = 10 [default = 0.9995];

  // Maximum size of sentences the trainer loads from the `input` parameter.
  // Trainer simply loads the `input` files in sequence.
  // It is better to shuffle the input corpus randomly.
  optional uint64 input_sentence_size = 11 [default = 0];
  optional bool shuffle_input_sentence = 19 [default = true];

  // Maximum size of sentences to make seed sentence pieces.
  // An extended suffix array is constructed to extract frequent
  // sub-strings from the corpus. This uses 20N working space,
  // where N is the size of the corpus.
  optional int32 mining_sentence_size = 12 [deprecated = true];

  // Maximum size of sentences to train sentence pieces.
  optional int32 training_sentence_size = 13 [deprecated = true];

  // The size of seed sentencepieces.
  // `seed_sentencepiece_size` must be larger than `vocab_size`.
  optional int32 seed_sentencepiece_size = 14 [default = 1000000];

  // In every EM sub-iteration, keeps the top
  // `shrinking_factor` * `current sentencepiece size` pieces with respect to
  // the loss of each sentence piece. This value should be smaller than 1.0.
  optional float shrinking_factor = 15 [default = 0.75];

  // The maximum sentence length in bytes. Sentences longer than
  // `max_sentence_length` are simply ignored.
  // Longer input tends to bring the following risks:
  //  * Overflow during EM training (unigram language model only)
  //  * Performance drop because of the O(n log n) cost in BPE.
  optional int32 max_sentence_length = 18 [default = 4192];

  // Number of threads in the training.
  optional int32 num_threads = 16 [default = 16];

  // Number of EM sub-iterations.
  optional int32 num_sub_iterations = 17 [default = 2];

  ///////////////////////////////////////////////////////////////////
  // SentencePiece parameters which control the shapes of sentence pieces.
  //
  // Maximum length of a sentencepiece.
  optional int32 max_sentencepiece_length = 20 [default = 16];

  // Uses Unicode script to split sentence pieces.
  // When `split_by_unicode_script` is true, we do not allow a sentence piece
  // to include multiple Unicode scripts, e.g. "F1" is not a valid piece.
  // Exception: CJ characters (Hiragana/Katakana/Han) are all handled
  // as one script type, since a Japanese word can consist of multiple scripts.
  // This exception is always applied regardless of the accept-language
  // parameter.
  optional bool split_by_unicode_script = 21 [default = true];

  // When `split_by_number` is true, puts a boundary at every number to
  // non-number transition. If we want to treat "F1" as one token, set this
  // flag to false.
  optional bool split_by_number = 23 [default = true];

  // Uses whitespace to split sentence pieces.
  // When `split_by_whitespace` is false, we may have pieces containing
  // whitespace in the middle, e.g., "in_the".
  optional bool split_by_whitespace = 22 [default = true];

  // Adds the whitespace symbol (_) as a suffix instead of a prefix, e.g.,
  // _hello => hello_. When `treat_whitespace_as_suffix` is true,
  // NormalizerSpec::add_dummy_prefix will add the dummy whitespace to the end
  // of the sentence.
  optional bool treat_whitespace_as_suffix = 24 [default = false];

  // Allows pieces that only contain whitespace instead of appearing only as
  // a prefix or suffix of other pieces.
  optional bool allow_whitespace_only_pieces = 26 [default = false];

  // Splits all digits (0-9) into separate pieces.
  optional bool split_digits = 25 [default = false];

  ///////////////////////////////////////////////////////////////////
  // Vocabulary management
  //
  // Defines control symbols used as an indicator to
  // change the behavior of the decoder. <s> and </s> are pre-defined.
  // We can use this field to encode various meta information,
  // including a language indicator in a multilingual model.
  // These symbols are not visible to users, but visible to
  // the decoder. Note that when the input sentence contains control symbols,
  // they are not treated as one token, but segmented into normal pieces.
  // Control symbols must be inserted independently from the segmentation.
  repeated string control_symbols = 30;

  // Defines user-defined symbols.
  // These symbols are added with an extremely high score
  // so they are always treated as one unique symbol in any context.
  // A typical usage of user_defined_symbols is as placeholders for named
  // entities.
  repeated string user_defined_symbols = 31;

  // Defines required characters. Each UTF-8 character in this string is
  // included in the character set regardless of the character_coverage value.
  // Unlike user_defined_symbols, these characters have scores based on their
  // frequency in the input sentences, and the model can form subwords using
  // characters in this field.
  optional string required_chars = 36;

  // Decomposes unknown pieces into UTF-8 bytes.
  optional bool byte_fallback = 35 [default = false];

  // When creating the vocabulary file, defines whether or not to additionally
  // output the score for each piece.
  optional bool vocabulary_output_piece_score = 32 [default = true];

  // `vocab_size` is treated as a hard limit. Crashes if
  // the model can not produce a vocab of size `vocab_size`.
  // When `hard_vocab_limit` is false, vocab_size is treated
  // as a soft limit. Note that when model_type=char,
  // hard_vocab_limit = false is always assumed.
  optional bool hard_vocab_limit = 33 [default = true];

  // Uses all symbols for vocab extraction. This flag is valid
  // only if the model type is either CHAR or WORD.
  optional bool use_all_vocab = 34 [default = false];

  ///////////////////////////////////////////////////////////////////
  // Reserved special meta tokens.
  //  * -1 is not used.
  //  * unk_id must not be -1.
  // Ids must start with 0 and be contiguous.
  optional int32 unk_id = 40 [default = 0];   // <unk>
  optional int32 bos_id = 41 [default = 1];   // <s>
  optional int32 eos_id = 42 [default = 2];   // </s>
  optional int32 pad_id = 43 [default = -1];  // <pad> (padding)
  optional string unk_piece = 45 [default = "<unk>"];
  optional string bos_piece = 46 [default = "<s>"];
  optional string eos_piece = 47 [default = "</s>"];
  optional string pad_piece = 48 [default = "<pad>"];

  // Encodes <unk> into U+2047 (DOUBLE QUESTION MARK),
  // since this character can be useful for both users and
  // developers. We can easily figure out that <unk> is emitted.
  optional string unk_surface = 44 [default = " \xE2\x81\x87 "];

  // Increases bit depth to allow unigram model training on large
  // (>10M sentences) corpora. A side-effect of enabling this flag
  // is increased memory usage.
  optional bool train_extremely_large_corpus = 49 [default = false];

  // Customized extensions: the range of field numbers
  // is open to third-party extensions.
  extensions 200 to max;
}
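
Most TrainerSpec fields above surface one-to-one as training flags in the sentencepiece Python bindings. A minimal sketch, assuming the pip-installed `sentencepiece` package and a hypothetical one-sentence-per-line corpus file `corpus.txt`:

    # Train a model; keyword names mirror the TrainerSpec fields above.
    import sentencepiece as spm

    spm.SentencePieceTrainer.train(
        input='corpus.txt',         # TrainerSpec.input (hypothetical file)
        model_prefix='m',           # writes m.model and m.vocab
        vocab_size=8000,            # TrainerSpec.vocab_size default
        model_type='unigram',       # UNIGRAM | BPE | WORD | CHAR
        character_coverage=0.9995,  # TrainerSpec.character_coverage default
    )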

// NormalizerSpec encodes various parameters for string normalization.
message NormalizerSpec {
  // Name of the normalization rule.
  optional string name = 1;

  // Pre-compiled normalization rule created by the
  // Builder::GetPrecompiledCharsMap() or Builder::CompileCharsMap() method.
  // Usually this field is set by the Builder::GetNormalizerSpec() method.
  optional bytes precompiled_charsmap = 2;

  // Adds dummy whitespace at the beginning of the text in order to
  // treat "world" in "world" and "hello world" in the same way.
  optional bool add_dummy_prefix = 3 [default = true];

  // Removes leading, trailing, and duplicate internal whitespace.
  optional bool remove_extra_whitespaces = 4 [default = true];

  // Replaces whitespace with the meta symbol.
  // This field must be true to train a sentence piece model.
  optional bool escape_whitespaces = 5 [default = true];

  // Custom normalization rule file in TSV format.
  // https://github.com/google/sentencepiece/blob/master/doc/normalization.md
  // This field is only used in the SentencePieceTrainer::Train() method, which
  // compiles the rule into the binary rule stored in `precompiled_charsmap`.
  optional string normalization_rule_tsv = 6;

  // Customized extensions: the range of field numbers
  // is open to third-party extensions.
  extensions 200 to max;
}
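
Since ModelProto (below) embeds its NormalizerSpec, the normalization settings of a trained model can be read back with the generated bindings from the second file in this commit. A sketch, assuming a hypothetical model path `m.model`:

    # Inspect the NormalizerSpec stored inside a trained model file.
    import sentencepiece_model_pb2 as model_pb2

    m = model_pb2.ModelProto()
    with open('m.model', 'rb') as f:  # hypothetical path
        m.ParseFromString(f.read())
    print(m.normalizer_spec.name)
    print(m.normalizer_spec.add_dummy_prefix)    # True by default
    print(m.normalizer_spec.escape_whitespaces)  # True by default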

// Proto to store samples for self-testing.
message SelfTestData {
  message Sample {
    optional string input = 1;
    optional string expected = 2;
  }
  repeated Sample samples = 1;

  // Customized extensions: the range of field numbers
  // is open to third-party extensions.
  extensions 200 to max;
}
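
When self-test samples are present, each one pairs a raw input with its expected segmentation, so a freshly loaded model can be sanity-checked. A sketch, under the assumption that `expected` holds the pieces joined by single spaces (hypothetical path `m.model`):

    # Re-encode each self-test input and compare with the stored expectation.
    import sentencepiece as spm
    import sentencepiece_model_pb2 as model_pb2

    sp = spm.SentencePieceProcessor(model_file='m.model')  # hypothetical path
    m = model_pb2.ModelProto()
    with open('m.model', 'rb') as f:
        m.ParseFromString(f.read())
    for sample in m.self_test_data.samples:
        pieces = sp.encode(sample.input, out_type=str)
        assert ' '.join(pieces) == sample.expected, sample.input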

// ModelProto stores model parameters.
// SentencePieceProcessor is supposed to be self-contained.
// All settings/parameters which may change the behavior must be encoded
// in ModelProto.
message ModelProto {
  message SentencePiece {
    enum Type {
      NORMAL = 1;        // normal symbol
      UNKNOWN = 2;       // unknown symbol. only <unk> for now.
      CONTROL = 3;       // control symbols. </s>, <s>, <2ja> etc.
      USER_DEFINED = 4;  // user defined symbols.
                         // A typical usage of USER_DEFINED symbols
                         // is as placeholders.
      BYTE = 6;          // byte symbols. Used when `byte_fallback` is true.
      UNUSED = 5;        // this piece is not used.
    }
    optional string piece = 1;  // piece must not be empty.
    optional float score = 2;
    optional Type type = 3 [default = NORMAL];

    // Customized extensions: the range of field numbers
    // is open to third-party extensions.
    extensions 200 to max;
  }

  // Sentence pieces with scores.
  repeated SentencePiece pieces = 1;

  // Spec used to generate this model file.
  optional TrainerSpec trainer_spec = 2;

  // Spec for text normalization.
  optional NormalizerSpec normalizer_spec = 3;

  // Stores sample input and its expected segmentation to verify the model.
  optional SelfTestData self_test_data = 4;

  // Spec for text de-normalization.
  optional NormalizerSpec denormalizer_spec = 5;

  // Customized extensions: the range of field numbers
  // is open to third-party extensions.
  extensions 200 to max;
}
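
Because ModelProto is exactly what a .model file stores, the vocabulary can be examined without the SentencePiece runtime at all. A sketch using the generated module from the second half of this commit (model path hypothetical):

    # Print the ten highest-scoring pieces with their types.
    import sentencepiece_model_pb2 as model_pb2

    m = model_pb2.ModelProto()
    with open('m.model', 'rb') as f:  # hypothetical path
        m.ParseFromString(f.read())
    Type = model_pb2.ModelProto.SentencePiece.Type
    for p in sorted(m.pieces, key=lambda p: -p.score)[:10]:
        print(p.piece, p.score, Type.Name(p.type))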
@@ -0,0 +1,44 @@
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: sentencepiece_model.proto
"""Generated protocol buffer code."""
from google.protobuf.internal import builder as _builder
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)

_sym_db = _symbol_database.Default()


DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\xdb\x0b\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. \x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03')

_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'sentencepiece_model_pb2', globals())
if _descriptor._USE_C_DESCRIPTORS == False:

  DESCRIPTOR._options = None
  DESCRIPTOR._serialized_options = b'H\003'
  _TRAINERSPEC.fields_by_name['mining_sentence_size']._options = None
  _TRAINERSPEC.fields_by_name['mining_sentence_size']._serialized_options = b'\030\001'
  _TRAINERSPEC.fields_by_name['training_sentence_size']._options = None
  _TRAINERSPEC.fields_by_name['training_sentence_size']._serialized_options = b'\030\001'
  _TRAINERSPEC._serialized_start=45
  _TRAINERSPEC._serialized_end=1544
  _TRAINERSPEC_MODELTYPE._serialized_start=1480
  _TRAINERSPEC_MODELTYPE._serialized_end=1533
  _NORMALIZERSPEC._serialized_start=1547
  _NORMALIZERSPEC._serialized_end=1756
  _SELFTESTDATA._serialized_start=1758
  _SELFTESTDATA._serialized_end=1879
  _SELFTESTDATA_SAMPLE._serialized_start=1827
  _SELFTESTDATA_SAMPLE._serialized_end=1868
  _MODELPROTO._serialized_start=1882
  _MODELPROTO._serialized_end=2392
  _MODELPROTO_SENTENCEPIECE._serialized_start=2171
  _MODELPROTO_SENTENCEPIECE._serialized_end=2381
  _MODELPROTO_SENTENCEPIECE_TYPE._serialized_start=2286
  _MODELPROTO_SENTENCEPIECE_TYPE._serialized_end=2370
# @@protoc_insertion_point(module_scope)
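
A quick smoke test for the generated module is a serialize/parse round-trip; this sketch builds a tiny ModelProto in memory, with no model file needed:

    # Round-trip a hand-built ModelProto through the generated bindings.
    import sentencepiece_model_pb2 as model_pb2

    m = model_pb2.ModelProto()
    m.pieces.add(piece='<unk>', score=0.0,
                 type=model_pb2.ModelProto.SentencePiece.UNKNOWN)
    m.trainer_spec.model_type = model_pb2.TrainerSpec.UNIGRAM
    data = m.SerializeToString()
    assert model_pb2.ModelProto.FromString(data) == m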