Completed training the tokenizer on the 7M-formula dataset
@@ -16,7 +16,7 @@ MAX_WIDTH = 1280
 TEXIFY_INPUT_DENSITY = 100
 
 # Vocabulary size of the OCR model's tokenizer
-VOCAB_SIZE = 10000
+VOCAB_SIZE = 15000
 
 # Whether the OCR model uses a fixed input image size
 OCR_FIX_SIZE = True
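Note: VOCAB_SIZE is the target vocabulary size passed to train_new_from_iterator in the retraining script further down in this diff. A minimal sanity check for the enlarged vocabulary, as a sketch only (the path and the use of AutoTokenizer are assumptions, not part of this commit):

from transformers import AutoTokenizer

# Sketch: the retrained tokenizer should report roughly the enlarged vocabulary.
# Path is illustrative; point it at the directory saved by the training script.
tok = AutoTokenizer.from_pretrained("src/models/tokenizer/roberta-tokenizer-7Mformulas")
print(len(tok))  # expected to be about 15000 (base vocab plus special tokens)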
src/models/tokenizer/roberta-tokenizer-7Mformulas/merges.txt | 14740 lines (new file; file diff suppressed because it is too large)
@@ -0,0 +1,15 @@
+{
+  "bos_token": "<s>",
+  "cls_token": "<s>",
+  "eos_token": "</s>",
+  "mask_token": {
+    "content": "<mask>",
+    "lstrip": true,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": "<pad>",
+  "sep_token": "</s>",
+  "unk_token": "<unk>"
+}
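The 15-line block above is the standard RoBERTa special-tokens map (presumably special_tokens_map.json; the filename is not shown in this diff). As a sketch only, assuming the tokenizer directory added in this commit, the markers surface like this when encoding a formula:

from transformers import RobertaTokenizerFast

# Sketch: load the tokenizer added in this commit and inspect its special tokens.
tok = RobertaTokenizerFast.from_pretrained("src/models/tokenizer/roberta-tokenizer-7Mformulas")
ids = tok.encode(r"\frac{a}{b}")        # encode() wraps the sequence with <s> ... </s> by default
print(tok.convert_ids_to_tokens(ids))   # first token is <s> (bos/cls), last is </s> (eos/sep)
print(tok.pad_token, tok.unk_token, tok.mask_token)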
src/models/tokenizer/roberta-tokenizer-7Mformulas/tokenizer.json | 29830 lines (new file; file diff suppressed because it is too large)
@@ -0,0 +1,57 @@
+{
+  "add_prefix_space": false,
+  "added_tokens_decoder": {
+    "0": {
+      "content": "<s>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "1": {
+      "content": "<pad>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "2": {
+      "content": "</s>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "3": {
+      "content": "<unk>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "4": {
+      "content": "<mask>",
+      "lstrip": true,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "bos_token": "<s>",
+  "clean_up_tokenization_spaces": true,
+  "cls_token": "<s>",
+  "eos_token": "</s>",
+  "errors": "replace",
+  "mask_token": "<mask>",
+  "model_max_length": 1000000000000000019884624838656,
+  "pad_token": "<pad>",
+  "sep_token": "</s>",
+  "tokenizer_class": "RobertaTokenizer",
+  "trim_offsets": true,
+  "unk_token": "<unk>"
+}
File diff suppressed because one or more lines are too long
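The tokenizer config above (presumably tokenizer_config.json) pins the special tokens to ids 0-4 via added_tokens_decoder and declares tokenizer_class as RobertaTokenizer. A quick check of that id mapping, as a sketch only (path is illustrative):

from transformers import AutoTokenizer

# Sketch: confirm the id assignments declared in added_tokens_decoder.
tok = AutoTokenizer.from_pretrained("src/models/tokenizer/roberta-tokenizer-7Mformulas")
for tid in range(5):
    print(tid, tok.convert_ids_to_tokens(tid))  # expected order: <s>, <pad>, </s>, <unk>, <mask>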
@@ -4,8 +4,8 @@ from ...globals import VOCAB_SIZE
 
 
 if __name__ == '__main__':
-    tokenizer = TexTeller.get_tokenizer('/home/lhy/code/TeXify/src/models/tokenizer/roberta-tokenizer-raw')
+    tokenizer = TexTeller.get_tokenizer('/home/lhy/code/TexTeller/src/models/tokenizer/roberta-tokenizer-raw')
     dataset = load_dataset("/home/lhy/code/TexTeller/src/models/ocr_model/train/data/loader.py")['train']
     new_tokenizer = tokenizer.train_new_from_iterator(text_iterator=dataset['latex_formula'], vocab_size=VOCAB_SIZE)
-    new_tokenizer.save_pretrained('/home/lhy/code/TeXify/src/models/tokenizer/roberta-tokenizer-550Kformulas')
+    new_tokenizer.save_pretrained('/home/lhy/code/TexTeller/src/models/tokenizer/roberta-tokenizer-7Mformulas')
     pause = 1
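train_new_from_iterator retrains the BPE vocabulary from scratch on the latex_formula column while keeping the RoBERTa special-token setup. With roughly 7M formulas, the corpus can also be streamed in batches instead of passing the whole column at once; a minimal sketch under that assumption (the batching helper below is hypothetical, not part of the repo):

def batched_formulas(ds, batch_size=1000):
    # Hypothetical helper: yield the latex_formula column in chunks of batch_size.
    for i in range(0, len(ds), batch_size):
        yield ds[i:i + batch_size]['latex_formula']

# new_tokenizer = tokenizer.train_new_from_iterator(batched_formulas(dataset), vocab_size=VOCAB_SIZE)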