import os

from functools import partial
from pathlib import Path

from datasets import load_dataset
from transformers import (
    Trainer,
    TrainingArguments,
    Seq2SeqTrainer,
    Seq2SeqTrainingArguments,
    GenerationConfig,
)

from .training_args import CONFIG
from ..model.TexTeller import TexTeller
from ..utils.functional import (
    tokenize_fn,
    collate_fn,
    img_train_transform,
    img_inf_transform,
    filter_fn,
)
from ..utils.metrics import bleu_metric
from ...globals import MAX_TOKEN_SIZE, MIN_WIDTH, MIN_HEIGHT

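# NOTE: this file uses relative imports, so run it as a module from the
# project root (the exact package path depends on the repo layout), not as a
# standalone script.
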
def train(model, tokenizer, train_dataset, eval_dataset, collate_fn_with_tokenizer):
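    """Train the model with the HF Trainer, using the hyperparameters
    defined in CONFIG (see .training_args)."""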
    training_args = TrainingArguments(**CONFIG)
    trainer = Trainer(
        model,
        training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        tokenizer=tokenizer,
        data_collator=collate_fn_with_tokenizer,
    )
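
    # To resume an interrupted run, pass a checkpoint directory instead of
    # None, e.g. resume_from_checkpoint='/path/to/checkpoint-500'
    # (illustrative path).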
    trainer.train(resume_from_checkpoint=None)


def evaluate(model, tokenizer, eval_dataset, collate_fn):
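    """Evaluate by autoregressive generation, scoring predictions with BLEU."""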
    eval_config = CONFIG.copy()
    eval_config['predict_with_generate'] = True
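    # predict_with_generate makes the trainer call model.generate() during
    # evaluation; the config below decodes greedily (one beam, no sampling),
    # capped at MAX_TOKEN_SIZE new tokens.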
    generate_config = GenerationConfig(
        max_new_tokens=MAX_TOKEN_SIZE,
        num_beams=1,
        do_sample=False,
        pad_token_id=tokenizer.pad_token_id,
        eos_token_id=tokenizer.eos_token_id,
        bos_token_id=tokenizer.bos_token_id,
    )
    eval_config['generation_config'] = generate_config
    seq2seq_config = Seq2SeqTrainingArguments(**eval_config)

    trainer = Seq2SeqTrainer(
        model,
        seq2seq_config,
        eval_dataset=eval_dataset,
        tokenizer=tokenizer,
        data_collator=collate_fn,
        compute_metrics=partial(bleu_metric, tokenizer=tokenizer),
    )

    eval_res = trainer.evaluate()
    print(eval_res)


if __name__ == '__main__':
    script_dirpath = Path(__file__).resolve().parent
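    # chdir below so relative paths (e.g. the ./dataset path in the
    # commented-out loader variant) resolve against the script's directory.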
    os.chdir(script_dirpath)

    # dataset = load_dataset(str(Path('./dataset/loader.py').resolve()))['train']
    dataset = load_dataset("imagefolder", data_dir=str(script_dirpath / 'dataset'))['train']
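    # Keep only images strictly larger than the MIN_WIDTH/MIN_HEIGHT thresholds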
    dataset = dataset.filter(
        lambda x: x['image'].height > MIN_HEIGHT and x['image'].width > MIN_WIDTH
    )
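    # Shuffle, then flatten_indices() to materialize the shuffled order so
    # later map/filter passes avoid the shuffle's index indirection.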
    dataset = dataset.shuffle(seed=42)
    dataset = dataset.flatten_indices()

    tokenizer = TexTeller.get_tokenizer()
    # To use your own tokenizer, pass its path instead:
    # tokenizer = TexTeller.get_tokenizer('/path/to/your/tokenizer')
    filter_fn_with_tokenizer = partial(filter_fn, tokenizer=tokenizer)
    dataset = dataset.filter(filter_fn_with_tokenizer, num_proc=8)
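    # num_proc=8 runs the filter (and the map below) in 8 worker processes;
    # adjust to the machine's core count.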

    map_fn = partial(tokenize_fn, tokenizer=tokenizer)
    tokenized_dataset = dataset.map(
        map_fn, batched=True, remove_columns=dataset.column_names, num_proc=8
    )

    # Split the dataset into train and eval sets, ratio 9:1
    split_dataset = tokenized_dataset.train_test_split(test_size=0.1, seed=42)
    train_dataset, eval_dataset = split_dataset['train'], split_dataset['test']
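    # with_transform applies these image transforms lazily, per accessed
    # batch, rather than eagerly rewriting the dataset.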
    train_dataset = train_dataset.with_transform(img_train_transform)
    eval_dataset = eval_dataset.with_transform(img_inf_transform)
    collate_fn_with_tokenizer = partial(collate_fn, tokenizer=tokenizer)

    # Train from scratch
    model = TexTeller()
    # Or start from the TexTeller pre-trained weights:
    # model = TexTeller.from_pretrained()
    # To train from your own pre-trained checkpoint, pass its path, e.g.:
    # model = TexTeller.from_pretrained(
    #     '/path/to/your/model_checkpoint'
    # )

    enable_train = True
    enable_evaluate = False
    if enable_train:
        train(model, tokenizer, train_dataset, eval_dataset, collate_fn_with_tokenizer)
    if enable_evaluate and len(eval_dataset) > 0:
        evaluate(model, tokenizer, eval_dataset, collate_fn_with_tokenizer)