Adding TF translation example (#12667)
* Adding TF translation example
* Fixes and style pass for TF translation example
* Remove unused postprocess_text copied from run_summarization
* Adding README
* Review fixes
* Move changes to model.config to after we've initialized the model
parent cee2d2135f
commit 65bf05cd18
@@ -0,0 +1,69 @@
<!---
Copyright 2021 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->

# Translation example

This script shows an example of training a *translation* model with the 🤗 Transformers library.
For straightforward use-cases you may be able to use these scripts without modification, although we have also
included comments in the code to indicate areas that you may need to adapt to your own projects.

### Multi-GPU and TPU usage

By default, these scripts use a `MirroredStrategy` and will use multiple GPUs effectively if they are available. TPUs
can also be used by passing the name of the TPU resource with the `--tpu` argument.
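
For example, here is a sketch of a TPU run using the `--tpu` argument described above; the TPU resource name `my-tpu` is a placeholder, and `--pad_to_max_length` is included because dynamic padding tends to perform poorly on TPU:

```bash
python run_translation.py \
    --model_name_or_path t5-small \
    --do_train \
    --do_eval \
    --source_lang en \
    --target_lang ro \
    --source_prefix "translate English to Romanian: " \
    --dataset_name wmt16 \
    --dataset_config_name ro-en \
    --output_dir /tmp/tst-translation \
    --pad_to_max_length \
    --tpu my-tpu \
    --overwrite_output_dir
```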

### Example commands and caveats

MBart and some T5 models require special handling.

T5 models `t5-small`, `t5-base`, `t5-large`, `t5-3b` and `t5-11b` must use an additional argument: `--source_prefix "translate {source_lang} to {target_lang}"`. For example:

```bash
python run_translation.py \
    --model_name_or_path t5-small \
    --do_train \
    --do_eval \
    --source_lang en \
    --target_lang ro \
    --source_prefix "translate English to Romanian: " \
    --dataset_name wmt16 \
    --dataset_config_name ro-en \
    --output_dir /tmp/tst-translation \
    --per_device_train_batch_size=16 \
    --per_device_eval_batch_size=16 \
    --overwrite_output_dir
```

If you get a terrible BLEU score, make sure that you didn't forget to use the `--source_prefix` argument.

For the aforementioned group of T5 models, remember that if you switch to a different language pair, you must adjust the values of all three language-specific command-line arguments: `--source_lang`, `--target_lang` and `--source_prefix`.

MBart models require a different format for the `--source_lang` and `--target_lang` values: instead of `en` they expect `en_XX`, and instead of `ro`, `ro_RO`. The full MBart specification for language codes can be found [here](https://huggingface.co/facebook/mbart-large-cc25). For example:

```bash
python run_translation.py \
    --model_name_or_path facebook/mbart-large-en-ro \
    --do_train \
    --do_eval \
    --dataset_name wmt16 \
    --dataset_config_name ro-en \
    --source_lang en_XX \
    --target_lang ro_RO \
    --output_dir /tmp/tst-translation \
    --per_device_train_batch_size=16 \
    --per_device_eval_batch_size=16 \
    --overwrite_output_dir
```
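
You can also train on your own JSON lines files instead of a dataset from the Hub by passing `--train_file` and `--validation_file` (the paths below are placeholders). Each line must contain a `translation` field mapping language codes to the corresponding texts, matching what the preprocessing code expects:

```bash
python run_translation.py \
    --model_name_or_path t5-small \
    --do_train \
    --do_eval \
    --source_lang en \
    --target_lang ro \
    --source_prefix "translate English to Romanian: " \
    --train_file data/train.json \
    --validation_file data/val.json \
    --output_dir /tmp/tst-translation \
    --per_device_train_batch_size=16 \
    --per_device_eval_batch_size=16 \
    --overwrite_output_dir
```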
@@ -0,0 +1,620 @@
#!/usr/bin/env python
# coding=utf-8
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fine-tuning the library models for translation.
"""
# You can also adapt this script on your own sequence to sequence task. Pointers for this are left as comments.

import logging
import os
import sys
from dataclasses import dataclass, field
from functools import partial
from typing import Optional

import datasets
import numpy as np
import tensorflow as tf
from datasets import load_dataset, load_metric
from tqdm import tqdm

import transformers
from transformers import (
    AutoConfig,
    AutoTokenizer,
    HfArgumentParser,
    M2M100Tokenizer,
    MBart50Tokenizer,
    MBart50TokenizerFast,
    MBartTokenizer,
    MBartTokenizerFast,
    TFAutoModelForSeq2SeqLM,
    TFTrainingArguments,
    create_optimizer,
    set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version


# region Dependencies and constants
# Will error if the minimal version of Transformers is not installed. Remove at your own risk.
check_min_version("4.9.0.dev0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/tensorflow/translation/requirements.txt")

logger = logging.getLogger(__name__)
MULTILINGUAL_TOKENIZERS = [MBartTokenizer, MBartTokenizerFast, MBart50Tokenizer, MBart50TokenizerFast, M2M100Tokenizer]
# endregion


# region Arguments
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizers (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": "Will use the token generated when running `transformers-cli login` (necessary to use this script "
            "with private models)."
        },
    )


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    source_lang: str = field(default=None, metadata={"help": "Source language id for translation."})
    target_lang: str = field(default=None, metadata={"help": "Target language id for translation."})
    dataset_name: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "The input training data file (a jsonlines or csv file)."}
    )
    validation_file: Optional[str] = field(
        default=None,
        metadata={
            "help": "An optional input evaluation data file to evaluate the metrics (sacrebleu) on "
            "(a jsonlines or csv file)."
        },
    )
    test_file: Optional[str] = field(
        default=None,
        metadata={
            "help": "An optional input test data file to evaluate the metrics (sacrebleu) on "
            "(a jsonlines or csv file)."
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_source_length: Optional[int] = field(
        default=1024,
        metadata={
            "help": "The maximum total input sequence length after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded."
        },
    )
    max_target_length: Optional[int] = field(
        default=128,
        metadata={
            "help": "The maximum total sequence length for target text after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded."
        },
    )
    val_max_target_length: Optional[int] = field(
        default=None,
        metadata={
            "help": "The maximum total sequence length for validation target text after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded. Will default to `max_target_length`. "
            "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
            "during ``evaluate`` and ``predict``."
        },
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": "Whether to pad all samples to model maximum sentence length. "
            "If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
            "efficient on GPU but very bad for TPU."
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": "For debugging purposes or quicker training, truncate the number of training examples to this "
            "value if set."
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
            "value if set."
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": "For debugging purposes or quicker training, truncate the number of prediction examples to this "
            "value if set."
        },
    )
    num_beams: Optional[int] = field(
        default=None,
        metadata={
            "help": "Number of beams to use for evaluation. This argument will be passed to ``model.generate``, "
            "which is used during ``evaluate`` and ``predict``."
        },
    )
    ignore_pad_token_for_loss: bool = field(
        default=True,
        metadata={
            "help": "Whether to ignore the tokens corresponding to padded labels in the loss computation or not."
        },
    )
    source_prefix: Optional[str] = field(
        default=None, metadata={"help": "A prefix to add before every source text (useful for T5 models)."}
    )
    forced_bos_token: Optional[str] = field(
        default=None,
        metadata={
            "help": "The token to force as the first generated token after the decoder start token. "
            "Useful for multilingual models like mBART, where the first generated token needs to be "
            "the target language token."
        },
    )

    def __post_init__(self):
        if self.dataset_name is None and self.train_file is None and self.validation_file is None:
            raise ValueError("Need either a dataset name or a training/validation file.")
        else:
            if self.train_file is not None:
                extension = self.train_file.split(".")[-1]
                assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            if self.validation_file is not None:
                extension = self.validation_file.split(".")[-1]
                assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
        if self.val_max_target_length is None:
            self.val_max_target_length = self.max_target_length


# endregion

# region Data generator
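# Each example is padded individually here (optionally to a multiple of `pad_to_multiple_of`);
# padding up to the longest sequence in each batch happens later, in `padded_batch` inside
# `dataset_to_tf`. When the model exposes `prepare_decoder_input_ids_from_labels`, the decoder
# inputs are built from the labels (typically by shifting them one position to the right).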
def sample_generator(dataset, model, tokenizer, shuffle, pad_to_multiple_of=None):
    if shuffle:
        sample_ordering = np.random.permutation(len(dataset))
    else:
        sample_ordering = np.arange(len(dataset))
    for sample_idx in sample_ordering:
        example = dataset[int(sample_idx)]
        # Handle dicts with proper padding and conversion to tensor.
        example = tokenizer.pad(example, return_tensors="np", pad_to_multiple_of=pad_to_multiple_of)
        example = {key: tf.convert_to_tensor(arr, dtype_hint=tf.int32) for key, arr in example.items()}
        if model is not None and hasattr(model, "prepare_decoder_input_ids_from_labels"):
            decoder_input_ids = model.prepare_decoder_input_ids_from_labels(
                labels=tf.expand_dims(example["labels"], 0)
            )
            example["decoder_input_ids"] = tf.squeeze(decoder_input_ids, 0)
        yield example, example["labels"]  # TF needs some kind of labels, even if we don't use them
    return


# endregion


# region Helper functions
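# `dataset_to_tf` below turns the generator into a `tf.data.Dataset`: every feature is declared as a
# variable-length 1-D int32 tensor, labels are padded with -100 so that padded positions are masked
# in the loss, all other features are padded with the pad token, and automatic sharding is disabled
# because generator-backed datasets cannot be sharded by file under a distribution strategy.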
def dataset_to_tf(dataset, model, tokenizer, total_batch_size, num_epochs, shuffle):
    if dataset is None:
        return None
    train_generator = partial(sample_generator, dataset, model, tokenizer, shuffle=shuffle)
    train_signature = {
        feature: tf.TensorSpec(shape=(None,), dtype=tf.int32)
        for feature in dataset.features
        if feature != "special_tokens_mask"
    }
    if (
        model is not None
        and "decoder_input_ids" not in train_signature
        and hasattr(model, "prepare_decoder_input_ids_from_labels")
    ):
        train_signature["decoder_input_ids"] = train_signature["labels"]
    # This may need to be changed depending on your particular model or tokenizer!
    padding_values = {
        key: tf.convert_to_tensor(tokenizer.pad_token_id if tokenizer.pad_token_id is not None else 0, dtype=tf.int32)
        for key in train_signature.keys()
    }
    padding_values["labels"] = tf.convert_to_tensor(-100, dtype=tf.int32)
    train_signature["labels"] = train_signature["input_ids"]
    train_signature = (train_signature, train_signature["labels"])
    options = tf.data.Options()
    options.experimental_distribute.auto_shard_policy = tf.data.experimental.AutoShardPolicy.OFF
    tf_dataset = (
        tf.data.Dataset.from_generator(train_generator, output_signature=train_signature)
        .with_options(options)
        .padded_batch(
            batch_size=total_batch_size,
            drop_remainder=True,
            padding_values=(padding_values, np.array(-100, dtype=np.int32)),
        )
        .repeat(int(num_epochs))
    )
    return tf_dataset


# endregion


def main():
    # region Argument parsing
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    # endregion

    # region Logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logger.setLevel(logging.INFO)
    datasets.utils.logging.set_verbosity(logging.INFO)
    transformers.utils.logging.set_verbosity(logging.INFO)

    # Log on each process the small summary:
    logger.info(f"Training/evaluation parameters {training_args}")
    # endregion

    # region Detecting last checkpoint
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )
    # endregion

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # region Load datasets
    # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).
    #
    # For CSV/JSON files this script expects a `translation` column containing, for each example, a dictionary
    # that maps language codes to the corresponding texts, as produced by the WMT datasets on the Hub.
    #
    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
    # download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        raw_datasets = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir
        )
    else:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
            extension = data_args.train_file.split(".")[-1]
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
            extension = data_args.validation_file.split(".")[-1]
        raw_datasets = load_dataset(extension, data_files=data_files, cache_dir=model_args.cache_dir)
    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.html.
    # endregion

    # region Load model config and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.

    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    prefix = data_args.source_prefix if data_args.source_prefix is not None else ""
    # endregion

    # region Dataset preprocessing
    # We need to tokenize inputs and targets.
    if training_args.do_train:
        column_names = raw_datasets["train"].column_names
    elif training_args.do_eval:
        column_names = raw_datasets["validation"].column_names
    else:
        logger.info("There is nothing to do. Please pass `do_train` and/or `do_eval`.")
        return

    # For translation we set the codes of our source and target languages (only useful for mBART, the others will
    # ignore those attributes).
    if isinstance(tokenizer, tuple(MULTILINGUAL_TOKENIZERS)):
        assert data_args.target_lang is not None and data_args.source_lang is not None, (
            f"{tokenizer.__class__.__name__} is a multilingual tokenizer which requires --source_lang and "
            "--target_lang arguments."
        )
        tokenizer.src_lang = data_args.source_lang
        tokenizer.tgt_lang = data_args.target_lang
        forced_bos_token_id = (
            tokenizer.lang_code_to_id[data_args.forced_bos_token] if data_args.forced_bos_token is not None else None
        )

    # Get the language codes for input/target.
    source_lang = data_args.source_lang.split("_")[0]
    target_lang = data_args.target_lang.split("_")[0]

    padding = "max_length" if data_args.pad_to_max_length else False

    # Temporarily set max_target_length for training.
    max_target_length = data_args.max_target_length

    def preprocess_function(examples):
        inputs = [ex[source_lang] for ex in examples["translation"]]
        targets = [ex[target_lang] for ex in examples["translation"]]
        inputs = [prefix + inp for inp in inputs]
        model_inputs = tokenizer(inputs, max_length=data_args.max_source_length, padding=padding, truncation=True)

        # Setup the tokenizer for targets
        with tokenizer.as_target_tokenizer():
            labels = tokenizer(targets, max_length=max_target_length, padding=padding, truncation=True)

        # If we are padding here, replace all tokenizer.pad_token_id in the labels by -100 when we want to ignore
        # padding in the loss.
        if padding == "max_length" and data_args.ignore_pad_token_for_loss:
            labels["input_ids"] = [
                [(l if l != tokenizer.pad_token_id else -100) for l in label] for label in labels["input_ids"]
            ]

        model_inputs["labels"] = labels["input_ids"]
        return model_inputs

    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            train_dataset = train_dataset.select(range(data_args.max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                remove_columns=column_names,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on train dataset",
            )
    else:
        train_dataset = None

    if training_args.do_eval:
        max_target_length = data_args.val_max_target_length
        if "validation" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                remove_columns=column_names,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on validation dataset",
            )
    else:
        eval_dataset = None
    # endregion

    with training_args.strategy.scope():
        # region Prepare model
        model = TFAutoModelForSeq2SeqLM.from_pretrained(
            model_args.model_name_or_path,
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )

        model.resize_token_embeddings(len(tokenizer))
        if isinstance(tokenizer, tuple(MULTILINGUAL_TOKENIZERS)):
            model.config.forced_bos_token_id = forced_bos_token_id
        # endregion

        # region Set decoder_start_token_id
        if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
            assert (
                data_args.target_lang is not None and data_args.source_lang is not None
            ), "mBart requires --target_lang and --source_lang"
            if isinstance(tokenizer, MBartTokenizer):
                model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.target_lang]
            else:
                model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.target_lang)

        if model.config.decoder_start_token_id is None:
            raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")
        # endregion

        # region Prepare TF Dataset objects
        num_replicas = training_args.strategy.num_replicas_in_sync
        total_train_batch_size = training_args.per_device_train_batch_size * num_replicas
        total_eval_batch_size = training_args.per_device_eval_batch_size * num_replicas
        tf_train_dataset = dataset_to_tf(
            train_dataset,
            model,
            tokenizer,
            total_batch_size=total_train_batch_size,
            num_epochs=training_args.num_train_epochs,
            shuffle=True,
        )
        tf_eval_dataset = dataset_to_tf(
            eval_dataset,
            model,
            tokenizer,
            total_batch_size=total_eval_batch_size,
            num_epochs=1,
            shuffle=False,
        )
        # endregion

        # region Optimizer, loss and LR scheduling
        # Scheduler and math around the number of training steps.
        # Note that the dataset is batched at the total (cross-replica) batch size, so the number of
        # update steps per epoch is computed with `total_train_batch_size`, not the per-device size.
        if train_dataset is not None:
            num_update_steps_per_epoch = len(train_dataset) // total_train_batch_size
        else:
            # We still need a schedule length to build the optimizer for `compile` in eval-only runs.
            num_update_steps_per_epoch = 1
        num_train_steps = int(training_args.num_train_epochs * num_update_steps_per_epoch)
        optimizer, lr_schedule = create_optimizer(
            init_lr=training_args.learning_rate,
            num_train_steps=num_train_steps,
            num_warmup_steps=training_args.warmup_steps,
        )

        def masked_sparse_categorical_crossentropy(y_true, y_pred):
            # We clip the negative labels to 0 to avoid NaNs appearing in the output and
            # fouling up everything that comes afterwards. The loss values corresponding to clipped values
            # will be masked later anyway, but even masked NaNs seem to cause overflows for some reason.
            # 1e6 is chosen as a reasonable upper bound for the number of token indices - in the unlikely
            # event that you have more than 1 million tokens in your vocabulary, consider increasing this value.
            # More pragmatically, consider redesigning your tokenizer.
            losses = tf.keras.losses.sparse_categorical_crossentropy(
                tf.clip_by_value(y_true, 0, int(1e6)), y_pred, from_logits=True
            )
            # Compute the per-sample loss only over the unmasked tokens
            losses = tf.ragged.boolean_mask(losses, y_true != -100)
            losses = tf.reduce_mean(losses, axis=-1)
            return losses

        # endregion

        # region Metric and postprocessing
        metric = load_metric("sacrebleu")

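        # sacrebleu expects each reference as a list of possible translations, which is why every
        # label is wrapped in a single-element list below.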
        def postprocess_text(preds, labels):
            preds = [pred.strip() for pred in preds]
            labels = [[label.strip()] for label in labels]

            return preds, labels

        # endregion

        # region Training
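        # The loss is keyed by output name so that Keras applies the masked loss defined above to the
        # model's `logits` output.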
        model.compile(loss={"logits": masked_sparse_categorical_crossentropy}, optimizer=optimizer)

        if training_args.do_train:
            logger.info("***** Running training *****")
            logger.info(f"  Num examples = {len(train_dataset)}")
            logger.info(f"  Num Epochs = {training_args.num_train_epochs}")
            logger.info(f"  Instantaneous batch size per device = {training_args.per_device_train_batch_size}")
            logger.info(f"  Total train batch size = {total_train_batch_size}")
            logger.info(f"  Total optimization steps = {num_train_steps}")

            model.fit(
                tf_train_dataset,
                epochs=int(training_args.num_train_epochs),
                steps_per_epoch=num_update_steps_per_epoch,
            )
        # endregion

        # region Validation
        if data_args.val_max_target_length is None:
            data_args.val_max_target_length = data_args.max_target_length

        gen_kwargs = {
            "max_length": data_args.val_max_target_length,
            "num_beams": data_args.num_beams,
        }
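        # Evaluation runs full autoregressive generation via `model.generate`, then scores the
        # detokenized predictions with BLEU; expect this to be considerably slower than training steps.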
        if training_args.do_eval:
            logger.info("Evaluation...")
            # The dataset is batched at the total batch size, so the progress-bar total is computed
            # with `total_eval_batch_size` rather than the per-device batch size.
            for batch, labels in tqdm(
                tf_eval_dataset, total=len(eval_dataset) // total_eval_batch_size
            ):
                batch.update(gen_kwargs)
                generated_tokens = model.generate(**batch)
                if isinstance(generated_tokens, tuple):
                    generated_tokens = generated_tokens[0]
                decoded_preds = tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
                labels = np.where(labels != -100, labels, tokenizer.pad_token_id)
                decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)
                decoded_preds, decoded_labels = postprocess_text(decoded_preds, decoded_labels)

                metric.add_batch(predictions=decoded_preds, references=decoded_labels)
            eval_metric = metric.compute()
            logger.info({"bleu": eval_metric["score"]})
        # endregion

        if training_args.output_dir is not None:
            model.save_pretrained(training_args.output_dir)


if __name__ == "__main__":
    main()