fix: The 'warn' method is deprecated (#11105)

* The 'warn' method is deprecated

* fix test
Author: Stas Bekman, 2021-04-07 06:20:06 -07:00 (committed by GitHub)
parent 247bed3857
commit c9035e4537
52 changed files with 68 additions and 66 deletions
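
For context on the rename itself: `Logger.warn` is an undocumented alias of `Logger.warning` in Python's standard `logging` module (which the `transformers` logger wraps) and has been deprecated since Python 3.3; calling it emits the very deprecation message echoed in this commit's title. Below is a minimal, standalone sketch of the difference -- it is not Transformers code, just the stdlib behavior this commit reacts to.

import logging
import warnings

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("demo")  # hypothetical logger name, for illustration only

# Preferred, documented spelling -- what every call site below is switched to.
logger.warning("The block_size passed is larger than the maximum length for the model.")

# Deprecated alias: it forwards to `warning` but first emits a DeprecationWarning
# ("The 'warn' method is deprecated, use 'warning' instead"); recent Python releases
# drop the alias entirely, so code that keeps using it eventually breaks.
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    logger.warn("same message, deprecated spelling")
print([str(w.message) for w in caught])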

View File

@@ -330,14 +330,14 @@ def main():
if data_args.block_size is None:
block_size = tokenizer.model_max_length
if block_size > 1024:
-logger.warn(
+logger.warning(
f"The tokenizer picked seems to have a very large `model_max_length` ({tokenizer.model_max_length}). "
"Picking 1024 instead. You can change that default value by passing --block_size xxx."
)
block_size = 1024
else:
if data_args.block_size > tokenizer.model_max_length:
-logger.warn(
+logger.warning(
f"The block_size passed ({data_args.block_size}) is larger than the maximum length for the model"
f"({tokenizer.model_max_length}). Using block_size={tokenizer.model_max_length}."
)

View File

@@ -305,14 +305,14 @@ def main():
if args.block_size is None:
block_size = tokenizer.model_max_length
if block_size > 1024:
-logger.warn(
+logger.warning(
f"The tokenizer picked seems to have a very large `model_max_length` ({tokenizer.model_max_length}). "
"Picking 1024 instead. You can change that default value by passing --block_size xxx."
)
block_size = 1024
else:
if args.block_size > tokenizer.model_max_length:
-logger.warn(
+logger.warning(
f"The block_size passed ({args.block_size}) is larger than the maximum length for the model"
f"({tokenizer.model_max_length}). Using block_size={tokenizer.model_max_length}."
)

View File

@@ -324,14 +324,14 @@ def main():
if data_args.max_seq_length is None:
max_seq_length = tokenizer.model_max_length
if max_seq_length > 1024:
-logger.warn(
+logger.warning(
f"The tokenizer picked seems to have a very large `model_max_length` ({tokenizer.model_max_length}). "
"Picking 1024 instead. You can change that default value by passing --max_seq_length xxx."
)
max_seq_length = 1024
else:
if data_args.max_seq_length > tokenizer.model_max_length:
-logger.warn(
+logger.warning(
f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
)

View File

@@ -308,14 +308,14 @@ def main():
if args.max_seq_length is None:
max_seq_length = tokenizer.model_max_length
if max_seq_length > 1024:
-logger.warn(
+logger.warning(
f"The tokenizer picked seems to have a very large `model_max_length` ({tokenizer.model_max_length}). "
"Picking 1024 instead. You can change that default value by passing --max_seq_length xxx."
)
max_seq_length = 1024
else:
if args.max_seq_length > tokenizer.model_max_length:
-logger.warn(
+logger.warning(
f"The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the"
f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
)

View File

@@ -319,7 +319,7 @@ def main():
text_column_name = "text" if "text" in column_names else column_names[0]
if data_args.max_seq_length > tokenizer.model_max_length:
-logger.warn(
+logger.warning(
f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
)

View File

@@ -436,7 +436,7 @@ def load_and_cache_examples(args, tokenizer, evaluate=False, output_examples=Fal
raise ImportError("If not data_dir is specified, tensorflow_datasets needs to be installed.")
if args.version_2_with_negative:
logger.warn("tensorflow_datasets does not handle version 2 of SQuAD.")
logger.warning("tensorflow_datasets does not handle version 2 of SQuAD.")
tfds_examples = tfds.load("squad")
examples = SquadV1Processor().get_examples_from_dataset(tfds_examples, evaluate=evaluate)

View File

@@ -73,7 +73,7 @@ class Seq2SeqTrainer(Trainer):
), "Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss calculation or doing label smoothing."
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
-logger.warn(
+logger.warning(
f"The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for padding.."
)
@@ -127,7 +127,7 @@ class Seq2SeqTrainer(Trainer):
if self.lr_scheduler is None:
self.lr_scheduler = self._get_lr_scheduler(num_training_steps)
else: # ignoring --lr_scheduler
logger.warn("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.")
logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.")
def _get_lr_scheduler(self, num_training_steps):
schedule_func = arg_to_scheduler[self.args.lr_scheduler]

View File

@@ -310,14 +310,14 @@ def main():
if data_args.max_seq_length is None:
max_seq_length = tokenizer.model_max_length
if max_seq_length > 1024:
-logger.warn(
+logger.warning(
f"The tokenizer picked seems to have a very large `model_max_length` ({tokenizer.model_max_length}). "
"Picking 1024 instead. You can change that default value by passing --max_seq_length xxx."
)
max_seq_length = 1024
else:
if data_args.max_seq_length > tokenizer.model_max_length:
-logger.warn(
+logger.warning(
f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
)

View File

@@ -324,7 +324,7 @@ def main():
pad_on_right = tokenizer.padding_side == "right"
if data_args.max_seq_length > tokenizer.model_max_length:
-logger.warn(
+logger.warning(
f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
)

View File

@@ -313,7 +313,7 @@ def main():
pad_on_right = tokenizer.padding_side == "right"
if data_args.max_seq_length > tokenizer.model_max_length:
-logger.warn(
+logger.warning(
f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
)

View File

@@ -291,7 +291,7 @@ def main():
pad_on_right = tokenizer.padding_side == "right"
if args.max_seq_length > tokenizer.model_max_length:
-logger.warn(
+logger.warning(
f"The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the"
f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
)

View File

@@ -343,7 +343,7 @@ def main():
pad_on_right = tokenizer.padding_side == "right"
if args.max_seq_length > tokenizer.model_max_length:
-logger.warn(
+logger.warning(
f"The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the"
f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
)

View File

@@ -181,7 +181,7 @@ def main():
# Get datasets
if data_args.use_tfds:
if data_args.version_2_with_negative:
logger.warn("tensorflow_datasets does not handle version 2 of SQuAD. Switch to version 1 automatically")
logger.warning("tensorflow_datasets does not handle version 2 of SQuAD. Switch to version 1 automatically")
try:
import tensorflow_datasets as tfds

View File

@@ -629,7 +629,7 @@ def load_and_cache_examples(args, tokenizer, evaluate=False, output_examples=Fal
raise ImportError("If not data_dir is specified, tensorflow_datasets needs to be installed.")
if args.version_2_with_negative:
logger.warn("tensorflow_datasets does not handle version 2 of SQuAD.")
logger.warning("tensorflow_datasets does not handle version 2 of SQuAD.")
tfds_examples = tfds.load("squad")
examples = SquadV1Processor().get_examples_from_dataset(tfds_examples, evaluate=evaluate)

View File

@@ -394,7 +394,7 @@ def main():
padding = "max_length" if data_args.pad_to_max_length else False
if training_args.label_smoothing_factor > 0 and not hasattr(model, "prepare_decoder_input_ids_from_labels"):
-logger.warn(
+logger.warning(
"label_smoothing is enabled but the `prepare_decoder_input_ids_from_labels` method is not defined for"
f"`{model.__class__.__name__}`. This will lead to loss being calculated twice and will take up more memory"
)

View File

@@ -367,7 +367,7 @@ def main():
padding = "max_length" if data_args.pad_to_max_length else False
if training_args.label_smoothing_factor > 0 and not hasattr(model, "prepare_decoder_input_ids_from_labels"):
-logger.warn(
+logger.warning(
"label_smoothing is enabled but the `prepare_decoder_input_ids_from_labels` method is not defined for"
f"`{model.__class__.__name__}`. This will lead to loss being calculated twice and will take up more memory"
)

View File

@@ -351,7 +351,7 @@ def main():
if list(sorted(label_name_to_id.keys())) == list(sorted(label_list)):
label_to_id = {i: int(label_name_to_id[label_list[i]]) for i in range(num_labels)}
else:
-logger.warn(
+logger.warning(
"Your model seems to have been trained with labels, but they don't match the dataset: ",
f"model labels: {list(sorted(label_name_to_id.keys()))}, dataset labels: {list(sorted(label_list))}."
"\nIgnoring the model labels as a result.",
@@ -360,7 +360,7 @@ def main():
label_to_id = {v: i for i, v in enumerate(label_list)}
if data_args.max_seq_length > tokenizer.model_max_length:
-logger.warn(
+logger.warning(
f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
)

View File

@@ -274,7 +274,7 @@ def main():
)
label_to_id = {i: label_name_to_id[label_list[i]] for i in range(num_labels)}
else:
-logger.warn(
+logger.warning(
"Your model seems to have been trained with labels, but they don't match the dataset: ",
f"model labels: {list(sorted(label_name_to_id.keys()))}, dataset labels: {list(sorted(label_list))}."
"\nIgnoring the model labels as a result.",

View File

@@ -262,7 +262,7 @@ class PretrainedConfig(object):
# TPU arguments
if kwargs.pop("xla_device", None) is not None:
-logger.warn(
+logger.warning(
"The `xla_device` argument has been deprecated in v4.4.0 of Transformers. It is ignored and you can "
"safely remove it from your `config.json` file."
)

View File

@@ -152,7 +152,7 @@ class SquadDataset(Dataset):
)
if self.dataset is None or self.examples is None:
-logger.warn(
+logger.warning(
f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in future run"
)
else:

View File

@@ -194,7 +194,7 @@ if (
and "PYTORCH_TRANSFORMERS_CACHE" not in os.environ
and "TRANSFORMERS_CACHE" not in os.environ
):
-logger.warn(
+logger.warning(
"In Transformers v4.0.0, the default path to cache downloaded models changed from "
"'~/.cache/torch/transformers' to '~/.cache/huggingface/transformers'. Since you don't seem to have overridden "
"and '~/.cache/torch/transformers' is a directory that exists, we're moving it to "

View File

@@ -54,7 +54,7 @@ from .trainer_utils import PREFIX_CHECKPOINT_DIR, BestRun, IntervalStrategy # n
def is_wandb_available():
# any value of WANDB_DISABLED disables wandb
if os.getenv("WANDB_DISABLED", "").upper() in ENV_VARS_TRUE_VALUES:
-logger.warn(
+logger.warning(
"Using the `WAND_DISABLED` environment variable is deprecated and will be removed in v5. Use the "
"--report_to flag to control the integrations used for logging result (for instance --report_to none)."
)

View File

@@ -290,7 +290,7 @@ def booleans_processing(config, **kwargs):
or kwargs["output_hidden_states"] is not None
or ("use_cache" in kwargs and kwargs["use_cache"] is not None)
):
-tf_logger.warn(
+tf_logger.warning(
"The parameters `output_attentions`, `output_hidden_states` and `use_cache` cannot be updated when calling a model."
"They have to be set to True/False in the config object (i.e.: `config=XConfig.from_pretrained('name', output_attentions=True)`)."
)
@@ -299,7 +299,9 @@ def booleans_processing(config, **kwargs):
final_booleans["output_hidden_states"] = config.output_hidden_states
if kwargs["return_dict"] is not None:
tf_logger.warn("The parameter `return_dict` cannot be set in graph mode and will always be set to `True`.")
tf_logger.warning(
"The parameter `return_dict` cannot be set in graph mode and will always be set to `True`."
)
final_booleans["return_dict"] = True
if "use_cache" in kwargs:
@@ -398,7 +400,7 @@ def input_processing(func, config, input_ids, **kwargs):
if isinstance(v, allowed_types) or v is None:
output[k] = v
elif k not in parameter_names and "args" not in parameter_names:
-logger.warn(
+logger.warning(
f"The parameter {k} does not belongs to the parameter list {parameter_names} and will be ignored."
)
continue

View File

@@ -409,7 +409,7 @@ class AutoTokenizer:
# if model is an encoder decoder, the encoder tokenizer class is used by default
if isinstance(config, EncoderDecoderConfig):
if type(config.decoder) is not type(config.encoder): # noqa: E721
-logger.warn(
+logger.warning(
f"The encoder model config class: {config.encoder.__class__} is different from the decoder model "
f"config class: {config.decoder.__class}. It is not recommended to use the "
"`AutoTokenizer.from_pretrained()` method in this case. Please use the encoder and decoder "

View File

@@ -1011,7 +1011,7 @@ class BartDecoder(BartPretrainedModel):
if getattr(self.config, "gradient_checkpointing", False) and self.training:
if use_cache:
-logger.warn(
+logger.warning(
"`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting "
"`use_cache=False`..."
)

View File

@@ -544,7 +544,7 @@ class BertEncoder(nn.Module):
if getattr(self.config, "gradient_checkpointing", False) and self.training:
if use_cache:
-logger.warn(
+logger.warning(
"`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting "
"`use_cache=False`..."
)

View File

@@ -450,7 +450,7 @@ class BertGenerationDecoder(BertGenerationPreTrainedModel):
super().__init__(config)
if not config.is_decoder:
logger.warn("If you want to use `BertGenerationDecoder` as a standalone, add `is_decoder=True.`")
logger.warning("If you want to use `BertGenerationDecoder` as a standalone, add `is_decoder=True.`")
self.bert = BertGenerationEncoder(config)
self.lm_head = BertGenerationOnlyLMHead(config)

View File

@@ -1586,7 +1586,7 @@ class BigBirdEncoder(nn.Module):
if getattr(self.config, "gradient_checkpointing", False) and self.training:
if use_cache:
-logger.warn(
+logger.warning(
"`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting "
"`use_cache=False`..."
)

View File

@@ -973,7 +973,7 @@ class BlenderbotDecoder(BlenderbotPreTrainedModel):
if getattr(self.config, "gradient_checkpointing", False) and self.training:
if use_cache:
-logger.warn(
+logger.warning(
"`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting "
"`use_cache=False`..."
)

View File

@@ -974,7 +974,7 @@ class BlenderbotSmallDecoder(BlenderbotSmallPreTrainedModel):
if getattr(self.config, "gradient_checkpointing", False) and self.training:
if use_cache:
-logger.warn(
+logger.warning(
"`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting "
"`use_cache=False`..."
)

View File

@@ -541,7 +541,7 @@ class ElectraEncoder(nn.Module):
if getattr(self.config, "gradient_checkpointing", False) and self.training:
if use_cache:
-logger.warn(
+logger.warning(
"`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting "
"`use_cache=False`..."
)

View File

@@ -726,7 +726,7 @@ class GPT2Model(GPT2PreTrainedModel):
if getattr(self.config, "gradient_checkpointing", False) and self.training:
if use_cache:
-logger.warn(
+logger.warning(
"`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting "
"`use_cache=False`..."
)

View File

@@ -823,7 +823,7 @@ class GPTNeoModel(GPTNeoPreTrainedModel):
if getattr(self.config, "gradient_checkpointing", False) and self.training:
if use_cache:
-logger.warn(
+logger.warning(
"`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting "
"`use_cache=False`..."
)

View File

@@ -470,7 +470,7 @@ class LayoutLMEncoder(nn.Module):
if getattr(self.config, "gradient_checkpointing", False) and self.training:
if use_cache:
-logger.warn(
+logger.warning(
"`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting "
"`use_cache=False`..."
)

View File

@@ -2070,7 +2070,7 @@ class LEDDecoder(LEDPreTrainedModel):
if getattr(self.config, "gradient_checkpointing", False) and self.training:
if use_cache:
-logger.warn(
+logger.warning(
"`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting "
"`use_cache=False`..."
)

View File

@@ -968,7 +968,7 @@ class M2M100Decoder(M2M100PreTrainedModel):
if getattr(self.config, "gradient_checkpointing", False) and self.training:
if use_cache:
-logger.warn(
+logger.warning(
"`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting "
"`use_cache=False`..."
)

View File

@@ -981,7 +981,7 @@ class MarianDecoder(MarianPreTrainedModel):
if getattr(self.config, "gradient_checkpointing", False) and self.training:
if use_cache:
-logger.warn(
+logger.warning(
"`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting "
"`use_cache=False`..."
)

View File

@@ -1020,7 +1020,7 @@ class MBartDecoder(MBartPreTrainedModel):
if getattr(self.config, "gradient_checkpointing", False) and self.training:
if use_cache:
-logger.warn(
+logger.warning(
"`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting "
"`use_cache=False`..."
)

View File

@@ -987,7 +987,7 @@ class PegasusDecoder(PegasusPreTrainedModel):
if getattr(self.config, "gradient_checkpointing", False) and self.training:
if use_cache:
-logger.warn(
+logger.warning(
"`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting "
"`use_cache=False`..."
)

View File

@@ -1475,7 +1475,7 @@ class ProphetNetDecoder(ProphetNetPreTrainedModel):
if getattr(self.config, "gradient_checkpointing", False) and self.training:
if use_cache:
-logger.warn(
+logger.warning(
"`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting "
"`use_cache=False`..."
)

View File

@@ -484,7 +484,7 @@ class RobertaEncoder(nn.Module):
if getattr(self.config, "gradient_checkpointing", False) and self.training:
if use_cache:
-logger.warn(
+logger.warning(
"`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting "
"`use_cache=False`..."
)

View File

@@ -1015,7 +1015,7 @@ class Speech2TextDecoder(Speech2TextPreTrainedModel):
if getattr(self.config, "gradient_checkpointing", False) and self.training:
if use_cache:
-logger.warn(
+logger.warning(
"`use_cache = True` is incompatible with `config.gradient_checkpointing = True`. Setting `use_cache = False`..."
)
use_cache = False

View File

@@ -111,7 +111,7 @@ def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
if not is_used:
unused_weights.append(name)
logger.warn(f"Unused weights: {unused_weights}")
logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):

View File

@@ -1140,7 +1140,7 @@ class TFXLMForMultipleChoice(TFXLMPreTrainedModel, TFMultipleChoiceLoss):
)
if inputs["lengths"] is not None:
-logger.warn(
+logger.warning(
"The `lengths` parameter cannot be used with the XLM multiple choice models. Please use the "
"attention mask instead.",
)

View File

@@ -1232,7 +1232,7 @@ class XLMForMultipleChoice(XLMPreTrainedModel):
)
if lengths is not None:
-logger.warn(
+logger.warning(
"The `lengths` parameter cannot be used with the XLM multiple choice models. Please use the "
"attention mask instead."
)

View File

@@ -142,7 +142,7 @@ class ZeroShotClassificationPipeline(Pipeline):
"""
if "multi_class" in kwargs and kwargs["multi_class"] is not None:
multi_label = kwargs.pop("multi_class")
-logger.warn(
+logger.warning(
"The `multi_class` argument has been deprecated and renamed to `multi_label`. "
"`multi_class` will be removed in a future version of Transformers."
)

View File

@@ -289,7 +289,7 @@ class CallbackHandler(TrainerCallback):
self.eval_dataloader = None
if not any(isinstance(cb, DefaultFlowCallback) for cb in self.callbacks):
-logger.warn(
+logger.warning(
"The Trainer will not work properly if you don't have a `DefaultFlowCallback` in its callbacks. You\n"
+ "should add one before training with `trainer.add_callback(DefaultFlowCallback). The current list of"
+ "callbacks is\n:"
@@ -300,7 +300,7 @@ class CallbackHandler(TrainerCallback):
cb = callback() if isinstance(callback, type) else callback
cb_class = callback if isinstance(callback, type) else callback.__class__
if cb_class in [c.__class__ for c in self.callbacks]:
-logger.warn(
+logger.warning(
f"You are adding a {cb_class} to the callbacks of this Trainer, but there is already one. The current"
+ "list of callbacks is\n:"
+ self.callback_list

View File

@@ -391,7 +391,7 @@ class DistributedTensorGatherer:
if self._storage is None:
return
if self._offsets[0] != self.process_length:
logger.warn("Not all data has been set. Are you sure you passed all values?")
logger.warning("Not all data has been set. Are you sure you passed all values?")
return nested_truncate(self._storage, self.num_samples)
@@ -589,7 +589,7 @@ def _get_learning_rate(self):
last_lr = self.lr_scheduler.get_last_lr()[0]
except AssertionError as e:
if "need to call step" in str(e):
logger.warn("tried to get lr value before scheduler/optimizer started stepping, returning lr=0")
logger.warning("tried to get lr value before scheduler/optimizer started stepping, returning lr=0")
last_lr = 0
else:
raise

View File

@@ -531,7 +531,7 @@ class {{cookiecutter.camelcase_modelname}}Encoder(nn.Module):
if getattr(self.config, "gradient_checkpointing", False) and self.training:
if use_cache:
-logger.warn(
+logger.warning(
"`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting "
"`use_cache=False`..."
)
@@ -2512,7 +2512,7 @@ class {{cookiecutter.camelcase_modelname}}Decoder({{cookiecutter.camelcase_model
if getattr(self.config, "gradient_checkpointing", False) and self.training:
if use_cache:
logger.warn("`use_cache = True` is incompatible with `config.gradient_checkpointing = True`. Setting `use_cache = False`...")
logger.warning("`use_cache = True` is incompatible with `config.gradient_checkpointing = True`. Setting `use_cache = False`...")
use_cache = False
def create_custom_forward(module):

View File

@@ -353,7 +353,7 @@ def main():
if list(sorted(label_name_to_id.keys())) == list(sorted(label_list)):
label_to_id = {i: int(label_name_to_id[label_list[i]]) for i in range(num_labels)}
else:
-logger.warn(
+logger.warning(
"Your model seems to have been trained with labels, but they don't match the dataset: ",
f"model labels: {list(sorted(label_name_to_id.keys()))}, dataset labels: {list(sorted(label_list))}."
"\nIgnoring the model labels as a result.",
@@ -362,7 +362,7 @@ def main():
label_to_id = {v: i for i, v in enumerate(label_list)}
if data_args.max_seq_length > tokenizer.model_max_length:
-logger.warn(
+logger.warning(
f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
)

View File

@@ -51,7 +51,7 @@ class HfArgumentParserTest(unittest.TestCase):
# should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
if level_origin <= logging.WARNING:
with CaptureLogger(logger) as cl:
-logger.warn(msg)
+logger.warning(msg)
self.assertEqual(cl.out, msg + "\n")
# this is setting the level for all of `transformers.*` loggers
@@ -59,7 +59,7 @@ class HfArgumentParserTest(unittest.TestCase):
# should not be able to log warnings
with CaptureLogger(logger) as cl:
-logger.warn(msg)
+logger.warning(msg)
self.assertEqual(cl.out, "")
# should be able to log warnings again

View File

@@ -234,7 +234,7 @@ class TrainerCallbackTest(unittest.TestCase):
self.assertEqual(events, self.get_expected_events(trainer))
# warning should be emitted for duplicated callbacks
-with unittest.mock.patch("transformers.trainer_callback.logger.warn") as warn_mock:
+with unittest.mock.patch("transformers.trainer_callback.logger.warning") as warn_mock:
trainer = self.get_trainer(
callbacks=[MyTestTrainerCallback, MyTestTrainerCallback],
)
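
The "fix test" part of the commit message corresponds to the hunk above: the callback test patches the logger method by its attribute name, so once the production code calls `warning` instead of `warn`, the mock target has to follow or the mock is never hit. A self-contained sketch of that pattern follows; the module, callback, and message here are illustrative stand-ins, not the actual Transformers test.

import logging
import unittest
from unittest import mock

logger = logging.getLogger("trainer_callback_demo")  # stand-in for transformers.trainer_callback.logger

def add_duplicate_callback():
    # Stand-in for CallbackHandler.add_callback warning about a duplicated callback.
    logger.warning("You are adding a MyTestTrainerCallback to the callbacks of this Trainer, but there is already one.")

class DuplicateCallbackWarningTest(unittest.TestCase):
    def test_warning_emitted_for_duplicated_callbacks(self):
        # Patch `warning`, matching the renamed call site; patching the old `warn`
        # attribute would leave the real call unobserved and the assertion meaningless.
        with mock.patch.object(logger, "warning") as warning_mock:
            add_duplicate_callback()
        self.assertTrue(warning_mock.called)

if __name__ == "__main__":
    unittest.main()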