Use new evaluation loop in TrainerQA (#11746)

This commit is contained in:
Sylvain Gugger 2021-05-17 10:10:13 -04:00 committed by GitHub
parent 73893fc771
commit 936b57158a
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
1 changed file with 4 additions and 2 deletions

View File

@@ -39,8 +39,9 @@ class QuestionAnsweringTrainer(Trainer):
         # Temporarily disable metric computation, we will do it in the loop here.
         compute_metrics = self.compute_metrics
         self.compute_metrics = None
+        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
         try:
-            output = self.prediction_loop(
+            output = eval_loop(
                 eval_dataloader,
                 description="Evaluation",
                 # No point gathering the predictions if there are no metrics, otherwise we defer to
@@ -72,8 +73,9 @@ class QuestionAnsweringTrainer(Trainer):
         # Temporarily disable metric computation, we will do it in the loop here.
         compute_metrics = self.compute_metrics
         self.compute_metrics = None
+        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
         try:
-            output = self.prediction_loop(
+            output = eval_loop(
                 predict_dataloader,
                 description="Prediction",
                 # No point gathering the predictions if there are no metrics, otherwise we defer to