Use new evaluation loop in TrainerQA (#11746)
parent 73893fc771
commit 936b57158a
@@ -39,8 +39,9 @@ class QuestionAnsweringTrainer(Trainer):
         # Temporarily disable metric computation, we will do it in the loop here.
         compute_metrics = self.compute_metrics
         self.compute_metrics = None
+        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
         try:
-            output = self.prediction_loop(
+            output = eval_loop(
                 eval_dataloader,
                 description="Evaluation",
                 # No point gathering the predictions if there are no metrics, otherwise we defer to
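For context, a minimal sketch of how the patched evaluate method reads after this hunk. The try/finally tail falls outside the lines shown above, so the restore step and the exact keyword arguments passed to the loop are assumptions, not part of this diff:

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        # Pick the new evaluation_loop unless the user opted into the legacy loop.
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                # No point gathering the predictions if there are no metrics, otherwise we defer to
                # self.args.prediction_loss_only
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
            )
        finally:
            # Assumed restore step just past the hunk: put the metric function
            # back whether or not the loop raised.
            self.compute_metrics = compute_metrics

Wrapping the loop call in try/finally means a failure inside either loop cannot leave the trainer with compute_metrics permanently set to None.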
@@ -72,8 +73,9 @@ class QuestionAnsweringTrainer(Trainer):
         # Temporarily disable metric computation, we will do it in the loop here.
         compute_metrics = self.compute_metrics
         self.compute_metrics = None
+        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
         try:
-            output = self.prediction_loop(
+            output = eval_loop(
                 predict_dataloader,
                 description="Prediction",
                 # No point gathering the predictions if there are no metrics, otherwise we defer to
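The flag consulted in both hunks, args.use_legacy_prediction_loop, is a TrainingArguments option, so callers can opt back into the old prediction_loop without touching the trainer. A minimal sketch, with a hypothetical output_dir value:

    from transformers import TrainingArguments

    # Opt back into the legacy prediction_loop; by default the new
    # evaluation_loop selected in this commit is used.
    args = TrainingArguments(
        output_dir="qa_out",  # hypothetical path
        use_legacy_prediction_loop=True,
    )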