From 68793e64637a135ebb4db234bc0d27b6fd8de3d8 Mon Sep 17 00:00:00 2001
From: sileod
Date: Thu, 2 Nov 2023 10:18:17 +0100
Subject: [PATCH] Update models.py

---
 src/tasknet/models.py | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/src/tasknet/models.py b/src/tasknet/models.py
index 057c5a9..b9875ec 100755
--- a/src/tasknet/models.py
+++ b/src/tasknet/models.py
@@ -342,7 +342,6 @@ class default:
             save_steps = 1000000
             label_names = ["labels"]
             include_inputs_for_metrics = True
-            model_name = "sileod/deberta-v3-base-tasksource-nli"

         default, hparams = to_dict(default), to_dict(hparams)
         self.p = hparams.get('p', 1)
@@ -350,7 +349,7 @@ class default:
         self.batched = hparams.get('batched',False)

         trainer_args = transformers.TrainingArguments(
-            **fc.project({**default,**hparams}, dir(transformers.TrainingArguments))
+            **{**default, **fc.project(hparams, dir(transformers.TrainingArguments))},
         )
         if not tokenizer:
             tokenizer = AutoTokenizer.from_pretrained(hparams["model_name"])
@@ -383,7 +382,7 @@ class default:
             task: dataset["test"] for task, dataset in self.processed_tasks.items()
         }

-        # We preventstrainer from automatically evaluating on each dataset:
+        # We prevent Trainer from automatically evaluating on each dataset:
         # transformers.Trainer recognizes eval_dataset instances of "dict"
         # But we use a custom "evaluate" function so that we can use different metrics for each task
         self.eval_dataset = MappingProxyType(self.eval_dataset)
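
Note on the TrainingArguments change: the sketch below is a minimal illustration, not code from the patch, assuming funcy is imported as fc (as in tasknet) and using a hypothetical valid_keys list in place of dir(transformers.TrainingArguments). fc.project(mapping, keys) keeps only the listed keys, so the old expression filtered the defaults as well as the hparams, whereas the new expression passes the defaults through unfiltered and filters only the user hparams. That is also why the first hunk deletes model_name from the defaults: left in place, it would now reach TrainingArguments and raise a TypeError.

    import funcy as fc

    # Hypothetical stand-in for dir(transformers.TrainingArguments).
    valid_keys = ["learning_rate", "save_steps"]
    default = {"save_steps": 1000000, "model_name": "some/model"}  # defaults as they were pre-patch
    hparams = {"learning_rate": 3e-5}

    # Old expression: the filter silently dropped non-TrainingArguments defaults too.
    old = fc.project({**default, **hparams}, valid_keys)
    # {'save_steps': 1000000, 'learning_rate': 3e-05}

    # New expression: only hparams are filtered; every default passes through,
    # so a stray default such as model_name would now be handed to TrainingArguments.
    new = {**default, **fc.project(hparams, valid_keys)}
    # {'save_steps': 1000000, 'model_name': 'some/model', 'learning_rate': 3e-05}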
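
On the MappingProxyType line in the last hunk: as the corrected comment says, transformers.Trainer treats a dict-valued eval_dataset as a request to evaluate each entry separately, while tasknet uses its own evaluate function with per-task metrics. A minimal sketch of why the wrapper sidesteps that check, with placeholder dataset values: types.MappingProxyType is a read-only mapping view that is not a dict subclass, so an isinstance check against dict fails while key access still works.

    from types import MappingProxyType

    eval_dataset = {"task_a": "dataset_a", "task_b": "dataset_b"}  # placeholder values
    frozen = MappingProxyType(eval_dataset)

    assert not isinstance(frozen, dict)      # Trainer's dict check no longer matches
    assert frozen["task_a"] == "dataset_a"   # mapping access is unchanged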