From fe8fb10b445b14adf872b205681fa41a7a932b28 Mon Sep 17 00:00:00 2001
From: Luis <30115537+Lawiss@users.noreply.github.com>
Date: Thu, 29 Aug 2019 09:54:45 +0200
Subject: [PATCH] Small modification of comment in the run_glue.py example

Add RoBERTa to the comment, as it was not explicit that RoBERTa doesn't
use token_type_ids.
---
 examples/run_glue.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/examples/run_glue.py b/examples/run_glue.py
index 53b46fc1025..89fb957b47f 100644
--- a/examples/run_glue.py
+++ b/examples/run_glue.py
@@ -128,7 +128,7 @@ def train(args, train_dataset, model, tokenizer):
             batch = tuple(t.to(args.device) for t in batch)
             inputs = {'input_ids':      batch[0],
                       'attention_mask': batch[1],
-                      'token_type_ids': batch[2] if args.model_type in ['bert', 'xlnet'] else None,  # XLM don't use segment_ids
+                      'token_type_ids': batch[2] if args.model_type in ['bert', 'xlnet'] else None,  # XLM and RoBERTa don't use segment_ids
                       'labels':         batch[3]}
             outputs = model(**inputs)
             loss = outputs[0]  # model outputs are always tuple in pytorch-transformers (see doc)
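
For context, a minimal sketch of the dispatch this comment documents: only
model types with segment embeddings (BERT, XLNet) receive token_type_ids,
while XLM and RoBERTa get None. The helper name build_inputs is hypothetical
and not part of the patch; the batch layout mirrors run_glue.py's conventions.

    def build_inputs(batch, model_type):
        # BERT and XLNet consume segment embeddings; XLM and RoBERTa don't
        # use segment_ids, so token_type_ids is passed as None and the model
        # falls back to its internal default.
        return {'input_ids':      batch[0],
                'attention_mask': batch[1],
                'token_type_ids': batch[2] if model_type in ['bert', 'xlnet'] else None,
                'labels':         batch[3]}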