Small modification of comment in the run_glue.py example

Add RoBERTa to the comment, as it was not explicit that RoBERTa doesn't use token_type_ids.
This commit is contained in:
Luis 2019-08-29 09:54:45 +02:00 committed by Julien Chaumond
parent bf3dc778b8
commit fe8fb10b44

View File

@ -128,7 +128,7 @@ def train(args, train_dataset, model, tokenizer):
batch = tuple(t.to(args.device) for t in batch)
inputs = {'input_ids': batch[0],
'attention_mask': batch[1],
'token_type_ids': batch[2] if args.model_type in ['bert', 'xlnet'] else None, # XLM don't use segment_ids
'token_type_ids': batch[2] if args.model_type in ['bert', 'xlnet'] else None, # XLM and RoBERTa don't use segment_ids
'labels': batch[3]}
outputs = model(**inputs)
loss = outputs[0] # model outputs are always tuple in pytorch-transformers (see doc)