mirror of
https://github.com/huggingface/transformers.git
synced 2025-08-03 03:31:05 +06:00
Update README to mention `add_special_tokens` more clearly in the example
This commit is contained in:
parent
50e6daf83a
commit
306af132d7
@ -93,7 +93,7 @@ for model_class, tokenizer_class, pretrained_weights in MODELS:
|
||||
model = model_class.from_pretrained(pretrained_weights)
|
||||
|
||||
# Encode text
|
||||
input_ids = torch.tensor([tokenizer.encode("Here is some text to encode")])
|
||||
input_ids = torch.tensor([tokenizer.encode("Here is some text to encode", add_special_tokens=True)]) # Add special tokens takes care of adding [CLS], [SEP], <s>... tokens in the right way for each model.
|
||||
with torch.no_grad():
|
||||
last_hidden_states = model(input_ids)[0] # Models outputs are now tuples
|
||||
|
||||
|
Loading…
Reference in New Issue
Block a user