mirror of
https://github.com/huggingface/transformers.git
synced 2025-08-02 03:01:07 +06:00
Corrected a typo in the example for the T5 model input argument
This commit is contained in:
parent
8efc6dd544
commit
ed6ba93912
@@ -693,7 +693,7 @@ class T5Model(T5PreTrainedModel):
     tokenizer = T5Tokenizer.from_pretrained('t5-small')
     model = T5Model.from_pretrained('t5-small')
     input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0)  # Batch size 1
-    outputs = model(input_ids)
+    outputs = model(input_ids=input_ids)
     last_hidden_states = outputs[0]  # The last hidden-state is the first element of the output tuple

 """
||||||
@@ -798,7 +798,7 @@ class T5WithLMHeadModel(T5PreTrainedModel):
     tokenizer = T5Tokenizer.from_pretrained('t5-small')
     model = T5WithLMHeadModel.from_pretrained('t5-small')
     input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0)  # Batch size 1
-    outputs = model(input_ids, lm_labels=input_ids)
+    outputs = model(input_ids=input_ids, lm_labels=input_ids)
     loss, prediction_scores = outputs[:2]

 """
|
@@ -610,7 +610,7 @@ class TFT5Model(TFT5PreTrainedModel):
     tokenizer = T5Tokenizer.from_pretrained('t5-small')
     model = TFT5Model.from_pretrained('t5-small')
     input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :]  # Batch size 1
-    outputs = model(input_ids)
+    outputs = model(input_ids=input_ids)
     last_hidden_states = outputs[0]  # The last hidden-state is the first element of the output tuple

 """
@@ -701,7 +701,7 @@ class TFT5WithLMHeadModel(TFT5PreTrainedModel):
     tokenizer = T5Tokenizer.from_pretrained('t5-small')
     model = TFT5WithLMHeadModel.from_pretrained('t5-small')
     input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :]  # Batch size 1
-    outputs = model(input_ids)
+    outputs = model(input_ids=input_ids)
     prediction_scores = outputs[0]

 """
|
Loading…
Reference in New Issue
Block a user