Mirror of https://github.com/huggingface/transformers.git, synced 2025-08-03 03:31:05 +06:00.
add imports to examples (#3160)
This page shows commit e58b3ec5df (parent: 6ffe03a0a1).
@ -913,7 +913,7 @@ class BartForConditionalGeneration(PretrainedBartModel):
|
||||
|
||||
# Mask filling only works for bart-large
|
||||
from transformers import BartTokenizer, BartForConditionalGeneration
|
||||
tokenizer = AutoTokenizer.from_pretrained('bart-large')
|
||||
tokenizer = BartTokenizer.from_pretrained('bart-large')
|
||||
TXT = "My friends are <mask> but they eat too many carbs."
|
||||
model = BartForConditionalGeneration.from_pretrained('bart-large')
|
||||
input_ids = tokenizer.batch_encode_plus([TXT], return_tensors='pt')['input_ids']
|
||||
@ -1031,8 +1031,7 @@ class BartForConditionalGeneration(PretrainedBartModel):
|
||||
Examples::
|
||||
from transformers import BartTokenizer, BartForConditionalGeneration, BartConfig
|
||||
# see ``examples/summarization/bart/evaluate_cnn.py`` for a longer example
|
||||
config = BartConfig(vocab_size=50264, output_past=True) # no mask_token_id
|
||||
model = BartForConditionalGeneration.from_pretrained('bart-large-cnn', config=config)
|
||||
model = BartForConditionalGeneration.from_pretrained('bart-large-cnn')
|
||||
tokenizer = BartTokenizer.from_pretrained('bart-large-cnn')
|
||||
ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs."
|
||||
inputs = tokenizer.batch_encode_plus([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors='pt')
|
||||
|
Loading…
Reference in New Issue
Block a user