diff --git a/src/transformers/file_utils.py b/src/transformers/file_utils.py index 40d73d89ac6..7de267c0595 100644 --- a/src/transformers/file_utils.py +++ b/src/transformers/file_utils.py @@ -791,10 +791,10 @@ def _prepare_output_docstrings(output_type, config_class): PT_TOKEN_CLASSIFICATION_SAMPLE = r""" Example:: - >>> from transformers import {tokenizer_class}, {model_class} + >>> from transformers import {processor_class}, {model_class} >>> import torch - >>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}') + >>> tokenizer = {processor_class}.from_pretrained('{checkpoint}') >>> model = {model_class}.from_pretrained('{checkpoint}') >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") @@ -808,10 +808,10 @@ PT_TOKEN_CLASSIFICATION_SAMPLE = r""" PT_QUESTION_ANSWERING_SAMPLE = r""" Example:: - >>> from transformers import {tokenizer_class}, {model_class} + >>> from transformers import {processor_class}, {model_class} >>> import torch - >>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}') + >>> tokenizer = {processor_class}.from_pretrained('{checkpoint}') >>> model = {model_class}.from_pretrained('{checkpoint}') >>> question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet" @@ -828,10 +828,10 @@ PT_QUESTION_ANSWERING_SAMPLE = r""" PT_SEQUENCE_CLASSIFICATION_SAMPLE = r""" Example:: - >>> from transformers import {tokenizer_class}, {model_class} + >>> from transformers import {processor_class}, {model_class} >>> import torch - >>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}') + >>> tokenizer = {processor_class}.from_pretrained('{checkpoint}') >>> model = {model_class}.from_pretrained('{checkpoint}') >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") @@ -844,10 +844,10 @@ PT_SEQUENCE_CLASSIFICATION_SAMPLE = r""" PT_MASKED_LM_SAMPLE = r""" Example:: - >>> from transformers import {tokenizer_class}, {model_class} + >>> from transformers import {processor_class}, {model_class} >>> import torch - >>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}') + >>> tokenizer = {processor_class}.from_pretrained('{checkpoint}') >>> model = {model_class}.from_pretrained('{checkpoint}') >>> inputs = tokenizer("The capital of France is {mask}.", return_tensors="pt") @@ -861,10 +861,10 @@ PT_MASKED_LM_SAMPLE = r""" PT_BASE_MODEL_SAMPLE = r""" Example:: - >>> from transformers import {tokenizer_class}, {model_class} + >>> from transformers import {processor_class}, {model_class} >>> import torch - >>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}') + >>> tokenizer = {processor_class}.from_pretrained('{checkpoint}') >>> model = {model_class}.from_pretrained('{checkpoint}') >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") @@ -876,10 +876,10 @@ PT_BASE_MODEL_SAMPLE = r""" PT_MULTIPLE_CHOICE_SAMPLE = r""" Example:: - >>> from transformers import {tokenizer_class}, {model_class} + >>> from transformers import {processor_class}, {model_class} >>> import torch - >>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}') + >>> tokenizer = {processor_class}.from_pretrained('{checkpoint}') >>> model = {model_class}.from_pretrained('{checkpoint}') >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced." 
@@ -899,9 +899,9 @@ PT_CAUSAL_LM_SAMPLE = r""" Example:: >>> import torch - >>> from transformers import {tokenizer_class}, {model_class} + >>> from transformers import {processor_class}, {model_class} - >>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}') + >>> tokenizer = {processor_class}.from_pretrained('{checkpoint}') >>> model = {model_class}.from_pretrained('{checkpoint}') >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") @@ -910,6 +910,79 @@ PT_CAUSAL_LM_SAMPLE = r""" >>> logits = outputs.logits """ +PT_SPEECH_BASE_MODEL_SAMPLE = r""" + Example:: + + >>> from transformers import {processor_class}, {model_class} + >>> from datasets import load_dataset + + >>> dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation") + >>> sampling_rate = dataset.features["audio"].sampling_rate + + >>> processor = {processor_class}.from_pretrained('{checkpoint}') + >>> model = {model_class}.from_pretrained('{checkpoint}') + + >>> # audio file is decoded on the fly + >>> inputs = processor(dataset[0]["audio"]["array"], sampling_rate=sampling_rate, return_tensors="pt") + >>> outputs = model(**inputs) + + >>> last_hidden_states = outputs.last_hidden_state +""" + +PT_SPEECH_CTC_SAMPLE = r""" + Example:: + + >>> from transformers import {processor_class}, {model_class} + >>> from datasets import load_dataset + >>> import torch + + >>> dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation") + >>> sampling_rate = dataset.features["audio"].sampling_rate + + >>> processor = {processor_class}.from_pretrained('{checkpoint}') + >>> model = {model_class}.from_pretrained('{checkpoint}') + + >>> # audio file is decoded on the fly + >>> inputs = processor(dataset[0]["audio"]["array"], sampling_rate=sampling_rate, return_tensors="pt") + >>> logits = model(**inputs).logits + >>> predicted_ids = torch.argmax(logits, dim=-1) + + >>> # transcribe speech + >>> transcription = processor.batch_decode(predicted_ids) + + >>> # compute loss + >>> with processor.as_target_processor(): + ... inputs["labels"] = processor(dataset[0]["text"], return_tensors="pt").input_ids + + >>> loss = model(**inputs).loss +""" + +PT_SPEECH_SEQ_CLASS_SAMPLE = r""" + Example:: + + >>> from transformers import {processor_class}, {model_class} + >>> from datasets import load_dataset + >>> import torch + + >>> dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation") + >>> sampling_rate = dataset.features["audio"].sampling_rate + + >>> feature_extractor = {processor_class}.from_pretrained('{checkpoint}') + >>> model = {model_class}.from_pretrained('{checkpoint}') + + >>> # audio file is decoded on the fly + >>> inputs = feature_extractor(dataset[0]["audio"]["array"], sampling_rate=sampling_rate, return_tensors="pt") + >>> logits = model(**inputs).logits + >>> predicted_class_ids = torch.argmax(logits, dim=-1).item() + >>> predicted_label = model.config.id2label[predicted_class_ids] + + >>> # compute loss - target_label is e.g.
"down" + >>> target_label = model.config.id2label[0] + >>> inputs["labels"] = torch.tensor([model.config.label2id[target_label]]) + >>> loss = model(**inputs).loss +""" + + PT_SAMPLE_DOCSTRINGS = { "SequenceClassification": PT_SEQUENCE_CLASSIFICATION_SAMPLE, "QuestionAnswering": PT_QUESTION_ANSWERING_SAMPLE, @@ -918,16 +991,19 @@ PT_SAMPLE_DOCSTRINGS = { "MaskedLM": PT_MASKED_LM_SAMPLE, "LMHead": PT_CAUSAL_LM_SAMPLE, "BaseModel": PT_BASE_MODEL_SAMPLE, + "SpeechBaseModel": PT_SPEECH_BASE_MODEL_SAMPLE, + "CTC": PT_SPEECH_CTC_SAMPLE, + "AudioClassification": PT_SPEECH_SEQ_CLASS_SAMPLE, } TF_TOKEN_CLASSIFICATION_SAMPLE = r""" Example:: - >>> from transformers import {tokenizer_class}, {model_class} + >>> from transformers import {processor_class}, {model_class} >>> import tensorflow as tf - >>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}') + >>> tokenizer = {processor_class}.from_pretrained('{checkpoint}') >>> model = {model_class}.from_pretrained('{checkpoint}') >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") @@ -942,10 +1018,10 @@ TF_TOKEN_CLASSIFICATION_SAMPLE = r""" TF_QUESTION_ANSWERING_SAMPLE = r""" Example:: - >>> from transformers import {tokenizer_class}, {model_class} + >>> from transformers import {processor_class}, {model_class} >>> import tensorflow as tf - >>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}') + >>> tokenizer = {processor_class}.from_pretrained('{checkpoint}') >>> model = {model_class}.from_pretrained('{checkpoint}') >>> question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet" @@ -961,10 +1037,10 @@ TF_QUESTION_ANSWERING_SAMPLE = r""" TF_SEQUENCE_CLASSIFICATION_SAMPLE = r""" Example:: - >>> from transformers import {tokenizer_class}, {model_class} + >>> from transformers import {processor_class}, {model_class} >>> import tensorflow as tf - >>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}') + >>> tokenizer = {processor_class}.from_pretrained('{checkpoint}') >>> model = {model_class}.from_pretrained('{checkpoint}') >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") @@ -978,10 +1054,10 @@ TF_SEQUENCE_CLASSIFICATION_SAMPLE = r""" TF_MASKED_LM_SAMPLE = r""" Example:: - >>> from transformers import {tokenizer_class}, {model_class} + >>> from transformers import {processor_class}, {model_class} >>> import tensorflow as tf - >>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}') + >>> tokenizer = {processor_class}.from_pretrained('{checkpoint}') >>> model = {model_class}.from_pretrained('{checkpoint}') >>> inputs = tokenizer("The capital of France is {mask}.", return_tensors="tf") @@ -995,10 +1071,10 @@ TF_MASKED_LM_SAMPLE = r""" TF_BASE_MODEL_SAMPLE = r""" Example:: - >>> from transformers import {tokenizer_class}, {model_class} + >>> from transformers import {processor_class}, {model_class} >>> import tensorflow as tf - >>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}') + >>> tokenizer = {processor_class}.from_pretrained('{checkpoint}') >>> model = {model_class}.from_pretrained('{checkpoint}') >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") @@ -1010,10 +1086,10 @@ TF_BASE_MODEL_SAMPLE = r""" TF_MULTIPLE_CHOICE_SAMPLE = r""" Example:: - >>> from transformers import {tokenizer_class}, {model_class} + >>> from transformers import {processor_class}, {model_class} >>> import tensorflow as tf - >>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}') + >>> tokenizer = {processor_class}.from_pretrained('{checkpoint}') >>> model 
= {model_class}.from_pretrained('{checkpoint}') >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced." @@ -1031,10 +1107,10 @@ TF_MULTIPLE_CHOICE_SAMPLE = r""" TF_CAUSAL_LM_SAMPLE = r""" Example:: - >>> from transformers import {tokenizer_class}, {model_class} + >>> from transformers import {processor_class}, {model_class} >>> import tensorflow as tf - >>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}') + >>> tokenizer = {processor_class}.from_pretrained('{checkpoint}') >>> model = {model_class}.from_pretrained('{checkpoint}') >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") @@ -1056,9 +1132,9 @@ TF_SAMPLE_DOCSTRINGS = { FLAX_TOKEN_CLASSIFICATION_SAMPLE = r""" Example:: - >>> from transformers import {tokenizer_class}, {model_class} + >>> from transformers import {processor_class}, {model_class} - >>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}') + >>> tokenizer = {processor_class}.from_pretrained('{checkpoint}') >>> model = {model_class}.from_pretrained('{checkpoint}') >>> inputs = tokenizer("Hello, my dog is cute", return_tensors='jax') @@ -1070,9 +1146,9 @@ FLAX_TOKEN_CLASSIFICATION_SAMPLE = r""" FLAX_QUESTION_ANSWERING_SAMPLE = r""" Example:: - >>> from transformers import {tokenizer_class}, {model_class} + >>> from transformers import {processor_class}, {model_class} - >>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}') + >>> tokenizer = {processor_class}.from_pretrained('{checkpoint}') >>> model = {model_class}.from_pretrained('{checkpoint}') >>> question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet" @@ -1086,9 +1162,9 @@ FLAX_QUESTION_ANSWERING_SAMPLE = r""" FLAX_SEQUENCE_CLASSIFICATION_SAMPLE = r""" Example:: - >>> from transformers import {tokenizer_class}, {model_class} + >>> from transformers import {processor_class}, {model_class} - >>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}') + >>> tokenizer = {processor_class}.from_pretrained('{checkpoint}') >>> model = {model_class}.from_pretrained('{checkpoint}') >>> inputs = tokenizer("Hello, my dog is cute", return_tensors='jax') @@ -1100,9 +1176,9 @@ FLAX_SEQUENCE_CLASSIFICATION_SAMPLE = r""" FLAX_MASKED_LM_SAMPLE = r""" Example:: - >>> from transformers import {tokenizer_class}, {model_class} + >>> from transformers import {processor_class}, {model_class} - >>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}') + >>> tokenizer = {processor_class}.from_pretrained('{checkpoint}') >>> model = {model_class}.from_pretrained('{checkpoint}') >>> inputs = tokenizer("The capital of France is {mask}.", return_tensors='jax') @@ -1114,9 +1190,9 @@ FLAX_MASKED_LM_SAMPLE = r""" FLAX_BASE_MODEL_SAMPLE = r""" Example:: - >>> from transformers import {tokenizer_class}, {model_class} + >>> from transformers import {processor_class}, {model_class} - >>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}') + >>> tokenizer = {processor_class}.from_pretrained('{checkpoint}') >>> model = {model_class}.from_pretrained('{checkpoint}') >>> inputs = tokenizer("Hello, my dog is cute", return_tensors='jax') @@ -1128,9 +1204,9 @@ FLAX_BASE_MODEL_SAMPLE = r""" FLAX_MULTIPLE_CHOICE_SAMPLE = r""" Example:: - >>> from transformers import {tokenizer_class}, {model_class} + >>> from transformers import {processor_class}, {model_class} - >>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}') + >>> tokenizer = {processor_class}.from_pretrained('{checkpoint}') >>> model = 
{model_class}.from_pretrained('{checkpoint}') >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced." @@ -1146,9 +1222,9 @@ FLAX_MULTIPLE_CHOICE_SAMPLE = r""" FLAX_CAUSAL_LM_SAMPLE = r""" Example:: - >>> from transformers import {tokenizer_class}, {model_class} + >>> from transformers import {processor_class}, {model_class} - >>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}') + >>> tokenizer = {processor_class}.from_pretrained('{checkpoint}') >>> model = {model_class}.from_pretrained('{checkpoint}') >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="np") @@ -1170,7 +1246,14 @@ FLAX_SAMPLE_DOCSTRINGS = { def add_code_sample_docstrings( - *docstr, tokenizer_class=None, checkpoint=None, output_type=None, config_class=None, mask=None, model_cls=None + *docstr, + processor_class=None, + checkpoint=None, + output_type=None, + config_class=None, + mask=None, + model_cls=None, + modality=None ): def docstring_decorator(fn): # model_class defaults to function's class if not specified otherwise @@ -1183,9 +1266,11 @@ def add_code_sample_docstrings( else: sample_docstrings = PT_SAMPLE_DOCSTRINGS - doc_kwargs = dict(model_class=model_class, tokenizer_class=tokenizer_class, checkpoint=checkpoint) + doc_kwargs = dict(model_class=model_class, processor_class=processor_class, checkpoint=checkpoint) - if "SequenceClassification" in model_class: + if "SequenceClassification" in model_class and modality == "audio": + code_sample = sample_docstrings["AudioClassification"] + elif "SequenceClassification" in model_class: code_sample = sample_docstrings["SequenceClassification"] elif "QuestionAnswering" in model_class: code_sample = sample_docstrings["QuestionAnswering"] @@ -1198,6 +1283,10 @@ def add_code_sample_docstrings( code_sample = sample_docstrings["MaskedLM"] elif "LMHead" in model_class or "CausalLM" in model_class: code_sample = sample_docstrings["LMHead"] + elif "CTC" in model_class: + code_sample = sample_docstrings["CTC"] + elif "Model" in model_class and modality == "audio": + code_sample = sample_docstrings["SpeechBaseModel"] elif "Model" in model_class or "Encoder" in model_class: code_sample = sample_docstrings["BaseModel"] else: diff --git a/src/transformers/modeling_flax_utils.py b/src/transformers/modeling_flax_utils.py index f2bad92fe5c..b4bc0729caf 100644 --- a/src/transformers/modeling_flax_utils.py +++ b/src/transformers/modeling_flax_utils.py @@ -528,7 +528,7 @@ def overwrite_call_docstring(model_class, docstring): def append_call_sample_docstring(model_class, tokenizer_class, checkpoint, output_type, config_class, mask=None): model_class.__call__ = copy_func(model_class.__call__) model_class.__call__ = add_code_sample_docstrings( - tokenizer_class=tokenizer_class, + processor_class=tokenizer_class, checkpoint=checkpoint, output_type=output_type, config_class=config_class, diff --git a/src/transformers/models/albert/modeling_albert.py b/src/transformers/models/albert/modeling_albert.py index e8d925b32eb..442242ad43c 100755 --- a/src/transformers/models/albert/modeling_albert.py +++ b/src/transformers/models/albert/modeling_albert.py @@ -665,7 +665,7 @@ class AlbertModel(AlbertPreTrainedModel): @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC, @@ -916,7 
+916,7 @@ class AlbertForMaskedLM(AlbertPreTrainedModel): @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC, @@ -995,7 +995,7 @@ class AlbertForSequenceClassification(AlbertPreTrainedModel): @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, @@ -1101,7 +1101,7 @@ class AlbertForTokenClassification(AlbertPreTrainedModel): @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC, @@ -1191,7 +1191,7 @@ class AlbertForQuestionAnswering(AlbertPreTrainedModel): @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, @@ -1290,7 +1290,7 @@ class AlbertForMultipleChoice(AlbertPreTrainedModel): @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=MultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC, diff --git a/src/transformers/models/albert/modeling_tf_albert.py b/src/transformers/models/albert/modeling_tf_albert.py index ba54f36940d..204b27f9cf9 100644 --- a/src/transformers/models/albert/modeling_tf_albert.py +++ b/src/transformers/models/albert/modeling_tf_albert.py @@ -783,7 +783,7 @@ class TFAlbertModel(TFAlbertPreTrainedModel): @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFBaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC, @@ -1000,7 +1000,7 @@ class TFAlbertForMaskedLM(TFAlbertPreTrainedModel, TFMaskedLanguageModelingLoss) @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFMaskedLMOutput, config_class=_CONFIG_FOR_DOC, @@ -1105,7 +1105,7 @@ class TFAlbertForSequenceClassification(TFAlbertPreTrainedModel, TFSequenceClass @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, @@ -1214,7 +1214,7 @@ class TFAlbertForTokenClassification(TFAlbertPreTrainedModel, TFTokenClassificat 
@add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFTokenClassifierOutput, config_class=_CONFIG_FOR_DOC, @@ -1315,7 +1315,7 @@ class TFAlbertForQuestionAnswering(TFAlbertPreTrainedModel, TFQuestionAnsweringL @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFQuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, @@ -1443,7 +1443,7 @@ class TFAlbertForMultipleChoice(TFAlbertPreTrainedModel, TFMultipleChoiceLoss): @add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFMultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC, diff --git a/src/transformers/models/bart/modeling_bart.py b/src/transformers/models/bart/modeling_bart.py index 318b084b715..61a9db8d8bb 100755 --- a/src/transformers/models/bart/modeling_bart.py +++ b/src/transformers/models/bart/modeling_bart.py @@ -1130,7 +1130,7 @@ class BartModel(BartPretrainedModel): @add_start_docstrings_to_model_forward(BART_INPUTS_DOCSTRING) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC, @@ -1400,7 +1400,7 @@ class BartForSequenceClassification(BartPretrainedModel): @add_start_docstrings_to_model_forward(BART_INPUTS_DOCSTRING) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=Seq2SeqSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, @@ -1512,7 +1512,7 @@ class BartForQuestionAnswering(BartPretrainedModel): @add_start_docstrings_to_model_forward(BART_INPUTS_DOCSTRING) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=Seq2SeqQuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, diff --git a/src/transformers/models/bart/modeling_tf_bart.py b/src/transformers/models/bart/modeling_tf_bart.py index 9d5363cbdf9..fc7823e73a3 100644 --- a/src/transformers/models/bart/modeling_tf_bart.py +++ b/src/transformers/models/bart/modeling_tf_bart.py @@ -1196,7 +1196,7 @@ class TFBartModel(TFBartPretrainedModel): @add_start_docstrings_to_model_forward(BART_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFSeq2SeqModelOutput, config_class=_CONFIG_FOR_DOC, diff --git a/src/transformers/models/bert/modeling_bert.py b/src/transformers/models/bert/modeling_bert.py index 9914f5d46f3..33681397338 100755 --- a/src/transformers/models/bert/modeling_bert.py +++ b/src/transformers/models/bert/modeling_bert.py @@ -886,7 +886,7 @@ class BertModel(BertPreTrainedModel): @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + 
processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPoolingAndCrossAttentions, config_class=_CONFIG_FOR_DOC, @@ -1302,7 +1302,7 @@ class BertForMaskedLM(BertPreTrainedModel): @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC, @@ -1501,7 +1501,7 @@ class BertForSequenceClassification(BertPreTrainedModel): @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, @@ -1600,7 +1600,7 @@ class BertForMultipleChoice(BertPreTrainedModel): @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=MultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC, @@ -1698,7 +1698,7 @@ class BertForTokenClassification(BertPreTrainedModel): @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC, @@ -1788,7 +1788,7 @@ class BertForQuestionAnswering(BertPreTrainedModel): @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, diff --git a/src/transformers/models/bert/modeling_tf_bert.py b/src/transformers/models/bert/modeling_tf_bert.py index 3791cd1c04a..25f9feb9902 100644 --- a/src/transformers/models/bert/modeling_tf_bert.py +++ b/src/transformers/models/bert/modeling_tf_bert.py @@ -1064,7 +1064,7 @@ class TFBertModel(TFBertPreTrainedModel): @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFBaseModelOutputWithPoolingAndCrossAttentions, config_class=_CONFIG_FOR_DOC, @@ -1335,7 +1335,7 @@ class TFBertForMaskedLM(TFBertPreTrainedModel, TFMaskedLanguageModelingLoss): @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFMaskedLMOutput, config_class=_CONFIG_FOR_DOC, @@ -1451,7 +1451,7 @@ class TFBertLMHeadModel(TFBertPreTrainedModel, TFCausalLanguageModelingLoss): } @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFCausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC, @@ -1704,7 +1704,7 @@ class TFBertForSequenceClassification(TFBertPreTrainedModel, TFSequenceClassific 
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, @@ -1814,7 +1814,7 @@ class TFBertForMultipleChoice(TFBertPreTrainedModel, TFMultipleChoiceLoss): @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFMultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC, @@ -1973,7 +1973,7 @@ class TFBertForTokenClassification(TFBertPreTrainedModel, TFTokenClassificationL @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFTokenClassifierOutput, config_class=_CONFIG_FOR_DOC, @@ -2080,7 +2080,7 @@ class TFBertForQuestionAnswering(TFBertPreTrainedModel, TFQuestionAnsweringLoss) @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFQuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, diff --git a/src/transformers/models/bert_generation/modeling_bert_generation.py b/src/transformers/models/bert_generation/modeling_bert_generation.py index cbb8a7f12c7..ad0d5ba8b76 100755 --- a/src/transformers/models/bert_generation/modeling_bert_generation.py +++ b/src/transformers/models/bert_generation/modeling_bert_generation.py @@ -300,7 +300,7 @@ class BertGenerationEncoder(BertGenerationPreTrainedModel): @add_start_docstrings_to_model_forward(BERT_GENERATION_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPastAndCrossAttentions, config_class=_CONFIG_FOR_DOC, diff --git a/src/transformers/models/big_bird/modeling_big_bird.py b/src/transformers/models/big_bird/modeling_big_bird.py index bdd55494f8d..ae415a7a79a 100755 --- a/src/transformers/models/big_bird/modeling_big_bird.py +++ b/src/transformers/models/big_bird/modeling_big_bird.py @@ -1974,7 +1974,7 @@ class BigBirdModel(BigBirdPreTrainedModel): @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPoolingAndCrossAttentions, config_class=_CONFIG_FOR_DOC, @@ -2380,7 +2380,7 @@ class BigBirdForMaskedLM(BigBirdPreTrainedModel): @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC, @@ -2646,7 +2646,7 @@ class BigBirdForSequenceClassification(BigBirdPreTrainedModel): @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format("batch_size, sequence_length")) 
@add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, @@ -2743,7 +2743,7 @@ class BigBirdForMultipleChoice(BigBirdPreTrainedModel): BIG_BIRD_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length") ) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=MultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC, @@ -2838,7 +2838,7 @@ class BigBirdForTokenClassification(BigBirdPreTrainedModel): @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC, @@ -2946,7 +2946,7 @@ class BigBirdForQuestionAnswering(BigBirdPreTrainedModel): @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint="google/bigbird-base-trivia-itc", output_type=BigBirdForQuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, diff --git a/src/transformers/models/bigbird_pegasus/modeling_bigbird_pegasus.py b/src/transformers/models/bigbird_pegasus/modeling_bigbird_pegasus.py index db21e9684e1..7a040298108 100755 --- a/src/transformers/models/bigbird_pegasus/modeling_bigbird_pegasus.py +++ b/src/transformers/models/bigbird_pegasus/modeling_bigbird_pegasus.py @@ -2338,7 +2338,7 @@ class BigBirdPegasusModel(BigBirdPegasusPreTrainedModel): @add_start_docstrings_to_model_forward(BIGBIRD_PEGASUS_INPUTS_DOCSTRING) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC, @@ -2611,7 +2611,7 @@ class BigBirdPegasusForSequenceClassification(BigBirdPegasusPreTrainedModel): @add_start_docstrings_to_model_forward(BIGBIRD_PEGASUS_INPUTS_DOCSTRING) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=Seq2SeqSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, @@ -2724,7 +2724,7 @@ class BigBirdPegasusForQuestionAnswering(BigBirdPegasusPreTrainedModel): @add_start_docstrings_to_model_forward(BIGBIRD_PEGASUS_INPUTS_DOCSTRING) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=Seq2SeqQuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, diff --git a/src/transformers/models/blenderbot/modeling_tf_blenderbot.py b/src/transformers/models/blenderbot/modeling_tf_blenderbot.py index 44db8312929..b8e7e6fe7ba 100644 --- a/src/transformers/models/blenderbot/modeling_tf_blenderbot.py +++ b/src/transformers/models/blenderbot/modeling_tf_blenderbot.py @@ -1206,7 +1206,7 @@ class TFBlenderbotModel(TFBlenderbotPreTrainedModel): @add_start_docstrings_to_model_forward(BLENDERBOT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFSeq2SeqModelOutput, config_class=_CONFIG_FOR_DOC, diff --git 
a/src/transformers/models/blenderbot_small/modeling_tf_blenderbot_small.py b/src/transformers/models/blenderbot_small/modeling_tf_blenderbot_small.py index 3bbf3216601..85fac373141 100644 --- a/src/transformers/models/blenderbot_small/modeling_tf_blenderbot_small.py +++ b/src/transformers/models/blenderbot_small/modeling_tf_blenderbot_small.py @@ -1194,7 +1194,7 @@ class TFBlenderbotSmallModel(TFBlenderbotSmallPreTrainedModel): @add_start_docstrings_to_model_forward(BLENDERBOT_SMALL_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFSeq2SeqModelOutput, config_class=_CONFIG_FOR_DOC, diff --git a/src/transformers/models/canine/modeling_canine.py b/src/transformers/models/canine/modeling_canine.py index 7afad51c1de..b461a6c0452 100644 --- a/src/transformers/models/canine/modeling_canine.py +++ b/src/transformers/models/canine/modeling_canine.py @@ -1096,7 +1096,7 @@ class CanineModel(CaninePreTrainedModel): @add_start_docstrings_to_model_forward(CANINE_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=CanineModelOutputWithPooling, config_class=_CONFIG_FOR_DOC, @@ -1277,7 +1277,7 @@ class CanineForSequenceClassification(CaninePreTrainedModel): @add_start_docstrings_to_model_forward(CANINE_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, @@ -1373,7 +1373,7 @@ class CanineForMultipleChoice(CaninePreTrainedModel): @add_start_docstrings_to_model_forward(CANINE_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=MultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC, @@ -1465,7 +1465,7 @@ class CanineForTokenClassification(CaninePreTrainedModel): @add_start_docstrings_to_model_forward(CANINE_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC, @@ -1552,7 +1552,7 @@ class CanineForQuestionAnswering(CaninePreTrainedModel): @add_start_docstrings_to_model_forward(CANINE_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, diff --git a/src/transformers/models/convbert/modeling_convbert.py b/src/transformers/models/convbert/modeling_convbert.py index 99d8ae5dd4c..2d4b0c57ca2 100755 --- a/src/transformers/models/convbert/modeling_convbert.py +++ b/src/transformers/models/convbert/modeling_convbert.py @@ -793,7 +793,7 @@ class ConvBertModel(ConvBertPreTrainedModel): @add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, 
output_type=BaseModelOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC, @@ -896,7 +896,7 @@ class ConvBertForMaskedLM(ConvBertPreTrainedModel): @add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC, @@ -999,7 +999,7 @@ class ConvBertForSequenceClassification(ConvBertPreTrainedModel): @add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, @@ -1096,7 +1096,7 @@ class ConvBertForMultipleChoice(ConvBertPreTrainedModel): CONVBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length") ) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=MultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC, @@ -1191,7 +1191,7 @@ class ConvBertForTokenClassification(ConvBertPreTrainedModel): @add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC, @@ -1278,7 +1278,7 @@ class ConvBertForQuestionAnswering(ConvBertPreTrainedModel): @add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, diff --git a/src/transformers/models/convbert/modeling_tf_convbert.py b/src/transformers/models/convbert/modeling_tf_convbert.py index 0cd7e6fa022..0888756fba7 100644 --- a/src/transformers/models/convbert/modeling_tf_convbert.py +++ b/src/transformers/models/convbert/modeling_tf_convbert.py @@ -754,7 +754,7 @@ class TFConvBertModel(TFConvBertPreTrainedModel): @add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFBaseModelOutput, config_class=_CONFIG_FOR_DOC, @@ -886,7 +886,7 @@ class TFConvBertForMaskedLM(TFConvBertPreTrainedModel, TFMaskedLanguageModelingL @add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFMaskedLMOutput, config_class=_CONFIG_FOR_DOC, @@ -1010,7 +1010,7 @@ class TFConvBertForSequenceClassification(TFConvBertPreTrainedModel, TFSequenceC @add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, @@ -1119,7 +1119,7 @@ class 
TFConvBertForMultipleChoice(TFConvBertPreTrainedModel, TFMultipleChoiceLos CONVBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length") ) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFMultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC, @@ -1257,7 +1257,7 @@ class TFConvBertForTokenClassification(TFConvBertPreTrainedModel, TFTokenClassif @add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFTokenClassifierOutput, config_class=_CONFIG_FOR_DOC, @@ -1352,7 +1352,7 @@ class TFConvBertForQuestionAnswering(TFConvBertPreTrainedModel, TFQuestionAnswer @add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFQuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, diff --git a/src/transformers/models/ctrl/modeling_ctrl.py b/src/transformers/models/ctrl/modeling_ctrl.py index cdf32828b9f..ea98f1322e7 100644 --- a/src/transformers/models/ctrl/modeling_ctrl.py +++ b/src/transformers/models/ctrl/modeling_ctrl.py @@ -355,7 +355,7 @@ class CTRLModel(CTRLPreTrainedModel): @add_start_docstrings_to_model_forward(CTRL_INPUTS_DOCSTRING) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPast, config_class=_CONFIG_FOR_DOC, @@ -516,7 +516,7 @@ class CTRLLMHeadModel(CTRLPreTrainedModel): @add_start_docstrings_to_model_forward(CTRL_INPUTS_DOCSTRING) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC, @@ -619,7 +619,7 @@ class CTRLForSequenceClassification(CTRLPreTrainedModel): @add_start_docstrings_to_model_forward(CTRL_INPUTS_DOCSTRING) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, diff --git a/src/transformers/models/ctrl/modeling_tf_ctrl.py b/src/transformers/models/ctrl/modeling_tf_ctrl.py index a4cf3f509ce..7c701a59c60 100644 --- a/src/transformers/models/ctrl/modeling_tf_ctrl.py +++ b/src/transformers/models/ctrl/modeling_tf_ctrl.py @@ -543,7 +543,7 @@ class TFCTRLModel(TFCTRLPreTrainedModel): @add_start_docstrings_to_model_forward(CTRL_INPUTS_DOCSTRING) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFBaseModelOutputWithPast, config_class=_CONFIG_FOR_DOC, @@ -671,7 +671,7 @@ class TFCTRLLMHeadModel(TFCTRLPreTrainedModel, TFCausalLanguageModelingLoss): @add_start_docstrings_to_model_forward(CTRL_INPUTS_DOCSTRING) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFCausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC, @@ -795,7 +795,7 @@ class TFCTRLForSequenceClassification(TFCTRLPreTrainedModel, TFSequenceClassific 
@add_start_docstrings_to_model_forward(CTRL_INPUTS_DOCSTRING) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, diff --git a/src/transformers/models/deberta/modeling_deberta.py b/src/transformers/models/deberta/modeling_deberta.py index f7708eba486..2ec59b34d52 100644 --- a/src/transformers/models/deberta/modeling_deberta.py +++ b/src/transformers/models/deberta/modeling_deberta.py @@ -866,7 +866,7 @@ class DebertaModel(DebertaPreTrainedModel): @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, @@ -972,7 +972,7 @@ class DebertaForMaskedLM(DebertaPreTrainedModel): @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC, @@ -1112,7 +1112,7 @@ class DebertaForSequenceClassification(DebertaPreTrainedModel): @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, @@ -1207,7 +1207,7 @@ class DebertaForTokenClassification(DebertaPreTrainedModel): @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC, @@ -1294,7 +1294,7 @@ class DebertaForQuestionAnswering(DebertaPreTrainedModel): @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, diff --git a/src/transformers/models/deberta/modeling_tf_deberta.py b/src/transformers/models/deberta/modeling_tf_deberta.py index e85e8c4670e..c6d2100ead6 100644 --- a/src/transformers/models/deberta/modeling_tf_deberta.py +++ b/src/transformers/models/deberta/modeling_tf_deberta.py @@ -1101,7 +1101,7 @@ class TFDebertaModel(TFDebertaPreTrainedModel): @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFBaseModelOutput, config_class=_CONFIG_FOR_DOC, @@ -1173,7 +1173,7 @@ class TFDebertaForMaskedLM(TFDebertaPreTrainedModel, TFMaskedLanguageModelingLos @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFMaskedLMOutput, config_class=_CONFIG_FOR_DOC, @@ -1275,7 +1275,7 @@ class 
TFDebertaForSequenceClassification(TFDebertaPreTrainedModel, TFSequenceCla @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, @@ -1372,7 +1372,7 @@ class TFDebertaForTokenClassification(TFDebertaPreTrainedModel, TFTokenClassific @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFTokenClassifierOutput, config_class=_CONFIG_FOR_DOC, @@ -1465,7 +1465,7 @@ class TFDebertaForQuestionAnswering(TFDebertaPreTrainedModel, TFQuestionAnswerin @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFQuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, diff --git a/src/transformers/models/deberta_v2/modeling_deberta_v2.py b/src/transformers/models/deberta_v2/modeling_deberta_v2.py index cb597cad081..4e4740aaa58 100644 --- a/src/transformers/models/deberta_v2/modeling_deberta_v2.py +++ b/src/transformers/models/deberta_v2/modeling_deberta_v2.py @@ -974,7 +974,7 @@ class DebertaV2Model(DebertaV2PreTrainedModel): @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, @@ -1081,7 +1081,7 @@ class DebertaV2ForMaskedLM(DebertaV2PreTrainedModel): @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC, @@ -1222,7 +1222,7 @@ class DebertaV2ForSequenceClassification(DebertaV2PreTrainedModel): @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, @@ -1318,7 +1318,7 @@ class DebertaV2ForTokenClassification(DebertaV2PreTrainedModel): @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC, @@ -1406,7 +1406,7 @@ class DebertaV2ForQuestionAnswering(DebertaV2PreTrainedModel): @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, diff --git a/src/transformers/models/deberta_v2/modeling_tf_deberta_v2.py 
b/src/transformers/models/deberta_v2/modeling_tf_deberta_v2.py index 08c7f6d8fe0..31d6d48619f 100644 --- a/src/transformers/models/deberta_v2/modeling_tf_deberta_v2.py +++ b/src/transformers/models/deberta_v2/modeling_tf_deberta_v2.py @@ -1223,7 +1223,7 @@ class TFDebertaV2Model(TFDebertaV2PreTrainedModel): @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFBaseModelOutput, config_class=_CONFIG_FOR_DOC, @@ -1296,7 +1296,7 @@ class TFDebertaV2ForMaskedLM(TFDebertaV2PreTrainedModel, TFMaskedLanguageModelin @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFMaskedLMOutput, config_class=_CONFIG_FOR_DOC, @@ -1399,7 +1399,7 @@ class TFDebertaV2ForSequenceClassification(TFDebertaV2PreTrainedModel, TFSequenc @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, @@ -1497,7 +1497,7 @@ class TFDebertaV2ForTokenClassification(TFDebertaV2PreTrainedModel, TFTokenClass @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFTokenClassifierOutput, config_class=_CONFIG_FOR_DOC, @@ -1591,7 +1591,7 @@ class TFDebertaV2ForQuestionAnswering(TFDebertaV2PreTrainedModel, TFQuestionAnsw @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFQuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, diff --git a/src/transformers/models/distilbert/modeling_distilbert.py b/src/transformers/models/distilbert/modeling_distilbert.py index d137e10fdb2..b68b7c524e8 100755 --- a/src/transformers/models/distilbert/modeling_distilbert.py +++ b/src/transformers/models/distilbert/modeling_distilbert.py @@ -508,7 +508,7 @@ class DistilBertModel(DistilBertPreTrainedModel): @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, num_choices")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC, @@ -604,7 +604,7 @@ class DistilBertForMaskedLM(DistilBertPreTrainedModel): @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, num_choices")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC, @@ -702,7 +702,7 @@ class DistilBertForSequenceClassification(DistilBertPreTrainedModel): @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, 
+ processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, @@ -818,7 +818,7 @@ class DistilBertForQuestionAnswering(DistilBertPreTrainedModel): @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, num_choices")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, @@ -935,7 +935,7 @@ class DistilBertForTokenClassification(DistilBertPreTrainedModel): @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC, diff --git a/src/transformers/models/distilbert/modeling_tf_distilbert.py b/src/transformers/models/distilbert/modeling_tf_distilbert.py index 2eddbffc143..e997d2a72cc 100644 --- a/src/transformers/models/distilbert/modeling_tf_distilbert.py +++ b/src/transformers/models/distilbert/modeling_tf_distilbert.py @@ -543,7 +543,7 @@ class TFDistilBertModel(TFDistilBertPreTrainedModel): @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFBaseModelOutput, config_class=_CONFIG_FOR_DOC, @@ -658,7 +658,7 @@ class TFDistilBertForMaskedLM(TFDistilBertPreTrainedModel, TFMaskedLanguageModel @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFMaskedLMOutput, config_class=_CONFIG_FOR_DOC, @@ -759,7 +759,7 @@ class TFDistilBertForSequenceClassification(TFDistilBertPreTrainedModel, TFSeque @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, @@ -854,7 +854,7 @@ class TFDistilBertForTokenClassification(TFDistilBertPreTrainedModel, TFTokenCla @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFTokenClassifierOutput, config_class=_CONFIG_FOR_DOC, @@ -962,7 +962,7 @@ class TFDistilBertForMultipleChoice(TFDistilBertPreTrainedModel, TFMultipleChoic DISTILBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length") ) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFMultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC, @@ -1088,7 +1088,7 @@ class TFDistilBertForQuestionAnswering(TFDistilBertPreTrainedModel, TFQuestionAn @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, 
output_type=TFQuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, diff --git a/src/transformers/models/electra/modeling_electra.py b/src/transformers/models/electra/modeling_electra.py index 5d6644ef4ef..e511c7d13e6 100644 --- a/src/transformers/models/electra/modeling_electra.py +++ b/src/transformers/models/electra/modeling_electra.py @@ -833,7 +833,7 @@ class ElectraModel(ElectraPreTrainedModel): @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC, @@ -941,7 +941,7 @@ class ElectraForSequenceClassification(ElectraPreTrainedModel): @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, @@ -1136,7 +1136,7 @@ class ElectraForMaskedLM(ElectraPreTrainedModel): @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC, @@ -1218,7 +1218,7 @@ class ElectraForTokenClassification(ElectraPreTrainedModel): @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC, @@ -1307,7 +1307,7 @@ class ElectraForQuestionAnswering(ElectraPreTrainedModel): @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, @@ -1408,7 +1408,7 @@ class ElectraForMultipleChoice(ElectraPreTrainedModel): @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=MultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC, diff --git a/src/transformers/models/electra/modeling_tf_electra.py b/src/transformers/models/electra/modeling_tf_electra.py index cd03c997a29..1fc8c09af5a 100644 --- a/src/transformers/models/electra/modeling_tf_electra.py +++ b/src/transformers/models/electra/modeling_tf_electra.py @@ -950,7 +950,7 @@ class TFElectraModel(TFElectraPreTrainedModel): @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFBaseModelOutputWithPastAndCrossAttentions, config_class=_CONFIG_FOR_DOC, @@ -1211,7 +1211,7 @@ class TFElectraForMaskedLM(TFElectraPreTrainedModel, TFMaskedLanguageModelingLos @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format("batch_size, 
sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFMaskedLMOutput, config_class=_CONFIG_FOR_DOC, @@ -1336,7 +1336,7 @@ class TFElectraForSequenceClassification(TFElectraPreTrainedModel, TFSequenceCla @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, @@ -1444,7 +1444,7 @@ class TFElectraForMultipleChoice(TFElectraPreTrainedModel, TFMultipleChoiceLoss) @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFMultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC, @@ -1584,7 +1584,7 @@ class TFElectraForTokenClassification(TFElectraPreTrainedModel, TFTokenClassific @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFTokenClassifierOutput, config_class=_CONFIG_FOR_DOC, @@ -1681,7 +1681,7 @@ class TFElectraForQuestionAnswering(TFElectraPreTrainedModel, TFQuestionAnswerin @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFQuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, diff --git a/src/transformers/models/flaubert/modeling_flaubert.py b/src/transformers/models/flaubert/modeling_flaubert.py index 161929db82b..f0f14caa393 100644 --- a/src/transformers/models/flaubert/modeling_flaubert.py +++ b/src/transformers/models/flaubert/modeling_flaubert.py @@ -148,7 +148,7 @@ class FlaubertModel(XLMModel): @add_start_docstrings_to_model_forward(FLAUBERT_INPUTS_DOCSTRING) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC, diff --git a/src/transformers/models/flaubert/modeling_tf_flaubert.py b/src/transformers/models/flaubert/modeling_tf_flaubert.py index 4ba4c4d099f..eecd686ef1b 100644 --- a/src/transformers/models/flaubert/modeling_tf_flaubert.py +++ b/src/transformers/models/flaubert/modeling_tf_flaubert.py @@ -236,7 +236,7 @@ class TFFlaubertModel(TFFlaubertPreTrainedModel): @add_start_docstrings_to_model_forward(FLAUBERT_INPUTS_DOCSTRING) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFBaseModelOutput, config_class=_CONFIG_FOR_DOC, @@ -820,7 +820,7 @@ class TFFlaubertWithLMHeadModel(TFFlaubertPreTrainedModel): @add_start_docstrings_to_model_forward(FLAUBERT_INPUTS_DOCSTRING) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFFlaubertWithLMHeadModelOutput, config_class=_CONFIG_FOR_DOC, diff --git a/src/transformers/models/fnet/modeling_fnet.py 
b/src/transformers/models/fnet/modeling_fnet.py index dbab7d5d174..afcfd273f33 100755 --- a/src/transformers/models/fnet/modeling_fnet.py +++ b/src/transformers/models/fnet/modeling_fnet.py @@ -545,7 +545,7 @@ class FNetModel(FNetPreTrainedModel): @add_start_docstrings_to_model_forward(FNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC, @@ -733,7 +733,7 @@ class FNetForMaskedLM(FNetPreTrainedModel): @add_start_docstrings_to_model_forward(FNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC, @@ -889,7 +889,7 @@ class FNetForSequenceClassification(FNetPreTrainedModel): @add_start_docstrings_to_model_forward(FNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, @@ -961,7 +961,7 @@ class FNetForMultipleChoice(FNetPreTrainedModel): @add_start_docstrings_to_model_forward(FNET_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=MultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC, @@ -1042,7 +1042,7 @@ class FNetForTokenClassification(FNetPreTrainedModel): @add_start_docstrings_to_model_forward(FNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC, @@ -1111,7 +1111,7 @@ class FNetForQuestionAnswering(FNetPreTrainedModel): @add_start_docstrings_to_model_forward(FNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, diff --git a/src/transformers/models/fsmt/modeling_fsmt.py b/src/transformers/models/fsmt/modeling_fsmt.py index 86a0a3e3f88..9ddcd1453bb 100644 --- a/src/transformers/models/fsmt/modeling_fsmt.py +++ b/src/transformers/models/fsmt/modeling_fsmt.py @@ -1007,7 +1007,7 @@ class FSMTModel(PretrainedFSMTModel): @add_start_docstrings_to_model_forward(FSMT_INPUTS_DOCSTRING) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC, diff --git a/src/transformers/models/funnel/modeling_funnel.py b/src/transformers/models/funnel/modeling_funnel.py index 4527a9f03b5..7ce2e3221c4 100644 --- a/src/transformers/models/funnel/modeling_funnel.py +++ b/src/transformers/models/funnel/modeling_funnel.py @@ -910,7 +910,7 @@ class FunnelBaseModel(FunnelPreTrainedModel): @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, 
checkpoint="funnel-transformer/small-base", output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC, @@ -987,7 +987,7 @@ class FunnelModel(FunnelPreTrainedModel): @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC, @@ -1174,7 +1174,7 @@ class FunnelForMaskedLM(FunnelPreTrainedModel): @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC, @@ -1248,7 +1248,7 @@ class FunnelForSequenceClassification(FunnelPreTrainedModel): @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint="funnel-transformer/small-base", output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, @@ -1338,7 +1338,7 @@ class FunnelForMultipleChoice(FunnelPreTrainedModel): @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint="funnel-transformer/small-base", output_type=MultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC, @@ -1424,7 +1424,7 @@ class FunnelForTokenClassification(FunnelPreTrainedModel): @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC, @@ -1506,7 +1506,7 @@ class FunnelForQuestionAnswering(FunnelPreTrainedModel): @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, diff --git a/src/transformers/models/funnel/modeling_tf_funnel.py b/src/transformers/models/funnel/modeling_tf_funnel.py index 8c2541da0ce..90113047847 100644 --- a/src/transformers/models/funnel/modeling_tf_funnel.py +++ b/src/transformers/models/funnel/modeling_tf_funnel.py @@ -1126,7 +1126,7 @@ class TFFunnelBaseModel(TFFunnelPreTrainedModel): @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint="funnel-transformer/small-base", output_type=TFBaseModelOutput, config_class=_CONFIG_FOR_DOC, @@ -1187,7 +1187,7 @@ class TFFunnelModel(TFFunnelPreTrainedModel): @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint="funnel-transformer/small", output_type=TFBaseModelOutput, config_class=_CONFIG_FOR_DOC, @@ -1337,7 +1337,7 @@ class TFFunnelForMaskedLM(TFFunnelPreTrainedModel, 
TFMaskedLanguageModelingLoss) @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint="funnel-transformer/small", output_type=TFMaskedLMOutput, config_class=_CONFIG_FOR_DOC, @@ -1426,7 +1426,7 @@ class TFFunnelForSequenceClassification(TFFunnelPreTrainedModel, TFSequenceClass @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint="funnel-transformer/small-base", output_type=TFSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, @@ -1525,7 +1525,7 @@ class TFFunnelForMultipleChoice(TFFunnelPreTrainedModel, TFMultipleChoiceLoss): @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint="funnel-transformer/small-base", output_type=TFMultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC, @@ -1655,7 +1655,7 @@ class TFFunnelForTokenClassification(TFFunnelPreTrainedModel, TFTokenClassificat @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint="funnel-transformer/small", output_type=TFTokenClassifierOutput, config_class=_CONFIG_FOR_DOC, @@ -1747,7 +1747,7 @@ class TFFunnelForQuestionAnswering(TFFunnelPreTrainedModel, TFQuestionAnsweringL @add_start_docstrings_to_model_forward(FUNNEL_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint="funnel-transformer/small", output_type=TFQuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, diff --git a/src/transformers/models/gpt2/modeling_gpt2.py b/src/transformers/models/gpt2/modeling_gpt2.py index 58b7d5ea242..6abc7e991c9 100644 --- a/src/transformers/models/gpt2/modeling_gpt2.py +++ b/src/transformers/models/gpt2/modeling_gpt2.py @@ -732,7 +732,7 @@ class GPT2Model(GPT2PreTrainedModel): @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPastAndCrossAttentions, config_class=_CONFIG_FOR_DOC, @@ -1009,7 +1009,7 @@ class GPT2LMHeadModel(GPT2PreTrainedModel): @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC, @@ -1338,7 +1338,7 @@ class GPT2ForSequenceClassification(GPT2PreTrainedModel): @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint="microsoft/DialogRPT-updown", output_type=SequenceClassifierOutputWithPast, config_class=_CONFIG_FOR_DOC, @@ -1457,7 +1457,7 @@ class GPT2ForTokenClassification(GPT2PreTrainedModel): @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + 
processor_class=_TOKENIZER_FOR_DOC, checkpoint="microsoft/DialogRPT-updown", output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC, diff --git a/src/transformers/models/gpt2/modeling_tf_gpt2.py b/src/transformers/models/gpt2/modeling_tf_gpt2.py index 71ff12c3296..609446e8c49 100644 --- a/src/transformers/models/gpt2/modeling_tf_gpt2.py +++ b/src/transformers/models/gpt2/modeling_tf_gpt2.py @@ -587,7 +587,7 @@ class TFGPT2Model(TFGPT2PreTrainedModel): @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFBaseModelOutputWithPast, config_class=_CONFIG_FOR_DOC, @@ -679,7 +679,7 @@ class TFGPT2LMHeadModel(TFGPT2PreTrainedModel, TFCausalLanguageModelingLoss): @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFCausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC, @@ -959,7 +959,7 @@ class TFGPT2ForSequenceClassification(TFGPT2PreTrainedModel, TFSequenceClassific @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint="microsoft/DialogRPT-updown", output_type=TFSequenceClassifierOutputWithPast, config_class=_CONFIG_FOR_DOC, diff --git a/src/transformers/models/gpt_neo/modeling_gpt_neo.py b/src/transformers/models/gpt_neo/modeling_gpt_neo.py index 3fafd75ac21..a845ad5987a 100755 --- a/src/transformers/models/gpt_neo/modeling_gpt_neo.py +++ b/src/transformers/models/gpt_neo/modeling_gpt_neo.py @@ -497,7 +497,7 @@ class GPTNeoModel(GPTNeoPreTrainedModel): @add_start_docstrings_to_model_forward(GPT_NEO_INPUTS_DOCSTRING) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPastAndCrossAttentions, config_class=_CONFIG_FOR_DOC, @@ -713,7 +713,7 @@ class GPTNeoForCausalLM(GPTNeoPreTrainedModel): @add_start_docstrings_to_model_forward(GPT_NEO_INPUTS_DOCSTRING) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC, @@ -827,7 +827,7 @@ class GPTNeoForSequenceClassification(GPTNeoPreTrainedModel): @add_start_docstrings_to_model_forward(GPT_NEO_INPUTS_DOCSTRING) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=SequenceClassifierOutputWithPast, config_class=_CONFIG_FOR_DOC, diff --git a/src/transformers/models/gptj/modeling_gptj.py b/src/transformers/models/gptj/modeling_gptj.py index a23da083471..e81819b7f87 100755 --- a/src/transformers/models/gptj/modeling_gptj.py +++ b/src/transformers/models/gptj/modeling_gptj.py @@ -491,7 +491,7 @@ class GPTJModel(GPTJPreTrainedModel): @add_start_docstrings_to_model_forward(GPTJ_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPast, config_class=_CONFIG_FOR_DOC, @@ -743,7 +743,7 @@ class GPTJForCausalLM(GPTJPreTrainedModel): 
@add_start_docstrings_to_model_forward(GPTJ_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC, @@ -864,7 +864,7 @@ class GPTJForSequenceClassification(GPTJPreTrainedModel): @add_start_docstrings_to_model_forward(GPTJ_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=SequenceClassifierOutputWithPast, config_class=_CONFIG_FOR_DOC, diff --git a/src/transformers/models/ibert/modeling_ibert.py b/src/transformers/models/ibert/modeling_ibert.py index d4f74ff47e7..3aa936ba6ac 100644 --- a/src/transformers/models/ibert/modeling_ibert.py +++ b/src/transformers/models/ibert/modeling_ibert.py @@ -772,7 +772,7 @@ class IBertModel(IBertPreTrainedModel): @add_start_docstrings_to_model_forward(IBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPoolingAndCrossAttentions, config_class=_CONFIG_FOR_DOC, @@ -875,7 +875,7 @@ class IBertForMaskedLM(IBertPreTrainedModel): @add_start_docstrings_to_model_forward(IBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC, @@ -983,7 +983,7 @@ class IBertForSequenceClassification(IBertPreTrainedModel): @add_start_docstrings_to_model_forward(IBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, @@ -1066,7 +1066,7 @@ class IBertForMultipleChoice(IBertPreTrainedModel): @add_start_docstrings_to_model_forward(IBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=MultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC, @@ -1160,7 +1160,7 @@ class IBertForTokenClassification(IBertPreTrainedModel): @add_start_docstrings_to_model_forward(IBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC, @@ -1269,7 +1269,7 @@ class IBertForQuestionAnswering(IBertPreTrainedModel): @add_start_docstrings_to_model_forward(IBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, diff --git a/src/transformers/models/led/modeling_led.py b/src/transformers/models/led/modeling_led.py index 1e08899c46c..c078a4e54ce 100755 --- a/src/transformers/models/led/modeling_led.py +++ b/src/transformers/models/led/modeling_led.py @@ -2174,7 +2174,7 @@ class LEDModel(LEDPreTrainedModel): 
@add_start_docstrings_to_model_forward(LED_INPUTS_DOCSTRING) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC, @@ -2468,7 +2468,7 @@ class LEDForSequenceClassification(LEDPreTrainedModel): @add_start_docstrings_to_model_forward(LED_INPUTS_DOCSTRING) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=Seq2SeqSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, @@ -2578,7 +2578,7 @@ class LEDForQuestionAnswering(LEDPreTrainedModel): @add_start_docstrings_to_model_forward(LED_INPUTS_DOCSTRING) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=Seq2SeqQuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, diff --git a/src/transformers/models/led/modeling_tf_led.py b/src/transformers/models/led/modeling_tf_led.py index d0d8afb1a0e..fa8904e4748 100644 --- a/src/transformers/models/led/modeling_tf_led.py +++ b/src/transformers/models/led/modeling_tf_led.py @@ -2230,7 +2230,7 @@ class TFLEDModel(TFLEDPreTrainedModel): @add_start_docstrings_to_model_forward(LED_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFLEDSeq2SeqModelOutput, config_class=_CONFIG_FOR_DOC, diff --git a/src/transformers/models/longformer/modeling_longformer.py b/src/transformers/models/longformer/modeling_longformer.py index 3e327c5c688..672c0d948ae 100755 --- a/src/transformers/models/longformer/modeling_longformer.py +++ b/src/transformers/models/longformer/modeling_longformer.py @@ -1822,7 +1822,7 @@ class LongformerForSequenceClassification(LongformerPreTrainedModel): @add_start_docstrings_to_model_forward(LONGFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=LongformerSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, @@ -2084,7 +2084,7 @@ class LongformerForTokenClassification(LongformerPreTrainedModel): @add_start_docstrings_to_model_forward(LONGFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=LongformerTokenClassifierOutput, config_class=_CONFIG_FOR_DOC, @@ -2176,7 +2176,7 @@ class LongformerForMultipleChoice(LongformerPreTrainedModel): LONGFORMER_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length") ) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=LongformerMultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC, diff --git a/src/transformers/models/longformer/modeling_tf_longformer.py b/src/transformers/models/longformer/modeling_tf_longformer.py index 7d7a881fc85..164a1bf5265 100644 --- a/src/transformers/models/longformer/modeling_tf_longformer.py +++ b/src/transformers/models/longformer/modeling_tf_longformer.py @@ -2088,7 +2088,7 @@ class TFLongformerForMaskedLM(TFLongformerPreTrainedModel, TFMaskedLanguageModel 
@add_start_docstrings_to_model_forward(LONGFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFLongformerMaskedLMOutput, config_class=_CONFIG_FOR_DOC, @@ -2197,7 +2197,7 @@ class TFLongformerForQuestionAnswering(TFLongformerPreTrainedModel, TFQuestionAn @add_start_docstrings_to_model_forward(LONGFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint="allenai/longformer-large-4096-finetuned-triviaqa", output_type=TFLongformerQuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, @@ -2366,7 +2366,7 @@ class TFLongformerForSequenceClassification(TFLongformerPreTrainedModel, TFSeque @add_start_docstrings_to_model_forward(LONGFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFLongformerSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, @@ -2492,7 +2492,7 @@ class TFLongformerForMultipleChoice(TFLongformerPreTrainedModel, TFMultipleChoic LONGFORMER_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length") ) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFLongformerMultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC, @@ -2645,7 +2645,7 @@ class TFLongformerForTokenClassification(TFLongformerPreTrainedModel, TFTokenCla @add_start_docstrings_to_model_forward(LONGFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFLongformerTokenClassifierOutput, config_class=_CONFIG_FOR_DOC, diff --git a/src/transformers/models/lxmert/modeling_lxmert.py b/src/transformers/models/lxmert/modeling_lxmert.py index 5cf2bc67855..1135816cc22 100644 --- a/src/transformers/models/lxmert/modeling_lxmert.py +++ b/src/transformers/models/lxmert/modeling_lxmert.py @@ -901,7 +901,7 @@ class LxmertModel(LxmertPreTrainedModel): @add_start_docstrings_to_model_forward(LXMERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=LxmertModelOutput, config_class=_CONFIG_FOR_DOC, @@ -1384,7 +1384,7 @@ class LxmertForQuestionAnswering(LxmertPreTrainedModel): @add_start_docstrings_to_model_forward(LXMERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=LxmertForQuestionAnsweringOutput, config_class=_CONFIG_FOR_DOC, diff --git a/src/transformers/models/lxmert/modeling_tf_lxmert.py b/src/transformers/models/lxmert/modeling_tf_lxmert.py index 70def7e77be..31972900995 100644 --- a/src/transformers/models/lxmert/modeling_tf_lxmert.py +++ b/src/transformers/models/lxmert/modeling_tf_lxmert.py @@ -950,7 +950,7 @@ class TFLxmertModel(TFLxmertPreTrainedModel): @add_start_docstrings_to_model_forward(LXMERT_INPUTS_DOCSTRING) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + 
processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFLxmertModelOutput, config_class=_CONFIG_FOR_DOC, diff --git a/src/transformers/models/m2m_100/modeling_m2m_100.py b/src/transformers/models/m2m_100/modeling_m2m_100.py index 9bb15c918a8..e4072c5bfcc 100755 --- a/src/transformers/models/m2m_100/modeling_m2m_100.py +++ b/src/transformers/models/m2m_100/modeling_m2m_100.py @@ -1123,7 +1123,7 @@ class M2M100Model(M2M100PreTrainedModel): @add_start_docstrings_to_model_forward(M2M_100_INPUTS_DOCSTRING) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC, diff --git a/src/transformers/models/marian/modeling_tf_marian.py b/src/transformers/models/marian/modeling_tf_marian.py index 30c514f6c78..2412fca17c8 100644 --- a/src/transformers/models/marian/modeling_tf_marian.py +++ b/src/transformers/models/marian/modeling_tf_marian.py @@ -1224,7 +1224,7 @@ class TFMarianModel(TFMarianPreTrainedModel): @add_start_docstrings_to_model_forward(MARIAN_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFSeq2SeqModelOutput, config_class=_CONFIG_FOR_DOC, diff --git a/src/transformers/models/mbart/modeling_mbart.py b/src/transformers/models/mbart/modeling_mbart.py index a45e4eb808a..c47fb46d5f0 100755 --- a/src/transformers/models/mbart/modeling_mbart.py +++ b/src/transformers/models/mbart/modeling_mbart.py @@ -1134,7 +1134,7 @@ class MBartModel(MBartPreTrainedModel): @add_start_docstrings_to_model_forward(MBART_INPUTS_DOCSTRING) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC, @@ -1405,7 +1405,7 @@ class MBartForSequenceClassification(MBartPreTrainedModel): @add_start_docstrings_to_model_forward(MBART_INPUTS_DOCSTRING) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=Seq2SeqSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, @@ -1518,7 +1518,7 @@ class MBartForQuestionAnswering(MBartPreTrainedModel): @add_start_docstrings_to_model_forward(MBART_INPUTS_DOCSTRING) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=Seq2SeqQuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, diff --git a/src/transformers/models/mbart/modeling_tf_mbart.py b/src/transformers/models/mbart/modeling_tf_mbart.py index bf79bb9abf0..06e13cb29f7 100644 --- a/src/transformers/models/mbart/modeling_tf_mbart.py +++ b/src/transformers/models/mbart/modeling_tf_mbart.py @@ -1208,7 +1208,7 @@ class TFMBartModel(TFMBartPreTrainedModel): @add_start_docstrings_to_model_forward(MBART_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFSeq2SeqModelOutput, config_class=_CONFIG_FOR_DOC, diff --git a/src/transformers/models/megatron_bert/modeling_megatron_bert.py b/src/transformers/models/megatron_bert/modeling_megatron_bert.py index 80337b2dabf..042720667c3 100755 --- 
a/src/transformers/models/megatron_bert/modeling_megatron_bert.py +++ b/src/transformers/models/megatron_bert/modeling_megatron_bert.py @@ -873,7 +873,7 @@ class MegatronBertModel(MegatronBertPreTrainedModel): @add_start_docstrings_to_model_forward(MEGATRON_BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPoolingAndCrossAttentions, config_class=_CONFIG_FOR_DOC, @@ -1282,7 +1282,7 @@ class MegatronBertForMaskedLM(MegatronBertPreTrainedModel): @add_start_docstrings_to_model_forward(MEGATRON_BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC, @@ -1480,7 +1480,7 @@ class MegatronBertForSequenceClassification(MegatronBertPreTrainedModel): @add_start_docstrings_to_model_forward(MEGATRON_BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, @@ -1566,7 +1566,7 @@ class MegatronBertForMultipleChoice(MegatronBertPreTrainedModel): MEGATRON_BERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length") ) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=MultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC, @@ -1661,7 +1661,7 @@ class MegatronBertForTokenClassification(MegatronBertPreTrainedModel): @add_start_docstrings_to_model_forward(MEGATRON_BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC, @@ -1751,7 +1751,7 @@ class MegatronBertForQuestionAnswering(MegatronBertPreTrainedModel): @add_start_docstrings_to_model_forward(MEGATRON_BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, diff --git a/src/transformers/models/mobilebert/modeling_mobilebert.py b/src/transformers/models/mobilebert/modeling_mobilebert.py index 111771ded41..3c85af3b1bc 100644 --- a/src/transformers/models/mobilebert/modeling_mobilebert.py +++ b/src/transformers/models/mobilebert/modeling_mobilebert.py @@ -817,7 +817,7 @@ class MobileBertModel(MobileBertPreTrainedModel): @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC, @@ -1032,7 +1032,7 @@ class MobileBertForMaskedLM(MobileBertPreTrainedModel): @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, 
output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC, @@ -1222,7 +1222,7 @@ class MobileBertForSequenceClassification(MobileBertPreTrainedModel): @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, @@ -1322,7 +1322,7 @@ class MobileBertForQuestionAnswering(MobileBertPreTrainedModel): @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, @@ -1427,7 +1427,7 @@ class MobileBertForMultipleChoice(MobileBertPreTrainedModel): MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length") ) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=MultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC, @@ -1526,7 +1526,7 @@ class MobileBertForTokenClassification(MobileBertPreTrainedModel): @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC, diff --git a/src/transformers/models/mobilebert/modeling_tf_mobilebert.py b/src/transformers/models/mobilebert/modeling_tf_mobilebert.py index 8357d09b559..30ce8c47282 100644 --- a/src/transformers/models/mobilebert/modeling_tf_mobilebert.py +++ b/src/transformers/models/mobilebert/modeling_tf_mobilebert.py @@ -933,7 +933,7 @@ class TFMobileBertModel(TFMobileBertPreTrainedModel): @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFBaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC, @@ -1124,7 +1124,7 @@ class TFMobileBertForMaskedLM(TFMobileBertPreTrainedModel, TFMaskedLanguageModel @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFMaskedLMOutput, config_class=_CONFIG_FOR_DOC, @@ -1348,7 +1348,7 @@ class TFMobileBertForSequenceClassification(TFMobileBertPreTrainedModel, TFSeque @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, @@ -1456,7 +1456,7 @@ class TFMobileBertForQuestionAnswering(TFMobileBertPreTrainedModel, TFQuestionAn @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFQuestionAnsweringModelOutput, 
config_class=_CONFIG_FOR_DOC, @@ -1591,7 +1591,7 @@ class TFMobileBertForMultipleChoice(TFMobileBertPreTrainedModel, TFMultipleChoic MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length") ) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFMultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC, @@ -1742,7 +1742,7 @@ class TFMobileBertForTokenClassification(TFMobileBertPreTrainedModel, TFTokenCla @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFTokenClassifierOutput, config_class=_CONFIG_FOR_DOC, diff --git a/src/transformers/models/mpnet/modeling_mpnet.py b/src/transformers/models/mpnet/modeling_mpnet.py index 16540670b73..9f3e7bd4932 100644 --- a/src/transformers/models/mpnet/modeling_mpnet.py +++ b/src/transformers/models/mpnet/modeling_mpnet.py @@ -511,7 +511,7 @@ class MPNetModel(MPNetPreTrainedModel): @add_start_docstrings_to_model_forward(MPNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC, @@ -593,7 +593,7 @@ class MPNetForMaskedLM(MPNetPreTrainedModel): @add_start_docstrings_to_model_forward(MPNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC, @@ -695,7 +695,7 @@ class MPNetForSequenceClassification(MPNetPreTrainedModel): @add_start_docstrings_to_model_forward(MPNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, @@ -777,7 +777,7 @@ class MPNetForMultipleChoice(MPNetPreTrainedModel): @add_start_docstrings_to_model_forward(MPNET_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=MultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC, @@ -869,7 +869,7 @@ class MPNetForTokenClassification(MPNetPreTrainedModel): @add_start_docstrings_to_model_forward(MPNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC, @@ -977,7 +977,7 @@ class MPNetForQuestionAnswering(MPNetPreTrainedModel): @add_start_docstrings_to_model_forward(MPNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, diff --git a/src/transformers/models/mpnet/modeling_tf_mpnet.py b/src/transformers/models/mpnet/modeling_tf_mpnet.py index 67a54e4f731..90b5c16ccca 100644 --- 
a/src/transformers/models/mpnet/modeling_tf_mpnet.py +++ b/src/transformers/models/mpnet/modeling_tf_mpnet.py @@ -684,7 +684,7 @@ class TFMPNetModel(TFMPNetPreTrainedModel): @add_start_docstrings_to_model_forward(MPNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFBaseModelOutput, config_class=_CONFIG_FOR_DOC, @@ -813,7 +813,7 @@ class TFMPNetForMaskedLM(TFMPNetPreTrainedModel, TFMaskedLanguageModelingLoss): @add_start_docstrings_to_model_forward(MPNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFMaskedLMOutput, config_class=_CONFIG_FOR_DOC, @@ -934,7 +934,7 @@ class TFMPNetForSequenceClassification(TFMPNetPreTrainedModel, TFSequenceClassif @add_start_docstrings_to_model_forward(MPNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, @@ -1040,7 +1040,7 @@ class TFMPNetForMultipleChoice(TFMPNetPreTrainedModel, TFMultipleChoiceLoss): @add_start_docstrings_to_model_forward(MPNET_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFMultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC, @@ -1172,7 +1172,7 @@ class TFMPNetForTokenClassification(TFMPNetPreTrainedModel, TFTokenClassificatio @add_start_docstrings_to_model_forward(MPNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFTokenClassifierOutput, config_class=_CONFIG_FOR_DOC, @@ -1271,7 +1271,7 @@ class TFMPNetForQuestionAnswering(TFMPNetPreTrainedModel, TFQuestionAnsweringLos @add_start_docstrings_to_model_forward(MPNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFQuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, diff --git a/src/transformers/models/openai/modeling_openai.py b/src/transformers/models/openai/modeling_openai.py index 6bf03a2f926..0ce9344dacc 100644 --- a/src/transformers/models/openai/modeling_openai.py +++ b/src/transformers/models/openai/modeling_openai.py @@ -433,7 +433,7 @@ class OpenAIGPTModel(OpenAIGPTPreTrainedModel): @add_start_docstrings_to_model_forward(OPENAI_GPT_INPUTS_DOCSTRING) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC, @@ -552,7 +552,7 @@ class OpenAIGPTLMHeadModel(OpenAIGPTPreTrainedModel): @add_start_docstrings_to_model_forward(OPENAI_GPT_INPUTS_DOCSTRING) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=CausalLMOutput, config_class=_CONFIG_FOR_DOC, @@ -756,7 +756,7 @@ class OpenAIGPTForSequenceClassification(OpenAIGPTPreTrainedModel): 
@add_start_docstrings_to_model_forward(OPENAI_GPT_INPUTS_DOCSTRING) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, diff --git a/src/transformers/models/openai/modeling_tf_openai.py b/src/transformers/models/openai/modeling_tf_openai.py index 60bb101d15c..e4d5b80209a 100644 --- a/src/transformers/models/openai/modeling_tf_openai.py +++ b/src/transformers/models/openai/modeling_tf_openai.py @@ -522,7 +522,7 @@ class TFOpenAIGPTModel(TFOpenAIGPTPreTrainedModel): @add_start_docstrings_to_model_forward(OPENAI_GPT_INPUTS_DOCSTRING) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFBaseModelOutput, config_class=_CONFIG_FOR_DOC, @@ -598,7 +598,7 @@ class TFOpenAIGPTLMHeadModel(TFOpenAIGPTPreTrainedModel, TFCausalLanguageModelin @add_start_docstrings_to_model_forward(OPENAI_GPT_INPUTS_DOCSTRING) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFCausalLMOutput, config_class=_CONFIG_FOR_DOC, @@ -856,7 +856,7 @@ class TFOpenAIGPTForSequenceClassification(TFOpenAIGPTPreTrainedModel, TFSequenc @add_start_docstrings_to_model_forward(OPENAI_GPT_INPUTS_DOCSTRING) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, diff --git a/src/transformers/models/pegasus/modeling_tf_pegasus.py b/src/transformers/models/pegasus/modeling_tf_pegasus.py index 20d4b65a8c1..e3b90377c73 100644 --- a/src/transformers/models/pegasus/modeling_tf_pegasus.py +++ b/src/transformers/models/pegasus/modeling_tf_pegasus.py @@ -1233,7 +1233,7 @@ class TFPegasusModel(TFPegasusPreTrainedModel): @add_start_docstrings_to_model_forward(PEGASUS_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFSeq2SeqModelOutput, config_class=_CONFIG_FOR_DOC, diff --git a/src/transformers/models/reformer/modeling_reformer.py b/src/transformers/models/reformer/modeling_reformer.py index f91c4bdc96c..c7ee43a5663 100755 --- a/src/transformers/models/reformer/modeling_reformer.py +++ b/src/transformers/models/reformer/modeling_reformer.py @@ -1992,7 +1992,7 @@ class ReformerModel(ReformerPreTrainedModel): @add_start_docstrings_to_model_forward(REFORMER_INPUTS_DOCSTRING) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=ReformerModelOutput, config_class=_CONFIG_FOR_DOC, @@ -2198,7 +2198,7 @@ class ReformerModelWithLMHead(ReformerPreTrainedModel): @add_start_docstrings_to_model_forward(REFORMER_INPUTS_DOCSTRING) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=CausalLMOutput, config_class=_CONFIG_FOR_DOC, @@ -2313,7 +2313,7 @@ class ReformerForMaskedLM(ReformerPreTrainedModel): @add_start_docstrings_to_model_forward(REFORMER_INPUTS_DOCSTRING) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=MaskedLMOutput, 
config_class=_CONFIG_FOR_DOC, @@ -2394,7 +2394,7 @@ class ReformerForSequenceClassification(ReformerPreTrainedModel): @add_start_docstrings_to_model_forward(REFORMER_INPUTS_DOCSTRING) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, @@ -2512,7 +2512,7 @@ class ReformerForQuestionAnswering(ReformerPreTrainedModel): @add_start_docstrings_to_model_forward(REFORMER_INPUTS_DOCSTRING) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, diff --git a/src/transformers/models/rembert/modeling_rembert.py b/src/transformers/models/rembert/modeling_rembert.py index d782e33df2d..599875347f9 100755 --- a/src/transformers/models/rembert/modeling_rembert.py +++ b/src/transformers/models/rembert/modeling_rembert.py @@ -781,7 +781,7 @@ class RemBertModel(RemBertPreTrainedModel): @add_start_docstrings_to_model_forward(REMBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint="rembert", output_type=BaseModelOutputWithPastAndCrossAttentions, config_class=_CONFIG_FOR_DOC, @@ -933,7 +933,7 @@ class RemBertForMaskedLM(RemBertPreTrainedModel): @add_start_docstrings_to_model_forward(REMBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint="rembert", output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC, @@ -1175,7 +1175,7 @@ class RemBertForSequenceClassification(RemBertPreTrainedModel): @add_start_docstrings_to_model_forward(REMBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint="rembert", output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, @@ -1259,7 +1259,7 @@ class RemBertForMultipleChoice(RemBertPreTrainedModel): @add_start_docstrings_to_model_forward(REMBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint="rembert", output_type=MultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC, @@ -1351,7 +1351,7 @@ class RemBertForTokenClassification(RemBertPreTrainedModel): @add_start_docstrings_to_model_forward(REMBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint="rembert", output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC, @@ -1439,7 +1439,7 @@ class RemBertForQuestionAnswering(RemBertPreTrainedModel): @add_start_docstrings_to_model_forward(REMBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint="rembert", output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, diff --git a/src/transformers/models/rembert/modeling_tf_rembert.py b/src/transformers/models/rembert/modeling_tf_rembert.py index 59c60ff6f3f..9deab21f756 100644 --- a/src/transformers/models/rembert/modeling_tf_rembert.py +++ 
b/src/transformers/models/rembert/modeling_tf_rembert.py @@ -956,7 +956,7 @@ class TFRemBertModel(TFRemBertPreTrainedModel): @add_start_docstrings_to_model_forward(REMBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint="rembert", output_type=TFBaseModelOutputWithPoolingAndCrossAttentions, config_class=_CONFIG_FOR_DOC, @@ -1078,7 +1078,7 @@ class TFRemBertForMaskedLM(TFRemBertPreTrainedModel, TFMaskedLanguageModelingLos @add_start_docstrings_to_model_forward(REMBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint="rembert", output_type=TFMaskedLMOutput, config_class=_CONFIG_FOR_DOC, @@ -1186,7 +1186,7 @@ class TFRemBertForCausalLM(TFRemBertPreTrainedModel, TFCausalLanguageModelingLos } @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint="rembert", output_type=TFCausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC, @@ -1329,7 +1329,7 @@ class TFRemBertForSequenceClassification(TFRemBertPreTrainedModel, TFSequenceCla @add_start_docstrings_to_model_forward(REMBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint="rembert", output_type=TFSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, @@ -1435,7 +1435,7 @@ class TFRemBertForMultipleChoice(TFRemBertPreTrainedModel, TFMultipleChoiceLoss) @add_start_docstrings_to_model_forward(REMBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint="rembert", output_type=TFMultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC, @@ -1579,7 +1579,7 @@ class TFRemBertForTokenClassification(TFRemBertPreTrainedModel, TFTokenClassific @add_start_docstrings_to_model_forward(REMBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint="rembert", output_type=TFTokenClassifierOutput, config_class=_CONFIG_FOR_DOC, @@ -1675,7 +1675,7 @@ class TFRemBertForQuestionAnswering(TFRemBertPreTrainedModel, TFQuestionAnswerin @add_start_docstrings_to_model_forward(REMBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint="rembert", output_type=TFQuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, diff --git a/src/transformers/models/roberta/modeling_roberta.py b/src/transformers/models/roberta/modeling_roberta.py index 251749bde73..c67d2f026df 100644 --- a/src/transformers/models/roberta/modeling_roberta.py +++ b/src/transformers/models/roberta/modeling_roberta.py @@ -739,7 +739,7 @@ class RobertaModel(RobertaPreTrainedModel): @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPoolingAndCrossAttentions, config_class=_CONFIG_FOR_DOC, @@ -1058,7 +1058,7 @@ class RobertaForMaskedLM(RobertaPreTrainedModel): 
@add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC, @@ -1171,7 +1171,7 @@ class RobertaForSequenceClassification(RobertaPreTrainedModel): @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, @@ -1267,7 +1267,7 @@ class RobertaForMultipleChoice(RobertaPreTrainedModel): @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=MultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC, @@ -1364,7 +1364,7 @@ class RobertaForTokenClassification(RobertaPreTrainedModel): @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC, @@ -1476,7 +1476,7 @@ class RobertaForQuestionAnswering(RobertaPreTrainedModel): @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, diff --git a/src/transformers/models/roberta/modeling_tf_roberta.py b/src/transformers/models/roberta/modeling_tf_roberta.py index e364ac691a3..107974de100 100644 --- a/src/transformers/models/roberta/modeling_tf_roberta.py +++ b/src/transformers/models/roberta/modeling_tf_roberta.py @@ -933,7 +933,7 @@ class TFRobertaModel(TFRobertaPreTrainedModel): @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFBaseModelOutputWithPoolingAndCrossAttentions, config_class=_CONFIG_FOR_DOC, @@ -1108,7 +1108,7 @@ class TFRobertaForMaskedLM(TFRobertaPreTrainedModel, TFMaskedLanguageModelingLos @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFMaskedLMOutput, config_class=_CONFIG_FOR_DOC, @@ -1222,7 +1222,7 @@ class TFRobertaForCausalLM(TFRobertaPreTrainedModel, TFCausalLanguageModelingLos @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFCausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC, @@ -1392,7 +1392,7 @@ class TFRobertaForSequenceClassification(TFRobertaPreTrainedModel, TFSequenceCla 
@add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, @@ -1503,7 +1503,7 @@ class TFRobertaForMultipleChoice(TFRobertaPreTrainedModel, TFMultipleChoiceLoss) @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFMultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC, @@ -1641,7 +1641,7 @@ class TFRobertaForTokenClassification(TFRobertaPreTrainedModel, TFTokenClassific @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFTokenClassifierOutput, config_class=_CONFIG_FOR_DOC, @@ -1742,7 +1742,7 @@ class TFRobertaForQuestionAnswering(TFRobertaPreTrainedModel, TFQuestionAnswerin @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFQuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, diff --git a/src/transformers/models/roformer/modeling_roformer.py b/src/transformers/models/roformer/modeling_roformer.py index ab9274c78e1..0543fa8f243 100644 --- a/src/transformers/models/roformer/modeling_roformer.py +++ b/src/transformers/models/roformer/modeling_roformer.py @@ -839,7 +839,7 @@ class RoFormerModel(RoFormerPreTrainedModel): @add_start_docstrings_to_model_forward(ROFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPastAndCrossAttentions, config_class=_CONFIG_FOR_DOC, @@ -987,7 +987,7 @@ class RoFormerForMaskedLM(RoFormerPreTrainedModel): @add_start_docstrings_to_model_forward(ROFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC, @@ -1246,7 +1246,7 @@ class RoFormerForSequenceClassification(RoFormerPreTrainedModel): @add_start_docstrings_to_model_forward(ROFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, @@ -1328,7 +1328,7 @@ class RoFormerForMultipleChoice(RoFormerPreTrainedModel): ROFORMER_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length") ) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=MultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC, @@ -1418,7 +1418,7 @@ class RoFormerForTokenClassification(RoFormerPreTrainedModel): @add_start_docstrings_to_model_forward(ROFORMER_INPUTS_DOCSTRING.format("batch_size, 
sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC, @@ -1505,7 +1505,7 @@ class RoFormerForQuestionAnswering(RoFormerPreTrainedModel): @add_start_docstrings_to_model_forward(ROFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, diff --git a/src/transformers/models/roformer/modeling_tf_roformer.py b/src/transformers/models/roformer/modeling_tf_roformer.py index 436acdbd30d..ad0a42ff4e7 100644 --- a/src/transformers/models/roformer/modeling_tf_roformer.py +++ b/src/transformers/models/roformer/modeling_tf_roformer.py @@ -814,7 +814,7 @@ class TFRoFormerModel(TFRoFormerPreTrainedModel): @add_start_docstrings_to_model_forward(ROFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFBaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC, @@ -886,7 +886,7 @@ class TFRoFormerForMaskedLM(TFRoFormerPreTrainedModel, TFMaskedLanguageModelingL @add_start_docstrings_to_model_forward(ROFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFMaskedLMOutput, config_class=_CONFIG_FOR_DOC, @@ -978,7 +978,7 @@ class TFRoFormerForCausalLM(TFRoFormerPreTrainedModel, TFCausalLanguageModelingL return self.mlm.predictions @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFCausalLMOutput, config_class=_CONFIG_FOR_DOC, @@ -1103,7 +1103,7 @@ class TFRoFormerForSequenceClassification(TFRoFormerPreTrainedModel, TFSequenceC @add_start_docstrings_to_model_forward(ROFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, @@ -1208,7 +1208,7 @@ class TFRoFormerForMultipleChoice(TFRoFormerPreTrainedModel, TFMultipleChoiceLos ROFORMER_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length") ) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFMultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC, @@ -1344,7 +1344,7 @@ class TFRoFormerForTokenClassification(TFRoFormerPreTrainedModel, TFTokenClassif @add_start_docstrings_to_model_forward(ROFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFTokenClassifierOutput, config_class=_CONFIG_FOR_DOC, @@ -1437,7 +1437,7 @@ class TFRoFormerForQuestionAnswering(TFRoFormerPreTrainedModel, TFQuestionAnswer @add_start_docstrings_to_model_forward(ROFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, 
checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFQuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, diff --git a/src/transformers/models/speech_to_text/modeling_speech_to_text.py b/src/transformers/models/speech_to_text/modeling_speech_to_text.py index 9a8f3643387..fac2fdeca31 100755 --- a/src/transformers/models/speech_to_text/modeling_speech_to_text.py +++ b/src/transformers/models/speech_to_text/modeling_speech_to_text.py @@ -1138,7 +1138,7 @@ class Speech2TextModel(Speech2TextPreTrainedModel): @add_start_docstrings_to_model_forward(SPEECH_TO_TEXT_INPUTS_DOCSTRING) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC, diff --git a/src/transformers/models/splinter/modeling_splinter.py b/src/transformers/models/splinter/modeling_splinter.py index e2d79eaa2ca..5e89b5cfc6b 100755 --- a/src/transformers/models/splinter/modeling_splinter.py +++ b/src/transformers/models/splinter/modeling_splinter.py @@ -635,7 +635,7 @@ class SplinterModel(SplinterPreTrainedModel): @add_start_docstrings_to_model_forward(SPLINTER_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPastAndCrossAttentions, config_class=_CONFIG_FOR_DOC, @@ -836,7 +836,7 @@ class SplinterForQuestionAnswering(SplinterPreTrainedModel): @add_start_docstrings_to_model_forward(SPLINTER_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, diff --git a/src/transformers/models/squeezebert/modeling_squeezebert.py b/src/transformers/models/squeezebert/modeling_squeezebert.py index 577b07b2fc2..6ec972f06c9 100644 --- a/src/transformers/models/squeezebert/modeling_squeezebert.py +++ b/src/transformers/models/squeezebert/modeling_squeezebert.py @@ -571,7 +571,7 @@ class SqueezeBertModel(SqueezeBertPreTrainedModel): @add_start_docstrings_to_model_forward(SQUEEZEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC, @@ -664,7 +664,7 @@ class SqueezeBertForMaskedLM(SqueezeBertPreTrainedModel): @add_start_docstrings_to_model_forward(SQUEEZEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC, @@ -743,7 +743,7 @@ class SqueezeBertForSequenceClassification(SqueezeBertPreTrainedModel): @add_start_docstrings_to_model_forward(SQUEEZEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, @@ -842,7 +842,7 @@ class SqueezeBertForMultipleChoice(SqueezeBertPreTrainedModel): SQUEEZEBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length") ) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, 
+ processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=MultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC, @@ -934,7 +934,7 @@ class SqueezeBertForTokenClassification(SqueezeBertPreTrainedModel): @add_start_docstrings_to_model_forward(SQUEEZEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC, @@ -1021,7 +1021,7 @@ class SqueezeBertForQuestionAnswering(SqueezeBertPreTrainedModel): @add_start_docstrings_to_model_forward(SQUEEZEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, diff --git a/src/transformers/models/transfo_xl/modeling_tf_transfo_xl.py b/src/transformers/models/transfo_xl/modeling_tf_transfo_xl.py index c0701f7ea66..b883375ef51 100644 --- a/src/transformers/models/transfo_xl/modeling_tf_transfo_xl.py +++ b/src/transformers/models/transfo_xl/modeling_tf_transfo_xl.py @@ -883,7 +883,7 @@ class TFTransfoXLModel(TFTransfoXLPreTrainedModel): @add_start_docstrings_to_model_forward(TRANSFO_XL_INPUTS_DOCSTRING) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFTransfoXLModelOutput, config_class=_CONFIG_FOR_DOC, @@ -975,7 +975,7 @@ class TFTransfoXLLMHeadModel(TFTransfoXLPreTrainedModel): @add_start_docstrings_to_model_forward(TRANSFO_XL_INPUTS_DOCSTRING) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFTransfoXLLMHeadModelOutput, config_class=_CONFIG_FOR_DOC, @@ -1091,7 +1091,7 @@ class TFTransfoXLForSequenceClassification(TFTransfoXLPreTrainedModel, TFSequenc @add_start_docstrings_to_model_forward(TRANSFO_XL_INPUTS_DOCSTRING) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFTransfoXLSequenceClassifierOutputWithPast, config_class=_CONFIG_FOR_DOC, diff --git a/src/transformers/models/transfo_xl/modeling_transfo_xl.py b/src/transformers/models/transfo_xl/modeling_transfo_xl.py index aa445726a87..b2703bee4f0 100644 --- a/src/transformers/models/transfo_xl/modeling_transfo_xl.py +++ b/src/transformers/models/transfo_xl/modeling_transfo_xl.py @@ -871,7 +871,7 @@ class TransfoXLModel(TransfoXLPreTrainedModel): @add_start_docstrings_to_model_forward(TRANSFO_XL_INPUTS_DOCSTRING) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TransfoXLModelOutput, config_class=_CONFIG_FOR_DOC, @@ -1052,7 +1052,7 @@ class TransfoXLLMHeadModel(TransfoXLPreTrainedModel): @add_start_docstrings_to_model_forward(TRANSFO_XL_INPUTS_DOCSTRING) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TransfoXLLMHeadModelOutput, config_class=_CONFIG_FOR_DOC, @@ -1174,7 +1174,7 @@ class TransfoXLForSequenceClassification(TransfoXLPreTrainedModel): @add_start_docstrings_to_model_forward(TRANSFO_XL_INPUTS_DOCSTRING) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, 
+ processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TransfoXLSequenceClassifierOutputWithPast, config_class=_CONFIG_FOR_DOC, diff --git a/src/transformers/models/wav2vec2/modeling_wav2vec2.py b/src/transformers/models/wav2vec2/modeling_wav2vec2.py index aa8da19c10f..e36d92980ca 100755 --- a/src/transformers/models/wav2vec2/modeling_wav2vec2.py +++ b/src/transformers/models/wav2vec2/modeling_wav2vec2.py @@ -29,6 +29,7 @@ from ...activations import ACT2FN from ...deepspeed import is_deepspeed_zero3_enabled from ...file_utils import ( ModelOutput, + add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings, @@ -43,6 +44,7 @@ logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "Wav2Vec2Config" _CHECKPOINT_FOR_DOC = "facebook/wav2vec2-base-960h" +_PROCESSOR_FOR_DOC = "Wav2Vec2Processor" WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST = [ "facebook/wav2vec2-base-960h", @@ -1118,7 +1120,13 @@ class Wav2Vec2Model(Wav2Vec2PreTrainedModel): return hidden_states @add_start_docstrings_to_model_forward(WAV_2_VEC_2_INPUTS_DOCSTRING) - @replace_return_docstrings(output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC) + @add_code_sample_docstrings( + processor_class=_PROCESSOR_FOR_DOC, + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=Wav2Vec2BaseModelOutput, + config_class=_CONFIG_FOR_DOC, + modality="audio", + ) def forward( self, input_values, @@ -1128,30 +1136,6 @@ class Wav2Vec2Model(Wav2Vec2PreTrainedModel): output_hidden_states=None, return_dict=None, ): - """ - - Returns: - - Example:: - - >>> from transformers import Wav2Vec2Processor, Wav2Vec2Model - >>> from datasets import load_dataset - >>> import soundfile as sf - - >>> processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h") - >>> model = Wav2Vec2Model.from_pretrained("facebook/wav2vec2-base-960h") - - >>> def map_to_array(batch): - >>> speech, _ = sf.read(batch["file"]) - >>> batch["speech"] = speech - >>> return batch - - >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") - >>> ds = ds.map(map_to_array) - - >>> input_values = processor(ds["speech"][0], return_tensors="pt").input_values # Batch size 1 - >>> hidden_states = model(input_values).last_hidden_state - """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states @@ -1502,7 +1486,12 @@ class Wav2Vec2ForCTC(Wav2Vec2PreTrainedModel): self.wav2vec2.feature_extractor._freeze_parameters() @add_start_docstrings_to_model_forward(WAV_2_VEC_2_INPUTS_DOCSTRING) - @replace_return_docstrings(output_type=CausalLMOutput, config_class=_CONFIG_FOR_DOC) + @add_code_sample_docstrings( + processor_class=_PROCESSOR_FOR_DOC, + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=CausalLMOutput, + config_class=_CONFIG_FOR_DOC, + ) def forward( self, input_values, @@ -1518,41 +1507,6 @@ class Wav2Vec2ForCTC(Wav2Vec2PreTrainedModel): the sequence length of the output logits. Indices are selected in ``[-100, 0, ..., config.vocab_size - 1]``. All labels set to ``-100`` are ignored (masked), the loss is only computed for labels in ``[0, ..., config.vocab_size - 1]``. 
- - Returns: - - Example:: - - >>> import torch - >>> from transformers import Wav2Vec2Processor, Wav2Vec2ForCTC - >>> from datasets import load_dataset - >>> import soundfile as sf - - >>> processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h") - >>> model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h") - - >>> def map_to_array(batch): - >>> speech, _ = sf.read(batch["file"]) - >>> batch["speech"] = speech - >>> return batch - - >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") - >>> ds = ds.map(map_to_array) - - >>> input_values = processor(ds["speech"][0], return_tensors="pt").input_values # Batch size 1 - >>> logits = model(input_values).logits - >>> predicted_ids = torch.argmax(logits, dim=-1) - - >>> transcription = processor.decode(predicted_ids[0]) - - >>> # compute loss - >>> target_transcription = "A MAN SAID TO THE UNIVERSE SIR I EXIST" - - >>> # wrap processor as target processor to encode labels - >>> with processor.as_target_processor(): - >>> labels = processor(target_transcription, return_tensors="pt").input_ids - - >>> loss = model(input_values, labels=labels).loss """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict @@ -1647,7 +1601,13 @@ class Wav2Vec2ForSequenceClassification(Wav2Vec2PreTrainedModel): param.requires_grad = False @add_start_docstrings_to_model_forward(WAV_2_VEC_2_INPUTS_DOCSTRING) - @replace_return_docstrings(output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC) + @add_code_sample_docstrings( + processor_class="Wav2Vec2FeatureExtractor", + checkpoint="superb/wav2vec2-base-superb-ks", + output_type=SequenceClassifierOutput, + config_class=_CONFIG_FOR_DOC, + modality="audio", + ) def forward( self, input_values, @@ -1662,29 +1622,6 @@ class Wav2Vec2ForSequenceClassification(Wav2Vec2PreTrainedModel): Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ..., config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss), If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
- - Returns: - - Example:: - - >>> import torch - >>> from transformers import Wav2Vec2FeatureExtractor, Wav2Vec2ForSequenceClassification - >>> from datasets import load_dataset - - >>> processor = Wav2Vec2FeatureExtractor.from_pretrained("superb/wav2vec2-base-superb-ks") - >>> model = Wav2Vec2ForSequenceClassification.from_pretrained("superb/wav2vec2-base-superb-ks") - - >>> ds = load_dataset("anton-l/superb_dummy", "ks", split="test") - - >>> input_values = processor(ds["speech"][4], return_tensors="pt").input_values # Batch size 1 - >>> logits = model(input_values).logits - >>> predicted_class_ids = torch.argmax(logits, dim=-1) - - >>> # compute loss - >>> target_label = "down" - >>> labels = torch.tensor([model.config.label2id[target_label]]) - - >>> loss = model(input_values, labels=labels).loss """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict diff --git a/src/transformers/models/xlm/modeling_tf_xlm.py b/src/transformers/models/xlm/modeling_tf_xlm.py index b7cdc1ad74f..11cea6943ed 100644 --- a/src/transformers/models/xlm/modeling_tf_xlm.py +++ b/src/transformers/models/xlm/modeling_tf_xlm.py @@ -703,7 +703,7 @@ class TFXLMModel(TFXLMPreTrainedModel): @add_start_docstrings_to_model_forward(XLM_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFBaseModelOutput, config_class=_CONFIG_FOR_DOC, @@ -856,7 +856,7 @@ class TFXLMWithLMHeadModel(TFXLMPreTrainedModel): @add_start_docstrings_to_model_forward(XLM_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFXLMWithLMHeadModelOutput, config_class=_CONFIG_FOR_DOC, @@ -946,7 +946,7 @@ class TFXLMForSequenceClassification(TFXLMPreTrainedModel, TFSequenceClassificat @add_start_docstrings_to_model_forward(XLM_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, @@ -1072,7 +1072,7 @@ class TFXLMForMultipleChoice(TFXLMPreTrainedModel, TFMultipleChoiceLoss): @add_start_docstrings_to_model_forward(XLM_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFMultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC, @@ -1222,7 +1222,7 @@ class TFXLMForTokenClassification(TFXLMPreTrainedModel, TFTokenClassificationLos @add_start_docstrings_to_model_forward(XLM_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFTokenClassifierOutput, config_class=_CONFIG_FOR_DOC, @@ -1327,7 +1327,7 @@ class TFXLMForQuestionAnsweringSimple(TFXLMPreTrainedModel, TFQuestionAnsweringL @add_start_docstrings_to_model_forward(XLM_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFQuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, diff --git 
a/src/transformers/models/xlm/modeling_xlm.py b/src/transformers/models/xlm/modeling_xlm.py index ebf88d2b12e..4d4b8c0c8d7 100755 --- a/src/transformers/models/xlm/modeling_xlm.py +++ b/src/transformers/models/xlm/modeling_xlm.py @@ -488,7 +488,7 @@ class XLMModel(XLMPreTrainedModel): @add_start_docstrings_to_model_forward(XLM_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC, @@ -710,7 +710,7 @@ class XLMWithLMHeadModel(XLMPreTrainedModel): @add_start_docstrings_to_model_forward(XLM_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC, @@ -789,7 +789,7 @@ class XLMForSequenceClassification(XLMPreTrainedModel): @add_start_docstrings_to_model_forward(XLM_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, @@ -889,7 +889,7 @@ class XLMForQuestionAnsweringSimple(XLMPreTrainedModel): @add_start_docstrings_to_model_forward(XLM_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, @@ -1112,7 +1112,7 @@ class XLMForTokenClassification(XLMPreTrainedModel): @add_start_docstrings_to_model_forward(XLM_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC, @@ -1205,7 +1205,7 @@ class XLMForMultipleChoice(XLMPreTrainedModel): @add_start_docstrings_to_model_forward(XLM_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=MultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC, diff --git a/src/transformers/models/xlnet/modeling_tf_xlnet.py b/src/transformers/models/xlnet/modeling_tf_xlnet.py index c70746483e7..859f7f8dce5 100644 --- a/src/transformers/models/xlnet/modeling_tf_xlnet.py +++ b/src/transformers/models/xlnet/modeling_tf_xlnet.py @@ -1160,7 +1160,7 @@ class TFXLNetModel(TFXLNetPreTrainedModel): @add_start_docstrings_to_model_forward(XLNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFXLNetModelOutput, config_class=_CONFIG_FOR_DOC, @@ -1429,7 +1429,7 @@ class TFXLNetForSequenceClassification(TFXLNetPreTrainedModel, TFSequenceClassif @add_start_docstrings_to_model_forward(XLNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFXLNetForSequenceClassificationOutput, config_class=_CONFIG_FOR_DOC, @@ -1555,7 +1555,7 @@ 
class TFXLNetForMultipleChoice(TFXLNetPreTrainedModel, TFMultipleChoiceLoss): @add_start_docstrings_to_model_forward(XLNET_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFXLNetForMultipleChoiceOutput, config_class=_CONFIG_FOR_DOC, @@ -1704,7 +1704,7 @@ class TFXLNetForTokenClassification(TFXLNetPreTrainedModel, TFTokenClassificatio @add_start_docstrings_to_model_forward(XLNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFXLNetForTokenClassificationOutput, config_class=_CONFIG_FOR_DOC, @@ -1811,7 +1811,7 @@ class TFXLNetForQuestionAnsweringSimple(TFXLNetPreTrainedModel, TFQuestionAnswer @add_start_docstrings_to_model_forward(XLNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFXLNetForQuestionAnsweringSimpleOutput, config_class=_CONFIG_FOR_DOC, diff --git a/src/transformers/models/xlnet/modeling_xlnet.py b/src/transformers/models/xlnet/modeling_xlnet.py index b45f158d6a6..70c37ad84f3 100755 --- a/src/transformers/models/xlnet/modeling_xlnet.py +++ b/src/transformers/models/xlnet/modeling_xlnet.py @@ -1069,7 +1069,7 @@ class XLNetModel(XLNetPreTrainedModel): @add_start_docstrings_to_model_forward(XLNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=XLNetModelOutput, config_class=_CONFIG_FOR_DOC, @@ -1497,7 +1497,7 @@ class XLNetForSequenceClassification(XLNetPreTrainedModel): @add_start_docstrings_to_model_forward(XLNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=XLNetForSequenceClassificationOutput, config_class=_CONFIG_FOR_DOC, @@ -1604,7 +1604,7 @@ class XLNetForTokenClassification(XLNetPreTrainedModel): @add_start_docstrings_to_model_forward(XLNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=XLNetForTokenClassificationOutput, config_class=_CONFIG_FOR_DOC, @@ -1701,7 +1701,7 @@ class XLNetForMultipleChoice(XLNetPreTrainedModel): @add_start_docstrings_to_model_forward(XLNET_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=XLNetForMultipleChoiceOutput, config_class=_CONFIG_FOR_DOC, @@ -1804,7 +1804,7 @@ class XLNetForQuestionAnsweringSimple(XLNetPreTrainedModel): @add_start_docstrings_to_model_forward(XLNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=XLNetForQuestionAnsweringSimpleOutput, config_class=_CONFIG_FOR_DOC, diff --git 
a/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/modeling_tf_{{cookiecutter.lowercase_modelname}}.py b/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/modeling_tf_{{cookiecutter.lowercase_modelname}}.py index 6732966fe16..754e4d88833 100644 --- a/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/modeling_tf_{{cookiecutter.lowercase_modelname}}.py +++ b/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/modeling_tf_{{cookiecutter.lowercase_modelname}}.py @@ -945,7 +945,7 @@ class TF{{cookiecutter.camelcase_modelname}}Model(TF{{cookiecutter.camelcase_mod @add_start_docstrings_to_model_forward({{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFBaseModelOutputWithPoolingAndCrossAttentions, config_class=_CONFIG_FOR_DOC, @@ -1068,7 +1068,7 @@ class TF{{cookiecutter.camelcase_modelname}}ForMaskedLM(TF{{cookiecutter.camelca @add_start_docstrings_to_model_forward({{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFMaskedLMOutput, config_class=_CONFIG_FOR_DOC, @@ -1177,7 +1177,7 @@ class TF{{cookiecutter.camelcase_modelname}}ForCausalLM(TF{{cookiecutter.camelca } @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFCausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC, @@ -1344,7 +1344,7 @@ class TF{{cookiecutter.camelcase_modelname}}ForSequenceClassification(TF{{cookie @add_start_docstrings_to_model_forward({{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, @@ -1450,7 +1450,7 @@ class TF{{cookiecutter.camelcase_modelname}}ForMultipleChoice(TF{{cookiecutter.c @add_start_docstrings_to_model_forward({{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFMultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC, @@ -1593,7 +1593,7 @@ class TF{{cookiecutter.camelcase_modelname}}ForTokenClassification(TF{{cookiecut @add_start_docstrings_to_model_forward({{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFTokenClassifierOutput, config_class=_CONFIG_FOR_DOC, @@ -1689,7 +1689,7 @@ class TF{{cookiecutter.camelcase_modelname}}ForQuestionAnswering(TF{{cookiecutte @add_start_docstrings_to_model_forward({{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFQuestionAnsweringModelOutput, 
config_class=_CONFIG_FOR_DOC, @@ -2941,7 +2941,7 @@ class TF{{cookiecutter.camelcase_modelname}}Model(TF{{cookiecutter.camelcase_mod @add_start_docstrings_to_model_forward({{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFSeq2SeqModelOutput, config_class=_CONFIG_FOR_DOC, diff --git a/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/modeling_{{cookiecutter.lowercase_modelname}}.py b/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/modeling_{{cookiecutter.lowercase_modelname}}.py index 94c107fb205..5d894709a04 100755 --- a/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/modeling_{{cookiecutter.lowercase_modelname}}.py +++ b/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/modeling_{{cookiecutter.lowercase_modelname}}.py @@ -795,7 +795,7 @@ class {{cookiecutter.camelcase_modelname}}Model({{cookiecutter.camelcase_modelna @add_start_docstrings_to_model_forward({{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPastAndCrossAttentions, config_class=_CONFIG_FOR_DOC, @@ -953,7 +953,7 @@ class {{cookiecutter.camelcase_modelname}}ForMaskedLM({{cookiecutter.camelcase_m @add_start_docstrings_to_model_forward({{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC, @@ -1221,7 +1221,7 @@ class {{cookiecutter.camelcase_modelname}}ForSequenceClassification({{cookiecutt @add_start_docstrings_to_model_forward({{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, @@ -1301,7 +1301,7 @@ class {{cookiecutter.camelcase_modelname}}ForMultipleChoice({{cookiecutter.camel @add_start_docstrings_to_model_forward({{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=MultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC, @@ -1391,7 +1391,7 @@ class {{cookiecutter.camelcase_modelname}}ForTokenClassification({{cookiecutter. 
@add_start_docstrings_to_model_forward({{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC, @@ -1478,7 +1478,7 @@ class {{cookiecutter.camelcase_modelname}}ForQuestionAnswering({{cookiecutter.ca @add_start_docstrings_to_model_forward({{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, @@ -2646,7 +2646,7 @@ class {{cookiecutter.camelcase_modelname}}Model({{cookiecutter.camelcase_modelna @add_start_docstrings_to_model_forward({{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC, @@ -2921,7 +2921,7 @@ class {{cookiecutter.camelcase_modelname}}ForSequenceClassification({{cookiecutt @add_start_docstrings_to_model_forward({{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=Seq2SeqSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, @@ -3022,7 +3022,7 @@ class {{cookiecutter.camelcase_modelname}}ForQuestionAnswering({{cookiecutter.ca @add_start_docstrings_to_model_forward({{cookiecutter.uppercase_modelname}}_INPUTS_DOCSTRING) @add_code_sample_docstrings( - tokenizer_class=_TOKENIZER_FOR_DOC, + processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=Seq2SeqQuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC,
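Note on usage (illustrative, not part of the patch): every call site above now passes the sample-selection metadata through the renamed `processor_class` keyword, and the audio models additionally pass `modality="audio"` so the speech templates are chosen instead of the NLP ones. The sketch below shows how a model file is expected to wire the decorator after this change; `ToyWav2Vec2ForCTC` and its `forward` stub are hypothetical, the module-level constants mirror the ones defined in `modeling_wav2vec2.py`, and the import paths follow this diff (they may differ in later releases).

    # Minimal sketch of the decorator usage introduced by this diff.
    # `ToyWav2Vec2ForCTC` is a hypothetical stand-in for a real model class.
    from transformers.file_utils import add_code_sample_docstrings
    from transformers.modeling_outputs import CausalLMOutput

    _PROCESSOR_FOR_DOC = "Wav2Vec2Processor"
    _CHECKPOINT_FOR_DOC = "facebook/wav2vec2-base-960h"
    _CONFIG_FOR_DOC = "Wav2Vec2Config"


    class ToyWav2Vec2ForCTC:
        @add_code_sample_docstrings(
            processor_class=_PROCESSOR_FOR_DOC,  # renamed from tokenizer_class
            checkpoint=_CHECKPOINT_FOR_DOC,
            output_type=CausalLMOutput,
            config_class=_CONFIG_FOR_DOC,
        )
        def forward(self, input_values, labels=None):
            """Stub forward; the decorator appends the rendered code sample."""


    # Printing the docstring shows the appended, checkpoint-specific example.
    print(ToyWav2Vec2ForCTC.forward.__doc__)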