diff --git a/src/transformers/__init__.py b/src/transformers/__init__.py
index 542a1f7e4f3..1dc09d2aef4 100755
--- a/src/transformers/__init__.py
+++ b/src/transformers/__init__.py
@@ -31,6 +31,7 @@ from .configuration_openai import OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP, Open
 from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig
 from .configuration_t5 import T5_PRETRAINED_CONFIG_ARCHIVE_MAP, T5Config
 from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
+
 # Configurations
 from .configuration_utils import PretrainedConfig
 from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig
@@ -55,6 +56,7 @@ from .data import (
     xnli_processors,
     xnli_tasks_num_labels,
 )
+
 # Files and general utilities
 from .file_utils import (
     CONFIG_NAME,
@@ -71,8 +73,10 @@ from .file_utils import (
     is_tf_available,
     is_torch_available,
 )
+
 # Model Cards
 from .modelcard import ModelCard
+
 # TF 2.0 <=> PyTorch conversion utilities
 from .modeling_tf_pytorch_utils import (
     convert_tf_weight_name_to_pt_weight_name,
@@ -83,6 +87,7 @@ from .modeling_tf_pytorch_utils import (
     load_tf2_model_in_pytorch_model,
     load_tf2_weights_in_pytorch_model,
 )
+
 # Pipelines
 from .pipelines import (
     CsvPipelineDataFormat,
@@ -108,6 +113,7 @@ from .tokenization_openai import OpenAIGPTTokenizer
 from .tokenization_roberta import RobertaTokenizer
 from .tokenization_t5 import T5Tokenizer
 from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
+
 # Tokenizers
 from .tokenization_utils import PreTrainedTokenizer
 from .tokenization_xlm import XLMTokenizer
diff --git a/src/transformers/modeling_tf_camembert.py b/src/transformers/modeling_tf_camembert.py
index 74c923e1c20..593c405d656 100644
--- a/src/transformers/modeling_tf_camembert.py
+++ b/src/transformers/modeling_tf_camembert.py
@@ -30,8 +30,7 @@ from .modeling_tf_roberta import (
 
 logger = logging.getLogger(__name__)
 
-TF_CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_MAP = {
-}
+TF_CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_MAP = {}
 
 CAMEMBERT_START_DOCSTRING = r"""
     The CamemBERT model was proposed in
diff --git a/templates/adding_a_new_example_script/run_xxx.py b/templates/adding_a_new_example_script/run_xxx.py
index 1c56a03d0ec..6de065ce65c 100644
--- a/templates/adding_a_new_example_script/run_xxx.py
+++ b/templates/adding_a_new_example_script/run_xxx.py
@@ -52,6 +52,7 @@ from utils_squad import (
     write_predictions,
     write_predictions_extended,
 )
+
 # The follwing import is the official SQuAD evaluation script (2.0).
 # You can remove it from the dependencies if you are using this script outside of the library
 # We've added it here for automated tests (see examples/test_examples.py file)
diff --git a/templates/adding_a_new_example_script/utils_xxx.py b/templates/adding_a_new_example_script/utils_xxx.py
index 172a1b03a2d..b8f8cdf2b96 100644
--- a/templates/adding_a_new_example_script/utils_xxx.py
+++ b/templates/adding_a_new_example_script/utils_xxx.py
@@ -21,6 +21,7 @@ import logging
 import math
 
 from transformers.tokenization_bert import BasicTokenizer, whitespace_tokenize
+
 # Required by XLNet evaluation method to compute optimal threshold (see write_predictions_extended() method)
 from utils_squad_evaluate import find_all_best_thresh_v2, get_raw_scores, make_qid_to_has_ans