Apply quality and style requirements once again

Julien Plu 2020-01-07 15:51:39 +01:00 committed by Lysandre Debut
parent 5e3c72842d
commit ca1d66734d
4 changed files with 9 additions and 2 deletions


@@ -31,6 +31,7 @@ from .configuration_openai import OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP, Open
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig
from .configuration_t5 import T5_PRETRAINED_CONFIG_ARCHIVE_MAP, T5Config
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
+# Configurations
from .configuration_utils import PretrainedConfig
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig
@@ -55,6 +56,7 @@ from .data import (
xnli_processors,
xnli_tasks_num_labels,
)
+# Files and general utilities
from .file_utils import (
CONFIG_NAME,
@@ -71,8 +73,10 @@ from .file_utils import (
is_tf_available,
is_torch_available,
)
+# Model Cards
from .modelcard import ModelCard
+# TF 2.0 <=> PyTorch conversion utilities
from .modeling_tf_pytorch_utils import (
convert_tf_weight_name_to_pt_weight_name,
@@ -83,6 +87,7 @@ from .modeling_tf_pytorch_utils import (
load_tf2_model_in_pytorch_model,
load_tf2_weights_in_pytorch_model,
)
+# Pipelines
from .pipelines import (
CsvPipelineDataFormat,
@@ -108,6 +113,7 @@ from .tokenization_openai import OpenAIGPTTokenizer
from .tokenization_roberta import RobertaTokenizer
from .tokenization_t5 import T5Tokenizer
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
+# Tokenizers
from .tokenization_utils import PreTrainedTokenizer
from .tokenization_xlm import XLMTokenizer
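
The comment lines added above ("# Configurations", "# Files and general utilities", "# Tokenizers", and so on) label the groups of re-exports that form the package's top-level API. Below is a minimal usage sketch of that API, assuming an installed transformers release from this period and network access; the "roberta-base" checkpoint name is only an example and is not part of this commit:

# Minimal sketch: the re-exported classes above are reached through the
# top-level package, e.g. a config from the "Configurations" group and a
# tokenizer from the "Tokenizers" group.
from transformers import RobertaConfig, RobertaTokenizer

config = RobertaConfig.from_pretrained("roberta-base")        # example checkpoint name
tokenizer = RobertaTokenizer.from_pretrained("roberta-base")

print(config.num_hidden_layers)
print(tokenizer.tokenize("Hello world"))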


@@ -30,8 +30,7 @@ from .modeling_tf_roberta import (
logger = logging.getLogger(__name__)
-TF_CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_MAP = {
-}
+TF_CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_MAP = {}
CAMEMBERT_START_DOCSTRING = r""" The CamemBERT model was proposed in
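
The change above only collapses an empty dict onto a single line: no TF CamemBERT checkpoints are registered under shortcut names in this map. As an illustration of the lookup pattern such archive maps serve, here is a small sketch; resolve_checkpoint is a hypothetical helper written for this note, not library code:

# Illustrative sketch only. Archive maps pair shortcut names with checkpoint
# URLs; an empty map means every identifier falls through unchanged and is
# treated as a local path or URL by the caller.
TF_CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_MAP = {}

def resolve_checkpoint(name_or_path: str, archive_map: dict) -> str:
    # Hypothetical helper: return the mapped URL for a known shortcut,
    # otherwise hand the identifier back untouched.
    return archive_map.get(name_or_path, name_or_path)

print(resolve_checkpoint("camembert-base", TF_CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_MAP))
# -> "camembert-base", since nothing is registered in the TF map here.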


@@ -52,6 +52,7 @@ from utils_squad import (
write_predictions,
write_predictions_extended,
)
+# The following import is the official SQuAD evaluation script (2.0).
# You can remove it from the dependencies if you are using this script outside of the library
# We've added it here for automated tests (see examples/test_examples.py file)
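
As the comments above note, the evaluation-script import that follows them is only needed for the automated tests. One way to read "you can remove it from the dependencies" when reusing the script elsewhere is to guard the import; the snippet below is a sketch of that pattern, not how run_squad.py itself is written:

# Sketch: make the evaluation-script dependency optional so the rest of the
# script still imports when utils_squad_evaluate.py is not available.
try:
    import utils_squad_evaluate  # official SQuAD 2.0 evaluation helpers
    HAS_OFFICIAL_EVAL = True
except ImportError:
    HAS_OFFICIAL_EVAL = False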


@@ -21,6 +21,7 @@ import logging
import math
from transformers.tokenization_bert import BasicTokenizer, whitespace_tokenize
+# Required by XLNet evaluation method to compute optimal threshold (see write_predictions_extended() method)
from utils_squad_evaluate import find_all_best_thresh_v2, get_raw_scores, make_qid_to_has_ans
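
For context on the comment added above: the three helpers imported from utils_squad_evaluate come from the official SQuAD 2.0 evaluation script, and the XLNet evaluation path uses them to search for the best no-answer threshold. The sketch below shows how they fit together on toy inputs; the input layout and call signatures are assumptions based on the public evaluate-v2.0.py script, and utils_squad_evaluate.py must be importable:

from utils_squad_evaluate import find_all_best_thresh_v2, get_raw_scores, make_qid_to_has_ans

# Toy inputs in the official SQuAD 2.0 JSON layout: one answerable and one
# unanswerable question.
dataset = [{"paragraphs": [{"qas": [
    {"id": "q1", "answers": [{"text": "Paris"}]},  # has an answer
    {"id": "q2", "answers": []},                   # SQuAD 2.0 "no answer" case
]}]}]
preds = {"q1": "Paris", "q2": ""}   # predicted answer text per question id
na_probs = {"q1": 0.1, "q2": 0.9}   # predicted probability of "no answer"

qid_to_has_ans = make_qid_to_has_ans(dataset)        # qid -> has an answer?
exact_raw, f1_raw = get_raw_scores(dataset, preds)   # per-question exact match / F1
main_eval = {}
find_all_best_thresh_v2(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans)
print(main_eval)  # best_exact / best_f1 and the no-answer thresholds that achieve them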