diff --git a/src/transformers/convert_albert_original_tf_checkpoint_to_pytorch.py b/src/transformers/convert_albert_original_tf_checkpoint_to_pytorch.py
index 8fabca0fbdd..10c018170fc 100644
--- a/src/transformers/convert_albert_original_tf_checkpoint_to_pytorch.py
+++ b/src/transformers/convert_albert_original_tf_checkpoint_to_pytorch.py
@@ -20,8 +20,7 @@ import argparse
 import torch
 
 from transformers import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
-
-from .utils import logging
+from transformers.utils import logging
 
 
 logging.set_verbosity_info()
diff --git a/src/transformers/convert_bart_original_pytorch_checkpoint_to_pytorch.py b/src/transformers/convert_bart_original_pytorch_checkpoint_to_pytorch.py
index 8f460a5914e..c07b50fef0c 100644
--- a/src/transformers/convert_bart_original_pytorch_checkpoint_to_pytorch.py
+++ b/src/transformers/convert_bart_original_pytorch_checkpoint_to_pytorch.py
@@ -31,8 +31,7 @@ from transformers import (
     BartTokenizer,
 )
 from transformers.modeling_bart import _make_linear_from_emb
-
-from .utils import logging
+from transformers.utils import logging
 
 
 FAIRSEQ_MODELS = ["bart.large", "bart.large.mnli", "bart.large.cnn", "bart_xsum/model.pt"]
diff --git a/src/transformers/convert_bert_original_tf2_checkpoint_to_pytorch.py b/src/transformers/convert_bert_original_tf2_checkpoint_to_pytorch.py
index a68bf25487f..e4e0e3f55ae 100644
--- a/src/transformers/convert_bert_original_tf2_checkpoint_to_pytorch.py
+++ b/src/transformers/convert_bert_original_tf2_checkpoint_to_pytorch.py
@@ -15,8 +15,7 @@ import tensorflow as tf
 import torch
 
 from transformers import BertConfig, BertModel
-
-from .utils import logging
+from transformers.utils import logging
 
 
 logging.set_verbosity_info()
diff --git a/src/transformers/convert_bert_original_tf_checkpoint_to_pytorch.py b/src/transformers/convert_bert_original_tf_checkpoint_to_pytorch.py
index bc1c8d128f9..d1cb69a2eb4 100755
--- a/src/transformers/convert_bert_original_tf_checkpoint_to_pytorch.py
+++ b/src/transformers/convert_bert_original_tf_checkpoint_to_pytorch.py
@@ -20,8 +20,7 @@ import argparse
 import torch
 
 from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
-
-from .utils import logging
+from transformers.utils import logging
 
 
 logging.set_verbosity_info()
diff --git a/src/transformers/convert_electra_original_tf_checkpoint_to_pytorch.py b/src/transformers/convert_electra_original_tf_checkpoint_to_pytorch.py
index b5f1278ddb2..9cbfcf665dc 100644
--- a/src/transformers/convert_electra_original_tf_checkpoint_to_pytorch.py
+++ b/src/transformers/convert_electra_original_tf_checkpoint_to_pytorch.py
@@ -20,8 +20,7 @@ import argparse
 import torch
 
 from transformers import ElectraConfig, ElectraForMaskedLM, ElectraForPreTraining, load_tf_weights_in_electra
-
-from .utils import logging
+from transformers.utils import logging
 
 
 logging.set_verbosity_info()
diff --git a/src/transformers/convert_gpt2_original_tf_checkpoint_to_pytorch.py b/src/transformers/convert_gpt2_original_tf_checkpoint_to_pytorch.py
index 4324bc5a8dd..e42ebd888d1 100755
--- a/src/transformers/convert_gpt2_original_tf_checkpoint_to_pytorch.py
+++ b/src/transformers/convert_gpt2_original_tf_checkpoint_to_pytorch.py
@@ -20,8 +20,7 @@ import argparse
 import torch
 
 from transformers import CONFIG_NAME, WEIGHTS_NAME, GPT2Config, GPT2Model, load_tf_weights_in_gpt2
-
-from .utils import logging
+from transformers.utils import logging
 
 
 logging.set_verbosity_info()
diff --git a/src/transformers/convert_mobilebert_original_tf_checkpoint_to_pytorch.py b/src/transformers/convert_mobilebert_original_tf_checkpoint_to_pytorch.py
index 468c503fd72..767b36a5703 100644
--- a/src/transformers/convert_mobilebert_original_tf_checkpoint_to_pytorch.py
+++ b/src/transformers/convert_mobilebert_original_tf_checkpoint_to_pytorch.py
@@ -3,8 +3,7 @@ import argparse
 import torch
 
 from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
-
-from .utils import logging
+from transformers.utils import logging
 
 
 logging.set_verbosity_info()
diff --git a/src/transformers/convert_openai_original_tf_checkpoint_to_pytorch.py b/src/transformers/convert_openai_original_tf_checkpoint_to_pytorch.py
index 83760e00d6b..397884e32c0 100755
--- a/src/transformers/convert_openai_original_tf_checkpoint_to_pytorch.py
+++ b/src/transformers/convert_openai_original_tf_checkpoint_to_pytorch.py
@@ -20,8 +20,7 @@ import argparse
 import torch
 
 from transformers import CONFIG_NAME, WEIGHTS_NAME, OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
-
-from .utils import logging
+from transformers.utils import logging
 
 
 logging.set_verbosity_info()
diff --git a/src/transformers/convert_pytorch_checkpoint_to_tf2.py b/src/transformers/convert_pytorch_checkpoint_to_tf2.py
index cb0ef848485..43779c964dd 100755
--- a/src/transformers/convert_pytorch_checkpoint_to_tf2.py
+++ b/src/transformers/convert_pytorch_checkpoint_to_tf2.py
@@ -78,8 +78,7 @@ from transformers import (
     load_pytorch_checkpoint_in_tf2_model,
 )
 from transformers.file_utils import hf_bucket_url
-
-from .utils import logging
+from transformers.utils import logging
 
 
 if is_torch_available():
diff --git a/src/transformers/convert_reformer_trax_checkpoint_to_pytorch.py b/src/transformers/convert_reformer_trax_checkpoint_to_pytorch.py
index 97d1a63f51f..ec58e2f9132 100755
--- a/src/transformers/convert_reformer_trax_checkpoint_to_pytorch.py
+++ b/src/transformers/convert_reformer_trax_checkpoint_to_pytorch.py
@@ -22,8 +22,7 @@ import numpy as np
 import torch
 
 from transformers import ReformerConfig, ReformerModelWithLMHead
-
-from .utils import logging
+from transformers.utils import logging
 
 
 logging.set_verbosity_info()
diff --git a/src/transformers/convert_roberta_original_pytorch_checkpoint_to_pytorch.py b/src/transformers/convert_roberta_original_pytorch_checkpoint_to_pytorch.py
index 5b4857cb76f..dbb73db2820 100644
--- a/src/transformers/convert_roberta_original_pytorch_checkpoint_to_pytorch.py
+++ b/src/transformers/convert_roberta_original_pytorch_checkpoint_to_pytorch.py
@@ -26,8 +26,7 @@ from packaging import version
 
 from transformers.modeling_bert import BertIntermediate, BertLayer, BertOutput, BertSelfAttention, BertSelfOutput
 from transformers.modeling_roberta import RobertaConfig, RobertaForMaskedLM, RobertaForSequenceClassification
-
-from .utils import logging
+from transformers.utils import logging
 
 
 if version.parse(fairseq.__version__) < version.parse("0.9.0"):
diff --git a/src/transformers/convert_t5_original_tf_checkpoint_to_pytorch.py b/src/transformers/convert_t5_original_tf_checkpoint_to_pytorch.py
index 2e1b5c35e2a..67b65faba97 100755
--- a/src/transformers/convert_t5_original_tf_checkpoint_to_pytorch.py
+++ b/src/transformers/convert_t5_original_tf_checkpoint_to_pytorch.py
@@ -20,8 +20,7 @@ import argparse
 import torch
 
 from transformers import T5Config, T5Model, load_tf_weights_in_t5
-
-from .utils import logging
+from transformers.utils import logging
 
 
 logging.set_verbosity_info()
diff --git a/src/transformers/convert_transfo_xl_original_tf_checkpoint_to_pytorch.py b/src/transformers/convert_transfo_xl_original_tf_checkpoint_to_pytorch.py
index d07ae69f4f2..9135b0d088e 100755
--- a/src/transformers/convert_transfo_xl_original_tf_checkpoint_to_pytorch.py
+++ b/src/transformers/convert_transfo_xl_original_tf_checkpoint_to_pytorch.py
@@ -31,8 +31,7 @@ from transformers import (
     load_tf_weights_in_transfo_xl,
 )
 from transformers.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
-
-from .utils import logging
+from transformers.utils import logging
 
 
 logging.set_verbosity_info()
diff --git a/src/transformers/convert_xlm_original_pytorch_checkpoint_to_pytorch.py b/src/transformers/convert_xlm_original_pytorch_checkpoint_to_pytorch.py
index 7a72d3daa49..9baf1159125 100755
--- a/src/transformers/convert_xlm_original_pytorch_checkpoint_to_pytorch.py
+++ b/src/transformers/convert_xlm_original_pytorch_checkpoint_to_pytorch.py
@@ -23,8 +23,7 @@ import torch
 
 from transformers import CONFIG_NAME, WEIGHTS_NAME
 from transformers.tokenization_xlm import VOCAB_FILES_NAMES
-
-from .utils import logging
+from transformers.utils import logging
 
 
 logging.set_verbosity_info()
diff --git a/src/transformers/convert_xlnet_original_tf_checkpoint_to_pytorch.py b/src/transformers/convert_xlnet_original_tf_checkpoint_to_pytorch.py
index 633fd01ca68..5ad56c73b51 100755
--- a/src/transformers/convert_xlnet_original_tf_checkpoint_to_pytorch.py
+++ b/src/transformers/convert_xlnet_original_tf_checkpoint_to_pytorch.py
@@ -29,8 +29,7 @@ from transformers import (
     XLNetLMHeadModel,
     load_tf_weights_in_xlnet,
 )
-
-from .utils import logging
+from transformers.utils import logging
 
 
 GLUE_TASKS_NUM_LABELS = {
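
Note (not part of the diff itself): these conversion scripts are normally executed directly with `python path/to/script.py`, and a file run that way executes as the top-level module `__main__` with no parent package, so the relative `from .utils import logging` raises an ImportError (attempted relative import with no known parent package). The absolute `from transformers.utils import logging` resolves against the installed package and works under both invocation styles. Below is a minimal sketch of the distinction; the script path in the comment is taken from the diff, and `set_verbosity_info`/`get_logger` are the real `transformers.utils.logging` API, but the snippet itself is illustrative rather than code from the PR.

```python
# Running a conversion script directly, e.g.
#
#     python src/transformers/convert_bert_original_tf_checkpoint_to_pytorch.py --help
#
# executes the file as the top-level module "__main__" rather than as a
# submodule of the "transformers" package, so Python has no parent package
# against which to resolve a relative import:
#
#     from .utils import logging   # ImportError: attempted relative import
#                                  # with no known parent package
#
# The absolute form resolves against the installed transformers package and
# works both when the file is imported and when it is run as a script:
from transformers.utils import logging

logging.set_verbosity_info()           # same call the scripts make at import time
logger = logging.get_logger(__name__)  # module-level logger, as in the library
logger.info("absolute import works when the file runs as __main__")
```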