Mirror of https://github.com/huggingface/transformers.git
Synced 2025-07-31 02:02:21 +06:00

Fix init import_structure sorting (#20477)

* Fix init import_structure sorting
* Fix rebase

Parent: 3b91f96fc9
Commit: bbcd5eea3b
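This commit alphabetically re-sorts the entries of the lazy-import table `_import_structure` in `src/transformers/__init__.py` and related init files, and fixes the utility that enforces that order (see the final hunk). Each block such as `_import_structure["models.bloom"].extend([...])` is expected to list its objects in sorted order. As a rough illustration of the invariant being restored, here is a minimal sketch assuming a simple case-insensitive ordering; the repository's real sorter uses its own sort keys, so treat the helper below as illustrative only.

```python
# Illustrative only: a simplified check that the names registered for one
# submodule of _import_structure are alphabetically sorted. The real tool
# uses its own sort keys and can rewrite the file in place.
_import_structure = {
    "models.bloom": [
        "BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BloomForCausalLM",
        "BloomForQuestionAnswering",
        "BloomForSequenceClassification",
        "BloomForTokenClassification",
        "BloomModel",
        "BloomPreTrainedModel",
    ],
}


def objects_are_sorted(names):
    """Return True if the object names are in case-insensitive alphabetical order."""
    return names == sorted(names, key=str.lower)


for module, names in _import_structure.items():
    assert objects_are_sorted(names), f"unsorted entries for {module!r}"
```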
@@ -569,10 +569,10 @@ else:
_import_structure["models.m2m_100"].append("M2M100Tokenizer")
_import_structure["models.marian"].append("MarianTokenizer")
_import_structure["models.mbart"].append("MBartTokenizer")
_import_structure["models.nllb"].append("NllbTokenizer")
_import_structure["models.mbart50"].append("MBart50Tokenizer")
_import_structure["models.mluke"].append("MLukeTokenizer")
_import_structure["models.mt5"].append("MT5Tokenizer")
_import_structure["models.nllb"].append("NllbTokenizer")
_import_structure["models.pegasus"].append("PegasusTokenizer")
_import_structure["models.plbart"].append("PLBartTokenizer")
_import_structure["models.reformer"].append("ReformerTokenizer")
@@ -722,14 +722,14 @@ else:
_import_structure["image_utils"] = ["ImageFeatureExtractionMixin"]
_import_structure["models.beit"].extend(["BeitFeatureExtractor", "BeitImageProcessor"])
_import_structure["models.clip"].extend(["CLIPFeatureExtractor", "CLIPImageProcessor"])
_import_structure["models.conditional_detr"].append("ConditionalDetrFeatureExtractor")
_import_structure["models.convnext"].extend(["ConvNextFeatureExtractor", "ConvNextImageProcessor"])
_import_structure["models.deformable_detr"].append("DeformableDetrFeatureExtractor")
_import_structure["models.deit"].extend(["DeiTFeatureExtractor", "DeiTImageProcessor"])
_import_structure["models.detr"].append("DetrFeatureExtractor")
_import_structure["models.conditional_detr"].append("ConditionalDetrFeatureExtractor")
_import_structure["models.donut"].extend(["DonutFeatureExtractor", "DonutImageProcessor"])
_import_structure["models.dpt"].extend(["DPTFeatureExtractor", "DPTImageProcessor"])
_import_structure["models.flava"].extend(["FlavaFeatureExtractor", "FlavaProcessor", "FlavaImageProcessor"])
_import_structure["models.flava"].extend(["FlavaFeatureExtractor", "FlavaImageProcessor", "FlavaProcessor"])
_import_structure["models.glpn"].extend(["GLPNFeatureExtractor", "GLPNImageProcessor"])
_import_structure["models.imagegpt"].extend(["ImageGPTFeatureExtractor", "ImageGPTImageProcessor"])
_import_structure["models.layoutlmv2"].extend(["LayoutLMv2FeatureExtractor", "LayoutLMv2ImageProcessor"])
@@ -819,70 +819,44 @@ else:
"TextDatasetForNextSentencePrediction",
]
_import_structure["deepspeed"] = []
_import_structure["generation_utils"] = []
_import_structure["generation"].extend(
[
"Constraint",
"ConstraintListState",
"DisjunctiveConstraint",
"PhrasalConstraint",
"BeamScorer",
"BeamSearchScorer",
"ConstrainedBeamSearchScorer",
"Constraint",
"ConstraintListState",
"DisjunctiveConstraint",
"ForcedBOSTokenLogitsProcessor",
"ForcedEOSTokenLogitsProcessor",
"GenerationMixin",
"HammingDiversityLogitsProcessor",
"InfNanRemoveLogitsProcessor",
"LogitsProcessor",
"LogitsProcessorList",
"LogitsWarper",
"MaxLengthCriteria",
"MaxTimeCriteria",
"MinLengthLogitsProcessor",
"NoBadWordsLogitsProcessor",
"NoRepeatNGramLogitsProcessor",
"PhrasalConstraint",
"PrefixConstrainedLogitsProcessor",
"RepetitionPenaltyLogitsProcessor",
"StoppingCriteria",
"StoppingCriteriaList",
"TemperatureLogitsWarper",
"TopKLogitsWarper",
"TopPLogitsWarper",
"TypicalLogitsWarper",
"MaxLengthCriteria",
"MaxTimeCriteria",
"StoppingCriteria",
"StoppingCriteriaList",
"GenerationMixin",
"top_k_top_p_filtering",
]
)
_import_structure["generation_utils"] = []
_import_structure["modeling_outputs"] = []
_import_structure["modeling_utils"] = ["PreTrainedModel"]

# PyTorch models structure

_import_structure["models.roc_bert"].extend(
[
"ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"RoCBertForMaskedLM",
"RoCBertForCausalLM",
"RoCBertForMultipleChoice",
"RoCBertForQuestionAnswering",
"RoCBertForSequenceClassification",
"RoCBertForTokenClassification",
"RoCBertLayer",
"RoCBertModel",
"RoCBertForPreTraining",
"RoCBertPreTrainedModel",
"load_tf_weights_in_roc_bert",
]
)

_import_structure["models.time_series_transformer"].extend(
[
"TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TimeSeriesTransformerForPrediction",
"TimeSeriesTransformerModel",
"TimeSeriesTransformerPreTrainedModel",
]
)
_import_structure["models.albert"].extend(
[
"ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
@@ -897,12 +871,13 @@ else:
"load_tf_weights_in_albert",
]
)

_import_structure["models.audio_spectrogram_transformer"].extend(
[
"AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"ASTForAudioClassification",
"ASTModel",
"ASTPreTrainedModel",
"ASTForAudioClassification",
]
)
_import_structure["models.auto"].extend(
@@ -913,8 +888,8 @@ else:
"MODEL_FOR_CAUSAL_IMAGE_MODELING_MAPPING",
"MODEL_FOR_CAUSAL_LM_MAPPING",
"MODEL_FOR_CTC_MAPPING",
"MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING",
"MODEL_FOR_DEPTH_ESTIMATION_MAPPING",
"MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING",
"MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING",
"MODEL_FOR_IMAGE_SEGMENTATION_MAPPING",
"MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING",
@@ -934,18 +909,18 @@ else:
"MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING",
"MODEL_FOR_VISION_2_SEQ_MAPPING",
"MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING",
"MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING",
"MODEL_MAPPING",
"MODEL_WITH_LM_HEAD_MAPPING",
"MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING",
"AutoModel",
"AutoBackbone",
"AutoModel",
"AutoModelForAudioClassification",
"AutoModelForAudioFrameClassification",
"AutoModelForAudioXVector",
"AutoModelForCausalLM",
"AutoModelForCTC",
"AutoModelForDocumentQuestionAnswering",
"AutoModelForDepthEstimation",
"AutoModelForDocumentQuestionAnswering",
"AutoModelForImageClassification",
"AutoModelForImageSegmentation",
"AutoModelForInstanceSegmentation",
@@ -965,8 +940,8 @@ else:
"AutoModelForVideoClassification",
"AutoModelForVision2Seq",
"AutoModelForVisualQuestionAnswering",
"AutoModelWithLMHead",
"AutoModelForZeroShotObjectDetection",
"AutoModelWithLMHead",
]
)
_import_structure["models.bart"].extend(
@@ -981,17 +956,6 @@ else:
"PretrainedBartModel",
]
)
_import_structure["models.mvp"].extend(
[
"MVP_PRETRAINED_MODEL_ARCHIVE_LIST",
"MvpForCausalLM",
"MvpForConditionalGeneration",
"MvpForQuestionAnswering",
"MvpForSequenceClassification",
"MvpModel",
"MvpPreTrainedModel",
]
)
_import_structure["models.beit"].extend(
[
"BEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
@@ -1054,17 +1018,6 @@ else:
"BigBirdPegasusPreTrainedModel",
]
)
_import_structure["models.bloom"].extend(
[
"BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
"BloomForCausalLM",
"BloomModel",
"BloomPreTrainedModel",
"BloomForSequenceClassification",
"BloomForTokenClassification",
"BloomForQuestionAnswering",
]
)
_import_structure["models.blenderbot"].extend(
[
"BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
@@ -1083,6 +1036,17 @@ else:
"BlenderbotSmallPreTrainedModel",
]
)
_import_structure["models.bloom"].extend(
[
"BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
"BloomForCausalLM",
"BloomForQuestionAnswering",
"BloomForSequenceClassification",
"BloomForTokenClassification",
"BloomModel",
"BloomPreTrainedModel",
]
)
_import_structure["models.camembert"].extend(
[
"CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
@@ -1123,20 +1087,19 @@ else:
_import_structure["models.clipseg"].extend(
[
"CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPSegForImageSegmentation",
"CLIPSegModel",
"CLIPSegPreTrainedModel",
"CLIPSegTextModel",
"CLIPSegVisionModel",
"CLIPSegForImageSegmentation",
]
)
_import_structure["models.x_clip"].extend(
_import_structure["models.codegen"].extend(
[
"XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"XCLIPModel",
"XCLIPPreTrainedModel",
"XCLIPTextModel",
"XCLIPVisionModel",
"CODEGEN_PRETRAINED_MODEL_ARCHIVE_LIST",
"CodeGenForCausalLM",
"CodeGenModel",
"CodeGenPreTrainedModel",
]
)
_import_structure["models.convbert"].extend(
@@ -1245,6 +1208,14 @@ else:
"DeiTPreTrainedModel",
]
)
_import_structure["models.dinat"].extend(
[
"DINAT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DinatForImageClassification",
"DinatModel",
"DinatPreTrainedModel",
]
)
_import_structure["models.distilbert"].extend(
[
"DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
@@ -1257,14 +1228,6 @@ else:
"DistilBertPreTrainedModel",
]
)
_import_structure["models.dinat"].extend(
[
"DINAT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DinatForImageClassification",
"DinatModel",
"DinatPreTrainedModel",
]
)
_import_structure["models.donut"].extend(
[
"DONUT_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
@@ -1347,8 +1310,8 @@ else:
"FlaubertForSequenceClassification",
"FlaubertForTokenClassification",
"FlaubertModel",
"FlaubertWithLMHeadModel",
"FlaubertPreTrainedModel",
"FlaubertWithLMHeadModel",
]
)
_import_structure["models.flava"].extend(
@@ -1461,14 +1424,6 @@ else:
"GroupViTVisionModel",
]
)
_import_structure["models.codegen"].extend(
[
"CODEGEN_PRETRAINED_MODEL_ARCHIVE_LIST",
"CodeGenForCausalLM",
"CodeGenModel",
"CodeGenPreTrainedModel",
]
)
_import_structure["models.hubert"].extend(
[
"HUBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
@@ -1505,17 +1460,17 @@ else:
"JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST",
"JukeboxModel",
"JukeboxPreTrainedModel",
"JukeboxVQVAE",
"JukeboxPrior",
"JukeboxVQVAE",
]
)
_import_structure["models.layoutlm"].extend(
[
"LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"LayoutLMForMaskedLM",
"LayoutLMForQuestionAnswering",
"LayoutLMForSequenceClassification",
"LayoutLMForTokenClassification",
"LayoutLMForQuestionAnswering",
"LayoutLMModel",
"LayoutLMPreTrainedModel",
]
@@ -1559,6 +1514,16 @@ else:
"LevitPreTrainedModel",
]
)
_import_structure["models.lilt"].extend(
[
"LILT_PRETRAINED_MODEL_ARCHIVE_LIST",
"LiltForQuestionAnswering",
"LiltForSequenceClassification",
"LiltForTokenClassification",
"LiltModel",
"LiltPreTrainedModel",
]
)
_import_structure["models.longformer"].extend(
[
"LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
@@ -1587,11 +1552,11 @@ else:
"LukeForEntityClassification",
"LukeForEntityPairClassification",
"LukeForEntitySpanClassification",
"LukeForMaskedLM",
"LukeForMultipleChoice",
"LukeForQuestionAnswering",
"LukeForSequenceClassification",
"LukeForTokenClassification",
"LukeForMaskedLM",
"LukeModel",
"LukePreTrainedModel",
]
@@ -1616,15 +1581,6 @@ else:
]
)
_import_structure["models.marian"].extend(["MarianForCausalLM", "MarianModel", "MarianMTModel"])
_import_structure["models.maskformer"].extend(
[
"MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"MaskFormerForInstanceSegmentation",
"MaskFormerModel",
"MaskFormerPreTrainedModel",
"MaskFormerSwinBackbone",
]
)
_import_structure["models.markuplm"].extend(
[
"MARKUPLM_PRETRAINED_MODEL_ARCHIVE_LIST",
@@ -1635,6 +1591,15 @@ else:
"MarkupLMPreTrainedModel",
]
)
_import_structure["models.maskformer"].extend(
[
"MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"MaskFormerForInstanceSegmentation",
"MaskFormerModel",
"MaskFormerPreTrainedModel",
"MaskFormerSwinBackbone",
]
)
_import_structure["models.mbart"].extend(
[
"MBartForCausalLM",
@@ -1727,6 +1692,17 @@ else:
]
)
_import_structure["models.mt5"].extend(["MT5EncoderModel", "MT5ForConditionalGeneration", "MT5Model"])
_import_structure["models.mvp"].extend(
[
"MVP_PRETRAINED_MODEL_ARCHIVE_LIST",
"MvpForCausalLM",
"MvpForConditionalGeneration",
"MvpForQuestionAnswering",
"MvpForSequenceClassification",
"MvpModel",
"MvpPreTrainedModel",
]
)
_import_structure["models.nat"].extend(
[
"NAT_PRETRAINED_MODEL_ARCHIVE_LIST",
@@ -1739,9 +1715,9 @@ else:
[
"NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST",
"NezhaForMaskedLM",
"NezhaForPreTraining",
"NezhaForNextSentencePrediction",
"NezhaForMultipleChoice",
"NezhaForNextSentencePrediction",
"NezhaForPreTraining",
"NezhaForQuestionAnswering",
"NezhaForSequenceClassification",
"NezhaForTokenClassification",
@@ -1777,20 +1753,20 @@ else:
[
"OPT_PRETRAINED_MODEL_ARCHIVE_LIST",
"OPTForCausalLM",
"OPTForQuestionAnswering",
"OPTForSequenceClassification",
"OPTModel",
"OPTPreTrainedModel",
"OPTForSequenceClassification",
"OPTForQuestionAnswering",
]
)
_import_structure["models.owlvit"].extend(
[
"OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"OwlViTForObjectDetection",
"OwlViTModel",
"OwlViTPreTrainedModel",
"OwlViTTextModel",
"OwlViTVisionModel",
"OwlViTForObjectDetection",
]
)
_import_structure["models.pegasus"].extend(
@@ -1919,10 +1895,10 @@ else:
_import_structure["models.resnet"].extend(
[
"RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"ResNetBackbone",
"ResNetForImageClassification",
"ResNetModel",
"ResNetPreTrainedModel",
"ResNetBackbone",
]
)
_import_structure["models.retribert"].extend(
@@ -1941,14 +1917,20 @@ else:
"RobertaPreTrainedModel",
]
)
_import_structure["models.lilt"].extend(
_import_structure["models.roc_bert"].extend(
[
"LILT_PRETRAINED_MODEL_ARCHIVE_LIST",
"LiltForQuestionAnswering",
"LiltForSequenceClassification",
"LiltForTokenClassification",
"LiltModel",
"LiltPreTrainedModel",
"ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"RoCBertForCausalLM",
"RoCBertForMaskedLM",
"RoCBertForMultipleChoice",
"RoCBertForPreTraining",
"RoCBertForQuestionAnswering",
"RoCBertForSequenceClassification",
"RoCBertForTokenClassification",
"RoCBertLayer",
"RoCBertModel",
"RoCBertPreTrainedModel",
"load_tf_weights_in_roc_bert",
]
)
_import_structure["models.roformer"].extend(
@@ -2004,14 +1986,6 @@ else:
"Speech2TextPreTrainedModel",
]
)
_import_structure["models.whisper"].extend(
[
"WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST",
"WhisperForConditionalGeneration",
"WhisperModel",
"WhisperPreTrainedModel",
]
)
_import_structure["models.speech_to_text_2"].extend(["Speech2Text2ForCausalLM", "Speech2Text2PreTrainedModel"])
_import_structure["models.splinter"].extend(
[
@@ -2054,15 +2028,15 @@ else:
"Swinv2PreTrainedModel",
]
)
_import_structure["models.tapas"].extend(
_import_structure["models.switch_transformers"].extend(
[
"TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
"TapasForMaskedLM",
"TapasForQuestionAnswering",
"TapasForSequenceClassification",
"TapasModel",
"TapasPreTrainedModel",
"load_tf_weights_in_tapas",
"SWITCH_TRANSFORMERS_PRETRAINED_MODEL_ARCHIVE_LIST",
"SwitchTransformersEncoderModel",
"SwitchTransformersForConditionalGeneration",
"SwitchTransformersModel",
"SwitchTransformersPreTrainedModel",
"SwitchTransformersSparseMLP",
"SwitchTransformersTop1Router",
]
)
_import_structure["models.t5"].extend(
@@ -2075,15 +2049,23 @@ else:
"load_tf_weights_in_t5",
]
)
_import_structure["models.switch_transformers"].extend(
_import_structure["models.tapas"].extend(
[
"SWITCH_TRANSFORMERS_PRETRAINED_MODEL_ARCHIVE_LIST",
"SwitchTransformersEncoderModel",
"SwitchTransformersForConditionalGeneration",
"SwitchTransformersModel",
"SwitchTransformersPreTrainedModel",
"SwitchTransformersTop1Router",
"SwitchTransformersSparseMLP",
"TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
"TapasForMaskedLM",
"TapasForQuestionAnswering",
"TapasForSequenceClassification",
"TapasModel",
"TapasPreTrainedModel",
"load_tf_weights_in_tapas",
]
)
_import_structure["models.time_series_transformer"].extend(
[
"TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TimeSeriesTransformerForPrediction",
"TimeSeriesTransformerModel",
"TimeSeriesTransformerPreTrainedModel",
]
)
_import_structure["models.trajectory_transformer"].extend(
@@ -2137,14 +2119,23 @@ else:
"VanPreTrainedModel",
]
)
_import_structure["models.videomae"].extend(
[
"VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST",
"VideoMAEForPreTraining",
"VideoMAEForVideoClassification",
"VideoMAEModel",
"VideoMAEPreTrainedModel",
]
)
_import_structure["models.vilt"].extend(
[
"VILT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ViltForImageAndTextRetrieval",
"ViltForImagesAndTextClassification",
"ViltForTokenClassification",
"ViltForMaskedLM",
"ViltForQuestionAnswering",
"ViltForTokenClassification",
"ViltLayer",
"ViltModel",
"ViltPreTrainedModel",
@@ -2186,20 +2177,11 @@ else:
_import_structure["models.vit_msn"].extend(
[
"VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST",
"ViTMSNModel",
"ViTMSNForImageClassification",
"ViTMSNModel",
"ViTMSNPreTrainedModel",
]
)
_import_structure["models.videomae"].extend(
[
"VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST",
"VideoMAEForPreTraining",
"VideoMAEModel",
"VideoMAEPreTrainedModel",
"VideoMAEForVideoClassification",
]
)
_import_structure["models.wav2vec2"].extend(
[
"WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
@@ -2236,6 +2218,23 @@ else:
"WavLMPreTrainedModel",
]
)
_import_structure["models.whisper"].extend(
[
"WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST",
"WhisperForConditionalGeneration",
"WhisperModel",
"WhisperPreTrainedModel",
]
)
_import_structure["models.x_clip"].extend(
[
"XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"XCLIPModel",
"XCLIPPreTrainedModel",
"XCLIPTextModel",
"XCLIPVisionModel",
]
)
_import_structure["models.xglm"].extend(
[
"XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
@@ -2358,11 +2357,11 @@ else:
_import_structure["activations_tf"] = []
_import_structure["benchmark.benchmark_args_tf"] = ["TensorFlowBenchmarkArguments"]
_import_structure["benchmark.benchmark_tf"] = ["TensorFlowBenchmark"]
_import_structure["generation_tf_utils"] = []
_import_structure["generation"].extend(
[
"TFForcedBOSTokenLogitsProcessor",
"TFForcedEOSTokenLogitsProcessor",
"TFGenerationMixin",
"TFLogitsProcessor",
"TFLogitsProcessorList",
"TFLogitsWarper",
@@ -2373,10 +2372,10 @@ else:
"TFTemperatureLogitsWarper",
"TFTopKLogitsWarper",
"TFTopPLogitsWarper",
"TFGenerationMixin",
"tf_top_k_top_p_filtering",
]
)
_import_structure["generation_tf_utils"] = []
_import_structure["keras_callbacks"] = ["KerasMetricCallback", "PushToHubCallback"]
_import_structure["modeling_tf_outputs"] = []
_import_structure["modeling_tf_utils"] = [
@@ -2403,13 +2402,13 @@ else:
_import_structure["models.auto"].extend(
[
"TF_MODEL_FOR_CAUSAL_LM_MAPPING",
"TF_MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING",
"TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING",
"TF_MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING",
"TF_MODEL_FOR_MASKED_LM_MAPPING",
"TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING",
"TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING",
"TF_MODEL_FOR_PRETRAINING_MAPPING",
"TF_MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING",
"TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING",
"TF_MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING",
"TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING",
@@ -2422,12 +2421,12 @@ else:
"TF_MODEL_WITH_LM_HEAD_MAPPING",
"TFAutoModel",
"TFAutoModelForCausalLM",
"TFAutoModelForDocumentQuestionAnswering",
"TFAutoModelForImageClassification",
"TFAutoModelForMaskedLM",
"TFAutoModelForMultipleChoice",
"TFAutoModelForNextSentencePrediction",
"TFAutoModelForPreTraining",
"TFAutoModelForDocumentQuestionAnswering",
"TFAutoModelForQuestionAnswering",
"TFAutoModelForSemanticSegmentation",
"TFAutoModelForSeq2SeqLM",
@@ -2679,8 +2678,8 @@ else:
[
"TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFLayoutLMForMaskedLM",
"TFLayoutLMForSequenceClassification",
"TFLayoutLMForQuestionAnswering",
"TFLayoutLMForSequenceClassification",
"TFLayoutLMForTokenClassification",
"TFLayoutLMMainLayer",
"TFLayoutLMModel",
@@ -2743,10 +2742,10 @@ else:
_import_structure["models.mobilevit"].extend(
[
"TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFMobileViTPreTrainedModel",
"TFMobileViTModel",
"TFMobileViTForImageClassification",
"TFMobileViTForSemanticSegmentation",
"TFMobileViTModel",
"TFMobileViTPreTrainedModel",
]
)
_import_structure["models.mpnet"].extend(
@@ -2999,11 +2998,11 @@ except OptionalDependencyNotAvailable:
name for name in dir(dummy_flax_objects) if not name.startswith("_")
]
else:
_import_structure["generation_flax_utils"] = []
_import_structure["generation"].extend(
[
"FlaxForcedBOSTokenLogitsProcessor",
"FlaxForcedEOSTokenLogitsProcessor",
"FlaxGenerationMixin",
"FlaxLogitsProcessor",
"FlaxLogitsProcessorList",
"FlaxLogitsWarper",
@@ -3011,9 +3010,9 @@ else:
"FlaxTemperatureLogitsWarper",
"FlaxTopKLogitsWarper",
"FlaxTopPLogitsWarper",
"FlaxGenerationMixin",
]
)
_import_structure["generation_flax_utils"] = []
_import_structure["modeling_flax_outputs"] = []
_import_structure["modeling_flax_utils"] = ["FlaxPreTrainedModel"]
_import_structure["models.albert"].extend(
@@ -47,8 +47,13 @@ except OptionalDependencyNotAvailable:
else:
_import_structure["feature_extraction_speech_to_text"] = ["Speech2TextFeatureExtractor"]

if is_sentencepiece_available():
_import_structure["processing_speech_to_text"] = ["Speech2TextProcessor"]
try:
if not (is_speech_available() and is_sentencepiece_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["processing_speech_to_text"] = ["Speech2TextProcessor"]

try:
if not is_tf_available():
@@ -96,8 +101,13 @@ if TYPE_CHECKING:
else:
from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor

if is_sentencepiece_available():
from .processing_speech_to_text import Speech2TextProcessor
try:
if not (is_speech_available() and is_sentencepiece_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .processing_speech_to_text import Speech2TextProcessor

try:
if not is_tf_available():
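The two hunks above put `Speech2TextProcessor` behind the library's usual optional-dependency guard: the processor is only registered in `_import_structure` (and only imported under `TYPE_CHECKING`) when both the speech and sentencepiece backends are available, instead of on sentencepiece alone. A minimal standalone sketch of that pattern, with the surrounding structure of the real sub-package `__init__.py` simplified:

```python
# Sketch of the optional-dependency guard applied in these hunks. The helper
# functions and the OptionalDependencyNotAvailable exception come from
# transformers.utils; the _import_structure bookkeeping is simplified here.
from transformers.utils import (
    OptionalDependencyNotAvailable,
    is_sentencepiece_available,
    is_speech_available,
)

_import_structure = {"feature_extraction_speech_to_text": ["Speech2TextFeatureExtractor"]}

try:
    # Speech2TextProcessor needs both the speech and sentencepiece extras.
    if not (is_speech_available() and is_sentencepiece_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass  # A missing backend simply leaves the processor unregistered.
else:
    _import_structure["processing_speech_to_text"] = ["Speech2TextProcessor"]
```

The `else:` branch of the `try` is what actually exposes the object, so a missing backend drops it from the public surface instead of raising at import time.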
@@ -200,9 +200,9 @@ def sort_imports(file, check_only=True):
indent = get_indent(block_lines[1])
# Slit the internal block into blocks of indent level 1.
internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
# We have two categories of import key: list or _import_structu[key].append/extend
pattern = _re_direct_key if "_import_structure" in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or jsut comments.
# We have two categories of import key: list or _import_structure[key].append/extend
pattern = _re_direct_key if "_import_structure = {" in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
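The behavioral change in this last hunk, which edits `sort_imports` in the init-sorting utility (`utils/custom_init_isort.py` in the transformers repo), is the condition that picks the key-extraction regex: the old test `"_import_structure" in block_lines[0]` is also true for `.append(...)`/`.extend(...)` blocks, so the direct-key pattern was selected where the indirect one was needed and those blocks were left unsorted; the fix keys the direct pattern to the dict literal `_import_structure = {` only. The accompanying comment edits ("_import_structu", "jsut") are plain typo fixes. Below is a hedged sketch of the two key shapes involved; the regexes are stand-ins, since the real `_re_direct_key` and `_re_indirect_key` definitions are not part of this diff.

```python
import re

# Stand-in patterns for illustration; the actual _re_direct_key and
# _re_indirect_key are defined elsewhere in the sorting utility.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')                        # "models.bert": [...]
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')  # _import_structure["models.bert"].extend(

# Two block shapes the sorter encounters in __init__.py.
dict_literal_header = "_import_structure = {"
dict_literal_entry = '    "models.bert": ["BertConfig", "BertTokenizer"],'
extend_call = '_import_structure["models.bert"].extend(["BertModel"])'

# Fixed condition from this commit: only the dict-literal block uses the
# direct pattern; every other block falls through to the indirect one.
for header, line in [(dict_literal_header, dict_literal_entry), (extend_call, extend_call)]:
    pattern = _re_direct_key if "_import_structure = {" in header else _re_indirect_key
    print(pattern.search(line).groups()[0])  # prints "models.bert" twice
```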