Mirror of https://github.com/huggingface/transformers.git
remove "PretrainedConfig" annotations

commit 3afd9aceca
parent bf1192d982

@@ -2616,7 +2616,7 @@ class PreTrainedModel(nn.Module, ModuleUtilsMixin, PushToHubMixin, PeftAdapterMi
         return config

     @classmethod
-    def _check_and_enable_sdpa(cls, config, hard_check_only: bool = False) -> PretrainedConfig:
+    def _check_and_enable_sdpa(cls, config, hard_check_only: bool = False):
         """
         Checks the availability of SDPA for a given model.

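Note: return annotations are not enforced at runtime, so dropping this one changes nothing for callers; the method still hands back the config object it was given. A minimal sketch of that shape, using stand-in names (DummyConfig and DummyModel are illustrative, not the real classes):

from dataclasses import dataclass


@dataclass
class DummyConfig:
    # Stand-in for a config object; purely illustrative, not PretrainedConfig.
    _attn_implementation: str = "eager"


class DummyModel:
    @classmethod
    def _check_and_enable_sdpa(cls, config, hard_check_only: bool = False):
        # Mirrors the shape of the real method: the config that comes in is the
        # config that goes back out, so the dropped annotation carried no extra
        # information for callers.
        if not hard_check_only:
            config._attn_implementation = "sdpa"
        return config


cfg = DummyModel._check_and_enable_sdpa(DummyConfig())
print(cfg._attn_implementation)  # -> "sdpa"
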
@@ -131,7 +131,7 @@ class ClvpEncoderConfig(PretrainedConfig):
     @classmethod
     def from_pretrained(
         cls, pretrained_model_name_or_path: Union[str, os.PathLike], config_type: str = "text_config", **kwargs
-    ) -> "PretrainedConfig":
+    ):
         cls._set_token_in_kwargs(kwargs)

         config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

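For orientation, a hedged usage sketch of this classmethod; the checkpoint path is a placeholder, and config_type (visible in the signature above) selects which sub-config of the composite CLVP config the encoder config is read from:

from transformers import ClvpEncoderConfig

# Placeholder path, not a real checkpoint id; "text_config" is the default
# shown in the signature, and other sub-config names may also be accepted.
text_encoder_config = ClvpEncoderConfig.from_pretrained(
    "path/to/clvp_checkpoint", config_type="text_config"
)
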
@@ -15,7 +15,7 @@
 """PyTorch Falcon model."""

 import math
-from typing import TYPE_CHECKING, Optional, Union
+from typing import Optional, Union

 import torch
 import torch.utils.checkpoint

@@ -47,9 +47,6 @@ from ...utils import (
 from .configuration_falcon import FalconConfig


-if TYPE_CHECKING:
-    from ...configuration_utils import PretrainedConfig
-
 if is_flash_attn_available():
     from ...modeling_flash_attention_utils import _flash_attention_forward

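The TYPE_CHECKING import removed here existed only so static type checkers could resolve the quoted "PretrainedConfig" annotation; with the annotation gone, the guarded import is dead code. A generic sketch of that pattern (some_package.config.SomeConfig is hypothetical):

from typing import TYPE_CHECKING

if TYPE_CHECKING:
    # Only evaluated by static type checkers, never at runtime.
    from some_package.config import SomeConfig  # hypothetical module


def check_and_enable(config, hard_check_only: bool = False) -> "SomeConfig":
    # The quoted annotation is resolved lazily, so no runtime import is needed;
    # once the annotation is removed, the TYPE_CHECKING block has no remaining
    # purpose and can be deleted as well.
    return config
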
@@ -688,7 +685,7 @@ class FalconPreTrainedModel(PreTrainedModel):

     # Adapted from transformers.modeling_utils.PreTrainedModel._check_and_enable_sdpa
     @classmethod
-    def _check_and_enable_sdpa(cls, config, hard_check_only: bool = False) -> "PretrainedConfig":
+    def _check_and_enable_sdpa(cls, config, hard_check_only: bool = False):
         _is_bettertransformer = getattr(cls, "use_bettertransformer", False)
         if _is_bettertransformer:
             return config

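The Falcon override differs from the base method mainly in its early return when the class was converted with BetterTransformer. A small generic sketch of that getattr-based class-flag check (FeatureMixin and maybe_enable are illustrative names, not the real Falcon code):

class FeatureMixin:
    @classmethod
    def maybe_enable(cls, config, hard_check_only: bool = False):
        # A class-level flag acts as an opt-out; getattr with a default keeps
        # this safe for subclasses that never define the attribute.
        if getattr(cls, "use_bettertransformer", False):
            return config
        if not hard_check_only:
            config.enabled = True  # illustrative attribute
        return config


class ConvertedModel(FeatureMixin):
    use_bettertransformer = True  # opt-out: maybe_enable returns the config untouched
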
@@ -1074,7 +1074,7 @@ class Qwen2_5OmniConfig(PretrainedConfig):

         super().__init__(**kwargs)

-    def get_text_config(self, decoder=False) -> "PretrainedConfig":
+    def get_text_config(self, decoder=False):
         """
         Returns the config that is meant to be used with text IO. On most models, it is the original config instance
         itself. On specific composite models, it is under a set of valid names.

@@ -1114,7 +1114,7 @@ class Qwen2_5OmniConfig(PretrainedConfig):

         super().__init__(**kwargs)

-    def get_text_config(self, decoder=False) -> "PretrainedConfig":
+    def get_text_config(self, decoder=False):
         """
         Returns the config that is meant to be used with text IO. On most models, it is the original config instance
         itself. On specific composite models, it is under a set of valid names.

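As the docstring above notes, get_text_config is the generic accessor for reaching a model's text sub-config, so the concrete return type depends on the checkpoint; the dropped annotation was at best approximate. A hedged usage sketch (the model id is a placeholder):

from transformers import AutoConfig

# Placeholder model id; any composite (e.g. multimodal) checkpoint would do.
config = AutoConfig.from_pretrained("path/to/composite_checkpoint")

# On composite configs this returns the text sub-config; on plain text-only
# configs it generally returns the config object itself.
text_config = config.get_text_config(decoder=True)
print(type(text_config).__name__)
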
@@ -324,7 +324,7 @@ class T5GemmaConfig(PretrainedConfig):
             setattr(self.decoder, key, value)
         super().__setattr__(key, value)

-    def get_text_config(self, decoder=False) -> "PretrainedConfig":
+    def get_text_config(self, decoder=False):
         # Always return self, regardless of the decoder option.
         del decoder
         return self

@@ -213,7 +213,7 @@ class T5GemmaConfig(PretrainedConfig):
             setattr(self.decoder, key, value)
         super().__setattr__(key, value)

-    def get_text_config(self, decoder=False) -> "PretrainedConfig":
+    def get_text_config(self, decoder=False):
        # Always return self, regardless of the decoder option.
         del decoder
         return self

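The T5Gemma override deliberately ignores the decoder flag and returns the whole config. A self-contained toy version of that pattern (ComposedConfig is a placeholder, not the real T5Gemma class):

class ComposedConfig:
    """Toy stand-in for a composite encoder-decoder config."""

    def __init__(self, encoder=None, decoder=None):
        self.encoder = encoder
        self.decoder = decoder

    def get_text_config(self, decoder=False):
        # Always return self, regardless of the decoder option.
        del decoder
        return self


cfg = ComposedConfig(encoder="enc", decoder="dec")
assert cfg.get_text_config(decoder=True) is cfg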