fix: Raise TypeError instead of ValueError for invalid types (#32111)

* Raised TypeError instead of ValueError for invalid types.

* Updated formatting using ruff.

* Reverted a few changes.

* Updated tests accordingly.
Sai-Suraj-27 2024-07-22 22:16:17 +05:30 committed by GitHub
parent d1ec36b94f
commit 12b6880c81
58 changed files with 111 additions and 113 deletions
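Note on the contract change: every call site below now signals a wrong *type* with TypeError, so downstream code wrapping these helpers in `except ValueError` will no longer catch the error. A minimal before/after sketch, using a simplified stand-in for the patched `convert_to_float` helper rather than the exact library code:

def convert_to_float(value):
    if isinstance(value, int):
        return float(value)
    if not isinstance(value, str):
        # was: raise ValueError(...) -- a wrong type is now a TypeError
        raise TypeError("Argument value is not a string. Can't parse it as float")
    return float(value)

try:
    convert_to_float([1.5])  # a list is the wrong type, not a bad value
except TypeError as exc:
    print(exc)  # an `except ValueError` handler would no longer run here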

View File

@@ -59,7 +59,7 @@ class GroupedBatchSampler(BatchSampler):
def __init__(self, sampler, group_ids, batch_size):
if not isinstance(sampler, Sampler):
-raise ValueError(
+raise TypeError(
"sampler should be an instance of torch.utils.data.Sampler, but got sampler={}".format(sampler)
)
self.sampler = sampler

View File

@@ -48,7 +48,7 @@ def convert_to_float(value):
if isinstance(value, int):
return float(value)
if not isinstance(value, str):
-raise ValueError("Argument value is not a string. Can't parse it as float")
+raise TypeError("Argument value is not a string. Can't parse it as float")
sanitized = value
try:
@@ -158,7 +158,7 @@ def _respect_conditions(table, row, conditions):
cmp_value = _normalize_for_match(cmp_value)
if not isinstance(table_value, type(cmp_value)):
-raise ValueError("Type difference {} != {}".format(type(table_value), type(cmp_value)))
+raise TypeError("Type difference {} != {}".format(type(table_value), type(cmp_value)))
if not _compare(cond.operator, table_value, cmp_value):
return False

View File

@@ -107,7 +107,7 @@ class AgentImage(AgentType, ImageType):
elif isinstance(value, np.ndarray):
self._tensor = torch.tensor(value)
else:
-raise ValueError(f"Unsupported type for {self.__class__.__name__}: {type(value)}")
+raise TypeError(f"Unsupported type for {self.__class__.__name__}: {type(value)}")
def _ipython_display_(self, include=None, exclude=None):
"""

View File

@@ -1004,7 +1004,7 @@ class PretrainedConfig(PushToHubMixin):
elif isinstance(old_v, float):
v = float(v)
elif not isinstance(old_v, str):
-raise ValueError(
+raise TypeError(
f"You can only update int, float, bool or string values in the config, got {v} for key {k}"
)

View File

@@ -47,11 +47,11 @@ class XnliProcessor(DataProcessor):
text_b = line[1]
label = "contradiction" if line[2] == "contradictory" else line[2]
if not isinstance(text_a, str):
-raise ValueError(f"Training input {text_a} is not a string")
+raise TypeError(f"Training input {text_a} is not a string")
if not isinstance(text_b, str):
-raise ValueError(f"Training input {text_b} is not a string")
+raise TypeError(f"Training input {text_b} is not a string")
if not isinstance(label, str):
-raise ValueError(f"Training label {label} is not a string")
+raise TypeError(f"Training label {label} is not a string")
examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
@@ -70,11 +70,11 @@ class XnliProcessor(DataProcessor):
text_b = line[7]
label = line[1]
if not isinstance(text_a, str):
-raise ValueError(f"Training input {text_a} is not a string")
+raise TypeError(f"Training input {text_a} is not a string")
if not isinstance(text_b, str):
-raise ValueError(f"Training input {text_b} is not a string")
+raise TypeError(f"Training input {text_b} is not a string")
if not isinstance(label, str):
-raise ValueError(f"Training label {label} is not a string")
+raise TypeError(f"Training label {label} is not a string")
examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples

View File

@@ -156,7 +156,7 @@ class PhrasalConstraint(Constraint):
def does_advance(self, token_id: int):
if not isinstance(token_id, int):
-raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")
+raise TypeError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")
if self.completed:
return False
@@ -165,7 +165,7 @@ class PhrasalConstraint(Constraint):
def update(self, token_id: int):
if not isinstance(token_id, int):
-raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")
+raise TypeError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")
stepped = False
completed = False
@@ -300,7 +300,7 @@ class DisjunctiveConstraint(Constraint):
def does_advance(self, token_id: int):
if not isinstance(token_id, int):
-raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")
+raise TypeError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")
next_tokens = self.trie.next_tokens(self.current_seq)
@@ -308,7 +308,7 @@ class DisjunctiveConstraint(Constraint):
def update(self, token_id: int):
if not isinstance(token_id, int):
-raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")
+raise TypeError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")
stepped = False
completed = False
@@ -432,7 +432,7 @@ class ConstraintListState:
def add(self, token_id: int):
if not isinstance(token_id, int):
-raise ValueError(f"`token_id` should be an `int`, but is `{token_id}`.")
+raise TypeError(f"`token_id` should be an `int`, but is `{token_id}`.")
complete, stepped = False, False
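A quick illustration of the constraints change, assuming a transformers build containing this commit (`PhrasalConstraint` is part of the public API):

from transformers import PhrasalConstraint

constraint = PhrasalConstraint(token_ids=[5, 6, 7])
try:
    constraint.does_advance("5")  # token id passed as str, not int
except TypeError as exc:  # raised ValueError before this commit
    print(exc)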

View File

@@ -4281,7 +4281,7 @@ def _split(data, full_batch_size: int, split_size: int = None):
for i in range(0, full_batch_size, split_size)
]
else:
-raise ValueError(f"Unexpected attribute type: {type(data)}")
+raise TypeError(f"Unexpected attribute type: {type(data)}")
def _split_model_inputs(
@@ -4388,7 +4388,7 @@ def stack_model_outputs(model_outputs: List[ModelOutput]) -> ModelOutput:
# If the elements are integers or floats, return a tensor
return torch.tensor(data)
else:
-raise ValueError(f"Unexpected attribute type: {type(data[0])}")
+raise TypeError(f"Unexpected attribute type: {type(data[0])}")
# Use a dictionary comprehension to gather attributes from all objects and concatenate them
concatenated_data = {

View File

@@ -544,7 +544,7 @@ class ImageProcessingMixin(PushToHubMixin):
response.raise_for_status()
return Image.open(BytesIO(response.content))
else:
-raise ValueError(f"only a single or a list of entries is supported but got type={type(image_url_or_urls)}")
+raise TypeError(f"only a single or a list of entries is supported but got type={type(image_url_or_urls)}")
ImageProcessingMixin.push_to_hub = copy_func(ImageProcessingMixin.push_to_hub)

View File

@@ -75,7 +75,7 @@ def to_channel_dimension_format(
`np.ndarray`: The image with the channel dimension set to `channel_dim`.
"""
if not isinstance(image, np.ndarray):
-raise ValueError(f"Input image must be of type np.ndarray, got {type(image)}")
+raise TypeError(f"Input image must be of type np.ndarray, got {type(image)}")
if input_channel_dim is None:
input_channel_dim = infer_channel_dimension_format(image)
@@ -121,7 +121,7 @@ def rescale(
`np.ndarray`: The rescaled image.
"""
if not isinstance(image, np.ndarray):
-raise ValueError(f"Input image must be of type np.ndarray, got {type(image)}")
+raise TypeError(f"Input image must be of type np.ndarray, got {type(image)}")
rescaled_image = image * scale
if data_format is not None:
@@ -453,7 +453,7 @@ def center_crop(
return_numpy = True if return_numpy is None else return_numpy
if not isinstance(image, np.ndarray):
-raise ValueError(f"Input image must be of type np.ndarray, got {type(image)}")
+raise TypeError(f"Input image must be of type np.ndarray, got {type(image)}")
if not isinstance(size, Iterable) or len(size) != 2:
raise ValueError("size must have 2 elements representing the height and width of the output image")

View File

@@ -377,7 +377,7 @@ def load_image(image: Union[str, "PIL.Image.Image"], timeout: Optional[float] =
elif isinstance(image, PIL.Image.Image):
image = image
else:
-raise ValueError(
+raise TypeError(
"Incorrect format used for image. Should be an url linking to an image, a base64 string, a local path, or a PIL image."
)
image = PIL.ImageOps.exif_transpose(image)
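A quick repro for the `load_image` change, assuming Pillow is installed and transformers includes this patch:

from transformers.image_utils import load_image

try:
    load_image(12345)  # not a URL, base64 string, local path, or PIL image
except TypeError:
    print("unsupported input type now raises TypeError, not ValueError")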

View File

@@ -199,7 +199,7 @@ def get_modules_to_fuse(model, quantization_config):
The quantization configuration to use.
"""
if not isinstance(model, PreTrainedModel):
-raise ValueError(f"The model should be an instance of `PreTrainedModel`, got {model.__class__.__name__}")
+raise TypeError(f"The model should be an instance of `PreTrainedModel`, got {model.__class__.__name__}")
# Always default to `quantization_config.modules_to_fuse`
if quantization_config.modules_to_fuse is not None:

View File

@@ -262,9 +262,7 @@ class PeftAdapterMixin:
raise ValueError(f"Adapter with name {adapter_name} already exists. Please use a different name.")
if not isinstance(adapter_config, PeftConfig):
-raise ValueError(
-f"adapter_config should be an instance of PeftConfig. Got {type(adapter_config)} instead."
-)
+raise TypeError(f"adapter_config should be an instance of PeftConfig. Got {type(adapter_config)} instead.")
# Retrieve the name or path of the model, one could also use self.config._name_or_path
# but to be consistent with what we do in PEFT: https://github.com/huggingface/peft/blob/6e783780ca9df3a623992cc4d1d665001232eae0/src/peft/mapping.py#L100

View File

@@ -1209,7 +1209,7 @@ class TFPreTrainedModel(keras.Model, TFModelUtilsMixin, TFGenerationMixin, PushT
def __init__(self, config, *inputs, **kwargs):
super().__init__(*inputs, **kwargs)
if not isinstance(config, PretrainedConfig):
-raise ValueError(
+raise TypeError(
f"Parameter config in `{self.__class__.__name__}(config)` should be an instance of class "
"`PretrainedConfig`. To create a model from a pretrained model use "
f"`model = {self.__class__.__name__}.from_pretrained(PRETRAINED_MODEL_NAME)`"

View File

@@ -1418,13 +1418,13 @@ class AlignModel(AlignPreTrainedModel):
super().__init__(config)
if not isinstance(config.text_config, AlignTextConfig):
-raise ValueError(
+raise TypeError(
"config.text_config is expected to be of type AlignTextConfig but is of type"
f" {type(config.text_config)}."
)
if not isinstance(config.vision_config, AlignVisionConfig):
-raise ValueError(
+raise TypeError(
"config.vision_config is expected to be of type AlignVisionConfig but is of type"
f" {type(config.vision_config)}."
)

View File

@@ -1466,12 +1466,12 @@ class AltCLIPModel(AltCLIPPreTrainedModel):
super().__init__(config)
if not isinstance(config.vision_config, AltCLIPVisionConfig):
-raise ValueError(
+raise TypeError(
"config.vision_config is expected to be of type AltCLIPVisionConfig but is of type"
f" {type(config.vision_config)}."
)
if not isinstance(config.text_config, AltCLIPTextConfig):
-raise ValueError(
+raise TypeError(
"config.text_config is expected to be of type AltCLIPTextConfig but is of type"
f" {type(config.text_config)}."
)

View File

@@ -211,7 +211,7 @@ class BarkProcessor(ProcessorMixin):
raise ValueError(f"Voice preset unrecognized, missing {key} as a key.")
if not isinstance(voice_preset[key], np.ndarray):
-raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.")
+raise TypeError(f"{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.")
if len(voice_preset[key].shape) != self.preset_shape[key]:
raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.")

View File

@@ -755,13 +755,13 @@ class BlipModel(BlipPreTrainedModel):
super().__init__(config)
if not isinstance(config.text_config, BlipTextConfig):
-raise ValueError(
+raise TypeError(
"config.text_config is expected to be of type BlipTextConfig but is of type"
f" {type(config.text_config)}."
)
if not isinstance(config.vision_config, BlipVisionConfig):
-raise ValueError(
+raise TypeError(
"config.vision_config is expected to be of type BlipVisionConfig but is of type"
f" {type(config.vision_config)}."
)

View File

@@ -794,13 +794,13 @@ class TFBlipMainLayer(keras.layers.Layer):
super().__init__(*args, **kwargs)
if not isinstance(config.text_config, BlipTextConfig):
-raise ValueError(
+raise TypeError(
"config.text_config is expected to be of type BlipTextConfig but is of type"
f" {type(config.text_config)}."
)
if not isinstance(config.vision_config, BlipVisionConfig):
-raise ValueError(
+raise TypeError(
"config.vision_config is expected to be of type BlipVisionConfig but is of type"
f" {type(config.vision_config)}."
)

View File

@@ -113,7 +113,7 @@ class ChameleonProcessor(ProcessorMixin):
if isinstance(text, str):
text = [text]
elif not isinstance(text, list) and not isinstance(text[0], str):
-raise ValueError("Invalid input text. Please provide a string, or a list of strings")
+raise TypeError("Invalid input text. Please provide a string, or a list of strings")
# Replace the image token with the expanded image token sequence
prompt_strings = []

View File

@@ -1341,13 +1341,13 @@ class ChineseCLIPModel(ChineseCLIPPreTrainedModel):
super().__init__(config)
if not isinstance(config.text_config, ChineseCLIPTextConfig):
-raise ValueError(
+raise TypeError(
"config.text_config is expected to be of type ChineseCLIPTextConfig but is of type"
f" {type(config.text_config)}."
)
if not isinstance(config.vision_config, ChineseCLIPVisionConfig):
-raise ValueError(
+raise TypeError(
"config.vision_config is expected to be of type ChineseCLIPVisionConfig but is of type"
f" {type(config.vision_config)}."
)

View File

@@ -1928,13 +1928,13 @@ class ClapModel(ClapPreTrainedModel):
super().__init__(config)
if not isinstance(config.text_config, ClapTextConfig):
-raise ValueError(
+raise TypeError(
"config.text_config is expected to be of type ClapTextConfig but is of type"
f" {type(config.text_config)}."
)
if not isinstance(config.audio_config, ClapAudioConfig):
-raise ValueError(
+raise TypeError(
"config.audio_config is expected to be of type ClapAudioConfig but is of type"
f" {type(config.audio_config)}."
)

View File

@@ -1119,13 +1119,13 @@ class CLIPModel(CLIPPreTrainedModel):
super().__init__(config)
if not isinstance(config.text_config, CLIPTextConfig):
-raise ValueError(
+raise TypeError(
"config.text_config is expected to be of type CLIPTextConfig but is of type"
f" {type(config.text_config)}."
)
if not isinstance(config.vision_config, CLIPVisionConfig):
-raise ValueError(
+raise TypeError(
"config.vision_config is expected to be of type CLIPVisionConfig but is of type"
f" {type(config.vision_config)}."
)

View File

@@ -825,13 +825,13 @@ class TFCLIPMainLayer(keras.layers.Layer):
super().__init__(**kwargs)
if not isinstance(config.text_config, CLIPTextConfig):
-raise ValueError(
+raise TypeError(
"config.text_config is expected to be of type CLIPTextConfig but is of type"
f" {type(config.text_config)}."
)
if not isinstance(config.vision_config, CLIPVisionConfig):
-raise ValueError(
+raise TypeError(
"config.vision_config is expected to be of type CLIPVisionConfig but is of type"
f" {type(config.vision_config)}."
)

View File

@@ -924,13 +924,13 @@ class CLIPSegModel(CLIPSegPreTrainedModel):
super().__init__(config)
if not isinstance(config.text_config, CLIPSegTextConfig):
-raise ValueError(
+raise TypeError(
"config.text_config is expected to be of type CLIPSegTextConfig but is of type"
f" {type(config.text_config)}."
)
if not isinstance(config.vision_config, CLIPSegVisionConfig):
-raise ValueError(
+raise TypeError(
"config.vision_config is expected to be of type CLIPSegVisionConfig but is of type"
f" {type(config.vision_config)}."
)

View File

@@ -1513,19 +1513,19 @@ class ClvpModelForConditionalGeneration(ClvpPreTrainedModel):
super().__init__(config)
if not isinstance(config.text_config, ClvpEncoderConfig):
-raise ValueError(
+raise TypeError(
"config.text_config is expected to be of type `ClvpEncoderConfig` but is of type"
f" {type(config.text_config)}."
)
if not isinstance(config.speech_config, ClvpEncoderConfig):
-raise ValueError(
+raise TypeError(
"config.speech_config is expected to be of type `ClvpEncoderConfig` but is of type"
f" {type(config.speech_config)}."
)
if not isinstance(config.decoder_config, ClvpDecoderConfig):
-raise ValueError(
+raise TypeError(
"config.decoder_config is expected to be of type `ClvpDecoderConfig` but is of type"
f" {type(config.decoder_config)}."
)

View File

@@ -518,4 +518,4 @@ def convert_to_unicode(text):
elif isinstance(text, bytes):
return text.decode("utf-8", "ignore")
else:
-raise ValueError(f"Unsupported string type: {type(text)}")
+raise TypeError(f"Unsupported string type: {type(text)}")

View File

@@ -298,7 +298,7 @@ class DepthAnythingNeck(nn.Module):
List of hidden states from the backbone.
"""
if not isinstance(hidden_states, (tuple, list)):
-raise ValueError("hidden_states should be a tuple or list of tensors")
+raise TypeError("hidden_states should be a tuple or list of tensors")
if len(hidden_states) != len(self.config.neck_hidden_sizes):
raise ValueError("The number of hidden states should be equal to the number of neck hidden sizes.")

View File

@@ -1002,7 +1002,7 @@ class DPTNeck(nn.Module):
List of hidden states from the backbone.
"""
if not isinstance(hidden_states, (tuple, list)):
-raise ValueError("hidden_states should be a tuple or list of tensors")
+raise TypeError("hidden_states should be a tuple or list of tensors")
if len(hidden_states) != len(self.config.neck_hidden_sizes):
raise ValueError("The number of hidden states should be equal to the number of neck hidden sizes.")

View File

@@ -32,7 +32,7 @@ def _fetch_dims(tree: Union[dict, list, tuple, torch.Tensor]) -> List[Tuple[int,
elif isinstance(tree, torch.Tensor):
shapes.append(tree.shape)
else:
-raise ValueError("Not supported")
+raise TypeError("Not supported")
return shapes
@@ -302,7 +302,7 @@ def chunk_layer(
else:
out[i : i + chunk_size] = output_chunk
else:
-raise ValueError("Not supported")
+raise TypeError("Not supported")
i += chunk_size

View File

@@ -394,7 +394,7 @@ def map_structure_with_atom_order(in_list: list, first_call: bool = True) -> lis
elif isinstance(in_list[i], str):
in_list[i] = atom_order[in_list[i]]
else:
-raise ValueError("Unexpected type when mapping nested lists!")
+raise TypeError("Unexpected type when mapping nested lists!")
return in_list

View File

@@ -134,7 +134,7 @@ def tree_map(fn, tree, leaf_type):
return fn(tree)
else:
print(type(tree))
-raise ValueError("Not supported")
+raise TypeError("Not supported")
tensor_tree_map = partial(tree_map, leaf_type=torch.Tensor)

View File

@@ -1181,19 +1181,19 @@ class FlavaModel(FlavaPreTrainedModel):
super().__init__(config)
if not isinstance(config.text_config, FlavaTextConfig):
-raise ValueError(
+raise TypeError(
"config.text_config is expected to be of type FlavaTextConfig but is of type"
f" {type(config.text_config)}."
)
if not isinstance(config.image_config, FlavaImageConfig):
-raise ValueError(
+raise TypeError(
"config.image_config is expected to be of type FlavaImageConfig but is of type"
f" {type(config.image_config)}."
)
if not isinstance(config.multimodal_config, FlavaMultimodalConfig):
-raise ValueError(
+raise TypeError(
"config.multimodal_config is expected to be of type FlavaMultimodalConfig but "
+ f"is of type {type(config.multimodal_config)}."
)

View File

@@ -1302,13 +1302,13 @@ class GroupViTModel(GroupViTPreTrainedModel):
super().__init__(config)
if not isinstance(config.text_config, GroupViTTextConfig):
-raise ValueError(
+raise TypeError(
"config.text_config is expected to be of type GroupViTTextConfig but is of type"
f" {type(config.text_config)}."
)
if not isinstance(config.vision_config, GroupViTVisionConfig):
-raise ValueError(
+raise TypeError(
"config.vision_config is expected to be of type GroupViTVisionConfig but is of type"
f" {type(config.vision_config)}."
)

View File

@@ -1443,13 +1443,13 @@ class TFGroupViTMainLayer(keras.layers.Layer):
super().__init__(**kwargs)
if not isinstance(config.text_config, GroupViTTextConfig):
-raise ValueError(
+raise TypeError(
"config.text_config is expected to be of type GroupViTTextConfig but is of type"
f" {type(config.text_config)}."
)
if not isinstance(config.vision_config, GroupViTVisionConfig):
-raise ValueError(
+raise TypeError(
"config.vision_config is expected to be of type GroupViTVisionConfig but is of type"
f" {type(config.vision_config)}."
)

View File

@@ -513,7 +513,7 @@ class LlavaNextImageProcessor(BaseImageProcessor):
List[np.array]: A list of NumPy arrays containing the processed image patches.
"""
if not isinstance(grid_pinpoints, list):
-raise ValueError("grid_pinpoints must be a list of possible resolutions.")
+raise TypeError("grid_pinpoints must be a list of possible resolutions.")
possible_resolutions = grid_pinpoints

View File

@@ -60,12 +60,12 @@ def get_anyres_image_grid_shape(image_size, grid_pinpoints, patch_size):
tuple: The shape of the image patch grid in the format (width, height).
"""
if not isinstance(grid_pinpoints, list):
-raise ValueError("grid_pinpoints should be a list of tuples or lists")
+raise TypeError("grid_pinpoints should be a list of tuples or lists")
# ! VERY IMPORTANT if image_size is tensor, must convert to into tuple, otherwise it will cause wrong calculate
if not isinstance(image_size, (list, tuple)):
if not isinstance(image_size, (torch.Tensor, np.ndarray)):
-raise ValueError(
+raise TypeError(
f"image_size invalid type: {type(image_size)} not valid, should be either list, tuple, np.ndarray or tensor"
)
image_size = image_size.tolist()
@@ -91,12 +91,12 @@ def image_size_to_num_patches(image_size, grid_pinpoints, patch_size: int):
int: the number of patches
"""
if not isinstance(grid_pinpoints, list):
-raise ValueError("grid_pinpoints should be a list of tuples or lists")
+raise TypeError("grid_pinpoints should be a list of tuples or lists")
# ! VERY IMPORTANT if image_size is tensor, must convert to into tuple, otherwise it will cause wrong calculate
if not isinstance(image_size, (list, tuple)):
if not isinstance(image_size, (torch.Tensor, np.ndarray)):
-raise ValueError(f"image_size invalid type {type(image_size)} with value {image_size}")
+raise TypeError(f"image_size invalid type {type(image_size)} with value {image_size}")
image_size = image_size.tolist()
best_resolution = select_best_resolution(image_size, grid_pinpoints)
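The same check can be exercised against the patched llava_next helper; a sketch assuming torch is installed and transformers includes this commit:

from transformers.models.llava_next.modeling_llava_next import image_size_to_num_patches

try:
    image_size_to_num_patches(
        image_size=(336, 672),
        grid_pinpoints=((336, 672), (672, 336)),  # tuple instead of the required list
        patch_size=336,
    )
except TypeError as exc:
    print(exc)  # "grid_pinpoints should be a list of tuples or lists"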

View File

@@ -66,12 +66,12 @@ def get_anyres_image_grid_shape(image_size, grid_pinpoints, patch_size):
tuple: The shape of the image patch grid in the format (width, height).
"""
if not isinstance(grid_pinpoints, list):
-raise ValueError("grid_pinpoints should be a list of tuples or lists")
+raise TypeError("grid_pinpoints should be a list of tuples or lists")
# ! VERY IMPORTANT if image_size is tensor, must convert to into tuple, otherwise it will cause wrong calculate
if not isinstance(image_size, (list, tuple)):
if not isinstance(image_size, (torch.Tensor, np.ndarray)):
-raise ValueError(
+raise TypeError(
f"image_size invalid type: {type(image_size)} not valid, should be either list, tuple, np.ndarray or tensor"
)
image_size = image_size.tolist()
@@ -97,12 +97,12 @@ def image_size_to_num_patches(image_size, grid_pinpoints, patch_size: int):
int: the number of patches
"""
if not isinstance(grid_pinpoints, list):
-raise ValueError("grid_pinpoints should be a list of tuples or lists")
+raise TypeError("grid_pinpoints should be a list of tuples or lists")
# ! VERY IMPORTANT if image_size is tensor, must convert to into tuple, otherwise it will cause wrong calculate
if not isinstance(image_size, (list, tuple)):
if not isinstance(image_size, (torch.Tensor, np.ndarray)):
-raise ValueError(f"image_size invalid type {type(image_size)} with value {image_size}")
+raise TypeError(f"image_size invalid type {type(image_size)} with value {image_size}")
image_size = image_size.tolist()
best_resolution = select_best_resolution(image_size, grid_pinpoints)

View File

@@ -889,7 +889,7 @@ class LukeTokenizer(PreTrainedTokenizer):
def _check_entity_input_format(self, entities: Optional[EntityInput], entity_spans: Optional[EntitySpanInput]):
if not isinstance(entity_spans, list):
-raise ValueError("entity_spans should be given as a list")
+raise TypeError("entity_spans should be given as a list")
elif len(entity_spans) > 0 and not isinstance(entity_spans[0], tuple):
raise ValueError(
"entity_spans should be given as a list of tuples containing the start and end character indices"

View File

@@ -721,7 +721,7 @@ class MLukeTokenizer(PreTrainedTokenizer):
# Copied from transformers.models.luke.tokenization_luke.LukeTokenizer._check_entity_input_format
def _check_entity_input_format(self, entities: Optional[EntityInput], entity_spans: Optional[EntitySpanInput]):
if not isinstance(entity_spans, list):
-raise ValueError("entity_spans should be given as a list")
+raise TypeError("entity_spans should be given as a list")
elif len(entity_spans) > 0 and not isinstance(entity_spans[0], tuple):
raise ValueError(
"entity_spans should be given as a list of tuples containing the start and end character indices"

View File

@@ -1015,13 +1015,13 @@ class Owlv2Model(Owlv2PreTrainedModel):
super().__init__(config)
if not isinstance(config.text_config, Owlv2TextConfig):
-raise ValueError(
+raise TypeError(
"config.text_config is expected to be of type Owlv2TextConfig but is of type"
f" {type(config.text_config)}."
)
if not isinstance(config.vision_config, Owlv2VisionConfig):
-raise ValueError(
+raise TypeError(
"config.vision_config is expected to be of type Owlv2VisionConfig but is of type"
f" {type(config.vision_config)}."
)

View File

@@ -998,13 +998,13 @@ class OwlViTModel(OwlViTPreTrainedModel):
super().__init__(config)
if not isinstance(config.text_config, OwlViTTextConfig):
-raise ValueError(
+raise TypeError(
"config.text_config is expected to be of type OwlViTTextConfig but is of type"
f" {type(config.text_config)}."
)
if not isinstance(config.vision_config, OwlViTVisionConfig):
-raise ValueError(
+raise TypeError(
"config.vision_config is expected to be of type OwlViTVisionConfig but is of type"
f" {type(config.vision_config)}."
)

View File

@@ -204,7 +204,7 @@ class HFIndexBase(Index):
def _check_dataset_format(self, with_index: bool):
if not isinstance(self.dataset, Dataset):
-raise ValueError(f"Dataset should be a datasets.Dataset object, but got {type(self.dataset)}")
+raise TypeError(f"Dataset should be a datasets.Dataset object, but got {type(self.dataset)}")
if len({"title", "text", "embeddings"} - set(self.dataset.column_names)) > 0:
raise ValueError(
"Dataset should be a dataset with the following columns: "

View File

@@ -1202,13 +1202,13 @@ class SiglipModel(SiglipPreTrainedModel):
super().__init__(config)
if not isinstance(config.text_config, SiglipTextConfig):
-raise ValueError(
+raise TypeError(
"config.text_config is expected to be of type SiglipTextConfig but is of type"
f" {type(config.text_config)}."
)
if not isinstance(config.vision_config, SiglipVisionConfig):
-raise ValueError(
+raise TypeError(
"config.vision_config is expected to be of type SiglipVisionConfig but is of type"
f" {type(config.vision_config)}."
)

View File

@@ -135,7 +135,7 @@ class UdopConfig(PretrainedConfig):
self.patch_size = patch_size
self.num_channels = num_channels
if not isinstance(relative_bias_args, list):
-raise ValueError("`relative_bias_args` should be a list of dictionaries.")
+raise TypeError("`relative_bias_args` should be a list of dictionaries.")
self.relative_bias_args = relative_bias_args
act_info = self.feed_forward_proj.split("-")

View File

@@ -92,7 +92,7 @@ class Wav2Vec2ProcessorWithLM(ProcessorMixin):
super().__init__(feature_extractor, tokenizer)
if not isinstance(decoder, BeamSearchDecoderCTC):
-raise ValueError(f"`decoder` has to be of type {BeamSearchDecoderCTC.__class__}, but is {type(decoder)}")
+raise TypeError(f"`decoder` has to be of type {BeamSearchDecoderCTC.__class__}, but is {type(decoder)}")
if feature_extractor.__class__.__name__ not in ["Wav2Vec2FeatureExtractor", "SeamlessM4TFeatureExtractor"]:
raise ValueError(

View File

@@ -1242,13 +1242,13 @@ class XCLIPModel(XCLIPPreTrainedModel):
super().__init__(config)
if not isinstance(config.text_config, XCLIPTextConfig):
-raise ValueError(
+raise TypeError(
"config.text_config is expected to be of type XCLIPTextConfig but is of type"
f" {type(config.text_config)}."
)
if not isinstance(config.vision_config, XCLIPVisionConfig):
-raise ValueError(
+raise TypeError(
"config.vision_config is expected to be of type XCLIPVisionConfig but is of type"
f" {type(config.vision_config)}."
)

View File

@@ -334,7 +334,7 @@ class ZoeDepthNeck(nn.Module):
List of hidden states from the backbone.
"""
if not isinstance(hidden_states, (tuple, list)):
-raise ValueError("hidden_states should be a tuple or list of tensors")
+raise TypeError("hidden_states should be a tuple or list of tensors")
if len(hidden_states) != len(self.config.neck_hidden_sizes):
raise ValueError("The number of hidden states should be equal to the number of neck hidden sizes.")

View File

@@ -190,7 +190,7 @@ class AudioClassificationPipeline(Pipeline):
).numpy()
if not isinstance(inputs, np.ndarray):
-raise ValueError("We expect a numpy ndarray as input")
+raise TypeError("We expect a numpy ndarray as input")
if len(inputs.shape) != 1:
raise ValueError("We expect a single channel audio input for AudioClassificationPipeline")

View File

@@ -406,7 +406,7 @@ class AutomaticSpeechRecognitionPipeline(ChunkPipeline):
# of the original length in the stride so we can cut properly.
stride = (inputs.shape[0], int(round(stride[0] * ratio)), int(round(stride[1] * ratio)))
if not isinstance(inputs, np.ndarray):
-raise ValueError(f"We expect a numpy ndarray as input, got `{type(inputs)}`")
+raise TypeError(f"We expect a numpy ndarray as input, got `{type(inputs)}`")
if len(inputs.shape) != 1:
raise ValueError("We expect a single channel audio input for AutomaticSpeechRecognitionPipeline")

View File

@@ -114,7 +114,7 @@ class ZeroShotAudioClassificationPipeline(Pipeline):
audio = ffmpeg_read(audio, self.feature_extractor.sampling_rate)
if not isinstance(audio, np.ndarray):
-raise ValueError("We expect a numpy ndarray as input")
+raise TypeError("We expect a numpy ndarray as input")
if len(audio.shape) != 1:
raise ValueError("We expect a single channel audio input for ZeroShotAudioClassificationPipeline")

View File

@@ -356,7 +356,7 @@ class ProcessorMixin(PushToHubMixin):
proper_class = getattr(transformers_module, class_name)
if not isinstance(arg, proper_class):
-raise ValueError(
+raise TypeError(
f"Received a {type(arg).__name__} for argument {attribute_name}, but a {class_name} was expected."
)

View File

@@ -474,7 +474,7 @@ class PreTrainedTokenizer(PreTrainedTokenizerBase):
# Always raise an error if string because users should define the behavior
for index, token in value.items():
if not isinstance(token, (str, AddedToken)) or not isinstance(index, int):
-raise ValueError(
+raise TypeError(
f"The provided `added_tokens_decoder` has an element of type {index.__class__, token.__class__}, should be a dict of {int, Union[AddedToken, str]}"
)

View File

@@ -405,7 +405,7 @@ class BitsAndBytesConfig(QuantizationConfigMixin):
@load_in_4bit.setter
def load_in_4bit(self, value: bool):
if not isinstance(value, bool):
-raise ValueError("load_in_4bit must be a boolean")
+raise TypeError("load_in_4bit must be a boolean")
if self.load_in_8bit and value:
raise ValueError("load_in_4bit and load_in_8bit are both True, but only one can be used at the same time")
@@ -418,7 +418,7 @@ class BitsAndBytesConfig(QuantizationConfigMixin):
@load_in_8bit.setter
def load_in_8bit(self, value: bool):
if not isinstance(value, bool):
-raise ValueError("load_in_8bit must be a boolean")
+raise TypeError("load_in_8bit must be a boolean")
if self.load_in_4bit and value:
raise ValueError("load_in_4bit and load_in_8bit are both True, but only one can be used at the same time")
@@ -429,30 +429,30 @@ class BitsAndBytesConfig(QuantizationConfigMixin):
Safety checker that arguments are correct - also replaces some NoneType arguments with their default values.
"""
if not isinstance(self.load_in_4bit, bool):
-raise ValueError("load_in_4bit must be a boolean")
+raise TypeError("load_in_4bit must be a boolean")
if not isinstance(self.load_in_8bit, bool):
-raise ValueError("load_in_8bit must be a boolean")
+raise TypeError("load_in_8bit must be a boolean")
if not isinstance(self.llm_int8_threshold, float):
-raise ValueError("llm_int8_threshold must be a float")
+raise TypeError("llm_int8_threshold must be a float")
if self.llm_int8_skip_modules is not None and not isinstance(self.llm_int8_skip_modules, list):
-raise ValueError("llm_int8_skip_modules must be a list of strings")
+raise TypeError("llm_int8_skip_modules must be a list of strings")
if not isinstance(self.llm_int8_enable_fp32_cpu_offload, bool):
-raise ValueError("llm_int8_enable_fp32_cpu_offload must be a boolean")
+raise TypeError("llm_int8_enable_fp32_cpu_offload must be a boolean")
if not isinstance(self.llm_int8_has_fp16_weight, bool):
-raise ValueError("llm_int8_has_fp16_weight must be a boolean")
+raise TypeError("llm_int8_has_fp16_weight must be a boolean")
if self.bnb_4bit_compute_dtype is not None and not isinstance(self.bnb_4bit_compute_dtype, torch.dtype):
-raise ValueError("bnb_4bit_compute_dtype must be torch.dtype")
+raise TypeError("bnb_4bit_compute_dtype must be torch.dtype")
if not isinstance(self.bnb_4bit_quant_type, str):
-raise ValueError("bnb_4bit_quant_type must be a string")
+raise TypeError("bnb_4bit_quant_type must be a string")
if not isinstance(self.bnb_4bit_use_double_quant, bool):
-raise ValueError("bnb_4bit_use_double_quant must be a boolean")
+raise TypeError("bnb_4bit_use_double_quant must be a boolean")
if self.load_in_4bit and not version.parse(importlib.metadata.version("bitsandbytes")) >= version.parse(
"0.39.0"
@@ -957,13 +957,13 @@ class AqlmConfig(QuantizationConfigMixin):
Safety checker that arguments are correct - also replaces some NoneType arguments with their default values.
"""
if not isinstance(self.in_group_size, int):
-raise ValueError("in_group_size must be a float")
+raise TypeError("in_group_size must be a float")
if not isinstance(self.out_group_size, int):
-raise ValueError("out_group_size must be a float")
+raise TypeError("out_group_size must be a float")
if not isinstance(self.num_codebooks, int):
-raise ValueError("num_codebooks must be a float")
+raise TypeError("num_codebooks must be a float")
if not isinstance(self.nbits_per_codebook, int):
-raise ValueError("nbits_per_codebook must be a float")
+raise TypeError("nbits_per_codebook must be a float")
if self.linear_weights_not_to_quantize is not None and not isinstance(
self.linear_weights_not_to_quantize, list

View File

@@ -60,7 +60,7 @@ def output_type(output):
elif isinstance(output, (torch.Tensor, AgentAudio)):
return "audio"
else:
-raise ValueError(f"Invalid output: {output}")
+raise TypeError(f"Invalid output: {output}")
@is_agent_test

View File

@@ -188,7 +188,7 @@ class LukeTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
with self.assertRaises(ValueError):
tokenizer(sentence, entities=tuple(entities), entity_spans=spans)
-with self.assertRaises(ValueError):
+with self.assertRaises(TypeError):
tokenizer(sentence, entities=entities, entity_spans=tuple(spans))
with self.assertRaises(ValueError):

View File

@@ -151,7 +151,7 @@ class MLukeTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
with self.assertRaises(ValueError):
tokenizer(sentence, entities=tuple(entities), entity_spans=spans)
-with self.assertRaises(ValueError):
+with self.assertRaises(TypeError):
tokenizer(sentence, entities=entities, entity_spans=tuple(spans))
with self.assertRaises(ValueError):
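Downstream test suites asserting on these checks need the matching one-line update; an illustrative, self-contained pattern (not code from this repo):

import unittest

def _check_is_list(x):
    # stand-in for the library's post-patch type checks
    if not isinstance(x, list):
        raise TypeError("expected a list")

class TypeCheckMigrationTest(unittest.TestCase):
    def test_wrong_type_is_now_typeerror(self):
        with self.assertRaises(TypeError):  # was: self.assertRaises(ValueError)
            _check_is_list((1, 2))

if __name__ == "__main__":
    unittest.main()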

View File

@@ -171,7 +171,7 @@ class FeatureExtractionPipelineTests(unittest.TestCase):
elif isinstance(input_, float):
return 0
else:
-raise ValueError("We expect lists of floats, nothing else")
+raise TypeError("We expect lists of floats, nothing else")
return shape
def get_test_pipeline(self, model, tokenizer, processor, torch_dtype="float32"):

View File

@@ -145,7 +145,7 @@ class PipelineTesterMixin:
if not isinstance(model_architectures, tuple):
model_architectures = (model_architectures,)
if not isinstance(model_architectures, tuple):
-raise ValueError(f"`model_architectures` must be a tuple. Got {type(model_architectures)} instead.")
+raise TypeError(f"`model_architectures` must be a tuple. Got {type(model_architectures)} instead.")
for model_architecture in model_architectures:
model_arch_name = model_architecture.__name__