mirror of
https://github.com/huggingface/transformers.git
synced 2025-08-02 19:21:31 +06:00
Fixed a multiple-devices issue in SmolVLM model (#38736)
Fixed a multiple-devices issue in SmolVLMModel (#38557) * Fixed a multiple-devices issue in SmolVLMModel * Changed the modular to reflect changes
This commit is contained in:
parent
aa798b7ac9
commit
f1849eab22
@@ -750,7 +750,7 @@ class SmolVLMModel(SmolVLMPreTrainedModel):
         if pixel_values is not None and image_hidden_states is not None:
             raise ValueError("You cannot specify both pixel_values and image_hidden_states at the same time")
         elif pixel_values is not None:
-            image_hidden_states = self.get_image_features(pixel_values, pixel_attention_mask)
+            image_hidden_states = self.get_image_features(pixel_values, pixel_attention_mask).to(input_ids.device)
         elif image_hidden_states is not None:
             image_hidden_states = image_hidden_states.to(dtype=self.dtype, device=input_ids.device)
@@ -307,7 +307,7 @@ class SmolVLMModel(Idefics3Model):
         if pixel_values is not None and image_hidden_states is not None:
             raise ValueError("You cannot specify both pixel_values and image_hidden_states at the same time")
         elif pixel_values is not None:
-            image_hidden_states = self.get_image_features(pixel_values, pixel_attention_mask)
+            image_hidden_states = self.get_image_features(pixel_values, pixel_attention_mask).to(input_ids.device)
         elif image_hidden_states is not None:
             image_hidden_states = image_hidden_states.to(dtype=self.dtype, device=input_ids.device)
Loading…
Reference in New Issue
Block a user