fix csm and mistral

Arthur 2025-07-01 15:53:44 +02:00
parent 5e5ae84a05
commit 075bd0c2f3
4 changed files with 25 additions and 138 deletions

View File

@@ -374,10 +374,6 @@ class CsmPreTrainedModel(PreTrainedModel):
_supports_quantized_cache = True
_supports_static_cache = True
_supports_attention_backend = True
_can_record_outputs: dict[str, tuple[nn.Module, int]] = {
"hidden_states": (CsmDecoderLayer, 0),
"attentions": (CsmAttention, 1),
}
def _init_weights(self, module):
std = self.config.initializer_range
@@ -400,6 +396,10 @@ class CsmPreTrainedModel(PreTrainedModel):
@auto_docstring
class CsmDepthDecoderModel(CsmPreTrainedModel):
config_class = CsmDepthDecoderConfig
_can_record_outputs: dict[str, tuple[nn.Module, int]] = {
"hidden_states": (CsmDecoderLayer, 0),
"attentions": (CsmAttention, 1),
}
def __init__(self, config):
super().__init__(config)
@@ -543,6 +543,10 @@ class CsmDepthDecoderForCausalLM(CsmPreTrainedModel, GenerationMixin):
_tied_weights_keys = None
_tp_plan = None
_pp_plan = None
_can_record_outputs: dict[str, tuple[nn.Module, int]] = {
"hidden_states": (CsmDecoderLayer, 0),
"attentions": (CsmAttention, 1),
}
def __init__(self, config):
super().__init__(config)
@@ -565,7 +569,7 @@ class CsmDepthDecoderForCausalLM(CsmPreTrainedModel, GenerationMixin):
def get_decoder(self):
return self.model
@can_return_tuple
@check_model_inputs
@auto_docstring
def forward(
self,
@@ -577,8 +581,6 @@ class CsmDepthDecoderForCausalLM(CsmPreTrainedModel, GenerationMixin):
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
logits_to_keep: Union[int, torch.Tensor] = 0,
**kwargs: Unpack[TransformersKwargs],
@@ -592,12 +594,6 @@ class CsmDepthDecoderForCausalLM(CsmPreTrainedModel, GenerationMixin):
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
# decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
outputs = self.model(
input_ids=input_ids,
backbone_last_hidden_state=backbone_last_hidden_state,
@@ -606,8 +602,6 @@ class CsmDepthDecoderForCausalLM(CsmPreTrainedModel, GenerationMixin):
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
cache_position=cache_position,
**kwargs,
)
@@ -794,8 +788,6 @@ class CsmForConditionalGeneration(CsmPreTrainedModel, CsmGenerationMixin):
self.backbone_model = CsmBackboneModel._from_config(config)
self.depth_decoder = CsmDepthDecoderForCausalLM._from_config(config.depth_decoder_config)
self.codec_model = AutoModel.from_config(config.codec_config)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
@@ -980,8 +972,6 @@ class CsmForConditionalGeneration(CsmPreTrainedModel, CsmGenerationMixin):
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
logits_to_keep: Union[int, torch.Tensor] = 0,
**kwargs: Unpack[TransformersKwargs],
@@ -1052,12 +1042,6 @@ class CsmForConditionalGeneration(CsmPreTrainedModel, CsmGenerationMixin):
>>> output = model(**inputs)
>>> output.loss.backward()
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
if input_ids is not None and input_ids.ndim == 2:
merged_inputs = self._merge_input_ids_with_input_values(
input_ids, input_values, input_values_cutoffs, labels
@@ -1073,8 +1057,6 @@ class CsmForConditionalGeneration(CsmPreTrainedModel, CsmGenerationMixin):
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
cache_position=cache_position,
**kwargs,
)
@@ -1110,10 +1092,9 @@ class CsmForConditionalGeneration(CsmPreTrainedModel, CsmGenerationMixin):
input_ids=depth_decoder_input_ids,
backbone_last_hidden_state=backbone_last_hidden_states,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=True,
labels=depth_decoder_labels,
**kwargs,
)
depth_decoder_loss = depth_decoder_outputs.loss
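Taken together, the CSM changes above drop the manual output_attentions / output_hidden_states plumbing: each class now declares which submodules' outputs should be recorded (_can_record_outputs) and the @check_model_inputs decorator does the collection. The sketch below is a simplified, standalone illustration of that recording idea using forward hooks; the names record_outputs, ToyLayer and ToyModel are made up for the example, and this is not the actual transformers implementation.

from functools import wraps

import torch
from torch import nn


def record_outputs(forward):
    # Simplified stand-in for @check_model_inputs: consult the class-level
    # _can_record_outputs map, hook the named submodule types, and capture the
    # indexed element of each hooked module's return value.
    @wraps(forward)
    def wrapper(self, *args, output_hidden_states=False, output_attentions=False, **kwargs):
        wanted = {
            name: spec
            for name, spec in getattr(self, "_can_record_outputs", {}).items()
            if (name == "hidden_states" and output_hidden_states)
            or (name == "attentions" and output_attentions)
        }
        recorded = {name: [] for name in wanted}
        handles = []
        for name, (module_cls, index) in wanted.items():
            for module in self.modules():
                if isinstance(module, module_cls):
                    def hook(mod, inputs, output, _name=name, _idx=index):
                        out = output if isinstance(output, tuple) else (output,)
                        recorded[_name].append(out[_idx])
                    handles.append(module.register_forward_hook(hook))
        try:
            result = forward(self, *args, **kwargs)
        finally:
            for handle in handles:
                handle.remove()
        return result, {name: tuple(values) for name, values in recorded.items()}
    return wrapper


class ToyLayer(nn.Module):
    def forward(self, hidden_states):
        # Mirrors a decoder layer returning (hidden_states, attention_weights).
        return hidden_states * 2, torch.ones(1)


class ToyModel(nn.Module):
    _can_record_outputs = {"hidden_states": (ToyLayer, 0), "attentions": (ToyLayer, 1)}

    def __init__(self):
        super().__init__()
        self.layers = nn.ModuleList([ToyLayer() for _ in range(3)])

    @record_outputs
    def forward(self, hidden_states):
        for layer in self.layers:
            hidden_states, _ = layer(hidden_states)
        return hidden_states


out, extras = ToyModel()(torch.ones(2, 4), output_hidden_states=True, output_attentions=True)
print(out.shape, len(extras["hidden_states"]), len(extras["attentions"]))  # one entry per hooked layer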

View File

@@ -162,7 +162,7 @@ class CsmPreTrainedModel(PreTrainedModel):
@auto_docstring
class CsmDepthDecoderModel(LlamaModel):
class CsmDepthDecoderModel(LlamaModel, CsmPreTrainedModel):
config_class = CsmDepthDecoderConfig
def __init__(self, config):
@@ -325,7 +325,7 @@ class CsmDepthDecoderForCausalLM(LlamaForCausalLM, GenerationMixin):
return model_inputs
@can_return_tuple
@check_model_inputs
@auto_docstring
def forward(
self,
@@ -337,8 +337,6 @@ class CsmDepthDecoderForCausalLM(LlamaForCausalLM, GenerationMixin):
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
logits_to_keep: Union[int, torch.Tensor] = 0,
**kwargs: Unpack[TransformersKwargs],
@@ -352,12 +350,6 @@ class CsmDepthDecoderForCausalLM(LlamaForCausalLM, GenerationMixin):
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
# decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
outputs = self.model(
input_ids=input_ids,
backbone_last_hidden_state=backbone_last_hidden_state,
@@ -366,8 +358,6 @@ class CsmDepthDecoderForCausalLM(LlamaForCausalLM, GenerationMixin):
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
cache_position=cache_position,
**kwargs,
)
@@ -461,8 +451,6 @@ class CsmForConditionalGeneration(CsmPreTrainedModel, CsmGenerationMixin):
self.backbone_model = CsmBackboneModel._from_config(config)
self.depth_decoder = CsmDepthDecoderForCausalLM._from_config(config.depth_decoder_config)
self.codec_model = AutoModel.from_config(config.codec_config)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
@@ -647,8 +635,6 @@ class CsmForConditionalGeneration(CsmPreTrainedModel, CsmGenerationMixin):
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
logits_to_keep: Union[int, torch.Tensor] = 0,
**kwargs: Unpack[TransformersKwargs],
@@ -719,12 +705,6 @@ class CsmForConditionalGeneration(CsmPreTrainedModel, CsmGenerationMixin):
>>> output = model(**inputs)
>>> output.loss.backward()
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
if input_ids is not None and input_ids.ndim == 2:
merged_inputs = self._merge_input_ids_with_input_values(
input_ids, input_values, input_values_cutoffs, labels
@@ -740,8 +720,6 @@ class CsmForConditionalGeneration(CsmPreTrainedModel, CsmGenerationMixin):
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
cache_position=cache_position,
**kwargs,
)
@@ -777,10 +755,9 @@ class CsmForConditionalGeneration(CsmPreTrainedModel, CsmGenerationMixin):
input_ids=depth_decoder_input_ids,
backbone_last_hidden_state=backbone_last_hidden_states,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=True,
labels=depth_decoder_labels,
**kwargs,
)
depth_decoder_loss = depth_decoder_outputs.loss
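One modular-file change worth a note: CsmDepthDecoderModel now lists CsmPreTrainedModel explicitly alongside LlamaModel. The short sketch below uses generic stand-in class names (not transformers classes) to show the attribute-lookup consequence: a class attribute such as _can_record_outputs defined on the second base becomes visible on the subclass through the MRO unless an earlier base overrides it.

class LlamaLikeModel:
    pass


class CsmPreTrainedLike:
    # Stand-in for a class-level recording map like _can_record_outputs.
    _can_record_outputs = {"hidden_states": ("DecoderLayer", 0)}


class DepthDecoderLike(LlamaLikeModel, CsmPreTrainedLike):
    pass


# Lookup walks the MRO: DepthDecoderLike -> LlamaLikeModel -> CsmPreTrainedLike -> object.
print([cls.__name__ for cls in DepthDecoderLike.__mro__])
assert DepthDecoderLike._can_record_outputs["hidden_states"] == ("DecoderLayer", 0)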

View File

@@ -9,6 +9,8 @@ from typing import Callable, Optional, Union
import torch
from torch import nn
from transformers.utils.generic import check_model_inputs
from ...activations import ACT2FN
from ...cache_utils import Cache, DynamicCache
from ...generation import GenerationMixin
@@ -338,7 +340,7 @@ class MistralModel(MistralPreTrainedModel):
def set_input_embeddings(self, value):
self.embed_tokens = value
@can_return_tuple
@check_model_inputs
@auto_docstring
def forward(
self,
@@ -348,30 +350,12 @@ class MistralModel(MistralPreTrainedModel):
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> BaseModelOutputWithPast:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
if self.gradient_checkpointing and self.training and use_cache:
logger.warning_once(
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
)
use_cache = False
# TODO (joao): remove this exception in v4.56 -- it exists for users that try to pass a legacy cache
if not isinstance(past_key_values, (type(None), Cache)):
raise ValueError("The `past_key_values` should be either a `Cache` object or `None`.")
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids)
@@ -398,45 +382,24 @@ class MistralModel(MistralPreTrainedModel):
hidden_states = inputs_embeds
# create position embeddings to be shared across the decoder layers
position_embeddings = self.rotary_emb(hidden_states, position_ids)
# decoder layers
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
for decoder_layer in self.layers[: self.config.num_hidden_layers]:
if output_hidden_states:
all_hidden_states += (hidden_states,)
layer_outputs = decoder_layer(
hidden_states = decoder_layer(
hidden_states,
attention_mask=causal_mask,
position_ids=position_ids,
past_key_value=past_key_values,
output_attentions=output_attentions,
use_cache=use_cache,
cache_position=cache_position,
position_embeddings=position_embeddings,
**kwargs,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_self_attns += (layer_outputs[1],)
hidden_states = self.norm(hidden_states)
# add hidden states from the last decoder layer
if output_hidden_states:
all_hidden_states += (hidden_states,)
return BaseModelOutputWithPast(
last_hidden_state=hidden_states,
past_key_values=past_key_values if use_cache else None,
hidden_states=all_hidden_states,
attentions=all_self_attns,
)
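The (input_ids is None) ^ (inputs_embeds is not None) guard kept in the Mistral forward above enforces that exactly one of the two inputs is supplied. A tiny truth-table check of that condition follows; the helper name is only for illustration.

# Exactly one of input_ids / inputs_embeds must be provided: the XOR is True
# (and the model raises) only when both or neither are given.
def inputs_are_valid(input_ids, inputs_embeds):
    return not ((input_ids is None) ^ (inputs_embeds is not None))


assert inputs_are_valid("ids", None)
assert inputs_are_valid(None, "embeds")
assert not inputs_are_valid(None, None)
assert not inputs_are_valid("ids", "embeds")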

View File

@@ -4,6 +4,8 @@ import torch
import torch.utils.checkpoint
from torch import nn
from transformers.utils.generic import check_model_inputs
from ...cache_utils import Cache, DynamicCache
from ...masking_utils import create_causal_mask, create_sliding_window_causal_mask
from ...modeling_flash_attention_utils import FlashAttentionKwargs
@@ -100,11 +102,14 @@ class MistralDecoderLayer(LlamaDecoderLayer):
class MistralPreTrainedModel(LlamaPreTrainedModel):
pass
_can_record_outputs: dict[str, tuple[nn.Module, int]] = {
"hidden_states": (MistralDecoderLayer, 0),
"attentions": (MistralAttention, 1),
}
class MistralModel(LlamaModel):
@can_return_tuple
@check_model_inputs
@auto_docstring
def forward(
self,
@@ -114,30 +119,12 @@ class MistralModel(LlamaModel):
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> BaseModelOutputWithPast:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
if self.gradient_checkpointing and self.training and use_cache:
logger.warning_once(
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
)
use_cache = False
# TODO (joao): remove this exception in v4.56 -- it exists for users that try to pass a legacy cache
if not isinstance(past_key_values, (type(None), Cache)):
raise ValueError("The `past_key_values` should be either a `Cache` object or `None`.")
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids)
@@ -164,45 +151,24 @@ class MistralModel(LlamaModel):
hidden_states = inputs_embeds
# create position embeddings to be shared across the decoder layers
position_embeddings = self.rotary_emb(hidden_states, position_ids)
# decoder layers
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
for decoder_layer in self.layers[: self.config.num_hidden_layers]:
if output_hidden_states:
all_hidden_states += (hidden_states,)
layer_outputs = decoder_layer(
hidden_states = decoder_layer(
hidden_states,
attention_mask=causal_mask,
position_ids=position_ids,
past_key_value=past_key_values,
output_attentions=output_attentions,
use_cache=use_cache,
cache_position=cache_position,
position_embeddings=position_embeddings,
**kwargs,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_self_attns += (layer_outputs[1],)
hidden_states = self.norm(hidden_states)
# add hidden states from the last decoder layer
if output_hidden_states:
all_hidden_states += (hidden_states,)
return BaseModelOutputWithPast(
last_hidden_state=hidden_states,
past_key_values=past_key_values if use_cache else None,
hidden_states=all_hidden_states,
attentions=all_self_attns,
)
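From the caller's side the intent of the refactor is that nothing changes: output_hidden_states=True and output_attentions=True now travel through **kwargs and are interpreted by @check_model_inputs instead of being threaded through every forward. A hedged end-to-end sketch using a tiny randomly initialised Mistral config (sizes are arbitrary; assumes a transformers build containing this commit):

import torch
from transformers import MistralConfig, MistralModel

# Tiny config so the model can be built and run on CPU without downloading weights.
config = MistralConfig(
    vocab_size=128,
    hidden_size=64,
    intermediate_size=128,
    num_hidden_layers=2,
    num_attention_heads=4,
    num_key_value_heads=2,
)
model = MistralModel(config).eval()

input_ids = torch.randint(0, config.vocab_size, (1, 8))
with torch.no_grad():
    outputs = model(input_ids, output_hidden_states=True, output_attentions=True)

# Both tuples should still be populated; their exact lengths follow the library's
# recording convention rather than the old in-loop accumulation.
print(len(outputs.hidden_states), len(outputs.attentions))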