Mirror of https://github.com/huggingface/transformers.git (synced 2025-08-01 02:31:11 +06:00)
Fix gradient checkpointing bug in Trajectory Transformer (#22125)

parent d0876a095f
commit 4c14c1f47b
@@ -533,6 +533,13 @@ class TrajectoryTransformerModel(TrajectoryTransformerPreTrainedModel):
 
         hidden_states = self.drop(token_embeddings + position_embeddings)
 
+        if self.gradient_checkpointing and self.training:
+            if use_cache:
+                logger.warning_once(
+                    "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
+                )
+                use_cache = False
+
         presents = () if use_cache else None
         all_self_attentions = () if output_attentions else None
         all_hidden_states = () if output_hidden_states else None
@@ -542,11 +549,6 @@ class TrajectoryTransformerModel(TrajectoryTransformerPreTrainedModel):
                 all_hidden_states = all_hidden_states + (hidden_states,)
 
             if self.gradient_checkpointing and self.training:
-                if use_cache:
-                    logger.warning_once(
-                        "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
-                    )
-                    use_cache = False
 
                 def create_custom_forward(module):
                     def custom_forward(*inputs):
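The fix hoists the `use_cache` compatibility check out of the per-layer loop so the warning fires once, before iteration starts, and `use_cache` is already disabled when `presents` is initialized. A minimal sketch of exercising the fixed path follows; the checkpoint name is illustrative (any Trajectory Transformer weights would do), not part of this commit:

import torch
from transformers import TrajectoryTransformerModel

# Illustrative checkpoint; substitute any Trajectory Transformer weights.
model = TrajectoryTransformerModel.from_pretrained(
    "CarlCochet/trajectory-transformer-halfcheetah-medium-v2"
)
model.gradient_checkpointing_enable()
model.train()  # the guard only fires when self.training is True

# Random discretized trajectory tokens in [0, vocab_size).
trajectories = torch.randint(0, model.config.vocab_size, (1, 17))

# With this fix, passing use_cache=True under gradient checkpointing logs a
# single warning and the forward pass proceeds with use_cache=False.
outputs = model(trajectories, use_cache=True)
print(outputs.logits.shape)
print(outputs.past_key_values)  # None, since use_cache was disabled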