add back self.max_position_embeddings = config.max_position_embeddings (#33550)

* add back self.max_position_embeddings = config.max_position_embeddings

* fix-copies
Author: chengchengpei · 2024-09-23 03:54:58 -07:00 · committed by GitHub
parent 6d02968d51
commit 214db9e660
2 changed files with 2 additions and 0 deletions

src/transformers/models/qwen2/modeling_qwen2.py

@@ -310,6 +310,7 @@ class Qwen2Attention(nn.Module):
         self.head_dim = self.hidden_size // self.num_heads
         self.num_key_value_heads = config.num_key_value_heads
         self.num_key_value_groups = self.num_heads // self.num_key_value_heads
+        self.max_position_embeddings = config.max_position_embeddings
         self.rope_theta = config.rope_theta
         self.is_causal = True
         self.attention_dropout = config.attention_dropout

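For context, below is a minimal, self-contained sketch of a constructor shaped like the hunk above, showing the restored attribute being set and then read back off the module. The class name, the SimpleNamespace config, and the specific values are illustrative stand-ins, not the real Qwen2Attention or its config.

import torch.nn as nn
from types import SimpleNamespace


class MiniAttention(nn.Module):
    """Illustrative stand-in for the attention constructor in the hunk above (not the real Qwen2Attention)."""

    def __init__(self, config):
        super().__init__()
        self.hidden_size = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = self.hidden_size // self.num_heads
        self.num_key_value_heads = config.num_key_value_heads
        self.num_key_value_groups = self.num_heads // self.num_key_value_heads
        # The line restored by this commit: keeping the value on the module lets
        # code that introspects attention layers read it directly.
        self.max_position_embeddings = config.max_position_embeddings
        self.rope_theta = config.rope_theta
        self.is_causal = True
        self.attention_dropout = config.attention_dropout


# Hypothetical config values, chosen only to exercise the constructor.
config = SimpleNamespace(
    hidden_size=64,
    num_attention_heads=8,
    num_key_value_heads=4,
    max_position_embeddings=32768,
    rope_theta=10000.0,
    attention_dropout=0.0,
)
attn = MiniAttention(config)
assert attn.max_position_embeddings == config.max_position_embeddings
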
src/transformers/models/qwen2_moe/modeling_qwen2_moe.py

@@ -388,6 +388,7 @@ class Qwen2MoeAttention(nn.Module):
         self.head_dim = self.hidden_size // self.num_heads
         self.num_key_value_heads = config.num_key_value_heads
         self.num_key_value_groups = self.num_heads // self.num_key_value_heads
+        self.max_position_embeddings = config.max_position_embeddings
         self.rope_theta = config.rope_theta
         self.is_causal = True
         self.attention_dropout = config.attention_dropout
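
The second hunk corresponds to the "fix-copies" step named in the commit message: Qwen2MoeAttention mirrors Qwen2Attention through the repository's "Copied from" markers, so the same one-line addition is propagated by `make fix-copies` rather than edited by hand. A rough, illustrative sketch of that relationship follows; the class names and marker text here are simplified, not the exact file contents.

import torch.nn as nn


class Qwen2AttentionSketch(nn.Module):
    # Source class: edits here are mirrored into the copy below by `make fix-copies`.
    def __init__(self, config):
        super().__init__()
        self.max_position_embeddings = config.max_position_embeddings  # restored line


class Qwen2MoeAttentionSketch(nn.Module):
    # Copied from Qwen2AttentionSketch (illustrative marker): the copy checker regenerates
    # this body from the source class, which is why both files change identically in this commit.
    def __init__(self, config):
        super().__init__()
        self.max_position_embeddings = config.max_position_embeddings  # propagated copy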