Mirror of https://github.com/huggingface/transformers.git
Falcon: removed unused function (#28605)
parent de13a951b3
commit a28a76996c
@@ -214,17 +214,6 @@ class FalconDynamicNTKScalingRotaryEmbedding(FalconRotaryEmbedding):
         self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)
 
 
-def _prepare_4d_attention_mask(mask: torch.Tensor, past_key_values_length: int) -> torch.BoolTensor:
-    """
-    Expands attention_mask from `[batch_size, seq_length]` to `[batch_size, 1, seq_length, seq_length + past_length]`.
-    """
-    batch_size, total_length = mask.shape
-    seq_length = total_length - past_key_values_length if past_key_values_length is not None else total_length
-
-    expanded_mask = ~(mask[:, None, None, :].to(torch.bool))
-    return expanded_mask.expand(batch_size, 1, seq_length, total_length)
-
-
 def build_alibi_tensor(attention_mask: torch.Tensor, num_heads: int, dtype: torch.dtype) -> torch.Tensor:
     batch_size, seq_length = attention_mask.shape
     closest_power_of_2 = 2 ** math.floor(math.log2(num_heads))
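For context, the deleted helper only reshaped a 2D padding mask into the 4D boolean layout expected by the attention code. Below is a minimal sketch of that behavior, reconstructed from the removed lines above; the sample mask values and the printed shape check are illustrative and not part of the commit.

import torch

# Reconstruction of the removed helper, shown only to illustrate what this commit deletes.
def _prepare_4d_attention_mask(mask: torch.Tensor, past_key_values_length: int) -> torch.BoolTensor:
    batch_size, total_length = mask.shape
    seq_length = total_length - past_key_values_length if past_key_values_length is not None else total_length

    # Invert the padding mask: True marks positions to be masked out.
    expanded_mask = ~(mask[:, None, None, :].to(torch.bool))
    # Broadcast [batch_size, 1, 1, total_length] to [batch_size, 1, seq_length, total_length].
    return expanded_mask.expand(batch_size, 1, seq_length, total_length)

# Illustrative usage: batch of 2, 3 current tokens plus 1 cached past token.
mask = torch.tensor([[1, 1, 1, 1], [1, 1, 1, 0]])  # [batch_size, seq_length + past_length]
out = _prepare_4d_attention_mask(mask, past_key_values_length=1)
print(out.shape)  # torch.Size([2, 1, 3, 4])

Since nothing in modeling_falcon.py still called this helper (mask preparation goes through the shared attention-mask utilities instead), removing it does not change model behavior.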