diff --git a/tests/models/roformer/test_modeling_roformer.py b/tests/models/roformer/test_modeling_roformer.py
index 3b94cac79ea..dde8b72c49d 100644
--- a/tests/models/roformer/test_modeling_roformer.py
+++ b/tests/models/roformer/test_modeling_roformer.py
@@ -513,8 +513,9 @@ class RoFormerSinusoidalPositionalEmbeddingTest(unittest.TestCase):
 
     def test_basic(self):
         input_ids = torch.tensor([[4, 10]], dtype=torch.long, device=torch_device)
-        emb1 = RoFormerSinusoidalPositionalEmbedding(num_positions=6, embedding_dim=6).to(torch_device)
+        emb1 = RoFormerSinusoidalPositionalEmbedding(num_positions=6, embedding_dim=6)
         emb1._init_weight()
+        emb1 = emb1.to(torch_device)
         emb = emb1(input_ids.shape)
         desired_weights = torch.tensor(
             [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]]
@@ -554,8 +555,9 @@ class RoFormerSelfAttentionRotaryPositionEmbeddingTest(unittest.TestCase):
         key_layer = (
             -torch.arange(2 * 12 * 16 * 64, dtype=torch.float, device=torch_device).reshape(2, 12, 16, 64) / 100
         ).to(torch_device)
-        embed_positions = RoFormerSinusoidalPositionalEmbedding(num_positions=32, embedding_dim=64).to(torch_device)
+        embed_positions = RoFormerSinusoidalPositionalEmbedding(num_positions=32, embedding_dim=64)
         embed_positions._init_weight()
+        embed_positions = embed_positions.to(torch_device)
         sinusoidal_pos = embed_positions([2, 16, 768])[None, None, :, :]
 
         query_layer, key_layer = RoFormerSelfAttention.apply_rotary_position_embeddings(
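
Context for the change (my reading, not something the patch states): the diff moves `.to(torch_device)` from the constructor chain to after the `_init_weight()` call, which makes sense if `_init_weight()` rebuilds the embedding weight as a fresh CPU tensor, so a device move performed before initialization would be silently undone. The sketch below illustrates that ordering constraint under that assumption; `SinusoidalPositionalEmbedding` is a hypothetical stand-in, not the real `RoFormerSinusoidalPositionalEmbedding` from transformers.

```python
# Minimal sketch of the ordering constraint fixed above.
# Assumption: _init_weight() rebuilds the weight table on CPU, so
# .to(device) must come after it. This stand-in class is hypothetical.
import torch
from torch import nn


class SinusoidalPositionalEmbedding(nn.Embedding):
    def __init__(self, num_positions: int, embedding_dim: int):
        super().__init__(num_positions, embedding_dim)

    @torch.no_grad()
    def _init_weight(self) -> None:
        n_pos, dim = self.weight.shape
        position = torch.arange(n_pos, dtype=torch.float).unsqueeze(1)  # (n_pos, 1)
        div = torch.pow(10000.0, torch.arange(0, dim, 2, dtype=torch.float) / dim)
        table = torch.zeros(n_pos, dim)  # built on CPU, whatever the module's device
        table[:, : dim // 2] = torch.sin(position / div)  # first half: sin terms
        table[:, dim // 2 :] = torch.cos(position / div)  # second half: cos terms
        # Reassigning the parameter discards any earlier .to(device) placement.
        self.weight = nn.Parameter(table, requires_grad=False)


device = "cuda" if torch.cuda.is_available() else "cpu"

# Old order: the CPU tensor created by _init_weight() undoes the move.
emb = SinusoidalPositionalEmbedding(num_positions=6, embedding_dim=6).to(device)
emb._init_weight()
print(emb.weight.device)  # cpu, regardless of `device`

# Patched order: initialize first, then move the finished module.
emb = SinusoidalPositionalEmbedding(num_positions=6, embedding_dim=6)
emb._init_weight()
emb = emb.to(device)
print(emb.weight.device)  # matches `device`
```

With dim=6, this sinusoid layout (sin terms in the first half of each row, cos terms in the second) reproduces the `desired_weights` row `[0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]` checked in `test_basic`, which is why the test's expected values are unaffected by the reordering.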