Mirror of https://github.com/huggingface/transformers.git
Update deprecated torch.range in test_modeling_ibert.py (#27355)

* Update deprecated torch.range
* Remove comment
parent a5bee89c9d
commit 0e402e1478
@@ -519,7 +519,7 @@ class IBertModelIntegrationTest(unittest.TestCase):
         gelu_q = IntGELU(quant_mode=True)
         gelu_dq = nn.GELU()
 
-        x_int = torch.range(-10000, 10000, 1)
+        x_int = torch.arange(-10000, 10001, 1)
         x_scaling_factor = torch.tensor(0.001)
         x = x_int * x_scaling_factor
 
@@ -534,7 +534,7 @@ class IBertModelIntegrationTest(unittest.TestCase):
         self.assertTrue(torch.allclose(q_int, q_int.round(), atol=1e-4))
 
     def test_force_dequant_gelu(self):
-        x_int = torch.range(-10000, 10000, 1)
+        x_int = torch.arange(-10000, 10001, 1)
         x_scaling_factor = torch.tensor(0.001)
         x = x_int * x_scaling_factor
 
@@ -565,7 +565,6 @@ class IBertModelIntegrationTest(unittest.TestCase):
         softmax_q = IntSoftmax(output_bit, quant_mode=True)
         softmax_dq = nn.Softmax()
 
-        # x_int = torch.range(-10000, 10000, 1)
         def _test(array):
             x_int = torch.tensor(array)
             x_scaling_factor = torch.tensor(0.1)
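
The upper bound moves from 10000 to 10001 in the hunks above because torch.range treats its end bound as inclusive, while torch.arange excludes it, so covering the same 20001 integers needs the end bumped by one step. A minimal sketch of the equivalence (not part of the commit; assumes only a local PyTorch install and reuses the test's variable names):

import torch

# Deprecated: torch.range(-10000, 10000, 1) counts its end bound as inclusive
# and warns that it will be removed. torch.arange excludes the end bound, so
# the upper limit becomes 10001 to cover the same range of integers.
x_int = torch.arange(-10000, 10001, 1)
assert x_int.numel() == 20001
assert x_int[0].item() == -10000 and x_int[-1].item() == 10000

# Note (assumption worth checking on your PyTorch version): torch.arange with
# integer arguments yields an int64 tensor, whereas torch.range returned the
# default float dtype; multiplying by a float scaling factor promotes to float
# either way, so the test values are unaffected.
x_scaling_factor = torch.tensor(0.001)
x = x_int * x_scaling_factor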