wrapped forward passes in torch.no_grad() (#15037)

Author: Matt Churgin
Date: 2022-01-06 08:48:49 -05:00 (committed by GitHub)
parent 5a06118b39
commit 5ab87cd4da

@@ -485,6 +485,7 @@ class RobertaModelIntegrationTest(TestCasePlus):
         model = RobertaForMaskedLM.from_pretrained("roberta-base")
 
         input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
-        output = model(input_ids)[0]
+        with torch.no_grad():
+            output = model(input_ids)[0]
         expected_shape = torch.Size((1, 11, 50265))
         self.assertEqual(output.shape, expected_shape)
@@ -504,6 +505,7 @@ class RobertaModelIntegrationTest(TestCasePlus):
         model = RobertaModel.from_pretrained("roberta-base")
 
         input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
-        output = model(input_ids)[0]
+        with torch.no_grad():
+            output = model(input_ids)[0]
         # compare the actual values for a slice.
         expected_slice = torch.tensor(
@@ -521,6 +523,7 @@ class RobertaModelIntegrationTest(TestCasePlus):
         model = RobertaForSequenceClassification.from_pretrained("roberta-large-mnli")
 
         input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
-        output = model(input_ids)[0]
+        with torch.no_grad():
+            output = model(input_ids)[0]
         expected_shape = torch.Size((1, 3))
         self.assertEqual(output.shape, expected_shape)
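
For context, the pattern applied throughout this commit is the standard torch.no_grad() context manager: a forward pass executed inside it records no autograd graph, which reduces memory use in these inference-only integration tests, and the assertions on the outputs do not need gradients anyway. Below is a minimal sketch of the same pattern (not part of the commit), using a toy nn.Linear as a stand-in for the pretrained RoBERTa checkpoints so it runs without downloading a model:

# Minimal sketch, assuming a toy module in place of RobertaModel.from_pretrained(...)
import torch
from torch import nn

model = nn.Linear(4, 2)  # stand-in for a pretrained checkpoint
model.eval()

inputs = torch.randn(1, 4)
with torch.no_grad():        # no autograd graph is built for this forward pass
    output = model(inputs)

print(output.requires_grad)  # False: the result is detached from autograd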