transformers/tests/models/nemotron/test_modeling_nemotron.py

# Copyright 2024 HuggingFace Inc. team. All rights reserved.
# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch Nemotron model."""

import unittest

from transformers import NemotronConfig, is_torch_available
from transformers.testing_utils import (
    Expectations,
    require_read_token,
    require_torch,
    require_torch_accelerator,
    slow,
    torch_device,
)

from ...causal_lm_tester import CausalLMModelTest, CausalLMModelTester
from ...test_configuration_common import ConfigTester


if is_torch_available():
    import torch

    from transformers import (
        AutoTokenizer,
        NemotronForCausalLM,
        NemotronForQuestionAnswering,
        NemotronForSequenceClassification,
        NemotronForTokenClassification,
        NemotronModel,
    )
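

# NemotronModelTester only points the shared CausalLMModelTester at the Nemotron-specific
# config and model classes; the shared tester supplies the small configs and dummy inputs
# that the common test suite below runs against.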
class NemotronModelTester(CausalLMModelTester):
    if is_torch_available():
        config_class = NemotronConfig
        base_model_class = NemotronModel
        causal_lm_class = NemotronForCausalLM
        sequence_class = NemotronForSequenceClassification
        token_class = NemotronForTokenClassification
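

# The generic CausalLMModelTest mixin provides the actual test methods; the class
# attributes below (all_model_classes, pipeline_model_mapping, ...) tell it which
# Nemotron classes to exercise and which pipeline tasks to cover.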
@require_torch
class NemotronModelTest(CausalLMModelTest, unittest.TestCase):
    model_tester_class = NemotronModelTester
    # Need to use `0.8` instead of `0.9` for `test_cpu_offload`
    # This is because we are hitting edge cases with the causal_mask buffer
    model_split_percents = [0.5, 0.7, 0.8]
    all_model_classes = (
        (
            NemotronModel,
            NemotronForCausalLM,
            NemotronForSequenceClassification,
            NemotronForQuestionAnswering,
            NemotronForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": NemotronModel,
            "text-classification": NemotronForSequenceClassification,
            "text-generation": NemotronForCausalLM,
            "zero-shot": NemotronForSequenceClassification,
            "question-answering": NemotronForQuestionAnswering,
            "token-classification": NemotronForTokenClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False
    fx_compatible = False

    # used in `test_torch_compile_for_training`
    _torch_compile_train_cls = NemotronForCausalLM if is_torch_available() else None

    def setUp(self):
        self.model_tester = NemotronModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NemotronConfig, hidden_size=37)

    @unittest.skip("Eager and SDPA do not produce the same outputs, thus this test fails")
    def test_model_outputs_equivalence(self, **kwargs):
        pass
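

# The integration tests below load the full thhaus/nemotron3-8b checkpoint from the Hub,
# so they need an accelerator and a Hub read token, and they only run when RUN_SLOW=1 is
# set (the @slow decorator skips them otherwise), e.g.:
#   RUN_SLOW=1 pytest tests/models/nemotron/test_modeling_nemotron.py -k Integration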
@require_torch_accelerator
class NemotronIntegrationTest(unittest.TestCase):
    # This variable is used to determine which CUDA device we are using for our runners (A10 or T4).
    # Depending on the hardware, we get different logits / generations.
    cuda_compute_capability_major_version = None

    @classmethod
    def setUpClass(cls):
        if is_torch_available() and torch.cuda.is_available():
            # 8 is for A100 / A10 and 7 for T4
            cls.cuda_compute_capability_major_version = torch.cuda.get_device_capability()[0]

    @slow
    @require_read_token
    def test_nemotron_8b_generation_sdpa(self):
        text = ["What is the largest planet in solar system?"]
        EXPECTED_TEXT = [
            "What is the largest planet in solar system?\nAnswer: Jupiter\n\nWhat is the answer",
        ]
        model_id = "thhaus/nemotron3-8b"
        model = NemotronForCausalLM.from_pretrained(
            model_id, torch_dtype=torch.float16, device_map="auto", attn_implementation="sdpa"
        )
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        inputs = tokenizer(text, return_tensors="pt").to(torch_device)

        output = model.generate(**inputs, do_sample=False, max_new_tokens=10)
        output_text = tokenizer.batch_decode(output, skip_special_tokens=True)
        self.assertEqual(EXPECTED_TEXT, output_text)
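
    # Expected outputs differ slightly by device; Expectations picks the entry whose
    # (device type, major compute capability) key best matches the current runner.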
    @slow
    @require_read_token
    def test_nemotron_8b_generation_eager(self):
        text = ["What is the largest planet in solar system?"]
        EXPECTED_TEXTS = Expectations(
            {
                ("xpu", 3): [
                    "What is the largest planet in solar system?\nAnswer: Jupiter\n\nWhat is the answer: What is the name of the 19",
                ],
                ("cuda", 7): [
                    "What is the largest planet in solar system?\nAnswer: Jupiter\n\nWhat is the answer",
                ],
            }
        )
        EXPECTED_TEXT = EXPECTED_TEXTS.get_expectation()

        model_id = "thhaus/nemotron3-8b"
        model = NemotronForCausalLM.from_pretrained(
            model_id, torch_dtype=torch.float16, device_map="auto", attn_implementation="eager"
        )
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        inputs = tokenizer(text, return_tensors="pt").to(torch_device)

        output = model.generate(**inputs, do_sample=False)
        output_text = tokenizer.batch_decode(output, skip_special_tokens=True)
        self.assertEqual(EXPECTED_TEXT, output_text)

    @slow
    @require_read_token
    def test_nemotron_8b_generation_fa2(self):
        text = ["What is the largest planet in solar system?"]
        EXPECTED_TEXT = [
            "What is the largest planet in solar system?\nAnswer: Jupiter\n\nWhat is the answer",
        ]
        model_id = "thhaus/nemotron3-8b"
        model = NemotronForCausalLM.from_pretrained(
            model_id, torch_dtype=torch.float16, device_map="auto", attn_implementation="flash_attention_2"
        )
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        inputs = tokenizer(text, return_tensors="pt").to(torch_device)

        output = model.generate(**inputs, do_sample=False)
        output_text = tokenizer.batch_decode(output, skip_special_tokens=True)
        self.assertEqual(EXPECTED_TEXT, output_text)