diff --git a/src/transformers/models/glm4/configuration_glm4.py b/src/transformers/models/glm4/configuration_glm4.py
index 3632a7b6b44..05d129a23e5 100644
--- a/src/transformers/models/glm4/configuration_glm4.py
+++ b/src/transformers/models/glm4/configuration_glm4.py
@@ -22,7 +22,7 @@ class Glm4Config(PretrainedConfig):
     This is the configuration class to store the configuration of a [`Glm4Model`]. It is used to instantiate an Glm4
     model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
     defaults will yield a similar configuration to that of the Glm4-4-9b-chat.
-    e.g. [THUDM/glm-4-0414-9b-chat-chat](https://huggingface.co/THUDM/glm-4-0414-9b-chat-chat)
+    e.g. [THUDM/GLM-4-9B-0414](https://huggingface.co/THUDM/GLM-4-9B-0414)
     Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
     documentation from [`PretrainedConfig`] for more information.
     Args:
diff --git a/src/transformers/models/glm4/modeling_glm4.py b/src/transformers/models/glm4/modeling_glm4.py
index f32bfb3a392..9a22b256175 100644
--- a/src/transformers/models/glm4/modeling_glm4.py
+++ b/src/transformers/models/glm4/modeling_glm4.py
@@ -551,8 +551,8 @@ class Glm4ForCausalLM(Glm4PreTrainedModel, GenerationMixin):
         ```python
         >>> from transformers import AutoTokenizer, Glm4ForCausalLM

-        >>> model = Glm4ForCausalLM.from_pretrained("THUDM/GLM-4-9B-Chat-0414")
-        >>> tokenizer = AutoTokenizer.from_pretrained("THUDM/GLM-4-9B-Chat-0414")
+        >>> model = Glm4ForCausalLM.from_pretrained("THUDM/GLM-4-9B-0414")
+        >>> tokenizer = AutoTokenizer.from_pretrained("THUDM/GLM-4-9B-0414")

         >>> prompt = "Hey, are you conscious? Can you talk to me?"
         >>> inputs = tokenizer(prompt, return_tensors="pt")
diff --git a/src/transformers/models/glm4/modular_glm4.py b/src/transformers/models/glm4/modular_glm4.py
index 4664efcad2e..1cf98ce159c 100644
--- a/src/transformers/models/glm4/modular_glm4.py
+++ b/src/transformers/models/glm4/modular_glm4.py
@@ -31,7 +31,7 @@ from .modeling_glm4 import Glm4RMSNorm

 logger = logging.get_logger(__name__)

-_CHECKPOINT_FOR_DOC = "THUDM/GLM-4-9B-Chat-0414"
+_CHECKPOINT_FOR_DOC = "THUDM/GLM-4-9B-0414"


 class Glm4MLP(Phi3MLP):
@@ -119,8 +119,8 @@ class Glm4ForCausalLM(GlmForCausalLM):
         ```python
         >>> from transformers import AutoTokenizer, Glm4ForCausalLM

-        >>> model = Glm4ForCausalLM.from_pretrained("THUDM/GLM-4-9B-Chat-0414")
-        >>> tokenizer = AutoTokenizer.from_pretrained("THUDM/GLM-4-9B-Chat-0414")
+        >>> model = Glm4ForCausalLM.from_pretrained("THUDM/GLM-4-9B-0414")
+        >>> tokenizer = AutoTokenizer.from_pretrained("THUDM/GLM-4-9B-0414")

         >>> prompt = "Hey, are you conscious? Can you talk to me?"
         >>> inputs = tokenizer(prompt, return_tensors="pt")
diff --git a/tests/models/glm4/test_modeling_glm4.py b/tests/models/glm4/test_modeling_glm4.py
index d7a8074a5c9..bd5e92a281f 100644
--- a/tests/models/glm4/test_modeling_glm4.py
+++ b/tests/models/glm4/test_modeling_glm4.py
@@ -20,6 +20,8 @@ import pytest
 from transformers import AutoModelForCausalLM, AutoTokenizer, Glm4Config, is_torch_available
 from transformers.testing_utils import (
+    Expectations,
+    cleanup,
     require_flash_attn,
     require_torch,
     require_torch_large_gpu,
@@ -80,113 +82,142 @@ class Glm4ModelTest(CausalLMModelTest, unittest.TestCase):
 @require_torch_large_gpu
 class Glm4IntegrationTest(unittest.TestCase):
     input_text = ["Hello I am doing", "Hi today"]
-    model_id = "THUDM/glm-4-0414-9b-chat"
-    revision = "refs/pr/15"
+    model_id = "THUDM/GLM-4-9B-0414"
+
+    def tearDown(self):
+        cleanup(torch_device, gc_collect=True)

     def test_model_9b_fp16(self):
-        EXPECTED_TEXTS = [
-            "Hello I am doing a project on the history of the internetSolution:\n\nStep 1: Introduction\nThe history of the",
-            "Hi today I am going to show you how to make a simple and easy to make a DIY paper flower.",
-        ]
+        EXPECTED_TEXTS = Expectations(
+            {
+                ("cuda", 7): [],
+                ("cuda", 8): [
+                    "Hello I am doing a project on the history of the internet and I need to know what the first website was and what",
+                    "Hi today I am going to tell you about the most common disease in the world. This disease is called diabetes",
+                ],
+            }
+        )
+        EXPECTED_TEXT = EXPECTED_TEXTS.get_expectation()

         model = AutoModelForCausalLM.from_pretrained(
-            self.model_id, low_cpu_mem_usage=True, torch_dtype=torch.float16, revision=self.revision
+            self.model_id, low_cpu_mem_usage=True, torch_dtype=torch.float16
         ).to(torch_device)

-        tokenizer = AutoTokenizer.from_pretrained(self.model_id, revision=self.revision)
+        tokenizer = AutoTokenizer.from_pretrained(self.model_id)
         inputs = tokenizer(self.input_text, return_tensors="pt", padding=True).to(torch_device)

         output = model.generate(**inputs, max_new_tokens=20, do_sample=False)
         output_text = tokenizer.batch_decode(output, skip_special_tokens=True)
-        self.assertEqual(output_text, EXPECTED_TEXTS)
+        self.assertEqual(output_text, EXPECTED_TEXT)

     def test_model_9b_bf16(self):
-        EXPECTED_TEXTS = [
-            "Hello I am doing a project on the history of the internetSolution:\n\nStep 1: Introduction\nThe history of the",
-            "Hi today I am going to show you how to make a simple and easy to make a DIY paper flower.",
-        ]
+        EXPECTED_TEXTS = Expectations(
+            {
+                ("cuda", 7): [],
+                ("cuda", 8): [
+                    "Hello I am doing a project on the history of the internet and I need to know what the first website was and what",
+                    "Hi today I am going to tell you about the most common disease in the world. This disease is called diabetes",
+                ],
+            }
+        )
+        EXPECTED_TEXT = EXPECTED_TEXTS.get_expectation()

         model = AutoModelForCausalLM.from_pretrained(
-            self.model_id, low_cpu_mem_usage=True, torch_dtype=torch.bfloat16, revision=self.revision
+            self.model_id, low_cpu_mem_usage=True, torch_dtype=torch.bfloat16
         ).to(torch_device)

-        tokenizer = AutoTokenizer.from_pretrained(self.model_id, revision=self.revision)
+        tokenizer = AutoTokenizer.from_pretrained(self.model_id)
         inputs = tokenizer(self.input_text, return_tensors="pt", padding=True).to(torch_device)

         output = model.generate(**inputs, max_new_tokens=20, do_sample=False)
         output_text = tokenizer.batch_decode(output, skip_special_tokens=True)
-        self.assertEqual(output_text, EXPECTED_TEXTS)
+        self.assertEqual(output_text, EXPECTED_TEXT)

     def test_model_9b_eager(self):
-        EXPECTED_TEXTS = [
-            "Hello I am doing a project on the history of the internetSolution:\n\nStep 1: Introduction\nThe history of the",
-            "Hi today I am going to show you how to make a simple and easy to make a DIY paper flower.",
-        ]
+        EXPECTED_TEXTS = Expectations(
+            {
+                ("cuda", 7): [],
+                ("cuda", 8): [
+                    "Hello I am doing a project on the history of the internet and I need to know what the first website was and what",
+                    "Hi today I am going to tell you about the most common disease in the world. This disease is called diabetes",
+                ],
+            }
+        )
+        EXPECTED_TEXT = EXPECTED_TEXTS.get_expectation()

         model = AutoModelForCausalLM.from_pretrained(
             self.model_id,
             low_cpu_mem_usage=True,
             torch_dtype=torch.bfloat16,
             attn_implementation="eager",
-            revision=self.revision,
         )
         model.to(torch_device)

-        tokenizer = AutoTokenizer.from_pretrained(self.model_id, revision=self.revision)
+        tokenizer = AutoTokenizer.from_pretrained(self.model_id)
         inputs = tokenizer(self.input_text, return_tensors="pt", padding=True).to(torch_device)

         output = model.generate(**inputs, max_new_tokens=20, do_sample=False)
         output_text = tokenizer.batch_decode(output, skip_special_tokens=True)
-        self.assertEqual(output_text, EXPECTED_TEXTS)
+        self.assertEqual(output_text, EXPECTED_TEXT)

     @require_torch_sdpa
     def test_model_9b_sdpa(self):
-        EXPECTED_TEXTS = [
-            "Hello I am doing a project on the history of the internetSolution:\n\nStep 1: Introduction\nThe history of the",
-            "Hi today I am going to show you how to make a simple and easy to make a DIY paper flower.",
-        ]
+        EXPECTED_TEXTS = Expectations(
+            {
+                ("cuda", 7): [],
+                ("cuda", 8): [
+                    "Hello I am doing a project on the history of the internet and I need to know what the first website was and what",
+                    "Hi today I am going to tell you about the most common disease in the world. This disease is called diabetes",
+                ],
+            }
+        )
+        EXPECTED_TEXT = EXPECTED_TEXTS.get_expectation()

         model = AutoModelForCausalLM.from_pretrained(
             self.model_id,
             low_cpu_mem_usage=True,
             torch_dtype=torch.bfloat16,
             attn_implementation="sdpa",
-            revision=self.revision,
         )
         model.to(torch_device)

-        tokenizer = AutoTokenizer.from_pretrained(self.model_id, revision=self.revision)
+        tokenizer = AutoTokenizer.from_pretrained(self.model_id)
         inputs = tokenizer(self.input_text, return_tensors="pt", padding=True).to(torch_device)

         output = model.generate(**inputs, max_new_tokens=20, do_sample=False)
         output_text = tokenizer.batch_decode(output, skip_special_tokens=True)
-        self.assertEqual(output_text, EXPECTED_TEXTS)
+        self.assertEqual(output_text, EXPECTED_TEXT)

     @require_flash_attn
     @pytest.mark.flash_attn_test
     def test_model_9b_flash_attn(self):
-        EXPECTED_TEXTS = [
-            "Hello I am doing a project on the history of the internetSolution:\n\nStep 1: Introduction\nThe history of the",
-            "Hi today I am going to show you how to make a simple and easy to make a DIY paper flower.",
-        ]
+        EXPECTED_TEXTS = Expectations(
+            {
+                ("cuda", 7): [],
+                ("cuda", 8): [
+                    "Hello I am doing a project on the history of the internet and I need to know what the first website was and what",
+                    "Hi today I am going to tell you about the most common disease in the world. This disease is called diabetes",
+                ],
+            }
+        )
+        EXPECTED_TEXT = EXPECTED_TEXTS.get_expectation()

         model = AutoModelForCausalLM.from_pretrained(
             self.model_id,
             low_cpu_mem_usage=True,
             torch_dtype=torch.bfloat16,
             attn_implementation="flash_attention_2",
-            revision=self.revision,
        )
         model.to(torch_device)

-        tokenizer = AutoTokenizer.from_pretrained(self.model_id, revision=self.revision)
+        tokenizer = AutoTokenizer.from_pretrained(self.model_id)
         inputs = tokenizer(self.input_text, return_tensors="pt", padding=True).to(torch_device)

         output = model.generate(**inputs, max_new_tokens=20, do_sample=False)
         output_text = tokenizer.batch_decode(output, skip_special_tokens=True)
-        self.assertEqual(output_text, EXPECTED_TEXTS)
+        self.assertEqual(output_text, EXPECTED_TEXT)
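
For readers unfamiliar with the `Expectations` helper the updated tests rely on, here is a minimal, self-contained sketch (not part of the diff) of the pattern: expected generations are keyed by `(device type, major compute capability)`, `get_expectation()` selects the entry matching the machine the code runs on, and `cleanup` releases accelerator memory afterwards, as the new `tearDown` does. The model id, prompts, and expected strings are copied from the diff above; the empty `("cuda", 7)` entry mirrors the diff, which records no reference output for that capability. It assumes a CUDA machine large enough to hold the 9B checkpoint.

```python
# Illustrative sketch of the device-keyed expectation pattern used in the updated tests.
import torch

from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.testing_utils import Expectations, cleanup, torch_device

model_id = "THUDM/GLM-4-9B-0414"

# Reference generations keyed by (device type, major compute capability);
# capability 7.x has no recorded reference output in the diff, so it stays empty here too.
expected_texts = Expectations(
    {
        ("cuda", 7): [],
        ("cuda", 8): [
            "Hello I am doing a project on the history of the internet and I need to know what the first website was and what",
            "Hi today I am going to tell you about the most common disease in the world. This disease is called diabetes",
        ],
    }
)
expected_text = expected_texts.get_expectation()  # picks the entry for the current device

model = AutoModelForCausalLM.from_pretrained(
    model_id, low_cpu_mem_usage=True, torch_dtype=torch.bfloat16
).to(torch_device)
tokenizer = AutoTokenizer.from_pretrained(model_id)

inputs = tokenizer(["Hello I am doing", "Hi today"], return_tensors="pt", padding=True).to(torch_device)
output = model.generate(**inputs, max_new_tokens=20, do_sample=False)
output_text = tokenizer.batch_decode(output, skip_special_tokens=True)
assert output_text == expected_text

cleanup(torch_device, gc_collect=True)  # free accelerator memory between runs
```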