mirror of
https://github.com/huggingface/transformers.git
synced 2025-07-31 10:12:23 +06:00
Fixed typo in Llama configuration docstring (#35520)
Update configuration_llama.py: there is no `num_heads` parameter, only `num_attention_heads`
This commit is contained in:
parent
3b1be043cd
commit
1650e0e514
@@ -124,7 +124,7 @@ class LlamaConfig(PretrainedConfig):
         mlp_bias (`bool`, *optional*, defaults to `False`):
             Whether to use a bias in up_proj, down_proj and gate_proj layers in the MLP layers.
         head_dim (`int`, *optional*):
-            The attention head dimension. If None, it will default to hidden_size // num_heads
+            The attention head dimension. If None, it will default to hidden_size // num_attention_heads

     ```python
     >>> from transformers import LlamaModel, LlamaConfig
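For context, a minimal sketch of the corrected default (assuming a transformers release where `LlamaConfig` exposes the `head_dim` argument; the exact values shown are illustrative):

```python
>>> from transformers import LlamaConfig

>>> # head_dim left unset: falls back to hidden_size // num_attention_heads
>>> config = LlamaConfig(hidden_size=4096, num_attention_heads=32)
>>> config.head_dim
128

>>> # an explicit head_dim overrides the derived default
>>> config = LlamaConfig(hidden_size=4096, num_attention_heads=32, head_dim=64)
>>> config.head_dim
64
```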