[Doctest] Add configuration_clip.py (#19647)

* CLIP Config for doctest
* add doc example to CLIPConfig
* add from_text_vision_configs example
* added comment explaining objective
This commit is contained in:
parent c9a0da1e12
commit fc5fdc109d
@@ -80,12 +80,12 @@ class CLIPTextConfig(PretrainedConfig):
     Example:
 
     ```python
-    >>> from transformers import CLIPTextModel, CLIPTextConfig
+    >>> from transformers import CLIPTextConfig, CLIPTextModel
 
-    >>> # Initializing a CLIPTextModel with openai/clip-vit-base-patch32 style configuration
+    >>> # Initializing a CLIPTextConfig with openai/clip-vit-base-patch32 style configuration
     >>> configuration = CLIPTextConfig()
 
-    >>> # Initializing a CLIPTextConfig from the openai/clip-vit-base-patch32 style configuration
+    >>> # Initializing a CLIPTextModel (with random weights) from the openai/clip-vit-base-patch32 style configuration
     >>> model = CLIPTextModel(configuration)
 
     >>> # Accessing the model configuration
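Not part of this commit, but for context: the example above can be extended into a small round-trip check. A minimal sketch, assuming the CLIPTextConfig/CLIPTextModel API shown in the diff; the non-default max_position_embeddings value is purely illustrative:

```python
>>> from transformers import CLIPTextConfig, CLIPTextModel

>>> # Non-default value (illustrative) so the round trip below is visible
>>> configuration = CLIPTextConfig(max_position_embeddings=128)
>>> model = CLIPTextModel(configuration)  # randomly initialized weights

>>> # The model keeps the configuration it was built from
>>> model.config.max_position_embeddings
128
```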
@@ -186,12 +186,12 @@ class CLIPVisionConfig(PretrainedConfig):
     Example:
 
     ```python
-    >>> from transformers import CLIPVisionModel, CLIPVisionConfig
+    >>> from transformers import CLIPVisionConfig, CLIPVisionModel
 
-    >>> # Initializing a CLIPVisionModel with openai/clip-vit-base-patch32 style configuration
+    >>> # Initializing a CLIPVisionConfig with openai/clip-vit-base-patch32 style configuration
     >>> configuration = CLIPVisionConfig()
 
-    >>> # Initializing a CLIPVisionModel model from the openai/clip-vit-base-patch32 style configuration
+    >>> # Initializing a CLIPVisionModel (with random weights) from the openai/clip-vit-base-patch32 style configuration
     >>> model = CLIPVisionModel(configuration)
 
     >>> # Accessing the model configuration
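The vision config in this example also accepts the usual ViT-style size parameters. A brief sketch, not part of the diff; image_size and patch_size are existing CLIPVisionConfig arguments, and the values below are illustrative rather than the openai/clip-vit-base-patch32 defaults:

```python
>>> from transformers import CLIPVisionConfig, CLIPVisionModel

>>> # Illustrative non-default sizes (336 is divisible by the 14-pixel patch)
>>> configuration = CLIPVisionConfig(image_size=336, patch_size=14)
>>> model = CLIPVisionModel(configuration)  # randomly initialized weights
>>> model.config.image_size
336
```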
@@ -270,7 +270,29 @@ class CLIPConfig(PretrainedConfig):
             The inital value of the *logit_scale* paramter. Default is used as per the original CLIP implementation.
         kwargs (*optional*):
             Dictionary of keyword arguments.
-    """
+
+    Example:
+
+    ```python
+    >>> from transformers import CLIPConfig, CLIPModel
+
+    >>> # Initializing a CLIPConfig with openai/clip-vit-base-patch32 style configuration
+    >>> configuration = CLIPConfig()
+
+    >>> # Initializing a CLIPModel (with random weights) from the openai/clip-vit-base-patch32 style configuration
+    >>> model = CLIPModel(configuration)
+
+    >>> # Accessing the model configuration
+    >>> configuration = model.config
+
+    >>> # We can also initialize a CLIPConfig from a CLIPTextConfig and a CLIPVisionConfig
+
+    >>> # Initializing a CLIPText and CLIPVision configuration
+    >>> config_text = CLIPTextConfig()
+    >>> config_vision = CLIPVisionConfig()
+
+    >>> config = CLIPConfig.from_text_vision_configs(config_text, config_vision)
+    ```"""
 
     model_type = "clip"
     is_composition = True
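Beyond the default sub-configs used in the new doctest, from_text_vision_configs also carries over customized values. A minimal sketch, not part of the diff; the hidden_size values are illustrative, and the text_config/vision_config attribute access assumes the composed CLIPConfig exposes its sub-configs that way, as recent transformers versions do:

```python
>>> from transformers import CLIPConfig, CLIPTextConfig, CLIPVisionConfig

>>> # Sub-configs with non-default (illustrative) sizes
>>> config_text = CLIPTextConfig(hidden_size=256)
>>> config_vision = CLIPVisionConfig(hidden_size=512)

>>> # The composed CLIPConfig keeps the customized values
>>> config = CLIPConfig.from_text_vision_configs(config_text, config_vision)
>>> config.text_config.hidden_size, config.vision_config.hidden_size
(256, 512)
```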
@@ -33,6 +33,7 @@ src/transformers/models/blenderbot_small/configuration_blenderbot_small.py
 src/transformers/models/blenderbot_small/modeling_blenderbot_small.py
 src/transformers/models/bloom/configuration_bloom.py
 src/transformers/models/canine/configuration_canine.py
+src/transformers/models/clip/configuration_clip.py
 src/transformers/models/codegen/configuration_codegen.py
 src/transformers/models/conditional_detr/configuration_conditional_detr.py
 src/transformers/models/conditional_detr/modeling_conditional_detr.py
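The new path is registered in alphabetical order, between the canine and codegen entries. A minimal sketch of a sortedness check; the utils/documentation_tests.txt path and the helper below are assumptions for illustration, not part of this diff:

```python
from pathlib import Path

def doctest_list_is_sorted(list_path: str = "utils/documentation_tests.txt") -> bool:
    """Return True if the registered doctest files are listed in alphabetical order."""
    entries = [line.strip() for line in Path(list_path).read_text().splitlines() if line.strip()]
    return entries == sorted(entries)

# Example (run from a transformers checkout):
# print(doctest_list_is_sorted())
```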