Mirror of https://github.com/huggingface/transformers.git, synced 2025-07-03 12:50:06 +06:00

* rework converter
* Update modular_model_converter.py
* Update modular_model_converter.py
* Update modular_model_converter.py
* Update modular_model_converter.py
* cleaning
* cleaning
* finalize imports
* imports
* Update modular_model_converter.py
* Better renaming to avoid visiting same file multiple times
* start converting files
* style
* address most comments
* style
* remove unused stuff in get_needed_imports
* style
* move class dependency functions outside class
* Move main functions outside class
* style
* Update modular_model_converter.py
* rename func
* add augmented dependencies
* Update modular_model_converter.py
* Add types_to_file_type + tweak annotation handling
* Allow assignment dependency mapping + fix regex
* style + update modular examples
* fix modular_roberta example (wrong redefinition of __init__)
* slightly correct order in which dependencies will appear
* style
* review comments
* Performance + better handling of dependencies when they are imported
* style
* Add advanced new classes capabilities
* style
* add forgotten check
* Update modeling_llava_next_video.py
* Add priority list ordering in check_conversion as well
* Update check_modular_conversion.py
* Update configuration_gemma.py
18 lines
527 B
Python
import torch.nn as nn

from transformers.models.bert.modeling_bert import BertEmbeddings, BertModel


class RobertaEmbeddings(BertEmbeddings):
    def __init__(self, config):
        super().__init__(config)
        self.pad_token_id = config.pad_token_id
        # Re-create the position embeddings so the pad token serves as
        # padding_idx (Roberta's position numbering is offset by the pad token).
        self.position_embeddings = nn.Embedding(
            config.max_position_embeddings, config.hidden_size, padding_idx=config.pad_token_id
        )


class RobertaModel(BertModel):
    def __init__(self, config, add_pooling_layer=True):
        super().__init__(config, add_pooling_layer)
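A modular file like this is not meant to be imported directly: it is input to the converter named in the commit log, which expands the inheritance into a standalone modeling_roberta.py (and check_modular_conversion.py then verifies the generated file stays in sync with the modular definition). Below is a minimal sketch of invoking the converter from Python; the script path, the --files_to_parse flag, and the example file path are assumptions based on the commit log and the usual transformers repository layout, not something this page documents.

import subprocess

# Run the modular converter on the Roberta example above, from the repository
# root. The script path and the --files_to_parse flag are assumed from the
# commit log and may differ in your checkout.
subprocess.run(
    [
        "python",
        "utils/modular_model_converter.py",
        "--files_to_parse",
        "examples/modular-transformers/modular_roberta.py",
    ],
    check=True,
)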