Mirror of https://github.com/huggingface/transformers.git (synced 2025-08-03 03:31:05 +06:00)
Commit 367fe5d043 ("copy fixes")
Parent: d9f0a8a304
@@ -481,12 +481,10 @@ class LiltLayer(GradientCheckpointingLayer):


class LiltEncoder(nn.Module):
    # Copied from transformers.models.bert.modeling_bert.BertEncoder.__init__ with Bert->Lilt
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.layer = nn.ModuleList([LiltLayer(config) for _ in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False

    def forward(
        self,
@@ -414,7 +414,7 @@ class TapasAttention(nn.Module):
         self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
         self.pruned_heads = self.pruned_heads.union(heads)

-    # Copied from transformers.models.bert.modeling_bert.BertAttention.forward
+    # Copied from transformers.models.rembert.modeling_rembert.RemBertAttention.forward
     def forward(
         self,
         hidden_states: torch.Tensor,
@@ -485,7 +485,7 @@ class TapasLayer(GradientCheckpointingLayer):
         self.intermediate = TapasIntermediate(config)
         self.output = TapasOutput(config)

-    # Copied from transformers.models.bert.modeling_bert.BertLayer.forward
+    # Copied from transformers.models.rembert.modeling_rembert.RemBertLayer.forward
     def forward(
         self,
         hidden_states: torch.Tensor,