fix deprecation warnings (#7033)
* fix deprecation warnings
* remove tests/test_tokenization_common.py's test_padding_to_max_length
* revert test_padding_to_max_length
This commit is contained in:
parent 576eec98e0
commit 4d39148419
@@ -127,7 +127,7 @@ def load_tf_weights_in_funnel(model, config, tf_checkpoint_path):
         skipped = False
         for m_name in name[1:]:
             if not isinstance(pointer, FunnelPositionwiseFFN) and re.fullmatch(r"layer_\d+", m_name):
-                layer_index = int(re.search("layer_(\d+)", m_name).groups()[0])
+                layer_index = int(re.search(r"layer_(\d+)", m_name).groups()[0])
                 if layer_index < config.num_hidden_layers:
                     block_idx = 0
                     while layer_index >= config.block_sizes[block_idx]:
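The hunk above is the typical pattern for silencing Python's "invalid escape sequence" DeprecationWarning: the backslash escape \d is meant for the regex engine, not the string literal, so the pattern is turned into a raw string. A minimal, self-contained sketch of the before/after behaviour (the example input string is illustrative, not taken from the repo):

import re

# Before: "layer_(\d+)" as a plain string literal triggers
# "DeprecationWarning: invalid escape sequence \d" when the module is compiled.
# After: the raw-string form passes the backslash through to re unchanged,
# so the match is identical and the warning disappears.
pattern = r"layer_(\d+)"
layer_index = int(re.search(pattern, "layer_3").groups()[0])
print(layer_index)  # -> 3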
@@ -699,7 +699,7 @@ class TFConv1D(tf.keras.layers.Layer):
 
 
 class TFSharedEmbeddings(tf.keras.layers.Layer):
-    """
+    r"""
     Construct shared token embeddings.
 
     The weights of the embedding layer is usually shared with the weights of the linear decoder when doing
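The same fix applies to docstrings: a backslash escape inside a plain triple-quoted string raises the identical warning, while an r-prefixed docstring stores the text verbatim. A hypothetical stand-in class, not the actual TFSharedEmbeddings source:

class SharedEmbeddingsSketch:
    r"""
    Construct shared token embeddings.

    The raw-string prefix keeps backslash sequences such as \sqrt intact
    without emitting an invalid-escape DeprecationWarning.
    """

print(SharedEmbeddingsSketch.__doc__)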
@@ -156,7 +156,7 @@ class TokenizerTesterMixin:
         tokenizers = self.get_tokenizers()
         for tokenizer in tokenizers:
             with self.subTest(f"{tokenizer.__class__.__name__}"):
-                self.assertNotEqual(tokenizer.max_len, 42)
+                self.assertNotEqual(tokenizer.model_max_length, 42)
 
         # Now let's start the test
         tokenizers = self.get_tokenizers()
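The third hunk swaps the deprecated tokenizer.max_len attribute for tokenizer.model_max_length, the name introduced by the v3 tokenizer refactor; reading max_len still resolves to the same value at this point but logs a deprecation warning. A short usage sketch, assuming a standard pretrained checkpoint such as bert-base-uncased:

from transformers import AutoTokenizer

# model_max_length is the supported attribute; the older max_len alias still
# returns the same value here but emits a deprecation warning when accessed.
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
print(tokenizer.model_max_length)  # 512 for bert-base-uncased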