update ruff version (#30932)

* update ruff version

* fix research projects

* Empty

* Fix errors

---------

Co-authored-by: Lysandre <lysandre@huggingface.co>
Arthur 2024-05-22 06:40:15 +02:00 committed by GitHub
parent 60bb571e99
commit 673440d073
1172 changed files with 1555 additions and 1861 deletions

View File

@@ -5,7 +5,7 @@ export PYTHONPATH = src
 check_dirs := examples tests src utils

-exclude_folders := examples/research_projects
+exclude_folders := ""

 modified_only_fixup:
	$(eval modified_py_files := $(shell python utils/get_modified_files.py $(check_dirs)))
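
Note: the Makefile change above stops excluding `examples/research_projects` from linting, which is why the commit message has a separate "fix research projects" step. `modified_only_fixup` only lints files that changed relative to the fork point with `main`, as listed by `utils/get_modified_files.py`. A hypothetical Python sketch of what such a helper does (an assumption for illustration; the real script may differ in details):

# Sketch: print the .py files under the given directories that changed
# relative to the merge-base with main, so `make fixup` only lints what a
# branch actually touched.
import re
import subprocess
import sys

fork_point = subprocess.check_output("git merge-base main HEAD".split(), text=True).strip()
modified = subprocess.check_output(
    f"git diff --diff-filter=d --name-only {fork_point}".split(), text=True
).split()

dirs = "|".join(sys.argv[1:])  # e.g. examples tests src utils
regex = re.compile(rf"^({dirs}).*?\.py$")
print(" ".join(f for f in modified if regex.match(f)))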

View File

@@ -20,6 +20,7 @@ text file or a dataset.
 Here is the full list of checkpoints on the hub that can be fine-tuned by this script:
 https://huggingface.co/models?filter=fill-mask
 """
+
 import json
 import logging
 import math
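
Note: most hunks in this commit are this same one-line change. Ruff 0.4.4's formatter normalizes the gap between a module docstring and the first import to exactly one blank line, so files with no separating blank gain one (as here) and files with two blanks lose one (the `-15,7 +15,6` hunks below). A minimal sketch of the normalized layout (an illustrative file, not from the repo):

"""An example module docstring."""

import json  # exactly one blank line above, as the updated formatter enforces
import logging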

View File

@@ -19,6 +19,7 @@ Pretraining the library models for T5-like span-masked language modeling on a te
 Here is the full list of checkpoints on the hub that can be pretrained by this script:
 https://huggingface.co/models?filter=t5
 """
+
 import json
 import logging
 import math

View File

@@ -15,6 +15,7 @@
 """
 Post-processing utilities for question answering.
 """
+
 import collections
 import json
 import logging

View File

@@ -14,6 +14,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 """Finetuning a 🤗 Flax Transformers model for sequence classification on GLUE."""
+
 import json
 import logging
 import math

View File

@@ -14,6 +14,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 """Fine-tuning a 🤗 Flax Transformers model on token classification tasks (NER, POS, CHUNKS)"""
+
 import json
 import logging
 import math

View File

@@ -93,14 +93,14 @@ class Plot:
             self.result_dict[model_name]["seq_len"].append(int(row["sequence_length"]))
             if can_convert_to_int(row["result"]):
                 # value is not None
-                self.result_dict[model_name]["result"][
-                    (int(row["batch_size"]), int(row["sequence_length"]))
-                ] = int(row["result"])
+                self.result_dict[model_name]["result"][(int(row["batch_size"]), int(row["sequence_length"]))] = (
+                    int(row["result"])
+                )
             elif can_convert_to_float(row["result"]):
                 # value is not None
-                self.result_dict[model_name]["result"][
-                    (int(row["batch_size"]), int(row["sequence_length"]))
-                ] = float(row["result"])
+                self.result_dict[model_name]["result"][(int(row["batch_size"]), int(row["sequence_length"]))] = (
+                    float(row["result"])
+                )

     def plot(self):
         fig, ax = plt.subplots()
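
Note: the only mechanical change in this hunk is layout. When a subscripted assignment overflows the line limit, the newer formatter keeps the subscript on one line and parenthesizes the right-hand side, instead of splitting the subscript across lines. A self-contained illustration (hypothetical data, not from the repo):

# Old layout (split inside the subscript):
# result_dict["result"][
#     (int(row["batch_size"]), int(row["sequence_length"]))
# ] = int(row["result"])

result_dict = {"result": {}}
row = {"batch_size": "8", "sequence_length": "128", "result": "42"}

# New layout (subscript intact, parenthesized right-hand side):
result_dict["result"][(int(row["batch_size"]), int(row["sequence_length"]))] = (
    int(row["result"])
)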

View File

@@ -15,7 +15,6 @@
 # limitations under the License.
 """Finetuning the library models for multiple choice (Bert, Roberta, XLNet)."""
-

 import logging
 import os
 from dataclasses import dataclass, field

View File

@@ -15,7 +15,6 @@
 # limitations under the License.
 """Multiple choice fine-tuning: utilities to work with multiple choice tasks of reading comprehension"""
-

 import csv
 import glob
 import json

View File

@@ -15,7 +15,6 @@
 # limitations under the License.
 """Finetuning the library models for question-answering on SQuAD (DistilBERT, Bert, XLM, XLNet)."""
-

 import argparse
 import glob
 import logging

View File

@@ -15,7 +15,6 @@
 # limitations under the License.
 """Fine-tuning the library models for question-answering."""
-

 import logging
 import os
 import sys

View File

@@ -20,7 +20,6 @@ GPT, GPT-2 and CTRL are fine-tuned using a causal language modeling (CLM) loss.
 using a masked language modeling (MLM) loss. XLNet is fine-tuned using a permutation language modeling (PLM) loss.
 """
-

 import logging
 import math
 import os

View File

@@ -28,6 +28,7 @@
         --output_dir ../log \
         --train_batch_size 16 \
 """
+
 import argparse
 import csv
 import logging

View File

@@ -18,7 +18,6 @@
 Finetuning the library models for multiple choice on SWAG (Bert).
 """
-

 import argparse
 import csv
 import glob

View File

@@ -21,7 +21,6 @@
 This script with default values evaluates a pretrained Transformer-XL on WikiText 103
 """
-

 import argparse
 import logging
 import math

View File

@@ -23,7 +23,6 @@ Inspired by https://github.com/pytorch/pytorch/blob/master/torch/distributed/lau
 """
-

 import importlib
 import sys
 from argparse import REMAINDER, ArgumentParser

View File

@@ -14,6 +14,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 """Fine-tuning the library models for named entity recognition on CoNLL-2003."""
+
 import logging
 import os
 import sys

View File

@@ -15,7 +15,6 @@
 # limitations under the License.
 """Named entity recognition fine-tuning: utilities to work with CoNLL-2003 task."""
-

 import logging
 import os
 from dataclasses import dataclass

View File

@@ -13,6 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 """Finetuning any 🤗 Transformers model for image classification leveraging 🤗 Accelerate."""
+
 import argparse
 import json
 import logging

View File

@@ -15,6 +15,7 @@
 """
 A subclass of `Trainer` specific to Question-Answering tasks
 """
+
 import math
 import time

View File

@@ -15,6 +15,7 @@
 """
 A subclass of `Trainer` specific to Question-Answering tasks
 """
+
 import math
 import time
 from typing import Dict, List, Optional

View File

@@ -15,6 +15,7 @@
 """
 Post-processing utilities for question answering.
 """
+
 import collections
 import json
 import logging

View File

@@ -13,6 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 """Finetuning a 🤗 Transformers model for sequence classification on GLUE."""
+
 import argparse
 import json
 import logging

View File

@@ -14,9 +14,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-""" Conditional text generation with the auto-regressive models of the library (GPT/GPT-2/CTRL/Transformer-XL/XLNet)
-"""
-
+"""Conditional text generation with the auto-regressive models of the library (GPT/GPT-2/CTRL/Transformer-XL/XLNet)"""

 import argparse
 import inspect

View File

@@ -19,7 +19,6 @@ Running this example:
 python run_generation_contrastive_search.py --model_name_or_path=openai-community/gpt2-large --penalty_alpha=0.6 --k=4 --length=256
 """
-

 import argparse
 import logging

View File

@@ -23,7 +23,6 @@ Inspired by https://github.com/pytorch/pytorch/blob/master/torch/distributed/lau
 """
-

 import importlib
 import sys
 from argparse import REMAINDER, ArgumentParser

View File

@@ -15,7 +15,6 @@
 # limitations under the License.
 """PyTorch BERT model with Patience-based Early Exit."""
-

 import logging

 import torch

View File

@@ -15,7 +15,6 @@
 # limitations under the License.
 """Training and inference using the library models for sequence classification on GLUE (Bert, Albert) with PABEE."""
-

 import argparse
 import glob
 import json

View File

@@ -14,6 +14,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 """BertAbs configuration"""
+
 import logging

 from transformers import PretrainedConfig

View File

@@ -19,6 +19,7 @@
 Some parts of this script are adapted from the code of Michel et al. (http://arxiv.org/abs/1905.10650)
 which is available at https://github.com/pmichel31415/are-16-heads-really-better-than-1
 """
+
 import argparse
 import logging
 import os

View File

@@ -15,6 +15,7 @@
 """The distiller to distil the student.
 Adapted in part from Facebook, Inc XLM model (https://github.com/facebookresearch/XLM)
 """
+
 import math
 import os
 import time

View File

@@ -12,8 +12,8 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-""" Adapted from PyTorch Vision (https://github.com/pytorch/vision/blob/master/references/detection/group_by_aspect_ratio.py)
-"""
+"""Adapted from PyTorch Vision (https://github.com/pytorch/vision/blob/master/references/detection/group_by_aspect_ratio.py)"""
+
 import bisect
 import copy
 from collections import defaultdict

View File

@@ -15,6 +15,7 @@
 """Dataset to distilled models
 adapted in part from Facebook, Inc XLM model (https://github.com/facebookresearch/XLM)
 """
+
 import numpy as np
 import torch
 from torch.utils.data import Dataset

View File

@@ -15,6 +15,7 @@
 """
 Preprocessing script before distillation.
 """
+
 import argparse
 import logging
 import pickle

View File

@@ -16,6 +16,7 @@
 Preprocessing script before training the distilled model.
 Specific to RoBERTa -> DistilRoBERTa and GPT2 -> DistilGPT2.
 """
+
 import argparse

 import torch

View File

@@ -16,6 +16,7 @@
 Preprocessing script before training DistilBERT.
 Specific to BERT -> DistilBERT.
 """
+
 import argparse

 import torch

View File

@@ -15,6 +15,7 @@
 """
 Preprocessing script before training the distilled model.
 """
+
 import argparse
 import logging
 import pickle

View File

@@ -16,6 +16,7 @@
 Training the distilled model.
 Supported architectures include: BERT -> DistilBERT, RoBERTa -> DistilRoBERTa, GPT2 -> DistilGPT2.
 """
+
 import argparse
 import json
 import os

View File

@@ -15,6 +15,7 @@
 """Utils to train DistilBERT
 adapted in part from Facebook, Inc XLM model (https://github.com/facebookresearch/XLM)
 """
+
 import json
 import logging
 import os

View File

@@ -20,6 +20,7 @@ text file or a dataset.
 Here is the full list of checkpoints on the hub that can be fine-tuned by this script:
 https://huggingface.co/models?filter=fill-mask
 """
+
 import logging
 import os
 import sys

View File

@@ -15,6 +15,7 @@
 See the License for the specific language governing permissions and
 limitations under the License.import copy
 """
+
 import itertools
 import math
 import os

View File

@@ -15,6 +15,7 @@
 See the License for the specific language governing permissions and
 limitations under the License.import copy
 """
+
 import sys
 from typing import Tuple

View File

@@ -15,6 +15,7 @@
 See the License for the specific language governing permissions and
 limitations under the License.import copy
 """
+
 import colorsys
 import io

View File

@@ -15,7 +15,6 @@
 # limitations under the License.
 """Finetuning the library models for multimodal multiclass prediction on MM-IMDB dataset."""
-

 import argparse
 import glob
 import json

View File

@@ -15,6 +15,7 @@
 Count remaining (non-zero) weights in the encoder (i.e. the transformer layers).
 Sparsity and remaining weights levels are equivalent: sparsity % = 100 - remaining weights %.
 """
+
 import argparse
 import os

View File

@@ -16,7 +16,6 @@
 """Masked BERT model configuration. It replicates the class `~transformers.BertConfig`
 and adapts it to the specificities of MaskedBert (`pruning_method`, `mask_init` and `mask_scale`."""
-

 import logging

 from transformers.configuration_utils import PretrainedConfig

View File

@@ -18,7 +18,6 @@
 compute the adaptive mask.
 Built on top of `transformers.models.bert.modeling_bert`"""
-

 import logging
 import math

View File

@@ -15,7 +15,6 @@
 # limitations under the License.
 """Fine-pruning Masked BERT for question-answering on SQuAD."""
-

 import argparse
 import glob
 import logging

View File

@@ -13,9 +13,8 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-"""
-"""
+""" """

 import argparse
 import logging
 import os

View File

@@ -19,6 +19,7 @@ text file or a dataset.
 Here is the full list of checkpoints on the hub that can be fine-tuned by this script:
 https://huggingface.co/models?filter=fill-mask
 """
+
 import logging
 import os
 import sys

View File

@@ -13,6 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 """Finetuning the library models for question-answering on SQuAD (DistilBERT, Bert, XLM, XLNet)."""
+
 import argparse
 import logging
 import os

View File

@@ -13,6 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 """Helper functions for training models with pytorch-quantization"""
+
 import logging
 import re

View File

@@ -15,6 +15,7 @@
 """
 Post-processing utilities for question answering.
 """
+
 import collections
 import json
 import logging

View File

@@ -15,6 +15,7 @@
 See the License for the specific language governing permissions and
 limitations under the License.import copy
 """
+
 import itertools
 import math
 import os

View File

@@ -15,6 +15,7 @@
 See the License for the specific language governing permissions and
 limitations under the License.import copy
 """
+
 import sys
 from typing import Tuple

View File

@@ -15,6 +15,7 @@
 See the License for the specific language governing permissions and
 limitations under the License.import copy
 """
+
 import colorsys
 import io

View File

@@ -93,14 +93,14 @@ class Plot:
             self.result_dict[model_name]["seq_len"].append(int(row["sequence_length"]))
             if can_convert_to_int(row["result"]):
                 # value is not None
-                self.result_dict[model_name]["result"][
-                    (int(row["batch_size"]), int(row["sequence_length"]))
-                ] = int(row["result"])
+                self.result_dict[model_name]["result"][(int(row["batch_size"]), int(row["sequence_length"]))] = (
+                    int(row["result"])
+                )
             elif can_convert_to_float(row["result"]):
                 # value is not None
-                self.result_dict[model_name]["result"][
-                    (int(row["batch_size"]), int(row["sequence_length"]))
-                ] = float(row["result"])
+                self.result_dict[model_name]["result"][(int(row["batch_size"]), int(row["sequence_length"]))] = (
+                    float(row["result"])
+                )

     def plot(self):
         fig, ax = plt.subplots()

View File

@@ -15,6 +15,7 @@
 """
 Post-processing utilities for question answering.
 """
+
 import collections
 import json
 import logging

View File

@@ -156,7 +156,7 @@ _deps = [
     "rhoknp>=1.1.0,<1.3.1",
     "rjieba",
     "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
-    "ruff==0.1.5",
+    "ruff==0.4.4",
     "sacrebleu>=1.4.12,<2.0.0",
     "sacremoses",
     "safetensors>=0.4.1",

View File

@@ -16,6 +16,7 @@
 Audio processing functions to extract features from audio waveforms. This code is pure numpy to support all frameworks
 and remove unnecessary dependencies.
 """
+
 import warnings
 from typing import Optional, Tuple, Union

View File

@@ -17,7 +17,6 @@
 Benchmarking the library on inference and training in PyTorch.
 """
-

 import timeit
 from typing import Callable, Optional

View File

@@ -17,7 +17,6 @@
 Benchmarking the library on inference and training in PyTorch.
 """
-

 import random
 import timeit
 from functools import wraps

View File

@@ -249,7 +249,6 @@ def measure_peak_memory_cpu(function: Callable[[], None], interval=0.5, device_i
     else:

         class MemoryMeasureProcess(Process):
-
             """
             `MemoryMeasureProcess` inherits from `Process` and overwrites its `run()` method. Used to measure the
             memory usage of a process

View File

@@ -15,7 +15,6 @@
 # limitations under the License.
 """Configuration base class and utilities."""
-

 import copy
 import json
 import os

View File

@@ -400,7 +400,7 @@ def optimize(onnx_model_path: Path) -> Path:
     sess_option.optimized_model_filepath = opt_model_path.as_posix()
     _ = InferenceSession(onnx_model_path.as_posix(), sess_option)

-    print(f"Optimized model has been written at {opt_model_path}: \N{heavy check mark}")
+    print(f"Optimized model has been written at {opt_model_path}: \N{HEAVY CHECK MARK}")
     print("/!\\ Optimized model contains hardware specific operators which might not be portable. /!\\")

     return opt_model_path

@@ -475,7 +475,7 @@ def quantize(onnx_model_path: Path) -> Path:
     quantized_model_path = generate_identified_filename(onnx_model_path, "-quantized")

     # Save model
-    print(f"Quantized model has been written at {quantized_model_path}: \N{heavy check mark}")
+    print(f"Quantized model has been written at {quantized_model_path}: \N{HEAVY CHECK MARK}")
     onnx.save_model(quantizer.model.model, quantized_model_path.as_posix())

     return quantized_model_path

@@ -489,9 +489,9 @@ def verify(path: Path):
     try:
         onnx_options = SessionOptions()
         _ = InferenceSession(path.as_posix(), onnx_options, providers=["CPUExecutionProvider"])
-        print(f"Model {path} correctly loaded: \N{heavy check mark}")
+        print(f"Model {path} correctly loaded: \N{HEAVY CHECK MARK}")
     except RuntimeException as re:
-        print(f"Error while loading the model {re}: \N{heavy ballot x}")
+        print(f"Error while loading the model {re}: \N{HEAVY BALLOT X}")

 if __name__ == "__main__":
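
Note: these three hunks only change the casing inside `\N{...}` escapes. Python resolves the names case-insensitively (the lowercase spellings shipped in released versions and ran fine), so the change is a cosmetic normalization to the official uppercase Unicode names that the updated ruff enforces. A quick demonstration:

# Both spellings denote the same character; only the canonical name is uppercase.
assert "\N{heavy check mark}" == "\N{HEAVY CHECK MARK}" == "\u2714"
print("\N{HEAVY CHECK MARK}", "\N{HEAVY BALLOT X}")  # prints: ✔ ✘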

View File

@@ -14,7 +14,6 @@
 # limitations under the License.
 """Convert pytorch checkpoints to TensorFlow"""
-

 import argparse
 import os

View File

@@ -14,7 +14,6 @@
 # limitations under the License.
 """Convert Seq2Seq TF Hub checkpoint."""
-

 import argparse

 from . import (

View File

@@ -20,7 +20,6 @@ additional na_prob.json file is provided. This file is expected to map question
 probability that a question is unanswerable.
 """
-

 import collections
 import json
 import math

View File

@@ -15,7 +15,6 @@
 # limitations under the License.
 """XNLI utils (dataset loading and evaluation)"""
-

 import os

 from ...utils import logging

View File

@@ -17,6 +17,7 @@ in `integrations/deepspeed` instead.
 Check: https://github.com/huggingface/transformers/pull/25599
 """
+
 import warnings

View File

@@ -62,7 +62,7 @@ deps = {
     "rhoknp": "rhoknp>=1.1.0,<1.3.1",
     "rjieba": "rjieba",
     "rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
-    "ruff": "ruff==0.1.5",
+    "ruff": "ruff==0.4.4",
     "sacrebleu": "sacrebleu>=1.4.12,<2.0.0",
     "sacremoses": "sacremoses",
     "safetensors": "safetensors>=0.4.1",

View File

@@ -13,6 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 """Utilities to dynamically load objects from the Hub."""
+
 import filecmp
 import importlib
 import importlib.util

View File

@@ -15,6 +15,7 @@
 """
 Sequence feature extraction class for common feature extractors to preprocess sequences.
 """
+
 from typing import Dict, List, Optional, Union

 import numpy as np

View File

@@ -387,9 +387,9 @@ class StopStringCriteria(StoppingCriteria):
         # Since this is lots of very small assignments of lists, we build it with numpy rather
         # than torch for speed + simplicity, then convert to torch at the end
         for token_idx, valid_positions in positions.items():
-            gather_vec[
-                token_idx, max_valid_positions * i : max_valid_positions * i + len(valid_positions)
-            ] = valid_positions
+            gather_vec[token_idx, max_valid_positions * i : max_valid_positions * i + len(valid_positions)] = (
+                valid_positions
+            )
         for token_idx, possible_end_lens in end_lens.items():
             gather_vec[
                 token_idx,

View File

@@ -68,7 +68,6 @@ class WatermarkDetectorOutput:

 class WatermarkDetector:
     r"""
-
     Detector for detection of watermark generated text. The detector needs to be given the exact same settings that were
     given during text generation to replicate the watermark greenlist generation and so detect the watermark. This includes

View File

@@ -13,7 +13,6 @@
 # limitations under the License.
 "AQLM (Additive Quantization of Language Model) integration file"
-

 from ..utils import is_accelerate_available, is_aqlm_available, is_torch_available

View File

@@ -12,6 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 "AWQ (Activation aware Weight Quantization) integration file"
+
 from ..activations import ACT2FN
 from ..modeling_utils import PreTrainedModel
 from ..utils import is_auto_awq_available, is_torch_available, logging

View File

@@ -14,6 +14,7 @@
 """
 Integration with Deepspeed
 """
+
 import copy
 import importlib.metadata as importlib_metadata
 import importlib.util

View File

@@ -17,6 +17,7 @@
 Integration with GGML / The file is copied and adapted from https://github.com/99991/pygguf
 with extra methods beings exposed
 """
+
 from array import array

 import numpy as np

View File

@@ -14,6 +14,7 @@
 """
 Integrations with other Python libraries.
 """
+
 import functools
 import importlib.metadata
 import importlib.util

View File

@@ -14,7 +14,6 @@
 # limitations under the License.
 """Configuration base class and utilities."""
-

 import copy
 import json
 import os

View File

@@ -14,7 +14,6 @@
 # limitations under the License.
 """PyTorch - Flax general utilities."""
-

 import os
 from pickle import UnpicklingError
 from typing import Dict, Tuple

View File

@@ -15,7 +15,6 @@
 # limitations under the License.
 """PyTorch - TF 2.0 general utilities."""
-

 import os
 import re

View File

@@ -14,6 +14,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 """ALBERT model configuration"""
+
 from collections import OrderedDict
 from typing import Mapping

View File

@@ -14,7 +14,6 @@
 # limitations under the License.
 """Convert ALBERT checkpoint."""
-

 import argparse

 import torch

View File

@@ -15,7 +15,6 @@
 # limitations under the License.
 """TF 2.0 ALBERT model."""
-

 from __future__ import annotations

 import math

View File

@@ -14,7 +14,6 @@
 # limitations under the License.
 """Tokenization classes for ALBERT model."""
-

 import os
 import unicodedata
 from shutil import copyfile

View File

@@ -14,7 +14,6 @@
 # limitations under the License.
 """Tokenization classes for ALBERT model."""
-

 import os
 from shutil import copyfile
 from typing import List, Optional, Tuple

View File

@@ -16,7 +16,6 @@
 Image/Text processor class for ALIGN
 """
-

 from ...processing_utils import ProcessorMixin
 from ...tokenization_utils_base import BatchEncoding

View File

@@ -13,6 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 """AltCLIP model configuration"""
+
 import os
 from typing import Union

View File

@@ -13,6 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 """PyTorch AltCLIP model."""
+
 import math
 from dataclasses import dataclass
 from typing import Any, List, Optional, Tuple, Union

View File

@@ -15,6 +15,7 @@
 """
 Image/Text processor class for AltCLIP
 """
+
 import warnings

 from ...processing_utils import ProcessorMixin

View File

@@ -14,7 +14,6 @@
 # limitations under the License.
 """Audio Spectogram Transformer (AST) model configuration"""
-

 from ...configuration_utils import PretrainedConfig
 from ...utils import logging

View File

@@ -14,7 +14,6 @@
 # limitations under the License.
 """Convert Audio Spectrogram Transformer checkpoints from the original repository. URL: https://github.com/YuanGongND/ast"""
-

 import argparse
 import json
 from pathlib import Path

View File

@@ -13,6 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 """Factory function to build auto-model classes."""
+
 import copy
 import importlib
 import json

View File

@@ -13,6 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 """Auto Config class."""
+
 import importlib
 import os
 import re

View File

@@ -13,6 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 """AutoFeatureExtractor class."""
+
 import importlib
 import json
 import os

View File

@@ -13,6 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 """AutoImageProcessor class."""
+
 import importlib
 import json
 import os

View File

@@ -14,7 +14,6 @@
 # limitations under the License.
 """Auto Model class."""
-

 from collections import OrderedDict

 from ...utils import logging

View File

@@ -14,7 +14,6 @@
 # limitations under the License.
 """Auto Model class."""
-

 import warnings
 from collections import OrderedDict

Some files were not shown because too many files have changed in this diff.