# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
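"""
Utility that checks that the docstring of each configuration class in the library references at least one valid
checkpoint on the Hugging Face Hub, i.e. a Markdown link of the form
`[bert-base-uncased](https://huggingface.co/bert-base-uncased)`.
"""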
import inspect
import re

from transformers.utils import direct_transformers_import


# All paths are set with the assumption that this script is run from the root of the repo with the command:
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"


# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

# Mapping from model type to the corresponding configuration class, as registered with the auto classes.
CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
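# The pattern captures two groups: the checkpoint name in the link text and the full URL of its Hub page.
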
# Config classes exempt from the check, e.g. composite configs assembled from sub-model configs that have no single
# canonical checkpoint on the Hub.
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "DecisionTransformerConfig",
    "EncoderDecoderConfig",
    "MusicgenConfig",
    "RagConfig",
    "SpeechEncoderDecoderConfig",
    "TimmBackboneConfig",
    "VisionEncoderDecoderConfig",
    "VisionTextDualEncoderConfig",
    "LlamaConfig",
}


def get_checkpoint_from_config_class(config_class):
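    """
    Look for a checkpoint mentioned in the docstring of `config_class` and return the first checkpoint name whose
    link points to `https://huggingface.co/<name>`, or `None` if no such checkpoint is found.
    """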
    checkpoint = None

    # source code of `config_class`
    config_source = inspect.getsource(config_class)
    checkpoints = _re_checkpoint.findall(config_source)

    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith("/"):
            ckpt_link = ckpt_link[:-1]

        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break

    return checkpoint


def check_config_docstrings_have_checkpoints():
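    """Raise a `ValueError` listing every config class whose docstring does not reference a valid Hub checkpoint."""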
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        checkpoint = get_checkpoint_from_config_class(config_class)

        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")


if __name__ == "__main__":
    check_config_docstrings_have_checkpoints()