Mirror of https://github.com/huggingface/transformers.git, synced 2025-07-29 09:12:21 +06:00

* First draft
* More improvements
* Improve model, add custom CUDA code
* Import torch before
* Add script that imports custom layer
* Add everything in new ops directory
* Import custom layer in modeling file
* Fix ARCHIVE_MAP typo
* Create the custom kernel on the fly
* Import custom layer in modeling file
* More improvements
* Fix CUDA loading
* More improvements
* Improve conversion script
* Improve conversion script
* Make it work until encoder_outputs
* Make forward pass work
* More improvements
* Make logits match original implementation
* Make implementation also support single_scale model
* Add support for single_scale and dilation checkpoints
* Add support for with_box_refine model
* Also support two-stage model
* Improve tests
* Fix more tests
* Make more tests pass
* Upload all models to the hub
* Clean up some code
* Improve decoder outputs
* Rename intermediate hidden states and reference points
* Improve model outputs
* Move tests to dedicated folder
* Improve model outputs
* Fix retain_grad test
* Improve docs
* Clean up and make test_initialization pass
* Improve variable names
* Add copied from statements
* Improve docs
* Fix style
* Improve docs
* Improve docs, move tests to model folder
* Fix rebase
* Remove DetrForSegmentation from auto mapping
* Apply suggestions from code review
* Improve variable names and docstrings
* Apply some more suggestions from code review
* Apply suggestion from code review
* Better docs and variable names
* Add hint about num_queries and two_stage confusion
* Remove asserts and refactor code
* Raise an exception if two_stage is True and with_box_refine is False
* Use f-strings
* Improve docs and variable names
* Fix code quality
* Fix rebase
* Add require_torch_gpu decorator
* Add pip install ninja to CI jobs
* Apply suggestion of @sgugger
* Remove DeformableDetrForObjectDetection from auto mapping
* Remove DeformableDetrModel from auto mapping
* Add model to toctree
* Add model back to mappings, skip model in pipeline tests
* Apply @sgugger's suggestion
* Fix imports in the init
* Fix copies
* Add CPU implementation
* Comment out GPU function
* Undo previous change
* Apply more suggestions
* Remove require_torch_gpu decorator
* Fix quality
* Add logger.info
* Fix logger
* Fix variable names
* Fix initialization
* Add missing initialization
* Update checkpoint name
* Add model to doc tests
* Add CPU/GPU equivalence test
* Add Deformable DETR to pipeline tests
* Skip model for object detection pipeline

Co-authored-by: Nicolas Patry <patry.nicolas@protonmail.com>
Co-authored-by: Nouamane Tazi <nouamane98@gmail.com>
Co-authored-by: Sylvain Gugger <Sylvain.gugger@gmail.com>
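The "add custom CUDA code" and "create the custom kernel on the fly" items refer to JIT-compiling the multi-scale deformable attention op at import time, which is also why the CI jobs need `pip install ninja`. A minimal sketch of that pattern with PyTorch's cpp_extension loader; the file names and compiler flags here are illustrative assumptions, not the library's actual layout:

```python
# Sketch only: JIT-compile a custom CUDA op the first time it is needed.
# Source file names below are hypothetical; ninja must be installed for load() to build.
import os
from torch.utils.cpp_extension import load


def load_deformable_attention_kernel(src_dir):
    sources = [
        os.path.join(src_dir, "vision.cpp"),             # assumed C++ bindings
        os.path.join(src_dir, "ms_deform_attn_cuda.cu"),  # assumed CUDA implementation
    ]
    # torch.utils.cpp_extension.load compiles the sources into an importable module on the fly.
    return load(
        name="MultiScaleDeformableAttention",
        sources=sources,
        with_cuda=True,
        extra_cuda_cflags=["-DCUDA_HAS_FP16=1"],
        verbose=True,
    )
```

The "Add CPU implementation" and "Comment out GPU function" items indicate that a pure-PyTorch fallback is used when the compiled kernel is unavailable.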
93 lines
4.6 KiB
Plaintext
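The listing below appears to be the plaintext documentation-test file list that the "Add model to doc tests" step extends with modeling_deformable_detr.py. Assuming pytest's doctest collection is the runner, the docstring examples of a single listed module could be exercised roughly like this (a sketch, not a documented entry point):

```python
# Sketch: run the docstring examples of one listed module via pytest's doctest support.
import pytest

if __name__ == "__main__":
    pytest.main(
        [
            "--doctest-modules",
            "src/transformers/models/deformable_detr/modeling_deformable_detr.py",
            "-sv",
            "--doctest-continue-on-failure",
        ]
    )
```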
docs/source/en/quicktour.mdx
docs/source/es/quicktour.mdx
docs/source/en/pipeline_tutorial.mdx
docs/source/en/autoclass_tutorial.mdx
docs/source/en/task_summary.mdx
docs/source/en/model_doc/speech_to_text.mdx
docs/source/en/model_doc/t5.mdx
docs/source/en/model_doc/t5v1.1.mdx
docs/source/en/model_doc/byt5.mdx
docs/source/en/model_doc/tapex.mdx
docs/source/en/model_doc/donut.mdx
docs/source/en/model_doc/encoder-decoder.mdx
src/transformers/generation_utils.py
src/transformers/models/albert/modeling_albert.py
src/transformers/models/albert/modeling_tf_albert.py
src/transformers/models/bart/modeling_bart.py
src/transformers/models/beit/modeling_beit.py
src/transformers/models/bert/modeling_bert.py
src/transformers/models/bert/modeling_tf_bert.py
src/transformers/models/bigbird_pegasus/modeling_bigbird_pegasus.py
src/transformers/models/big_bird/modeling_big_bird.py
src/transformers/models/blenderbot/modeling_blenderbot.py
src/transformers/models/blenderbot_small/modeling_blenderbot_small.py
src/transformers/models/convnext/modeling_convnext.py
src/transformers/models/ctrl/modeling_ctrl.py
src/transformers/models/cvt/modeling_cvt.py
src/transformers/models/data2vec/modeling_data2vec_audio.py
src/transformers/models/data2vec/modeling_data2vec_vision.py
src/transformers/models/deberta/modeling_deberta.py
src/transformers/models/deberta_v2/modeling_deberta_v2.py
src/transformers/models/deformable_detr/modeling_deformable_detr.py
src/transformers/models/deit/modeling_deit.py
src/transformers/models/deit/modeling_tf_deit.py
src/transformers/models/detr/modeling_detr.py
src/transformers/models/dpt/modeling_dpt.py
src/transformers/models/electra/modeling_electra.py
src/transformers/models/electra/modeling_tf_electra.py
src/transformers/models/glpn/modeling_glpn.py
src/transformers/models/gpt2/modeling_gpt2.py
src/transformers/models/gptj/modeling_gptj.py
src/transformers/models/hubert/modeling_hubert.py
src/transformers/models/layoutlm/modeling_layoutlm.py
src/transformers/models/layoutlm/modeling_tf_layoutlm.py
src/transformers/models/layoutlmv2/modeling_layoutlmv2.py
src/transformers/models/layoutlmv3/modeling_layoutlmv3.py
src/transformers/models/layoutlmv3/modeling_tf_layoutlmv3.py
src/transformers/models/longformer/modeling_longformer.py
src/transformers/models/longformer/modeling_tf_longformer.py
src/transformers/models/longt5/modeling_longt5.py
src/transformers/models/marian/modeling_marian.py
src/transformers/models/mbart/modeling_mbart.py
src/transformers/models/mobilebert/modeling_mobilebert.py
src/transformers/models/mobilebert/modeling_tf_mobilebert.py
src/transformers/models/mobilevit/modeling_mobilevit.py
src/transformers/models/mobilevit/modeling_tf_mobilevit.py
src/transformers/models/opt/modeling_opt.py
src/transformers/models/opt/modeling_tf_opt.py
src/transformers/models/owlvit/modeling_owlvit.py
src/transformers/models/pegasus/modeling_pegasus.py
src/transformers/models/plbart/modeling_plbart.py
src/transformers/models/poolformer/modeling_poolformer.py
src/transformers/models/reformer/modeling_reformer.py
src/transformers/models/regnet/modeling_regnet.py
src/transformers/models/regnet/modeling_tf_regnet.py
src/transformers/models/resnet/modeling_resnet.py
src/transformers/models/resnet/modeling_tf_resnet.py
src/transformers/models/roberta/modeling_roberta.py
src/transformers/models/roberta/modeling_tf_roberta.py
src/transformers/models/segformer/modeling_segformer.py
src/transformers/models/sew/modeling_sew.py
src/transformers/models/sew_d/modeling_sew_d.py
src/transformers/models/speech_encoder_decoder/modeling_speech_encoder_decoder.py
src/transformers/models/speech_to_text/modeling_speech_to_text.py
src/transformers/models/speech_to_text_2/modeling_speech_to_text_2.py
src/transformers/models/segformer/modeling_tf_segformer.py
src/transformers/models/swin/modeling_swin.py
src/transformers/models/trocr/modeling_trocr.py
src/transformers/models/unispeech/modeling_unispeech.py
src/transformers/models/unispeech_sat/modeling_unispeech_sat.py
src/transformers/models/van/modeling_van.py
src/transformers/models/videomae/modeling_videomae.py
src/transformers/models/vilt/modeling_vilt.py
src/transformers/models/vision_encoder_decoder/modeling_vision_encoder_decoder.py
src/transformers/models/vit/modeling_vit.py
src/transformers/models/vit/modeling_tf_vit.py
src/transformers/models/vit_mae/modeling_vit_mae.py
src/transformers/models/wav2vec2/modeling_wav2vec2.py
src/transformers/models/wav2vec2/tokenization_wav2vec2.py
src/transformers/models/wav2vec2_conformer/modeling_wav2vec2_conformer.py
src/transformers/models/wav2vec2_with_lm/processing_wav2vec2_with_lm.py
src/transformers/models/wavlm/modeling_wavlm.py
src/transformers/models/yolos/modeling_yolos.py
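With src/transformers/models/deformable_detr/modeling_deformable_detr.py in this list, its docstring example runs a full object-detection forward pass. A minimal sketch of the same flow, assuming the public SenseTime/deformable-detr checkpoint and the generic Auto feature extractor (both assumptions, not quoted from the file):

```python
# Sketch of a Deformable DETR object-detection forward pass (checkpoint name assumed).
import requests
import torch
from PIL import Image
from transformers import AutoFeatureExtractor, DeformableDetrForObjectDetection

url = "http://images.cocodataset.org/val2017/000000039769.jpg"  # common demo image
image = Image.open(requests.get(url, stream=True).raw)

feature_extractor = AutoFeatureExtractor.from_pretrained("SenseTime/deformable-detr")
model = DeformableDetrForObjectDetection.from_pretrained("SenseTime/deformable-detr")

inputs = feature_extractor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# logits: (batch, num_queries, num_labels); pred_boxes: (batch, num_queries, 4)
print(outputs.logits.shape, outputs.pred_boxes.shape)
```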