[model cards] fix yaml in cards (#7207)

This commit is contained in:
Stas Bekman 2020-09-17 11:11:17 -07:00 committed by GitHub
parent e643a29722
commit 9c5bcab5b0
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
13 changed files with 44 additions and 35 deletions

View File

@@ -1,7 +1,8 @@
 ---
-language: en, de
+language:
+- en
+- de
 thumbnail:
 tags:
 - translation

View File

@@ -1,7 +1,8 @@
 ---
-language: en, de
+language:
+- en
+- de
 thumbnail:
 tags:
 - translation
@@ -21,7 +22,7 @@ metrics:
 This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for en-de.
-For more details, please see, [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).
+For more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).
 All 3 models are available:

View File

@@ -1,7 +1,8 @@
 ---
-language: en, de
+language:
+- en
+- de
 thumbnail:
 tags:
 - translation

View File

@@ -1,7 +1,9 @@
 ---
-language: de, en
+language:
+- de
+- en
 thumbnail:
 tags:
 - translation

View File

@@ -1,7 +1,9 @@
 ---
-language: de, en
+language:
+- de
+- en
 thumbnail:
 tags:
 - translation

View File

@@ -1,13 +1,13 @@
 ---
 <!-- This file has been auto-generated by src/transformers/convert_fsmt_original_pytorch_checkpoint_to_pytorch.py - DO NOT EDIT or your changes will be lost -->
-language: de, en
+language:
+- de
+- en
 thumbnail:
 tags:
 - translation
 - wmt19
 - facebook
 license: Apache 2.0
 datasets:
 - http://www.statmt.org/wmt19/ ([test-set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561))

View File

@@ -1,13 +1,13 @@
 ---
 <!-- This file has been auto-generated by src/transformers/convert_fsmt_original_pytorch_checkpoint_to_pytorch.py - DO NOT EDIT or your changes will be lost -->
-language: en, de
+language:
+- en
+- de
 thumbnail:
 tags:
 - translation
 - wmt19
 - facebook
 license: Apache 2.0
 datasets:
 - http://www.statmt.org/wmt19/ ([test-set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561))

View File

@@ -1,13 +1,13 @@
 ---
 <!-- This file has been auto-generated by src/transformers/convert_fsmt_original_pytorch_checkpoint_to_pytorch.py - DO NOT EDIT or your changes will be lost -->
-language: en, ru
+language:
+- en
+- ru
 thumbnail:
 tags:
 - translation
 - wmt19
 - facebook
 license: Apache 2.0
 datasets:
 - http://www.statmt.org/wmt19/ ([test-set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561))

View File

@@ -1,13 +1,13 @@
 ---
 <!-- This file has been auto-generated by src/transformers/convert_fsmt_original_pytorch_checkpoint_to_pytorch.py - DO NOT EDIT or your changes will be lost -->
-language: ru, en
+language:
+- ru
+- en
 thumbnail:
 tags:
 - translation
 - wmt19
 - facebook
 license: Apache 2.0
 datasets:
 - http://www.statmt.org/wmt19/ ([test-set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561))

View File

@@ -25,8 +25,9 @@ def write_model_card(model_card_dir, src_lang, tgt_lang, model_name):
     readme = f"""
 ---
-language: {src_lang}, {tgt_lang}
+language:
+- {src_lang}
+- {tgt_lang}
 thumbnail:
 tags:
 - translation

View File

@@ -25,7 +25,9 @@ def write_model_card(model_card_dir, src_lang, tgt_lang, model_name):
     readme = f"""
 ---
-language: {src_lang}, {tgt_lang}
+language:
+- {src_lang}
+- {tgt_lang}
 thumbnail:
 tags:
 - translation

View File

@@ -26,14 +26,14 @@ def write_model_card(model_card_dir, src_lang, tgt_lang):
     readme = f"""
 ---
 <!-- This file has been auto-generated by src/transformers/convert_fsmt_original_pytorch_checkpoint_to_pytorch.py - DO NOT EDIT or your changes will be lost -->
-language: {src_lang}, {tgt_lang}
+language:
+- {src_lang}
+- {tgt_lang}
 thumbnail:
 tags:
 - translation
 - wmt19
 - facebook
 license: Apache 2.0
 datasets:
 - http://www.statmt.org/wmt19/ ([test-set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561))
@@ -69,7 +69,7 @@ mname = "facebook/wmt19-{src_lang}-{tgt_lang}"
 tokenizer = FSMTTokenizer.from_pretrained(mname)
 model = FSMTForConditionalGeneration.from_pretrained(mname)
-input = "{texts[src_lang]}
+input = "{texts[src_lang]}"
 input_ids = tokenizer.encode(input, return_tensors="pt")
 outputs = model.generate(input_ids)
 decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)

View File

@@ -56,8 +56,7 @@ logger = logging.get_logger(__name__)
 _CONFIG_FOR_DOC = "FSMTConfig"
 _TOKENIZER_FOR_DOC = "FSMTTokenizer"
-# See all FSMT models at https://huggingface.co/models?search=fsmt
+# See all FSMT models at https://huggingface.co/models?filter=fsmt
 # Porting notes:
 # this one is modeled after BartModel*