Enforce target version for black.

This should stabilize formatting.
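For context: black's --target-version flag pins the Python versions whose syntax black is allowed to emit. Without it, black infers the target from the syntax each file already uses, so output can vary from file to file and run to run. Pinning py35 makes the two kinds of change in this diff deterministic. A minimal sketch of the effect (illustrative only, not part of the commit):

# With a py35+ target, black consistently adds the trailing comma after
# **kwargs in multi-line calls; f(**kwargs,) is a SyntaxError before
# Python 3.5, so black only emits it when py35+ is guaranteed.
super().__init__(
    cls_token=cls_token,
    **kwargs,
)

# With a py3-only target, black also drops the redundant u"" string
# prefix, which Python 3 ignores:
sequence = "lower newer"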
Aymeric Augustin 2019-12-27 22:47:59 +01:00 committed by Julien Chaumond
parent f01b3e6680
commit 0ffc8eaf53
19 changed files with 21 additions and 21 deletions


@@ -101,7 +101,7 @@ jobs:
 # we need a version of isort with https://github.com/timothycrosley/isort/pull/1000
 - run: sudo pip install git+git://github.com/timothycrosley/isort.git@e63ae06ec7d70b06df9e528357650281a3d3ec22#egg=isort
 - run: sudo pip install .[tf,torch,quality]
-- run: black --check --line-length 119 examples templates tests src utils
+- run: black --check --line-length 119 --target-version py35 examples templates tests src utils
 - run: isort --check-only --recursive examples templates tests src utils
 - run: flake8 examples templates tests src utils
 check_repository_consistency:


@@ -3,14 +3,14 @@
 # Check that source code meets quality standards
 quality:
-black --check --line-length 119 examples templates tests src utils
+black --check --line-length 119 --target-version py35 examples templates tests src utils
 isort --check-only --recursive examples templates tests src utils
 flake8 examples templates tests src utils
 # Format source code automatically
 style:
-black --line-length 119 examples templates tests src utils
+black --line-length 119 --target-version py35 examples templates tests src utils
 isort --recursive examples templates tests src utils
 # Run tests for the library


@@ -325,7 +325,7 @@ class Model2Model(PreTrainedEncoderDecoder):
 encoder_pretrained_model_name_or_path=pretrained_model_name_or_path,
 decoder_pretrained_model_name_or_path=pretrained_model_name_or_path,
 *args,
-**kwargs
+**kwargs,
 )
 return model


@@ -250,7 +250,7 @@ class TFPreTrainedModel(tf.keras.Model):
 return_unused_kwargs=True,
 force_download=force_download,
 resume_download=resume_download,
-**kwargs
+**kwargs,
 )
 else:
 model_kwargs = kwargs


@@ -355,7 +355,7 @@ class PreTrainedModel(nn.Module):
 force_download=force_download,
 resume_download=resume_download,
 proxies=proxies,
-**kwargs
+**kwargs,
 )
 else:
 model_kwargs = kwargs


@@ -643,7 +643,7 @@ class QuestionAnsweringPipeline(Pipeline):
 framework=framework,
 args_parser=QuestionAnsweringArgumentHandler(),
 device=device,
-**kwargs
+**kwargs,
 )
 @staticmethod


@@ -87,7 +87,7 @@ class AlbertTokenizer(PreTrainedTokenizer):
 pad_token=pad_token,
 cls_token=cls_token,
 mask_token=mask_token,
-**kwargs
+**kwargs,
 )
 self.max_len_single_sentence = self.max_len - 2 # take into account special tokens


@@ -169,7 +169,7 @@ class BertTokenizer(PreTrainedTokenizer):
 pad_token=pad_token,
 cls_token=cls_token,
 mask_token=mask_token,
-**kwargs
+**kwargs,
 )
 self.max_len_single_sentence = self.max_len - 2 # take into account special tokens
 self.max_len_sentences_pair = self.max_len - 3 # take into account special tokens
@@ -560,7 +560,7 @@ class BertTokenizerFast(PreTrainedTokenizerFast):
 pad_token=pad_token,
 cls_token=cls_token,
 mask_token=mask_token,
-**kwargs
+**kwargs,
 )
 self._tokenizer = tk.Tokenizer(tk.models.WordPiece.from_files(vocab_file, unk_token=unk_token))


@@ -113,7 +113,7 @@ class BertJapaneseTokenizer(BertTokenizer):
 pad_token=pad_token,
 cls_token=cls_token,
 mask_token=mask_token,
-**kwargs
+**kwargs,
 )
 self.max_len_single_sentence = self.max_len - 2 # take into account special tokens
 self.max_len_sentences_pair = self.max_len - 3 # take into account special tokens


@@ -76,7 +76,7 @@ class CamembertTokenizer(PreTrainedTokenizer):
 pad_token=pad_token,
 mask_token=mask_token,
 additional_special_tokens=additional_special_tokens,
-**kwargs
+**kwargs,
 )
 self.max_len_single_sentence = self.max_len - 2 # take into account special tokens
 self.max_len_sentences_pair = self.max_len - 4 # take into account special tokens


@@ -95,7 +95,7 @@ class RobertaTokenizer(GPT2Tokenizer):
 cls_token=cls_token,
 pad_token=pad_token,
 mask_token=mask_token,
-**kwargs
+**kwargs,
 )
 self.max_len_single_sentence = self.max_len - 2 # take into account special tokens
 self.max_len_sentences_pair = self.max_len - 4 # take into account special tokens


@@ -96,7 +96,7 @@ class T5Tokenizer(PreTrainedTokenizer):
 unk_token=unk_token,
 pad_token=pad_token,
 additional_special_tokens=additional_special_tokens,
-**kwargs
+**kwargs,
 )
 try:


@@ -817,7 +817,7 @@ class PreTrainedTokenizer(object):
 truncation_strategy=truncation_strategy,
 pad_to_max_length=pad_to_max_length,
 return_tensors=return_tensors,
-**kwargs
+**kwargs,
 )
 return encoded_inputs["input_ids"]


@@ -586,7 +586,7 @@ class XLMTokenizer(PreTrainedTokenizer):
 cls_token=cls_token,
 mask_token=mask_token,
 additional_special_tokens=additional_special_tokens,
-**kwargs
+**kwargs,
 )
 self.max_len_single_sentence = self.max_len - 2 # take into account special tokens


@@ -83,7 +83,7 @@ class XLMRobertaTokenizer(PreTrainedTokenizer):
 cls_token=cls_token,
 pad_token=pad_token,
 mask_token=mask_token,
-**kwargs
+**kwargs,
 )
 self.max_len_single_sentence = self.max_len - 2 # take into account special tokens
 self.max_len_sentences_pair = self.max_len - 4 # take into account special tokens


@@ -86,7 +86,7 @@ class XLNetTokenizer(PreTrainedTokenizer):
 cls_token=cls_token,
 mask_token=mask_token,
 additional_special_tokens=additional_special_tokens,
-**kwargs
+**kwargs,
 )
 self.max_len_single_sentence = self.max_len - 2 # take into account special tokens


@@ -115,7 +115,7 @@ class XxxTokenizer(PreTrainedTokenizer):
 pad_token=pad_token,
 cls_token=cls_token,
 mask_token=mask_token,
-**kwargs
+**kwargs,
 )
 self.max_len_single_sentence = self.max_len - 2 # take into account special tokens
 self.max_len_sentences_pair = self.max_len - 3 # take into account special tokens


@@ -84,7 +84,7 @@ class BertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
 tokenizer = self.get_tokenizer()
 rust_tokenizer = self.get_rust_tokenizer(add_special_tokens=False)
-sequence = u"UNwant\u00E9d,running"
+sequence = "UNwant\u00E9d,running"
 tokens = tokenizer.tokenize(sequence)
 rust_tokens = rust_tokenizer.tokenize(sequence)


@@ -96,7 +96,7 @@ class GPT2TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
 tokenizer = self.get_tokenizer()
 rust_tokenizer = self.get_rust_tokenizer(add_special_tokens=False, add_prefix_space=True)
-sequence = u"lower newer"
+sequence = "lower newer"
 # Testing tokenization
 tokens = tokenizer.tokenize(sequence, add_prefix_space=True)