update config tests and circle-ci

thomwolf 2019-07-02 12:40:39 +02:00
parent 1484d67de9
commit 99ae5ab883
6 changed files with 57 additions and 2 deletions

View File

@@ -10,7 +10,7 @@ jobs:
       - run: sudo pip install pytest codecov pytest-cov
       - run: sudo pip install spacy ftfy==4.4.3
       - run: sudo python -m spacy download en
-      - run: python -m pytest -sv tests/ --cov
+      - run: python -m pytest -sv ./pytorch_pretrained_bert/tests/ --cov
       - run: codecov
   build_py2:
     working_directory: ~/pytorch-pretrained-BERT
@@ -22,7 +22,7 @@ jobs:
       - run: sudo pip install pytest codecov pytest-cov
       - run: sudo pip install spacy ftfy==4.4.3
       - run: sudo python -m spacy download en
-      - run: python -m pytest -sv tests/ --cov
+      - run: python -m pytest -sv ./pytorch_pretrained_bert/tests/ --cov
       - run: codecov
 workflows:
   version: 2

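Both jobs now point pytest at the test suite bundled inside the package directory rather than a top-level tests/ folder, so the Python 2 and Python 3 builds exercise the same files. The CI invocation can be reproduced locally (assuming pytest and pytest-cov are installed in the active environment) with:

    python -m pytest -sv ./pytorch_pretrained_bert/tests/ --cov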
View File

@@ -175,6 +175,19 @@ class GPT2Config(PretrainedConfig):
     def total_tokens_embeddings(self):
         return self.vocab_size + self.n_special
+
+    @property
+    def hidden_size(self):
+        return self.n_embd
+
+    @property
+    def num_attention_heads(self):
+        return self.n_head
+
+    @property
+    def num_hidden_layers(self):
+        return self.n_layer
+
 
 
 class Attention(nn.Module):
     def __init__(self, nx, n_ctx, config, scale=False):

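These read-only aliases give GPT2Config the same surface that BERT-style code expects (hidden_size, num_attention_heads, num_hidden_layers) without renaming the model-specific attributes. A minimal sketch of the pattern, using a hypothetical IllustrativeConfig rather than any class from the repo:

    class IllustrativeConfig(object):
        """Hypothetical config mimicking the aliasing pattern added above."""

        def __init__(self, n_embd=768, n_head=12, n_layer=12):
            # Model-specific attribute names, as in GPT2Config...
            self.n_embd = n_embd
            self.n_head = n_head
            self.n_layer = n_layer

        @property
        def hidden_size(self):
            # ...exposed under the shared, model-agnostic name.
            return self.n_embd

        @property
        def num_attention_heads(self):
            return self.n_head

        @property
        def num_hidden_layers(self):
            return self.n_layer

    config = IllustrativeConfig()
    assert config.hidden_size == config.n_embd == 768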
View File

@@ -206,6 +206,18 @@ class OpenAIGPTConfig(PretrainedConfig):
     def total_tokens_embeddings(self):
         return self.vocab_size + self.n_special
+
+    @property
+    def hidden_size(self):
+        return self.n_embd
+
+    @property
+    def num_attention_heads(self):
+        return self.n_head
+
+    @property
+    def num_hidden_layers(self):
+        return self.n_layer
 
 
 class Attention(nn.Module):
     def __init__(self, nx, n_ctx, config, scale=False):

View File

@@ -289,6 +289,17 @@ class TransfoXLConfig(PretrainedConfig):
             raise ValueError("First argument must be either a vocabulary size (int)"
                              "or the path to a pretrained model config file (str)")
 
+    @property
+    def hidden_size(self):
+        return self.d_model
+
+    @property
+    def num_attention_heads(self):
+        return self.n_head
+
+    @property
+    def num_hidden_layers(self):
+        return self.n_layer
 
 
 class PositionalEmbedding(nn.Module):

View File

@@ -313,6 +313,18 @@ class XLNetConfig(PretrainedConfig):
             raise ValueError("First argument must be either a vocabulary size (int)"
                              "or the path to a pretrained model config file (str)")
 
+    @property
+    def hidden_size(self):
+        return self.d_model
+
+    @property
+    def num_attention_heads(self):
+        return self.n_head
+
+    @property
+    def num_hidden_layers(self):
+        return self.n_layer
+
 
 try:
     from apex.normalization.fused_layer_norm import FusedLayerNorm as XLNetLayerNorm

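Because TransfoXLConfig and XLNetConfig map the same three properties onto their own d_model/n_head/n_layer attributes, code that consumes configs no longer needs per-model branches. A hedged sketch of that payoff — describe_config is an illustrative helper, not a function from the repo:

    def describe_config(config):
        # Works identically for GPT2Config, OpenAIGPTConfig, TransfoXLConfig
        # and XLNetConfig: each aliases its model-specific names to this trio.
        return "hidden_size=%d, heads=%d, layers=%d" % (
            config.hidden_size, config.num_attention_heads, config.num_hidden_layers)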
View File

@@ -184,6 +184,12 @@ class ConfigTester(object):
         self.config_class = config_class
         self.inputs_dict = kwargs
 
+    def create_and_test_config_common_properties(self):
+        config = self.config_class(**self.inputs_dict)
+        self.parent.assertTrue(hasattr(config, 'hidden_size'))
+        self.parent.assertTrue(hasattr(config, 'num_attention_heads'))
+        self.parent.assertTrue(hasattr(config, 'num_hidden_layers'))
+
     def create_and_test_config_to_json_string(self):
         config = self.config_class(**self.inputs_dict)
         obj = json.loads(config.to_json_string())
@@ -199,6 +205,7 @@ class ConfigTester(object):
         self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())
 
     def run_common_tests(self):
+        self.create_and_test_config_common_properties()
         self.create_and_test_config_to_json_string()
         self.create_and_test_config_to_json_file()
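A per-model test can then drive these shared checks in a few lines. A sketch of the intended wiring, with the ConfigTester constructor signature inferred from the attributes it sets above (the parent test case, a config_class, and kwargs that become inputs_dict); GPT2ConfigTest is a hypothetical example, and the import path is assumed:

    import unittest

    from pytorch_pretrained_bert import GPT2Config  # assumed import path

    class GPT2ConfigTest(unittest.TestCase):
        def test_config(self):
            # The kwargs become inputs_dict, the arguments used to build the
            # config under test; run_common_tests() now also verifies the
            # three common properties added in this commit.
            tester = ConfigTester(self, config_class=GPT2Config, n_embd=37)
            tester.run_common_tests()

    if __name__ == '__main__':
        unittest.main()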