mirror of https://github.com/huggingface/transformers.git
more explicit notation: num_train_step => num_train_optimization_steps
This commit is contained in:
parent 5169069997
commit 1579c53635

.gitignore (vendored): 5 changed lines
@@ -119,4 +119,7 @@ dmypy.json
 .vscode
 
 # TF code
 tensorflow_code
+
+# Models
+models
@@ -438,11 +438,13 @@ def main():
     tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case)
 
     train_examples = None
-    num_train_steps = None
+    num_train_optimization_steps = None
     if args.do_train:
         train_examples = processor.get_train_examples(args.data_dir)
-        num_train_steps = int(
-            len(train_examples) / args.train_batch_size / args.gradient_accumulation_steps * args.num_train_epochs)
+        num_train_optimization_steps = int(
+            len(train_examples) / args.train_batch_size / args.gradient_accumulation_steps) * args.num_train_epochs
+        if args.local_rank != -1:
+            num_train_optimization_steps = num_train_optimization_steps // torch.distributed.get_world_size()
 
     # Prepare model
     model = BertForSequenceClassification.from_pretrained(args.bert_model,
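For context, the renamed quantity counts optimizer updates, not raw batches: one update happens every gradient_accumulation_steps batches, and the new int() wraps only the per-epoch division, as in the hunk above. A minimal standalone sketch of the same arithmetic with made-up sizes (none of the numbers below come from the repository):

# Illustrative values only; the real scripts read these from argparse.
num_examples = 100_000
train_batch_size = 32
gradient_accumulation_steps = 4
num_train_epochs = 3

# One optimizer update per `gradient_accumulation_steps` batches.
updates_per_epoch = int(num_examples / train_batch_size / gradient_accumulation_steps)  # 781
num_train_optimization_steps = updates_per_epoch * num_train_epochs                     # 2343

print(num_train_optimization_steps)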
@@ -468,9 +470,6 @@ def main():
         {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
         {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
         ]
-    t_total = num_train_steps
-    if args.local_rank != -1:
-        t_total = t_total // torch.distributed.get_world_size()
     if args.fp16:
         try:
             from apex.optimizers import FP16_Optimizer
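The context lines of this hunk show the weight-decay grouping used throughout these scripts: parameters whose names match a no_decay list get weight_decay 0.0, everything else 0.01. A self-contained sketch of the same idiom on a toy module (the toy model, the SGD optimizer, and the exact exclusion names are illustrative stand-ins, not taken verbatim from the scripts):

import torch

model = torch.nn.Sequential(torch.nn.Linear(8, 8), torch.nn.LayerNorm(8))

param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']  # typical exclusion list for BERT-style models
optimizer_grouped_parameters = [
    {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
    {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
    ]

# Any optimizer that understands parameter groups accepts this list.
optimizer = torch.optim.SGD(optimizer_grouped_parameters, lr=1e-3)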
@@ -491,7 +490,7 @@ def main():
         optimizer = BertAdam(optimizer_grouped_parameters,
                              lr=args.learning_rate,
                              warmup=args.warmup_proportion,
-                             t_total=t_total)
+                             t_total=num_train_optimization_steps)
 
     global_step = 0
     nb_tr_steps = 0
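After this change, the t_total passed to BertAdam is the optimizer-update count computed earlier, and warmup is a proportion of it, so roughly warmup_proportion * num_train_optimization_steps updates are spent ramping the learning rate up. A hedged usage sketch; the keyword names come from the diff, while the import path, the stand-in model, and the placeholder numbers are assumptions:

import torch
from pytorch_pretrained_bert.optimization import BertAdam  # import path assumed

model = torch.nn.Linear(8, 2)  # stand-in for the BERT model in the scripts
optimizer_grouped_parameters = [{'params': model.parameters(), 'weight_decay': 0.01}]

learning_rate = 5e-5           # placeholder values
warmup_proportion = 0.1
num_train_optimization_steps = 2343

optimizer = BertAdam(optimizer_grouped_parameters,
                     lr=learning_rate,
                     warmup=warmup_proportion,
                     t_total=num_train_optimization_steps)
# About 0.1 * 2343, i.e. roughly 234 of the updates, are warmup here.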
@@ -502,7 +501,7 @@ def main():
         logger.info("***** Running training *****")
         logger.info("  Num examples = %d", len(train_examples))
         logger.info("  Batch size = %d", args.train_batch_size)
-        logger.info("  Num steps = %d", num_train_steps)
+        logger.info("  Num steps = %d", num_train_optimization_steps)
         all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long)
         all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long)
         all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long)
@@ -539,7 +538,7 @@ def main():
                 if args.fp16:
                     # modify learning rate with special warm up BERT uses
                     # if args.fp16 is False, BertAdam is used that handles this automatically
-                    lr_this_step = args.learning_rate * warmup_linear(global_step/t_total, args.warmup_proportion)
+                    lr_this_step = args.learning_rate * warmup_linear(global_step/num_train_optimization_steps, args.warmup_proportion)
                     for param_group in optimizer.param_groups:
                         param_group['lr'] = lr_this_step
                     optimizer.step()
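In the fp16 branch the scripts adjust the learning rate by hand, because FP16_Optimizer wraps a plain optimizer without BertAdam's built-in schedule. The helper below is an illustrative linear warmup-then-decay schedule in the spirit of warmup_linear; it is a sketch under that assumption, not necessarily the exact function shipped with the library:

def warmup_linear(x, warmup=0.002):
    # x is training progress in [0, 1], i.e. global_step / num_train_optimization_steps.
    # Ramp linearly to the peak during the warmup fraction, then decay linearly.
    if x < warmup:
        return x / warmup
    return 1.0 - x

# Mirroring the manual update in the fp16 branch (placeholder values):
learning_rate = 5e-5
warmup_proportion = 0.1
num_train_optimization_steps = 2343
global_step = 100

lr_this_step = learning_rate * warmup_linear(global_step / num_train_optimization_steps,
                                             warmup_proportion)
print(lr_this_step)  # still in the warmup region: (100 / 2343) / 0.1 * 5e-5 is roughly 2.1e-5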
@@ -515,13 +515,15 @@ def main():
     tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case)
 
     #train_examples = None
-    num_train_steps = None
+    num_train_optimization_steps = None
     if args.do_train:
         print("Loading Train Dataset", args.train_file)
         train_dataset = BERTDataset(args.train_file, tokenizer, seq_len=args.max_seq_length,
                                     corpus_lines=None, on_memory=args.on_memory)
-        num_train_steps = int(
-            len(train_dataset) / args.train_batch_size / args.gradient_accumulation_steps * args.num_train_epochs)
+        num_train_optimization_steps = int(
+            len(train_dataset) / args.train_batch_size / args.gradient_accumulation_steps) * args.num_train_epochs
+        if args.local_rank != -1:
+            num_train_optimization_steps = num_train_optimization_steps // torch.distributed.get_world_size()
 
     # Prepare model
     model = BertForPreTraining.from_pretrained(args.bert_model)
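The same computation recurs in each training script; the new distributed branch divides by torch.distributed.get_world_size() because every worker only iterates over its shard of the batches (for instance when a DistributedSampler feeds the DataLoader). A small helper capturing that logic; the function name and the example numbers are illustrative, not from the repository:

import torch

def per_process_optimization_steps(dataset_len, batch_size, accumulation_steps, epochs):
    # Mirrors the arithmetic in these hunks, then splits the work across workers.
    steps = int(dataset_len / batch_size / accumulation_steps) * epochs
    if torch.distributed.is_available() and torch.distributed.is_initialized():
        steps = steps // torch.distributed.get_world_size()
    return steps

# Single-process run (no process group initialized):
print(per_process_optimization_steps(100_000, 32, 4, 3))  # 2343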
@@ -545,9 +547,6 @@ def main():
         {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
         ]
 
-    t_total = num_train_steps
-    if args.local_rank != -1:
-        t_total = t_total // torch.distributed.get_world_size()
     if args.fp16:
         try:
             from apex.optimizers import FP16_Optimizer
@@ -568,14 +567,14 @@ def main():
         optimizer = BertAdam(optimizer_grouped_parameters,
                              lr=args.learning_rate,
                              warmup=args.warmup_proportion,
-                             t_total=t_total)
+                             t_total=num_train_optimization_steps)
 
     global_step = 0
     if args.do_train:
         logger.info("***** Running training *****")
         logger.info("  Num examples = %d", len(train_dataset))
         logger.info("  Batch size = %d", args.train_batch_size)
-        logger.info("  Num steps = %d", num_train_steps)
+        logger.info("  Num steps = %d", num_train_optimization_steps)
 
         if args.local_rank == -1:
             train_sampler = RandomSampler(train_dataset)
@@ -608,7 +607,7 @@ def main():
                 if args.fp16:
                     # modify learning rate with special warm up BERT uses
                     # if args.fp16 is False, BertAdam is used that handles this automatically
-                    lr_this_step = args.learning_rate * warmup_linear(global_step/t_total, args.warmup_proportion)
+                    lr_this_step = args.learning_rate * warmup_linear(global_step/num_train_optimization_steps, args.warmup_proportion)
                     for param_group in optimizer.param_groups:
                         param_group['lr'] = lr_this_step
                     optimizer.step()
@@ -784,12 +784,14 @@ def main():
     tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case)
 
     train_examples = None
-    num_train_steps = None
+    num_train_optimization_steps = None
     if args.do_train:
         train_examples = read_squad_examples(
             input_file=args.train_file, is_training=True)
-        num_train_steps = int(
-            len(train_examples) / args.train_batch_size / args.gradient_accumulation_steps * args.num_train_epochs)
+        num_train_optimization_steps = int(
+            len(train_examples) / args.train_batch_size / args.gradient_accumulation_steps) * args.num_train_epochs
+        if args.local_rank != -1:
+            num_train_optimization_steps = num_train_optimization_steps // torch.distributed.get_world_size()
 
     # Prepare model
     model = BertForQuestionAnswering.from_pretrained(args.bert_model,
@@ -821,9 +823,6 @@ def main():
         {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
         ]
 
-    t_total = num_train_steps
-    if args.local_rank != -1:
-        t_total = t_total // torch.distributed.get_world_size()
     if args.fp16:
         try:
             from apex.optimizers import FP16_Optimizer
@@ -843,7 +842,7 @@ def main():
         optimizer = BertAdam(optimizer_grouped_parameters,
                              lr=args.learning_rate,
                              warmup=args.warmup_proportion,
-                             t_total=t_total)
+                             t_total=num_train_optimization_steps)
 
     global_step = 0
     if args.do_train:
@@ -869,7 +868,7 @@ def main():
         logger.info("  Num orig examples = %d", len(train_examples))
         logger.info("  Num split examples = %d", len(train_features))
         logger.info("  Batch size = %d", args.train_batch_size)
-        logger.info("  Num steps = %d", num_train_steps)
+        logger.info("  Num steps = %d", num_train_optimization_steps)
         all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long)
         all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long)
         all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long)
@@ -903,7 +902,7 @@ def main():
                 if args.fp16:
                     # modify learning rate with special warm up BERT uses
                     # if args.fp16 is False, BertAdam is used that handles this automatically
-                    lr_this_step = args.learning_rate * warmup_linear(global_step/t_total, args.warmup_proportion)
+                    lr_this_step = args.learning_rate * warmup_linear(global_step/num_train_optimization_steps, args.warmup_proportion)
                     for param_group in optimizer.param_groups:
                         param_group['lr'] = lr_this_step
                     optimizer.step()
@@ -877,12 +877,14 @@ def main():
     tokenizer = BertTokenizer.from_pretrained(args.bert_model)
 
     train_examples = None
-    num_train_steps = None
+    num_train_optimization_steps = None
     if args.do_train:
         train_examples = read_squad_examples(
             input_file=args.train_file, is_training=True)
-        num_train_steps = int(
-            len(train_examples) / args.train_batch_size / args.gradient_accumulation_steps * args.num_train_epochs)
+        num_train_optimization_steps = int(
+            len(train_examples) / args.train_batch_size / args.gradient_accumulation_steps) * args.num_train_epochs
+        if args.local_rank != -1:
+            num_train_optimization_steps = num_train_optimization_steps // torch.distributed.get_world_size()
 
     # Prepare model
     model = BertForQuestionAnswering.from_pretrained(args.bert_model,
@@ -914,9 +916,6 @@ def main():
         {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
         ]
 
-    t_total = num_train_steps
-    if args.local_rank != -1:
-        t_total = t_total // torch.distributed.get_world_size()
     if args.fp16:
         try:
             from apex.optimizers import FP16_Optimizer
@@ -936,7 +935,7 @@ def main():
         optimizer = BertAdam(optimizer_grouped_parameters,
                              lr=args.learning_rate,
                              warmup=args.warmup_proportion,
-                             t_total=t_total)
+                             t_total=num_train_optimization_steps)
 
     global_step = 0
     if args.do_train:
@@ -962,7 +961,7 @@ def main():
         logger.info("  Num orig examples = %d", len(train_examples))
         logger.info("  Num split examples = %d", len(train_features))
         logger.info("  Batch size = %d", args.train_batch_size)
-        logger.info("  Num steps = %d", num_train_steps)
+        logger.info("  Num steps = %d", num_train_optimization_steps)
         all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long)
         all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long)
         all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long)
@@ -997,7 +996,7 @@ def main():
                 if args.fp16:
                     # modify learning rate with special warm up BERT uses
                     # if args.fp16 is False, BertAdam is used that handles this automatically
-                    lr_this_step = args.learning_rate * warmup_linear(global_step/t_total, args.warmup_proportion)
+                    lr_this_step = args.learning_rate * warmup_linear(global_step/num_train_optimization_steps, args.warmup_proportion)
                     for param_group in optimizer.param_groups:
                         param_group['lr'] = lr_this_step
                     optimizer.step()
@@ -349,11 +349,13 @@ def main():
     tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case)
 
     train_examples = None
-    num_train_steps = None
+    num_train_optimization_steps = None
    if args.do_train:
         train_examples = read_swag_examples(os.path.join(args.data_dir, 'train.csv'), is_training = True)
-        num_train_steps = int(
-            len(train_examples) / args.train_batch_size / args.gradient_accumulation_steps * args.num_train_epochs)
+        num_train_optimization_steps = int(
+            len(train_examples) / args.train_batch_size / args.gradient_accumulation_steps) * args.num_train_epochs
+        if args.local_rank != -1:
+            num_train_optimization_steps = num_train_optimization_steps // torch.distributed.get_world_size()
 
     # Prepare model
     model = BertForMultipleChoice.from_pretrained(args.bert_model,
@@ -384,9 +386,6 @@ def main():
         {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
         {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
         ]
-    t_total = num_train_steps
-    if args.local_rank != -1:
-        t_total = t_total // torch.distributed.get_world_size()
     if args.fp16:
         try:
             from apex.optimizers import FP16_Optimizer
@@ -406,7 +405,7 @@ def main():
         optimizer = BertAdam(optimizer_grouped_parameters,
                              lr=args.learning_rate,
                              warmup=args.warmup_proportion,
-                             t_total=t_total)
+                             t_total=num_train_optimization_steps)
 
     global_step = 0
     if args.do_train:
@@ -415,7 +414,7 @@ def main():
         logger.info("***** Running training *****")
         logger.info("  Num examples = %d", len(train_examples))
         logger.info("  Batch size = %d", args.train_batch_size)
-        logger.info("  Num steps = %d", num_train_steps)
+        logger.info("  Num steps = %d", num_train_optimization_steps)
         all_input_ids = torch.tensor(select_field(train_features, 'input_ids'), dtype=torch.long)
         all_input_mask = torch.tensor(select_field(train_features, 'input_mask'), dtype=torch.long)
         all_segment_ids = torch.tensor(select_field(train_features, 'segment_ids'), dtype=torch.long)
@@ -455,7 +454,7 @@ def main():
                 if args.fp16:
                     # modify learning rate with special warm up BERT uses
                     # if args.fp16 is False, BertAdam is used that handles this automatically
-                    lr_this_step = args.learning_rate * warmup_linear(global_step/t_total, args.warmup_proportion)
+                    lr_this_step = args.learning_rate * warmup_linear(global_step/num_train_optimization_steps, args.warmup_proportion)
                     for param_group in optimizer.param_groups:
                         param_group['lr'] = lr_this_step
                     optimizer.step()