Mirror of https://github.com/huggingface/transformers.git (synced 2025-08-01 02:31:11 +06:00)

Commit 2fb9a934b4 (parent c8731b9583): re-format

This commit only re-formats code: the script's imports are regrouped into standard-library / third-party / package blocks, and two statements that had been wrapped across source lines are re-joined onto single lines.
@@ -1,20 +1,22 @@
+import json
+import logging
+import random
+from argparse import ArgumentParser
+from collections import namedtuple
+from pathlib import Path
-import os
-import torch
-import logging
-import json
-import random
-import numpy as np
-from collections import namedtuple
 from tempfile import TemporaryDirectory
 
+import numpy as np
+import torch
 from torch.utils.data import DataLoader, Dataset, RandomSampler
 from torch.utils.data.distributed import DistributedSampler
 from tqdm import tqdm
 
 from pytorch_transformers import WEIGHTS_NAME, CONFIG_NAME
 from pytorch_transformers.modeling_bert import BertForPreTraining
+from pytorch_transformers.optimization import AdamW, WarmupLinearSchedule
 from pytorch_transformers.tokenization_bert import BertTokenizer
-from pytorch_transformers.optimization import AdamW, WarmupLinearSchedule
 
+
 InputFeatures = namedtuple("InputFeatures", "input_ids input_mask segment_ids lm_label_ids is_next")
 
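For context on the names this hunk touches: InputFeatures, defined at the end of the hunk, is the per-example record the script reads from the pregenerated data. Below is a minimal sketch of one such record; the field names come straight from the namedtuple in the diff, while every value (the token ids, the sequence length of 8, the -1 ignore index) is an illustrative assumption rather than anything from this commit.

import numpy as np
from collections import namedtuple

InputFeatures = namedtuple("InputFeatures", "input_ids input_mask segment_ids lm_label_ids is_next")

# All values below are made-up placeholders for a max sequence length of 8.
features = InputFeatures(
    input_ids=np.array([101, 2023, 2003, 103, 102, 0, 0, 0]),   # wordpiece ids, zero-padded
    input_mask=np.array([1, 1, 1, 1, 1, 0, 0, 0]),              # 1 = real token, 0 = padding
    segment_ids=np.array([0, 0, 0, 0, 0, 0, 0, 0]),             # sentence A / sentence B ids
    lm_label_ids=np.array([-1, -1, -1, 2157, -1, -1, -1, -1]),  # masked-LM targets; -1 = not masked
    is_next=1,                                                  # next-sentence-prediction label
)
print(features.input_mask.sum())  # number of real tokens -> 5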
@@ -123,8 +125,7 @@ def main():
     parser = ArgumentParser()
     parser.add_argument('--pregenerated_data', type=Path, required=True)
     parser.add_argument('--output_dir', type=Path, required=True)
-    parser.add_argument("--bert_model", type=str, required=True,
-                        help="Bert pre-trained model selected in the list: bert-base-uncased, "
+    parser.add_argument("--bert_model", type=str, required=True, help="Bert pre-trained model selected in the list: bert-base-uncased, "
                              "bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.")
     parser.add_argument("--do_lower_case", action="store_true")
     parser.add_argument("--reduce_memory", action="store_true",
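One subtlety the joined line preserves: the help text still continues onto the next source line with no + operator, relying on Python's compile-time concatenation of adjacent string literals. A self-contained sketch of the same pattern (the argument list passed to parse_args is an illustrative assumption):

from argparse import ArgumentParser

parser = ArgumentParser()
# Adjacent string literals are fused at compile time, so help= receives one
# continuous string even though it is written across two lines.
parser.add_argument("--bert_model", type=str, required=True,
                    help="Bert pre-trained model selected in the list: bert-base-uncased, "
                         "bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.")
args = parser.parse_args(["--bert_model", "bert-base-uncased"])
print(args.bert_model)  # -> bert-base-uncased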
@@ -336,8 +337,7 @@ def main():
     # Save a trained model
     if args.local_rank == -1 or torch.distributed.get_rank() == 0:
         logging.info("** ** * Saving fine-tuned model ** ** * ")
-        model_to_save = model.module if hasattr(model,
-                                                'module') else model  # Take care of distributed/parallel training
+        model_to_save = model.module if hasattr(model, 'module') else model  # Take care of distributed/parallel training
         model_to_save.save_pretrained(args.output_dir)
         tokenizer.save_pretrained(args.output_dir)
 
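The block above is the usual pytorch_transformers checkpoint pattern: only one process writes in distributed runs, and the model is unwrapped via .module before saving so the checkpoint stores the plain module rather than its DataParallel/DistributedDataParallel wrapper. A minimal sketch of the full round trip; the helper names save_checkpoint and load_checkpoint are hypothetical, while save_pretrained and from_pretrained are the library's real API:

from pytorch_transformers.modeling_bert import BertForPreTraining
from pytorch_transformers.tokenization_bert import BertTokenizer

def save_checkpoint(model, tokenizer, output_dir):
    # Unwrap DataParallel/DistributedDataParallel so the checkpoint holds
    # the plain module's weights rather than the wrapper's state.
    model_to_save = model.module if hasattr(model, 'module') else model
    model_to_save.save_pretrained(output_dir)  # writes weights + config.json
    tokenizer.save_pretrained(output_dir)      # writes the vocab files

def load_checkpoint(output_dir):
    # The same directory round-trips through from_pretrained.
    model = BertForPreTraining.from_pretrained(output_dir)
    tokenizer = BertTokenizer.from_pretrained(output_dir)
    return model, tokenizer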