mirror of https://github.com/huggingface/transformers.git
synced 2025-08-01 18:51:14 +06:00
clean up
parent 834b485b2e
commit 6b0da96b4b
@@ -192,8 +192,7 @@ class ColaProcessor(DataProcessor):
         return examples


-def convert_examples_to_features(examples, label_list, max_seq_length,
-                                 tokenizer):
+def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer):
     """Loads a data file into a list of `InputBatch`s."""

     label_map = {}
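For context, `convert_examples_to_features` turns the processor's examples into tokenized, padded feature records. A hedged sketch of how the reflowed signature is typically driven from `main()` (`ColaProcessor` comes from the hunk header above; `args` and `tokenizer` are assumed to be set up elsewhere in the script and are not shown in this diff):

    # Hypothetical driver code, assuming the DataProcessor interface used
    # in this file (get_labels / get_train_examples).
    processor = ColaProcessor()
    label_list = processor.get_labels()
    train_examples = processor.get_train_examples(args.data_dir)

    train_features = convert_examples_to_features(
        train_examples, label_list, args.max_seq_length, tokenizer)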
@@ -380,7 +379,7 @@ def main():
     parser.add_argument("--do_lower_case",
                         default=False,
                         action='store_true',
-                        help="Whether to lower case the input text. Should be True for uncased models and False for cased models.")
+                        help="Whether to lower case the input text. True for uncased models, False for cased models.")
     parser.add_argument("--max_seq_length",
                         default=128,
                         type=int,
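Note the pattern in the context lines above: with `action='store_true'` and `default=False`, `--do_lower_case` is a bare on/off switch rather than a value the user types. A minimal standalone sketch of that argparse behavior:

    import argparse

    # store_true makes the flag a switch: absent -> False, present -> True.
    parser = argparse.ArgumentParser()
    parser.add_argument("--do_lower_case",
                        default=False,
                        action='store_true')

    print(parser.parse_args([]).do_lower_case)                   # False
    print(parser.parse_args(["--do_lower_case"]).do_lower_case)  # True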
@@ -424,6 +423,10 @@ def main():
                         default=False,
                         action='store_true',
                         help="Whether not to use CUDA when available")
+    parser.add_argument("--accumulate_gradients",
+                        type=int,
+                        default=1,
+                        help="Number of steps to accumulate gradient on (divide the single step batch_size)")
     parser.add_argument("--local_rank",
                         type=int,
                         default=-1,
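The new `--accumulate_gradients` option splits one logical batch into several smaller forward/backward passes, stepping the optimizer only after the gradients have been summed. A minimal sketch of the pattern, under the assumption suggested by the help text (the loop below is illustrative, not this script's exact training code; `model`, `optimizer`, and `train_dataloader` are placeholders):

    # Divide the per-step batch size, as the help text describes.
    train_batch_size = args.train_batch_size // args.accumulate_gradients

    model.zero_grad()
    for step, batch in enumerate(train_dataloader):
        loss = model(batch)
        # Scale the loss so the accumulated gradients match one full-batch step.
        loss = loss / args.accumulate_gradients
        loss.backward()
        if (step + 1) % args.accumulate_gradients == 0:
            optimizer.step()
            model.zero_grad()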
@@ -18,15 +18,15 @@ from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function

-import six
 import argparse
 import collections
 import logging
 import json
 import math
 import os
-from tqdm import tqdm, trange
+import six
 import random
+from tqdm import tqdm, trange

 import numpy as np
 import torch