mirror of https://github.com/huggingface/transformers.git
Fix missing usage of token (#25382)

* add missing tokens

* fix

---------

Co-authored-by: ydshieh <ydshieh@users.noreply.github.com>
commit 9c7b744795
parent 5bd8c011bb
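The change is the same across every touched example script: thread an explicit `token` through each call that can hit the Hugging Face Hub, and deprecate `use_auth_token` in its favor. A minimal sketch of the pattern in isolation (the checkpoint name is a placeholder, not something from this commit):

# Sketch of the pattern this commit applies everywhere: pass an explicit
# `token` (instead of the deprecated `use_auth_token`) to every call that
# may download from the Hub. "org/some-model" is a placeholder.
from transformers import AutoConfig, AutoTokenizer

token = None  # a str for private repos, or None to fall back to the cached login

config = AutoConfig.from_pretrained(
    "org/some-model",
    token=token,
    trust_remote_code=False,
)
tokenizer = AutoTokenizer.from_pretrained(
    "org/some-model",
    token=token,
    trust_remote_code=False,
)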
@@ -342,11 +342,19 @@ def main():
     # 5. Load pretrained model, tokenizer, and image processor
     if model_args.tokenizer_name:
         tokenizer = AutoTokenizer.from_pretrained(
-            model_args.tokenizer_name, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer
+            model_args.tokenizer_name,
+            cache_dir=model_args.cache_dir,
+            use_fast=model_args.use_fast_tokenizer,
+            token=model_args.token,
+            trust_remote_code=model_args.trust_remote_code,
         )
     elif model_args.model_name_or_path:
         tokenizer = AutoTokenizer.from_pretrained(
-            model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer
+            model_args.model_name_or_path,
+            cache_dir=model_args.cache_dir,
+            use_fast=model_args.use_fast_tokenizer,
+            token=model_args.token,
+            trust_remote_code=model_args.trust_remote_code,
         )
     else:
         raise ValueError(

@@ -17,6 +17,7 @@ import argparse
 import logging
 import math
 import os
+import warnings
 from pathlib import Path
 
 import datasets
@@ -186,14 +187,20 @@ def parse_args():
         default=None,
         help="Name or path of preprocessor config.",
     )
     parser.add_argument(
+        "--token",
+        type=str,
+        default=None,
+        help=(
+            "The token to use as HTTP bearer authorization for remote files. If not specified, will use the token "
+            "generated when running `huggingface-cli login` (stored in `~/.huggingface`)."
+        ),
+    )
+    parser.add_argument(
         "--use_auth_token",
         type=bool,
-        default=False,
-        help=(
-            "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
-            "with private models)."
-        ),
+        default=None,
+        help="The `use_auth_token` argument is deprecated and will be removed in v4.34. Please use `token`.",
     )
     parser.add_argument(
         "--trust_remote_code",
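The new `--token` flag defaults to `None`, in which case downstream loaders fall back to the token cached by `huggingface-cli login`, exactly as the help text says. A sketch of inspecting that cache (an assumption that `HfFolder` is available, as it was in the huggingface_hub releases contemporary with this commit):

# Hedged sketch: check whether a cached login token exists before relying
# on the --token default. HfFolder ships with huggingface_hub of this era.
from huggingface_hub import HfFolder

cached_token = HfFolder.get_token()  # returns None if the user never logged in
print("cached Hub token found" if cached_token else "anonymous Hub access")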
@@ -377,6 +384,12 @@ def collate_fn(examples):
 def main():
     args = parse_args()
 
+    if args.use_auth_token is not None:
+        warnings.warn("The `use_auth_token` argument is deprecated and will be removed in v4.34.", FutureWarning)
+        if args.token is not None:
+            raise ValueError("`token` and `use_auth_token` are both specified. Please set only the argument `token`.")
+        args.token = args.use_auth_token
+
     # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
     # information sent is the one passed as arguments along with your Python/PyTorch versions.
     send_example_telemetry("run_mim_no_trainer", args)
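The block above is the whole back-compat story: `--use_auth_token` still works but warns, and combining it with `--token` is rejected. The same logic as a standalone helper (a sketch; `resolve_token` is a hypothetical name, not part of the commit):

import warnings

def resolve_token(token, use_auth_token):
    # Mirrors the shim added above: prefer `token`, map the deprecated
    # `use_auth_token` onto it, and refuse ambiguous combinations.
    if use_auth_token is not None:
        warnings.warn(
            "The `use_auth_token` argument is deprecated and will be removed in v4.34.",
            FutureWarning,
        )
        if token is not None:
            raise ValueError(
                "`token` and `use_auth_token` are both specified. Please set only the argument `token`."
            )
        token = use_auth_token
    return token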
@@ -440,7 +453,7 @@ def main():
         args.dataset_config_name,
         data_files=args.data_files,
         cache_dir=args.cache_dir,
-        use_auth_token=True if args.use_auth_token else None,
+        token=args.token,
     )
 
     # If we don't have a validation split, split off a percentage of train as validation.
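On the `datasets` side the same rename applies: `load_dataset` accepts `token=` in the releases contemporary with this commit (datasets >= 2.14, which deprecated `use_auth_token=`). A sketch, with a placeholder dataset name standing in for `args.dataset_name`:

from datasets import load_dataset

# `token=None` falls back to the token cached by `huggingface-cli login`.
ds = load_dataset("cifar10", token=None)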
@@ -457,7 +470,7 @@ def main():
     config_kwargs = {
         "cache_dir": args.cache_dir,
         "revision": args.model_revision,
-        "use_auth_token": True if args.use_auth_token else None,
+        "token": args.token,
         "trust_remote_code": args.trust_remote_code,
     }
     if args.config_name_or_path:
@@ -508,13 +521,14 @@ def main():
             config=config,
             cache_dir=args.cache_dir,
             revision=args.model_revision,
-            token=True if args.use_auth_token else None,
+            token=args.token,
             trust_remote_code=args.trust_remote_code,
         )
     else:
         logger.info("Training new model from scratch")
         model = AutoModelForMaskedImageModeling.from_config(
             config,
+            token=args.token,
             trust_remote_code=args.trust_remote_code,
         )
 

@@ -108,6 +108,16 @@ class ModelArguments:
             "help": "The `use_auth_token` argument is deprecated and will be removed in v4.34. Please use `token`."
         },
     )
+    trust_remote_code: bool = field(
+        default=False,
+        metadata={
+            "help": (
+                "Whether or not to allow for custom models defined on the Hub in their own modeling files. This option"
+                "should only be set to `True` for repositories you trust and in which you have read the code, as it will"
+                "execute code present on the Hub on your local machine."
+            )
+        },
+    )
     freeze_vision_model: bool = field(
         default=False, metadata={"help": "Whether to freeze the vision model parameters or not."}
     )
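For scripts that previously lacked it, the commit also introduces the `trust_remote_code` field shown above; it gates execution of repo-local modeling code. A sketch of what the flag feeds into (the checkpoint is a placeholder for a repo that defines custom modeling files):

# Hedged sketch: opting in to Hub-hosted modeling code. Only set this for
# repositories you trust, since their code runs on your machine.
from transformers import AutoModel

model = AutoModel.from_pretrained(
    "org/custom-model",  # placeholder
    trust_remote_code=True,
)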
@@ -353,15 +363,27 @@ def main():
     # 5. Load pretrained model, tokenizer, and image processor
     if model_args.tokenizer_name:
         tokenizer = AutoTokenizer.from_pretrained(
-            model_args.tokenizer_name, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer
+            model_args.tokenizer_name,
+            cache_dir=model_args.cache_dir,
+            use_fast=model_args.use_fast_tokenizer,
+            token=model_args.token,
+            trust_remote_code=model_args.trust_remote_code,
         )
     elif model_args.model_name_or_path:
         tokenizer = AutoTokenizer.from_pretrained(
-            model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer
+            model_args.model_name_or_path,
+            cache_dir=model_args.cache_dir,
+            use_fast=model_args.use_fast_tokenizer,
+            token=model_args.token,
+            trust_remote_code=model_args.trust_remote_code,
         )
     elif model_args.text_model_name_or_path:
         tokenizer = AutoTokenizer.from_pretrained(
-            model_args.text_model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer
+            model_args.text_model_name_or_path,
+            cache_dir=model_args.cache_dir,
+            use_fast=model_args.use_fast_tokenizer,
+            token=model_args.token,
+            trust_remote_code=model_args.trust_remote_code,
         )
     else:
         raise ValueError(
@@ -376,6 +398,7 @@ def main():
             cache_dir=model_args.cache_dir,
             revision=model_args.model_revision,
+            token=model_args.token,
             trust_remote_code=model_args.trust_remote_code,
         )
         with training_args.strategy.scope():
             model = TFAutoModel.from_pretrained(
@@ -383,6 +406,7 @@ def main():
             cache_dir=model_args.cache_dir,
             revision=model_args.model_revision,
+            token=model_args.token,
             trust_remote_code=model_args.trust_remote_code,
         )
     else:
         # Load image_processor, in this script we only use this to get the mean and std for normalization.
@@ -391,6 +415,7 @@ def main():
             cache_dir=model_args.cache_dir,
             revision=model_args.model_revision,
+            token=model_args.token,
             trust_remote_code=model_args.trust_remote_code,
         )
         with training_args.strategy.scope():
             model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
@@ -398,6 +423,7 @@ def main():
                 text_model_name_or_path=model_args.text_model_name_or_path,
                 cache_dir=model_args.cache_dir,
+                token=model_args.token,
                 trust_remote_code=model_args.trust_remote_code,
             )
         config = model.config
 

@@ -378,11 +378,12 @@ def main():
     if model_args.config_name:
         config = AutoConfig.from_pretrained(
             model_args.config_name,
+            token=model_args.token,
             trust_remote_code=model_args.trust_remote_code,
         )
     elif model_args.model_name_or_path:
         config = AutoConfig.from_pretrained(
-            model_args.model_name_or_path, trust_remote_code=model_args.trust_remote_code
+            model_args.model_name_or_path, token=model_args.token, trust_remote_code=model_args.trust_remote_code
         )
     else:
         config = CONFIG_MAPPING[model_args.model_type]()
@@ -390,11 +391,11 @@ def main():
 
     if model_args.tokenizer_name:
         tokenizer = AutoTokenizer.from_pretrained(
-            model_args.tokenizer_name, trust_remote_code=model_args.trust_remote_code
+            model_args.tokenizer_name, token=model_args.token, trust_remote_code=model_args.trust_remote_code
         )
     elif model_args.model_name_or_path:
         tokenizer = AutoTokenizer.from_pretrained(
-            model_args.model_name_or_path, trust_remote_code=model_args.trust_remote_code
+            model_args.model_name_or_path, token=model_args.token, trust_remote_code=model_args.trust_remote_code
         )
     else:
         raise ValueError(
@@ -499,15 +500,20 @@ def main():
     # region Prepare model
     if checkpoint is not None:
         model = TFAutoModelForCausalLM.from_pretrained(
-            checkpoint, config=config, trust_remote_code=model_args.trust_remote_code
+            checkpoint, config=config, token=model_args.token, trust_remote_code=model_args.trust_remote_code
         )
     elif model_args.model_name_or_path:
         model = TFAutoModelForCausalLM.from_pretrained(
-            model_args.model_name_or_path, config=config, trust_remote_code=model_args.trust_remote_code
+            model_args.model_name_or_path,
+            config=config,
+            token=model_args.token,
+            trust_remote_code=model_args.trust_remote_code,
         )
     else:
         logger.info("Training new model from scratch")
-        model = TFAutoModelForCausalLM.from_config(config, trust_remote_code=model_args.trust_remote_code)
+        model = TFAutoModelForCausalLM.from_config(
+            config, token=model_args.token, trust_remote_code=model_args.trust_remote_code
+        )
 
     # We resize the embeddings only when necessary to avoid index errors. If you are creating a model from scratch
     # on a small vocab and want a smaller embedding size, remove this test.

@@ -358,12 +358,16 @@ def main():
     # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
     # download model & vocab.
     if checkpoint is not None:
-        config = AutoConfig.from_pretrained(checkpoint, trust_remote_code=model_args.trust_remote_code)
+        config = AutoConfig.from_pretrained(
+            checkpoint, token=model_args.token, trust_remote_code=model_args.trust_remote_code
+        )
     elif model_args.config_name:
-        config = AutoConfig.from_pretrained(model_args.config_name, trust_remote_code=model_args.trust_remote_code)
+        config = AutoConfig.from_pretrained(
+            model_args.config_name, token=model_args.token, trust_remote_code=model_args.trust_remote_code
+        )
     elif model_args.model_name_or_path:
         config = AutoConfig.from_pretrained(
-            model_args.model_name_or_path, trust_remote_code=model_args.trust_remote_code
+            model_args.model_name_or_path, token=model_args.token, trust_remote_code=model_args.trust_remote_code
         )
     else:
         config = CONFIG_MAPPING[model_args.model_type]()
@@ -371,11 +375,11 @@ def main():
 
     if model_args.tokenizer_name:
         tokenizer = AutoTokenizer.from_pretrained(
-            model_args.tokenizer_name, trust_remote_code=model_args.trust_remote_code
+            model_args.tokenizer_name, token=model_args.token, trust_remote_code=model_args.trust_remote_code
         )
     elif model_args.model_name_or_path:
         tokenizer = AutoTokenizer.from_pretrained(
-            model_args.model_name_or_path, trust_remote_code=model_args.trust_remote_code
+            model_args.model_name_or_path, token=model_args.token, trust_remote_code=model_args.trust_remote_code
         )
     else:
         raise ValueError(
@@ -512,15 +516,20 @@ def main():
     # region Prepare model
     if checkpoint is not None:
         model = TFAutoModelForMaskedLM.from_pretrained(
-            checkpoint, config=config, trust_remote_code=model_args.trust_remote_code
+            checkpoint, config=config, token=model_args.token, trust_remote_code=model_args.trust_remote_code
         )
     elif model_args.model_name_or_path:
         model = TFAutoModelForMaskedLM.from_pretrained(
-            model_args.model_name_or_path, config=config, trust_remote_code=model_args.trust_remote_code
+            model_args.model_name_or_path,
+            config=config,
+            token=model_args.token,
+            trust_remote_code=model_args.trust_remote_code,
         )
     else:
         logger.info("Training new model from scratch")
-        model = TFAutoModelForMaskedLM.from_config(config, trust_remote_code=model_args.trust_remote_code)
+        model = TFAutoModelForMaskedLM.from_config(
+            config, token=model_args.token, trust_remote_code=model_args.trust_remote_code
+        )
 
     # We resize the embeddings only when necessary to avoid index errors. If you are creating a model from scratch
     # on a small vocab and want a smaller embedding size, remove this test.

@@ -317,12 +317,14 @@ def main():
         config = AutoConfig.from_pretrained(
             model_args.config_name,
             num_labels=num_labels,
+            token=model_args.token,
             trust_remote_code=model_args.trust_remote_code,
         )
     elif model_args.model_name_or_path:
         config = AutoConfig.from_pretrained(
             model_args.model_name_or_path,
             num_labels=num_labels,
+            token=model_args.token,
             trust_remote_code=model_args.trust_remote_code,
         )
     else:
@@ -341,12 +343,14 @@ def main():
             tokenizer_name_or_path,
             use_fast=True,
             add_prefix_space=True,
+            token=model_args.token,
             trust_remote_code=model_args.trust_remote_code,
         )
     else:
         tokenizer = AutoTokenizer.from_pretrained(
             tokenizer_name_or_path,
             use_fast=True,
+            token=model_args.token,
             trust_remote_code=model_args.trust_remote_code,
         )
     # endregion
@@ -419,12 +423,13 @@ def main():
         model = TFAutoModelForTokenClassification.from_pretrained(
             model_args.model_name_or_path,
             config=config,
+            token=model_args.token,
             trust_remote_code=model_args.trust_remote_code,
         )
     else:
         logger.info("Training new model from scratch")
         model = TFAutoModelForTokenClassification.from_config(
-            config, trust_remote_code=model_args.trust_remote_code
+            config, token=model_args.token, trust_remote_code=model_args.trust_remote_code
         )
 
     # We resize the embeddings only when necessary to avoid index errors. If you are creating a model from scratch