Mirror of https://github.com/huggingface/transformers.git (synced 2025-07-04 05:10:06 +06:00)

* add pretokenization arguments
* add pretokenization script
* add support for pretokenized data
* reformat code
* fix run command for training
* fix model call from config
* remove a package
* add comments on pretokenization in the readme
* remove explicit parallelization
* update readme
* update readme - remove username
* update readme - remove username
* keep data parallelization
* reformat code
* reformat code
* update readme
* reformat code
* Update examples/research_projects/codeparrot/README.md

Co-authored-by: Leandro von Werra <lvwerra@users.noreply.github.com>
Co-authored-by: Loubna ben allal <loubnabenallal@gmail.com>
23 lines · 869 B · Python
from arguments import InitializationArguments

from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser


# Configuration
parser = HfArgumentParser(InitializationArguments)
args = parser.parse_args()

# Load codeparrot tokenizer trained for Python code tokenization
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)

# Config: "scale_attn_by_inverse_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
config_kwargs = {
    "vocab_size": len(tokenizer),
    "scale_attn_by_inverse_layer_idx": True,
    "reorder_and_upcast_attn": True,
}

# Load model config (GPT-2 large in this case)
config = AutoConfig.from_pretrained(args.config_name, **config_kwargs)

# Initialize new model with config (weights are randomly initialized, not pretrained)
model = AutoModelForCausalLM.from_config(config)

# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
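The script imports InitializationArguments from the project's local arguments module, which is not shown on this page. A minimal sketch of what that dataclass could look like follows, inferred only from the attributes the script reads (config_name, tokenizer_name, model_name, push_to_hub); the defaults and help strings are illustrative assumptions, not the repository's actual values.

from dataclasses import dataclass, field
from typing import Optional


@dataclass
class InitializationArguments:
    # Hypothetical sketch: field names mirror the attributes used in the
    # script above; defaults here are assumptions for illustration only.
    config_name: Optional[str] = field(
        default="gpt2-large", metadata={"help": "Model configuration to initialize from."}
    )
    tokenizer_name: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Tokenizer attached to the new model."}
    )
    model_name: Optional[str] = field(
        default="codeparrot", metadata={"help": "Name under which the initialized model is saved."}
    )
    push_to_hub: Optional[bool] = field(
        default=False, metadata={"help": "Whether to push the saved model to the Hugging Face Hub."}
    )

Because HfArgumentParser turns each dataclass field into a command-line flag, the script can then be invoked along the lines of `python initialize_model.py --config_name gpt2-large --tokenizer_name codeparrot/codeparrot --model_name codeparrot` (the exact flag syntax, especially for the boolean, depends on the real arguments module and the HfArgumentParser version in use).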