
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
parser = HfArgumentParser(InitializationArguments)
args = parser.parse_args()
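# Note: arguments.py is not shown here. Judging from the attributes accessed below
# (tokenizer_name, config_name, model_name, push_to_hub), InitializationArguments is
# presumably a dataclass roughly along these lines (illustrative sketch only; field
# names inferred from usage, defaults and help strings hypothetical):
#
#     from dataclasses import dataclass, field
#
#     @dataclass
#     class InitializationArguments:
#         config_name: str = field(default="gpt2-large", metadata={"help": "Base model config to start from."})
#         tokenizer_name: str = field(default="codeparrot/codeparrot", metadata={"help": "Trained code tokenizer."})
#         model_name: str = field(default="codeparrot", metadata={"help": "Name for the initialized model."})
#         push_to_hub: bool = field(default=False, metadata={"help": "Push the initialized model to the hub."})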
# Load codeparrot tokenizer trained for Python code tokenization
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)
# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
|
|
config_kwargs = {"vocab_size": len(tokenizer), "scale_attn_by_layer_idx": True, "reorder_and_upcast_attn": True}
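# (Assumed behavior, based on the GPT-2 config options of the same names: "scale_attn_by_inverse_layer_idx"
# additionally scales attention weights by 1/(layer_idx + 1), and "reorder_and_upcast_attn" upcasts the
# attention dot-product/softmax to float32 under mixed precision. Passing vocab_size=len(tokenizer) sizes
# the embedding matrix to the custom codeparrot tokenizer, including any added special tokens.)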
# Load model config (GPT-2 large in this case)
config = AutoConfig.from_pretrained(args.config_name, **config_kwargs)
# Initialize new model with config
model = AutoModelForCausalLM.from_config(config)
# Save the initialized model (and push it to the hub if push_to_hub is set)
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
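# Example invocation (illustrative only: the script filename and argument values are assumptions,
# and HfArgumentParser derives the exact CLI flags from the fields defined in arguments.py):
#
#   python initialize_model.py \
#       --config_name gpt2-large \
#       --tokenizer_name codeparrot/codeparrot \
#       --model_name codeparrot-model \
#       --push_to_hub True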