From 1e4286fd5962438d15af0257fa5feb9f2b2e58be Mon Sep 17 00:00:00 2001
From: Matt
Date: Tue, 11 Mar 2025 13:47:38 +0000
Subject: [PATCH] Remove research projects (#36645)

* Remove research projects
* Add new README to explain where the projects went
* Trigger tests
* Cleanup all references to research_projects
---
 docs/source/ar/bertology.md | 2 +-
 docs/source/ar/run_scripts.md | 2 +-
 docs/source/de/index.md | 2 +-
 docs/source/de/run_scripts.md | 2 +-
 docs/source/en/model_doc/distilbert.md | 2 +-
 docs/source/en/model_doc/layoutlmv3.md | 4 +-
 docs/source/en/model_doc/pegasus.md | 2 +-
 docs/source/en/model_doc/qdqbert.md | 2 +-
 docs/source/en/model_doc/visual_bert.md | 2 +-
 docs/source/en/run_scripts.md | 2 +-
 docs/source/es/bertology.md | 2 +-
 docs/source/es/index.md | 2 +-
 docs/source/es/run_scripts.md | 2 +-
 docs/source/fr/index.md | 2 +-
 docs/source/fr/run_scripts_fr.md | 2 +-
 docs/source/it/index.md | 2 +-
 docs/source/it/run_scripts.md | 2 +-
 docs/source/ja/bertology.md | 2 +-
 docs/source/ja/index.md | 2 +-
 docs/source/ja/run_scripts.md | 2 +-
 docs/source/ko/bertology.md | 2 +-
 docs/source/ko/index.md | 2 +-
 docs/source/ko/run_scripts.md | 2 +-
 docs/source/ms/index.md | 2 +-
 docs/source/pt/index.md | 2 +-
 docs/source/pt/run_scripts.md | 2 +-
 docs/source/zh/bertology.md | 2 +-
 docs/source/zh/run_scripts.md | 2 +-
 examples/README.md | 2 +-
 examples/legacy/seq2seq/README.md | 2 +-
 examples/pytorch/language-modeling/README.md | 2 +-
 examples/pytorch/summarization/README.md | 2 +-
 examples/pytorch/translation/README.md | 2 +-
 examples/research_projects/README.md | 14 +-
 .../research_projects/adversarial/README.md | 38 -
 .../adversarial/requirements.txt | 1 -
 .../research_projects/adversarial/run_hans.py | 242 ---
 .../adversarial/utils_hans.py | 339 ---
 .../bert-loses-patience/README.md | 89 -
 .../bert-loses-patience/pabee/__init__.py | 0
 .../pabee/modeling_pabee_albert.py | 320 ---
 .../pabee/modeling_pabee_bert.py | 345 ---
 .../bert-loses-patience/requirements.txt | 1 -
 .../run_glue_with_pabee.py | 751 -------
 .../test_run_glue_with_pabee.py | 51 -
 examples/research_projects/bertabs/README.md | 61 -
 .../research_projects/bertabs/__init__.py | 0
 .../bertabs/configuration_bertabs.py | 98 -
 ...ert_bertabs_original_pytorch_checkpoint.py | 185 --
 .../bertabs/modeling_bertabs.py | 1054 ---------
 .../bertabs/requirements.txt | 5 -
 .../bertabs/run_summarization.py | 347 ---
 .../bertabs/test_utils_summarization.py | 98 -
 .../bertabs/utils_summarization.py | 167 --
 .../bertology/requirements.txt | 1 -
 .../bertology/run_bertology.py | 453 ----
 .../bertology/run_prune_gpt.py | 391 ----
 .../research_projects/codeparrot/README.md | 316 ---
 .../codeparrot/examples/README.md | 58 -
 .../codeparrot/examples/requirements.txt | 5 -
 .../examples/train_complexity_predictor.py | 132 --
 .../codeparrot/requirements.txt | 9 -
 .../codeparrot/scripts/arguments.py | 220 --
 .../codeparrot/scripts/bpe_training.py | 32 -
 .../codeparrot/scripts/codeparrot_training.py | 328 ---
 .../codeparrot/scripts/human_eval.py | 228 --
 .../codeparrot/scripts/initialize_model.py | 27 -
 .../scripts/minhash_deduplication.py | 268 ---
 .../codeparrot/scripts/preprocessing.py | 215 --
 .../codeparrot/scripts/pretokenizing.py | 49 -
 .../codeparrot/scripts/tests/__init__.py | 0
 .../scripts/tests/test_deduplicate.py | 29 -
 .../codeparrot/scripts/validation_loss.py | 99 -
 .../decision_transformer/requirements.txt | 240 ---
 .../run_decision_transformer.py | 173 --
 examples/research_projects/deebert/README.md | 54 -
.../research_projects/deebert/entropy_eval.sh | 33 - .../research_projects/deebert/eval_deebert.sh | 30 - .../deebert/requirements.txt | 1 - .../deebert/run_glue_deebert.py | 735 ------- .../research_projects/deebert/src/__init__.py | 0 .../deebert/src/modeling_highway_bert.py | 397 ---- .../deebert/src/modeling_highway_roberta.py | 154 -- .../deebert/test_glue_deebert.py | 104 - .../deebert/train_deebert.sh | 38 - .../research_projects/distillation/README.md | 193 -- .../distillation/distiller.py | 601 ------ .../distillation/grouped_batch_sampler.py | 108 - .../distillation/lm_seqs_dataset.py | 167 -- .../distillation/requirements.txt | 7 - .../distillation/run_squad_w_distillation.py | 877 -------- .../distillation/scripts/binarized_data.py | 97 - .../distillation/scripts/extract.py | 106 - .../scripts/extract_distilbert.py | 96 - .../distillation/scripts/token_counts.py | 57 - .../research_projects/distillation/train.py | 325 --- .../distilbert-base-cased.json | 15 - .../distilbert-base-multilingual-cased.json | 15 - .../distilbert-base-uncased.json | 15 - .../training_configs/distilgpt2.json | 9 - .../training_configs/distilroberta-base.json | 14 - .../research_projects/distillation/utils.py | 134 -- examples/research_projects/fsner/README.md | 88 - .../research_projects/fsner/pyproject.toml | 7 - .../research_projects/fsner/requirements.txt | 1 - examples/research_projects/fsner/setup.py | 27 - .../fsner/src/fsner/__init__.py | 5 - .../fsner/src/fsner/model.py | 80 - .../fsner/src/fsner/tokenizer_utils.py | 102 - .../information-gain-filtration/README.md | 100 - .../igf/__init__.py | 0 .../information-gain-filtration/igf/igf.py | 416 ---- .../requirements.txt | 6 - .../result_igf.png | Bin 34410 -> 0 bytes .../run_clm_igf.py | 450 ---- .../jax-projects/HOW_TO_PROPOSE_PROJECT.md | 109 - .../research_projects/jax-projects/README.md | 1295 ----------- .../jax-projects/big_bird/README.md | 60 - .../jax-projects/big_bird/bigbird_flax.py | 323 --- .../jax-projects/big_bird/evaluate.py | 164 -- .../big_bird/prepare_natural_questions.py | 329 --- .../jax-projects/big_bird/requirements.txt | 6 - .../jax-projects/big_bird/sweep_flax.yaml | 16 - .../jax-projects/big_bird/train.py | 78 - .../jax-projects/dataset-streaming/README.md | 121 -- .../dataset-streaming/run_mlm_flax_stream.py | 637 ------ .../jax-projects/hybrid_clip/README.md | 172 -- .../hybrid_clip/configuration_hybrid_clip.py | 112 - .../hybrid_clip/modeling_hybrid_clip.py | 420 ---- .../jax-projects/hybrid_clip/requirements.txt | 8 - .../hybrid_clip/run_hybrid_clip.py | 576 ----- .../jax-projects/model_parallel/README.md | 67 - .../jax-projects/model_parallel/partitions.py | 85 - .../jax-projects/model_parallel/run_clm_mp.py | 662 ------ .../jax-projects/wav2vec2/README.md | 120 -- .../wav2vec2/run_wav2vec2_pretrain_flax.py | 614 ------ .../research_projects/layoutlmv3/README.md | 69 - .../layoutlmv3/requirements.txt | 3 - .../layoutlmv3/run_funsd_cord.py | 533 ----- .../research_projects/longform-qa/README.md | 7 - .../research_projects/longform-qa/eli5_app.py | 349 --- .../longform-qa/eli5_utils.py | 688 ------ .../longform-qa/requirements.txt | 4 - examples/research_projects/luke/README.md | 71 - examples/research_projects/luke/luke_utils.py | 115 - .../luke/run_luke_ner_no_trainer.py | 720 ------- examples/research_projects/lxmert/README.md | 5 - examples/research_projects/lxmert/demo.ipynb | 264 --- .../lxmert/extracting_data.py | 149 -- .../lxmert/modeling_frcnn.py | 1920 ----------------- .../lxmert/processing_image.py | 151 
-- .../research_projects/lxmert/requirements.txt | 98 - examples/research_projects/lxmert/utils.py | 554 ----- .../lxmert/visualizing_image.py | 500 ----- examples/research_projects/mlm_wwm/README.md | 98 - .../mlm_wwm/requirements.txt | 4 - .../mlm_wwm/run_chinese_ref.py | 164 -- .../research_projects/mlm_wwm/run_mlm_wwm.py | 435 ---- examples/research_projects/mm-imdb/README.md | 23 - .../research_projects/mm-imdb/run_mmimdb.py | 575 ----- .../research_projects/mm-imdb/utils_mmimdb.py | 146 -- .../movement-pruning/README.md | 185 -- .../movement-pruning/Saving_PruneBERT.ipynb | 645 ------ .../movement-pruning/bertarize.py | 136 -- .../movement-pruning/counts_parameters.py | 97 - .../movement-pruning/emmental/__init__.py | 9 - .../emmental/configuration_bert_masked.py | 70 - .../emmental/modeling_bert_masked.py | 1019 --------- .../emmental/modules/__init__.py | 2 - .../emmental/modules/binarizer.py | 144 -- .../emmental/modules/masked_nn.py | 106 - .../movement-pruning/masked_run_glue.py | 962 --------- .../movement-pruning/masked_run_squad.py | 1147 ---------- .../movement-pruning/requirements.txt | 6 - .../onnx/summarization/README.md | 43 - .../bart_onnx/generation_onnx.py | 755 ------- .../bart_onnx/reduce_onnx_size.py | 121 -- .../onnx/summarization/requirements.txt | 1 - .../onnx/summarization/run_onnx_exporter.py | 206 -- .../research_projects/performer/README.md | 25 - .../performer/full_script.sh | 1 - .../performer/modeling_flax_performer.py | 551 ----- .../modeling_flax_performer_utils.py | 658 ------ .../performer/run_mlm_performer.py | 693 ------ .../performer/sanity_script.sh | 1 - examples/research_projects/pplm/README.md | 56 - .../pplm/imgs/headfigure.png | Bin 668261 -> 0 bytes .../research_projects/pplm/imgs/wooly.png | Bin 679776 -> 0 bytes .../pplm/pplm_classification_head.py | 19 - .../research_projects/pplm/requirements.txt | 22 - examples/research_projects/pplm/run_pplm.py | 823 ------- .../pplm/run_pplm_discrim_train.py | 526 ----- .../quantization-qdqbert/Dockerfile | 34 - .../quantization-qdqbert/README.md | 200 -- .../evaluate-hf-trt-qa.py | 457 ---- .../ort-infer-benchmark.py | 50 - .../quantization-qdqbert/quant_trainer.py | 305 --- .../quantization-qdqbert/run_quant_qa.py | 688 ------ .../quantization-qdqbert/trainer_quant_qa.py | 212 -- .../quantization-qdqbert/utils_qa.py | 435 ---- .../rag-end2end-retriever/README.md | 56 - .../rag-end2end-retriever/callbacks_rag.py | 119 - .../distributed_ray_retriever.py | 185 -- .../rag-end2end-retriever/eval_rag.py | 320 --- .../rag-end2end-retriever/finetune_rag.py | 815 ------- .../finetune_rag_ray_end2end.sh | 68 - .../rag-end2end-retriever/kb_encode_utils.py | 80 - .../rag-end2end-retriever/lightning_base.py | 414 ---- .../rag-end2end-retriever/requirements.txt | 7 - .../dummy-kb/my_knowledge_dataset.csv | 2 - .../test_run/dummy-train-data/test.source | 8 - .../test_run/dummy-train-data/test.target | 8 - .../test_run/dummy-train-data/train.source | 48 - .../test_run/dummy-train-data/train.target | 48 - .../test_run/dummy-train-data/val.source | 8 - .../test_run/dummy-train-data/val.target | 8 - .../test_run/test_finetune.sh | 57 - .../test_run/test_rag_new_features.sh | 16 - .../use_own_knowledge_dataset.py | 175 -- .../rag-end2end-retriever/utils_rag.py | 244 --- examples/research_projects/rag/README.md | 203 -- examples/research_projects/rag/__init__.py | 5 - .../rag/_test_finetune_rag.py | 111 - .../research_projects/rag/callbacks_rag.py | 116 - .../rag/consolidate_rag_checkpoint.py | 101 - 
.../rag/distributed_pytorch_retriever.py | 138 -- .../rag/distributed_ray_retriever.py | 152 -- examples/research_projects/rag/eval_rag.py | 320 --- .../research_projects/rag/finetune_rag.py | 649 ------ .../research_projects/rag/finetune_rag.sh | 34 - .../research_projects/rag/finetune_rag_ray.sh | 44 - .../research_projects/rag/lightning_base.py | 404 ---- .../rag/parse_dpr_relevance_data.py | 47 - .../research_projects/rag/requirements.txt | 8 - .../rag/test_data/my_knowledge_dataset.csv | 2 - .../rag/test_distributed_retriever.py | 338 --- .../rag/use_own_knowledge_dataset.py | 208 -- examples/research_projects/rag/utils_rag.py | 244 --- .../robust-speech-event/README.md | 713 ------ .../robust-speech-event/eval.py | 136 -- .../run_speech_recognition_ctc_bnb.py | 779 ------- .../run_speech_recognition_ctc_streaming.py | 679 ------ .../README.md | 128 -- .../finetuning.py | 818 ------- .../requirements.txt | 7 - .../self-training-text-classification/run.sh | 81 - .../selftraining.py | 388 ---- .../seq2seq-distillation/README.md | 434 ---- .../seq2seq-distillation/_test_bash_script.py | 203 -- .../_test_make_student.py | 40 - .../_test_seq2seq_examples.py | 444 ---- .../_test_seq2seq_examples_multi_gpu.py | 163 -- .../seq2seq-distillation/callbacks.py | 116 - .../convert_pl_checkpoint_to_hf.py | 74 - .../distil_marian_enro_teacher.sh | 20 - .../distil_marian_no_teacher.sh | 18 - .../seq2seq-distillation/distillation.py | 310 --- .../dynamic_bs_example.sh | 17 - .../seq2seq-distillation/finetune.py | 454 ---- .../seq2seq-distillation/finetune.sh | 11 - .../finetune_bart_tiny.sh | 32 - .../finetune_pegasus_xsum.sh | 14 - .../seq2seq-distillation/finetune_t5.sh | 14 - .../seq2seq-distillation/lightning_base.py | 393 ---- .../seq2seq-distillation/make_student.py | 186 -- .../precomputed_pseudo_labels.md | 43 - .../seq2seq-distillation/requirements.txt | 20 - .../seq2seq-distillation/run_eval.py | 167 -- .../seq2seq-distillation/sentence_splitter.py | 22 - .../train_distilbart_cnn.sh | 24 - .../train_distilbart_xsum.sh | 21 - .../train_mbart_cc25_enro.sh | 18 - .../seq2seq-distillation/utils.py | 645 ------ .../research_projects/synthid_text/README.md | 34 - .../synthid_text/detector_training.py | 502 ----- .../synthid_text/requirements.txt | 5 - .../research_projects/synthid_text/utils.py | 408 ---- examples/research_projects/tapex/README.md | 288 --- .../research_projects/tapex/requirements.txt | 4 - .../tapex/run_tabfact_with_tapex.py | 471 ---- .../tapex/run_wikisql_with_tapex.py | 649 ------ .../run_wikitablequestions_with_tapex.py | 625 ------ .../research_projects/tapex/wikisql_utils.py | 257 --- .../research_projects/token-healing/README.md | 40 - .../token-healing/run_token_healing.py | 62 - .../research_projects/visual_bert/README.md | 6 - .../research_projects/visual_bert/demo.ipynb | 255 --- .../visual_bert/extracting_data.py | 149 -- .../visual_bert/modeling_frcnn.py | 1920 ----------------- .../visual_bert/processing_image.py | 151 -- .../visual_bert/requirements.txt | 98 - .../research_projects/visual_bert/utils.py | 554 ----- .../visual_bert/visualizing_image.py | 500 ----- .../research_projects/vqgan-clip/README.md | 70 - .../vqgan-clip/VQGAN_CLIP.py | 268 --- .../vqgan-clip/img_processing.py | 50 - .../research_projects/vqgan-clip/loaders.py | 74 - .../vqgan-clip/requirements.txt | 27 - .../research_projects/vqgan-clip/utils.py | 35 - .../wav2vec2/FINE_TUNE_XLSR_WAV2VEC2.md | 516 ----- examples/research_projects/wav2vec2/README.md | 249 --- 
.../research_projects/wav2vec2/alignment.py | 223 -- .../wav2vec2/ds_config_wav2vec2_zero2.json | 51 - .../wav2vec2/ds_config_wav2vec2_zero3.json | 57 - .../wav2vec2/finetune_base_100.sh | 21 - .../wav2vec2/finetune_base_timit_asr.sh | 22 - .../wav2vec2/finetune_large_lv60_100.sh | 21 - .../wav2vec2/finetune_large_lv60_timit_asr.sh | 23 - ...tune_large_xlsr_53_arabic_speech_corpus.sh | 25 - .../finetune_wav2vec2_xlsr_turkish.sh | 22 - .../wav2vec2/requirements.txt | 7 - .../wav2vec2/run_alignment.sh | 8 - .../research_projects/wav2vec2/run_asr.py | 480 ----- .../wav2vec2/run_common_voice.py | 513 ----- .../wav2vec2/run_pretrain.py | 396 ---- .../wav2vec2/test_wav2vec2_deepspeed.py | 199 -- .../wav2vec2/vocab/buckwalter.json | 58 - examples/research_projects/xtreme-s/README.md | 160 -- .../xtreme-s/requirements.txt | 5 - .../xtreme-s/run_xtreme_s.py | 949 -------- .../zero-shot-distillation/README.md | 155 -- .../distill_classifier.py | 338 --- src/transformers/generation/watermarking.py | 4 +- utils/release.py | 2 - 324 files changed, 39 insertions(+), 63649 deletions(-) delete mode 100644 examples/research_projects/adversarial/README.md delete mode 100644 examples/research_projects/adversarial/requirements.txt delete mode 100644 examples/research_projects/adversarial/run_hans.py delete mode 100644 examples/research_projects/adversarial/utils_hans.py delete mode 100755 examples/research_projects/bert-loses-patience/README.md delete mode 100644 examples/research_projects/bert-loses-patience/pabee/__init__.py delete mode 100644 examples/research_projects/bert-loses-patience/pabee/modeling_pabee_albert.py delete mode 100644 examples/research_projects/bert-loses-patience/pabee/modeling_pabee_bert.py delete mode 100644 examples/research_projects/bert-loses-patience/requirements.txt delete mode 100755 examples/research_projects/bert-loses-patience/run_glue_with_pabee.py delete mode 100644 examples/research_projects/bert-loses-patience/test_run_glue_with_pabee.py delete mode 100644 examples/research_projects/bertabs/README.md delete mode 100644 examples/research_projects/bertabs/__init__.py delete mode 100644 examples/research_projects/bertabs/configuration_bertabs.py delete mode 100644 examples/research_projects/bertabs/convert_bertabs_original_pytorch_checkpoint.py delete mode 100644 examples/research_projects/bertabs/modeling_bertabs.py delete mode 100644 examples/research_projects/bertabs/requirements.txt delete mode 100644 examples/research_projects/bertabs/run_summarization.py delete mode 100644 examples/research_projects/bertabs/test_utils_summarization.py delete mode 100644 examples/research_projects/bertabs/utils_summarization.py delete mode 100644 examples/research_projects/bertology/requirements.txt delete mode 100644 examples/research_projects/bertology/run_bertology.py delete mode 100644 examples/research_projects/bertology/run_prune_gpt.py delete mode 100644 examples/research_projects/codeparrot/README.md delete mode 100644 examples/research_projects/codeparrot/examples/README.md delete mode 100644 examples/research_projects/codeparrot/examples/requirements.txt delete mode 100644 examples/research_projects/codeparrot/examples/train_complexity_predictor.py delete mode 100644 examples/research_projects/codeparrot/requirements.txt delete mode 100644 examples/research_projects/codeparrot/scripts/arguments.py delete mode 100644 examples/research_projects/codeparrot/scripts/bpe_training.py delete mode 100644 examples/research_projects/codeparrot/scripts/codeparrot_training.py delete mode 
100644 examples/research_projects/codeparrot/scripts/human_eval.py delete mode 100644 examples/research_projects/codeparrot/scripts/initialize_model.py delete mode 100644 examples/research_projects/codeparrot/scripts/minhash_deduplication.py delete mode 100644 examples/research_projects/codeparrot/scripts/preprocessing.py delete mode 100644 examples/research_projects/codeparrot/scripts/pretokenizing.py delete mode 100644 examples/research_projects/codeparrot/scripts/tests/__init__.py delete mode 100644 examples/research_projects/codeparrot/scripts/tests/test_deduplicate.py delete mode 100644 examples/research_projects/codeparrot/scripts/validation_loss.py delete mode 100644 examples/research_projects/decision_transformer/requirements.txt delete mode 100644 examples/research_projects/decision_transformer/run_decision_transformer.py delete mode 100644 examples/research_projects/deebert/README.md delete mode 100755 examples/research_projects/deebert/entropy_eval.sh delete mode 100755 examples/research_projects/deebert/eval_deebert.sh delete mode 100644 examples/research_projects/deebert/requirements.txt delete mode 100644 examples/research_projects/deebert/run_glue_deebert.py delete mode 100644 examples/research_projects/deebert/src/__init__.py delete mode 100644 examples/research_projects/deebert/src/modeling_highway_bert.py delete mode 100644 examples/research_projects/deebert/src/modeling_highway_roberta.py delete mode 100644 examples/research_projects/deebert/test_glue_deebert.py delete mode 100755 examples/research_projects/deebert/train_deebert.sh delete mode 100644 examples/research_projects/distillation/README.md delete mode 100644 examples/research_projects/distillation/distiller.py delete mode 100644 examples/research_projects/distillation/grouped_batch_sampler.py delete mode 100644 examples/research_projects/distillation/lm_seqs_dataset.py delete mode 100644 examples/research_projects/distillation/requirements.txt delete mode 100644 examples/research_projects/distillation/run_squad_w_distillation.py delete mode 100644 examples/research_projects/distillation/scripts/binarized_data.py delete mode 100644 examples/research_projects/distillation/scripts/extract.py delete mode 100644 examples/research_projects/distillation/scripts/extract_distilbert.py delete mode 100644 examples/research_projects/distillation/scripts/token_counts.py delete mode 100644 examples/research_projects/distillation/train.py delete mode 100644 examples/research_projects/distillation/training_configs/distilbert-base-cased.json delete mode 100644 examples/research_projects/distillation/training_configs/distilbert-base-multilingual-cased.json delete mode 100644 examples/research_projects/distillation/training_configs/distilbert-base-uncased.json delete mode 100644 examples/research_projects/distillation/training_configs/distilgpt2.json delete mode 100644 examples/research_projects/distillation/training_configs/distilroberta-base.json delete mode 100644 examples/research_projects/distillation/utils.py delete mode 100644 examples/research_projects/fsner/README.md delete mode 100644 examples/research_projects/fsner/pyproject.toml delete mode 100644 examples/research_projects/fsner/requirements.txt delete mode 100644 examples/research_projects/fsner/setup.py delete mode 100644 examples/research_projects/fsner/src/fsner/__init__.py delete mode 100644 examples/research_projects/fsner/src/fsner/model.py delete mode 100644 examples/research_projects/fsner/src/fsner/tokenizer_utils.py delete mode 100644 
examples/research_projects/information-gain-filtration/README.md delete mode 100644 examples/research_projects/information-gain-filtration/igf/__init__.py delete mode 100644 examples/research_projects/information-gain-filtration/igf/igf.py delete mode 100644 examples/research_projects/information-gain-filtration/requirements.txt delete mode 100644 examples/research_projects/information-gain-filtration/result_igf.png delete mode 100644 examples/research_projects/information-gain-filtration/run_clm_igf.py delete mode 100644 examples/research_projects/jax-projects/HOW_TO_PROPOSE_PROJECT.md delete mode 100644 examples/research_projects/jax-projects/README.md delete mode 100644 examples/research_projects/jax-projects/big_bird/README.md delete mode 100644 examples/research_projects/jax-projects/big_bird/bigbird_flax.py delete mode 100644 examples/research_projects/jax-projects/big_bird/evaluate.py delete mode 100644 examples/research_projects/jax-projects/big_bird/prepare_natural_questions.py delete mode 100644 examples/research_projects/jax-projects/big_bird/requirements.txt delete mode 100644 examples/research_projects/jax-projects/big_bird/sweep_flax.yaml delete mode 100644 examples/research_projects/jax-projects/big_bird/train.py delete mode 100644 examples/research_projects/jax-projects/dataset-streaming/README.md delete mode 100755 examples/research_projects/jax-projects/dataset-streaming/run_mlm_flax_stream.py delete mode 100644 examples/research_projects/jax-projects/hybrid_clip/README.md delete mode 100644 examples/research_projects/jax-projects/hybrid_clip/configuration_hybrid_clip.py delete mode 100644 examples/research_projects/jax-projects/hybrid_clip/modeling_hybrid_clip.py delete mode 100644 examples/research_projects/jax-projects/hybrid_clip/requirements.txt delete mode 100644 examples/research_projects/jax-projects/hybrid_clip/run_hybrid_clip.py delete mode 100644 examples/research_projects/jax-projects/model_parallel/README.md delete mode 100644 examples/research_projects/jax-projects/model_parallel/partitions.py delete mode 100644 examples/research_projects/jax-projects/model_parallel/run_clm_mp.py delete mode 100644 examples/research_projects/jax-projects/wav2vec2/README.md delete mode 100755 examples/research_projects/jax-projects/wav2vec2/run_wav2vec2_pretrain_flax.py delete mode 100644 examples/research_projects/layoutlmv3/README.md delete mode 100644 examples/research_projects/layoutlmv3/requirements.txt delete mode 100644 examples/research_projects/layoutlmv3/run_funsd_cord.py delete mode 100644 examples/research_projects/longform-qa/README.md delete mode 100644 examples/research_projects/longform-qa/eli5_app.py delete mode 100644 examples/research_projects/longform-qa/eli5_utils.py delete mode 100644 examples/research_projects/longform-qa/requirements.txt delete mode 100644 examples/research_projects/luke/README.md delete mode 100644 examples/research_projects/luke/luke_utils.py delete mode 100644 examples/research_projects/luke/run_luke_ner_no_trainer.py delete mode 100644 examples/research_projects/lxmert/README.md delete mode 100644 examples/research_projects/lxmert/demo.ipynb delete mode 100644 examples/research_projects/lxmert/extracting_data.py delete mode 100644 examples/research_projects/lxmert/modeling_frcnn.py delete mode 100644 examples/research_projects/lxmert/processing_image.py delete mode 100644 examples/research_projects/lxmert/requirements.txt delete mode 100644 examples/research_projects/lxmert/utils.py delete mode 100644 
examples/research_projects/lxmert/visualizing_image.py delete mode 100644 examples/research_projects/mlm_wwm/README.md delete mode 100644 examples/research_projects/mlm_wwm/requirements.txt delete mode 100644 examples/research_projects/mlm_wwm/run_chinese_ref.py delete mode 100644 examples/research_projects/mlm_wwm/run_mlm_wwm.py delete mode 100644 examples/research_projects/mm-imdb/README.md delete mode 100644 examples/research_projects/mm-imdb/run_mmimdb.py delete mode 100644 examples/research_projects/mm-imdb/utils_mmimdb.py delete mode 100644 examples/research_projects/movement-pruning/README.md delete mode 100644 examples/research_projects/movement-pruning/Saving_PruneBERT.ipynb delete mode 100644 examples/research_projects/movement-pruning/bertarize.py delete mode 100644 examples/research_projects/movement-pruning/counts_parameters.py delete mode 100644 examples/research_projects/movement-pruning/emmental/__init__.py delete mode 100644 examples/research_projects/movement-pruning/emmental/configuration_bert_masked.py delete mode 100644 examples/research_projects/movement-pruning/emmental/modeling_bert_masked.py delete mode 100644 examples/research_projects/movement-pruning/emmental/modules/__init__.py delete mode 100644 examples/research_projects/movement-pruning/emmental/modules/binarizer.py delete mode 100644 examples/research_projects/movement-pruning/emmental/modules/masked_nn.py delete mode 100644 examples/research_projects/movement-pruning/masked_run_glue.py delete mode 100644 examples/research_projects/movement-pruning/masked_run_squad.py delete mode 100644 examples/research_projects/movement-pruning/requirements.txt delete mode 100644 examples/research_projects/onnx/summarization/README.md delete mode 100644 examples/research_projects/onnx/summarization/bart_onnx/generation_onnx.py delete mode 100644 examples/research_projects/onnx/summarization/bart_onnx/reduce_onnx_size.py delete mode 100644 examples/research_projects/onnx/summarization/requirements.txt delete mode 100644 examples/research_projects/onnx/summarization/run_onnx_exporter.py delete mode 100644 examples/research_projects/performer/README.md delete mode 100755 examples/research_projects/performer/full_script.sh delete mode 100644 examples/research_projects/performer/modeling_flax_performer.py delete mode 100644 examples/research_projects/performer/modeling_flax_performer_utils.py delete mode 100644 examples/research_projects/performer/run_mlm_performer.py delete mode 100755 examples/research_projects/performer/sanity_script.sh delete mode 100644 examples/research_projects/pplm/README.md delete mode 100644 examples/research_projects/pplm/imgs/headfigure.png delete mode 100644 examples/research_projects/pplm/imgs/wooly.png delete mode 100644 examples/research_projects/pplm/pplm_classification_head.py delete mode 100644 examples/research_projects/pplm/requirements.txt delete mode 100644 examples/research_projects/pplm/run_pplm.py delete mode 100644 examples/research_projects/pplm/run_pplm_discrim_train.py delete mode 100644 examples/research_projects/quantization-qdqbert/Dockerfile delete mode 100644 examples/research_projects/quantization-qdqbert/README.md delete mode 100755 examples/research_projects/quantization-qdqbert/evaluate-hf-trt-qa.py delete mode 100644 examples/research_projects/quantization-qdqbert/ort-infer-benchmark.py delete mode 100755 examples/research_projects/quantization-qdqbert/quant_trainer.py delete mode 100755 examples/research_projects/quantization-qdqbert/run_quant_qa.py delete mode 100644 
examples/research_projects/quantization-qdqbert/trainer_quant_qa.py delete mode 100644 examples/research_projects/quantization-qdqbert/utils_qa.py delete mode 100644 examples/research_projects/rag-end2end-retriever/README.md delete mode 100644 examples/research_projects/rag-end2end-retriever/callbacks_rag.py delete mode 100644 examples/research_projects/rag-end2end-retriever/distributed_ray_retriever.py delete mode 100644 examples/research_projects/rag-end2end-retriever/eval_rag.py delete mode 100644 examples/research_projects/rag-end2end-retriever/finetune_rag.py delete mode 100755 examples/research_projects/rag-end2end-retriever/finetune_rag_ray_end2end.sh delete mode 100644 examples/research_projects/rag-end2end-retriever/kb_encode_utils.py delete mode 100644 examples/research_projects/rag-end2end-retriever/lightning_base.py delete mode 100644 examples/research_projects/rag-end2end-retriever/requirements.txt delete mode 100644 examples/research_projects/rag-end2end-retriever/test_run/dummy-kb/my_knowledge_dataset.csv delete mode 100644 examples/research_projects/rag-end2end-retriever/test_run/dummy-train-data/test.source delete mode 100644 examples/research_projects/rag-end2end-retriever/test_run/dummy-train-data/test.target delete mode 100644 examples/research_projects/rag-end2end-retriever/test_run/dummy-train-data/train.source delete mode 100644 examples/research_projects/rag-end2end-retriever/test_run/dummy-train-data/train.target delete mode 100644 examples/research_projects/rag-end2end-retriever/test_run/dummy-train-data/val.source delete mode 100644 examples/research_projects/rag-end2end-retriever/test_run/dummy-train-data/val.target delete mode 100755 examples/research_projects/rag-end2end-retriever/test_run/test_finetune.sh delete mode 100755 examples/research_projects/rag-end2end-retriever/test_run/test_rag_new_features.sh delete mode 100644 examples/research_projects/rag-end2end-retriever/use_own_knowledge_dataset.py delete mode 100644 examples/research_projects/rag-end2end-retriever/utils_rag.py delete mode 100644 examples/research_projects/rag/README.md delete mode 100644 examples/research_projects/rag/__init__.py delete mode 100644 examples/research_projects/rag/_test_finetune_rag.py delete mode 100644 examples/research_projects/rag/callbacks_rag.py delete mode 100644 examples/research_projects/rag/consolidate_rag_checkpoint.py delete mode 100644 examples/research_projects/rag/distributed_pytorch_retriever.py delete mode 100644 examples/research_projects/rag/distributed_ray_retriever.py delete mode 100644 examples/research_projects/rag/eval_rag.py delete mode 100644 examples/research_projects/rag/finetune_rag.py delete mode 100755 examples/research_projects/rag/finetune_rag.sh delete mode 100755 examples/research_projects/rag/finetune_rag_ray.sh delete mode 100644 examples/research_projects/rag/lightning_base.py delete mode 100644 examples/research_projects/rag/parse_dpr_relevance_data.py delete mode 100644 examples/research_projects/rag/requirements.txt delete mode 100644 examples/research_projects/rag/test_data/my_knowledge_dataset.csv delete mode 100644 examples/research_projects/rag/test_distributed_retriever.py delete mode 100644 examples/research_projects/rag/use_own_knowledge_dataset.py delete mode 100644 examples/research_projects/rag/utils_rag.py delete mode 100644 examples/research_projects/robust-speech-event/README.md delete mode 100755 examples/research_projects/robust-speech-event/eval.py delete mode 100755 
examples/research_projects/robust-speech-event/run_speech_recognition_ctc_bnb.py delete mode 100644 examples/research_projects/robust-speech-event/run_speech_recognition_ctc_streaming.py delete mode 100644 examples/research_projects/self-training-text-classification/README.md delete mode 100644 examples/research_projects/self-training-text-classification/finetuning.py delete mode 100644 examples/research_projects/self-training-text-classification/requirements.txt delete mode 100755 examples/research_projects/self-training-text-classification/run.sh delete mode 100644 examples/research_projects/self-training-text-classification/selftraining.py delete mode 100644 examples/research_projects/seq2seq-distillation/README.md delete mode 100644 examples/research_projects/seq2seq-distillation/_test_bash_script.py delete mode 100644 examples/research_projects/seq2seq-distillation/_test_make_student.py delete mode 100644 examples/research_projects/seq2seq-distillation/_test_seq2seq_examples.py delete mode 100644 examples/research_projects/seq2seq-distillation/_test_seq2seq_examples_multi_gpu.py delete mode 100644 examples/research_projects/seq2seq-distillation/callbacks.py delete mode 100755 examples/research_projects/seq2seq-distillation/convert_pl_checkpoint_to_hf.py delete mode 100755 examples/research_projects/seq2seq-distillation/distil_marian_enro_teacher.sh delete mode 100755 examples/research_projects/seq2seq-distillation/distil_marian_no_teacher.sh delete mode 100755 examples/research_projects/seq2seq-distillation/distillation.py delete mode 100755 examples/research_projects/seq2seq-distillation/dynamic_bs_example.sh delete mode 100755 examples/research_projects/seq2seq-distillation/finetune.py delete mode 100755 examples/research_projects/seq2seq-distillation/finetune.sh delete mode 100755 examples/research_projects/seq2seq-distillation/finetune_bart_tiny.sh delete mode 100755 examples/research_projects/seq2seq-distillation/finetune_pegasus_xsum.sh delete mode 100755 examples/research_projects/seq2seq-distillation/finetune_t5.sh delete mode 100644 examples/research_projects/seq2seq-distillation/lightning_base.py delete mode 100644 examples/research_projects/seq2seq-distillation/make_student.py delete mode 100644 examples/research_projects/seq2seq-distillation/precomputed_pseudo_labels.md delete mode 100644 examples/research_projects/seq2seq-distillation/requirements.txt delete mode 100755 examples/research_projects/seq2seq-distillation/run_eval.py delete mode 100644 examples/research_projects/seq2seq-distillation/sentence_splitter.py delete mode 100755 examples/research_projects/seq2seq-distillation/train_distilbart_cnn.sh delete mode 100755 examples/research_projects/seq2seq-distillation/train_distilbart_xsum.sh delete mode 100755 examples/research_projects/seq2seq-distillation/train_mbart_cc25_enro.sh delete mode 100644 examples/research_projects/seq2seq-distillation/utils.py delete mode 100644 examples/research_projects/synthid_text/README.md delete mode 100644 examples/research_projects/synthid_text/detector_training.py delete mode 100644 examples/research_projects/synthid_text/requirements.txt delete mode 100644 examples/research_projects/synthid_text/utils.py delete mode 100644 examples/research_projects/tapex/README.md delete mode 100644 examples/research_projects/tapex/requirements.txt delete mode 100644 examples/research_projects/tapex/run_tabfact_with_tapex.py delete mode 100644 examples/research_projects/tapex/run_wikisql_with_tapex.py delete mode 100644 
examples/research_projects/tapex/run_wikitablequestions_with_tapex.py delete mode 100644 examples/research_projects/tapex/wikisql_utils.py delete mode 100644 examples/research_projects/token-healing/README.md delete mode 100644 examples/research_projects/token-healing/run_token_healing.py delete mode 100644 examples/research_projects/visual_bert/README.md delete mode 100644 examples/research_projects/visual_bert/demo.ipynb delete mode 100644 examples/research_projects/visual_bert/extracting_data.py delete mode 100644 examples/research_projects/visual_bert/modeling_frcnn.py delete mode 100644 examples/research_projects/visual_bert/processing_image.py delete mode 100644 examples/research_projects/visual_bert/requirements.txt delete mode 100644 examples/research_projects/visual_bert/utils.py delete mode 100644 examples/research_projects/visual_bert/visualizing_image.py delete mode 100644 examples/research_projects/vqgan-clip/README.md delete mode 100644 examples/research_projects/vqgan-clip/VQGAN_CLIP.py delete mode 100644 examples/research_projects/vqgan-clip/img_processing.py delete mode 100644 examples/research_projects/vqgan-clip/loaders.py delete mode 100644 examples/research_projects/vqgan-clip/requirements.txt delete mode 100644 examples/research_projects/vqgan-clip/utils.py delete mode 100644 examples/research_projects/wav2vec2/FINE_TUNE_XLSR_WAV2VEC2.md delete mode 100644 examples/research_projects/wav2vec2/README.md delete mode 100644 examples/research_projects/wav2vec2/alignment.py delete mode 100644 examples/research_projects/wav2vec2/ds_config_wav2vec2_zero2.json delete mode 100644 examples/research_projects/wav2vec2/ds_config_wav2vec2_zero3.json delete mode 100755 examples/research_projects/wav2vec2/finetune_base_100.sh delete mode 100755 examples/research_projects/wav2vec2/finetune_base_timit_asr.sh delete mode 100755 examples/research_projects/wav2vec2/finetune_large_lv60_100.sh delete mode 100755 examples/research_projects/wav2vec2/finetune_large_lv60_timit_asr.sh delete mode 100755 examples/research_projects/wav2vec2/finetune_large_xlsr_53_arabic_speech_corpus.sh delete mode 100644 examples/research_projects/wav2vec2/finetune_wav2vec2_xlsr_turkish.sh delete mode 100644 examples/research_projects/wav2vec2/requirements.txt delete mode 100644 examples/research_projects/wav2vec2/run_alignment.sh delete mode 100755 examples/research_projects/wav2vec2/run_asr.py delete mode 100644 examples/research_projects/wav2vec2/run_common_voice.py delete mode 100755 examples/research_projects/wav2vec2/run_pretrain.py delete mode 100644 examples/research_projects/wav2vec2/test_wav2vec2_deepspeed.py delete mode 100644 examples/research_projects/wav2vec2/vocab/buckwalter.json delete mode 100644 examples/research_projects/xtreme-s/README.md delete mode 100644 examples/research_projects/xtreme-s/requirements.txt delete mode 100644 examples/research_projects/xtreme-s/run_xtreme_s.py delete mode 100644 examples/research_projects/zero-shot-distillation/README.md delete mode 100644 examples/research_projects/zero-shot-distillation/distill_classifier.py diff --git a/docs/source/ar/bertology.md b/docs/source/ar/bertology.md index d3f95e20d7d..d12d7838906 100644 --- a/docs/source/ar/bertology.md +++ b/docs/source/ar/bertology.md @@ -15,4 +15,4 @@ - الوصول إلى جميع أوزان الانتباه لكل رأس في BERT/GPT/GPT-2، - استرجاع قيم ومشتقات مخرجات الرأس لحساب درجة أهمية الرأس وحذفه كما هو موضح في https://arxiv.org/abs/1905.10650. 
-ولمساعدتك على فهم واستخدام هذه الميزات بسهولة، أضفنا مثالًا برمجيًا محددًا: [bertology.py](https://github.com/huggingface/transformers/tree/main/examples/research_projects/bertology/run_bertology.py) أثناء استخراج المعلومات وتقليص من نموذج تم تدريبه مسبقًا على GLUE. \ No newline at end of file +ولمساعدتك على فهم واستخدام هذه الميزات بسهولة، أضفنا مثالًا برمجيًا محددًا: [bertology.py](https://github.com/huggingface/transformers-research-projects/tree/main/bertology/run_bertology.py) أثناء استخراج المعلومات وتقليص من نموذج تم تدريبه مسبقًا على GLUE. \ No newline at end of file diff --git a/docs/source/ar/run_scripts.md b/docs/source/ar/run_scripts.md index 593d4aec85f..c7aea4eb961 100644 --- a/docs/source/ar/run_scripts.md +++ b/docs/source/ar/run_scripts.md @@ -2,7 +2,7 @@ بالإضافة إلى دفاتر الملاحظات [notebooks](./notebooks) الخاصة بـ 🤗 Transformers، هناك أيضًا نصوص برمجية توضيحية تُظهر كيفية تدريب نموذج لمهمة باستخدام [PyTorch](https://github.com/huggingface/transformers/tree/main/examples/pytorch) أو [TensorFlow](https://github.com/huggingface/transformers/tree/main/examples/tensorflow) أو [JAX/Flax](https://github.com/huggingface/transformers/tree/main/examples/flax). -كما ستجد النصوص البرمجية التي استخدمناها في [مشاريع الأبحاث](https://github.com/huggingface/transformers/tree/main/examples/research_projects) و [الأمثلة القديمة](https://github.com/huggingface/transformers/tree/main/examples/legacy) والتي ساهم بها المجتمع بشكل أساسي. هذه النصوص البرمجية غير مدعومة بشكل نشط وقد تتطلب إصدارًا محددًا من مكتبة 🤗 Transformers والذي من المحتمل أن يكون غير متوافق مع الإصدار الأحدث من المكتبة. +كما ستجد النصوص البرمجية التي استخدمناها في [مشاريع الأبحاث](https://github.com/huggingface/transformers-research-projects/) و [الأمثلة القديمة](https://github.com/huggingface/transformers/tree/main/examples/legacy) والتي ساهم بها المجتمع بشكل أساسي. هذه النصوص البرمجية غير مدعومة بشكل نشط وقد تتطلب إصدارًا محددًا من مكتبة 🤗 Transformers والذي من المحتمل أن يكون غير متوافق مع الإصدار الأحدث من المكتبة. لا يُتوقع أن تعمل النصوص البرمجية التوضيحية بشكل مباشر على كل مشكلة، وقد تحتاج إلى تكييف النص البرمجي مع المشكلة التي تحاول حلها. ولمساعدتك في ذلك، تعرض معظم النصوص البرمجية كيفية معالجة البيانات قبل التدريب بشكل كامل، مما يتيح لك تحريرها حسب الحاجة لحالتك الاستخدام. diff --git a/docs/source/de/index.md b/docs/source/de/index.md index 5ddabb4e738..8aaaa5952c0 100644 --- a/docs/source/de/index.md +++ b/docs/source/de/index.md @@ -88,7 +88,7 @@ Die Bibliothek enthält derzeit JAX-, PyTorch- und TensorFlow-Implementierungen, 1. **[DeiT](model_doc/deit)** (from Facebook) released with the paper [Training data-efficient image transformers & distillation through attention](https://arxiv.org/abs/2012.12877) by Hugo Touvron, Matthieu Cord, Matthijs Douze, Francisco Massa, Alexandre Sablayrolles, Hervé Jégou. 1. **[DETR](model_doc/detr)** (from Facebook) released with the paper [End-to-End Object Detection with Transformers](https://arxiv.org/abs/2005.12872) by Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, Sergey Zagoruyko. 1. **[DialoGPT](model_doc/dialogpt)** (from Microsoft Research) released with the paper [DialoGPT: Large-Scale Generative Pre-training for Conversational Response Generation](https://arxiv.org/abs/1911.00536) by Yizhe Zhang, Siqi Sun, Michel Galley, Yen-Chun Chen, Chris Brockett, Xiang Gao, Jianfeng Gao, Jingjing Liu, Bill Dolan. -1. 
**[DistilBERT](model_doc/distilbert)** (from HuggingFace), released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same method has been applied to compress GPT2 into [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/research_projects/distillation), RoBERTa into [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/research_projects/distillation), Multilingual BERT into [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/research_projects/distillation) and a German version of DistilBERT. +1. **[DistilBERT](model_doc/distilbert)** (from HuggingFace), released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same method has been applied to compress GPT2 into [DistilGPT2](https://github.com/huggingface/transformers-research-projects/tree/main/distillation), RoBERTa into [DistilRoBERTa](https://github.com/huggingface/transformers-research-projects/tree/main/distillation), Multilingual BERT into [DistilmBERT](https://github.com/huggingface/transformers-research-projects/tree/main/distillation) and a German version of DistilBERT. 1. **[DiT](model_doc/dit)** (from Microsoft Research) released with the paper [DiT: Self-supervised Pre-training for Document Image Transformer](https://arxiv.org/abs/2203.02378) by Junlong Li, Yiheng Xu, Tengchao Lv, Lei Cui, Cha Zhang, Furu Wei. 1. **[DPR](model_doc/dpr)** (from Facebook) released with the paper [Dense Passage Retrieval for Open-Domain Question Answering](https://arxiv.org/abs/2004.04906) by Vladimir Karpukhin, Barlas Oğuz, Sewon Min, Patrick Lewis, Ledell Wu, Sergey Edunov, Danqi Chen, and Wen-tau Yih. 1. **[DPT](master/model_doc/dpt)** (from Intel Labs) released with the paper [Vision Transformers for Dense Prediction](https://arxiv.org/abs/2103.13413) by René Ranftl, Alexey Bochkovskiy, Vladlen Koltun. diff --git a/docs/source/de/run_scripts.md b/docs/source/de/run_scripts.md index 17b725827dd..4b62c73276e 100644 --- a/docs/source/de/run_scripts.md +++ b/docs/source/de/run_scripts.md @@ -18,7 +18,7 @@ rendered properly in your Markdown viewer. Neben den 🤗 Transformers [notebooks](./notebooks) gibt es auch Beispielskripte, die zeigen, wie man ein Modell für eine Aufgabe mit [PyTorch](https://github.com/huggingface/transformers/tree/main/examples/pytorch), [TensorFlow](https://github.com/huggingface/transformers/tree/main/examples/tensorflow) oder [JAX/Flax](https://github.com/huggingface/transformers/tree/main/examples/flax) trainiert. -Sie werden auch Skripte finden, die wir in unseren [Forschungsprojekten](https://github.com/huggingface/transformers/tree/main/examples/research_projects) und [Legacy-Beispielen](https://github.com/huggingface/transformers/tree/main/examples/legacy) verwendet haben und die größtenteils von der Community stammen. Diese Skripte werden nicht aktiv gepflegt und erfordern eine bestimmte Version von 🤗 Transformers, die höchstwahrscheinlich nicht mit der neuesten Version der Bibliothek kompatibel ist. 
+Sie werden auch Skripte finden, die wir in unseren [Forschungsprojekten](https://github.com/huggingface/transformers-research-projects/) und [Legacy-Beispielen](https://github.com/huggingface/transformers/tree/main/examples/legacy) verwendet haben und die größtenteils von der Community stammen. Diese Skripte werden nicht aktiv gepflegt und erfordern eine bestimmte Version von 🤗 Transformers, die höchstwahrscheinlich nicht mit der neuesten Version der Bibliothek kompatibel ist. Es wird nicht erwartet, dass die Beispielskripte bei jedem Problem sofort funktionieren. Möglicherweise müssen Sie das Skript an das Problem anpassen, das Sie zu lösen versuchen. Um Ihnen dabei zu helfen, legen die meisten Skripte vollständig offen, wie die Daten vorverarbeitet werden, so dass Sie sie nach Bedarf für Ihren Anwendungsfall bearbeiten können. diff --git a/docs/source/en/model_doc/distilbert.md b/docs/source/en/model_doc/distilbert.md index 66be95fa040..3f949d9443a 100644 --- a/docs/source/en/model_doc/distilbert.md +++ b/docs/source/en/model_doc/distilbert.md @@ -49,7 +49,7 @@ demonstrate its capabilities for on-device computations in a proof-of-concept ex study.* This model was contributed by [victorsanh](https://huggingface.co/victorsanh). This model jax version was -contributed by [kamalkraj](https://huggingface.co/kamalkraj). The original code can be found [here](https://github.com/huggingface/transformers/tree/main/examples/research_projects/distillation). +contributed by [kamalkraj](https://huggingface.co/kamalkraj). The original code can be found [here](https://github.com/huggingface/transformers-research-projects/tree/main/distillation). ## Usage tips diff --git a/docs/source/en/model_doc/layoutlmv3.md b/docs/source/en/model_doc/layoutlmv3.md index 87ff32f3835..5ab998dc3cd 100644 --- a/docs/source/en/model_doc/layoutlmv3.md +++ b/docs/source/en/model_doc/layoutlmv3.md @@ -52,7 +52,7 @@ LayoutLMv3 is nearly identical to LayoutLMv2, so we've also included LayoutLMv2 - Demo notebooks for LayoutLMv3 can be found [here](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/LayoutLMv3). -- Demo scripts can be found [here](https://github.com/huggingface/transformers/tree/main/examples/research_projects/layoutlmv3). +- Demo scripts can be found [here](https://github.com/huggingface/transformers-research-projects/tree/main/layoutlmv3). @@ -61,7 +61,7 @@ LayoutLMv3 is nearly identical to LayoutLMv2, so we've also included LayoutLMv2 -- [`LayoutLMv3ForTokenClassification`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/research_projects/layoutlmv3) and [notebook](https://colab.research.google.com/github/NielsRogge/Transformers-Tutorials/blob/master/LayoutLMv3/Fine_tune_LayoutLMv3_on_FUNSD_(HuggingFace_Trainer).ipynb). +- [`LayoutLMv3ForTokenClassification`] is supported by this [example script](https://github.com/huggingface/transformers-research-projects/tree/main/layoutlmv3) and [notebook](https://colab.research.google.com/github/NielsRogge/Transformers-Tutorials/blob/master/LayoutLMv3/Fine_tune_LayoutLMv3_on_FUNSD_(HuggingFace_Trainer).ipynb). 
- A [notebook](https://colab.research.google.com/github/NielsRogge/Transformers-Tutorials/blob/master/LayoutLMv2/FUNSD/Inference_with_LayoutLMv2ForTokenClassification.ipynb) for how to perform inference with [`LayoutLMv2ForTokenClassification`] and a [notebook](https://colab.research.google.com/github/NielsRogge/Transformers-Tutorials/blob/master/LayoutLMv2/FUNSD/True_inference_with_LayoutLMv2ForTokenClassification_%2B_Gradio_demo.ipynb) for how to perform inference when no labels are available with [`LayoutLMv2ForTokenClassification`]. - A [notebook](https://colab.research.google.com/github/NielsRogge/Transformers-Tutorials/blob/master/LayoutLMv2/FUNSD/Fine_tuning_LayoutLMv2ForTokenClassification_on_FUNSD_using_HuggingFace_Trainer.ipynb) for how to finetune [`LayoutLMv2ForTokenClassification`] with the 🤗 Trainer. - [Token classification task guide](../tasks/token_classification) diff --git a/docs/source/en/model_doc/pegasus.md b/docs/source/en/model_doc/pegasus.md index 46fca71ac0d..bdb61e66d98 100644 --- a/docs/source/en/model_doc/pegasus.md +++ b/docs/source/en/model_doc/pegasus.md @@ -96,7 +96,7 @@ All the [checkpoints](https://huggingface.co/models?search=pegasus) are fine-tun ## Resources -- [Script](https://github.com/huggingface/transformers/tree/main/examples/research_projects/seq2seq-distillation/finetune_pegasus_xsum.sh) to fine-tune pegasus +- [Script](https://github.com/huggingface/transformers-research-projects/tree/main/seq2seq-distillation/finetune_pegasus_xsum.sh) to fine-tune pegasus on the XSUM dataset. Data download instructions at [examples/pytorch/summarization/](https://github.com/huggingface/transformers/tree/main/examples/pytorch/summarization/README.md). - [Causal language modeling task guide](../tasks/language_modeling) - [Translation task guide](../tasks/translation) diff --git a/docs/source/en/model_doc/qdqbert.md b/docs/source/en/model_doc/qdqbert.md index 76555909c76..4c1a485b116 100644 --- a/docs/source/en/model_doc/qdqbert.md +++ b/docs/source/en/model_doc/qdqbert.md @@ -54,7 +54,7 @@ This model was contributed by [shangz](https://huggingface.co/shangz). - QDQBERT model can be loaded from any checkpoint of HuggingFace BERT model (for example *google-bert/bert-base-uncased*), and perform Quantization Aware Training/Post Training Quantization. - A complete example of using QDQBERT model to perform Quatization Aware Training and Post Training Quantization for - SQUAD task can be found at [transformers/examples/research_projects/quantization-qdqbert/](examples/research_projects/quantization-qdqbert/). + SQUAD task can be found at https://github.com/huggingface/transformers-research-projects/tree/main/quantization-qdqbert. ### Set default quantizers diff --git a/docs/source/en/model_doc/visual_bert.md b/docs/source/en/model_doc/visual_bert.md index 55c526d067a..265d482c190 100644 --- a/docs/source/en/model_doc/visual_bert.md +++ b/docs/source/en/model_doc/visual_bert.md @@ -64,7 +64,7 @@ appropriately for the textual and visual parts. The [`BertTokenizer`] is used to encode the text. A custom detector/image processor must be used to get the visual embeddings. The following example notebooks show how to use VisualBERT with Detectron-like models: -- [VisualBERT VQA demo notebook](https://github.com/huggingface/transformers/tree/main/examples/research_projects/visual_bert) : This notebook +- [VisualBERT VQA demo notebook](https://github.com/huggingface/transformers-research-projects/tree/main/visual_bert) : This notebook contains an example on VisualBERT VQA. 
- [Generate Embeddings for VisualBERT (Colab Notebook)](https://colab.research.google.com/drive/1bLGxKdldwqnMVA5x4neY7-l_8fKGWQYI?usp=sharing) : This notebook contains diff --git a/docs/source/en/run_scripts.md b/docs/source/en/run_scripts.md index 8acb0f06e69..37e00c9974c 100644 --- a/docs/source/en/run_scripts.md +++ b/docs/source/en/run_scripts.md @@ -16,7 +16,7 @@ rendered properly in your Markdown viewer. # Training scripts -Transformers provides many example training scripts for deep learning frameworks (PyTorch, TensorFlow, Flax) and tasks in [transformers/examples](https://github.com/huggingface/transformers/tree/main/examples). There are additional scripts in [transformers/research projects](https://github.com/huggingface/transformers/tree/main/examples/research_projects) and [transformers/legacy](https://github.com/huggingface/transformers/tree/main/examples/legacy), but these aren't actively maintained and requires a specific version of Transformers. +Transformers provides many example training scripts for deep learning frameworks (PyTorch, TensorFlow, Flax) and tasks in [transformers/examples](https://github.com/huggingface/transformers/tree/main/examples). There are additional scripts in [transformers/research projects](https://github.com/huggingface/transformers-research-projects/) and [transformers/legacy](https://github.com/huggingface/transformers/tree/main/examples/legacy), but these aren't actively maintained and requires a specific version of Transformers. Example scripts are only examples and you may need to adapt the script to your use-case. To help you with this, most scripts are very transparent in how data is preprocessed, allowing you to edit it as necessary. diff --git a/docs/source/es/bertology.md b/docs/source/es/bertology.md index ed4e12a8d59..c62e5aaf973 100644 --- a/docs/source/es/bertology.md +++ b/docs/source/es/bertology.md @@ -37,5 +37,5 @@ ayudar a acceder a las representaciones internas, principalmente adaptado de la - adquiriendo los valores de salida y gradientes de las heads para poder computar la métrica de importancia de las heads y realizar la poda de heads como se explica en https://arxiv.org/abs/1905.10650. -Para ayudarte a entender y usar estas features, hemos añadido un script específico de ejemplo: [bertology.py](https://github.com/huggingface/transformers/tree/main/examples/research_projects/bertology/run_bertology.py) mientras extraes información y cortas un modelo pre-entrenado en +Para ayudarte a entender y usar estas features, hemos añadido un script específico de ejemplo: [bertology.py](https://github.com/huggingface/transformers-research-projects/tree/main/bertology/run_bertology.py) mientras extraes información y cortas un modelo pre-entrenado en GLUE. diff --git a/docs/source/es/index.md b/docs/source/es/index.md index fe7d65d94e3..3c10e71ebf9 100644 --- a/docs/source/es/index.md +++ b/docs/source/es/index.md @@ -80,7 +80,7 @@ La biblioteca actualmente contiene implementaciones de JAX, PyTorch y TensorFlow 1. **[DeiT](model_doc/deit)** (de Facebook) publicado con el paper [Training data-efficient image transformers & distillation through attention](https://arxiv.org/abs/2012.12877) por Hugo Touvron, Matthieu Cord, Matthijs Douze, Francisco Massa, Alexandre Sablayrolles, Hervé Jégou. 1. 
**[DETR](model_doc/detr)** (de Facebook) publicado con el paper [End-to-End Object Detection with Transformers](https://arxiv.org/abs/2005.12872) por Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, Sergey Zagoruyko. 1. **[DialoGPT](model_doc/dialogpt)** (de Microsoft Research) publicado con el paper [DialoGPT: Large-Scale Generative Pre-training for Conversational Response Generation](https://arxiv.org/abs/1911.00536) por Yizhe Zhang, Siqi Sun, Michel Galley, Yen-Chun Chen, Chris Brockett, Xiang Gao, Jianfeng Gao, Jingjing Liu, Bill Dolan. -1. **[DistilBERT](model_doc/distilbert)** (de HuggingFace), publicado junto con el paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter](https://arxiv.org/abs/1910.01108) por Victor Sanh, Lysandre Debut y Thomas Wolf. Se ha aplicado el mismo método para comprimir GPT2 en [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/research_projects/distillation), RoBERTa en [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/research_projects/distillation), BERT multilingüe en [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/research_projects/distillation) y una versión alemana de DistilBERT. +1. **[DistilBERT](model_doc/distilbert)** (de HuggingFace), publicado junto con el paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter](https://arxiv.org/abs/1910.01108) por Victor Sanh, Lysandre Debut y Thomas Wolf. Se ha aplicado el mismo método para comprimir GPT2 en [DistilGPT2](https://github.com/huggingface/transformers-research-projects/tree/main/distillation), RoBERTa en [DistilRoBERTa](https://github.com/huggingface/transformers-research-projects/tree/main/distillation), BERT multilingüe en [DistilmBERT](https://github.com/huggingface/transformers-research-projects/tree/main/distillation) y una versión alemana de DistilBERT. 1. **[DPR](model_doc/dpr)** (de Facebook) publicado con el paper [Dense Passage Retrieval for Open-Domain Question Answering](https://arxiv.org/abs/2004.04906) por Vladimir Karpukhin, Barlas Oğuz, Sewon Min, Patrick Lewis, Ledell Wu, Sergey Edunov, Danqi Chen, y Wen-tau Yih. 1. **[DPT](master/model_doc/dpt)** (de Intel Labs) publicado con el paper [Vision Transformers for Dense Prediction](https://arxiv.org/abs/2103.13413) por René Ranftl, Alexey Bochkovskiy, Vladlen Koltun. 1. **[EfficientNet](model_doc/efficientnet)** (from Google Research) released with the paper [EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks](https://arxiv.org/abs/1905.11946) by Mingxing Tan and Quoc V. Le. diff --git a/docs/source/es/run_scripts.md b/docs/source/es/run_scripts.md index d9a2b142a8a..a389b2d2fe4 100644 --- a/docs/source/es/run_scripts.md +++ b/docs/source/es/run_scripts.md @@ -18,7 +18,7 @@ rendered properly in your Markdown viewer. Junto con los [notebooks](./notebooks) de 🤗 Transformers, también hay scripts con ejemplos que muestran cómo entrenar un modelo para una tarea en [PyTorch](https://github.com/huggingface/transformers/tree/main/examples/pytorch), [TensorFlow](https://github.com/huggingface/transformers/tree/main/examples/tensorflow), o [JAX/Flax](https://github.com/huggingface/transformers/tree/main/examples/flax). 
-También encontrarás scripts que hemos usado en nuestros [proyectos de investigación](https://github.com/huggingface/transformers/tree/main/examples/research_projects) y [ejemplos pasados](https://github.com/huggingface/transformers/tree/main/examples/legacy) que en su mayoría son aportados por la comunidad. Estos scripts no se mantienen activamente y requieren una versión específica de 🤗 Transformers que probablemente sea incompatible con la última versión de la biblioteca. +También encontrarás scripts que hemos usado en nuestros [proyectos de investigación](https://github.com/huggingface/transformers-research-projects/) y [ejemplos pasados](https://github.com/huggingface/transformers/tree/main/examples/legacy) que en su mayoría son aportados por la comunidad. Estos scripts no se mantienen activamente y requieren una versión específica de 🤗 Transformers que probablemente sea incompatible con la última versión de la biblioteca. No se espera que los scripts de ejemplo funcionen de inmediato en todos los problemas, y es posible que debas adaptar el script al problema que estás tratando de resolver. Para ayudarte con esto, la mayoría de los scripts exponen completamente cómo se preprocesan los datos, lo que te permite editarlos según sea necesario para tu caso de uso. diff --git a/docs/source/fr/index.md b/docs/source/fr/index.md index 51d35b76e87..963afe48ce4 100644 --- a/docs/source/fr/index.md +++ b/docs/source/fr/index.md @@ -98,7 +98,7 @@ La documentation est organisée en 5 parties: 1. **[DETR](model_doc/detr)** (from Facebook) released with the paper [End-to-End Object Detection with Transformers](https://arxiv.org/abs/2005.12872) by Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, Sergey Zagoruyko. 1. **[DialoGPT](model_doc/dialogpt)** (from Microsoft Research) released with the paper [DialoGPT: Large-Scale Generative Pre-training for Conversational Response Generation](https://arxiv.org/abs/1911.00536) by Yizhe Zhang, Siqi Sun, Michel Galley, Yen-Chun Chen, Chris Brockett, Xiang Gao, Jianfeng Gao, Jingjing Liu, Bill Dolan. 1. **[DiNAT](model_doc/dinat)** (from SHI Labs) released with the paper [Dilated Neighborhood Attention Transformer](https://arxiv.org/abs/2209.15001) by Ali Hassani and Humphrey Shi. -1. **[DistilBERT](model_doc/distilbert)** (from HuggingFace), released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same method has been applied to compress GPT2 into [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/research_projects/distillation), RoBERTa into [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/research_projects/distillation), Multilingual BERT into [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/research_projects/distillation) and a German version of DistilBERT. +1. **[DistilBERT](model_doc/distilbert)** (from HuggingFace), released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. 
The same method has been applied to compress GPT2 into [DistilGPT2](https://github.com/huggingface/transformers-research-projects/tree/main/distillation), RoBERTa into [DistilRoBERTa](https://github.com/huggingface/transformers-research-projects/tree/main/distillation), Multilingual BERT into [DistilmBERT](https://github.com/huggingface/transformers-research-projects/tree/main/distillation) and a German version of DistilBERT. 1. **[DiT](model_doc/dit)** (from Microsoft Research) released with the paper [DiT: Self-supervised Pre-training for Document Image Transformer](https://arxiv.org/abs/2203.02378) by Junlong Li, Yiheng Xu, Tengchao Lv, Lei Cui, Cha Zhang, Furu Wei. 1. **[Donut](model_doc/donut)** (from NAVER), released together with the paper [OCR-free Document Understanding Transformer](https://arxiv.org/abs/2111.15664) by Geewook Kim, Teakgyu Hong, Moonbin Yim, Jeongyeon Nam, Jinyoung Park, Jinyeong Yim, Wonseok Hwang, Sangdoo Yun, Dongyoon Han, Seunghyun Park. 1. **[DPR](model_doc/dpr)** (from Facebook) released with the paper [Dense Passage Retrieval for Open-Domain Question Answering](https://arxiv.org/abs/2004.04906) by Vladimir Karpukhin, Barlas Oğuz, Sewon Min, Patrick Lewis, Ledell Wu, Sergey Edunov, Danqi Chen, and Wen-tau Yih. diff --git a/docs/source/fr/run_scripts_fr.md b/docs/source/fr/run_scripts_fr.md index 0344ff2cec3..a68d71035f0 100644 --- a/docs/source/fr/run_scripts_fr.md +++ b/docs/source/fr/run_scripts_fr.md @@ -19,7 +19,7 @@ rendered properly in your Markdown viewer. En plus des [notebooks](./notebooks) de 🤗 Transformers, il existe également des exemples de scripts démontrant comment entraîner un modèle pour une tâche avec [PyTorch](https://github.com/huggingface/transformers/tree/main/examples/pytorch), [TensorFlow](https://github.com/huggingface/transformers/tree/main/examples/tensorflow) ou [JAX/Flax](https://github.com/huggingface/transformers/tree/main/examples/flax). -Vous trouverez également des scripts que nous avons utilisé dans nos [projets de recherche](https://github.com/huggingface/transformers/tree/main/examples/research_projects) et des [exemples "legacy"](https://github.com/huggingface/transformers/tree/main/examples/legacy) qui sont des contributions de la communauté. Ces scripts ne sont pas activement maintenus et nécessitent une version spécifique de 🤗 Transformers qui sera probablement incompatible avec la dernière version de la librairie. +Vous trouverez également des scripts que nous avons utilisés dans nos [projets de recherche](https://github.com/huggingface/transformers-research-projects/) et des [exemples "legacy"](https://github.com/huggingface/transformers/tree/main/examples/legacy) qui sont des contributions de la communauté. Ces scripts ne sont pas activement maintenus et nécessitent une version spécifique de 🤗 Transformers qui sera probablement incompatible avec la dernière version de la librairie. Les exemples de scripts ne sont pas censés fonctionner immédiatement pour chaque problème, et il se peut que vous ayez besoin d'adapter le script au problème que vous essayez de résoudre. Pour vous aider dans cette tâche, la plupart des scripts exposent entièrement la manière dont les données sont prétraitées, vous permettant de les modifier selon vos besoins. diff --git a/docs/source/it/index.md b/docs/source/it/index.md index 76cdc0ad246..bbab23eed60 100644 --- a/docs/source/it/index.md +++ b/docs/source/it/index.md @@ -86,7 +86,7 @@ La libreria attualmente contiene implementazioni in JAX, PyTorch e TensorFlow, p 1.
**[DeiT](model_doc/deit)** (da Facebook) rilasciato con il paper [Training data-efficient image transformers & distillation through attention](https://arxiv.org/abs/2012.12877) da Hugo Touvron, Matthieu Cord, Matthijs Douze, Francisco Massa, Alexandre Sablayrolles, Hervé Jégou. 1. **[DETR](model_doc/detr)** (da Facebook) rilasciato con il paper [End-to-End Object Detection with Transformers](https://arxiv.org/abs/2005.12872) da Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, Sergey Zagoruyko. 1. **[DialoGPT](model_doc/dialogpt)** (da Microsoft Research) rilasciato con il paper [DialoGPT: Large-Scale Generative Pre-training for Conversational Response Generation](https://arxiv.org/abs/1911.00536) da Yizhe Zhang, Siqi Sun, Michel Galley, Yen-Chun Chen, Chris Brockett, Xiang Gao, Jianfeng Gao, Jingjing Liu, Bill Dolan. -1. **[DistilBERT](model_doc/distilbert)** (da HuggingFace), rilasciato assieme al paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter](https://arxiv.org/abs/1910.01108) da Victor Sanh, Lysandre Debut e Thomas Wolf. La stessa tecnica è stata applicata per comprimere GPT2 in [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/research_projects/distillation), RoBERTa in [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/research_projects/distillation), Multilingual BERT in [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/research_projects/distillation) and a German version of DistilBERT. +1. **[DistilBERT](model_doc/distilbert)** (da HuggingFace), rilasciato assieme al paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter](https://arxiv.org/abs/1910.01108) da Victor Sanh, Lysandre Debut e Thomas Wolf. La stessa tecnica è stata applicata per comprimere GPT2 in [DistilGPT2](https://github.com/huggingface/transformers-research-projects/tree/main/distillation), RoBERTa in [DistilRoBERTa](https://github.com/huggingface/transformers-research-projects/tree/main/distillation), Multilingual BERT in [DistilmBERT](https://github.com/huggingface/transformers-research-projects/tree/main/distillation) and a German version of DistilBERT. 1. **[DPR](model_doc/dpr)** (da Facebook) rilasciato con il paper [Dense Passage Retrieval for Open-Domain Question Answering](https://arxiv.org/abs/2004.04906) da Vladimir Karpukhin, Barlas Oğuz, Sewon Min, Patrick Lewis, Ledell Wu, Sergey Edunov, Danqi Chen, e Wen-tau Yih. 1. **[DPT](master/model_doc/dpt)** (da Intel Labs) rilasciato con il paper [Vision Transformers for Dense Prediction](https://arxiv.org/abs/2103.13413) da René Ranftl, Alexey Bochkovskiy, Vladlen Koltun. 1. **[EfficientNet](model_doc/efficientnet)** (from Google Research) released with the paper [EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks](https://arxiv.org/abs/1905.11946) by Mingxing Tan and Quoc V. Le. diff --git a/docs/source/it/run_scripts.md b/docs/source/it/run_scripts.md index b437efb9fb1..b7d13f7019f 100644 --- a/docs/source/it/run_scripts.md +++ b/docs/source/it/run_scripts.md @@ -18,7 +18,7 @@ rendered properly in your Markdown viewer. 
Insieme ai [notebooks](./notebooks) 🤗 Transformers, ci sono anche esempi di script che dimostrano come addestrare un modello per un task con [PyTorch](https://github.com/huggingface/transformers/tree/main/examples/pytorch), [TensorFlow](https://github.com/huggingface/transformers/tree/main/examples/tensorflow), o [JAX/Flax](https://github.com/huggingface/transformers/tree/main/examples/flax). -Troverai anche script che abbiamo usato nei nostri [progetti di ricerca](https://github.com/huggingface/transformers/tree/main/examples/research_projects) e [precedenti esempi](https://github.com/huggingface/transformers/tree/main/examples/legacy) a cui contribuisce per lo più la comunità. Questi script non sono attivamente mantenuti e richiedono una specifica versione di 🤗 Transformers che sarà molto probabilmente incompatibile con l'ultima versione della libreria. +Troverai anche script che abbiamo usato nei nostri [progetti di ricerca](https://github.com/huggingface/transformers-research-projects/) e [precedenti esempi](https://github.com/huggingface/transformers/tree/main/examples/legacy) a cui contribuisce per lo più la comunità. Questi script non sono attivamente mantenuti e richiedono una specifica versione di 🤗 Transformers che sarà molto probabilmente incompatibile con l'ultima versione della libreria. Non è dato per scontato che gli script di esempio funzionino senza apportare modifiche per ogni problema, bensì potrebbe essere necessario adattare lo script al tuo caso specifico. Per aiutarti in ciò, la maggioranza degli script espone le modalità di pre-processamento dei dati, consentendoti di modificare lo script come preferisci. diff --git a/docs/source/ja/bertology.md b/docs/source/ja/bertology.md index 167ed007bbe..2525d5edef4 100644 --- a/docs/source/ja/bertology.md +++ b/docs/source/ja/bertology.md @@ -31,4 +31,4 @@ rendered properly in your Markdown viewer. - BERT/GPT/GPT-2の各ヘッドの注意重みにアクセスできます。 - ヘッドの出力値と勾配を取得し、ヘッドの重要性スコアを計算し、[論文リンク](https://arxiv.org/abs/1905.10650)で説明されているようにヘッドを削減できます。 -これらの機能を理解し、使用するのを支援するために、特定のサンプルスクリプト「[bertology.py](https://github.com/huggingface/transformers/tree/main/examples/research_projects/bertology/run_bertology.py)」を追加しました。このスクリプトは、GLUEで事前トレーニングされたモデルから情報を抽出し、ヘッドを削減する役割を果たします。 +これらの機能を理解し、使用するのを支援するために、特定のサンプルスクリプト「[bertology.py](https://github.com/huggingface/transformers-research-projects/tree/main/bertology/run_bertology.py)」を追加しました。このスクリプトは、GLUEで事前トレーニングされたモデルから情報を抽出し、ヘッドを削減する役割を果たします。 diff --git a/docs/source/ja/index.md b/docs/source/ja/index.md index c3baa0888fc..d606662ed83 100644 --- a/docs/source/ja/index.md +++ b/docs/source/ja/index.md @@ -95,7 +95,7 @@ rendered properly in your Markdown viewer. 1. **[DETR](https://huggingface.co/docs/transformers/model_doc/detr)** (Facebook から) Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, Sergey Zagoruyko から公開された研究論文: [End-to-End Object Detection with Transformers](https://arxiv.org/abs/2005.12872) 1. **[DialoGPT](https://huggingface.co/docs/transformers/model_doc/dialogpt)** (Microsoft Research から) Yizhe Zhang, Siqi Sun, Michel Galley, Yen-Chun Chen, Chris Brockett, Xiang Gao, Jianfeng Gao, Jingjing Liu, Bill Dolan から公開された研究論文: [DialoGPT: Large-Scale Generative Pre-training for Conversational Response Generation](https://arxiv.org/abs/1911.00536) 1. **[DiNAT](https://huggingface.co/docs/transformers/model_doc/dinat)** (SHI Labs から) Ali Hassani and Humphrey Shi から公開された研究論文: [Dilated Neighborhood Attention Transformer](https://arxiv.org/abs/2209.15001) -1. 
**[DistilBERT](https://huggingface.co/docs/transformers/model_doc/distilbert)** (HuggingFace から), Victor Sanh, Lysandre Debut and Thomas Wolf. 同じ手法で GPT2, RoBERTa と Multilingual BERT の圧縮を行いました.圧縮されたモデルはそれぞれ [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/research_projects/distillation)、[DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/research_projects/distillation)、[DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/research_projects/distillation) と名付けられました. 公開された研究論文: [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter](https://arxiv.org/abs/1910.01108) +1. **[DistilBERT](https://huggingface.co/docs/transformers/model_doc/distilbert)** (HuggingFace から), Victor Sanh, Lysandre Debut and Thomas Wolf. 同じ手法で GPT2, RoBERTa と Multilingual BERT の圧縮を行いました.圧縮されたモデルはそれぞれ [DistilGPT2](https://github.com/huggingface/transformers-research-projects/tree/main/distillation)、[DistilRoBERTa](https://github.com/huggingface/transformers-research-projects/tree/main/distillation)、[DistilmBERT](https://github.com/huggingface/transformers-research-projects/tree/main/distillation) と名付けられました. 公開された研究論文: [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter](https://arxiv.org/abs/1910.01108) 1. **[DiT](https://huggingface.co/docs/transformers/model_doc/dit)** (Microsoft Research から) Junlong Li, Yiheng Xu, Tengchao Lv, Lei Cui, Cha Zhang, Furu Wei から公開された研究論文: [DiT: Self-supervised Pre-training for Document Image Transformer](https://arxiv.org/abs/2203.02378) 1. **[Donut](https://huggingface.co/docs/transformers/model_doc/donut)** (NAVER から), Geewook Kim, Teakgyu Hong, Moonbin Yim, Jeongyeon Nam, Jinyoung Park, Jinyeong Yim, Wonseok Hwang, Sangdoo Yun, Dongyoon Han, Seunghyun Park から公開された研究論文: [OCR-free Document Understanding Transformer](https://arxiv.org/abs/2111.15664) 1. **[DPR](https://huggingface.co/docs/transformers/model_doc/dpr)** (Facebook から) Vladimir Karpukhin, Barlas Oğuz, Sewon Min, Patrick Lewis, Ledell Wu, Sergey Edunov, Danqi Chen, and Wen-tau Yih から公開された研究論文: [Dense Passage Retrieval for Open-Domain Question Answering](https://arxiv.org/abs/2004.04906) diff --git a/docs/source/ja/run_scripts.md b/docs/source/ja/run_scripts.md index af99d1c6da9..69437819e36 100644 --- a/docs/source/ja/run_scripts.md +++ b/docs/source/ja/run_scripts.md @@ -18,7 +18,7 @@ rendered properly in your Markdown viewer. 
🤗 Transformersの[notebooks](./notebooks/README)と一緒に、[PyTorch](https://github.com/huggingface/transformers/tree/main/examples/pytorch)、[TensorFlow](https://github.com/huggingface/transformers/tree/main/examples/tensorflow)、または[JAX/Flax](https://github.com/huggingface/transformers/tree/main/examples/flax)を使用してモデルをトレーニングする方法を示すサンプルスクリプトもあります。 -また、私たちの[研究プロジェクト](https://github.com/huggingface/transformers/tree/main/examples/research_projects)や[レガシーの例](https://github.com/huggingface/transformers/tree/main/examples/legacy)で使用したスクリプトも見つかります。これらのスクリプトは現在メンテナンスされておらず、おそらく最新バージョンのライブラリと互換性がない特定の🤗 Transformersのバージョンが必要です。 +また、私たちの[研究プロジェクト](https://github.com/huggingface/transformers-research-projects/)や[レガシーの例](https://github.com/huggingface/transformers/tree/main/examples/legacy)で使用したスクリプトも見つかります。これらのスクリプトは現在メンテナンスされておらず、おそらく最新バージョンのライブラリと互換性がない特定の🤗 Transformersのバージョンが必要です。 サンプルスクリプトはすべての問題でそのまま動作することは期待されておらず、解決しようとしている問題にスクリプトを適応させる必要があるかもしれません。この点をサポートするために、ほとんどのスクリプトはデータがどのように前処理されているかを完全に公開し、必要に応じて編集できるようにしています。 diff --git a/docs/source/ko/bertology.md b/docs/source/ko/bertology.md index 7b4f3dc4c49..1f69a038170 100644 --- a/docs/source/ko/bertology.md +++ b/docs/source/ko/bertology.md @@ -38,4 +38,4 @@ BERT와 같은 대규모 트랜스포머의 내부 동작을 조사하는 연구 - BERT/GPT/GPT-2의 각 헤드의 모든 어텐션 가중치에 접근하기, - 헤드의 출력 값과 그래디언트를 검색하여 헤드 중요도 점수를 계산하고 https://arxiv.org/abs/1905.10650에서 설명된 대로 헤드를 제거하는 기능을 제공합니다. -이러한 기능들을 이해하고 직접 사용해볼 수 있도록 [bertology.py](https://github.com/huggingface/transformers/tree/main/examples/research_projects/bertology/run_bertology.py) 예제 스크립트를 추가했습니다. 이 예제 스크립트에서는 GLUE에 대해 사전훈련된 모델에서 정보를 추출하고 모델을 가지치기(prune)해봅니다. +이러한 기능들을 이해하고 직접 사용해볼 수 있도록 [bertology.py](https://github.com/huggingface/transformers-research-projects/tree/main/bertology/run_bertology.py) 예제 스크립트를 추가했습니다. 이 예제 스크립트에서는 GLUE에 대해 사전훈련된 모델에서 정보를 추출하고 모델을 가지치기(prune)해봅니다. diff --git a/docs/source/ko/index.md b/docs/source/ko/index.md index 0726085c5b3..bd95cbc0ab0 100644 --- a/docs/source/ko/index.md +++ b/docs/source/ko/index.md @@ -88,7 +88,7 @@ rendered properly in your Markdown viewer. 1. **[DeiT](model_doc/deit)** (from Facebook) released with the paper [Training data-efficient image transformers & distillation through attention](https://arxiv.org/abs/2012.12877) by Hugo Touvron, Matthieu Cord, Matthijs Douze, Francisco Massa, Alexandre Sablayrolles, Hervé Jégou. 1. **[DETR](model_doc/detr)** (from Facebook) released with the paper [End-to-End Object Detection with Transformers](https://arxiv.org/abs/2005.12872) by Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, Sergey Zagoruyko. 1. **[DialoGPT](model_doc/dialogpt)** (from Microsoft Research) released with the paper [DialoGPT: Large-Scale Generative Pre-training for Conversational Response Generation](https://arxiv.org/abs/1911.00536) by Yizhe Zhang, Siqi Sun, Michel Galley, Yen-Chun Chen, Chris Brockett, Xiang Gao, Jianfeng Gao, Jingjing Liu, Bill Dolan. -1. **[DistilBERT](model_doc/distilbert)** (from HuggingFace), released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. 
The same method has been applied to compress GPT2 into [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/research_projects/distillation), RoBERTa into [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/research_projects/distillation), Multilingual BERT into [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/research_projects/distillation) and a German version of DistilBERT. +1. **[DistilBERT](model_doc/distilbert)** (from HuggingFace), released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same method has been applied to compress GPT2 into [DistilGPT2](https://github.com/huggingface/transformers-research-projects/tree/main/distillation), RoBERTa into [DistilRoBERTa](https://github.com/huggingface/transformers-research-projects/tree/main/distillation), Multilingual BERT into [DistilmBERT](https://github.com/huggingface/transformers-research-projects/tree/main/distillation) and a German version of DistilBERT. 1. **[DiT](model_doc/dit)** (from Microsoft Research) released with the paper [DiT: Self-supervised Pre-training for Document Image Transformer](https://arxiv.org/abs/2203.02378) by Junlong Li, Yiheng Xu, Tengchao Lv, Lei Cui, Cha Zhang, Furu Wei. 1. **[Donut](model_doc/donut)** (from NAVER), released together with the paper [OCR-free Document Understanding Transformer](https://arxiv.org/abs/2111.15664) by Geewook Kim, Teakgyu Hong, Moonbin Yim, Jeongyeon Nam, Jinyoung Park, Jinyeong Yim, Wonseok Hwang, Sangdoo Yun, Dongyoon Han, Seunghyun Park. 1. **[DPR](model_doc/dpr)** (from Facebook) released with the paper [Dense Passage Retrieval for Open-Domain Question Answering](https://arxiv.org/abs/2004.04906) by Vladimir Karpukhin, Barlas Oğuz, Sewon Min, Patrick Lewis, Ledell Wu, Sergey Edunov, Danqi Chen, and Wen-tau Yih. diff --git a/docs/source/ko/run_scripts.md b/docs/source/ko/run_scripts.md index 715a949dde4..70520f1a97f 100644 --- a/docs/source/ko/run_scripts.md +++ b/docs/source/ko/run_scripts.md @@ -18,7 +18,7 @@ rendered properly in your Markdown viewer. 🤗 Transformers 노트북과 함께 [PyTorch](https://github.com/huggingface/transformers/tree/main/examples/pytorch), [TensorFlow](https://github.com/huggingface/transformers/tree/main/examples/tensorflow), 또는 [JAX/Flax](https://github.com/huggingface/transformers/tree/main/examples/flax)를 사용해 특정 태스크에 대한 모델을 훈련하는 방법을 보여주는 예제 스크립트도 있습니다. -또한 [연구 프로젝트](https://github.com/huggingface/transformers/tree/main/examples/research_projects) 및 [레거시 예제](https://github.com/huggingface/transformers/tree/main/examples/legacy)에서 대부분 커뮤니티에서 제공한 스크립트를 찾을 수 있습니다. +또한 [연구 프로젝트](https://github.com/huggingface/transformers-research-projects/) 및 [레거시 예제](https://github.com/huggingface/transformers/tree/main/examples/legacy)에서 대부분 커뮤니티에서 제공한 스크립트를 찾을 수 있습니다. 이러한 스크립트는 적극적으로 유지 관리되지 않으며 최신 버전의 라이브러리와 호환되지 않을 가능성이 높은 특정 버전의 🤗 Transformers를 필요로 합니다. 예제 스크립트가 모든 문제에서 바로 작동하는 것은 아니며, 해결하려는 문제에 맞게 스크립트를 변경해야 할 수도 있습니다. diff --git a/docs/source/ms/index.md b/docs/source/ms/index.md index f51c43c9bd0..e0adb8a8a8e 100644 --- a/docs/source/ms/index.md +++ b/docs/source/ms/index.md @@ -104,7 +104,7 @@ Dokumentasi disusun kepada lima bahagian: 1. 
**[DETR](model_doc/detr)** (from Facebook) released with the paper [End-to-End Object Detection with Transformers](https://arxiv.org/abs/2005.12872) by Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, Sergey Zagoruyko. 1. **[DialoGPT](model_doc/dialogpt)** (from Microsoft Research) released with the paper [DialoGPT: Large-Scale Generative Pre-training for Conversational Response Generation](https://arxiv.org/abs/1911.00536) by Yizhe Zhang, Siqi Sun, Michel Galley, Yen-Chun Chen, Chris Brockett, Xiang Gao, Jianfeng Gao, Jingjing Liu, Bill Dolan. 1. **[DiNAT](model_doc/dinat)** (from SHI Labs) released with the paper [Dilated Neighborhood Attention Transformer](https://arxiv.org/abs/2209.15001) by Ali Hassani and Humphrey Shi. -1. **[DistilBERT](model_doc/distilbert)** (from HuggingFace), released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same method has been applied to compress GPT2 into [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/research_projects/distillation), RoBERTa into [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/research_projects/distillation), Multilingual BERT into [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/research_projects/distillation) and a German version of DistilBERT. +1. **[DistilBERT](model_doc/distilbert)** (from HuggingFace), released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same method has been applied to compress GPT2 into [DistilGPT2](https://github.com/huggingface/transformers-research-projects/tree/main/distillation), RoBERTa into [DistilRoBERTa](https://github.com/huggingface/transformers-research-projects/tree/main/distillation), Multilingual BERT into [DistilmBERT](https://github.com/huggingface/transformers-research-projects/tree/main/distillation) and a German version of DistilBERT. 1. **[DiT](model_doc/dit)** (from Microsoft Research) released with the paper [DiT: Self-supervised Pre-training for Document Image Transformer](https://arxiv.org/abs/2203.02378) by Junlong Li, Yiheng Xu, Tengchao Lv, Lei Cui, Cha Zhang, Furu Wei. 1. **[Donut](model_doc/donut)** (from NAVER), released together with the paper [OCR-free Document Understanding Transformer](https://arxiv.org/abs/2111.15664) by Geewook Kim, Teakgyu Hong, Moonbin Yim, Jeongyeon Nam, Jinyoung Park, Jinyeong Yim, Wonseok Hwang, Sangdoo Yun, Dongyoon Han, Seunghyun Park. 1. **[DPR](model_doc/dpr)** (from Facebook) released with the paper [Dense Passage Retrieval for Open-Domain Question Answering](https://arxiv.org/abs/2004.04906) by Vladimir Karpukhin, Barlas Oğuz, Sewon Min, Patrick Lewis, Ledell Wu, Sergey Edunov, Danqi Chen, and Wen-tau Yih. diff --git a/docs/source/pt/index.md b/docs/source/pt/index.md index 18dbcbc06b8..365933bd658 100644 --- a/docs/source/pt/index.md +++ b/docs/source/pt/index.md @@ -93,7 +93,7 @@ Atualmente a biblioteca contém implementações do PyTorch, TensorFlow e JAX, p 1. **[DeiT](model_doc/deit)** (from Facebook) released with the paper [Training data-efficient image transformers & distillation through attention](https://arxiv.org/abs/2012.12877) by Hugo Touvron, Matthieu Cord, Matthijs Douze, Francisco Massa, Alexandre Sablayrolles, Hervé Jégou. 1. 
**[DETR](model_doc/detr)** (from Facebook) released with the paper [End-to-End Object Detection with Transformers](https://arxiv.org/abs/2005.12872) by Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, Sergey Zagoruyko. 1. **[DialoGPT](model_doc/dialogpt)** (from Microsoft Research) released with the paper [DialoGPT: Large-Scale Generative Pre-training for Conversational Response Generation](https://arxiv.org/abs/1911.00536) by Yizhe Zhang, Siqi Sun, Michel Galley, Yen-Chun Chen, Chris Brockett, Xiang Gao, Jianfeng Gao, Jingjing Liu, Bill Dolan. -1. **[DistilBERT](model_doc/distilbert)** (from HuggingFace), released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same method has been applied to compress GPT2 into [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/research_projects/distillation), RoBERTa into [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/research_projects/distillation), Multilingual BERT into [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/research_projects/distillation) and a German version of DistilBERT. +1. **[DistilBERT](model_doc/distilbert)** (from HuggingFace), released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same method has been applied to compress GPT2 into [DistilGPT2](https://github.com/huggingface/transformers-research-projects/tree/main/distillation), RoBERTa into [DistilRoBERTa](https://github.com/huggingface/transformers-research-projects/tree/main/distillation), Multilingual BERT into [DistilmBERT](https://github.com/huggingface/transformers-research-projects/tree/main/distillation) and a German version of DistilBERT. 1. **[DPR](model_doc/dpr)** (from Facebook) released with the paper [Dense Passage Retrieval for Open-Domain Question Answering](https://arxiv.org/abs/2004.04906) by Vladimir Karpukhin, Barlas Oğuz, Sewon Min, Patrick Lewis, Ledell Wu, Sergey Edunov, Danqi Chen, and Wen-tau Yih. 1. **[DPT](master/model_doc/dpt)** (from Intel Labs) released with the paper [Vision Transformers for Dense Prediction](https://arxiv.org/abs/2103.13413) by René Ranftl, Alexey Bochkovskiy, Vladlen Koltun. 1. **[EfficientNet](model_doc/efficientnet)** (from Google Research) released with the paper [EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks](https://arxiv.org/abs/1905.11946) by Mingxing Tan and Quoc V. Le. diff --git a/docs/source/pt/run_scripts.md b/docs/source/pt/run_scripts.md index d4cc3973608..ad19a8fdea0 100644 --- a/docs/source/pt/run_scripts.md +++ b/docs/source/pt/run_scripts.md @@ -18,7 +18,7 @@ rendered properly in your Markdown viewer. Junto com os 🤗 Transformers [notebooks](./notebooks), também há scripts de exemplo demonstrando como treinar um modelo para uma tarefa com [PyTorch](https://github.com/huggingface/transformers/tree/main/examples/pytorch), [TensorFlow](https://github.com/huggingface/transformers/tree/main/examples/tensorflow) ou [JAX/Flax](https://github.com/huggingface/transformers/tree/main/examples/flax). 
-Você também encontrará scripts que usamos em nossos [projetos de pesquisa](https://github.com/huggingface/transformers/tree/main/examples/research_projects) e [exemplos legados](https://github.com/huggingface/transformers/tree/main/examples/legacy) que são principalmente contribuições da comunidade. Esses scripts não são mantidos ativamente e exigem uma versão específica de 🤗 Transformers que provavelmente será incompatível com a versão mais recente da biblioteca. +Você também encontrará scripts que usamos em nossos [projetos de pesquisa](https://github.com/huggingface/transformers-research-projects/) e [exemplos legados](https://github.com/huggingface/transformers/tree/main/examples/legacy) que são principalmente contribuições da comunidade. Esses scripts não são mantidos ativamente e exigem uma versão específica de 🤗 Transformers que provavelmente será incompatível com a versão mais recente da biblioteca. Não se espera que os scripts de exemplo funcionem imediatamente em todos os problemas, você pode precisar adaptar o script ao problema que está tentando resolver. Para ajudá-lo com isso, a maioria dos scripts expõe totalmente como os dados são pré-processados, permitindo que você os edite conforme necessário para seu caso de uso. diff --git a/docs/source/zh/bertology.md b/docs/source/zh/bertology.md index 9b39f948339..e7df7593a2b 100644 --- a/docs/source/zh/bertology.md +++ b/docs/source/zh/bertology.md @@ -30,4 +30,4 @@ http://www.apache.org/licenses/LICENSE-2.0 - 访问BERT/GPT/GPT-2每个注意力头的所有注意力权重, - 检索注意力头的输出值和梯度,以便计算头的重要性得分并对头进行剪枝,详情可见论文:https://arxiv.org/abs/1905.10650。 -为了帮助您理解和使用这些功能,我们添加了一个具体的示例脚本:[bertology.py](https://github.com/huggingface/transformers/tree/main/examples/research_projects/bertology/run_bertology.py),该脚本可以对一个在 GLUE 数据集上预训练的模型进行信息提取与剪枝。 \ No newline at end of file +为了帮助您理解和使用这些功能,我们添加了一个具体的示例脚本:[bertology.py](https://github.com/huggingface/transformers-research-projects/tree/main/bertology/run_bertology.py),该脚本可以对一个在 GLUE 数据集上预训练的模型进行信息提取与剪枝。 \ No newline at end of file diff --git a/docs/source/zh/run_scripts.md b/docs/source/zh/run_scripts.md index d058e97d1ad..8c21266afce 100644 --- a/docs/source/zh/run_scripts.md +++ b/docs/source/zh/run_scripts.md @@ -18,7 +18,7 @@ rendered properly in your Markdown viewer. 除了 🤗 Transformers [notebooks](./notebooks),还有示例脚本演示了如何使用[PyTorch](https://github.com/huggingface/transformers/tree/main/examples/pytorch)、[TensorFlow](https://github.com/huggingface/transformers/tree/main/examples/tensorflow)或[JAX/Flax](https://github.com/huggingface/transformers/tree/main/examples/flax)训练模型以解决特定任务。 -您还可以在这些示例中找到我们在[研究项目](https://github.com/huggingface/transformers/tree/main/examples/research_projects)和[遗留示例](https://github.com/huggingface/transformers/tree/main/examples/legacy)中使用过的脚本,这些脚本主要是由社区贡献的。这些脚本已不再被积极维护,需要使用特定版本的🤗 Transformers, 可能与库的最新版本不兼容。 +您还可以在这些示例中找到我们在[研究项目](https://github.com/huggingface/transformers-research-projects/)和[遗留示例](https://github.com/huggingface/transformers/tree/main/examples/legacy)中使用过的脚本,这些脚本主要是由社区贡献的。这些脚本已不再被积极维护,需要使用特定版本的🤗 Transformers, 可能与库的最新版本不兼容。 示例脚本可能无法在初始配置下直接解决每个问题,您可能需要根据要解决的问题调整脚本。为了帮助您,大多数脚本都完全暴露了数据预处理的方式,允许您根据需要对其进行编辑。 diff --git a/examples/README.md b/examples/README.md index 20b1d86fcd6..86c1cdbb503 100644 --- a/examples/README.md +++ b/examples/README.md @@ -17,7 +17,7 @@ limitations under the License. We host a wide range of example scripts for multiple learning frameworks. 
Simply choose your favorite: [TensorFlow](https://github.com/huggingface/transformers/tree/main/examples/tensorflow), [PyTorch](https://github.com/huggingface/transformers/tree/main/examples/pytorch) or [JAX/Flax](https://github.com/huggingface/transformers/tree/main/examples/flax). -We also have some [research projects](https://github.com/huggingface/transformers/tree/main/examples/research_projects), as well as some [legacy examples](https://github.com/huggingface/transformers/tree/main/examples/legacy). Note that unlike the main examples these are not actively maintained, and may require specific older versions of dependencies in order to run. +We also have some [research projects](https://github.com/huggingface/transformers-research-projects/), as well as some [legacy examples](https://github.com/huggingface/transformers/tree/main/examples/legacy). Note that unlike the main examples these are not actively maintained, and may require specific older versions of dependencies in order to run. While we strive to present as many use cases as possible, the example scripts are just that - examples. It is expected that they won't work out-of-the-box on your specific problem and that you will be required to change a few lines of code to adapt them to your needs. To help you with that, most of the examples fully expose the preprocessing of the data, allowing you to tweak and edit them as required. diff --git a/examples/legacy/seq2seq/README.md b/examples/legacy/seq2seq/README.md index f574ccabda2..fb826129a8e 100644 --- a/examples/legacy/seq2seq/README.md +++ b/examples/legacy/seq2seq/README.md @@ -17,7 +17,7 @@ limitations under the License. # Sequence-to-Sequence Training and Evaluation This directory contains examples for finetuning and evaluating transformers on summarization and translation tasks. -For deprecated `bertabs` instructions, see [`bertabs/README.md`](https://github.com/huggingface/transformers/blob/main/examples/research_projects/bertabs/README.md). +For deprecated `bertabs` instructions, see https://github.com/huggingface/transformers-research-projects/blob/main/bertabs/README.md. ### Supported Architectures diff --git a/examples/pytorch/language-modeling/README.md b/examples/pytorch/language-modeling/README.md index b13cebde5f5..700d1a2b561 100644 --- a/examples/pytorch/language-modeling/README.md +++ b/examples/pytorch/language-modeling/README.md @@ -177,7 +177,7 @@ sure all your batches have the same length. ### Whole word masking -This part was moved to `examples/research_projects/mlm_wwm`. +This part was moved to https://github.com/huggingface/transformers-research-projects/tree/main/mlm_wwm. ### XLNet and permutation language modeling diff --git a/examples/pytorch/summarization/README.md b/examples/pytorch/summarization/README.md index 93c0bbccef6..0d332564de8 100644 --- a/examples/pytorch/summarization/README.md +++ b/examples/pytorch/summarization/README.md @@ -18,7 +18,7 @@ limitations under the License. This directory contains examples for finetuning and evaluating transformers on summarization tasks. Please tag @patil-suraj with any issues/unexpected behaviors, or send a PR! -For deprecated `bertabs` instructions, see [`bertabs/README.md`](https://github.com/huggingface/transformers/blob/main/examples/research_projects/bertabs/README.md). +For deprecated `bertabs` instructions, see https://github.com/huggingface/transformers-research-projects/blob/main/bertabs/README.md. 
For the old `finetune_trainer.py` and related utils, see [`examples/legacy/seq2seq`](https://github.com/huggingface/transformers/blob/main/examples/legacy/seq2seq). ### Supported Architectures diff --git a/examples/pytorch/translation/README.md b/examples/pytorch/translation/README.md index 74ca16ccb0b..8285355fb0b 100644 --- a/examples/pytorch/translation/README.md +++ b/examples/pytorch/translation/README.md @@ -18,7 +18,7 @@ limitations under the License. This directory contains examples for finetuning and evaluating transformers on translation tasks. Please tag @patil-suraj with any issues/unexpected behaviors, or send a PR! -For deprecated `bertabs` instructions, see [`bertabs/README.md`](https://github.com/huggingface/transformers/blob/main/examples/research_projects/bertabs/README.md). +For deprecated `bertabs` instructions, see https://github.com/huggingface/transformers-research-projects/blob/main/bertabs/README.md. For the old `finetune_trainer.py` and related utils, see [`examples/legacy/seq2seq`](https://github.com/huggingface/transformers/blob/main/examples/legacy/seq2seq). ### Supported Architectures diff --git a/examples/research_projects/README.md b/examples/research_projects/README.md index b2f5d431f25..e8a0ecd8c4e 100644 --- a/examples/research_projects/README.md +++ b/examples/research_projects/README.md @@ -1,5 +1,5 @@ D - C --> D - A --> E - C --> E - A --> F - C --> F - D --> O - E --> O - F --> O - B --> O - - Also includes several additional tricks. - - Args: - head_count (int): number of parallel heads - model_dim (int): the dimension of keys/values/queries, - must be divisible by head_count - dropout (float): dropout parameter - """ - - def __init__(self, head_count, model_dim, dropout=0.1, use_final_linear=True): - assert model_dim % head_count == 0 - self.dim_per_head = model_dim // head_count - self.model_dim = model_dim - - super().__init__() - self.head_count = head_count - - self.linear_keys = nn.Linear(model_dim, head_count * self.dim_per_head) - self.linear_values = nn.Linear(model_dim, head_count * self.dim_per_head) - self.linear_query = nn.Linear(model_dim, head_count * self.dim_per_head) - self.softmax = nn.Softmax(dim=-1) - self.dropout = nn.Dropout(dropout) - self.use_final_linear = use_final_linear - if self.use_final_linear: - self.final_linear = nn.Linear(model_dim, model_dim) - - def forward( - self, - key, - value, - query, - mask=None, - layer_cache=None, - type=None, - predefined_graph_1=None, - ): - """ - Compute the context vector and the attention vectors. - - Args: - key (`FloatTensor`): set of `key_len` - key vectors `[batch, key_len, dim]` - value (`FloatTensor`): set of `key_len` - value vectors `[batch, key_len, dim]` - query (`FloatTensor`): set of `query_len` - query vectors `[batch, query_len, dim]` - mask: binary mask indicating which keys have - non-zero attention `[batch, query_len, key_len]` - Returns: - (`FloatTensor`, `FloatTensor`) : - - * output context vectors `[batch, query_len, dim]` - * one of the attention vectors `[batch, query_len, key_len]` - """ - batch_size = key.size(0) - dim_per_head = self.dim_per_head - head_count = self.head_count - - def shape(x): - """projection""" - return x.view(batch_size, -1, head_count, dim_per_head).transpose(1, 2) - - def unshape(x): - """compute context""" - return x.transpose(1, 2).contiguous().view(batch_size, -1, head_count * dim_per_head) - - # 1) Project key, value, and query. 
- if layer_cache is not None: - if type == "self": - query, key, value = ( - self.linear_query(query), - self.linear_keys(query), - self.linear_values(query), - ) - - key = shape(key) - value = shape(value) - - if layer_cache is not None: - device = key.device - if layer_cache["self_keys"] is not None: - key = torch.cat((layer_cache["self_keys"].to(device), key), dim=2) - if layer_cache["self_values"] is not None: - value = torch.cat((layer_cache["self_values"].to(device), value), dim=2) - layer_cache["self_keys"] = key - layer_cache["self_values"] = value - elif type == "context": - query = self.linear_query(query) - if layer_cache is not None: - if layer_cache["memory_keys"] is None: - key, value = self.linear_keys(key), self.linear_values(value) - key = shape(key) - value = shape(value) - else: - key, value = ( - layer_cache["memory_keys"], - layer_cache["memory_values"], - ) - layer_cache["memory_keys"] = key - layer_cache["memory_values"] = value - else: - key, value = self.linear_keys(key), self.linear_values(value) - key = shape(key) - value = shape(value) - else: - key = self.linear_keys(key) - value = self.linear_values(value) - query = self.linear_query(query) - key = shape(key) - value = shape(value) - - query = shape(query) - - # 2) Calculate and scale scores. - query = query / math.sqrt(dim_per_head) - scores = torch.matmul(query, key.transpose(2, 3)) - - if mask is not None: - mask = mask.unsqueeze(1).expand_as(scores) - scores = scores.masked_fill(mask, -1e18) - - # 3) Apply attention dropout and compute context vectors. - - attn = self.softmax(scores) - - if predefined_graph_1 is not None: - attn_masked = attn[:, -1] * predefined_graph_1 - attn_masked = attn_masked / (torch.sum(attn_masked, 2).unsqueeze(2) + 1e-9) - - attn = torch.cat([attn[:, :-1], attn_masked.unsqueeze(1)], 1) - - drop_attn = self.dropout(attn) - if self.use_final_linear: - context = unshape(torch.matmul(drop_attn, value)) - output = self.final_linear(context) - return output - else: - context = torch.matmul(drop_attn, value) - return context - - -class DecoderState: - """Interface for grouping together the current state of a recurrent - decoder. In the simplest case just represents the hidden state of - the model. But can also be used for implementing various forms of - input_feeding and non-recurrent models. - - Modules need to implement this to utilize beam search decoding. - """ - - def detach(self): - """Need to document this""" - self.hidden = tuple([_.detach() for _ in self.hidden]) - self.input_feed = self.input_feed.detach() - - def beam_update(self, idx, positions, beam_size): - """Need to document this""" - for e in self._all: - sizes = e.size() - br = sizes[1] - if len(sizes) == 3: - sent_states = e.view(sizes[0], beam_size, br // beam_size, sizes[2])[:, :, idx] - else: - sent_states = e.view(sizes[0], beam_size, br // beam_size, sizes[2], sizes[3])[:, :, idx] - - sent_states.data.copy_(sent_states.data.index_select(1, positions)) - - def map_batch_fn(self, fn): - raise NotImplementedError() - - -class TransformerDecoderState(DecoderState): - """Transformer Decoder state base class""" - - def __init__(self, src): - """ - Args: - src (FloatTensor): a sequence of source words tensors - with optional feature tensors, of size (len x batch). - """ - self.src = src - self.previous_input = None - self.previous_layer_inputs = None - self.cache = None - - @property - def _all(self): - """ - Contains attributes that need to be updated in self.beam_update(). 
- """ - if self.previous_input is not None and self.previous_layer_inputs is not None: - return (self.previous_input, self.previous_layer_inputs, self.src) - else: - return (self.src,) - - def detach(self): - if self.previous_input is not None: - self.previous_input = self.previous_input.detach() - if self.previous_layer_inputs is not None: - self.previous_layer_inputs = self.previous_layer_inputs.detach() - self.src = self.src.detach() - - def update_state(self, new_input, previous_layer_inputs): - state = TransformerDecoderState(self.src) - state.previous_input = new_input - state.previous_layer_inputs = previous_layer_inputs - return state - - def _init_cache(self, memory_bank, num_layers): - self.cache = {} - - for l in range(num_layers): - layer_cache = {"memory_keys": None, "memory_values": None} - layer_cache["self_keys"] = None - layer_cache["self_values"] = None - self.cache["layer_{}".format(l)] = layer_cache - - def repeat_beam_size_times(self, beam_size): - """Repeat beam_size times along batch dimension.""" - self.src = self.src.data.repeat(1, beam_size, 1) - - def map_batch_fn(self, fn): - def _recursive_map(struct, batch_dim=0): - for k, v in struct.items(): - if v is not None: - if isinstance(v, dict): - _recursive_map(v) - else: - struct[k] = fn(v, batch_dim) - - self.src = fn(self.src, 0) - if self.cache is not None: - _recursive_map(self.cache) - - -def gelu(x): - return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) - - -class PositionwiseFeedForward(nn.Module): - """A two-layer Feed-Forward-Network with residual layer norm. - - Args: - d_model (int): the size of input for the first-layer of the FFN. - d_ff (int): the hidden layer size of the second-layer - of the FNN. - dropout (float): dropout probability in :math:`[0, 1)`. - """ - - def __init__(self, d_model, d_ff, dropout=0.1): - super().__init__() - self.w_1 = nn.Linear(d_model, d_ff) - self.w_2 = nn.Linear(d_ff, d_model) - self.layer_norm = nn.LayerNorm(d_model, eps=1e-6) - self.actv = gelu - self.dropout_1 = nn.Dropout(dropout) - self.dropout_2 = nn.Dropout(dropout) - - def forward(self, x): - inter = self.dropout_1(self.actv(self.w_1(self.layer_norm(x)))) - output = self.dropout_2(self.w_2(inter)) - return output + x - - -# -# TRANSLATOR -# The following code is used to generate summaries using the -# pre-trained weights and beam search. -# - - -def build_predictor(args, tokenizer, symbols, model, logger=None): - # we should be able to refactor the global scorer a lot - scorer = GNMTGlobalScorer(args.alpha, length_penalty="wu") - translator = Translator(args, model, tokenizer, symbols, global_scorer=scorer, logger=logger) - return translator - - -class GNMTGlobalScorer: - """ - NMT re-ranking score from - "Google's Neural Machine Translation System" :cite:`wu2016google` - - Args: - alpha (float): length parameter - beta (float): coverage parameter - """ - - def __init__(self, alpha, length_penalty): - self.alpha = alpha - penalty_builder = PenaltyBuilder(length_penalty) - self.length_penalty = penalty_builder.length_penalty() - - def score(self, beam, logprobs): - """ - Rescores a prediction based on penalty functions - """ - normalized_probs = self.length_penalty(beam, logprobs, self.alpha) - return normalized_probs - - -class PenaltyBuilder: - """ - Returns the Length and Coverage Penalty function for Beam Search. 
- - Args: - length_pen (str): option name of length pen - cov_pen (str): option name of cov pen - """ - - def __init__(self, length_pen): - self.length_pen = length_pen - - def length_penalty(self): - if self.length_pen == "wu": - return self.length_wu - elif self.length_pen == "avg": - return self.length_average - else: - return self.length_none - - """ - Below are all the different penalty terms implemented so far - """ - - def length_wu(self, beam, logprobs, alpha=0.0): - """ - NMT length re-ranking score from - "Google's Neural Machine Translation System" :cite:`wu2016google`. - """ - - modifier = ((5 + len(beam.next_ys)) ** alpha) / ((5 + 1) ** alpha) - return logprobs / modifier - - def length_average(self, beam, logprobs, alpha=0.0): - """ - Returns the average probability of tokens in a sequence. - """ - return logprobs / len(beam.next_ys) - - def length_none(self, beam, logprobs, alpha=0.0, beta=0.0): - """ - Returns unmodified scores. - """ - return logprobs - - -class Translator: - """ - Uses a model to translate a batch of sentences. - - Args: - model (:obj:`onmt.modules.NMTModel`): - NMT model to use for translation - fields (dict of Fields): data fields - beam_size (int): size of beam to use - n_best (int): number of translations produced - max_length (int): maximum length output to produce - global_scores (:obj:`GlobalScorer`): - object to rescore final translations - copy_attn (bool): use copy attention during translation - beam_trace (bool): trace beam search for debugging - logger(logging.Logger): logger. - """ - - def __init__(self, args, model, vocab, symbols, global_scorer=None, logger=None): - self.logger = logger - - self.args = args - self.model = model - self.generator = self.model.generator - self.vocab = vocab - self.symbols = symbols - self.start_token = symbols["BOS"] - self.end_token = symbols["EOS"] - - self.global_scorer = global_scorer - self.beam_size = args.beam_size - self.min_length = args.min_length - self.max_length = args.max_length - - def translate(self, batch, step, attn_debug=False): - """Generates summaries from one batch of data.""" - self.model.eval() - with torch.no_grad(): - batch_data = self.translate_batch(batch) - translations = self.from_batch(batch_data) - return translations - - def translate_batch(self, batch, fast=False): - """ - Translate a batch of sentences. - - Mostly a wrapper around :obj:`Beam`. - - Args: - batch (:obj:`Batch`): a batch from a dataset object - fast (bool): enables fast beam search (may not support all features) - """ - with torch.no_grad(): - return self._fast_translate_batch(batch, self.max_length, min_length=self.min_length) - - # Where the beam search lives - # I have no idea why it is being called from the method above - def _fast_translate_batch(self, batch, max_length, min_length=0): - """Beam Search using the encoder inputs contained in `batch`.""" - - # The batch object is funny - # Instead of just looking at the size of the arguments we encapsulate - # a size argument. - # Where is it defined? - beam_size = self.beam_size - batch_size = batch.batch_size - src = batch.src - segs = batch.segs - mask_src = batch.mask_src - - src_features = self.model.bert(src, segs, mask_src) - dec_states = self.model.decoder.init_decoder_state(src, src_features, with_cache=True) - device = src_features.device - - # Tile states and memory beam_size times. 
- dec_states.map_batch_fn(lambda state, dim: tile(state, beam_size, dim=dim)) - src_features = tile(src_features, beam_size, dim=0) - batch_offset = torch.arange(batch_size, dtype=torch.long, device=device) - beam_offset = torch.arange(0, batch_size * beam_size, step=beam_size, dtype=torch.long, device=device) - alive_seq = torch.full([batch_size * beam_size, 1], self.start_token, dtype=torch.long, device=device) - - # Give full probability to the first beam on the first step. - topk_log_probs = torch.tensor([0.0] + [float("-inf")] * (beam_size - 1), device=device).repeat(batch_size) - - # Structure that holds finished hypotheses. - hypotheses = [[] for _ in range(batch_size)] # noqa: F812 - - results = {} - results["predictions"] = [[] for _ in range(batch_size)] # noqa: F812 - results["scores"] = [[] for _ in range(batch_size)] # noqa: F812 - results["gold_score"] = [0] * batch_size - results["batch"] = batch - - for step in range(max_length): - decoder_input = alive_seq[:, -1].view(1, -1) - - # Decoder forward. - decoder_input = decoder_input.transpose(0, 1) - - dec_out, dec_states = self.model.decoder(decoder_input, src_features, dec_states, step=step) - - # Generator forward. - log_probs = self.generator(dec_out.transpose(0, 1).squeeze(0)) - vocab_size = log_probs.size(-1) - - if step < min_length: - log_probs[:, self.end_token] = -1e20 - - # Multiply probs by the beam probability. - log_probs += topk_log_probs.view(-1).unsqueeze(1) - - alpha = self.global_scorer.alpha - length_penalty = ((5.0 + (step + 1)) / 6.0) ** alpha - - # Flatten probs into a list of possibilities. - curr_scores = log_probs / length_penalty - - if self.args.block_trigram: - cur_len = alive_seq.size(1) - if cur_len > 3: - for i in range(alive_seq.size(0)): - fail = False - words = [int(w) for w in alive_seq[i]] - words = [self.vocab.ids_to_tokens[w] for w in words] - words = " ".join(words).replace(" ##", "").split() - if len(words) <= 3: - continue - trigrams = [(words[i - 1], words[i], words[i + 1]) for i in range(1, len(words) - 1)] - trigram = tuple(trigrams[-1]) - if trigram in trigrams[:-1]: - fail = True - if fail: - curr_scores[i] = -10e20 - - curr_scores = curr_scores.reshape(-1, beam_size * vocab_size) - topk_scores, topk_ids = curr_scores.topk(beam_size, dim=-1) - - # Recover log probs. - topk_log_probs = topk_scores * length_penalty - - # Resolve beam origin and true word ids. - topk_beam_index = topk_ids.div(vocab_size) - topk_ids = topk_ids.fmod(vocab_size) - - # Map beam_index to batch_index in the flat representation. - batch_index = topk_beam_index + beam_offset[: topk_beam_index.size(0)].unsqueeze(1) - select_indices = batch_index.view(-1) - - # Append last prediction. - alive_seq = torch.cat([alive_seq.index_select(0, select_indices), topk_ids.view(-1, 1)], -1) - - is_finished = topk_ids.eq(self.end_token) - if step + 1 == max_length: - is_finished.fill_(1) - # End condition is top beam is finished. - end_condition = is_finished[:, 0].eq(1) - # Save finished hypotheses. - if is_finished.any(): - predictions = alive_seq.view(-1, beam_size, alive_seq.size(-1)) - for i in range(is_finished.size(0)): - b = batch_offset[i] - if end_condition[i]: - is_finished[i].fill_(1) - finished_hyp = is_finished[i].nonzero().view(-1) - # Store finished hypotheses for this batch. - for j in finished_hyp: - hypotheses[b].append((topk_scores[i, j], predictions[i, j, 1:])) - # If the batch reached the end, save the n_best hypotheses. 
- if end_condition[i]: - best_hyp = sorted(hypotheses[b], key=lambda x: x[0], reverse=True) - score, pred = best_hyp[0] - - results["scores"][b].append(score) - results["predictions"][b].append(pred) - non_finished = end_condition.eq(0).nonzero().view(-1) - # If all sentences are translated, no need to go further. - if len(non_finished) == 0: - break - # Remove finished batches for the next step. - topk_log_probs = topk_log_probs.index_select(0, non_finished) - batch_index = batch_index.index_select(0, non_finished) - batch_offset = batch_offset.index_select(0, non_finished) - alive_seq = predictions.index_select(0, non_finished).view(-1, alive_seq.size(-1)) - # Reorder states. - select_indices = batch_index.view(-1) - src_features = src_features.index_select(0, select_indices) - dec_states.map_batch_fn(lambda state, dim: state.index_select(dim, select_indices)) - - return results - - def from_batch(self, translation_batch): - batch = translation_batch["batch"] - assert len(translation_batch["gold_score"]) == len(translation_batch["predictions"]) - batch_size = batch.batch_size - - preds, _, _, tgt_str, src = ( - translation_batch["predictions"], - translation_batch["scores"], - translation_batch["gold_score"], - batch.tgt_str, - batch.src, - ) - - translations = [] - for b in range(batch_size): - pred_sents = self.vocab.convert_ids_to_tokens([int(n) for n in preds[b][0]]) - pred_sents = " ".join(pred_sents).replace(" ##", "") - gold_sent = " ".join(tgt_str[b].split()) - raw_src = [self.vocab.ids_to_tokens[int(t)] for t in src[b]][:500] - raw_src = " ".join(raw_src) - translation = (pred_sents, gold_sent, raw_src) - translations.append(translation) - - return translations - - -def tile(x, count, dim=0): - """ - Tiles x on dimension dim count times. - """ - perm = list(range(len(x.size()))) - if dim != 0: - perm[0], perm[dim] = perm[dim], perm[0] - x = x.permute(perm).contiguous() - out_size = list(x.size()) - out_size[0] *= count - batch = x.size(0) - x = x.view(batch, -1).transpose(0, 1).repeat(count, 1).transpose(0, 1).contiguous().view(*out_size) - if dim != 0: - x = x.permute(perm).contiguous() - return x - - -# -# Optimizer for training. We keep this here in case we want to add -# a finetuning script. -# - - -class BertSumOptimizer: - """Specific optimizer for BertSum. - - As described in [1], the authors fine-tune BertSum for abstractive - summarization using two Adam Optimizers with different warm-up steps and - learning rate. They also use a custom learning rate scheduler. - - [1] Liu, Yang, and Mirella Lapata. "Text summarization with pretrained encoders." - arXiv preprint arXiv:1908.08345 (2019). 
- """ - - def __init__(self, model, lr, warmup_steps, beta_1=0.99, beta_2=0.999, eps=1e-8): - self.encoder = model.encoder - self.decoder = model.decoder - self.lr = lr - self.warmup_steps = warmup_steps - - self.optimizers = { - "encoder": torch.optim.Adam( - model.encoder.parameters(), - lr=lr["encoder"], - betas=(beta_1, beta_2), - eps=eps, - ), - "decoder": torch.optim.Adam( - model.decoder.parameters(), - lr=lr["decoder"], - betas=(beta_1, beta_2), - eps=eps, - ), - } - - self._step = 0 - self.current_learning_rates = {} - - def _update_rate(self, stack): - return self.lr[stack] * min(self._step ** (-0.5), self._step * self.warmup_steps[stack] ** (-1.5)) - - def zero_grad(self): - self.optimizer_decoder.zero_grad() - self.optimizer_encoder.zero_grad() - - def step(self): - self._step += 1 - for stack, optimizer in self.optimizers.items(): - new_rate = self._update_rate(stack) - for param_group in optimizer.param_groups: - param_group["lr"] = new_rate - optimizer.step() - self.current_learning_rates[stack] = new_rate diff --git a/examples/research_projects/bertabs/requirements.txt b/examples/research_projects/bertabs/requirements.txt deleted file mode 100644 index bc2a3d6a163..00000000000 --- a/examples/research_projects/bertabs/requirements.txt +++ /dev/null @@ -1,5 +0,0 @@ -transformers == 4.38.0 - -# For ROUGE -nltk -py-rouge diff --git a/examples/research_projects/bertabs/run_summarization.py b/examples/research_projects/bertabs/run_summarization.py deleted file mode 100644 index bc13de55899..00000000000 --- a/examples/research_projects/bertabs/run_summarization.py +++ /dev/null @@ -1,347 +0,0 @@ -#! /usr/bin/python3 -import argparse -import logging -import os -import sys -from collections import namedtuple - -import torch -from modeling_bertabs import BertAbs, build_predictor -from torch.utils.data import DataLoader, SequentialSampler -from tqdm import tqdm - -from transformers import BertTokenizer - -from .utils_summarization import ( - CNNDMDataset, - build_mask, - compute_token_type_ids, - encode_for_summarization, - truncate_or_pad, -) - - -logger = logging.getLogger(__name__) -logging.basicConfig(stream=sys.stdout, level=logging.INFO) - - -Batch = namedtuple("Batch", ["document_names", "batch_size", "src", "segs", "mask_src", "tgt_str"]) - - -def evaluate(args): - tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-uncased", do_lower_case=True) - model = BertAbs.from_pretrained("remi/bertabs-finetuned-extractive-abstractive-summarization") - model.to(args.device) - model.eval() - - symbols = { - "BOS": tokenizer.vocab["[unused0]"], - "EOS": tokenizer.vocab["[unused1]"], - "PAD": tokenizer.vocab["[PAD]"], - } - - if args.compute_rouge: - reference_summaries = [] - generated_summaries = [] - - import nltk - import rouge - - nltk.download("punkt") - rouge_evaluator = rouge.Rouge( - metrics=["rouge-n", "rouge-l"], - max_n=2, - limit_length=True, - length_limit=args.beam_size, - length_limit_type="words", - apply_avg=True, - apply_best=False, - alpha=0.5, # Default F1_score - weight_factor=1.2, - stemming=True, - ) - - # these (unused) arguments are defined to keep the compatibility - # with the legacy code and will be deleted in a next iteration. 
- args.result_path = "" - args.temp_dir = "" - - data_iterator = build_data_iterator(args, tokenizer) - predictor = build_predictor(args, tokenizer, symbols, model) - - logger.info("***** Running evaluation *****") - logger.info(" Number examples = %d", len(data_iterator.dataset)) - logger.info(" Batch size = %d", args.batch_size) - logger.info("") - logger.info("***** Beam Search parameters *****") - logger.info(" Beam size = %d", args.beam_size) - logger.info(" Minimum length = %d", args.min_length) - logger.info(" Maximum length = %d", args.max_length) - logger.info(" Alpha (length penalty) = %.2f", args.alpha) - logger.info(" Trigrams %s be blocked", ("will" if args.block_trigram else "will NOT")) - - for batch in tqdm(data_iterator): - batch_data = predictor.translate_batch(batch) - translations = predictor.from_batch(batch_data) - summaries = [format_summary(t) for t in translations] - save_summaries(summaries, args.summaries_output_dir, batch.document_names) - - if args.compute_rouge: - reference_summaries += batch.tgt_str - generated_summaries += summaries - - if args.compute_rouge: - scores = rouge_evaluator.get_scores(generated_summaries, reference_summaries) - str_scores = format_rouge_scores(scores) - save_rouge_scores(str_scores) - print(str_scores) - - -def save_summaries(summaries, path, original_document_name): - """Write the summaries in fies that are prefixed by the original - files' name with the `_summary` appended. - - Attributes: - original_document_names: List[string] - Name of the document that was summarized. - path: string - Path were the summaries will be written - summaries: List[string] - The summaries that we produced. - """ - for summary, document_name in zip(summaries, original_document_name): - # Prepare the summary file's name - if "." in document_name: - bare_document_name = ".".join(document_name.split(".")[:-1]) - extension = document_name.split(".")[-1] - name = bare_document_name + "_summary." + extension - else: - name = document_name + "_summary" - - file_path = os.path.join(path, name) - with open(file_path, "w") as output: - output.write(summary) - - -def format_summary(translation): - """Transforms the output of the `from_batch` function - into nicely formatted summaries. - """ - raw_summary, _, _ = translation - summary = ( - raw_summary.replace("[unused0]", "") - .replace("[unused3]", "") - .replace("[PAD]", "") - .replace("[unused1]", "") - .replace(r" +", " ") - .replace(" [unused2] ", ". 
") - .replace("[unused2]", "") - .strip() - ) - - return summary - - -def format_rouge_scores(scores): - return """\n -****** ROUGE SCORES ****** - -** ROUGE 1 -F1 >> {:.3f} -Precision >> {:.3f} -Recall >> {:.3f} - -** ROUGE 2 -F1 >> {:.3f} -Precision >> {:.3f} -Recall >> {:.3f} - -** ROUGE L -F1 >> {:.3f} -Precision >> {:.3f} -Recall >> {:.3f}""".format( - scores["rouge-1"]["f"], - scores["rouge-1"]["p"], - scores["rouge-1"]["r"], - scores["rouge-2"]["f"], - scores["rouge-2"]["p"], - scores["rouge-2"]["r"], - scores["rouge-l"]["f"], - scores["rouge-l"]["p"], - scores["rouge-l"]["r"], - ) - - -def save_rouge_scores(str_scores): - with open("rouge_scores.txt", "w") as output: - output.write(str_scores) - - -# -# LOAD the dataset -# - - -def build_data_iterator(args, tokenizer): - dataset = load_and_cache_examples(args, tokenizer) - sampler = SequentialSampler(dataset) - - def collate_fn(data): - return collate(data, tokenizer, block_size=512, device=args.device) - - iterator = DataLoader( - dataset, - sampler=sampler, - batch_size=args.batch_size, - collate_fn=collate_fn, - ) - - return iterator - - -def load_and_cache_examples(args, tokenizer): - dataset = CNNDMDataset(args.documents_dir) - return dataset - - -def collate(data, tokenizer, block_size, device): - """Collate formats the data passed to the data loader. - - In particular we tokenize the data batch after batch to avoid keeping them - all in memory. We output the data as a namedtuple to fit the original BertAbs's - API. - """ - data = [x for x in data if not len(x[1]) == 0] # remove empty_files - names = [name for name, _, _ in data] - summaries = [" ".join(summary_list) for _, _, summary_list in data] - - encoded_text = [encode_for_summarization(story, summary, tokenizer) for _, story, summary in data] - encoded_stories = torch.tensor( - [truncate_or_pad(story, block_size, tokenizer.pad_token_id) for story, _ in encoded_text] - ) - encoder_token_type_ids = compute_token_type_ids(encoded_stories, tokenizer.cls_token_id) - encoder_mask = build_mask(encoded_stories, tokenizer.pad_token_id) - - batch = Batch( - document_names=names, - batch_size=len(encoded_stories), - src=encoded_stories.to(device), - segs=encoder_token_type_ids.to(device), - mask_src=encoder_mask.to(device), - tgt_str=summaries, - ) - - return batch - - -def decode_summary(summary_tokens, tokenizer): - """Decode the summary and return it in a format - suitable for evaluation. - """ - summary_tokens = summary_tokens.to("cpu").numpy() - summary = tokenizer.decode(summary_tokens) - sentences = summary.split(".") - sentences = [s + "." for s in sentences] - return sentences - - -def main(): - """The main function defines the interface with the users.""" - parser = argparse.ArgumentParser() - parser.add_argument( - "--documents_dir", - default=None, - type=str, - required=True, - help="The folder where the documents to summarize are located.", - ) - parser.add_argument( - "--summaries_output_dir", - default=None, - type=str, - required=False, - help="The folder in which the summaries should be written. Defaults to the folder where the documents are", - ) - parser.add_argument( - "--compute_rouge", - default=False, - type=bool, - required=False, - help="Compute the ROUGE metrics during evaluation. 
Only available for the CNN/DailyMail dataset.", - ) - # EVALUATION options - parser.add_argument( - "--no_cuda", - default=False, - type=bool, - help="Whether to force the execution on CPU.", - ) - parser.add_argument( - "--batch_size", - default=4, - type=int, - help="Batch size per GPU/CPU for training.", - ) - # BEAM SEARCH arguments - parser.add_argument( - "--min_length", - default=50, - type=int, - help="Minimum number of tokens for the summaries.", - ) - parser.add_argument( - "--max_length", - default=200, - type=int, - help="Maixmum number of tokens for the summaries.", - ) - parser.add_argument( - "--beam_size", - default=5, - type=int, - help="The number of beams to start with for each example.", - ) - parser.add_argument( - "--alpha", - default=0.95, - type=float, - help="The value of alpha for the length penalty in the beam search.", - ) - parser.add_argument( - "--block_trigram", - default=True, - type=bool, - help="Whether to block the existence of repeating trigrams in the text generated by beam search.", - ) - args = parser.parse_args() - - # Select device (distributed not available) - args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") - - # Check the existence of directories - if not args.summaries_output_dir: - args.summaries_output_dir = args.documents_dir - - if not documents_dir_is_valid(args.documents_dir): - raise FileNotFoundError( - "We could not find the directory you specified for the documents to summarize, or it was empty. Please" - " specify a valid path." - ) - os.makedirs(args.summaries_output_dir, exist_ok=True) - - evaluate(args) - - -def documents_dir_is_valid(path): - if not os.path.exists(path): - return False - - file_list = os.listdir(path) - if len(file_list) == 0: - return False - - return True - - -if __name__ == "__main__": - main() diff --git a/examples/research_projects/bertabs/test_utils_summarization.py b/examples/research_projects/bertabs/test_utils_summarization.py deleted file mode 100644 index 18120c9063e..00000000000 --- a/examples/research_projects/bertabs/test_utils_summarization.py +++ /dev/null @@ -1,98 +0,0 @@ -# coding=utf-8 -# Copyright 2019 HuggingFace Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-import unittest - -import numpy as np -import torch - -from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad - - -class SummarizationDataProcessingTest(unittest.TestCase): - def setUp(self): - self.block_size = 10 - - def test_fit_to_block_sequence_too_small(self): - """Pad the sequence with 0 if the sequence is smaller than the block size.""" - sequence = [1, 2, 3, 4] - expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0] - self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output) - - def test_fit_to_block_sequence_fit_exactly(self): - """Do nothing if the sequence is the right size.""" - sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] - expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] - self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output) - - def test_fit_to_block_sequence_too_big(self): - """Truncate the sequence if it is too long.""" - sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13] - expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] - self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output) - - def test_process_story_no_highlights(self): - """Processing a story with no highlights returns an empty list for the summary.""" - raw_story = """It was the year of Our Lord one thousand seven hundred and - seventy-five.\n\nSpiritual revelations were conceded to England at that - favoured period, as at this.""" - _, summary_lines = process_story(raw_story) - self.assertEqual(summary_lines, []) - - def test_process_empty_story(self): - """An empty story returns an empty collection of lines.""" - raw_story = "" - story_lines, summary_lines = process_story(raw_story) - self.assertEqual(story_lines, []) - self.assertEqual(summary_lines, []) - - def test_process_story_with_missing_period(self): - raw_story = ( - "It was the year of Our Lord one thousand seven hundred and " - "seventy-five\n\nSpiritual revelations were conceded to England " - "at that favoured period, as at this.\n@highlight\n\nIt was the best of times" - ) - story_lines, summary_lines = process_story(raw_story) - - expected_story_lines = [ - "It was the year of Our Lord one thousand seven hundred and seventy-five.", - "Spiritual revelations were conceded to England at that favoured period, as at this.", - ] - self.assertEqual(expected_story_lines, story_lines) - - expected_summary_lines = ["It was the best of times."] - self.assertEqual(expected_summary_lines, summary_lines) - - def test_build_mask_no_padding(self): - sequence = torch.tensor([1, 2, 3, 4]) - expected = torch.tensor([1, 1, 1, 1]) - np.testing.assert_array_equal(build_mask(sequence, 0).numpy(), expected.numpy()) - - def test_build_mask(self): - sequence = torch.tensor([1, 2, 3, 4, 23, 23, 23]) - expected = torch.tensor([1, 1, 1, 1, 0, 0, 0]) - np.testing.assert_array_equal(build_mask(sequence, 23).numpy(), expected.numpy()) - - def test_build_mask_with_padding_equal_to_one(self): - sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1]) - expected = torch.tensor([1, 1, 1, 1, 0, 0, 0]) - np.testing.assert_array_equal(build_mask(sequence, 1).numpy(), expected.numpy()) - - def test_compute_token_type_ids(self): - separator = 101 - batch = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]]) - expected = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]]) - - result = compute_token_type_ids(batch, separator) - np.testing.assert_array_equal(result, expected) diff --git 
a/examples/research_projects/bertabs/utils_summarization.py b/examples/research_projects/bertabs/utils_summarization.py deleted file mode 100644 index 716365336bb..00000000000 --- a/examples/research_projects/bertabs/utils_summarization.py +++ /dev/null @@ -1,167 +0,0 @@ -import os -from collections import deque - -import torch -from torch.utils.data import Dataset - - -# ------------ -# Data loading -# ------------ - - -class CNNDMDataset(Dataset): - """Abstracts the dataset used to train seq2seq models. - - The class will process the documents that are located in the specified - folder. The preprocessing will work on any document that is reasonably - formatted. On the CNN/DailyMail dataset it will extract both the story - and the summary. - - CNN/Daily News: - - The CNN/Daily News raw datasets are downloaded from [1]. The stories are - stored in different files; the summary appears at the end of the story as - sentences that are prefixed by the special `@highlight` line. To process - the data, untar both datasets in the same folder, and pass the path to this - folder as the "data_dir argument. The formatting code was inspired by [2]. - - [1] https://cs.nyu.edu/~kcho/ - [2] https://github.com/abisee/cnn-dailymail/ - """ - - def __init__(self, path="", prefix="train"): - """We initialize the class by listing all the documents to summarize. - Files are not read in memory due to the size of some datasets (like CNN/DailyMail). - """ - assert os.path.isdir(path) - - self.documents = [] - story_filenames_list = os.listdir(path) - for story_filename in story_filenames_list: - if "summary" in story_filename: - continue - path_to_story = os.path.join(path, story_filename) - if not os.path.isfile(path_to_story): - continue - self.documents.append(path_to_story) - - def __len__(self): - """Returns the number of documents.""" - return len(self.documents) - - def __getitem__(self, idx): - document_path = self.documents[idx] - document_name = document_path.split("/")[-1] - with open(document_path, encoding="utf-8") as source: - raw_story = source.read() - story_lines, summary_lines = process_story(raw_story) - return document_name, story_lines, summary_lines - - -def process_story(raw_story): - """Extract the story and summary from a story file. - - Arguments: - raw_story (str): content of the story file as an utf-8 encoded string. - - Raises: - IndexError: If the story is empty or contains no highlights. - """ - nonempty_lines = list(filter(lambda x: len(x) != 0, [line.strip() for line in raw_story.split("\n")])) - - # for some unknown reason some lines miss a period, add it - nonempty_lines = [_add_missing_period(line) for line in nonempty_lines] - - # gather article lines - story_lines = [] - lines = deque(nonempty_lines) - while True: - try: - element = lines.popleft() - if element.startswith("@highlight"): - break - story_lines.append(element) - except IndexError: - # if "@highlight" is absent from the file we pop - # all elements until there is None, raising an exception. - return story_lines, [] - - # gather summary lines - summary_lines = list(filter(lambda t: not t.startswith("@highlight"), lines)) - - return story_lines, summary_lines - - -def _add_missing_period(line): - END_TOKENS = [".", "!", "?", "...", "'", "`", '"', "\u2019", "\u2019", ")"] - if line.startswith("@highlight"): - return line - if line[-1] in END_TOKENS: - return line - return line + "." 
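For readers skimming this deleted helper module, here is a minimal usage sketch of `process_story` (added for illustration only; the sample story text is invented and was not part of the original file):

```python
# Minimal sketch of how process_story splits a CNN/DailyMail-style story file:
# article lines come before the first "@highlight", summary lines after it.
raw_story = "Paris is the capital of France\n\n@highlight\n\nParis is France's capital"

story_lines, summary_lines = process_story(raw_story)
# story_lines   -> ["Paris is the capital of France."]   (missing period added)
# summary_lines -> ["Paris is France's capital."]
```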
- - -# -------------------------- -# Encoding and preprocessing -# -------------------------- - - -def truncate_or_pad(sequence, block_size, pad_token_id): - """Adapt the source and target sequences' lengths to the block size. - If the sequence is shorter we append padding token to the right of the sequence. - """ - if len(sequence) > block_size: - return sequence[:block_size] - else: - sequence.extend([pad_token_id] * (block_size - len(sequence))) - return sequence - - -def build_mask(sequence, pad_token_id): - """Builds the mask. The attention mechanism will only attend to positions - with value 1.""" - mask = torch.ones_like(sequence) - idx_pad_tokens = sequence == pad_token_id - mask[idx_pad_tokens] = 0 - return mask - - -def encode_for_summarization(story_lines, summary_lines, tokenizer): - """Encode the story and summary lines, and join them - as specified in [1] by using `[SEP] [CLS]` tokens to separate - sentences. - """ - story_lines_token_ids = [tokenizer.encode(line) for line in story_lines] - story_token_ids = [token for sentence in story_lines_token_ids for token in sentence] - summary_lines_token_ids = [tokenizer.encode(line) for line in summary_lines] - summary_token_ids = [token for sentence in summary_lines_token_ids for token in sentence] - - return story_token_ids, summary_token_ids - - -def compute_token_type_ids(batch, separator_token_id): - """Segment embeddings as described in [1] - - The values {0,1} were found in the repository [2]. - - Attributes: - batch: torch.Tensor, size [batch_size, block_size] - Batch of input. - separator_token_id: int - The value of the token that separates the segments. - - [1] Liu, Yang, and Mirella Lapata. "Text summarization with pretrained encoders." - arXiv preprint arXiv:1908.08345 (2019). - [2] https://github.com/nlpyang/PreSumm (/src/prepro/data_builder.py, commit fac1217) - """ - batch_embeddings = [] - for sequence in batch: - sentence_num = -1 - embeddings = [] - for s in sequence: - if s == separator_token_id: - sentence_num += 1 - embeddings.append(sentence_num % 2) - batch_embeddings.append(embeddings) - return torch.tensor(batch_embeddings) diff --git a/examples/research_projects/bertology/requirements.txt b/examples/research_projects/bertology/requirements.txt deleted file mode 100644 index 99636a7fce1..00000000000 --- a/examples/research_projects/bertology/requirements.txt +++ /dev/null @@ -1 +0,0 @@ -transformers == 4.38.0 diff --git a/examples/research_projects/bertology/run_bertology.py b/examples/research_projects/bertology/run_bertology.py deleted file mode 100644 index 35d096f1649..00000000000 --- a/examples/research_projects/bertology/run_bertology.py +++ /dev/null @@ -1,453 +0,0 @@ -#!/usr/bin/env python3 -# Copyright 2018 CMU and The HuggingFace Inc. team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-"""Bertology: this script shows how you can explore the internals of the models in the library to: -- compute the entropy of the head attentions -- compute the importance of each head -- prune (remove) the low importance head. -Some parts of this script are adapted from the code of Michel et al. (http://arxiv.org/abs/1905.10650) -which is available at https://github.com/pmichel31415/are-16-heads-really-better-than-1 -""" - -import argparse -import logging -import os -from datetime import datetime - -import numpy as np -import torch -from torch import nn -from torch.utils.data import DataLoader, SequentialSampler, Subset -from torch.utils.data.distributed import DistributedSampler -from tqdm import tqdm - -import transformers -from transformers import ( - AutoConfig, - AutoModelForSequenceClassification, - AutoTokenizer, - GlueDataset, - default_data_collator, - glue_compute_metrics, - glue_output_modes, - glue_processors, - set_seed, -) -from transformers.trainer_utils import is_main_process - - -logger = logging.getLogger(__name__) - - -def entropy(p): - """Compute the entropy of a probability distribution""" - plogp = p * torch.log(p) - plogp[p == 0] = 0 - return -plogp.sum(dim=-1) - - -def print_2d_tensor(tensor): - """Print a 2D tensor""" - logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor)))) - for row in range(len(tensor)): - if tensor.dtype != torch.long: - logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data)) - else: - logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data)) - - -def compute_heads_importance( - args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False -): - """This method shows how to compute: - - head attention entropy - - head importance scores according to http://arxiv.org/abs/1905.10650 - """ - # Prepare our tensors - n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads - head_importance = torch.zeros(n_layers, n_heads).to(args.device) - attn_entropy = torch.zeros(n_layers, n_heads).to(args.device) - - if head_mask is None: - head_mask = torch.ones(n_layers, n_heads).to(args.device) - - head_mask.requires_grad_(requires_grad=True) - # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch - if actually_pruned: - head_mask = None - - preds = None - labels = None - tot_tokens = 0.0 - - for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])): - for k, v in inputs.items(): - inputs[k] = v.to(args.device) - - # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below) - outputs = model(**inputs, head_mask=head_mask) - loss, logits, all_attentions = ( - outputs[0], - outputs[1], - outputs[-1], - ) # Loss and logits are the first, attention the last - loss.backward() # Backpropagate to populate the gradients in the head mask - - if compute_entropy: - for layer, attn in enumerate(all_attentions): - masked_entropy = entropy(attn.detach()) * inputs["attention_mask"].float().unsqueeze(1) - attn_entropy[layer] += masked_entropy.sum(-1).sum(0).detach() - - if compute_importance: - head_importance += head_mask.grad.abs().detach() - - # Also store our logits/labels if we want to compute metrics afterwards - if preds is None: - preds = logits.detach().cpu().numpy() - labels = inputs["labels"].detach().cpu().numpy() - else: - preds = np.append(preds, 
logits.detach().cpu().numpy(), axis=0) - labels = np.append(labels, inputs["labels"].detach().cpu().numpy(), axis=0) - - tot_tokens += inputs["attention_mask"].float().detach().sum().data - - # Normalize - attn_entropy /= tot_tokens - head_importance /= tot_tokens - # Layerwise importance normalization - if not args.dont_normalize_importance_by_layer: - exponent = 2 - norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent) - head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20 - - if not args.dont_normalize_global_importance: - head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min()) - - # Print/save matrices - np.save(os.path.join(args.output_dir, "attn_entropy.npy"), attn_entropy.detach().cpu().numpy()) - np.save(os.path.join(args.output_dir, "head_importance.npy"), head_importance.detach().cpu().numpy()) - - logger.info("Attention entropies") - print_2d_tensor(attn_entropy) - logger.info("Head importance scores") - print_2d_tensor(head_importance) - logger.info("Head ranked by importance scores") - head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device) - head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange( - head_importance.numel(), device=args.device - ) - head_ranks = head_ranks.view_as(head_importance) - print_2d_tensor(head_ranks) - - return attn_entropy, head_importance, preds, labels - - -def mask_heads(args, model, eval_dataloader): - """This method shows how to mask head (set some heads to zero), to test the effect on the network, - based on the head importance scores, as described in Michel et al. (http://arxiv.org/abs/1905.10650) - """ - _, head_importance, preds, labels = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False) - preds = np.argmax(preds, axis=1) if args.output_mode == "classification" else np.squeeze(preds) - original_score = glue_compute_metrics(args.task_name, preds, labels)[args.metric_name] - logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold) - - new_head_mask = torch.ones_like(head_importance) - num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount)) - - current_score = original_score - while current_score >= original_score * args.masking_threshold: - head_mask = new_head_mask.clone() # save current head mask - # heads from least important to most - keep only not-masked heads - head_importance[head_mask == 0.0] = float("Inf") - current_heads_to_mask = head_importance.view(-1).sort()[1] - - if len(current_heads_to_mask) <= num_to_mask: - break - - # mask heads - current_heads_to_mask = current_heads_to_mask[:num_to_mask] - logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist())) - new_head_mask = new_head_mask.view(-1) - new_head_mask[current_heads_to_mask] = 0.0 - new_head_mask = new_head_mask.view_as(head_mask) - new_head_mask = new_head_mask.clone().detach() - print_2d_tensor(new_head_mask) - - # Compute metric and head importance again - _, head_importance, preds, labels = compute_heads_importance( - args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask - ) - preds = np.argmax(preds, axis=1) if args.output_mode == "classification" else np.squeeze(preds) - current_score = glue_compute_metrics(args.task_name, preds, labels)[args.metric_name] - logger.info( - "Masking: current score: %f, remaining heads %d (%.1f percents)", - current_score, - new_head_mask.sum(), - 
new_head_mask.sum() / new_head_mask.numel() * 100, - ) - - logger.info("Final head mask") - print_2d_tensor(head_mask) - np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy()) - - return head_mask - - -def prune_heads(args, model, eval_dataloader, head_mask): - """This method shows how to prune head (remove heads weights) based on - the head importance scores as described in Michel et al. (http://arxiv.org/abs/1905.10650) - """ - # Try pruning and test time speedup - # Pruning is like masking but we actually remove the masked weights - before_time = datetime.now() - _, _, preds, labels = compute_heads_importance( - args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask - ) - preds = np.argmax(preds, axis=1) if args.output_mode == "classification" else np.squeeze(preds) - score_masking = glue_compute_metrics(args.task_name, preds, labels)[args.metric_name] - original_time = datetime.now() - before_time - - original_num_params = sum(p.numel() for p in model.parameters()) - heads_to_prune = { - layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask)) - } - - assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item() - model.prune_heads(heads_to_prune) - pruned_num_params = sum(p.numel() for p in model.parameters()) - - before_time = datetime.now() - _, _, preds, labels = compute_heads_importance( - args, - model, - eval_dataloader, - compute_entropy=False, - compute_importance=False, - head_mask=None, - actually_pruned=True, - ) - preds = np.argmax(preds, axis=1) if args.output_mode == "classification" else np.squeeze(preds) - score_pruning = glue_compute_metrics(args.task_name, preds, labels)[args.metric_name] - new_time = datetime.now() - before_time - - logger.info( - "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)", - original_num_params, - pruned_num_params, - pruned_num_params / original_num_params * 100, - ) - logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning) - logger.info("Pruning: speed ratio (new timing / original timing): %f percents", original_time / new_time * 100) - - -def main(): - parser = argparse.ArgumentParser() - # Required parameters - parser.add_argument( - "--data_dir", - default=None, - type=str, - required=True, - help="The input data dir. 
Should contain the .tsv files (or other data files) for the task.", - ) - parser.add_argument( - "--model_name_or_path", - default=None, - type=str, - required=True, - help="Path to pretrained model or model identifier from huggingface.co/models", - ) - parser.add_argument( - "--task_name", - default=None, - type=str, - required=True, - help="The name of the task to train selected in the list: " + ", ".join(glue_processors.keys()), - ) - parser.add_argument( - "--output_dir", - default=None, - type=str, - required=True, - help="The output directory where the model predictions and checkpoints will be written.", - ) - - # Other parameters - parser.add_argument( - "--config_name", - default="", - type=str, - help="Pretrained config name or path if not the same as model_name_or_path", - ) - parser.add_argument( - "--tokenizer_name", - default="", - type=str, - help="Pretrained tokenizer name or path if not the same as model_name_or_path", - ) - parser.add_argument( - "--cache_dir", - default=None, - type=str, - help="Where do you want to store the pre-trained models downloaded from huggingface.co", - ) - parser.add_argument( - "--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances." - ) - parser.add_argument( - "--overwrite_output_dir", action="store_true", help="Whether to overwrite data in output directory" - ) - parser.add_argument( - "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets" - ) - - parser.add_argument( - "--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers" - ) - parser.add_argument( - "--dont_normalize_global_importance", - action="store_true", - help="Don't normalize all importance scores between 0 and 1", - ) - - parser.add_argument( - "--try_masking", action="store_true", help="Whether to try to mask head until a threshold of accuracy." - ) - parser.add_argument( - "--masking_threshold", - default=0.9, - type=float, - help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value).", - ) - parser.add_argument( - "--masking_amount", default=0.1, type=float, help="Amount to heads to masking at each masking step." - ) - parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.") - - parser.add_argument( - "--max_seq_length", - default=128, - type=int, - help=( - "The maximum total input sequence length after WordPiece tokenization. \n" - "Sequences longer than this will be truncated, sequences shorter padded." 
- ), - ) - parser.add_argument("--batch_size", default=1, type=int, help="Batch size.") - - parser.add_argument("--seed", type=int, default=42) - parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus") - parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available") - parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.") - parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.") - args = parser.parse_args() - - if args.server_ip and args.server_port: - # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script - import ptvsd - - print("Waiting for debugger attach") - ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True) - ptvsd.wait_for_attach() - - # Setup devices and distributed training - if args.local_rank == -1 or args.no_cuda: - args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") - args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count() - else: - torch.cuda.set_device(args.local_rank) - args.device = torch.device("cuda", args.local_rank) - args.n_gpu = 1 - torch.distributed.init_process_group(backend="nccl") # Initializes the distributed backend - - # Setup logging - logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN) - logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device, args.n_gpu, bool(args.local_rank != -1))) - # Set the verbosity to info of the Transformers logger (on main process only): - if is_main_process(args.local_rank): - transformers.utils.logging.set_verbosity_info() - transformers.utils.logging.enable_default_handler() - transformers.utils.logging.enable_explicit_format() - - # Set seeds - set_seed(args.seed) - - # Prepare GLUE task - args.task_name = args.task_name.lower() - if args.task_name not in glue_processors: - raise ValueError("Task not found: %s" % (args.task_name)) - processor = glue_processors[args.task_name]() - args.output_mode = glue_output_modes[args.task_name] - label_list = processor.get_labels() - num_labels = len(label_list) - - # Load pretrained model and tokenizer - # - # Distributed training: - # The .from_pretrained methods guarantee that only one local process can concurrently - # download model & vocab. 
- - config = AutoConfig.from_pretrained( - args.config_name if args.config_name else args.model_name_or_path, - num_labels=num_labels, - finetuning_task=args.task_name, - output_attentions=True, - cache_dir=args.cache_dir, - ) - tokenizer = AutoTokenizer.from_pretrained( - args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, - cache_dir=args.cache_dir, - ) - model = AutoModelForSequenceClassification.from_pretrained( - args.model_name_or_path, - from_tf=bool(".ckpt" in args.model_name_or_path), - config=config, - cache_dir=args.cache_dir, - ) - - # Distributed and parallel training - model.to(args.device) - if args.local_rank != -1: - model = nn.parallel.DistributedDataParallel( - model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True - ) - elif args.n_gpu > 1: - model = nn.DataParallel(model) - - # Print/save training arguments - os.makedirs(args.output_dir, exist_ok=True) - torch.save(args, os.path.join(args.output_dir, "run_args.bin")) - logger.info("Training/evaluation parameters %s", args) - - # Prepare dataset for the GLUE task - eval_dataset = GlueDataset(args, tokenizer=tokenizer, mode="dev") - if args.data_subset > 0: - eval_dataset = Subset(eval_dataset, list(range(min(args.data_subset, len(eval_dataset))))) - eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset) - eval_dataloader = DataLoader( - eval_dataset, sampler=eval_sampler, batch_size=args.batch_size, collate_fn=default_data_collator - ) - - # Compute head entropy and importance score - compute_heads_importance(args, model, eval_dataloader) - - # Try head masking (set heads to zero until the score goes under a threshole) - # and head pruning (remove masked heads and see the effect on the network) - if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0: - head_mask = mask_heads(args, model, eval_dataloader) - prune_heads(args, model, eval_dataloader, head_mask) - - -if __name__ == "__main__": - main() diff --git a/examples/research_projects/bertology/run_prune_gpt.py b/examples/research_projects/bertology/run_prune_gpt.py deleted file mode 100644 index d227634c2bf..00000000000 --- a/examples/research_projects/bertology/run_prune_gpt.py +++ /dev/null @@ -1,391 +0,0 @@ -#!/usr/bin/env python3 -"""This script is adapted from the Bertology pruning code (https://github.com/huggingface/transformers/blob/783d7d2629e97c5f0c5f9ef01b8c66410275c204/examples/research_projects/bertology/run_bertology.py) -to prune GPT-like models. The author is @altsoph. 
-""" - -import argparse -import logging -import os -from datetime import datetime - -import numpy as np -import torch -from torch import nn -from torch.utils.data import DataLoader, RandomSampler, TensorDataset -from tqdm import tqdm - -from transformers import GPT2LMHeadModel - - -logger = logging.getLogger(__name__) - - -def save_model(model, dirpath): - # save results - if os.path.exists(dirpath): - if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile( - os.path.join(dirpath, "config.json") - ): - os.remove(os.path.join(dirpath, "config.json")) - if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile( - os.path.join(dirpath, "pytorch_model.bin") - ): - os.remove(os.path.join(dirpath, "pytorch_model.bin")) - else: - os.makedirs(dirpath) - model.save_pretrained(dirpath) - - -def entropy(p, unlogit=False): - """Compute the entropy of a probability distribution""" - exponent = 2 - if unlogit: - p = torch.pow(p, exponent) - plogp = p * torch.log(p) - plogp[p == 0] = 0 - return -plogp.sum(dim=-1) - - -def print_2d_tensor(tensor): - """Print a 2D tensor""" - logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor)))) - for row in range(len(tensor)): - if tensor.dtype != torch.long: - logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data)) - else: - logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data)) - - -def compute_heads_importance( - args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False -): - """This method shows how to compute: - - head attention entropy - - head importance scores according to http://arxiv.org/abs/1905.10650 - """ - # Prepare our tensors - n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads - head_importance = torch.zeros(n_layers, n_heads).to(args.device) - attn_entropy = torch.zeros(n_layers, n_heads).to(args.device) - - if head_mask is None: - head_mask = torch.ones(n_layers, n_heads).to(args.device) - - head_mask.requires_grad_(requires_grad=True) - # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch - if actually_pruned: - head_mask = None - - tot_tokens = 0.0 - total_loss = 0.0 - for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])): - inputs = tuple(t.to(args.device) for t in inputs) - (input_ids,) = inputs - - # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below) - outputs = model(input_ids, labels=input_ids, head_mask=head_mask) - # (loss), lm_logits, presents, (all hidden_states), (attentions) - loss, _, all_attentions = ( - outputs[0], - outputs[1], - outputs[-1], - ) # Loss and logits are the first, attention the last - loss.backward() # Backpropagate to populate the gradients in the head mask - total_loss += loss.detach().cpu().numpy() - if compute_entropy: - for layer, attn in enumerate(all_attentions): - masked_entropy = entropy(attn.detach(), True) - attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach() - - if compute_importance: - head_importance += head_mask.grad.abs().detach() - tot_tokens += torch.ones_like(input_ids).float().detach().sum().data - - # Normalize - attn_entropy /= tot_tokens - head_importance /= tot_tokens - # Layerwise importance normalization - if not args.dont_normalize_importance_by_layer: - exponent = 2 - norm_by_layer = 
torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent) - head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20 - - if not args.dont_normalize_global_importance: - head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min()) - - # Print matrices - if compute_entropy: - logger.info("Attention entropies") - print_2d_tensor(attn_entropy) - if compute_importance: - logger.info("Head importance scores") - print_2d_tensor(head_importance) - logger.info("Head ranked by importance scores") - head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device) - head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange( - head_importance.numel(), device=args.device - ) - head_ranks = head_ranks.view_as(head_importance) - print_2d_tensor(head_ranks) - return attn_entropy, head_importance, total_loss - - -def mask_heads(args, model, eval_dataloader): - """This method shows how to mask head (set some heads to zero), to test the effect on the network, - based on the head importance scores, as described in Michel et al. (http://arxiv.org/abs/1905.10650) - """ - _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False) - original_score = 1 / loss # instead of downsteam score use the LM loss - logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold) - - new_head_mask = torch.ones_like(head_importance) - num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount)) - - current_score = original_score - while current_score >= original_score * args.masking_threshold: - head_mask = new_head_mask.clone().detach() # save current head mask - # heads from least important to most - keep only not-masked heads - head_importance[head_mask == 0.0] = float("Inf") - current_heads_to_mask = head_importance.view(-1).sort()[1] - - if len(current_heads_to_mask) <= num_to_mask: - print("BREAK BY num_to_mask") - break - - # mask heads - current_heads_to_mask = current_heads_to_mask[:num_to_mask] - logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist())) - new_head_mask = new_head_mask.view(-1) - new_head_mask[current_heads_to_mask] = 0.0 - new_head_mask = new_head_mask.view_as(head_mask) - new_head_mask = new_head_mask.clone().detach() - print_2d_tensor(new_head_mask) - - # Compute metric and head importance again - _, head_importance, loss = compute_heads_importance( - args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask - ) - current_score = 1 / loss - logger.info( - "Masking: current score: %f, remaining heads %d (%.1f percents)", - current_score, - new_head_mask.sum(), - new_head_mask.sum() / new_head_mask.numel() * 100, - ) - - logger.info("Final head mask") - print_2d_tensor(head_mask) - np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy()) - - return head_mask - - -def prune_heads(args, model, eval_dataloader, head_mask): - """This method shows how to prune head (remove heads weights) based on - the head importance scores as described in Michel et al. 
(http://arxiv.org/abs/1905.10650) - """ - # Try pruning and test time speedup - # Pruning is like masking but we actually remove the masked weights - before_time = datetime.now() - _, _, loss = compute_heads_importance( - args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask - ) - score_masking = 1 / loss - original_time = datetime.now() - before_time - - original_num_params = sum(p.numel() for p in model.parameters()) - heads_to_prune = { - layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask)) - } - - for k, v in heads_to_prune.items(): - if isinstance(v, int): - heads_to_prune[k] = [ - v, - ] - - assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item() - model.prune_heads(heads_to_prune) - pruned_num_params = sum(p.numel() for p in model.parameters()) - - before_time = datetime.now() - _, _, loss = compute_heads_importance( - args, - model, - eval_dataloader, - compute_entropy=False, - compute_importance=False, - head_mask=None, - actually_pruned=True, - ) - - score_pruning = 1 / loss - new_time = datetime.now() - before_time - - logger.info( - "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)", - original_num_params, - pruned_num_params, - pruned_num_params / original_num_params * 100, - ) - logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning) - logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100) - save_model(model, args.output_dir) - - -def main(): - parser = argparse.ArgumentParser() - # Required parameters - parser.add_argument( - "--data_dir", - default=None, - type=str, - required=True, - help="The input data dir. Should contain the .tsv files (or other data files) for the task.", - ) - parser.add_argument( - "--model_name_or_path", - default=None, - type=str, - required=True, - help="Path to pretrained model or model identifier from huggingface.co/models", - ) - parser.add_argument( - "--output_dir", - default=None, - type=str, - required=True, - help="The output directory where the model predictions and checkpoints will be written.", - ) - - # Other parameters - parser.add_argument( - "--config_name", - default="", - type=str, - help="Pretrained config name or path if not the same as model_name_or_path", - ) - parser.add_argument( - "--tokenizer_name", - default="", - type=str, - help="Pretrained tokenizer name or path if not the same as model_name_or_path", - ) - parser.add_argument( - "--cache_dir", - default=None, - type=str, - help="Where do you want to store the pre-trained models downloaded from s3", - ) - parser.add_argument( - "--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances." - ) - parser.add_argument( - "--overwrite_output_dir", action="store_true", help="Whether to overwrite data in output directory" - ) - parser.add_argument( - "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets" - ) - - parser.add_argument( - "--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers" - ) - parser.add_argument( - "--dont_normalize_global_importance", - action="store_true", - help="Don't normalize all importance scores between 0 and 1", - ) - - parser.add_argument( - "--try_masking", action="store_true", help="Whether to try to mask head until a threshold of accuracy." 
- ) - parser.add_argument( - "--masking_threshold", - default=0.9, - type=float, - help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value).", - ) - parser.add_argument( - "--masking_amount", default=0.1, type=float, help="Amount to heads to masking at each masking step." - ) - parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.") - - parser.add_argument( - "--max_seq_length", - default=128, - type=int, - help=( - "The maximum total input sequence length after WordPiece tokenization. \n" - "Sequences longer than this will be truncated, sequences shorter padded." - ), - ) - parser.add_argument("--batch_size", default=1, type=int, help="Batch size.") - - parser.add_argument("--seed", type=int, default=42) - parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus") - parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available") - parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.") - parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.") - args = parser.parse_args() - - if args.server_ip and args.server_port: - # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script - import ptvsd - - print("Waiting for debugger attach") - ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True) - ptvsd.wait_for_attach() - - # Setup devices and distributed training - if args.local_rank == -1 or args.no_cuda: - args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") - args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count() - else: - torch.cuda.set_device(args.local_rank) - args.device = torch.device("cuda", args.local_rank) - args.n_gpu = 1 - torch.distributed.init_process_group(backend="nccl") # Initializes the distributed backend - - # Setup logging - logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN) - logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device, args.n_gpu, bool(args.local_rank != -1))) - - model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path) - - # Distributed and parallel training - model.to(args.device) - if args.local_rank != -1: - model = nn.parallel.DistributedDataParallel( - model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True - ) - elif args.n_gpu > 1: - model = nn.DataParallel(model) - - # Print/save training arguments - os.makedirs(args.output_dir, exist_ok=True) - torch.save(args, os.path.join(args.output_dir, "run_args.bin")) - logger.info("Training/evaluation parameters %s", args) - - # Prepare dataset - numpy_data = np.concatenate( - [ - np.loadtxt(args.data_dir, dtype=np.int64), - ] - ) - train_tensor_dataset = (torch.from_numpy(numpy_data),) - train_data = TensorDataset(*train_tensor_dataset) - train_sampler = RandomSampler(train_data) - eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size) - - # Compute head entropy and importance score - compute_heads_importance(args, model, eval_dataloader) - - # Try head masking (set heads to zero until the score goes under a threshole) - # and head pruning (remove masked heads and see the effect on the network) - if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0: - 
head_mask = mask_heads(args, model, eval_dataloader) - prune_heads(args, model, eval_dataloader, head_mask) - - -if __name__ == "__main__": - main() diff --git a/examples/research_projects/codeparrot/README.md b/examples/research_projects/codeparrot/README.md deleted file mode 100644 index f0af3d144f7..00000000000 --- a/examples/research_projects/codeparrot/README.md +++ /dev/null @@ -1,316 +0,0 @@ -# CodeParrot 🦜 -


- -## What is this about? -This is an open-source effort to train and evaluate code generation models. CodeParrot 🦜 is a GPT-2 model trained from scratch on Python code. The highlights of this project are: -- initialize and train a GPT-2 language model from scratch for code generation -- train a custom tokenizer adapted for Python code -- clean and deduplicate a large (>100GB) dataset with `datasets` -- train with `accelerate` on multiple GPUs using data parallelism and mixed precision -- continuously push checkpoints to the hub with `huggingface_hub` -- stream the dataset with `datasets` during training to avoid disk bottlenecks -- apply the `code_eval` metric in `datasets` to evaluate on [OpenAI's _HumanEval_ benchmark](https://huggingface.co/datasets/openai_humaneval) -- showcase examples for downstream tasks with code models in [examples](https://github.com/huggingface/transformers/tree/main/examples/research_projects/codeparrot/examples) folder: - - Algorithmic complexity prediction - - Code generation from english text - - Code explanation - -## Installation -To install the dependencies simply run the following command: -```bash -pip install -r requirements.txt -``` - -To reproduce the results you can follow the scripts in the following sections. Note that we don't always show all possible arguments to the scripts. To get the full list of arguments with descriptions you can run the following command on any script: - -```bash -python scripts/some_script.py --help -``` - -Before you run any of the scripts make sure you are logged in and can push to the hub: - -```bash -huggingface-cli login -``` - -Additionally, sure you have git-lfs installed. You can find instructions for how to install it [here](https://git-lfs.github.com/). - -## Dataset -The source of the dataset is the GitHub dump available on Google's [BigQuery](https://cloud.google.com/blog/topics/public-datasets/github-on-bigquery-analyze-all-the-open-source-code). The database was queried for all Python files with less than 1MB in size resulting in a 180GB dataset with over 20M files. The dataset is available on the Hugging Face Hub [here](https://huggingface.co/datasets/transformersbook/codeparrot). - -### Preprocessing -The raw dataset contains many duplicates. We deduplicated and filtered the dataset using the heuristics proposed in OpenAI's Codex [paper](https://arxiv.org/abs/2107.03374) and some new ones: - -- exact deduplication using each file's hash after having removed whistespaces. -- near deduplication using MinHash and Jaccard similarity. MinHash with a Jaccard threshold (default=0.85) is first used to create duplicate clusters. Then these clusters are then reduced to unique files based on the exact Jaccard similarity. See `deduplicate_dataset` in `minhash_deduplication.py` for a detailed description. 
-- filtering files with max line length > 1000 -- filtering files with mean line length > 100 -- fraction of alphanumeric characters < 0.25 -- containing the word "auto-generated" or similar in the first 5 lines -- filtering with a probability of 0.7 of files with a mention of "test file" or "configuration file" or similar in the first 5 lines -- filtering with a probability of 0.7 of files with high occurrence of the keywords "test " or "config" -- filtering with a probability of 0.7 of files without a mention of the keywords `def` , `for`, `while` and `class` -- filtering files that use the assignment operator `=` less than 5 times -- filtering files with ratio between number of characters and number of tokens after tokenization < 1.5 (the average ratio is 3.6) - -The script to process the full dataset can be found in `scripts/preprocessing.py`. Executing the script on 16 vCPUs takes roughly 3h and removes 70% of the original dataset. The cleaned [train](https://huggingface.co/datasets/codeparrot/codeparrot-clean-train-v2) and [validation](https://huggingface.co/datasets/codeparrot/codeparrot-clean-valid-v2) splits are also available on the Hub if you want to skip this step or use the data for another project. - -To execute the preprocessing run the following command: -```bash -python scripts/preprocessing.py \ ---dataset_name transformersbook/codeparrot \ ---output_dir codeparrot-clean -``` -During preprocessing the dataset is downloaded and stored locally as well as caches of the computations. Make sure you have more than 500GB free disk space to execute it. - -### Pretokenization -The tokenization of the data might be slow during the training especially for small models. We provide code to pretokenize the data beforehand in `scripts/pretokenizing.py`, but this step is optional. The dataset is downloaded and stored locally and the tokenized data is pushed to the hub. The tokenized clean [train](https://huggingface.co/datasets/codeparrot/tokenized-codeparrot-train) and [validation](https://huggingface.co/datasets/codeparrot/tokenized-codeparrot-valid) datasets are available if you want to use them directly. - -To execute the pretokenization, for the clean train data for instance, run the following command: -```bash -python scripts/pretokenizing.py \ ---dataset_name codeparrot/codeparrot-clean-train \ ---tokenized_data_repo tokenized-codeparrot-train -``` - -## Tokenizer -Before training a new model for code we create a new tokenizer that is efficient at code tokenization. To train the tokenizer you can run the following command: -```bash -python scripts/bpe_training.py \ - --base_tokenizer openai-community/gpt2 \ - --dataset_name codeparrot/codeparrot-clean-train -``` - -_Note:_ We originally trained the tokenizer on the unprocessed train split of the dataset `transformersbook/codeparrot-train`. - -## Training -The models are randomly initialized and trained from scratch. To initialize a new model you can run: - -```bash -python scripts/initialize_model.py \ ---config_name openai-community/gpt2-large \ ---tokenizer_name codeparrot/codeparrot \ ---model_name codeparrot \ ---push_to_hub True -``` -This will initialize a new model with the architecture and configuration of `openai-community/gpt2-large` and use the tokenizer to appropriately size the input embeddings. Finally, the initilaized model is pushed the hub. - -We can either pass the name of a text dataset or a pretokenized dataset which speeds up training a bit. 
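As a hedged aside (not part of the original README), the dataset streaming mentioned in the highlights needs nothing beyond `datasets` itself; the snippet below is a minimal sketch using the cleaned training split named above, whose raw files live under the `content` key:

```python
from datasets import load_dataset

# Stream the cleaned training split so the >100GB dataset never has to fit on disk.
train_ds = load_dataset("codeparrot/codeparrot-clean-train", split="train", streaming=True)

# Iterate lazily; each element is a dict holding one source file.
first_example = next(iter(train_ds))
print(first_example["content"][:200])
```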
-Now that the tokenizer and model are also ready we can start training the model. The main training script is built with `accelerate` to scale across a wide range of platforms and infrastructure scales. We train two models with [110M](https://huggingface.co/codeparrot/codeparrot-small/) and [1.5B](https://huggingface.co/codeparrot/codeparrot/) parameters for 25-30B tokens on a 16xA100 (40GB) machine which takes 1 day and 1 week, respectively. - -First you need to configure `accelerate` and login to Weights & Biases: - -```bash -accelerate config -wandb login -``` - -Note that during the `accelerate` configuration we enabled FP16. Then to train the large model you can run - -```bash -accelerate launch scripts/codeparrot_training.py -``` - -If you want to train the small model you need to make some modifications: - -```bash -accelerate launch scripts/codeparrot_training.py \ ---model_ckpt codeparrot/codeparrot-small \ ---train_batch_size 12 \ ---valid_batch_size 12 \ ---learning_rate 5e-4 \ ---num_warmup_steps 2000 \ ---gradient_accumulation 1 \ ---gradient_checkpointing False \ ---max_train_steps 150000 \ ---save_checkpoint_steps 15000 -``` - -Recall that you can see the full set of possible options with descriptions (for all scripts) by running: - -```bash -python scripts/codeparrot_training.py --help -``` - -Instead of streaming the dataset from the hub you can also stream it from disk. This can be helpful for long training runs where the connection can be interrupted sometimes. To stream locally you simply need to clone the datasets and replace the dataset name with their path. In this example we store the data in a folder called `data`: - -```bash -git lfs install -mkdir data -git -C "./data" clone https://huggingface.co/datasets/codeparrot/codeparrot-clean-train -git -C "./data" clone https://huggingface.co/datasets/codeparrot/codeparrot-clean-valid -``` - -And then pass the paths to the datasets when we run the training script: - -```bash -accelerate launch scripts/codeparrot_training.py \ ---model_ckpt codeparrot/codeparrot-small \ ---dataset_name_train ./data/codeparrot-clean-train \ ---dataset_name_valid ./data/codeparrot-clean-valid \ ---train_batch_size 12 \ ---valid_batch_size 12 \ ---learning_rate 5e-4 \ ---num_warmup_steps 2000 \ ---gradient_accumulation 1 \ ---gradient_checkpointing False \ ---max_train_steps 150000 \ ---save_checkpoint_steps 15000 -``` - -## Evaluation -For evaluating the language modeling loss on the validation set or any other dataset you can use the following command: -```bash -python scripts/validation_loss.py \ ---model_ckpt codeparrot/codeparrot \ ---dataset_name codeparrot/codeparrot-clean-valid -``` -In addition we evaluate the model on OpenAI's _HumanEval_ benchmark. 
You can run the evaluation with the following command: - -```bash -accelerate launch scripts/human_eval.py --model_ckpt codeparrot/codeparrot \ ---do_sample True \ ---temperature 0.2 \ ---top_p 0.95 \ ---n_samples=200 \ ---HF_ALLOW_CODE_EVAL="0" -``` - -The results, as well as reference values, are shown in the following table: - -| Model | pass@1 | pass@10 | pass@100| -|-------|--------|---------|---------| -|CodeParrot 🦜 (110M) | 3.80% | 6.57% | 12.78% | -|CodeParrot 🦜 (1.5B) | 3.99% | 8.69% | 17.88% | -||||| -|Codex (25M)| 3.21% | 7.1% | 12.89%| -|Codex (85M)| 8.22% | 12.81% | 22.40% | -|Codex (300M)| 13.17%| 20.37% | 36.27% | -|Codex (12B)| 28.81%| 46.81% | 72.31% | -||||| -|GPT-neo (125M)| 0.75% | 1.88% | 2.97% | -|GPT-neo (1.5B)| 4.79% | 7.47% | 16.30% | -|GPT-neo (2.7B)| 6.41% | 11.27% | 21.37% | -|GPT-J (6B)| 11.62% | 15.74% | 27.74% | - -The numbers were obtained by sampling with `T = [0.2, 0.6, 0.8]` and picking the best value for each metric. Both CodeParrot 🦜 models are still underfitted, and longer training would likely improve performance. - -## Demo -Give the model a shot yourself! There are three demos to interact with CodeParrot 🦜: -- [Code generation](https://huggingface.co/spaces/codeparrot/codeparrot-generation) -- [Code highlighting](https://huggingface.co/spaces/codeparrot/codeparrot-highlighting) -- [Comparison to other code models](https://huggingface.co/spaces/codeparrot/loubnabnl/code-generation-models) - -## Training with Megatron -[Megatron](https://github.com/NVIDIA/Megatron-LM) is a framework developed by NVIDIA for training large transformer models. While the CodeParrot code is easy to follow and modify to your needs, the Megatron framework lets you train models faster. Below we explain how to use it. - -### Setup -You can pull an NVIDIA PyTorch Container that comes with all the required installations from [NGC](https://catalog.ngc.nvidia.com/orgs/nvidia/containers/pytorch). See the [documentation](https://docs.nvidia.com/deeplearning/frameworks/pytorch-release-notes/index.html) for more details. - -With the following Docker command you can run the container (`xx.xx` denotes your Docker version) and clone the [Megatron repository](https://github.com/NVIDIA/Megatron-LM) into it: -```bash -docker run --gpus all -it --rm nvcr.io/nvidia/pytorch:xx.xx-py3 -git clone https://github.com/NVIDIA/Megatron-LM -``` - -You also need to add the vocabulary file and merges table of the tokenizer that you trained on code into the container. You can find these files at [vocab.json](https://huggingface.co/codeparrot/codeparrot/raw/main/vocab.json) and [merges.txt](https://huggingface.co/codeparrot/codeparrot/raw/main/merges.txt). -```bash -sudo docker cp vocab.json CONTAINER_ID:/workspace/Megatron-LM -sudo docker cp merges.txt CONTAINER_ID:/workspace/Megatron-LM -``` - -### Data preprocessing -The training data requires preprocessing. First, you need to convert it into a loose JSON format, with one JSON object containing a text sample per line.
In Python, this can be done as follows: -```python -from datasets import load_dataset - -train_data = load_dataset('codeparrot/codeparrot-clean-train', split='train') -train_data.to_json("codeparrot_data.json", lines=True) -``` - -The data is then tokenized, shuffled and processed into a binary format for training using the following command: -```bash -pip install nltk -cd Megatron-LM -python tools/preprocess_data.py \ - --input codeparrot_data.json \ - --output-prefix codeparrot \ - --vocab vocab.json \ - --dataset-impl mmap \ - --tokenizer-type GPT2BPETokenizer \ - --merge-file merges.txt \ - --json-keys content \ - --workers 32 \ - --chunk-size 25 \ - --append-eod -``` -This outputs two files, `codeparrot_content_document.idx` and `codeparrot_content_document.bin`, which are used in the training. - -### Training -You can configure the model architecture and training parameters as shown below, or put them in a bash script that you will run. This runs the 110M parameter CodeParrot pretraining on 8 GPUs, with the same settings as before. Note that the data is partitioned by default into a 969:30:1 ratio for training/validation/test sets. -```bash -GPUS_PER_NODE=8 -MASTER_ADDR=localhost -MASTER_PORT=6001 -NNODES=1 -NODE_RANK=0 -WORLD_SIZE=$(($GPUS_PER_NODE*$NNODES)) -DISTRIBUTED_ARGS="--nproc_per_node $GPUS_PER_NODE --nnodes $NNODES --node_rank $NODE_RANK --master_addr $MASTER_ADDR --master_port $MASTER_PORT" -CHECKPOINT_PATH=/workspace/Megatron-LM/experiments/codeparrot-small -VOCAB_FILE=vocab.json -MERGE_FILE=merges.txt -DATA_PATH=codeparrot_content_document -GPT_ARGS="--num-layers 12 ---hidden-size 768 ---num-attention-heads 12 ---seq-length 1024 ---max-position-embeddings 1024 ---micro-batch-size 12 ---global-batch-size 192 ---lr 0.0005 ---train-iters 150000 ---lr-decay-iters 150000 ---lr-decay-style cosine ---lr-warmup-iters 2000 ---weight-decay .1 ---adam-beta2 .999 ---fp16 ---log-interval 10 ---save-interval 2000 ---eval-interval 200 ---eval-iters 10 -" -TENSORBOARD_ARGS="--tensorboard-dir experiments/tensorboard" -python3 -m torch.distributed.launch $DISTRIBUTED_ARGS \ - pretrain_gpt.py \ - --tensor-model-parallel-size 1 \ - --pipeline-model-parallel-size 1 \ - $GPT_ARGS \ - --vocab-file $VOCAB_FILE \ - --merge-file $MERGE_FILE \ - --save $CHECKPOINT_PATH \ - --load $CHECKPOINT_PATH \ - --data-path $DATA_PATH \ - $TENSORBOARD_ARGS -``` -The training takes almost 12 hours in this setting. - -### Convert model to `transformers` -After training, we want to use the model in `transformers`, e.g. to evaluate it on HumanEval. You can convert it to `transformers` following [this](https://huggingface.co/nvidia/megatron-gpt2-345m) tutorial. For instance, after the training is finished, you can copy the weights of the last iteration (150k) and convert the `model_optim_rng.pt` file to a `pytorch_model.bin` file that is supported by `transformers`.
- -```bash -mkdir -p nvidia/megatron-codeparrot-small -sudo docker cp CONTAINER_ID:/workspace/Megatron-LM/experiments/codeparrot-small/iter_0150000/mp_rank_00/model_optim_rng.pt nvidia/megatron-codeparrot-small -git clone https://github.com/huggingface/transformers.git -git clone https://github.com/NVIDIA/Megatron-LM.git -export PYTHONPATH=Megatron-LM -python transformers/src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py nvidia/megatron-codeparrot-small/model_optim_rng.pt -``` -Be careful: after the conversion, you will need to replace the generated vocabulary file and merges table with the original ones if you plan to load the tokenizer from there. - -## Further Resources -A detailed description of the project can be found in the chapter "Training Transformers from Scratch" in the upcoming O'Reilly book [Natural Language Processing with Transformers](https://learning.oreilly.com/library/view/natural-language-processing/9781098103231/). - -This example was provided by [Leandro von Werra](https://github.com/lvwerra). diff --git a/examples/research_projects/codeparrot/examples/README.md b/examples/research_projects/codeparrot/examples/README.md deleted file mode 100644 index c1980262d82..00000000000 --- a/examples/research_projects/codeparrot/examples/README.md +++ /dev/null @@ -1,58 +0,0 @@ -# Examples -In this folder we showcase some examples of using code models for downstream tasks. - -## Complexity prediction -In this task we want to predict the complexity of Java programs in the [CodeComplex](https://huggingface.co/datasets/codeparrot/codecomplex) dataset. Using the Hugging Face `Trainer`, we finetuned [multilingual CodeParrot](https://huggingface.co/codeparrot/codeparrot-small-multi) and [UniXcoder](https://huggingface.co/microsoft/unixcoder-base-nine) on it, and we used the latter to build this Java complexity prediction [space](https://huggingface.co/spaces/codeparrot/code-complexity-predictor) on the Hugging Face Hub. - -To fine-tune a model on this dataset, you can use the following command: - -```bash -python train_complexity_predictor.py \ - --model_ckpt microsoft/unixcoder-base-nine \ - --num_epochs 60 \ - --num_warmup_steps 10 \ - --batch_size 8 \ - --learning_rate 5e-4 -``` - -## Code generation: text to python -In this task we want to train a model to generate code from English text. We finetuned Codeparrot-small on [github-jupyter-text-to-code](https://huggingface.co/datasets/codeparrot/github-jupyter-text-to-code), a dataset where the samples are a succession of docstrings and their Python code, originally extracted from Jupyter notebooks parsed in this [dataset](https://huggingface.co/datasets/codeparrot/github-jupyter-parsed). - -To fine-tune a model on this dataset, we use the same [script](https://github.com/huggingface/transformers/blob/main/examples/research_projects/codeparrot/scripts/codeparrot_training.py) as for the pretraining of codeparrot: - -```bash -accelerate launch scripts/codeparrot_training.py \ - --model_ckpt codeparrot/codeparrot-small \ - --dataset_name_train codeparrot/github-jupyter-text-to-code \ - --dataset_name_valid codeparrot/github-jupyter-text-to-code \ - --train_batch_size 12 \ - --valid_batch_size 12 \ - --learning_rate 5e-4 \ - --num_warmup_steps 100 \ - --gradient_accumulation 1 \ - --gradient_checkpointing False \ - --max_train_steps 3000 \ - --save_checkpoint_steps 200 \ - --save_dir jupyter-text-to-python -``` - -## Code explanation: python to text -In this task we want to train a model to explain Python code.
We finetuned Codeparrot-small on [github-jupyter-code-to-text](https://huggingface.co/datasets/codeparrot/github-jupyter-code-to-text), a dataset where the samples are a succession of Python code and its explanation as a docstring. We just inverted the order of the text and code pairs in the github-jupyter-code-to-text dataset and added the delimiters "Explanation:" and "End of explanation" inside the docstrings. - -To fine-tune a model on this dataset, we use the same [script](https://github.com/huggingface/transformers/blob/main/examples/research_projects/codeparrot/scripts/codeparrot_training.py) as for the pretraining of codeparrot: - -```bash -accelerate launch scripts/codeparrot_training.py \ - --model_ckpt codeparrot/codeparrot-small \ - --dataset_name_train codeparrot/github-jupyter-code-to-text \ - --dataset_name_valid codeparrot/github-jupyter-code-to-text \ - --train_batch_size 12 \ - --valid_batch_size 12 \ - --learning_rate 5e-4 \ - --num_warmup_steps 100 \ - --gradient_accumulation 1 \ - --gradient_checkpointing False \ - --max_train_steps 3000 \ - --save_checkpoint_steps 200 \ - --save_dir jupyter-python-to-text -``` \ No newline at end of file diff --git a/examples/research_projects/codeparrot/examples/requirements.txt b/examples/research_projects/codeparrot/examples/requirements.txt deleted file mode 100644 index c5e21ab9819..00000000000 --- a/examples/research_projects/codeparrot/examples/requirements.txt +++ /dev/null @@ -1,5 +0,0 @@ -datasets==2.3.2 -transformers==4.48.0 -wandb==0.13.1 -evaluate==0.2.2 -scikit-learn==1.5.0 \ No newline at end of file diff --git a/examples/research_projects/codeparrot/examples/train_complexity_predictor.py b/examples/research_projects/codeparrot/examples/train_complexity_predictor.py deleted file mode 100644 index de06b988db6..00000000000 --- a/examples/research_projects/codeparrot/examples/train_complexity_predictor.py +++ /dev/null @@ -1,132 +0,0 @@ -import argparse -from copy import deepcopy - -import numpy as np -from datasets import ClassLabel, DatasetDict, load_dataset -from evaluate import load - -from transformers import ( - AutoModelForSequenceClassification, - AutoTokenizer, - DataCollatorWithPadding, - Trainer, - TrainerCallback, - TrainingArguments, - set_seed, -) - - -def get_args(): - parser = argparse.ArgumentParser() - parser.add_argument("--model_ckpt", type=str, default="microsoft/unixcoder-base-nine") - parser.add_argument("--num_epochs", type=int, default=5) - parser.add_argument("--batch_size", type=int, default=6) - parser.add_argument("--gradient_accumulation_steps", type=int, default=1) - parser.add_argument("--freeze", type=bool, default=True) - parser.add_argument("--learning_rate", type=float, default=5e-4) - parser.add_argument("--seed", type=int, default=0) - parser.add_argument("--lr_scheduler_type", type=str, default="cosine") - parser.add_argument("--num_warmup_steps", type=int, default=10) - parser.add_argument("--weight_decay", type=float, default=0.01) - parser.add_argument("--output_dir", type=str, default="./results") - return parser.parse_args() - - -metric = load("accuracy") - - -def compute_metrics(eval_pred): - predictions, labels = eval_pred - predictions = np.argmax(predictions, axis=1) - return metric.compute(predictions=predictions, references=labels) - - -class CustomCallback(TrainerCallback): - def __init__(self, trainer) -> None: - super().__init__() - self._trainer = trainer - - def on_epoch_end(self, args, state, control, **kwargs): - if control.should_evaluate: - control_copy = deepcopy(control) -
self._trainer.evaluate(eval_dataset=self._trainer.train_dataset, metric_key_prefix="train") - return control_copy - - -def main(): - args = get_args() - set_seed(args.seed) - - dataset = load_dataset("codeparrot/codecomplex", split="train") - train_test = dataset.train_test_split(test_size=0.2) - test_validation = train_test["test"].train_test_split(test_size=0.5) - train_test_validation = DatasetDict( - { - "train": train_test["train"], - "test": test_validation["train"], - "valid": test_validation["test"], - } - ) - - print("Loading tokenizer and model") - tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt) - tokenizer.pad_token = tokenizer.eos_token - model = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt, num_labels=7) - model.config.pad_token_id = model.config.eos_token_id - - if args.freeze: - for param in model.roberta.parameters(): - param.requires_grad = False - - labels = ClassLabel(num_classes=7, names=list(set(train_test_validation["train"]["complexity"]))) - - def tokenize(example): - inputs = tokenizer(example["src"], truncation=True, max_length=1024) - label = labels.str2int(example["complexity"]) - return { - "input_ids": inputs["input_ids"], - "attention_mask": inputs["attention_mask"], - "label": label, - } - - tokenized_datasets = train_test_validation.map( - tokenize, - batched=True, - remove_columns=train_test_validation["train"].column_names, - ) - data_collator = DataCollatorWithPadding(tokenizer=tokenizer) - - training_args = TrainingArguments( - output_dir=args.output_dir, - learning_rate=args.learning_rate, - lr_scheduler_type=args.lr_scheduler_type, - eval_strategy="epoch", - save_strategy="epoch", - logging_strategy="epoch", - per_device_train_batch_size=args.batch_size, - per_device_eval_batch_size=args.batch_size, - num_train_epochs=args.num_epochs, - gradient_accumulation_steps=args.gradient_accumulation_steps, - weight_decay=0.01, - metric_for_best_model="accuracy", - run_name="complexity-java", - report_to="wandb", - ) - - trainer = Trainer( - model=model, - args=training_args, - train_dataset=tokenized_datasets["train"], - eval_dataset=tokenized_datasets["valid"], - tokenizer=tokenizer, - data_collator=data_collator, - compute_metrics=compute_metrics, - ) - - print("Training...") - trainer.add_callback(CustomCallback(trainer)) - trainer.train() - - -if __name__ == "__main__": - main() diff --git a/examples/research_projects/codeparrot/requirements.txt b/examples/research_projects/codeparrot/requirements.txt deleted file mode 100644 index ee4fc0691b0..00000000000 --- a/examples/research_projects/codeparrot/requirements.txt +++ /dev/null @@ -1,9 +0,0 @@ -transformers==4.38.0 -datasets==1.16.0 -wandb==0.12.0 -tensorboard==2.6.0 -torch==2.2.0 -huggingface-hub==0.1.0 -git+https://github.com/huggingface/accelerate.git@3c45b6f760ad8745be9ebc9bbb26f5b04dea4abe -datasketch==1.5.7 -dpu_utils \ No newline at end of file diff --git a/examples/research_projects/codeparrot/scripts/arguments.py b/examples/research_projects/codeparrot/scripts/arguments.py deleted file mode 100644 index 1540319b3da..00000000000 --- a/examples/research_projects/codeparrot/scripts/arguments.py +++ /dev/null @@ -1,220 +0,0 @@ -from dataclasses import dataclass, field -from typing import Optional - - -@dataclass -class TrainingArguments: - """ - Configuration for training model. 
- """ - - model_ckpt: Optional[str] = field( - default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be trained."} - ) - save_dir: Optional[str] = field( - default="./", metadata={"help": "Save dir where model repo is cloned and models updates are saved to."} - ) - dataset_name_train: Optional[str] = field( - default="codeparrot/codeparrot-clean-train", metadata={"help": "Name or path of training dataset."} - ) - dataset_name_valid: Optional[str] = field( - default="codeparrot/codeparrot-clean-valid", metadata={"help": "Name or path of validation dataset."} - ) - train_batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size for training."}) - valid_batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size for evaluation."}) - weight_decay: Optional[float] = field(default=0.1, metadata={"help": "Value of weight decay."}) - shuffle_buffer: Optional[int] = field( - default=10000, metadata={"help": "Size of buffer used to shuffle streaming dataset."} - ) - learning_rate: Optional[float] = field(default=2e-4, metadata={"help": "Learning rate fo training."}) - lr_scheduler_type: Optional[str] = field(default="cosine", metadata={"help": "Learning rate."}) - num_warmup_steps: Optional[int] = field( - default=750, metadata={"help": "Number of warmup steps in the learning rate schedule."} - ) - gradient_accumulation_steps: Optional[int] = field( - default=16, metadata={"help": "Number of gradient accumulation steps."} - ) - gradient_checkpointing: Optional[bool] = field( - default=True, metadata={"help": "Use gradient checkpointing to reduce memory footprint."} - ) - max_train_steps: Optional[int] = field(default=50000, metadata={"help": "Maximum number of training steps."}) - max_eval_steps: Optional[int] = field( - default=-1, metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."} - ) - seq_length: Optional[int] = field(default=1024, metadata={"help": "Sequence lengths used for training."}) - seed: Optional[int] = field(default=1, metadata={"help": "Training seed."}) - save_checkpoint_steps: Optional[int] = field( - default=1024, - metadata={"help": "Interval to save checkpoints. Measured as number of forward passes not training steps."}, - ) - resume_from_checkpoint: Optional[str] = field( - default=None, metadata={"help": "States path if the training should continue from a checkpoint folder."} - ) - tokenized: Optional[bool] = field(default=False, metadata={"help": "If True the data is pretokenized."}) - - -@dataclass -class EvaluationArguments: - """ - Configuration for evaluating model. - """ - - model_ckpt: Optional[str] = field( - default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be evaluated."} - ) - dataset_name: Optional[str] = field( - default="codeparrot/codeparrot-clean-valid", metadata={"help": "Name or path of validation dataset."} - ) - batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size used for evaluation."}) - max_eval_steps: Optional[int] = field( - default=-1, metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."} - ) - seq_length: Optional[int] = field(default=1024, metadata={"help": "Length of sequences to be evaluated."}) - seed: Optional[int] = field(default=1, metadata={"help": "Random seed used for evaluation."}) - - -@dataclass -class HumanEvalArguments: - """ - Configuration for running evaluation on HumanEval dataset. 
- """ - - model_ckpt: Optional[str] = field( - default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be evaluated."} - ) - num_workers: Optional[int] = field(default=None, metadata={"help": "Number of workers used for code evaluation."}) - num_tasks: Optional[int] = field( - default=None, - metadata={"help": "The number of human-eval tasks to run. If not included all tasks are evaluated."}, - ) - do_sample: Optional[bool] = field( - default=True, metadata={"help": "Sample from the language model's output distribution."} - ) - temperature: Optional[float] = field(default=0.2, metadata={"help": "Sampling temperature used for generation."}) - max_new_tokens: Optional[int] = field(default=256, metadata={"help": "Maximum number of newly generated tokens."}) - top_k: Optional[int] = field(default=0, metadata={"help": "Top-k parameter used for generation."}) - top_p: Optional[float] = field(default=0.95, metadata={"help": "Top-p parameter used for nucleus sampling."}) - batch_size: Optional[int] = field(default=10, metadata={"help": "Number of generations to run in parallel."}) - n_samples: Optional[int] = field( - default=200, metadata={"help": "Number of completions to generate for each sample."} - ) - seed: Optional[int] = field(default=1, metadata={"help": "Random seed used for evaluation."}) - output_file: Optional[str] = field( - default="eval_results.json", metadata={"help": "Random seed used for evaluation."} - ) - HF_ALLOW_CODE_EVAL: Optional[str] = field( - default="0", metadata={"help": "Allow `code_eval` to execute Python code on machine"} - ) - device_int: Optional[int] = field( - default=-1, - metadata={ - "help": ( - "Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive" - " number corresponds to which GPU device id to run on." - ) - }, - ) - - -@dataclass -class PreprocessingArguments: - """ - Configuration for preprocessing data. - """ - - num_workers: Optional[int] = field( - default=None, - metadata={ - "help": "The number of CPU cores to use for parallel preprocessing. Default uses the maximum available." 
- }, - ) - dataset_name: Optional[str] = field( - default="transformersbook/codeparrot", metadata={"help": "Folder or name of dataset to process."} - ) - output_dir: Optional[str] = field( - default="codeparrot-clean", metadata={"help": "Folder to save processed dataset."} - ) - samples_per_file: Optional[int] = field( - default=100_000, metadata={"help": "Number of files to save per JSON output file."} - ) - text_column: Optional[str] = field(default="content", metadata={"help": "Column containing text data to process."}) - line_max: Optional[float] = field( - default=1000, metadata={"help": "Maximum line length in file, otherwise file is filtered."} - ) - line_mean: Optional[float] = field( - default=100, metadata={"help": "Maximum mean line length in file, otherwise file is filtered."} - ) - alpha_frac: Optional[float] = field( - default=0.25, metadata={"help": "Maximum fraction of non-alphanumeric characters, otherwise file is filtered."} - ) - min_token_ratio: Optional[float] = field( - default=1.5, metadata={"help": "Minimum character token ratio for the file, otherwise file is filtered."} - ) - filter_proba: Optional[float] = field( - default=0.7, metadata={"help": "Probability for filtering config, test and uncommon files."} - ) - tokenizer: Optional[str] = field( - default="codeparrot/codeparrot", - metadata={"help": "Name or path to the tokenizer."}, - ) - near_deduplication: Optional[bool] = field( - default=False, metadata={"help": "If True, near-duplicate samples are removed."} - ) - jaccard_threshold: Optional[float] = field( - default=0.85, metadata={"help": "Jaccard threshold for near-duplicate samples."} - ) - - -@dataclass -class TokenizerTrainingArguments: - """ - Configuration for tokenizer training. - """ - - base_tokenizer: Optional[str] = field( - default="openai-community/gpt2", metadata={"help": "Base tokenizer to build new tokenizer from."} - ) - dataset_name: Optional[str] = field( - default="transformersbook/codeparrot-train", metadata={"help": "Dataset to train tokenizer on."} - ) - text_column: Optional[str] = field(default="content", metadata={"help": "Column containing text data to process."}) - vocab_size: Optional[int] = field(default=200_000, metadata={"help": "Number of examples to train tokenizer on."}) - n_examples: Optional[int] = field( - default=32768, metadata={"help": "Number of examples to train the tokenizer on."} - ) - tokenizer_name: Optional[str] = field(default="codeparrot", metadata={"help": "Name of new tokenizer."}) - push_to_hub: Optional[bool] = field(default=True, metadata={"help": "Push saved tokenizer to the hub."}) - - -@dataclass -class PretokenizationArguments: - """ - Configuration for data pretokenization. - """ - - tokenizer_dir: Optional[str] = field( - default="codeparrot/codeparrot", metadata={"help": "Name or path to the tokenizer."} - ) - dataset_name: Optional[str] = field( - default="codeparrot/codeparrot-clean-train", metadata={"help": "Name or path to the dataset to pretokenize."} - ) - tokenized_data_repo: Optional[str] = field( - default="tokenized-codeparrot-train", metadata={"help": "Repo name of the pretokenized data."} - ) - num_workers: Optional[int] = field(default=None, metadata={"help": "Number of workers used for code evaluation."}) - - -@dataclass -class InitializationArguments: - """ - Configuration for initializing new model. 
- """ - - config_name: Optional[str] = field( - default="openai-community/gpt2-large", metadata={"help": "Configuration to use for model initialization."} - ) - tokenizer_name: Optional[str] = field( - default="codeparrot/codeparrot", metadata={"help": "Tokenizer attached to model."} - ) - model_name: Optional[str] = field(default="codeparrot", metadata={"help": "Name of the created model."}) - push_to_hub: Optional[bool] = field(default=True, metadata={"help": "Push saved tokenizer to the hub."}) diff --git a/examples/research_projects/codeparrot/scripts/bpe_training.py b/examples/research_projects/codeparrot/scripts/bpe_training.py deleted file mode 100644 index 1cbeb4b4ee3..00000000000 --- a/examples/research_projects/codeparrot/scripts/bpe_training.py +++ /dev/null @@ -1,32 +0,0 @@ -from arguments import TokenizerTrainingArguments -from datasets import load_dataset -from tqdm import tqdm - -from transformers import AutoTokenizer, HfArgumentParser -from transformers.models.gpt2.tokenization_gpt2 import bytes_to_unicode - - -# Iterator for Training -def batch_iterator(batch_size=10): - for _ in tqdm(range(0, args.n_examples, batch_size)): - yield [next(iter_dataset)[args.text_column] for _ in range(batch_size)] - - -# Configuration -parser = HfArgumentParser(TokenizerTrainingArguments) -args = parser.parse_args() - -# Base tokenizer -tokenizer = AutoTokenizer.from_pretrained(args.base_tokenizer) -base_vocab = list(bytes_to_unicode().values()) - -# Load dataset -dataset = load_dataset(args.dataset_name, split="train", streaming=True) -iter_dataset = iter(dataset) - - -# Training and saving -new_tokenizer = tokenizer.train_new_from_iterator( - batch_iterator(), vocab_size=args.vocab_size, initial_alphabet=base_vocab -) -new_tokenizer.save_pretrained(args.tokenizer_name, push_to_hub=args.push_to_hub) diff --git a/examples/research_projects/codeparrot/scripts/codeparrot_training.py b/examples/research_projects/codeparrot/scripts/codeparrot_training.py deleted file mode 100644 index 549627d6ca7..00000000000 --- a/examples/research_projects/codeparrot/scripts/codeparrot_training.py +++ /dev/null @@ -1,328 +0,0 @@ -import logging -import os -import time -from argparse import Namespace -from pathlib import Path - -import datasets -import torch -from accelerate import Accelerator, DistributedType -from accelerate.utils import ProjectConfiguration -from arguments import TrainingArguments -from datasets import load_dataset -from huggingface_hub import Repository -from torch.optim import AdamW -from torch.utils.data import IterableDataset -from torch.utils.data.dataloader import DataLoader -from torch.utils.data.datapipes.iter.combinatorics import ShufflerIterDataPipe - -import transformers -from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, get_scheduler, set_seed - - -class ConstantLengthDataset(IterableDataset): - """ - Iterable dataset that returns constant length chunks of tokens from stream of text files. - Args: - tokenizer (Tokenizer): The processor used for processing the data. - dataset (dataset.Dataset): Dataset with text files. - infinite (bool): If True the iterator is reset after dataset reaches end else stops. - seq_length (int): Length of token sequences to return. - num_of_sequences (int): Number of token sequences to keep in buffer. - chars_per_token (int): Number of characters per token used to estimate number of tokens in text buffer. - tokenized (bool): If true we use a pretokenized dataset. 
- """ - - def __init__( - self, - tokenizer, - dataset, - infinite=False, - seq_length=1024, - num_of_sequences=1024, - chars_per_token=3.6, - tokenized=False, - ): - self.tokenizer = tokenizer - self.concat_token_id = tokenizer.bos_token_id - self.dataset = dataset - self.seq_length = seq_length - self.epoch = 0 - self.infinite = infinite - self.current_size = 0 - self.tokenized = tokenized - - if self.tokenized: - self.max_buffer_size = seq_length * num_of_sequences - self.content_field = "input_ids" - else: - self.max_buffer_size = seq_length * chars_per_token * num_of_sequences - self.content_field = "content" - - def __iter__(self): - iterator = iter(self.dataset) - more_examples = True - while more_examples: - buffer, buffer_len = [], 0 - while True: - if buffer_len >= self.max_buffer_size: - break - try: - buffer.append(next(iterator)[self.content_field]) - buffer_len += len(buffer[-1]) - except StopIteration: - if self.infinite: - iterator = iter(self.dataset) - self.epoch += 1 - logger.info(f"Dataset epoch: {self.epoch}") - else: - more_examples = False - break - if self.tokenized: - tokenized_inputs = buffer - else: - tokenized_inputs = self.tokenizer(buffer, truncation=False)["input_ids"] - all_token_ids = [] - for tokenized_input in tokenized_inputs: - all_token_ids.extend(tokenized_input + [self.concat_token_id]) - for i in range(0, len(all_token_ids), self.seq_length): - input_ids = all_token_ids[i : i + self.seq_length] - if len(input_ids) == self.seq_length: - self.current_size += 1 - yield torch.tensor(input_ids) - - def shuffle(self, buffer_size=1000): - return ShufflerIterDataPipe(self, buffer_size=buffer_size) - - -def setup_logging(args): - project_name = args.model_ckpt.split("/")[-1] - logger = logging.getLogger(__name__) - log_dir = Path(args.save_dir) / "log/" - log_dir.mkdir(exist_ok=True) - filename = f"debug_{accelerator.process_index}.log" - logging.basicConfig( - format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", - datefmt="%m/%d/%Y %H:%M:%S", - level=logging.INFO, - handlers=[logging.FileHandler(log_dir / filename), logging.StreamHandler()], - ) - if accelerator.is_main_process: # we only want to setup logging once - accelerator.init_trackers(project_name, vars(args)) - run_name = accelerator.trackers[0].run.name - logger.setLevel(logging.INFO) - datasets.utils.logging.set_verbosity_info() - transformers.utils.logging.set_verbosity_info() - else: - run_name = "" - logger.setLevel(logging.ERROR) - datasets.utils.logging.set_verbosity_error() - transformers.utils.logging.set_verbosity_error() - return logger, run_name - - -def create_dataloaders(args): - ds_kwargs = {"streaming": True} - train_data = load_dataset(args.dataset_name_train, split="train", **ds_kwargs) - train_data = train_data.shuffle(buffer_size=args.shuffle_buffer, seed=args.seed) - valid_data = load_dataset(args.dataset_name_valid, split="train", **ds_kwargs) - train_dataset = ConstantLengthDataset( - tokenizer, train_data, infinite=True, seq_length=args.seq_length, tokenized=args.tokenized - ) - valid_dataset = ConstantLengthDataset( - tokenizer, valid_data, infinite=False, seq_length=args.seq_length, tokenized=args.tokenized - ) - train_dataset = train_dataset.shuffle(buffer_size=args.shuffle_buffer) - train_dataloader = DataLoader(train_dataset, batch_size=args.train_batch_size, shuffle=True) - eval_dataloader = DataLoader(valid_dataset, batch_size=args.valid_batch_size) - return train_dataloader, eval_dataloader - - -def get_grouped_params(model, args, no_decay=["bias", 
"ln_1.weight", "ln_2.weight", "ln_f.weight"]): - params_with_wd, params_without_wd = [], [] - for n, p in model.named_parameters(): - if any(nd in n for nd in no_decay): - params_without_wd.append(p) - else: - params_with_wd.append(p) - return [ - {"params": params_with_wd, "weight_decay": args.weight_decay}, - {"params": params_without_wd, "weight_decay": 0.0}, - ] - - -def log_metrics(step, metrics): - logger.info(f"Step {step}: {metrics}") - if accelerator.is_main_process: - accelerator.log(metrics, step) - - -def compute_tflops(elapsed_time, accelerator, args): - # TFLOPs formula (from Equation 3 in Section 5.1 of https://arxiv.org/pdf/2104.04473.pdf). - config_model = accelerator.unwrap_model(model).config - checkpoint_factor = 4 if args.gradient_checkpointing else 3 - batch_size = args.train_batch_size * accelerator.state.num_processes * args.gradient_accumulation_steps - factor = 24 * checkpoint_factor * batch_size * args.seq_length * config_model.n_layer * (config_model.n_embd**2) - flops_per_iteration = factor * ( - 1.0 - + (args.seq_length / (6.0 * config_model.n_embd)) - + (tokenizer.vocab_size / (16.0 * config_model.n_layer * config_model.n_embd)) - ) - tflops = flops_per_iteration / (elapsed_time * accelerator.state.num_processes * (10**12)) - return tflops - - -def evaluate(args): - model.eval() - losses = [] - for step, batch in enumerate(eval_dataloader): - with torch.no_grad(): - outputs = model(batch, labels=batch) - loss = outputs.loss.repeat(args.valid_batch_size) - losses.append(accelerator.gather(loss)) - if args.max_eval_steps > 0 and step >= args.max_eval_steps: - break - losses = torch.cat(losses) - loss = losses[: eval_dataloader.dataset.current_size].mean() - try: - perplexity = torch.exp(loss) - except OverflowError: - perplexity = float("inf") - return loss.item(), perplexity.item() - - -# Settings -parser = HfArgumentParser(TrainingArguments) -args = parser.parse_args() - -# Accelerator -config = ProjectConfiguration(project_dir=args.save_dir, logging_dir="log") -accelerator = Accelerator(log_with=["wandb", "tensorboard"], project_config=config) -acc_state = {str(k): str(v) for k, v in accelerator.state.__dict__.items()} - -args = Namespace(**vars(args), **acc_state) -samples_per_step = accelerator.state.num_processes * args.train_batch_size -set_seed(args.seed) - -# Clone model repository -if accelerator.is_main_process: - hf_repo = Repository(args.save_dir, clone_from=args.model_ckpt) - -# Logging -logger, run_name = setup_logging(args) -logger.info(accelerator.state) - -# Checkout new branch on repo -if accelerator.is_main_process: - hf_repo.git_checkout(run_name, create_branch_ok=True) - -# Load model and tokenizer -model = AutoModelForCausalLM.from_pretrained(args.save_dir) -if args.gradient_checkpointing: - model.gradient_checkpointing_enable() -tokenizer = AutoTokenizer.from_pretrained(args.save_dir) - -# Load dataset and dataloader -train_dataloader, eval_dataloader = create_dataloaders(args) - -# Prepare the optimizer and learning rate scheduler -optimizer = AdamW(get_grouped_params(model, args), lr=args.learning_rate) -lr_scheduler = get_scheduler( - name=args.lr_scheduler_type, - optimizer=optimizer, - num_warmup_steps=args.num_warmup_steps, - num_training_steps=args.max_train_steps, -) -accelerator.register_for_checkpointing(lr_scheduler) - - -def get_lr(): - return optimizer.param_groups[0]["lr"] - - -# Prepare everything with our `accelerator`. 
-model, optimizer, train_dataloader, eval_dataloader = accelerator.prepare( - model, optimizer, train_dataloader, eval_dataloader -) - -# load in the weights and states from a previous save -if args.resume_from_checkpoint: - if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "": - accelerator.print(f"Resumed from checkpoint: {args.resume_from_checkpoint}") - accelerator.load_state(args.resume_from_checkpoint) - path = os.path.basename(args.resume_from_checkpoint) - else: - # Get the most recent checkpoint - dirs = [f.name for f in os.scandir(args.save_dir) if f.is_dir() and "step" in str(f)] - dirs.sort(key=os.path.getctime) - path = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last - # Extract the step of the checkpoint to continue from there - training_difference = os.path.splitext(path)[0] - resume_step = int(training_difference.replace("step_", "")) - -# Train model -model.train() -completed_steps = 0 -t_start = time.time() -loss_tracking = 0 -for step, batch in enumerate(train_dataloader, start=1): - if args.resume_from_checkpoint and step < resume_step: - continue # we need to skip steps until we reach the resumed step - loss = model(batch, labels=batch, use_cache=False).loss - avg_loss = accelerator.gather(loss.repeat(args.train_batch_size)).mean() - loss_tracking += avg_loss.item() / args.gradient_accumulation_steps - log_metrics(step, {"samples": step * samples_per_step, "loss_per_step/train": loss.item()}) - loss = loss / args.gradient_accumulation_steps - if step % args.gradient_accumulation_steps != 0: - # Prevent backward from doing gradient all_reduce in every step - if accelerator.distributed_type == DistributedType.MULTI_GPU: - with model.no_sync(): - accelerator.backward(loss) - else: - accelerator.backward(loss) - else: - lr = get_lr() - accelerator.backward(loss) - accelerator.clip_grad_norm_(model.parameters(), 1.0) - optimizer.step() - lr_scheduler.step() - optimizer.zero_grad() - elapsed_time = time.time() - t_start - tflops = compute_tflops(elapsed_time, accelerator, args) - log_metrics( - step, - { - "steps": completed_steps, - "loss/train": loss_tracking, - "lr": lr, - "tflops": tflops, - "time_per_iteration": elapsed_time, - }, - ) - t_start = time.time() - loss_tracking = 0 - completed_steps += 1 - if step % args.save_checkpoint_steps == 0: - logger.info("Evaluating and saving model checkpoint") - eval_loss, perplexity = evaluate(args) - log_metrics(step, {"loss/eval": eval_loss, "perplexity": perplexity}) - accelerator.wait_for_everyone() - save_dir = os.path.join(args.save_dir, f"step_{step}") - accelerator.save_state(save_dir) - if accelerator.is_main_process: - hf_repo.push_to_hub(commit_message=f"step {step}") - model.train() - if completed_steps >= args.max_train_steps: - break - -# Evaluate and save the last checkpoint -logger.info("Evaluating and saving model after training") -eval_loss, perplexity = evaluate(args) -log_metrics(step, {"loss/eval": eval_loss, "perplexity": perplexity}) -accelerator.wait_for_everyone() -unwrapped_model = accelerator.unwrap_model(model) -unwrapped_model.save_pretrained(args.save_dir, save_function=accelerator.save) -save_dir = os.path.join(args.save_dir, f"step_{step}") -accelerator.save_state(save_dir) -if accelerator.is_main_process: - hf_repo.push_to_hub(commit_message="final model") diff --git a/examples/research_projects/codeparrot/scripts/human_eval.py b/examples/research_projects/codeparrot/scripts/human_eval.py deleted file mode 100644 index 
ef217a597e3..00000000000 --- a/examples/research_projects/codeparrot/scripts/human_eval.py +++ /dev/null @@ -1,228 +0,0 @@ -import json -import multiprocessing -import os -import re -from collections import defaultdict - -import torch -from accelerate import Accelerator -from accelerate.utils import set_seed -from arguments import HumanEvalArguments -from datasets import load_dataset, load_metric -from torch.utils.data import IterableDataset -from torch.utils.data.dataloader import DataLoader -from tqdm import tqdm - -import transformers -from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList - - -EOF_STRINGS = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"] - - -class TokenizedDataset(IterableDataset): - """Tokenize and preprocess the dataset - Multiple copies of the same prompt are sent sequentially. - See compute_code for more details. - """ - - def __init__(self, tokenizer, dataset, n_tasks=None, n_copies=1): - self.tokenizer = tokenizer - self.dataset = dataset - self.n_tasks = len(dataset) if n_tasks is None else n_tasks - self.n_copies = n_copies - - def __iter__(self): - prompts = [] - for task in range(self.n_tasks): - # without strip, the model generate commented codes ... - prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip()) - outputs = self.tokenizer(prompts, padding=True, return_tensors="pt") - for task in range(self.n_tasks): - for _ in range(self.n_copies): - yield { - "ids": outputs.input_ids[task], - "task_id": task, - "input_len": outputs.attention_mask[task].sum(), - } - - -class EndOfFunctionCriteria(StoppingCriteria): - """Custom `StoppingCriteria` which checks if all generated functions in the batch are completed.""" - - def __init__(self, start_length, eof_strings, tokenizer): - self.start_length = start_length - self.eof_strings = eof_strings - self.tokenizer = tokenizer - - def __call__(self, input_ids, scores, **kwargs): - """Returns true if all generated sequences contain any of the end-of-function strings.""" - decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :]) - done = [] - for decoded_generation in decoded_generations: - done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings)) - return all(done) - - -def remove_last_block(string): - """Remove the last block of the code containing EOF_STRINGS""" - string_list = re.split("(%s)" % "|".join(EOF_STRINGS), string) - # last string should be "" - return "".join(string_list[:-2]) - - -def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs): - """Generate multiple codes for each task in the dataset. This function leverage accelerator to distribute - the processing to multiple GPUs. - dataloader, a wrapper around a TokenizeDataset objectm is supposed to send all the prompts from - the evalution dataset to the modelm as the following: - [p_0_0, p_0_1, ..., p_0_nc-1, p_1_0, ..., p_nt-1_nc-1] - where nc is the number of copies of the prompt, and nt is the number of tasks. - nc is such that num_sample = nc * batch_size - - Parameters - ---------- - accelerator: Accelerator - - model: transformers.PreTrainedModel - Code generation model. AutoTokenizer.from_pretrained(model_ckpt), ex model_ckpt = "lvwerra/codeparrot" - - tokenizer: transformers.AutoTokenizer - The tokenizer used to train model - - dataloader: DataLoader - The dataloader is a wrapper around a TokenizeDataset object. 
It is designed to be used with multiple GPUs. - - n_tasks: int - The number of tasks in the dataset. It is used to determine the length of the output. - Should be aligned with the number of tasks in the TokenizeDataset. - - batch_size: int - num_return_sequences per copy of the prompt such that num_sample = batch_size * n_copies - - gen_kwargs: dict - Keyword arguments for the generation function of the model. - - Returns - ------- - code_gens: list of list of str, of length n_tasks - List of generated codes for each task. - Each element is a list of generated codes for each task, with length num_samples - """ - gen_token_dict = defaultdict(list) # dict of list of generated tokens - for step, batch in tqdm(enumerate(dataloader)): - with torch.no_grad(): - gen_kwargs["stopping_criteria"][0].start_length = batch["ids"].shape[-1] - generated_tokens = accelerator.unwrap_model(model).generate( - input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=batch_size, **gen_kwargs - ) - # each task is generated batch_size times - generated_tasks = batch["task_id"].repeat(batch_size) - generated_tokens = accelerator.pad_across_processes( - generated_tokens, dim=1, pad_index=tokenizer.pad_token_id - ) - - generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks)) - generated_tokens = generated_tokens.cpu().numpy() - generated_tasks = generated_tasks.cpu().numpy() - - for task, generated_tokens in zip(generated_tasks, generated_tokens): - gen_token_dict[task].append(generated_tokens) - - code_gens = [[] for _ in range(n_tasks)] - for task, generated_tokens in gen_token_dict.items(): - for s in generated_tokens: - gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True) - code_gens[task].append(remove_last_block(gen_code)) - return code_gens - - -def main(): - # Setup configuration - parser = HfArgumentParser(HumanEvalArguments) - args = parser.parse_args() - - transformers.logging.set_verbosity_error() - # enables code execution in code_eval metric - os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL - # make sure tokenizer plays nice with multiprocessing - os.environ["TOKENIZERS_PARALLELISM"] = "false" - - if args.num_workers is None: - args.num_workers = multiprocessing.cpu_count() - - # Use dataset load to feed to accelerate - accelerator = Accelerator() - set_seed(args.seed, device_specific=True) - - # Load model and tokenizer - tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt) - tokenizer.pad_token = tokenizer.eos_token - model = AutoModelForCausalLM.from_pretrained(args.model_ckpt) - - # Generation settings - gen_kwargs = { - "do_sample": args.do_sample, - "temperature": args.temperature, - "max_new_tokens": args.max_new_tokens, - "top_p": args.top_p, - "top_k": args.top_k, - "stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0, EOF_STRINGS, tokenizer)]), - } - - # Load evaluation dataset and metric - human_eval = load_dataset("openai_humaneval") - code_eval_metric = load_metric("code_eval") - - n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval["test"]) - n_copies = args.n_samples // args.batch_size - - human_eval_tokenized = TokenizedDataset(tokenizer, human_eval["test"], n_copies=n_copies, n_tasks=n_tasks) - # do not confuse args.batch_size, which is actually the num_return_sequences - human_eval_loader = DataLoader(human_eval_tokenized, batch_size=1) - - # Run a quick test to see if code evaluation is enabled - try: - _ = 
code_eval_metric.compute(references=[""], predictions=[[""]]) - except ValueError as exception: - print( - 'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`' - " flag to enable code evaluation." - ) - raise exception - - model, human_eval_loader = accelerator.prepare(model, human_eval_loader) - - generations = complete_code( - accelerator, - model, - tokenizer, - human_eval_loader, - n_tasks=n_tasks, - batch_size=args.batch_size, - **gen_kwargs, - ) - - if accelerator.is_main_process: - references = [] - - for task in tqdm(range(n_tasks)): - test_func = human_eval["test"][task]["test"] - entry_point = f"check({human_eval['test'][task]['entry_point']})" - references.append("\n" + test_func + "\n" + entry_point) - - # Evaluate completions with "code_eval" metric - pass_at_k, _ = code_eval_metric.compute( - references=references, predictions=generations, num_workers=args.num_workers - ) - print(f"Results: {pass_at_k}") - - # Save results to json file - with open(args.output_file, "w") as fp: - json.dump(pass_at_k, fp) - - -# For some reason the folliwng seems to be necessary sometimes for code_eval to work nice with multiprocessing -# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script -if __name__ == "__main__": - main() diff --git a/examples/research_projects/codeparrot/scripts/initialize_model.py b/examples/research_projects/codeparrot/scripts/initialize_model.py deleted file mode 100644 index 6bf028688f1..00000000000 --- a/examples/research_projects/codeparrot/scripts/initialize_model.py +++ /dev/null @@ -1,27 +0,0 @@ -from arguments import InitializationArguments - -from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser - - -# Configuration -parser = HfArgumentParser(InitializationArguments) -args = parser.parse_args() - -# Load codeparrot tokenizer trained for Python code tokenization -tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name) - -# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks -config_kwargs = { - "vocab_size": len(tokenizer), - "scale_attn_by_inverse_layer_idx": True, - "reorder_and_upcast_attn": True, -} - -# Load model config (GPT-2 large in this case) -config = AutoConfig.from_pretrained(args.config_name, **config_kwargs) - -# Initialize new model with config -model = AutoModelForCausalLM.from_config(config) - -# Save model to the hub -model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub) diff --git a/examples/research_projects/codeparrot/scripts/minhash_deduplication.py b/examples/research_projects/codeparrot/scripts/minhash_deduplication.py deleted file mode 100644 index f1984711278..00000000000 --- a/examples/research_projects/codeparrot/scripts/minhash_deduplication.py +++ /dev/null @@ -1,268 +0,0 @@ -import json -import multiprocessing as mp -import re -from collections import defaultdict -from functools import partial -from typing import Dict, List, Optional, Set, Tuple, Type - -from datasets import Dataset -from datasketch import MinHash, MinHashLSH -from dpu_utils.utils.iterators import ThreadedIterator -from tqdm import tqdm - - -NON_ALPHA = re.compile("[^A-Za-z_0-9]") -# parameters used in DuplicationIndex -MIN_NUM_TOKENS = 10 -NUM_PERM = 256 - - -def get_min_hash(tokens: List[str]) -> Optional[MinHash]: - """Compute the MinHash of a code snippet.""" - if len(tokens) < MIN_NUM_TOKENS: - return None - min_hash = MinHash(num_perm=NUM_PERM) - for token in 
set(tokens): - min_hash.update(token.encode()) - return min_hash - - -def get_tokens(code: str) -> Set[str]: - """Tokenize a code snippet.""" - return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0} - - -class DuplicationIndex: - def __init__( - self, - *, - duplication_jaccard_threshold: float = 0.85, - ): - self._duplication_jaccard_threshold = duplication_jaccard_threshold - self._num_perm = NUM_PERM - self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm) - - self._duplicate_clusters = defaultdict(set) - - def add(self, code_key: Tuple, min_hash: MinHash) -> None: - """Add a key to _index (MinHashLSH) - the min_hash is used to query closest matches based on the jaccard_threshold. - The new key is either added to a existing cluster of one close match, - or a new cluster is created. The clusters created in this way, depend on the order of add. - - Args: - code_key (Tuple of (index, repo_name, path)): - Theoritically any hasbale key. Here we use a tuple to retrieve the information later. - min_hash: MinHash of the code_key. - """ - close_duplicates = self._index.query(min_hash) - if code_key in self._index.keys: - print(f"Duplicate key {code_key}") - return - - self._index.insert(code_key, min_hash) - if len(close_duplicates) > 0: - for base_duplicate in close_duplicates: - if base_duplicate in self._duplicate_clusters: - self._duplicate_clusters[base_duplicate].add(code_key) - break - else: - self._duplicate_clusters[close_duplicates[0]].add(code_key) - - def get_duplicate_clusters(self) -> List[List[Dict]]: - """Export the duplicate clusters. - For each cluster, the first element is the base element of the cluster. - The base element has an estimation jaccard similarity higher than the threshold with all the other elements. - - Returns: - duplicate_clusters (List[List[Dict]]): - List of duplicate clusters. - """ - duplicate_clusters = [] - for base, duplicates in self._duplicate_clusters.items(): - cluster = [base] + list(duplicates) - # reformat the cluster to be a list of dict - cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster] - duplicate_clusters.append(cluster) - return duplicate_clusters - - def save(self, filepath) -> None: - duplicate_clusters = self.get_duplicate_clusters() - with open(filepath, "w") as f: - json.dump(duplicate_clusters, f) - - -def _compute_min_hash(element): - index, data = element - min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0]) - if min_hash is not None: - return (index, data["repo_name"], data["path"]), min_hash - - -def minhash_iter(dataset_iterator: Type[Dataset]): - with mp.Pool() as pool: - for data in pool.imap_unordered( - _compute_min_hash, - ThreadedIterator(dataset_iterator, max_queue_size=10000), - chunksize=100, - ): - if data is not None: - yield data - - -def make_duplicate_clusters(dataset_iterator: Type[Dataset], jaccard_threshold: float): - """Find duplicate clusters in the dataset in two steps: - 1. Compute MinHash for each code snippet. MinHash is a tool for fast jaccard similarity estimation. - This step is computed using an asynchronous multiprocessing pool, minhash_iter - 2. Find duplicate clusters. The computed MinHash is added sequentially to the DuplicationIndex. - This step cannot be parallelized. So using asynchronous thread in the previous step helps to speed up the process. 
- """ - di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold) - - for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)): - di.add(filename, min_hash) - - # Returns a List[Cluster] where Cluster is List[str] with the filenames. - return di.get_duplicate_clusters() - - -def jaccard_similarity(code1: str, code2: str) -> float: - """Compute the Jaccard similarity of two code snippets.""" - tokens1 = get_tokens(code1) - tokens2 = get_tokens(code2) - return len(tokens1 & tokens2) / len(tokens1 | tokens2) - - -_shared_dataset = None - - -def _find_cluster_extremes_shared(cluster, jaccard_threshold): - """Find a reduced cluster such that each code in the origin cluster is similar to at least one code in the reduced cluster. - Two codes are similar if their Jaccard similarity is above the threshold. - - Args: - cluster (List[dict]): - cluster is a list of dict, each dict contains the following keys: - - base_index - - repo_name - - path - This is a typical output of DuplicationIndex.get_duplicate_clusters() - jaccard_threshold (float): - threshold for Jaccard similarity. - Two codes are similar if their Jaccard similarity is above the threshold. - - Returns: - extremes (List[dict]): - A reduced representation of the cluster. The field copies is added to each dict. - The copies field indicates the number of similar codes in the cluster for a extreme. - """ - extremes = [] - for element1 in cluster: - code1 = _shared_dataset[element1["base_index"]]["content"] - for element2 in extremes: - code2 = _shared_dataset[element2["base_index"]]["content"] - if jaccard_similarity(code1, code2) >= jaccard_threshold: - element2["copies"] += 1 - break - else: - element1["copies"] = 1 - extremes.append(element1) - return extremes - - -def find_extremes(cluster_list, dataset, jaccard_threshold): - """Call the _find_cluster_extremes_shared function in a parallel fashion. - - Args: - cluster_list (List[List[Dict]]): - each cluster is a list of dicts with the key base_index, - referring to the index of the base code in the dataset. - dataset (Type[Dataset]): - dataset is used to access the content of the code snippets, - using the base_index from the cluster_list. - dataset is shared between all the processes using a glabal variable (any other way to share the dataset?), - otherwise the multi processing is not speeded up. - jaccard_threshold (float): - the threshold for the jaccard similarity. The default value is 0.85 - - Returns: - extremes_list (List[Dict]): - Each cluster is reduced to extremes. - See _find_cluster_extremes_shared for the definition of extremes. - """ - global _shared_dataset - _shared_dataset = dataset - extremes_list = [] - f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold) - with mp.Pool() as pool: - for extremes in tqdm( - pool.imap_unordered( - f, - cluster_list, - ), - total=len(cluster_list), - ): - extremes_list.append(extremes) - return extremes_list - - -def deduplicate_dataset( - dataset: Type[Dataset], jaccard_threshold: float = 0.85 -) -> Tuple[Type[Dataset], List[List[Dict]]]: - """Deduplicate the dataset using minhash and jaccard similarity. - This function first generate duplicate clusters, then each cluster - is reduced to the extremes that are similar to the other elements in the cluster. - Codes are called similar if their Jaccard similarity is greater than jaccard_threshold (0.85 default). - - Args: - dataset (Type[Dataset]): - The dataset to deduplicate. 
- jaccard_threshold (float, default=0.85): - jaccard threshold to determine if two codes are similar - - Returns: - ds_dedup (Type[Dataset]): - The deduplicated dataset. - duplicate_clusters (List[List[Dict]]): - The list of duplicate clusters. - Each cluster is a list of dicts with the following keys: - - base_index : int - The index of the code in the original dataset. - - repo_name : str - - path : str - - copies : int - The number of copies of the code in the cluster. (find_cluster_extremes) - - is_extreme : bool - Whether the code is an extreme in the cluster. - All the codes in the cluster are removed from the dataset except the extremes. - - Example: - >>> from datasets import load_dataset - >>> from minhash_deduplication import deduplicate_dataset - >>> ds = load_dataset("lvwerra/codeparrot-clean", split="train") - >>> ds_dedup, duplicate_clusters = deduplicate_dataset(ds, jaccard_threshold=0.85) - """ - duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold) - duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster} - extreme_dict = {} - extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold) - for extremes in extremes_clusters: - for element in extremes: - extreme_dict[element["base_index"]] = element - remove_indices = duplicate_indices - set(extreme_dict.keys()) - ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True) - - # update duplicate_clusters - for cluster in duplicate_clusters: - for element in cluster: - element["is_extreme"] = element["base_index"] in extreme_dict - if element["is_extreme"]: - element["copies"] = extreme_dict[element["base_index"]]["copies"] - - print(f"Original dataset size: {len(dataset)}") - print(f"Number of duplicate clusters: {len(duplicate_clusters)}") - print(f"Files in duplicate cluster: {len(duplicate_indices)}") - print(f"Unique files in duplicate cluster: {len(extreme_dict)}") - print(f"Filtered dataset size: {len(ds_filter)}") - - return ds_filter, duplicate_clusters diff --git a/examples/research_projects/codeparrot/scripts/preprocessing.py b/examples/research_projects/codeparrot/scripts/preprocessing.py deleted file mode 100644 index 3e932c8ef61..00000000000 --- a/examples/research_projects/codeparrot/scripts/preprocessing.py +++ /dev/null @@ -1,215 +0,0 @@ -import gzip -import json -import multiprocessing -import os -import re -import shutil -import time -from pathlib import Path - -import numpy as np -from arguments import PreprocessingArguments -from datasets import load_dataset -from huggingface_hub.utils import insecure_hashlib -from minhash_deduplication import deduplicate_dataset - -from transformers import AutoTokenizer, HfArgumentParser - - -PATTERN = re.compile(r"\s+") - - -def get_hash(example): - """Get hash of content field.""" - return {"hash": insecure_hashlib.md5(re.sub(PATTERN, "", example["content"]).encode("utf-8")).hexdigest()} - - -def line_stats(example): - """Calculates mean and max line length of file.""" - line_lengths = [len(line) for line in example["content"].splitlines()] - return {"line_mean": np.mean(line_lengths), "line_max": max(line_lengths)} - - -def alpha_stats(example): - """Calculates mean and max line length of file.""" - alpha_frac = np.mean([c.isalnum() for c in example["content"]]) - return {"alpha_frac": alpha_frac} - - -def check_uniques(example, uniques): - """Check if current hash is still in set of unique hashes and remove if true.""" - if example["hash"] in uniques: - 
uniques.remove(example["hash"]) - return True - else: - return False - - -def is_autogenerated(example, scan_width=5): - """Check if file is autogenerated by looking for keywords in the first few lines of the file.""" - keywords = ["auto-generated", "autogenerated", "automatically generated"] - lines = example["content"].splitlines() - for _, line in zip(range(scan_width), lines): - for keyword in keywords: - if keyword in line.lower(): - return {"autogenerated": True} - else: - return {"autogenerated": False} - - -def is_config_or_test(example, scan_width=5, coeff=0.05): - """Check if file is a configuration file or a unit test by : - 1- looking for keywords in the first few lines of the file. - 2- counting number of occurrence of the words 'config' and 'test' with respect to number of lines. - """ - - keywords = ["unit tests", "test file", "configuration file"] - lines = example["content"].splitlines() - count_config = 0 - count_test = 0 - # first test - for _, line in zip(range(scan_width), lines): - for keyword in keywords: - if keyword in line.lower(): - return {"config_or_test": True} - # second test - nlines = example["content"].count("\n") - threshold = int(coeff * nlines) - for line in lines: - count_config += line.lower().count("config") - count_test += line.lower().count("test") - if count_config > threshold or count_test > threshold: - return {"config_or_test": True} - return {"config_or_test": False} - - -def has_no_keywords(example): - """Check if a python file has none of the keywords for: function, class, for loop, while loop.""" - keywords = ["def ", "class ", "for ", "while "] - lines = example["content"].splitlines() - for line in lines: - for keyword in keywords: - if keyword in line.lower(): - return {"has_no_keywords": False} - return {"has_no_keywords": True} - - -def has_few_assignments(example, minimum=4): - """Check if file uses symbol '=' less than `minimum` times.""" - lines = example["content"].splitlines() - counter = 0 - for line in lines: - counter += line.lower().count("=") - if counter > minimum: - return {"has_few_assignments": False} - return {"has_few_assignments": True} - - -def char_token_ratio(example): - """Compute character/token ratio of the file with tokenizer.""" - input_ids = tokenizer(example["content"], truncation=False)["input_ids"] - ratio = len(example["content"]) / len(input_ids) - return {"ratio": ratio} - - -def preprocess(example): - """Chain all preprocessing steps into one function to not fill cache.""" - results = {} - results.update(get_hash(example)) - results.update(line_stats(example)) - results.update(alpha_stats(example)) - results.update(char_token_ratio(example)) - results.update(is_autogenerated(example)) - results.update(is_config_or_test(example)) - results.update(has_no_keywords(example)) - results.update(has_few_assignments(example)) - return results - - -def filter(example, uniques, args): - """Filter dataset with heuristics. 
Config, test and has_no_keywords files are removed with a given probability.""" - if not check_uniques(example, uniques): - return False - elif example["autogenerated"]: - return False - elif example["line_max"] > args.line_max: - return False - elif example["line_mean"] > args.line_mean: - return False - elif example["alpha_frac"] < args.alpha_frac: - return False - elif example["ratio"] < args.min_token_ratio: - return False - elif example["config_or_test"] and np.random.rand() <= args.filter_proba: - return False - elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba: - return False - elif example["has_few_assignments"]: - return False - else: - return True - - -def compress_file(file_path): - """Compress a file with g-zip.""" - with open(file_path, "rb") as f_in: - with gzip.open(str(file_path) + ".gz", "wb", compresslevel=6) as f_out: - shutil.copyfileobj(f_in, f_out) - os.unlink(file_path) - - -# Settings -parser = HfArgumentParser(PreprocessingArguments) -args = parser.parse_args() -if args.num_workers is None: - args.num_workers = multiprocessing.cpu_count() -tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir) - -# Load dataset -t_start = time.time() -ds = load_dataset(args.dataset_name, split="train") -print(f"Time to load dataset: {time.time()-t_start:.2f}") - -# Run preprocessing -t_start = time.time() -ds = ds.map(preprocess, num_proc=args.num_workers) -print(f"Time to preprocess dataset: {time.time()-t_start:.2f}") - -# Deduplicate hashes -uniques = set(ds.unique("hash")) -frac = len(uniques) / len(ds) -print(f"Fraction of duplicates: {1-frac:.2%}") - -# Deduplicate data and apply heuristics -t_start = time.time() -ds_filter = ds.filter(filter, fn_kwargs={"uniques": uniques, "args": args}) -print(f"Time to filter dataset: {time.time()-t_start:.2f}") -print(f"Size of filtered dataset: {len(ds_filter)}") - -# Deduplicate with minhash and jaccard similarity -if args.near_deduplication: - t_start = time.time() - ds_filter, duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold) - print(f"Time to deduplicate dataset: {time.time()-t_start:.2f}") - print(f"Size of deduplicate dataset: {len(ds_filter)}") - -# Save data in batches of samples_per_file -output_dir = Path(args.output_dir) -output_dir.mkdir(exist_ok=True) - -# save duplicate_clusters in the output_dir as artifacts -# not sure it is the right place the save it -if args.near_deduplication: - with open(output_dir / "duplicate_clusters.json", "w") as f: - json.dump(duplicate_clusters, f) - -data_dir = output_dir / "data" -data_dir.mkdir(exist_ok=True) - -t_start = time.time() -for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)): - file_path = str(data_dir / f"file-{file_number+1:012}.json") - end_index = min(len(ds_filter), index + args.samples_per_file) - ds_filter.select(list(range(index, end_index))).to_json(file_path) - compress_file(file_path) -print(f"Time to save dataset: {time.time()-t_start:.2f}") diff --git a/examples/research_projects/codeparrot/scripts/pretokenizing.py b/examples/research_projects/codeparrot/scripts/pretokenizing.py deleted file mode 100644 index 7cac8f51191..00000000000 --- a/examples/research_projects/codeparrot/scripts/pretokenizing.py +++ /dev/null @@ -1,49 +0,0 @@ -import multiprocessing -import time - -from arguments import PretokenizationArguments -from datasets import load_dataset - -from transformers import AutoTokenizer, HfArgumentParser - - -def tokenize(example): - output = {} - output["input_ids"] = 
tokenizer(example["content"], truncation=False)["input_ids"] - output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"]) - return output - - -parser = HfArgumentParser(PretokenizationArguments) -args = parser.parse_args() -if args.num_workers is None: - args.num_workers = multiprocessing.cpu_count() -tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir) - -t_start = time.time() -ds = load_dataset(args.dataset_name, split="train") -print(f"Dataset loaded in {time.time()-t_start:.2f}s") - -t_start = time.time() -ds = ds.map( - tokenize, - num_proc=args.num_workers, - remove_columns=[ - "repo_name", - "path", - "copies", - "size", - "content", - "license", - "hash", - "line_mean", - "line_max", - "alpha_frac", - "autogenerated", - ], -) -print(f"Dataset tokenized in {time.time()-t_start:.2f}s") - -t_start = time.time() -ds.push_to_hub(args.tokenized_data_repo) -print(f"Data pushed to the hub in {time.time()-t_start:.2f}s") diff --git a/examples/research_projects/codeparrot/scripts/tests/__init__.py b/examples/research_projects/codeparrot/scripts/tests/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/examples/research_projects/codeparrot/scripts/tests/test_deduplicate.py b/examples/research_projects/codeparrot/scripts/tests/test_deduplicate.py deleted file mode 100644 index aaf53de137f..00000000000 --- a/examples/research_projects/codeparrot/scripts/tests/test_deduplicate.py +++ /dev/null @@ -1,29 +0,0 @@ -from unittest import TestCase - -from datasets import Dataset -from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters - - -def get_dataset(): - data_dict = { - "repo_name": ["test_repo1", "test_repo2", "test_repo3"], - "path": ["test_1.py", "test_2.py", "unit_test.py"], - "content": ["a " * 20, "a " * 30, "b " * 7], - } - dataset = Dataset.from_dict(data_dict) - return dataset - - -class MakeDuplicateClustersTest(TestCase): - def test_make_duplicate_clusters(self): - ds = get_dataset() - duplicate_clusters = make_duplicate_clusters(ds, 0.85) - self.assertEqual(len(duplicate_clusters[0]), 2) - - def test_deduplicate_dataset(self): - ds = get_dataset() - ds_filter, duplicate_clusters = deduplicate_dataset(ds) - self.assertEqual(len(ds_filter), 2) - print(duplicate_clusters) - self.assertEqual(duplicate_clusters[0][0]["copies"], 2) - self.assertEqual(duplicate_clusters[0][0]["is_extreme"], True) diff --git a/examples/research_projects/codeparrot/scripts/validation_loss.py b/examples/research_projects/codeparrot/scripts/validation_loss.py deleted file mode 100644 index 929c2df427e..00000000000 --- a/examples/research_projects/codeparrot/scripts/validation_loss.py +++ /dev/null @@ -1,99 +0,0 @@ -import logging - -import torch -from accelerate import Accelerator -from arguments import EvaluationArguments -from datasets import load_dataset -from torch.utils.data import IterableDataset -from torch.utils.data.dataloader import DataLoader - -from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed - - -class ConstantLengthDataset(IterableDataset): - def __init__(self, tokenizer, dataset, seq_length=1024, num_of_sequences=1024, chars_per_token=3.6): - self.tokenizer = tokenizer - self.concat_token_id = tokenizer.bos_token_id - self.dataset = dataset - self.seq_length = seq_length - self.input_characters = seq_length * chars_per_token * num_of_sequences - - def __iter__(self): - iterator = iter(self.dataset) - more_examples = True - while more_examples: - buffer, buffer_len = [], 0 - 
while True: - if buffer_len >= self.input_characters: - break - try: - buffer.append(next(iterator)["content"]) - buffer_len += len(buffer[-1]) - except StopIteration: - more_examples = False - break - tokenized_inputs = tokenizer(buffer, truncation=False)["input_ids"] - all_token_ids = [] - for tokenized_input in tokenized_inputs: - all_token_ids.extend(tokenized_input + [self.concat_token_id]) - for i in range(0, len(all_token_ids), self.seq_length): - input_ids = all_token_ids[i : i + self.seq_length] - if len(input_ids) == self.seq_length: - yield torch.tensor(input_ids) - - -def create_dataloader(args): - ds_kwargs = {"streaming": True} - valid_data = load_dataset(args.dataset_name, split="train", **ds_kwargs) - valid_dataset = ConstantLengthDataset(tokenizer, valid_data, seq_length=args.seq_length) - eval_dataloader = DataLoader(valid_dataset, batch_size=args.batch_size) - return eval_dataloader - - -def evaluate(args): - model.eval() - losses = [] - for step, batch in enumerate(eval_dataloader): - with torch.no_grad(): - outputs = model(batch, labels=batch) - loss = outputs.loss.repeat(args.batch_size) - losses.append(accelerator.gather(loss)) - - if args.max_eval_steps > 0 and step >= args.max_eval_steps: - break - loss = torch.mean(torch.cat(losses)) - try: - perplexity = torch.exp(loss) - except OverflowError: - perplexity = float("inf") - return loss.item(), perplexity.item() - - -# Setup Accelerator -accelerator = Accelerator() - -# Parse configuration -parser = HfArgumentParser(EvaluationArguments) -args = parser.parse_args() -set_seed(args.seed) - -# Logging -logger = logging.getLogger(__name__) -logging.basicConfig( - format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO -) - -# Load model and tokenizer -model = AutoModelForCausalLM.from_pretrained(args.model_ckpt) -tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt) - -# Load dataset and dataloader -eval_dataloader = create_dataloader(args) - -# Prepare everything with our `accelerator`. 
-model, eval_dataloader = accelerator.prepare(model, eval_dataloader) - -# Evaluate and save the last checkpoint -logger.info("Evaluating and saving model after training") -eval_loss, perplexity = evaluate(args) -logger.info(f"loss/eval: {eval_loss}, perplexity: {perplexity}") diff --git a/examples/research_projects/decision_transformer/requirements.txt b/examples/research_projects/decision_transformer/requirements.txt deleted file mode 100644 index 14632a74d80..00000000000 --- a/examples/research_projects/decision_transformer/requirements.txt +++ /dev/null @@ -1,240 +0,0 @@ -absl-py==1.0.0 -aiohttp==3.10.11 -aiosignal==1.2.0 -alembic==1.7.7 -appdirs==1.4.4 -APScheduler==3.9.1 -arrow==1.2.2 -asttokens==2.0.5 -astunparse==1.6.3 -async-timeout==4.0.2 -attrs==21.4.0 -audioread==2.1.9 -autopage==0.5.0 -backcall==0.2.0 -backoff==1.11.1 -backports.zoneinfo==0.2.1 -binaryornot==0.4.4 -black==24.3.0 -boto3==1.16.34 -botocore==1.19.63 -Brotli==1.0.9 -cachetools==5.0.0 -certifi==2024.7.4 -cffi==1.15.0 -chardet==4.0.0 -charset-normalizer==2.0.12 -chex==0.1.1 -click==8.0.4 -cliff==3.10.1 -clldutils==3.11.1 -cloudpickle==2.0.0 -cmaes==0.8.2 -cmd2==2.4.0 -codecarbon==1.2.0 -colorlog==6.6.0 -cookiecutter==2.1.1 -cryptography==44.0.1 -csvw==2.0.0 -cycler==0.11.0 -Cython==0.29.28 -dash==2.15.0 -dash-bootstrap-components==1.0.3 -dash-core-components==2.0.0 -dash-html-components==2.0.0 -dash-table==5.0.0 -datasets==2.0.0 -decorator==5.1.1 -Deprecated==1.2.13 -dill==0.3.4 -dlinfo==1.2.1 -dm-tree==0.1.6 -docker==4.4.4 -execnet==1.9.0 -executing==0.8.3 -faiss-cpu==1.7.2 -fasteners==0.17.3 -filelock==3.6.0 -fire==0.4.0 -flake8==4.0.1 -Flask==2.3.2 -Flask-Compress==1.11 -flatbuffers==2.0 -flax==0.4.0 -fonttools==4.43.0 -frozenlist==1.3.0 -fsspec==2022.2.0 -fugashi==1.1.2 -gast==0.5.3 -gitdb==4.0.9 -GitPython==3.1.41 -glfw==2.5.1 -google-auth==2.6.2 -google-auth-oauthlib==0.4.6 -google-pasta==0.2.0 -greenlet==1.1.2 -grpcio==1.53.2 -gym==0.23.1 -gym-notices==0.0.6 -h5py==3.6.0 -huggingface-hub==0.4.0 -hypothesis==6.39.4 -idna==3.7 -imageio==2.16.1 -importlib-metadata==4.11.3 -importlib-resources==5.4.0 -iniconfig==1.1.1 -ipadic==1.0.0 -ipython==8.10.0 -isodate==0.6.1 -isort==5.10.1 -itsdangerous==2.1.1 -jax==0.3.4 -jaxlib==0.3.2 -jedi==0.18.1 -Jinja2==3.1.6 -jinja2-time==0.2.0 -jmespath==0.10.0 -joblib==1.2.0 -jsonschema==4.4.0 -keras==2.13.1 -Keras-Preprocessing==1.1.2 -kiwisolver==1.4.0 -kubernetes==12.0.1 -libclang==13.0.0 -librosa==0.9.1 -llvmlite==0.38.0 -Mako==1.2.2 -Markdown==3.3.6 -MarkupSafe==1.1.1 -matplotlib==3.5.1 -matplotlib-inline==0.1.3 -mccabe==0.6.1 -msgpack==1.0.3 -mujoco-py==2.1.2.14 -multidict==6.0.2 -multiprocess==0.70.12.2 -mypy-extensions==0.4.3 -nltk==3.9 -numba==0.55.1 -numpy==1.22.3 -oauthlib==3.2.2 -onnx>=1.15.0 -onnxconverter-common==1.9.0 -opt-einsum==3.3.0 -optax==0.1.1 -optuna==2.10.0 -packaging==21.3 -pandas==1.4.1 -parameterized==0.8.1 -parso==0.8.3 -pathspec==0.9.0 -pbr==5.8.1 -pexpect==4.8.0 -phonemizer==3.0.1 -pickleshare==0.7.5 -Pillow==10.3.0 -Pint==0.16.1 -plac==1.3.4 -platformdirs==2.5.1 -plotly==5.6.0 -pluggy==1.0.0 -pooch==1.6.0 -portalocker==2.0.0 -poyo==0.5.0 -prettytable==3.2.0 -prompt-toolkit==3.0.28 -protobuf==3.19.5 -psutil==5.9.0 -ptyprocess==0.7.0 -pure-eval==0.2.2 -py==1.11.0 -py-cpuinfo==8.0.0 -pyarrow==15.0.0 -pyasn1==0.4.8 -pyasn1-modules==0.2.8 -pycodestyle==2.8.0 -pycparser==2.21 -pyctcdecode==0.3.0 -pyflakes==2.4.0 -Pygments==2.15.0 -pygtrie==2.4.2 -pynvml==11.4.1 -pyOpenSSL==22.0.0 -pyparsing==3.0.7 -pyperclip==1.8.2 -pypng==0.0.21 -pyrsistent==0.18.1 
-pytest==7.1.1 -pytest-forked==1.4.0 -pytest-timeout==2.1.0 -pytest-xdist==2.5.0 -python-dateutil==2.8.2 -python-slugify==6.1.1 -pytz==2022.1 -pytz-deprecation-shim==0.1.0.post0 -PyYAML==6.0 -ray>2.6.3 -redis==4.5.4 -regex==2022.3.15 -requests==2.32.0 -requests-oauthlib==1.3.1 -resampy==0.2.2 -responses==0.18.0 -rfc3986==1.5.0 -rouge-score==0.0.4 -rsa==4.8 -s3transfer==0.3.7 -sacrebleu==1.5.1 -sacremoses==0.0.49 -scikit-learn==1.5.0 -scipy==1.8.0 -segments==2.2.0 -sentencepiece==0.1.96 -sigopt==8.2.0 -six==1.16.0 -smmap==5.0.0 -sortedcontainers==2.4.0 -SoundFile==0.10.3.post1 -SQLAlchemy==1.4.32 -stack-data==0.2.0 -stevedore==3.5.0 -tabulate==0.8.9 -tenacity==8.0.1 -tensorboard==2.8.0 -tensorboard-data-server==0.6.1 -tensorboard-plugin-wit==1.8.1 -tensorboardX==2.5 -tensorflow==2.12.1 -tensorflow-io-gcs-filesystem==0.24.0 -termcolor==1.1.0 -text-unidecode==1.3 -tf-estimator-nightly==2.8.0.dev2021122109 -tf2onnx==1.9.3 -threadpoolctl==3.1.0 -timeout-decorator==0.5.0 -timm==0.5.4 -tokenizers==0.11.6 -tomli==2.0.1 -toolz==0.11.2 -torch==2.2.0 -torchaudio==0.11.0 -torchvision==0.12.0 -tqdm==4.66.3 -traitlets==5.1.1 --e git+git@github.com:edbeeching/transformers.git@77b90113ca0a0e4058b046796c874bdc98f1da61#egg=transformers -typing-extensions==4.1.1 -tzdata==2022.1 -tzlocal==4.1 -unidic==1.1.0 -unidic-lite==1.0.8 -uritemplate==4.1.1 -urllib3==1.26.19 -wasabi==0.9.0 -wcwidth==0.2.5 -websocket-client==1.3.1 -Werkzeug==3.0.6 -wrapt==1.14.0 -xxhash==3.0.0 -yarl==1.7.2 -zipp==3.19.1 \ No newline at end of file diff --git a/examples/research_projects/decision_transformer/run_decision_transformer.py b/examples/research_projects/decision_transformer/run_decision_transformer.py deleted file mode 100644 index d6c3e283312..00000000000 --- a/examples/research_projects/decision_transformer/run_decision_transformer.py +++ /dev/null @@ -1,173 +0,0 @@ -import gym -import numpy as np -import torch -from mujoco_py import GlfwContext - -from transformers import DecisionTransformerModel - - -GlfwContext(offscreen=True) # Create a window to init GLFW. 
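The `get_action` helper defined next conditions the model on a running return-to-go: the target starts at `TARGET_RETURN` and is decremented by each (scaled) reward. The following is a minimal, self-contained sketch of that bookkeeping only, with made-up rewards; it is not part of the deleted script, though `scale` and the 3600 Hopper target mirror constants defined further down in it:

```python
import torch

scale = 1000.0                                     # reward/return normalization, as in the Hopper setup
target_return = torch.tensor([[3600.0 / scale]])   # shape (1, num_timesteps)

for reward in [1.2, 0.9, 1.5]:                     # hypothetical per-step environment rewards
    # The remaining return-to-go is the previous target minus the scaled reward just received.
    next_target = target_return[0, -1] - reward / scale
    target_return = torch.cat([target_return, next_target.reshape(1, 1)], dim=1)

print(target_return)  # a decreasing sequence of returns-to-go fed back to the model each step
```

The same pattern appears in the evaluation loop below, where `pred_return = target_return[0, -1] - (reward / scale)` is appended after every environment step.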
- - -def get_action(model, states, actions, rewards, returns_to_go, timesteps): - # we don't care about the past rewards in this model - - states = states.reshape(1, -1, model.config.state_dim) - actions = actions.reshape(1, -1, model.config.act_dim) - returns_to_go = returns_to_go.reshape(1, -1, 1) - timesteps = timesteps.reshape(1, -1) - - if model.config.max_length is not None: - states = states[:, -model.config.max_length :] - actions = actions[:, -model.config.max_length :] - returns_to_go = returns_to_go[:, -model.config.max_length :] - timesteps = timesteps[:, -model.config.max_length :] - - # pad all tokens to sequence length - attention_mask = torch.cat( - [torch.zeros(model.config.max_length - states.shape[1]), torch.ones(states.shape[1])] - ) - attention_mask = attention_mask.to(dtype=torch.long, device=states.device).reshape(1, -1) - states = torch.cat( - [ - torch.zeros( - (states.shape[0], model.config.max_length - states.shape[1], model.config.state_dim), - device=states.device, - ), - states, - ], - dim=1, - ).to(dtype=torch.float32) - actions = torch.cat( - [ - torch.zeros( - (actions.shape[0], model.config.max_length - actions.shape[1], model.config.act_dim), - device=actions.device, - ), - actions, - ], - dim=1, - ).to(dtype=torch.float32) - returns_to_go = torch.cat( - [ - torch.zeros( - (returns_to_go.shape[0], model.config.max_length - returns_to_go.shape[1], 1), - device=returns_to_go.device, - ), - returns_to_go, - ], - dim=1, - ).to(dtype=torch.float32) - timesteps = torch.cat( - [ - torch.zeros( - (timesteps.shape[0], model.config.max_length - timesteps.shape[1]), device=timesteps.device - ), - timesteps, - ], - dim=1, - ).to(dtype=torch.long) - else: - attention_mask = None - - _, action_preds, _ = model( - states=states, - actions=actions, - rewards=rewards, - returns_to_go=returns_to_go, - timesteps=timesteps, - attention_mask=attention_mask, - return_dict=False, - ) - - return action_preds[0, -1] - - -# build the environment - -env = gym.make("Hopper-v3") -state_dim = env.observation_space.shape[0] -act_dim = env.action_space.shape[0] -max_ep_len = 1000 -device = "cuda" -scale = 1000.0 # normalization for rewards/returns -TARGET_RETURN = 3600 / scale # evaluation conditioning targets, 3600 is reasonable from the paper LINK -state_mean = np.array( - [ - 1.311279, - -0.08469521, - -0.5382719, - -0.07201576, - 0.04932366, - 2.1066856, - -0.15017354, - 0.00878345, - -0.2848186, - -0.18540096, - -0.28461286, - ] -) -state_std = np.array( - [ - 0.17790751, - 0.05444621, - 0.21297139, - 0.14530419, - 0.6124444, - 0.85174465, - 1.4515252, - 0.6751696, - 1.536239, - 1.6160746, - 5.6072536, - ] -) -state_mean = torch.from_numpy(state_mean).to(device=device) -state_std = torch.from_numpy(state_std).to(device=device) - -# Create the decision transformer model -model = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-medium") -model = model.to(device) -model.eval() - -for ep in range(10): - episode_return, episode_length = 0, 0 - state = env.reset() - target_return = torch.tensor(TARGET_RETURN, device=device, dtype=torch.float32).reshape(1, 1) - states = torch.from_numpy(state).reshape(1, state_dim).to(device=device, dtype=torch.float32) - actions = torch.zeros((0, act_dim), device=device, dtype=torch.float32) - rewards = torch.zeros(0, device=device, dtype=torch.float32) - - timesteps = torch.tensor(0, device=device, dtype=torch.long).reshape(1, 1) - for t in range(max_ep_len): - env.render() - # add padding - actions = 
torch.cat([actions, torch.zeros((1, act_dim), device=device)], dim=0) - rewards = torch.cat([rewards, torch.zeros(1, device=device)]) - - action = get_action( - model, - (states.to(dtype=torch.float32) - state_mean) / state_std, - actions.to(dtype=torch.float32), - rewards.to(dtype=torch.float32), - target_return.to(dtype=torch.float32), - timesteps.to(dtype=torch.long), - ) - actions[-1] = action - action = action.detach().cpu().numpy() - - state, reward, done, _ = env.step(action) - - cur_state = torch.from_numpy(state).to(device=device).reshape(1, state_dim) - states = torch.cat([states, cur_state], dim=0) - rewards[-1] = reward - - pred_return = target_return[0, -1] - (reward / scale) - target_return = torch.cat([target_return, pred_return.reshape(1, 1)], dim=1) - timesteps = torch.cat([timesteps, torch.ones((1, 1), device=device, dtype=torch.long) * (t + 1)], dim=1) - - episode_return += reward - episode_length += 1 - - if done: - break diff --git a/examples/research_projects/deebert/README.md b/examples/research_projects/deebert/README.md deleted file mode 100644 index 08a087dc03e..00000000000 --- a/examples/research_projects/deebert/README.md +++ /dev/null @@ -1,54 +0,0 @@ -# DeeBERT: Early Exiting for *BERT - -This is the code base for the paper [DeeBERT: Dynamic Early Exiting for Accelerating BERT Inference](https://www.aclweb.org/anthology/2020.acl-main.204/), modified from its [original code base](https://github.com/castorini/deebert). - -The original code base also has information for downloading sample models that we have trained in advance. - -## Usage - -There are three scripts in the folder which can be run directly. - -In each script, there are several things to modify before running: - -* `PATH_TO_DATA`: path to the GLUE dataset. -* `--output_dir`: path for saving fine-tuned models. Default: `./saved_models`. -* `--plot_data_dir`: path for saving evaluation results. Default: `./results`. Results are printed to stdout and also saved to `npy` files in this directory to facilitate plotting figures and further analyses. -* `MODEL_TYPE`: bert or roberta -* `MODEL_SIZE`: base or large -* `DATASET`: SST-2, MRPC, RTE, QNLI, QQP, or MNLI - -#### train_deebert.sh - -This is for fine-tuning DeeBERT models. - -#### eval_deebert.sh - -This is for evaluating each exit layer for fine-tuned DeeBERT models. - -#### entropy_eval.sh - -This is for evaluating fine-tuned DeeBERT models, given a number of different early exit entropy thresholds. 
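For readers unfamiliar with the mechanism these thresholds control: each layer's "highway" classifier produces logits, and inference stops at the first layer whose prediction entropy falls below the chosen threshold. The snippet below is only an illustration with random placeholder logits, not part of this project's code, although the entropy formula mirrors the `entropy()` helper in `src/modeling_highway_bert.py`:

```python
import torch


def entropy(logits: torch.Tensor) -> torch.Tensor:
    """Entropy (in nats) of the softmax distribution over pre-softmax logits, per example."""
    exp_x = torch.exp(logits)
    A = torch.sum(exp_x, dim=1)           # sum_i exp(x_i)
    B = torch.sum(logits * exp_x, dim=1)  # sum_i x_i * exp(x_i)
    return torch.log(A) - B / A           # log(sum exp) - E_softmax[x] == Shannon entropy


def early_exit_layer(per_layer_logits, threshold):
    """1-based index of the first layer confident enough to exit; falls back to the last layer."""
    for i, logits in enumerate(per_layer_logits, start=1):
        if entropy(logits).item() < threshold:
            return i
    return len(per_layer_logits)


# Toy example: 12 layers of binary-classification logits that grow sharper with depth.
torch.manual_seed(0)
fake_logits = [torch.randn(1, 2) * depth for depth in range(1, 13)]
print(early_exit_layer(fake_logits, threshold=0.3))
```

A threshold of 0 (the first value swept in `entropy_eval.sh`) never triggers an early exit, while larger thresholds trade accuracy for speed by exiting at shallower layers.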
- - - -## Citation - -Please cite our paper if you find the resource useful: -```bibtex -@inproceedings{xin-etal-2020-deebert, - title = "{D}ee{BERT}: Dynamic Early Exiting for Accelerating {BERT} Inference", - author = "Xin, Ji and - Tang, Raphael and - Lee, Jaejun and - Yu, Yaoliang and - Lin, Jimmy", - booktitle = "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", - month = jul, - year = "2020", - address = "Online", - publisher = "Association for Computational Linguistics", - url = "https://www.aclweb.org/anthology/2020.acl-main.204", - pages = "2246--2251", -} -``` - diff --git a/examples/research_projects/deebert/entropy_eval.sh b/examples/research_projects/deebert/entropy_eval.sh deleted file mode 100755 index 884c286a56a..00000000000 --- a/examples/research_projects/deebert/entropy_eval.sh +++ /dev/null @@ -1,33 +0,0 @@ -#!/bin/bash -export CUDA_VISIBLE_DEVICES=0 - -PATH_TO_DATA=/h/xinji/projects/GLUE - -MODEL_TYPE=bert # bert or roberta -MODEL_SIZE=base # base or large -DATASET=MRPC # SST-2, MRPC, RTE, QNLI, QQP, or MNLI - -MODEL_NAME=${MODEL_TYPE}-${MODEL_SIZE} -if [ $MODEL_TYPE = 'bert' ] -then - MODEL_NAME=${MODEL_NAME}-uncased -fi - -ENTROPIES="0 0.1 0.2 0.3 0.4 0.5 0.6 0.7" - -for ENTROPY in $ENTROPIES; do - python -u run_glue_deebert.py \ - --model_type $MODEL_TYPE \ - --model_name_or_path ./saved_models/${MODEL_TYPE}-${MODEL_SIZE}/$DATASET/two_stage \ - --task_name $DATASET \ - --do_eval \ - --do_lower_case \ - --data_dir $PATH_TO_DATA/$DATASET \ - --output_dir ./saved_models/${MODEL_TYPE}-${MODEL_SIZE}/$DATASET/two_stage \ - --plot_data_dir ./results/ \ - --max_seq_length 128 \ - --early_exit_entropy $ENTROPY \ - --eval_highway \ - --overwrite_cache \ - --per_gpu_eval_batch_size=1 -done diff --git a/examples/research_projects/deebert/eval_deebert.sh b/examples/research_projects/deebert/eval_deebert.sh deleted file mode 100755 index adf4f652a9f..00000000000 --- a/examples/research_projects/deebert/eval_deebert.sh +++ /dev/null @@ -1,30 +0,0 @@ -#!/bin/bash -export CUDA_VISIBLE_DEVICES=0 - -PATH_TO_DATA=/h/xinji/projects/GLUE - -MODEL_TYPE=bert # bert or roberta -MODEL_SIZE=base # base or large -DATASET=MRPC # SST-2, MRPC, RTE, QNLI, QQP, or MNLI - -MODEL_NAME=${MODEL_TYPE}-${MODEL_SIZE} -if [ $MODEL_TYPE = 'bert' ] -then - MODEL_NAME=${MODEL_NAME}-uncased -fi - - -python -u run_glue_deebert.py \ - --model_type $MODEL_TYPE \ - --model_name_or_path ./saved_models/${MODEL_TYPE}-${MODEL_SIZE}/$DATASET/two_stage \ - --task_name $DATASET \ - --do_eval \ - --do_lower_case \ - --data_dir $PATH_TO_DATA/$DATASET \ - --output_dir ./saved_models/${MODEL_TYPE}-${MODEL_SIZE}/$DATASET/two_stage \ - --plot_data_dir ./results/ \ - --max_seq_length 128 \ - --eval_each_highway \ - --eval_highway \ - --overwrite_cache \ - --per_gpu_eval_batch_size=1 diff --git a/examples/research_projects/deebert/requirements.txt b/examples/research_projects/deebert/requirements.txt deleted file mode 100644 index 99636a7fce1..00000000000 --- a/examples/research_projects/deebert/requirements.txt +++ /dev/null @@ -1 +0,0 @@ -transformers == 4.38.0 diff --git a/examples/research_projects/deebert/run_glue_deebert.py b/examples/research_projects/deebert/run_glue_deebert.py deleted file mode 100644 index 6ca28ab5bc0..00000000000 --- a/examples/research_projects/deebert/run_glue_deebert.py +++ /dev/null @@ -1,735 +0,0 @@ -from __future__ import absolute_import, division, print_function - -import argparse -import glob -import logging -import os -import random -import time - 
-import numpy as np -import torch -from torch import nn -from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset -from torch.utils.data.distributed import DistributedSampler -from tqdm import tqdm, trange - -import transformers -from src.modeling_highway_bert import DeeBertForSequenceClassification -from src.modeling_highway_roberta import DeeRobertaForSequenceClassification -from transformers import ( - WEIGHTS_NAME, - AdamW, - BertConfig, - BertTokenizer, - RobertaConfig, - RobertaTokenizer, - get_linear_schedule_with_warmup, -) -from transformers import glue_compute_metrics as compute_metrics -from transformers import glue_convert_examples_to_features as convert_examples_to_features -from transformers import glue_output_modes as output_modes -from transformers import glue_processors as processors -from transformers.trainer_utils import is_main_process - - -try: - from torch.utils.tensorboard import SummaryWriter -except ImportError: - from tensorboardX import SummaryWriter - - -logger = logging.getLogger(__name__) - - -MODEL_CLASSES = { - "bert": (BertConfig, DeeBertForSequenceClassification, BertTokenizer), - "roberta": (RobertaConfig, DeeRobertaForSequenceClassification, RobertaTokenizer), -} - - -def set_seed(args): - random.seed(args.seed) - np.random.seed(args.seed) - torch.manual_seed(args.seed) - if args.n_gpu > 0: - torch.cuda.manual_seed_all(args.seed) - - -def get_wanted_result(result): - if "spearmanr" in result: - print_result = result["spearmanr"] - elif "f1" in result: - print_result = result["f1"] - elif "mcc" in result: - print_result = result["mcc"] - elif "acc" in result: - print_result = result["acc"] - else: - raise ValueError("Primary metric unclear in the results") - return print_result - - -def train(args, train_dataset, model, tokenizer, train_highway=False): - """Train the model""" - if args.local_rank in [-1, 0]: - tb_writer = SummaryWriter() - - args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu) - train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset) - train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size) - - if args.max_steps > 0: - t_total = args.max_steps - args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1 - else: - t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs - - # Prepare optimizer and schedule (linear warmup and decay) - no_decay = ["bias", "LayerNorm.weight"] - if train_highway: - optimizer_grouped_parameters = [ - { - "params": [ - p - for n, p in model.named_parameters() - if ("highway" in n) and (not any(nd in n for nd in no_decay)) - ], - "weight_decay": args.weight_decay, - }, - { - "params": [ - p for n, p in model.named_parameters() if ("highway" in n) and (any(nd in n for nd in no_decay)) - ], - "weight_decay": 0.0, - }, - ] - else: - optimizer_grouped_parameters = [ - { - "params": [ - p - for n, p in model.named_parameters() - if ("highway" not in n) and (not any(nd in n for nd in no_decay)) - ], - "weight_decay": args.weight_decay, - }, - { - "params": [ - p - for n, p in model.named_parameters() - if ("highway" not in n) and (any(nd in n for nd in no_decay)) - ], - "weight_decay": 0.0, - }, - ] - optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) - scheduler = get_linear_schedule_with_warmup( - optimizer, num_warmup_steps=args.warmup_steps, 
num_training_steps=t_total - ) - if args.fp16: - try: - from apex import amp - except ImportError: - raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") - model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level) - - # multi-gpu training (should be after apex fp16 initialization) - if args.n_gpu > 1: - model = nn.DataParallel(model) - - # Distributed training (should be after apex fp16 initialization) - if args.local_rank != -1: - model = nn.parallel.DistributedDataParallel( - model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True - ) - - # Train! - logger.info("***** Running training *****") - logger.info(" Num examples = %d", len(train_dataset)) - logger.info(" Num Epochs = %d", args.num_train_epochs) - logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size) - logger.info( - " Total train batch size (w. parallel, distributed & accumulation) = %d", - args.train_batch_size - * args.gradient_accumulation_steps - * (torch.distributed.get_world_size() if args.local_rank != -1 else 1), - ) - logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps) - logger.info(" Total optimization steps = %d", t_total) - - global_step = 0 - tr_loss, logging_loss = 0.0, 0.0 - model.zero_grad() - train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0]) - set_seed(args) # Added here for reproducibility (even between python 2 and 3) - for _ in train_iterator: - epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0]) - for step, batch in enumerate(epoch_iterator): - model.train() - batch = tuple(t.to(args.device) for t in batch) - inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]} - if args.model_type != "distilbert": - inputs["token_type_ids"] = ( - batch[2] if args.model_type in ["bert", "xlnet"] else None - ) # XLM, DistilBERT and RoBERTa don't use segment_ids - inputs["train_highway"] = train_highway - outputs = model(**inputs) - loss = outputs[0] # model outputs are always tuple in transformers (see doc) - - if args.n_gpu > 1: - loss = loss.mean() # mean() to average on multi-gpu parallel training - if args.gradient_accumulation_steps > 1: - loss = loss / args.gradient_accumulation_steps - - if args.fp16: - with amp.scale_loss(loss, optimizer) as scaled_loss: - scaled_loss.backward() - else: - loss.backward() - - tr_loss += loss.item() - if (step + 1) % args.gradient_accumulation_steps == 0: - if args.fp16: - nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm) - else: - nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm) - - optimizer.step() - scheduler.step() # Update learning rate schedule - model.zero_grad() - global_step += 1 - - if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0: - # Log metrics - if ( - args.local_rank == -1 and args.evaluate_during_training - ): # Only evaluate when single GPU otherwise metrics may not average well - results = evaluate(args, model, tokenizer) - for key, value in results.items(): - tb_writer.add_scalar("eval_{}".format(key), value, global_step) - tb_writer.add_scalar("lr", scheduler.get_lr()[0], global_step) - tb_writer.add_scalar("loss", (tr_loss - logging_loss) / args.logging_steps, global_step) - logging_loss = tr_loss - - if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % 
args.save_steps == 0: - # Save model checkpoint - output_dir = os.path.join(args.output_dir, "checkpoint-{}".format(global_step)) - if not os.path.exists(output_dir): - os.makedirs(output_dir) - model_to_save = ( - model.module if hasattr(model, "module") else model - ) # Take care of distributed/parallel training - model_to_save.save_pretrained(output_dir) - torch.save(args, os.path.join(output_dir, "training_args.bin")) - logger.info("Saving model checkpoint to %s", output_dir) - - if args.max_steps > 0 and global_step > args.max_steps: - epoch_iterator.close() - break - if args.max_steps > 0 and global_step > args.max_steps: - train_iterator.close() - break - - if args.local_rank in [-1, 0]: - tb_writer.close() - - return global_step, tr_loss / global_step - - -def evaluate(args, model, tokenizer, prefix="", output_layer=-1, eval_highway=False): - # Loop to handle MNLI double evaluation (matched, mis-matched) - eval_task_names = ("mnli", "mnli-mm") if args.task_name == "mnli" else (args.task_name,) - eval_outputs_dirs = (args.output_dir, args.output_dir + "-MM") if args.task_name == "mnli" else (args.output_dir,) - - results = {} - for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs): - eval_dataset = load_and_cache_examples(args, eval_task, tokenizer, evaluate=True) - - if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]: - os.makedirs(eval_output_dir) - - args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu) - # Note that DistributedSampler samples randomly - eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset) - eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size) - - # multi-gpu eval - if args.n_gpu > 1: - model = nn.DataParallel(model) - - # Eval! 
- logger.info("***** Running evaluation {} *****".format(prefix)) - logger.info(" Num examples = %d", len(eval_dataset)) - logger.info(" Batch size = %d", args.eval_batch_size) - eval_loss = 0.0 - nb_eval_steps = 0 - preds = None - out_label_ids = None - exit_layer_counter = {(i + 1): 0 for i in range(model.num_layers)} - st = time.time() - for batch in tqdm(eval_dataloader, desc="Evaluating"): - model.eval() - batch = tuple(t.to(args.device) for t in batch) - - with torch.no_grad(): - inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]} - if args.model_type != "distilbert": - inputs["token_type_ids"] = ( - batch[2] if args.model_type in ["bert", "xlnet"] else None - ) # XLM, DistilBERT and RoBERTa don't use segment_ids - if output_layer >= 0: - inputs["output_layer"] = output_layer - outputs = model(**inputs) - if eval_highway: - exit_layer_counter[outputs[-1]] += 1 - tmp_eval_loss, logits = outputs[:2] - - eval_loss += tmp_eval_loss.mean().item() - nb_eval_steps += 1 - if preds is None: - preds = logits.detach().cpu().numpy() - out_label_ids = inputs["labels"].detach().cpu().numpy() - else: - preds = np.append(preds, logits.detach().cpu().numpy(), axis=0) - out_label_ids = np.append(out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0) - eval_time = time.time() - st - logger.info("Eval time: {}".format(eval_time)) - - eval_loss = eval_loss / nb_eval_steps - if args.output_mode == "classification": - preds = np.argmax(preds, axis=1) - elif args.output_mode == "regression": - preds = np.squeeze(preds) - result = compute_metrics(eval_task, preds, out_label_ids) - results.update(result) - - if eval_highway: - logger.info("Exit layer counter: {}".format(exit_layer_counter)) - actual_cost = sum([l * c for l, c in exit_layer_counter.items()]) - full_cost = len(eval_dataloader) * model.num_layers - logger.info("Expected saving: {}".format(actual_cost / full_cost)) - if args.early_exit_entropy >= 0: - save_fname = ( - args.plot_data_dir - + "/" - + args.model_name_or_path[2:] - + "/entropy_{}.npy".format(args.early_exit_entropy) - ) - if not os.path.exists(os.path.dirname(save_fname)): - os.makedirs(os.path.dirname(save_fname)) - print_result = get_wanted_result(result) - np.save(save_fname, np.array([exit_layer_counter, eval_time, actual_cost / full_cost, print_result])) - logger.info("Entropy={}\tResult={:.2f}".format(args.early_exit_entropy, 100 * print_result)) - - output_eval_file = os.path.join(eval_output_dir, prefix, "eval_results.txt") - with open(output_eval_file, "w") as writer: - logger.info("***** Eval results {} *****".format(prefix)) - for key in sorted(result.keys()): - logger.info(" %s = %s", key, str(result[key])) - writer.write("%s = %s\n" % (key, str(result[key]))) - - return results - - -def load_and_cache_examples(args, task, tokenizer, evaluate=False): - if args.local_rank not in [-1, 0] and not evaluate: - torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache - - processor = processors[task]() - output_mode = output_modes[task] - # Load data features from cache or dataset file - cached_features_file = os.path.join( - args.data_dir, - "cached_{}_{}_{}_{}".format( - "dev" if evaluate else "train", - list(filter(None, args.model_name_or_path.split("/"))).pop(), - str(args.max_seq_length), - str(task), - ), - ) - if os.path.exists(cached_features_file) and not args.overwrite_cache: - logger.info("Loading features from cached file %s", 
cached_features_file) - features = torch.load(cached_features_file) - else: - logger.info("Creating features from dataset file at %s", args.data_dir) - label_list = processor.get_labels() - if task in ["mnli", "mnli-mm"] and args.model_type in ["roberta"]: - # HACK(label indices are swapped in RoBERTa pretrained model) - label_list[1], label_list[2] = label_list[2], label_list[1] - examples = ( - processor.get_dev_examples(args.data_dir) if evaluate else processor.get_train_examples(args.data_dir) - ) - features = convert_examples_to_features( - examples, - tokenizer, - label_list=label_list, - max_length=args.max_seq_length, - output_mode=output_mode, - ) - if args.local_rank in [-1, 0]: - logger.info("Saving features into cached file %s", cached_features_file) - torch.save(features, cached_features_file) - - if args.local_rank == 0 and not evaluate: - torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache - - # Convert to Tensors and build dataset - all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long) - all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long) - - if features[0].token_type_ids is None: - # For RoBERTa (a potential bug!) - all_token_type_ids = torch.tensor([[0] * args.max_seq_length for f in features], dtype=torch.long) - else: - all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long) - if output_mode == "classification": - all_labels = torch.tensor([f.label for f in features], dtype=torch.long) - elif output_mode == "regression": - all_labels = torch.tensor([f.label for f in features], dtype=torch.float) - - dataset = TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels) - return dataset - - -def main(): - parser = argparse.ArgumentParser() - - # Required parameters - parser.add_argument( - "--data_dir", - default=None, - type=str, - required=True, - help="The input data dir. 
Should contain the .tsv files (or other data files) for the task.", - ) - parser.add_argument( - "--model_type", - default=None, - type=str, - required=True, - help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()), - ) - parser.add_argument( - "--model_name_or_path", - default=None, - type=str, - required=True, - help="Path to pre-trained model or shortcut name.", - ) - parser.add_argument( - "--task_name", - default=None, - type=str, - required=True, - help="The name of the task to train selected in the list: " + ", ".join(processors.keys()), - ) - parser.add_argument( - "--output_dir", - default=None, - type=str, - required=True, - help="The output directory where the model predictions and checkpoints will be written.", - ) - parser.add_argument( - "--plot_data_dir", - default="./plotting/", - type=str, - required=False, - help="The directory to store data for plotting figures.", - ) - - # Other parameters - parser.add_argument( - "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name" - ) - parser.add_argument( - "--tokenizer_name", - default="", - type=str, - help="Pretrained tokenizer name or path if not the same as model_name", - ) - parser.add_argument( - "--cache_dir", - default="", - type=str, - help="Where do you want to store the pre-trained models downloaded from huggingface.co", - ) - parser.add_argument( - "--max_seq_length", - default=128, - type=int, - help=( - "The maximum total input sequence length after tokenization. Sequences longer " - "than this will be truncated, sequences shorter will be padded." - ), - ) - parser.add_argument("--do_train", action="store_true", help="Whether to run training.") - parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.") - parser.add_argument( - "--evaluate_during_training", action="store_true", help="Rul evaluation during training at each logging step." - ) - parser.add_argument( - "--do_lower_case", action="store_true", help="Set this flag if you are using an uncased model." - ) - parser.add_argument("--eval_each_highway", action="store_true", help="Set this flag to evaluate each highway.") - parser.add_argument( - "--eval_after_first_stage", - action="store_true", - help="Set this flag to evaluate after training only bert (not highway).", - ) - parser.add_argument("--eval_highway", action="store_true", help="Set this flag if it's evaluating highway models") - - parser.add_argument("--per_gpu_train_batch_size", default=8, type=int, help="Batch size per GPU/CPU for training.") - parser.add_argument( - "--per_gpu_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation." - ) - parser.add_argument( - "--gradient_accumulation_steps", - type=int, - default=1, - help="Number of updates steps to accumulate before performing a backward/update pass.", - ) - parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.") - parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.") - parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.") - parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") - parser.add_argument( - "--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform." 
- ) - parser.add_argument( - "--max_steps", - default=-1, - type=int, - help="If > 0: set total number of training steps to perform. Override num_train_epochs.", - ) - parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.") - parser.add_argument("--early_exit_entropy", default=-1, type=float, help="Entropy threshold for early exit.") - - parser.add_argument("--logging_steps", type=int, default=50, help="Log every X updates steps.") - parser.add_argument("--save_steps", type=int, default=50, help="Save checkpoint every X updates steps.") - parser.add_argument( - "--eval_all_checkpoints", - action="store_true", - help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number", - ) - parser.add_argument("--no_cuda", action="store_true", help="Avoid using CUDA when available") - parser.add_argument( - "--overwrite_output_dir", action="store_true", help="Overwrite the content of the output directory" - ) - parser.add_argument( - "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets" - ) - parser.add_argument("--seed", type=int, default=42, help="random seed for initialization") - - parser.add_argument( - "--fp16", - action="store_true", - help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit", - ) - parser.add_argument( - "--fp16_opt_level", - type=str, - default="O1", - help=( - "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. " - "See details at https://nvidia.github.io/apex/amp.html" - ), - ) - parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") - parser.add_argument("--server_ip", type=str, default="", help="For distant debugging.") - parser.add_argument("--server_port", type=str, default="", help="For distant debugging.") - args = parser.parse_args() - - if ( - os.path.exists(args.output_dir) - and os.listdir(args.output_dir) - and args.do_train - and not args.overwrite_output_dir - ): - raise ValueError( - "Output directory ({}) already exists and is not empty. 
Use --overwrite_output_dir to overcome.".format( - args.output_dir - ) - ) - - # Setup distant debugging if needed - if args.server_ip and args.server_port: - # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script - import ptvsd - - print("Waiting for debugger attach") - ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True) - ptvsd.wait_for_attach() - - # Setup CUDA, GPU & distributed training - if args.local_rank == -1 or args.no_cuda: - device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") - args.n_gpu = torch.cuda.device_count() - else: # Initializes the distributed backend which will take care of synchronizing nodes/GPUs - torch.cuda.set_device(args.local_rank) - device = torch.device("cuda", args.local_rank) - torch.distributed.init_process_group(backend="nccl") - args.n_gpu = 1 - args.device = device - - # Setup logging - logging.basicConfig( - format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", - datefmt="%m/%d/%Y %H:%M:%S", - level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN, - ) - logger.warning( - "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", - args.local_rank, - device, - args.n_gpu, - bool(args.local_rank != -1), - args.fp16, - ) - # Set the verbosity to info of the Transformers logger (on main process only): - if is_main_process(args.local_rank): - transformers.utils.logging.set_verbosity_info() - transformers.utils.logging.enable_default_handler() - transformers.utils.logging.enable_explicit_format() - # Set seed - set_seed(args) - - # Prepare GLUE task - args.task_name = args.task_name.lower() - if args.task_name not in processors: - raise ValueError("Task not found: %s" % (args.task_name)) - processor = processors[args.task_name]() - args.output_mode = output_modes[args.task_name] - label_list = processor.get_labels() - num_labels = len(label_list) - - # Load pretrained model and tokenizer - if args.local_rank not in [-1, 0]: - torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab - - args.model_type = args.model_type.lower() - config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type] - config = config_class.from_pretrained( - args.config_name if args.config_name else args.model_name_or_path, - num_labels=num_labels, - finetuning_task=args.task_name, - cache_dir=args.cache_dir if args.cache_dir else None, - ) - tokenizer = tokenizer_class.from_pretrained( - args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, - do_lower_case=args.do_lower_case, - cache_dir=args.cache_dir if args.cache_dir else None, - ) - model = model_class.from_pretrained( - args.model_name_or_path, - from_tf=bool(".ckpt" in args.model_name_or_path), - config=config, - cache_dir=args.cache_dir if args.cache_dir else None, - ) - - if args.model_type == "bert": - model.bert.encoder.set_early_exit_entropy(args.early_exit_entropy) - model.bert.init_highway_pooler() - elif args.model_type == "roberta": - model.roberta.encoder.set_early_exit_entropy(args.early_exit_entropy) - model.roberta.init_highway_pooler() - else: - raise NotImplementedError() - - if args.local_rank == 0: - torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab - - model.to(args.device) - - logger.info("Training/evaluation parameters %s", args) - - # Training - if args.do_train: - train_dataset = 
load_and_cache_examples(args, args.task_name, tokenizer, evaluate=False) - global_step, tr_loss = train(args, train_dataset, model, tokenizer) - logger.info(" global_step = %s, average loss = %s", global_step, tr_loss) - - if args.eval_after_first_stage: - result = evaluate(args, model, tokenizer, prefix="") - print_result = get_wanted_result(result) - - train(args, train_dataset, model, tokenizer, train_highway=True) - - # Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained() - if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0): - # Create output directory if needed - if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]: - os.makedirs(args.output_dir) - - logger.info("Saving model checkpoint to %s", args.output_dir) - # Save a trained model, configuration and tokenizer using `save_pretrained()`. - # They can then be reloaded using `from_pretrained()` - model_to_save = ( - model.module if hasattr(model, "module") else model - ) # Take care of distributed/parallel training - model_to_save.save_pretrained(args.output_dir) - tokenizer.save_pretrained(args.output_dir) - - # Good practice: save your training arguments together with the trained model - torch.save(args, os.path.join(args.output_dir, "training_args.bin")) - - # Load a trained model and vocabulary that you have fine-tuned - model = model_class.from_pretrained(args.output_dir) - tokenizer = tokenizer_class.from_pretrained(args.output_dir) - model.to(args.device) - - # Evaluation - results = {} - if args.do_eval and args.local_rank in [-1, 0]: - tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case) - checkpoints = [args.output_dir] - if args.eval_all_checkpoints: - checkpoints = [ - os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + "/**/" + WEIGHTS_NAME, recursive=True)) - ] - - logger.info("Evaluate the following checkpoints: %s", checkpoints) - for checkpoint in checkpoints: - global_step = checkpoint.split("-")[-1] if len(checkpoints) > 1 else "" - prefix = checkpoint.split("/")[-1] if checkpoint.find("checkpoint") != -1 else "" - - model = model_class.from_pretrained(checkpoint) - if args.model_type == "bert": - model.bert.encoder.set_early_exit_entropy(args.early_exit_entropy) - elif args.model_type == "roberta": - model.roberta.encoder.set_early_exit_entropy(args.early_exit_entropy) - else: - raise NotImplementedError() - - model.to(args.device) - result = evaluate(args, model, tokenizer, prefix=prefix, eval_highway=args.eval_highway) - print_result = get_wanted_result(result) - logger.info("Result: {}".format(print_result)) - if args.eval_each_highway: - last_layer_results = print_result - each_layer_results = [] - for i in range(model.num_layers): - logger.info("\n") - _result = evaluate( - args, model, tokenizer, prefix=prefix, output_layer=i, eval_highway=args.eval_highway - ) - if i + 1 < model.num_layers: - each_layer_results.append(get_wanted_result(_result)) - each_layer_results.append(last_layer_results) - save_fname = args.plot_data_dir + "/" + args.model_name_or_path[2:] + "/each_layer.npy" - if not os.path.exists(os.path.dirname(save_fname)): - os.makedirs(os.path.dirname(save_fname)) - np.save(save_fname, np.array(each_layer_results)) - info_str = "Score of each layer:" - for i in range(model.num_layers): - info_str += " {:.2f}".format(100 * each_layer_results[i]) - logger.info(info_str) - result = {k + "_{}".format(global_step): v for k, v in result.items()} - 
results.update(result) - - return results - - -if __name__ == "__main__": - main() diff --git a/examples/research_projects/deebert/src/__init__.py b/examples/research_projects/deebert/src/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/examples/research_projects/deebert/src/modeling_highway_bert.py b/examples/research_projects/deebert/src/modeling_highway_bert.py deleted file mode 100644 index b866ef0869c..00000000000 --- a/examples/research_projects/deebert/src/modeling_highway_bert.py +++ /dev/null @@ -1,397 +0,0 @@ -import torch -from torch import nn -from torch.nn import CrossEntropyLoss, MSELoss - -from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward -from transformers.models.bert.modeling_bert import ( - BERT_INPUTS_DOCSTRING, - BERT_START_DOCSTRING, - BertEmbeddings, - BertLayer, - BertPooler, - BertPreTrainedModel, -) - - -def entropy(x): - """Calculate entropy of a pre-softmax logit Tensor""" - exp_x = torch.exp(x) - A = torch.sum(exp_x, dim=1) # sum of exp(x_i) - B = torch.sum(x * exp_x, dim=1) # sum of x_i * exp(x_i) - return torch.log(A) - B / A - - -class DeeBertEncoder(nn.Module): - def __init__(self, config): - super().__init__() - self.output_attentions = config.output_attentions - self.output_hidden_states = config.output_hidden_states - self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)]) - self.highway = nn.ModuleList([BertHighway(config) for _ in range(config.num_hidden_layers)]) - - self.early_exit_entropy = [-1 for _ in range(config.num_hidden_layers)] - - def set_early_exit_entropy(self, x): - if isinstance(x, (float, int)): - for i in range(len(self.early_exit_entropy)): - self.early_exit_entropy[i] = x - else: - self.early_exit_entropy = x - - def init_highway_pooler(self, pooler): - loaded_model = pooler.state_dict() - for highway in self.highway: - for name, param in highway.pooler.state_dict().items(): - param.copy_(loaded_model[name]) - - def forward( - self, - hidden_states, - attention_mask=None, - head_mask=None, - encoder_hidden_states=None, - encoder_attention_mask=None, - ): - all_hidden_states = () - all_attentions = () - all_highway_exits = () - for i, layer_module in enumerate(self.layer): - if self.output_hidden_states: - all_hidden_states = all_hidden_states + (hidden_states,) - - layer_outputs = layer_module( - hidden_states, attention_mask, head_mask[i], encoder_hidden_states, encoder_attention_mask - ) - hidden_states = layer_outputs[0] - - if self.output_attentions: - all_attentions = all_attentions + (layer_outputs[1],) - - current_outputs = (hidden_states,) - if self.output_hidden_states: - current_outputs = current_outputs + (all_hidden_states,) - if self.output_attentions: - current_outputs = current_outputs + (all_attentions,) - - highway_exit = self.highway[i](current_outputs) - # logits, pooled_output - - if not self.training: - highway_logits = highway_exit[0] - highway_entropy = entropy(highway_logits) - highway_exit = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy - all_highway_exits = all_highway_exits + (highway_exit,) - - if highway_entropy < self.early_exit_entropy[i]: - new_output = (highway_logits,) + current_outputs[1:] + (all_highway_exits,) - raise HighwayException(new_output, i + 1) - else: - all_highway_exits = all_highway_exits + (highway_exit,) - - # Add last layer - if self.output_hidden_states: - all_hidden_states = all_hidden_states + (hidden_states,) - - outputs = (hidden_states,) - if 
self.output_hidden_states: - outputs = outputs + (all_hidden_states,) - if self.output_attentions: - outputs = outputs + (all_attentions,) - - outputs = outputs + (all_highway_exits,) - return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits - - -@add_start_docstrings( - "The Bert Model transformer with early exiting (DeeBERT). ", - BERT_START_DOCSTRING, -) -class DeeBertModel(BertPreTrainedModel): - def __init__(self, config): - super().__init__(config) - self.config = config - - self.embeddings = BertEmbeddings(config) - self.encoder = DeeBertEncoder(config) - self.pooler = BertPooler(config) - - self.init_weights() - - def init_highway_pooler(self): - self.encoder.init_highway_pooler(self.pooler) - - def get_input_embeddings(self): - return self.embeddings.word_embeddings - - def set_input_embeddings(self, value): - self.embeddings.word_embeddings = value - - def _prune_heads(self, heads_to_prune): - """Prunes heads of the model. - heads_to_prune: dict of {layer_num: list of heads to prune in this layer} - See base class PreTrainedModel - """ - for layer, heads in heads_to_prune.items(): - self.encoder.layer[layer].attention.prune_heads(heads) - - @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING) - def forward( - self, - input_ids=None, - attention_mask=None, - token_type_ids=None, - position_ids=None, - head_mask=None, - inputs_embeds=None, - encoder_hidden_states=None, - encoder_attention_mask=None, - ): - r""" - Return: - :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.BertConfig`) and inputs: - last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`): - Sequence of hidden-states at the output of the last layer of the model. - pooler_output (:obj:`torch.FloatTensor`: of shape :obj:`(batch_size, hidden_size)`): - Last layer hidden-state of the first token of the sequence (classification token) - further processed by a Linear layer and a Tanh activation function. The Linear - layer weights are trained from the next sentence prediction (classification) - objective during pre-training. - - This output is usually *not* a good summary - of the semantic content of the input, you're often better with averaging or pooling - the sequence of hidden-states for the whole input sequence. - hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): - Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) - of shape :obj:`(batch_size, sequence_length, hidden_size)`. - - Hidden-states of the model at the output of each layer plus the initial embedding outputs. - attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): - Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape - :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. - - Attentions weights after the attention softmax, used to compute the weighted average in the self-attention - heads. - highway_exits (:obj:`tuple(tuple(torch.Tensor))`: - Tuple of each early exit's results (total length: number of layers) - Each tuple is again, a tuple of length 2 - the first entry is logits and the second entry is hidden states. 
- """ - if input_ids is not None and inputs_embeds is not None: - raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") - elif input_ids is not None: - input_shape = input_ids.size() - elif inputs_embeds is not None: - input_shape = inputs_embeds.size()[:-1] - else: - raise ValueError("You have to specify either input_ids or inputs_embeds") - - device = input_ids.device if input_ids is not None else inputs_embeds.device - - if attention_mask is None: - attention_mask = torch.ones(input_shape, device=device) - if encoder_attention_mask is None: - encoder_attention_mask = torch.ones(input_shape, device=device) - if token_type_ids is None: - token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) - - # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] - # ourselves in which case we just need to make it broadcastable to all heads. - extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device) - - # If a 2D ou 3D attention mask is provided for the cross-attention - # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] - if encoder_attention_mask.dim() == 3: - encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :] - if encoder_attention_mask.dim() == 2: - encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :] - - encoder_extended_attention_mask = encoder_extended_attention_mask.to( - dtype=next(self.parameters()).dtype - ) # fp16 compatibility - encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0 - - # Prepare head mask if needed - # 1.0 in head_mask indicate we keep the head - # attention_probs has shape bsz x n_heads x N x N - # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] - # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] - head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) - - embedding_output = self.embeddings( - input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds - ) - encoder_outputs = self.encoder( - embedding_output, - attention_mask=extended_attention_mask, - head_mask=head_mask, - encoder_hidden_states=encoder_hidden_states, - encoder_attention_mask=encoder_extended_attention_mask, - ) - sequence_output = encoder_outputs[0] - pooled_output = self.pooler(sequence_output) - - outputs = ( - sequence_output, - pooled_output, - ) + encoder_outputs[1:] # add hidden_states and attentions if they are here - return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits - - -class HighwayException(Exception): - def __init__(self, message, exit_layer): - self.message = message - self.exit_layer = exit_layer # start from 1! 
- - -class BertHighway(nn.Module): - """A module to provide a shortcut - from (the output of one non-final BertLayer in BertEncoder) to (cross-entropy computation in BertForSequenceClassification) - """ - - def __init__(self, config): - super().__init__() - self.pooler = BertPooler(config) - self.dropout = nn.Dropout(config.hidden_dropout_prob) - self.classifier = nn.Linear(config.hidden_size, config.num_labels) - - def forward(self, encoder_outputs): - # Pooler - pooler_input = encoder_outputs[0] - pooler_output = self.pooler(pooler_input) - # "return" pooler_output - - # BertModel - bmodel_output = (pooler_input, pooler_output) + encoder_outputs[1:] - # "return" bmodel_output - - # Dropout and classification - pooled_output = bmodel_output[1] - - pooled_output = self.dropout(pooled_output) - logits = self.classifier(pooled_output) - - return logits, pooled_output - - -@add_start_docstrings( - """Bert Model (with early exiting - DeeBERT) with a classifier on top, - also takes care of multi-layer training. """, - BERT_START_DOCSTRING, -) -class DeeBertForSequenceClassification(BertPreTrainedModel): - def __init__(self, config): - super().__init__(config) - self.num_labels = config.num_labels - self.num_layers = config.num_hidden_layers - - self.bert = DeeBertModel(config) - self.dropout = nn.Dropout(config.hidden_dropout_prob) - self.classifier = nn.Linear(config.hidden_size, self.config.num_labels) - - self.init_weights() - - @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING) - def forward( - self, - input_ids=None, - attention_mask=None, - token_type_ids=None, - position_ids=None, - head_mask=None, - inputs_embeds=None, - labels=None, - output_layer=-1, - train_highway=False, - ): - r""" - labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): - Labels for computing the sequence classification/regression loss. - Indices should be in :obj:`[0, ..., config.num_labels - 1]`. - If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss), - If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy). - - Returns: - :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.BertConfig`) and inputs: - loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`label` is provided): - Classification (or regression if config.num_labels==1) loss. - logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, config.num_labels)`): - Classification (or regression if config.num_labels==1) scores (before SoftMax). - hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): - Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) - of shape :obj:`(batch_size, sequence_length, hidden_size)`. - - Hidden-states of the model at the output of each layer plus the initial embedding outputs. - attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): - Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape - :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. - - Attentions weights after the attention softmax, used to compute the weighted average in the self-attention - heads. 
- highway_exits (:obj:`tuple(tuple(torch.Tensor))`: - Tuple of each early exit's results (total length: number of layers) - Each tuple is again, a tuple of length 2 - the first entry is logits and the second entry is hidden states. - """ - - exit_layer = self.num_layers - try: - outputs = self.bert( - input_ids, - attention_mask=attention_mask, - token_type_ids=token_type_ids, - position_ids=position_ids, - head_mask=head_mask, - inputs_embeds=inputs_embeds, - ) - # sequence_output, pooled_output, (hidden_states), (attentions), highway exits - - pooled_output = outputs[1] - - pooled_output = self.dropout(pooled_output) - logits = self.classifier(pooled_output) - outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here - except HighwayException as e: - outputs = e.message - exit_layer = e.exit_layer - logits = outputs[0] - - if not self.training: - original_entropy = entropy(logits) - highway_entropy = [] - highway_logits_all = [] - if labels is not None: - if self.num_labels == 1: - # We are doing regression - loss_fct = MSELoss() - loss = loss_fct(logits.view(-1), labels.view(-1)) - else: - loss_fct = CrossEntropyLoss() - loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) - - # work with highway exits - highway_losses = [] - for highway_exit in outputs[-1]: - highway_logits = highway_exit[0] - if not self.training: - highway_logits_all.append(highway_logits) - highway_entropy.append(highway_exit[2]) - if self.num_labels == 1: - # We are doing regression - loss_fct = MSELoss() - highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1)) - else: - loss_fct = CrossEntropyLoss() - highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1)) - highway_losses.append(highway_loss) - - if train_highway: - outputs = (sum(highway_losses[:-1]),) + outputs - # exclude the final highway, of course - else: - outputs = (loss,) + outputs - if not self.training: - outputs = outputs + ((original_entropy, highway_entropy), exit_layer) - if output_layer >= 0: - outputs = ( - (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:] - ) # use the highway of the last layer - - return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits) diff --git a/examples/research_projects/deebert/src/modeling_highway_roberta.py b/examples/research_projects/deebert/src/modeling_highway_roberta.py deleted file mode 100644 index c21fb32fde7..00000000000 --- a/examples/research_projects/deebert/src/modeling_highway_roberta.py +++ /dev/null @@ -1,154 +0,0 @@ -from __future__ import absolute_import, division, print_function, unicode_literals - -from torch import nn -from torch.nn import CrossEntropyLoss, MSELoss - -from transformers import RobertaConfig -from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward -from transformers.models.roberta.modeling_roberta import ( - ROBERTA_INPUTS_DOCSTRING, - ROBERTA_START_DOCSTRING, - RobertaEmbeddings, -) - -from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy - - -@add_start_docstrings( - "The RoBERTa Model transformer with early exiting (DeeRoBERTa). 
", - ROBERTA_START_DOCSTRING, -) -class DeeRobertaModel(DeeBertModel): - config_class = RobertaConfig - base_model_prefix = "roberta" - - def __init__(self, config): - super().__init__(config) - - self.embeddings = RobertaEmbeddings(config) - self.init_weights() - - -@add_start_docstrings( - """RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top, - also takes care of multi-layer training. """, - ROBERTA_START_DOCSTRING, -) -class DeeRobertaForSequenceClassification(BertPreTrainedModel): - config_class = RobertaConfig - base_model_prefix = "roberta" - - def __init__(self, config): - super().__init__(config) - self.num_labels = config.num_labels - self.num_layers = config.num_hidden_layers - - self.roberta = DeeRobertaModel(config) - self.dropout = nn.Dropout(config.hidden_dropout_prob) - self.classifier = nn.Linear(config.hidden_size, self.config.num_labels) - - @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING) - def forward( - self, - input_ids=None, - attention_mask=None, - token_type_ids=None, - position_ids=None, - head_mask=None, - inputs_embeds=None, - labels=None, - output_layer=-1, - train_highway=False, - ): - r""" - labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): - Labels for computing the sequence classification/regression loss. - Indices should be in :obj:`[0, ..., config.num_labels - 1]`. - If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss), - If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy). - - Returns: - :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.RobertaConfig`) and inputs: - loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`label` is provided): - Classification (or regression if config.num_labels==1) loss. - logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, config.num_labels)`): - Classification (or regression if config.num_labels==1) scores (before SoftMax). - hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): - Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) - of shape :obj:`(batch_size, sequence_length, hidden_size)`. - - Hidden-states of the model at the output of each layer plus the initial embedding outputs. - attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): - Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape - :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. - - Attentions weights after the attention softmax, used to compute the weighted average in the self-attention - heads. - highway_exits (:obj:`tuple(tuple(torch.Tensor))`: - Tuple of each early exit's results (total length: number of layers) - Each tuple is again, a tuple of length 2 - the first entry is logits and the second entry is hidden states. 
- """ - - exit_layer = self.num_layers - try: - outputs = self.roberta( - input_ids, - attention_mask=attention_mask, - token_type_ids=token_type_ids, - position_ids=position_ids, - head_mask=head_mask, - inputs_embeds=inputs_embeds, - ) - - pooled_output = outputs[1] - - pooled_output = self.dropout(pooled_output) - logits = self.classifier(pooled_output) - outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here - except HighwayException as e: - outputs = e.message - exit_layer = e.exit_layer - logits = outputs[0] - - if not self.training: - original_entropy = entropy(logits) - highway_entropy = [] - highway_logits_all = [] - if labels is not None: - if self.num_labels == 1: - # We are doing regression - loss_fct = MSELoss() - loss = loss_fct(logits.view(-1), labels.view(-1)) - else: - loss_fct = CrossEntropyLoss() - loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) - - # work with highway exits - highway_losses = [] - for highway_exit in outputs[-1]: - highway_logits = highway_exit[0] - if not self.training: - highway_logits_all.append(highway_logits) - highway_entropy.append(highway_exit[2]) - if self.num_labels == 1: - # We are doing regression - loss_fct = MSELoss() - highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1)) - else: - loss_fct = CrossEntropyLoss() - highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1)) - highway_losses.append(highway_loss) - - if train_highway: - outputs = (sum(highway_losses[:-1]),) + outputs - # exclude the final highway, of course - else: - outputs = (loss,) + outputs - if not self.training: - outputs = outputs + ((original_entropy, highway_entropy), exit_layer) - if output_layer >= 0: - outputs = ( - (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:] - ) # use the highway of the last layer - - return outputs # (loss), logits, (hidden_states), (attentions), entropy diff --git a/examples/research_projects/deebert/test_glue_deebert.py b/examples/research_projects/deebert/test_glue_deebert.py deleted file mode 100644 index 7a5f059c8ce..00000000000 --- a/examples/research_projects/deebert/test_glue_deebert.py +++ /dev/null @@ -1,104 +0,0 @@ -import argparse -import logging -import sys -from unittest.mock import patch - -import run_glue_deebert - -from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow - - -logging.basicConfig(level=logging.DEBUG) - -logger = logging.getLogger() - - -def get_setup_file(): - parser = argparse.ArgumentParser() - parser.add_argument("-f") - args = parser.parse_args() - return args.f - - -class DeeBertTests(TestCasePlus): - def setup(self) -> None: - stream_handler = logging.StreamHandler(sys.stdout) - logger.addHandler(stream_handler) - - def run_and_check(self, args): - n_gpu = get_gpu_count() - - if n_gpu > 1: - pass - # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560 - # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py" - # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split() - # cmd = [sys.executable] + distributed_args + args - # execute_subprocess_async(cmd, env=self.get_env()) - # XXX: test the results - need to save them first into .json file - else: - args.insert(0, "run_glue_deebert.py") - with patch.object(sys, "argv", args): - result = run_glue_deebert.main() - for value in result.values(): - self.assertGreaterEqual(value, 0.666) - - @slow - 
@require_torch_non_multi_gpu - def test_glue_deebert_train(self): - train_args = """ - --model_type roberta - --model_name_or_path FacebookAI/roberta-base - --task_name MRPC - --do_train - --do_eval - --do_lower_case - --data_dir ./tests/fixtures/tests_samples/MRPC/ - --max_seq_length 128 - --per_gpu_eval_batch_size=1 - --per_gpu_train_batch_size=8 - --learning_rate 2e-4 - --num_train_epochs 3 - --overwrite_output_dir - --seed 42 - --output_dir ./examples/deebert/saved_models/FacebookAI/roberta-base/MRPC/two_stage - --plot_data_dir ./examples/deebert/results/ - --save_steps 0 - --overwrite_cache - --eval_after_first_stage - """.split() - self.run_and_check(train_args) - - eval_args = """ - --model_type roberta - --model_name_or_path ./examples/deebert/saved_models/FacebookAI/roberta-base/MRPC/two_stage - --task_name MRPC - --do_eval - --do_lower_case - --data_dir ./tests/fixtures/tests_samples/MRPC/ - --output_dir ./examples/deebert/saved_models/FacebookAI/roberta-base/MRPC/two_stage - --plot_data_dir ./examples/deebert/results/ - --max_seq_length 128 - --eval_each_highway - --eval_highway - --overwrite_cache - --per_gpu_eval_batch_size=1 - """.split() - self.run_and_check(eval_args) - - entropy_eval_args = """ - --model_type roberta - --model_name_or_path ./examples/deebert/saved_models/FacebookAI/roberta-base/MRPC/two_stage - --task_name MRPC - --do_eval - --do_lower_case - --data_dir ./tests/fixtures/tests_samples/MRPC/ - --output_dir ./examples/deebert/saved_models/FacebookAI/roberta-base/MRPC/two_stage - --plot_data_dir ./examples/deebert/results/ - --max_seq_length 128 - --early_exit_entropy 0.1 - --eval_highway - --overwrite_cache - --per_gpu_eval_batch_size=1 - """.split() - self.run_and_check(entropy_eval_args) diff --git a/examples/research_projects/deebert/train_deebert.sh b/examples/research_projects/deebert/train_deebert.sh deleted file mode 100755 index 32cdf5730f2..00000000000 --- a/examples/research_projects/deebert/train_deebert.sh +++ /dev/null @@ -1,38 +0,0 @@ -#!/bin/bash -export CUDA_VISIBLE_DEVICES=0 - -PATH_TO_DATA=/h/xinji/projects/GLUE - -MODEL_TYPE=bert # bert or roberta -MODEL_SIZE=base # base or large -DATASET=MRPC # SST-2, MRPC, RTE, QNLI, QQP, or MNLI - -MODEL_NAME=${MODEL_TYPE}-${MODEL_SIZE} -EPOCHS=10 -if [ $MODEL_TYPE = 'bert' ] -then - EPOCHS=3 - MODEL_NAME=${MODEL_NAME}-uncased -fi - - -python -u run_glue_deebert.py \ - --model_type $MODEL_TYPE \ - --model_name_or_path $MODEL_NAME \ - --task_name $DATASET \ - --do_train \ - --do_eval \ - --do_lower_case \ - --data_dir $PATH_TO_DATA/$DATASET \ - --max_seq_length 128 \ - --per_gpu_eval_batch_size=1 \ - --per_gpu_train_batch_size=8 \ - --learning_rate 2e-5 \ - --num_train_epochs $EPOCHS \ - --overwrite_output_dir \ - --seed 42 \ - --output_dir ./saved_models/${MODEL_TYPE}-${MODEL_SIZE}/$DATASET/two_stage \ - --plot_data_dir ./results/ \ - --save_steps 0 \ - --overwrite_cache \ - --eval_after_first_stage diff --git a/examples/research_projects/distillation/README.md b/examples/research_projects/distillation/README.md deleted file mode 100644 index 594e953f99d..00000000000 --- a/examples/research_projects/distillation/README.md +++ /dev/null @@ -1,193 +0,0 @@ -# Distil* - -Author: @VictorSanh - -This folder contains the original code used to train Distil* as well as examples showcasing how to use DistilBERT, DistilRoBERTa and DistilGPT2. 
- -**January 20, 2020 - Bug fixing** We have recently discovered and fixed [a bug](https://github.com/huggingface/transformers/commit/48cbf267c988b56c71a2380f748a3e6092ccaed3) in the evaluation of our `run_*.py` scripts that caused the reported metrics to be over-estimated on average. We have updated all the metrics with the latest runs. - -**December 6, 2019 - Update** We release **DistilmBERT**: 92% of `bert-base-multilingual-cased` on XNLI. The model supports 104 different languages listed [here](https://github.com/google-research/bert/blob/master/multilingual.md#list-of-languages). - -**November 19, 2019 - Update** We release German **DistilBERT**: 98.8% of `bert-base-german-dbmdz-cased` on NER tasks. - -**October 23, 2019 - Update** We release **DistilRoBERTa**: 95% of `RoBERTa-base`'s performance on GLUE, twice as fast as RoBERTa while being 35% smaller. - -**October 3, 2019 - Update** We release our [NeurIPS workshop paper](https://arxiv.org/abs/1910.01108) explaining our approach on **DistilBERT**. It includes updated results and further experiments. We applied the same method to GPT2 and release the weights of **DistilGPT2**. DistilGPT2 is two times faster and 33% smaller than GPT2. **The paper supersedes our [previous blogpost](https://medium.com/huggingface/distilbert-8cf3380435b5) with a different distillation loss and better performances. Please use the paper as a reference when comparing/reporting results on DistilBERT.** - -**September 19, 2019 - Update:** We fixed bugs in the code and released an updated version of the weights trained with a modification of the distillation loss. DistilBERT now reaches 99% of `BERT-base`'s performance on GLUE, and 86.9 F1 score on SQuAD v1.1 dev set (compared to 88.5 for `BERT-base`). We will publish a formal write-up of our approach in the near future! - - -## What is Distil* - -Distil* is a class of compressed models that started with DistilBERT. DistilBERT stands for Distilled-BERT. DistilBERT is a small, fast, cheap and light Transformer model based on Bert architecture. It has 40% less parameters than `bert-base-uncased`, runs 60% faster while preserving 97% of BERT's performances as measured on the GLUE language understanding benchmark. DistilBERT is trained using knowledge distillation, a technique to compress a large model called the teacher into a smaller model called the student. By distillating Bert, we obtain a smaller Transformer model that bears a lot of similarities with the original BERT model while being lighter, smaller and faster to run. DistilBERT is thus an interesting option to put large-scaled trained Transformer model into production. - -We have applied the same method to other Transformer architectures and released the weights: -- GPT2: on the [WikiText-103](https://blog.einstein.ai/the-wikitext-long-term-dependency-language-modeling-dataset/) benchmark, GPT2 reaches a perplexity on the test set of 16.3 compared to 21.1 for **DistilGPT2** (after fine-tuning on the train set). -- RoBERTa: **DistilRoBERTa** reaches 95% of `RoBERTa-base`'s performance on GLUE while being twice faster and 35% smaller. -- German BERT: **German DistilBERT** reaches 99% of `bert-base-german-dbmdz-cased`'s performance on German NER (CoNLL-2003). -- Multilingual BERT: **DistilmBERT** reaches 92% of Multilingual BERT's performance on XNLI while being twice faster and 25% smaller. The model supports 104 languages listed [here](https://github.com/google-research/bert/blob/master/multilingual.md#list-of-languages). 
- -For more information on DistilBERT, please refer to our [NeurIPS workshop paper](https://arxiv.org/abs/1910.01108). - -Here are the results on the dev sets of GLUE: - -| Model | Macro-score | CoLA | MNLI | MRPC | QNLI | QQP | RTE | SST-2| STS-B| WNLI | -| :---: | :---: | :---:| :---:| :---:| :---:| :---:| :---:| :---:| :---:| :---: | -| BERT-base-uncased | **79.5** | 56.3 | 84.7 | 88.6 | 91.8 | 89.6 | 69.3 | 92.7 | 89.0 | 53.5 | -| DistilBERT-base-uncased | **77.0** | 51.3 | 82.1 | 87.5 | 89.2 | 88.5 | 59.9 | 91.3 | 86.9 | 56.3 | -| BERT-base-cased | **78.2** | 58.2 | 83.9 | 87.8 | 91.0 | 89.2 | 66.1 | 91.7 | 89.2 | 46.5 | -| DistilBERT-base-cased | **75.9** | 47.2 | 81.5 | 85.6 | 88.2 | 87.8 | 60.6 | 90.4 | 85.5 | 56.3 | -| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | -| RoBERTa-base (reported) | **83.2**/**86.4**2 | 63.6 | 87.6 | 90.2 | 92.8 | 91.9 | 78.7 | 94.8 | 91.2 | 57.73 | -| DistilRoBERTa1 | **79.0**/**82.3**2 | 59.3 | 84.0 | 86.6 | 90.8 | 89.4 | 67.9 | 92.5 | 88.3 | 52.1 | - -1 We did not use the MNLI checkpoint for fine-tuning but directly perform transfer learning on the pre-trained DistilRoBERTa. - -2 Macro-score computed without WNLI. - -3 We compute this score ourselves for completeness. - -Here are the results on the *test* sets for 6 of the languages available in XNLI. The results are computed in the zero shot setting (trained on the English portion and evaluated on the target language portion): - -| Model | English | Spanish | Chinese | German | Arabic | Urdu | -| :---: | :---: | :---: | :---: | :---: | :---: | :---:| -| mBERT base cased (computed) | 82.1 | 74.6 | 69.1 | 72.3 | 66.4 | 58.5 | -| mBERT base uncased (reported)| 81.4 | 74.3 | 63.8 | 70.5 | 62.1 | 58.3 | -| DistilmBERT | 78.2 | 69.1 | 64.0 | 66.3 | 59.1 | 54.7 | - -## Setup - -This part of the library has only be tested with Python3.6+. There are few specific dependencies to install before launching a distillation, you can install them with the command `pip install -r requirements.txt`. - -**Important note:** The training scripts have been updated to support PyTorch v1.2.0 (there are breaking changes compared to v1.1.0). - - -## How to use DistilBERT - -Transformers includes five pre-trained Distil* models, currently only provided for English and German (we are investigating the possibility to train and release a multilingual version of DistilBERT): - -- `distilbert-base-uncased`: DistilBERT English language model pretrained on the same data used to pretrain Bert (concatenation of the Toronto Book Corpus and full English Wikipedia) using distillation with the supervision of the `bert-base-uncased` version of Bert. The model has 6 layers, 768 dimension and 12 heads, totalizing 66M parameters. -- `distilbert-base-uncased-distilled-squad`: A finetuned version of `distilbert-base-uncased` finetuned using (a second step of) knowledge distillation on SQuAD 1.0. This model reaches a F1 score of 86.9 on the dev set (for comparison, Bert `bert-base-uncased` version reaches a 88.5 F1 score). -- `distilbert-base-cased`: DistilBERT English language model pretrained on the same data used to pretrain Bert (concatenation of the Toronto Book Corpus and full English Wikipedia) using distillation with the supervision of the `bert-base-cased` version of Bert. The model has 6 layers, 768 dimension and 12 heads, totalizing 65M parameters. -- `distilbert-base-cased-distilled-squad`: A finetuned version of `distilbert-base-cased` finetuned using (a second step of) knowledge distillation on SQuAD 1.0. 
This model reaches a F1 score of 87.1 on the dev set (for comparison, Bert `bert-base-cased` version reaches a 88.7 F1 score). -- `distilbert-base-german-cased`: DistilBERT German language model pretrained on 1/2 of the data used to pretrain Bert using distillation with the supervision of the `bert-base-german-dbmdz-cased` version of German DBMDZ Bert. For NER tasks the model reaches a F1 score of 83.49 on the CoNLL-2003 test set (for comparison, `bert-base-german-dbmdz-cased` reaches a 84.52 F1 score), and a F1 score of 85.23 on the GermEval 2014 test set (`bert-base-german-dbmdz-cased` reaches a 86.89 F1 score). -- `distilgpt2`: DistilGPT2 English language model pretrained with the supervision of `gpt2` (the smallest version of GPT2) on [OpenWebTextCorpus](https://skylion007.github.io/OpenWebTextCorpus/), a reproduction of OpenAI's WebText dataset. The model has 6 layers, 768 dimension and 12 heads, totalizing 82M parameters (compared to 124M parameters for GPT2). On average, DistilGPT2 is two times faster than GPT2. -- `distilroberta-base`: DistilRoBERTa English language model pretrained with the supervision of `roberta-base` solely on [OpenWebTextCorpus](https://skylion007.github.io/OpenWebTextCorpus/), a reproduction of OpenAI's WebText dataset (it is ~4 times less training data than the teacher RoBERTa). The model has 6 layers, 768 dimension and 12 heads, totalizing 82M parameters (compared to 125M parameters for RoBERTa-base). On average DistilRoBERTa is twice as fast as Roberta-base. -- `distilbert-base-multilingual-cased`: DistilmBERT multilingual model pretrained with the supervision of `bert-base-multilingual-cased` on the concatenation of Wikipedia in 104 different languages. The model supports the 104 languages listed [here](https://github.com/google-research/bert/blob/master/multilingual.md#list-of-languages). The model has 6 layers, 768 dimension and 12 heads, totalizing 134M parameters (compared to 177M parameters for mBERT-base). On average DistilmBERT is twice as fast as mBERT-base. - -Using DistilBERT is very similar to using BERT. DistilBERT share the same tokenizer as BERT's `bert-base-uncased` even though we provide a link to this tokenizer under the `DistilBertTokenizer` name to have a consistent naming between the library models. - -```python -tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-cased') -model = DistilBertModel.from_pretrained('distilbert-base-cased') - -input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) -outputs = model(input_ids) -last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple -``` - -Similarly, using the other Distil* models simply consists in calling the base classes with a different pretrained checkpoint: -- DistilBERT uncased: `model = DistilBertModel.from_pretrained('distilbert-base-uncased')` -- DistilGPT2: `model = GPT2Model.from_pretrained('distilgpt2')` -- DistilRoBERTa: `model = RobertaModel.from_pretrained('distilroberta-base')` -- DistilmBERT: `model = DistilBertModel.from_pretrained('distilbert-base-multilingual-cased')` - - -## How to train Distil* - -In the following, we will explain how you can train DistilBERT. - -### A. Preparing the data - -The weights we release are trained using a concatenation of Toronto Book Corpus and English Wikipedia (same training data as the English version of BERT). - -To avoid processing the data several time, we do it once and for all before the training. 
From now on, will suppose that you have a text file `dump.txt` which contains one sequence per line (a sequence being composed of one of several coherent sentences). - -First, we will binarize the data, i.e. tokenize the data and convert each token in an index in our model's vocabulary. - -```bash -python scripts/binarized_data.py \ - --file_path data/dump.txt \ - --tokenizer_type bert \ - --tokenizer_name bert-base-uncased \ - --dump_file data/binarized_text -``` - -Our implementation of masked language modeling loss follows [XLM](https://github.com/facebookresearch/XLM)'s one and smooths the probability of masking with a factor that put more emphasis on rare words. Thus we count the occurrences of each tokens in the data: - -```bash -python scripts/token_counts.py \ - --data_file data/binarized_text.bert-base-uncased.pickle \ - --token_counts_dump data/token_counts.bert-base-uncased.pickle \ - --vocab_size 30522 -``` - -### B. Training - -Training with distillation is really simple once you have pre-processed the data: - -```bash -python train.py \ - --student_type distilbert \ - --student_config training_configs/distilbert-base-uncased.json \ - --teacher_type bert \ - --teacher_name bert-base-uncased \ - --alpha_ce 5.0 --alpha_mlm 2.0 --alpha_cos 1.0 --alpha_clm 0.0 --mlm \ - --freeze_pos_embs \ - --dump_path serialization_dir/my_first_training \ - --data_file data/binarized_text.bert-base-uncased.pickle \ - --token_counts data/token_counts.bert-base-uncased.pickle \ - --force # overwrites the `dump_path` if it already exists. -``` - -By default, this will launch a training on a single GPU (even if more are available on the cluster). Other parameters are available in the command line, please look in `train.py` or run `python train.py --help` to list them. - -We highly encourage you to use distributed training for training DistilBERT as the training corpus is quite large. Here's an example that runs a distributed training on a single node having 4 GPUs: - -```bash -export NODE_RANK=0 -export N_NODES=1 - -export N_GPU_NODE=4 -export WORLD_SIZE=4 -export MASTER_PORT= -export MASTER_ADDR= - -pkill -f 'python -u train.py' - -python -m torch.distributed.launch \ - --nproc_per_node=$N_GPU_NODE \ - --nnodes=$N_NODES \ - --node_rank $NODE_RANK \ - --master_addr $MASTER_ADDR \ - --master_port $MASTER_PORT \ - train.py \ - --force \ - --n_gpu $WORLD_SIZE \ - --student_type distilbert \ - --student_config training_configs/distilbert-base-uncased.json \ - --teacher_type bert \ - --teacher_name bert-base-uncased \ - --alpha_ce 0.33 --alpha_mlm 0.33 --alpha_cos 0.33 --alpha_clm 0.0 --mlm \ - --freeze_pos_embs \ - --dump_path serialization_dir/my_first_training \ - --data_file data/binarized_text.bert-base-uncased.pickle \ - --token_counts data/token_counts.bert-base-uncased.pickle -``` - -**Tips:** Starting distilled training with good initialization of the model weights is crucial to reach decent performance. In our experiments, we initialized our model from a few layers of the teacher (Bert) itself! Please refer to `scripts/extract.py` and `scripts/extract_distilbert.py` to create a valid initialization checkpoint and use `--student_pretrained_weights` argument to use this initialization for the distilled training! - -Happy distillation! 
- -## Citation - -If you find the resource useful, you should cite the following paper: - -```bibtex -@inproceedings{sanh2019distilbert, - title={DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter}, - author={Sanh, Victor and Debut, Lysandre and Chaumond, Julien and Wolf, Thomas}, - booktitle={NeurIPS EMC^2 Workshop}, - year={2019} -} -``` diff --git a/examples/research_projects/distillation/distiller.py b/examples/research_projects/distillation/distiller.py deleted file mode 100644 index 963af976f5a..00000000000 --- a/examples/research_projects/distillation/distiller.py +++ /dev/null @@ -1,601 +0,0 @@ -# coding=utf-8 -# Copyright 2019-present, the HuggingFace Inc. team and Facebook, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""The distiller to distil the student. -Adapted in part from Facebook, Inc XLM model (https://github.com/facebookresearch/XLM) -""" - -import math -import os -import time - -import psutil -import torch -from grouped_batch_sampler import GroupedBatchSampler, create_lengths_groups -from lm_seqs_dataset import LmSeqsDataset -from torch import nn -from torch.optim import AdamW -from torch.utils.data import BatchSampler, DataLoader, RandomSampler -from torch.utils.data.distributed import DistributedSampler -from tqdm import tqdm - -from transformers import get_linear_schedule_with_warmup -from utils import logger - - -try: - from torch.utils.tensorboard import SummaryWriter -except ImportError: - from tensorboardX import SummaryWriter - - -class Distiller: - def __init__( - self, params: dict, dataset: LmSeqsDataset, token_probs: torch.tensor, student: nn.Module, teacher: nn.Module - ): - logger.info("Initializing Distiller") - self.params = params - self.dump_path = params.dump_path - self.multi_gpu = params.multi_gpu - self.fp16 = params.fp16 - - self.student = student - self.teacher = teacher - - self.student_config = student.config - self.vocab_size = student.config.vocab_size - - if params.n_gpu <= 1: - sampler = RandomSampler(dataset) - else: - sampler = DistributedSampler(dataset) - - if params.group_by_size: - groups = create_lengths_groups(lengths=dataset.lengths, k=params.max_model_input_size) - sampler = GroupedBatchSampler(sampler=sampler, group_ids=groups, batch_size=params.batch_size) - else: - sampler = BatchSampler(sampler=sampler, batch_size=params.batch_size, drop_last=False) - - self.dataloader = DataLoader(dataset=dataset, batch_sampler=sampler, collate_fn=dataset.batch_sequences) - - self.temperature = params.temperature - assert self.temperature > 0.0 - - self.alpha_ce = params.alpha_ce - self.alpha_mlm = params.alpha_mlm - self.alpha_clm = params.alpha_clm - self.alpha_mse = params.alpha_mse - self.alpha_cos = params.alpha_cos - - self.mlm = params.mlm - if self.mlm: - logger.info("Using MLM loss for LM step.") - self.mlm_mask_prop = params.mlm_mask_prop - assert 0.0 <= self.mlm_mask_prop <= 1.0 - assert params.word_mask + params.word_keep + params.word_rand == 1.0 - self.pred_probs = 
torch.FloatTensor([params.word_mask, params.word_keep, params.word_rand]) - self.pred_probs = self.pred_probs.to(f"cuda:{params.local_rank}") if params.n_gpu > 0 else self.pred_probs - self.token_probs = token_probs.to(f"cuda:{params.local_rank}") if params.n_gpu > 0 else token_probs - if self.fp16: - self.pred_probs = self.pred_probs.half() - self.token_probs = self.token_probs.half() - else: - logger.info("Using CLM loss for LM step.") - - self.epoch = 0 - self.n_iter = 0 - self.n_total_iter = 0 - self.n_sequences_epoch = 0 - self.total_loss_epoch = 0 - self.last_loss = 0 - self.last_loss_ce = 0 - self.last_loss_mlm = 0 - self.last_loss_clm = 0 - if self.alpha_mse > 0.0: - self.last_loss_mse = 0 - if self.alpha_cos > 0.0: - self.last_loss_cos = 0 - self.last_log = 0 - - self.ce_loss_fct = nn.KLDivLoss(reduction="batchmean") - self.lm_loss_fct = nn.CrossEntropyLoss(ignore_index=-100) - if self.alpha_mse > 0.0: - self.mse_loss_fct = nn.MSELoss(reduction="sum") - if self.alpha_cos > 0.0: - self.cosine_loss_fct = nn.CosineEmbeddingLoss(reduction="mean") - - logger.info("--- Initializing model optimizer") - assert params.gradient_accumulation_steps >= 1 - self.num_steps_epoch = len(self.dataloader) - num_train_optimization_steps = ( - int(self.num_steps_epoch / params.gradient_accumulation_steps * params.n_epoch) + 1 - ) - - no_decay = ["bias", "LayerNorm.weight"] - optimizer_grouped_parameters = [ - { - "params": [ - p for n, p in student.named_parameters() if not any(nd in n for nd in no_decay) and p.requires_grad - ], - "weight_decay": params.weight_decay, - }, - { - "params": [ - p for n, p in student.named_parameters() if any(nd in n for nd in no_decay) and p.requires_grad - ], - "weight_decay": 0.0, - }, - ] - logger.info( - "------ Number of trainable parameters (student): %i" - % sum([p.numel() for p in self.student.parameters() if p.requires_grad]) - ) - logger.info("------ Number of parameters (student): %i" % sum([p.numel() for p in self.student.parameters()])) - self.optimizer = AdamW( - optimizer_grouped_parameters, lr=params.learning_rate, eps=params.adam_epsilon, betas=(0.9, 0.98) - ) - - warmup_steps = math.ceil(num_train_optimization_steps * params.warmup_prop) - self.scheduler = get_linear_schedule_with_warmup( - self.optimizer, num_warmup_steps=warmup_steps, num_training_steps=num_train_optimization_steps - ) - - if self.fp16: - try: - from apex import amp - except ImportError: - raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") - logger.info(f"Using fp16 training: {self.params.fp16_opt_level} level") - self.student, self.optimizer = amp.initialize( - self.student, self.optimizer, opt_level=self.params.fp16_opt_level - ) - self.teacher = self.teacher.half() - - if self.multi_gpu: - if self.fp16: - from apex.parallel import DistributedDataParallel - - logger.info("Using apex.parallel.DistributedDataParallel for distributed training.") - self.student = DistributedDataParallel(self.student) - else: - from torch.nn.parallel import DistributedDataParallel - - logger.info("Using nn.parallel.DistributedDataParallel for distributed training.") - self.student = DistributedDataParallel( - self.student, - device_ids=[params.local_rank], - output_device=params.local_rank, - find_unused_parameters=True, - ) - - self.is_master = params.is_master - if self.is_master: - logger.info("--- Initializing Tensorboard") - self.tensorboard = SummaryWriter(log_dir=os.path.join(self.dump_path, "log", "train")) - 
self.tensorboard.add_text(tag="config/training", text_string=str(self.params), global_step=0) - self.tensorboard.add_text(tag="config/student", text_string=str(self.student_config), global_step=0) - - def prepare_batch_mlm(self, batch): - """ - Prepare the batch: from the token_ids and the lengths, compute the attention mask and the masked label for MLM. - - Input: - ------ - batch: `Tuple` - token_ids: `torch.tensor(bs, seq_length)` - The token ids for each of the sequence. It is padded. - lengths: `torch.tensor(bs)` - The lengths of each of the sequences in the batch. - - Output: - ------- - token_ids: `torch.tensor(bs, seq_length)` - The token ids after the modifications for MLM. - attn_mask: `torch.tensor(bs, seq_length)` - The attention mask for the self-attention. - mlm_labels: `torch.tensor(bs, seq_length)` - The masked language modeling labels. There is a -100 where there is nothing to predict. - """ - token_ids, lengths = batch - token_ids, lengths = self.round_batch(x=token_ids, lengths=lengths) - assert token_ids.size(0) == lengths.size(0) - - attn_mask = torch.arange(token_ids.size(1), dtype=torch.long, device=lengths.device) < lengths[:, None] - - bs, max_seq_len = token_ids.size() - mlm_labels = token_ids.new(token_ids.size()).copy_(token_ids) - - x_prob = self.token_probs[token_ids.flatten()] - n_tgt = math.ceil(self.mlm_mask_prop * lengths.sum().item()) - tgt_ids = torch.multinomial(x_prob / x_prob.sum(), n_tgt, replacement=False) - pred_mask = torch.zeros( - bs * max_seq_len, dtype=torch.bool, device=token_ids.device - ) # previously `dtype=torch.uint8`, cf pytorch 1.2.0 compatibility - pred_mask[tgt_ids] = 1 - pred_mask = pred_mask.view(bs, max_seq_len) - - pred_mask[token_ids == self.params.special_tok_ids["pad_token"]] = 0 - - # mask a number of words == 0 [8] (faster with fp16) - if self.fp16: - n1 = pred_mask.sum().item() - if n1 > 8: - pred_mask = pred_mask.view(-1) - n2 = max(n1 % 8, 8 * (n1 // 8)) - if n2 != n1: - pred_mask[torch.nonzero(pred_mask).view(-1)[: n1 - n2]] = 0 - pred_mask = pred_mask.view(bs, max_seq_len) - assert pred_mask.sum().item() % 8 == 0, pred_mask.sum().item() - - _token_ids_real = token_ids[pred_mask] - _token_ids_rand = _token_ids_real.clone().random_(self.vocab_size) - _token_ids_mask = _token_ids_real.clone().fill_(self.params.special_tok_ids["mask_token"]) - probs = torch.multinomial(self.pred_probs, len(_token_ids_real), replacement=True) - _token_ids = ( - _token_ids_mask * (probs == 0).long() - + _token_ids_real * (probs == 1).long() - + _token_ids_rand * (probs == 2).long() - ) - token_ids = token_ids.masked_scatter(pred_mask, _token_ids) - - mlm_labels[~pred_mask] = -100 # previously `mlm_labels[1-pred_mask] = -1`, cf pytorch 1.2.0 compatibility - - # sanity checks - assert 0 <= token_ids.min() <= token_ids.max() < self.vocab_size - - return token_ids, attn_mask, mlm_labels - - def prepare_batch_clm(self, batch): - """ - Prepare the batch: from the token_ids and the lengths, compute the attention mask and the labels for CLM. - - Input: - ------ - batch: `Tuple` - token_ids: `torch.tensor(bs, seq_length)` - The token ids for each of the sequence. It is padded. - lengths: `torch.tensor(bs)` - The lengths of each of the sequences in the batch. - - Output: - ------- - token_ids: `torch.tensor(bs, seq_length)` - The token ids after the modifications for MLM. - attn_mask: `torch.tensor(bs, seq_length)` - The attention mask for the self-attention. - clm_labels: `torch.tensor(bs, seq_length)` - The causal language modeling labels. 
There is a -100 where there is nothing to predict. - """ - token_ids, lengths = batch - token_ids, lengths = self.round_batch(x=token_ids, lengths=lengths) - assert token_ids.size(0) == lengths.size(0) - - attn_mask = torch.arange(token_ids.size(1), dtype=torch.long, device=lengths.device) < lengths[:, None] - clm_labels = token_ids.new(token_ids.size()).copy_(token_ids) - clm_labels[~attn_mask] = -100 # previously `clm_labels[1-attn_mask] = -1`, cf pytorch 1.2.0 compatibility - - # sanity checks - assert 0 <= token_ids.min() <= token_ids.max() < self.vocab_size - - return token_ids, attn_mask, clm_labels - - def round_batch(self, x: torch.tensor, lengths: torch.tensor): - """ - For float16 only. - Sub-sample sentences in a batch, and add padding, so that each dimension is a multiple of 8. - - Input: - ------ - x: `torch.tensor(bs, seq_length)` - The token ids. - lengths: `torch.tensor(bs, seq_length)` - The lengths of each of the sequence in the batch. - - Output: - ------- - x: `torch.tensor(new_bs, new_seq_length)` - The updated token ids. - lengths: `torch.tensor(new_bs, new_seq_length)` - The updated lengths. - """ - if not self.fp16 or len(lengths) < 8: - return x, lengths - - # number of sentences == 0 [8] - bs1 = len(lengths) - bs2 = 8 * (bs1 // 8) - assert bs2 > 0 and bs2 % 8 == 0 - if bs1 != bs2: - idx = torch.randperm(bs1)[:bs2] - lengths = lengths[idx] - slen = lengths.max().item() - x = x[idx, :slen] - else: - idx = None - - # sequence length == 0 [8] - ml1 = x.size(1) - if ml1 % 8 != 0: - pad = 8 - (ml1 % 8) - ml2 = ml1 + pad - if self.mlm: - pad_id = self.params.special_tok_ids["pad_token"] - else: - pad_id = self.params.special_tok_ids["unk_token"] - padding_tensor = torch.zeros(bs2, pad, dtype=torch.long, device=x.device).fill_(pad_id) - x = torch.cat([x, padding_tensor], 1) - assert x.size() == (bs2, ml2) - - assert x.size(0) % 8 == 0 - assert x.size(1) % 8 == 0 - return x, lengths - - def train(self): - """ - The real training loop. - """ - if self.is_master: - logger.info("Starting training") - self.last_log = time.time() - self.student.train() - self.teacher.eval() - - for _ in range(self.params.n_epoch): - if self.is_master: - logger.info(f"--- Starting epoch {self.epoch}/{self.params.n_epoch-1}") - if self.multi_gpu: - torch.distributed.barrier() - - iter_bar = tqdm(self.dataloader, desc="-Iter", disable=self.params.local_rank not in [-1, 0]) - for batch in iter_bar: - if self.params.n_gpu > 0: - batch = tuple(t.to(f"cuda:{self.params.local_rank}") for t in batch) - - if self.mlm: - token_ids, attn_mask, lm_labels = self.prepare_batch_mlm(batch=batch) - else: - token_ids, attn_mask, lm_labels = self.prepare_batch_clm(batch=batch) - self.step(input_ids=token_ids, attention_mask=attn_mask, lm_labels=lm_labels) - - iter_bar.update() - iter_bar.set_postfix( - {"Last_loss": f"{self.last_loss:.2f}", "Avg_cum_loss": f"{self.total_loss_epoch/self.n_iter:.2f}"} - ) - iter_bar.close() - - if self.is_master: - logger.info(f"--- Ending epoch {self.epoch}/{self.params.n_epoch-1}") - self.end_epoch() - - if self.is_master: - logger.info("Save very last checkpoint as `pytorch_model.bin`.") - self.save_checkpoint(checkpoint_name="pytorch_model.bin") - logger.info("Training is finished") - - def step(self, input_ids: torch.tensor, attention_mask: torch.tensor, lm_labels: torch.tensor): - """ - One optimization step: forward of student AND teacher, backward on the loss (for gradient accumulation), - and possibly a parameter update (depending on the gradient accumulation). 
- - Input: - ------ - input_ids: `torch.tensor(bs, seq_length)` - The token ids. - attention_mask: `torch.tensor(bs, seq_length)` - The attention mask for self attention. - lm_labels: `torch.tensor(bs, seq_length)` - The language modeling labels (mlm labels for MLM and clm labels for CLM). - """ - if self.mlm: - student_outputs = self.student( - input_ids=input_ids, attention_mask=attention_mask - ) # (bs, seq_length, voc_size) - with torch.no_grad(): - teacher_outputs = self.teacher( - input_ids=input_ids, attention_mask=attention_mask - ) # (bs, seq_length, voc_size) - else: - student_outputs = self.student(input_ids=input_ids, attention_mask=None) # (bs, seq_length, voc_size) - with torch.no_grad(): - teacher_outputs = self.teacher(input_ids=input_ids, attention_mask=None) # (bs, seq_length, voc_size) - s_logits, s_hidden_states = student_outputs["logits"], student_outputs["hidden_states"] - t_logits, t_hidden_states = teacher_outputs["logits"], teacher_outputs["hidden_states"] - assert s_logits.size() == t_logits.size() - - # https://github.com/peterliht/knowledge-distillation-pytorch/blob/master/model/net.py#L100 - # https://github.com/peterliht/knowledge-distillation-pytorch/issues/2 - if self.params.restrict_ce_to_mask: - mask = (lm_labels > -1).unsqueeze(-1).expand_as(s_logits) # (bs, seq_length, voc_size) - else: - mask = attention_mask.unsqueeze(-1).expand_as(s_logits) # (bs, seq_length, voc_size) - s_logits_slct = torch.masked_select(s_logits, mask) # (bs * seq_length * voc_size) modulo the 1s in mask - s_logits_slct = s_logits_slct.view(-1, s_logits.size(-1)) # (bs * seq_length, voc_size) modulo the 1s in mask - t_logits_slct = torch.masked_select(t_logits, mask) # (bs * seq_length * voc_size) modulo the 1s in mask - t_logits_slct = t_logits_slct.view(-1, s_logits.size(-1)) # (bs * seq_length, voc_size) modulo the 1s in mask - assert t_logits_slct.size() == s_logits_slct.size() - - loss_ce = ( - self.ce_loss_fct( - nn.functional.log_softmax(s_logits_slct / self.temperature, dim=-1), - nn.functional.softmax(t_logits_slct / self.temperature, dim=-1), - ) - * (self.temperature) ** 2 - ) - loss = self.alpha_ce * loss_ce - - if self.alpha_mlm > 0.0: - loss_mlm = self.lm_loss_fct(s_logits.view(-1, s_logits.size(-1)), lm_labels.view(-1)) - loss += self.alpha_mlm * loss_mlm - if self.alpha_clm > 0.0: - shift_logits = s_logits[..., :-1, :].contiguous() - shift_labels = lm_labels[..., 1:].contiguous() - loss_clm = self.lm_loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)) - loss += self.alpha_clm * loss_clm - - if self.alpha_mse > 0.0: - loss_mse = self.mse_loss_fct(s_logits_slct, t_logits_slct) / s_logits_slct.size( - 0 - ) # Reproducing batchmean reduction - loss += self.alpha_mse * loss_mse - if self.alpha_cos > 0.0: - s_hidden_states = s_hidden_states[-1] # (bs, seq_length, dim) - t_hidden_states = t_hidden_states[-1] # (bs, seq_length, dim) - mask = attention_mask.unsqueeze(-1).expand_as(s_hidden_states) # (bs, seq_length, dim) - assert s_hidden_states.size() == t_hidden_states.size() - dim = s_hidden_states.size(-1) - - s_hidden_states_slct = torch.masked_select(s_hidden_states, mask) # (bs * seq_length * dim) - s_hidden_states_slct = s_hidden_states_slct.view(-1, dim) # (bs * seq_length, dim) - t_hidden_states_slct = torch.masked_select(t_hidden_states, mask) # (bs * seq_length * dim) - t_hidden_states_slct = t_hidden_states_slct.view(-1, dim) # (bs * seq_length, dim) - - target = s_hidden_states_slct.new(s_hidden_states_slct.size(0)).fill_(1) # (bs * 
seq_length,) - loss_cos = self.cosine_loss_fct(s_hidden_states_slct, t_hidden_states_slct, target) - loss += self.alpha_cos * loss_cos - - self.total_loss_epoch += loss.item() - self.last_loss = loss.item() - self.last_loss_ce = loss_ce.item() - if self.alpha_mlm > 0.0: - self.last_loss_mlm = loss_mlm.item() - if self.alpha_clm > 0.0: - self.last_loss_clm = loss_clm.item() - if self.alpha_mse > 0.0: - self.last_loss_mse = loss_mse.item() - if self.alpha_cos > 0.0: - self.last_loss_cos = loss_cos.item() - - self.optimize(loss) - - self.n_sequences_epoch += input_ids.size(0) - - def optimize(self, loss): - """ - Normalization on the loss (gradient accumulation or distributed training), followed by - backward pass on the loss, possibly followed by a parameter update (depending on the gradient accumulation). - Also update the metrics for tensorboard. - """ - # Check for NaN - if (loss != loss).data.any(): - logger.error("NaN detected") - exit() - - if self.multi_gpu: - loss = loss.mean() - if self.params.gradient_accumulation_steps > 1: - loss = loss / self.params.gradient_accumulation_steps - - if self.fp16: - from apex import amp - - with amp.scale_loss(loss, self.optimizer) as scaled_loss: - scaled_loss.backward() - else: - loss.backward() - - self.iter() - if self.n_iter % self.params.gradient_accumulation_steps == 0: - if self.fp16: - nn.utils.clip_grad_norm_(amp.master_params(self.optimizer), self.params.max_grad_norm) - else: - nn.utils.clip_grad_norm_(self.student.parameters(), self.params.max_grad_norm) - self.optimizer.step() - self.optimizer.zero_grad() - self.scheduler.step() - - def iter(self): - """ - Update global counts, write to tensorboard and save checkpoint. - """ - self.n_iter += 1 - self.n_total_iter += 1 - - if self.n_total_iter % self.params.log_interval == 0: - self.log_tensorboard() - self.last_log = time.time() - if self.n_total_iter % self.params.checkpoint_interval == 0: - self.save_checkpoint() - - def log_tensorboard(self): - """ - Log into tensorboard. Only by the master process. 
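The `step` method above combines a temperature-scaled KL divergence between student and teacher logits with the usual LM loss and optional MSE/cosine terms. A self-contained sketch of just the soft-target term; the tensor shapes and the default temperature of 2.0 are illustrative, not prescriptive:

```python
import torch
from torch import nn

def soft_target_loss(s_logits: torch.Tensor, t_logits: torch.Tensor, temperature: float = 2.0) -> torch.Tensor:
    """KL divergence between temperature-softened student and teacher distributions, scaled by T^2."""
    kl = nn.KLDivLoss(reduction="batchmean")
    loss = kl(
        nn.functional.log_softmax(s_logits / temperature, dim=-1),
        nn.functional.softmax(t_logits / temperature, dim=-1),
    )
    return loss * temperature**2

# Random logits over a 30k-token vocabulary, batch of 8 positions.
student = torch.randn(8, 30000)
teacher = torch.randn(8, 30000)
print(soft_target_loss(student, teacher).item())
```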
- """ - if not self.is_master: - return - - for param_name, param in self.student.named_parameters(): - self.tensorboard.add_scalar( - tag="parameter_mean/" + param_name, scalar_value=param.data.mean(), global_step=self.n_total_iter - ) - self.tensorboard.add_scalar( - tag="parameter_std/" + param_name, scalar_value=param.data.std(), global_step=self.n_total_iter - ) - if param.grad is None: - continue - self.tensorboard.add_scalar( - tag="grad_mean/" + param_name, scalar_value=param.grad.data.mean(), global_step=self.n_total_iter - ) - self.tensorboard.add_scalar( - tag="grad_std/" + param_name, scalar_value=param.grad.data.std(), global_step=self.n_total_iter - ) - - self.tensorboard.add_scalar( - tag="losses/cum_avg_loss_epoch", - scalar_value=self.total_loss_epoch / self.n_iter, - global_step=self.n_total_iter, - ) - self.tensorboard.add_scalar(tag="losses/loss", scalar_value=self.last_loss, global_step=self.n_total_iter) - self.tensorboard.add_scalar( - tag="losses/loss_ce", scalar_value=self.last_loss_ce, global_step=self.n_total_iter - ) - if self.alpha_mlm > 0.0: - self.tensorboard.add_scalar( - tag="losses/loss_mlm", scalar_value=self.last_loss_mlm, global_step=self.n_total_iter - ) - if self.alpha_clm > 0.0: - self.tensorboard.add_scalar( - tag="losses/loss_clm", scalar_value=self.last_loss_clm, global_step=self.n_total_iter - ) - if self.alpha_mse > 0.0: - self.tensorboard.add_scalar( - tag="losses/loss_mse", scalar_value=self.last_loss_mse, global_step=self.n_total_iter - ) - if self.alpha_cos > 0.0: - self.tensorboard.add_scalar( - tag="losses/loss_cos", scalar_value=self.last_loss_cos, global_step=self.n_total_iter - ) - self.tensorboard.add_scalar( - tag="learning_rate/lr", scalar_value=self.scheduler.get_lr()[0], global_step=self.n_total_iter - ) - - self.tensorboard.add_scalar( - tag="global/memory_usage", - scalar_value=psutil.virtual_memory()._asdict()["used"] / 1_000_000, - global_step=self.n_total_iter, - ) - self.tensorboard.add_scalar( - tag="global/speed", scalar_value=time.time() - self.last_log, global_step=self.n_total_iter - ) - - def end_epoch(self): - """ - Finally arrived at the end of epoch (full pass on dataset). - Do some tensorboard logging and checkpoint saving. - """ - logger.info(f"{self.n_sequences_epoch} sequences have been trained during this epoch.") - - if self.is_master: - self.save_checkpoint(checkpoint_name=f"model_epoch_{self.epoch}.pth") - self.tensorboard.add_scalar( - tag="epoch/loss", scalar_value=self.total_loss_epoch / self.n_iter, global_step=self.epoch - ) - - self.epoch += 1 - self.n_sequences_epoch = 0 - self.n_iter = 0 - self.total_loss_epoch = 0 - - def save_checkpoint(self, checkpoint_name: str = "checkpoint.pth"): - """ - Save the current state. Only by the master process. - """ - if not self.is_master: - return - mdl_to_save = self.student.module if hasattr(self.student, "module") else self.student - mdl_to_save.config.save_pretrained(self.dump_path) - state_dict = mdl_to_save.state_dict() - torch.save(state_dict, os.path.join(self.dump_path, checkpoint_name)) diff --git a/examples/research_projects/distillation/grouped_batch_sampler.py b/examples/research_projects/distillation/grouped_batch_sampler.py deleted file mode 100644 index e25def738a8..00000000000 --- a/examples/research_projects/distillation/grouped_batch_sampler.py +++ /dev/null @@ -1,108 +0,0 @@ -# coding=utf-8 -# Copyright 2019-present, the HuggingFace Inc. team and Facebook, Inc. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Adapted from PyTorch Vision (https://github.com/pytorch/vision/blob/master/references/detection/group_by_aspect_ratio.py)""" - -import bisect -import copy -from collections import defaultdict - -import numpy as np -from torch.utils.data import BatchSampler, Sampler - -from utils import logger - - -def _quantize(x, bins): - bins = copy.deepcopy(bins) - bins = sorted(bins) - quantized = [bisect.bisect_right(bins, y) for y in x] - return quantized - - -def create_lengths_groups(lengths, k=0): - bins = np.arange(start=3, stop=k, step=4).tolist() if k > 0 else [10] - groups = _quantize(lengths, bins) - # count number of elements per group - counts = np.unique(groups, return_counts=True)[1] - fbins = [0] + bins + [np.inf] - logger.info("Using {} as bins for aspect lengths quantization".format(fbins)) - logger.info("Count of instances per bin: {}".format(counts)) - return groups - - -class GroupedBatchSampler(BatchSampler): - """ - Wraps another sampler to yield a mini-batch of indices. - It enforces that the batch only contain elements from the same group. - It also tries to provide mini-batches which follows an ordering which is - as close as possible to the ordering from the original sampler. - Arguments: - sampler (Sampler): Base sampler. - group_ids (list[int]): If the sampler produces indices in range [0, N), - `group_ids` must be a list of `N` ints which contains the group id of each sample. - The group ids must be a continuous set of integers starting from - 0, i.e. they must be in the range [0, num_groups). - batch_size (int): Size of mini-batch. 
- """ - - def __init__(self, sampler, group_ids, batch_size): - if not isinstance(sampler, Sampler): - raise TypeError( - "sampler should be an instance of torch.utils.data.Sampler, but got sampler={}".format(sampler) - ) - self.sampler = sampler - self.group_ids = group_ids - self.batch_size = batch_size - - def __iter__(self): - buffer_per_group = defaultdict(list) - samples_per_group = defaultdict(list) - - num_batches = 0 - for idx in self.sampler: - group_id = self.group_ids[idx] - buffer_per_group[group_id].append(idx) - samples_per_group[group_id].append(idx) - if len(buffer_per_group[group_id]) == self.batch_size: - yield buffer_per_group[group_id] # TODO - num_batches += 1 - del buffer_per_group[group_id] - assert len(buffer_per_group[group_id]) < self.batch_size - - # now we have run out of elements that satisfy - # the group criteria, let's return the remaining - # elements so that the size of the sampler is - # deterministic - expected_num_batches = len(self) - num_remaining = expected_num_batches - num_batches - if num_remaining > 0: - # for the remaining batches, group the batches by similar lengths - batch_idx = [] - for group_id, idxs in sorted(buffer_per_group.items(), key=lambda x: x[0]): - batch_idx.extend(idxs) - if len(batch_idx) >= self.batch_size: - yield batch_idx[: self.batch_size] - batch_idx = batch_idx[self.batch_size :] - num_remaining -= 1 - if len(batch_idx) > 0: - yield batch_idx - num_remaining -= 1 - assert num_remaining == 0 - - def __len__(self): - """ - Return the number of mini-batches rather than the number of samples. - """ - return (len(self.sampler) + self.batch_size - 1) // self.batch_size diff --git a/examples/research_projects/distillation/lm_seqs_dataset.py b/examples/research_projects/distillation/lm_seqs_dataset.py deleted file mode 100644 index 647c8f464f7..00000000000 --- a/examples/research_projects/distillation/lm_seqs_dataset.py +++ /dev/null @@ -1,167 +0,0 @@ -# coding=utf-8 -# Copyright 2019-present, the HuggingFace Inc. team and Facebook, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Dataset to distilled models -adapted in part from Facebook, Inc XLM model (https://github.com/facebookresearch/XLM) -""" - -import numpy as np -import torch -from torch.utils.data import Dataset - -from utils import logger - - -class LmSeqsDataset(Dataset): - """Custom Dataset wrapping language modeling sequences. - - Each sample will be retrieved by indexing the list of token_ids and their corresponding lengths. 
- - Input: - ------ - params: `NameSpace` parameters - data: `List[np.array[int]] - """ - - def __init__(self, params, data): - self.params = params - - self.token_ids = np.array(data) - self.lengths = np.array([len(t) for t in data]) - - self.check() - self.remove_long_sequences() - self.remove_empty_sequences() - self.remove_unknown_sequences() - self.check() - self.print_statistics() - - def __getitem__(self, index): - return (self.token_ids[index], self.lengths[index]) - - def __len__(self): - return len(self.lengths) - - def check(self): - """ - Some sanity checks - """ - assert len(self.token_ids) == len(self.lengths) - assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths))) - - def remove_long_sequences(self): - """ - Sequences that are too long are split by chunk of max_model_input_size. - """ - max_len = self.params.max_model_input_size - indices = self.lengths > max_len - logger.info(f"Splitting {sum(indices)} too long sequences.") - - def divide_chunks(l, n): - return [l[i : i + n] for i in range(0, len(l), n)] - - new_tok_ids = [] - new_lengths = [] - if self.params.mlm: - cls_id, sep_id = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"] - else: - cls_id, sep_id = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"] - - for seq_, len_ in zip(self.token_ids, self.lengths): - assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_ - if len_ <= max_len: - new_tok_ids.append(seq_) - new_lengths.append(len_) - else: - sub_seqs = [] - for sub_s in divide_chunks(seq_, max_len - 2): - if sub_s[0] != cls_id: - sub_s = np.insert(sub_s, 0, cls_id) - if sub_s[-1] != sep_id: - sub_s = np.insert(sub_s, len(sub_s), sep_id) - assert len(sub_s) <= max_len - assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s - sub_seqs.append(sub_s) - - new_tok_ids.extend(sub_seqs) - new_lengths.extend([len(l) for l in sub_seqs]) - - self.token_ids = np.array(new_tok_ids) - self.lengths = np.array(new_lengths) - - def remove_empty_sequences(self): - """ - Too short sequences are simply removed. This could be tuned. - """ - init_size = len(self) - indices = self.lengths > 11 - self.token_ids = self.token_ids[indices] - self.lengths = self.lengths[indices] - new_size = len(self) - logger.info(f"Remove {init_size - new_size} too short (<=11 tokens) sequences.") - - def remove_unknown_sequences(self): - """ - Remove sequences with a (too) high level of unknown tokens. - """ - if "unk_token" not in self.params.special_tok_ids: - return - else: - unk_token_id = self.params.special_tok_ids["unk_token"] - init_size = len(self) - unk_occs = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids]) - indices = (unk_occs / self.lengths) < 0.5 - self.token_ids = self.token_ids[indices] - self.lengths = self.lengths[indices] - new_size = len(self) - logger.info(f"Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).") - - def print_statistics(self): - """ - Print some statistics on the corpus. Only the master process. 
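`remove_long_sequences` above splits any sequence longer than the model's maximum input size into chunks of `max_len - 2` tokens and re-attaches the CLS/SEP (or BOS/EOS) markers to each piece. A compact sketch of that chunking on plain lists; the marker ids 101/102 are placeholders, and the slicing differs slightly from the removed code in that it strips the original markers first:

```python
def split_long_sequence(ids, max_len, cls_id, sep_id):
    """Split one token-id list into pieces of at most max_len tokens,
    each starting with cls_id and ending with sep_id."""
    if len(ids) <= max_len:
        return [list(ids)]
    body = ids[1:-1] if ids[0] == cls_id and ids[-1] == sep_id else list(ids)
    step = max_len - 2
    return [[cls_id] + list(body[start:start + step]) + [sep_id] for start in range(0, len(body), step)]

# A 10-token sequence with markers 101/102, capped at 6 tokens per piece.
seq = [101, 5, 6, 7, 8, 9, 10, 11, 12, 102]
print(split_long_sequence(seq, max_len=6, cls_id=101, sep_id=102))
# [[101, 5, 6, 7, 8, 102], [101, 9, 10, 11, 12, 102]]
```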
- """ - if not self.params.is_master: - return - logger.info(f"{len(self)} sequences") - # data_len = sum(self.lengths) - # nb_unique_tokens = len(Counter(list(chain(*self.token_ids)))) - # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)') - - # unk_idx = self.params.special_tok_ids['unk_token'] - # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids]) - # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)') - - def batch_sequences(self, batch): - """ - Do the padding and transform into torch.tensor. - """ - token_ids = [t[0] for t in batch] - lengths = [t[1] for t in batch] - assert len(token_ids) == len(lengths) - - # Max for paddings - max_seq_len_ = max(lengths) - - # Pad token ids - if self.params.mlm: - pad_idx = self.params.special_tok_ids["pad_token"] - else: - pad_idx = self.params.special_tok_ids["unk_token"] - tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids] - assert len(tk_) == len(token_ids) - assert all(len(t) == max_seq_len_ for t in tk_) - - tk_t = torch.tensor(tk_) # (bs, max_seq_len_) - lg_t = torch.tensor(lengths) # (bs) - return tk_t, lg_t diff --git a/examples/research_projects/distillation/requirements.txt b/examples/research_projects/distillation/requirements.txt deleted file mode 100644 index 4a2ed783a7c..00000000000 --- a/examples/research_projects/distillation/requirements.txt +++ /dev/null @@ -1,7 +0,0 @@ -transformers - -gitpython==3.1.41 -tensorboard>=1.14.0 -tensorboardX==1.8 -psutil==5.6.6 -scipy>=1.4.1 diff --git a/examples/research_projects/distillation/run_squad_w_distillation.py b/examples/research_projects/distillation/run_squad_w_distillation.py deleted file mode 100644 index a1150f6b437..00000000000 --- a/examples/research_projects/distillation/run_squad_w_distillation.py +++ /dev/null @@ -1,877 +0,0 @@ -# coding=utf-8 -# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. -# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-"""This is the exact same script as `examples/question-answering/run_squad.py` (as of 2020, January 8th) with an additional and optional step of distillation.""" - -import argparse -import glob -import logging -import os -import random -import timeit - -import numpy as np -import torch -from torch import nn -from torch.utils.data import DataLoader, RandomSampler, SequentialSampler -from torch.utils.data.distributed import DistributedSampler -from tqdm import tqdm, trange - -import transformers -from transformers import ( - WEIGHTS_NAME, - AdamW, - BertConfig, - BertForQuestionAnswering, - BertTokenizer, - DistilBertConfig, - DistilBertForQuestionAnswering, - DistilBertTokenizer, - RobertaConfig, - RobertaForQuestionAnswering, - RobertaTokenizer, - XLMConfig, - XLMForQuestionAnswering, - XLMTokenizer, - XLNetConfig, - XLNetForQuestionAnswering, - XLNetTokenizer, - get_linear_schedule_with_warmup, - squad_convert_examples_to_features, -) -from transformers.data.metrics.squad_metrics import ( - compute_predictions_log_probs, - compute_predictions_logits, - squad_evaluate, -) -from transformers.data.processors.squad import SquadResult, SquadV1Processor, SquadV2Processor -from transformers.trainer_utils import is_main_process - - -try: - from torch.utils.tensorboard import SummaryWriter -except ImportError: - from tensorboardX import SummaryWriter - - -logger = logging.getLogger(__name__) - - -MODEL_CLASSES = { - "bert": (BertConfig, BertForQuestionAnswering, BertTokenizer), - "xlnet": (XLNetConfig, XLNetForQuestionAnswering, XLNetTokenizer), - "xlm": (XLMConfig, XLMForQuestionAnswering, XLMTokenizer), - "distilbert": (DistilBertConfig, DistilBertForQuestionAnswering, DistilBertTokenizer), - "roberta": (RobertaConfig, RobertaForQuestionAnswering, RobertaTokenizer), -} - - -def set_seed(args): - random.seed(args.seed) - np.random.seed(args.seed) - torch.manual_seed(args.seed) - if args.n_gpu > 0: - torch.cuda.manual_seed_all(args.seed) - - -def to_list(tensor): - return tensor.detach().cpu().tolist() - - -def train(args, train_dataset, model, tokenizer, teacher=None): - """Train the model""" - if args.local_rank in [-1, 0]: - tb_writer = SummaryWriter() - - args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu) - train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset) - train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size) - - if args.max_steps > 0: - t_total = args.max_steps - args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1 - else: - t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs - - # Prepare optimizer and schedule (linear warmup and decay) - no_decay = ["bias", "LayerNorm.weight"] - optimizer_grouped_parameters = [ - { - "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], - "weight_decay": args.weight_decay, - }, - {"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0}, - ] - optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) - scheduler = get_linear_schedule_with_warmup( - optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total - ) - - # Check if saved optimizer or scheduler states exist - if os.path.isfile(os.path.join(args.model_name_or_path, "optimizer.pt")) and os.path.isfile( - os.path.join(args.model_name_or_path, 
"scheduler.pt") - ): - # Load in optimizer and scheduler states - optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "optimizer.pt"))) - scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "scheduler.pt"))) - - if args.fp16: - try: - from apex import amp - except ImportError: - raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") - - model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level) - - # multi-gpu training (should be after apex fp16 initialization) - if args.n_gpu > 1: - model = nn.DataParallel(model) - - # Distributed training (should be after apex fp16 initialization) - if args.local_rank != -1: - model = nn.parallel.DistributedDataParallel( - model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True - ) - - # Train! - logger.info("***** Running training *****") - logger.info(" Num examples = %d", len(train_dataset)) - logger.info(" Num Epochs = %d", args.num_train_epochs) - logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size) - logger.info( - " Total train batch size (w. parallel, distributed & accumulation) = %d", - args.train_batch_size - * args.gradient_accumulation_steps - * (torch.distributed.get_world_size() if args.local_rank != -1 else 1), - ) - logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps) - logger.info(" Total optimization steps = %d", t_total) - - global_step = 1 - epochs_trained = 0 - steps_trained_in_current_epoch = 0 - # Check if continuing training from a checkpoint - if os.path.exists(args.model_name_or_path): - try: - # set global_step to global_step of last saved checkpoint from model path - checkpoint_suffix = args.model_name_or_path.split("-")[-1].split("/")[0] - global_step = int(checkpoint_suffix) - epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps) - steps_trained_in_current_epoch = global_step % (len(train_dataloader) // args.gradient_accumulation_steps) - - logger.info(" Continuing training from checkpoint, will skip to saved global_step") - logger.info(" Continuing training from epoch %d", epochs_trained) - logger.info(" Continuing training from global step %d", global_step) - logger.info(" Will skip the first %d steps in the first epoch", steps_trained_in_current_epoch) - except ValueError: - logger.info(" Starting fine-tuning.") - - tr_loss, logging_loss = 0.0, 0.0 - model.zero_grad() - train_iterator = trange( - epochs_trained, int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0] - ) - # Added here for reproducibility - set_seed(args) - - for _ in train_iterator: - epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0]) - for step, batch in enumerate(epoch_iterator): - # Skip past any already trained steps if resuming training - if steps_trained_in_current_epoch > 0: - steps_trained_in_current_epoch -= 1 - continue - - model.train() - if teacher is not None: - teacher.eval() - batch = tuple(t.to(args.device) for t in batch) - - inputs = { - "input_ids": batch[0], - "attention_mask": batch[1], - "start_positions": batch[3], - "end_positions": batch[4], - } - if args.model_type != "distilbert": - inputs["token_type_ids"] = None if args.model_type == "xlm" else batch[2] - if args.model_type in ["xlnet", "xlm"]: - inputs.update({"cls_index": batch[5], "p_mask": batch[6]}) - if args.version_2_with_negative: - 
inputs.update({"is_impossible": batch[7]}) - outputs = model(**inputs) - loss, start_logits_stu, end_logits_stu = outputs - - # Distillation loss - if teacher is not None: - if "token_type_ids" not in inputs: - inputs["token_type_ids"] = None if args.teacher_type == "xlm" else batch[2] - with torch.no_grad(): - start_logits_tea, end_logits_tea = teacher( - input_ids=inputs["input_ids"], - token_type_ids=inputs["token_type_ids"], - attention_mask=inputs["attention_mask"], - ) - assert start_logits_tea.size() == start_logits_stu.size() - assert end_logits_tea.size() == end_logits_stu.size() - - loss_fct = nn.KLDivLoss(reduction="batchmean") - loss_start = loss_fct( - nn.functional.log_softmax(start_logits_stu / args.temperature, dim=-1), - nn.functional.softmax(start_logits_tea / args.temperature, dim=-1), - ) * (args.temperature**2) - loss_end = loss_fct( - nn.functional.log_softmax(end_logits_stu / args.temperature, dim=-1), - nn.functional.softmax(end_logits_tea / args.temperature, dim=-1), - ) * (args.temperature**2) - loss_ce = (loss_start + loss_end) / 2.0 - - loss = args.alpha_ce * loss_ce + args.alpha_squad * loss - - if args.n_gpu > 1: - loss = loss.mean() # mean() to average on multi-gpu parallel (not distributed) training - if args.gradient_accumulation_steps > 1: - loss = loss / args.gradient_accumulation_steps - - if args.fp16: - with amp.scale_loss(loss, optimizer) as scaled_loss: - scaled_loss.backward() - else: - loss.backward() - - tr_loss += loss.item() - if (step + 1) % args.gradient_accumulation_steps == 0: - if args.fp16: - nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm) - else: - nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm) - - optimizer.step() - scheduler.step() # Update learning rate schedule - model.zero_grad() - global_step += 1 - - # Log metrics - if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0: - # Only evaluate when single GPU otherwise metrics may not average well - if args.local_rank == -1 and args.evaluate_during_training: - results = evaluate(args, model, tokenizer) - for key, value in results.items(): - tb_writer.add_scalar("eval_{}".format(key), value, global_step) - tb_writer.add_scalar("lr", scheduler.get_lr()[0], global_step) - tb_writer.add_scalar("loss", (tr_loss - logging_loss) / args.logging_steps, global_step) - logging_loss = tr_loss - - if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0: - # Save model checkpoint - output_dir = os.path.join(args.output_dir, "checkpoint-{}".format(global_step)) - if not os.path.exists(output_dir): - os.makedirs(output_dir) - model_to_save = ( - model.module if hasattr(model, "module") else model - ) # Take care of distributed/parallel training - model_to_save.save_pretrained(output_dir) - tokenizer.save_pretrained(output_dir) - - torch.save(args, os.path.join(output_dir, "training_args.bin")) - logger.info("Saving model checkpoint to %s", output_dir) - - torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt")) - torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt")) - logger.info("Saving optimizer and scheduler states to %s", output_dir) - - if args.max_steps > 0 and global_step > args.max_steps: - epoch_iterator.close() - break - if args.max_steps > 0 and global_step > args.max_steps: - train_iterator.close() - break - - if args.local_rank in [-1, 0]: - tb_writer.close() - - return global_step, tr_loss / global_step - - -def 
evaluate(args, model, tokenizer, prefix=""): - dataset, examples, features = load_and_cache_examples(args, tokenizer, evaluate=True, output_examples=True) - - if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]: - os.makedirs(args.output_dir) - - args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu) - - # Note that DistributedSampler samples randomly - eval_sampler = SequentialSampler(dataset) - eval_dataloader = DataLoader(dataset, sampler=eval_sampler, batch_size=args.eval_batch_size) - - # multi-gpu evaluate - if args.n_gpu > 1 and not isinstance(model, nn.DataParallel): - model = nn.DataParallel(model) - - # Eval! - logger.info("***** Running evaluation {} *****".format(prefix)) - logger.info(" Num examples = %d", len(dataset)) - logger.info(" Batch size = %d", args.eval_batch_size) - - all_results = [] - start_time = timeit.default_timer() - - for batch in tqdm(eval_dataloader, desc="Evaluating"): - model.eval() - batch = tuple(t.to(args.device) for t in batch) - - with torch.no_grad(): - inputs = {"input_ids": batch[0], "attention_mask": batch[1]} - if args.model_type != "distilbert": - inputs["token_type_ids"] = None if args.model_type == "xlm" else batch[2] # XLM don't use segment_ids - example_indices = batch[3] - if args.model_type in ["xlnet", "xlm"]: - inputs.update({"cls_index": batch[4], "p_mask": batch[5]}) - - outputs = model(**inputs) - - for i, example_index in enumerate(example_indices): - eval_feature = features[example_index.item()] - unique_id = int(eval_feature.unique_id) - - output = [to_list(output[i]) for output in outputs] - - # Some models (XLNet, XLM) use 5 arguments for their predictions, while the other "simpler" - # models only use two. - if len(output) >= 5: - start_logits = output[0] - start_top_index = output[1] - end_logits = output[2] - end_top_index = output[3] - cls_logits = output[4] - - result = SquadResult( - unique_id, - start_logits, - end_logits, - start_top_index=start_top_index, - end_top_index=end_top_index, - cls_logits=cls_logits, - ) - - else: - start_logits, end_logits = output - result = SquadResult(unique_id, start_logits, end_logits) - - all_results.append(result) - - evalTime = timeit.default_timer() - start_time - logger.info(" Evaluation done in total %f secs (%f sec per example)", evalTime, evalTime / len(dataset)) - - # Compute predictions - output_prediction_file = os.path.join(args.output_dir, "predictions_{}.json".format(prefix)) - output_nbest_file = os.path.join(args.output_dir, "nbest_predictions_{}.json".format(prefix)) - - if args.version_2_with_negative: - output_null_log_odds_file = os.path.join(args.output_dir, "null_odds_{}.json".format(prefix)) - else: - output_null_log_odds_file = None - - if args.model_type in ["xlnet", "xlm"]: - # XLNet uses a more complex post-processing procedure - predictions = compute_predictions_log_probs( - examples, - features, - all_results, - args.n_best_size, - args.max_answer_length, - output_prediction_file, - output_nbest_file, - output_null_log_odds_file, - model.config.start_n_top, - model.config.end_n_top, - args.version_2_with_negative, - tokenizer, - args.verbose_logging, - ) - else: - predictions = compute_predictions_logits( - examples, - features, - all_results, - args.n_best_size, - args.max_answer_length, - args.do_lower_case, - output_prediction_file, - output_nbest_file, - output_null_log_odds_file, - args.verbose_logging, - args.version_2_with_negative, - args.null_score_diff_threshold, - tokenizer, - ) - - # Compute the F1 and exact 
scores. - results = squad_evaluate(examples, predictions) - return results - - -def load_and_cache_examples(args, tokenizer, evaluate=False, output_examples=False): - if args.local_rank not in [-1, 0] and not evaluate: - # Make sure only the first process in distributed training process the dataset, and the others will use the cache - torch.distributed.barrier() - - # Load data features from cache or dataset file - input_file = args.predict_file if evaluate else args.train_file - cached_features_file = os.path.join( - os.path.dirname(input_file), - "cached_distillation_{}_{}_{}".format( - "dev" if evaluate else "train", - list(filter(None, args.model_name_or_path.split("/"))).pop(), - str(args.max_seq_length), - ), - ) - if os.path.exists(cached_features_file) and not args.overwrite_cache: - logger.info("Loading features from cached file %s", cached_features_file) - features_and_dataset = torch.load(cached_features_file) - - try: - features, dataset, examples = ( - features_and_dataset["features"], - features_and_dataset["dataset"], - features_and_dataset["examples"], - ) - except KeyError: - raise DeprecationWarning( - "You seem to be loading features from an older version of this script please delete the " - "file %s in order for it to be created again" % cached_features_file - ) - else: - logger.info("Creating features from dataset file at %s", input_file) - processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor() - if evaluate: - examples = processor.get_dev_examples(args.data_dir, filename=args.predict_file) - else: - examples = processor.get_train_examples(args.data_dir, filename=args.train_file) - - features, dataset = squad_convert_examples_to_features( - examples=examples, - tokenizer=tokenizer, - max_seq_length=args.max_seq_length, - doc_stride=args.doc_stride, - max_query_length=args.max_query_length, - is_training=not evaluate, - return_dataset="pt", - threads=args.threads, - ) - - if args.local_rank in [-1, 0]: - logger.info("Saving features into cached file %s", cached_features_file) - torch.save({"features": features, "dataset": dataset, "examples": examples}, cached_features_file) - - if args.local_rank == 0 and not evaluate: - # Make sure only the first process in distributed training process the dataset, and the others will use the cache - torch.distributed.barrier() - - if output_examples: - return dataset, examples, features - return dataset - - -def main(): - parser = argparse.ArgumentParser() - - # Required parameters - parser.add_argument( - "--model_type", - default=None, - type=str, - required=True, - help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()), - ) - parser.add_argument( - "--model_name_or_path", - default=None, - type=str, - required=True, - help="Path to pretrained model or model identifier from huggingface.co/models", - ) - parser.add_argument( - "--output_dir", - default=None, - type=str, - required=True, - help="The output directory where the model checkpoints and predictions will be written.", - ) - - # Distillation parameters (optional) - parser.add_argument( - "--teacher_type", - default=None, - type=str, - help=( - "Teacher type. Teacher tokenizer and student (model) tokenizer must output the same tokenization. Only for" - " distillation." - ), - ) - parser.add_argument( - "--teacher_name_or_path", - default=None, - type=str, - help="Path to the already SQuAD fine-tuned teacher model. 
Only for distillation.", - ) - parser.add_argument( - "--alpha_ce", default=0.5, type=float, help="Distillation loss linear weight. Only for distillation." - ) - parser.add_argument( - "--alpha_squad", default=0.5, type=float, help="True SQuAD loss linear weight. Only for distillation." - ) - parser.add_argument( - "--temperature", default=2.0, type=float, help="Distillation temperature. Only for distillation." - ) - - # Other parameters - parser.add_argument( - "--data_dir", - default=None, - type=str, - help="The input data dir. Should contain the .json files for the task." - + "If no data dir or train/predict files are specified, will run with tensorflow_datasets.", - ) - parser.add_argument( - "--train_file", - default=None, - type=str, - help="The input training file. If a data dir is specified, will look for the file there" - + "If no data dir or train/predict files are specified, will run with tensorflow_datasets.", - ) - parser.add_argument( - "--predict_file", - default=None, - type=str, - help="The input evaluation file. If a data dir is specified, will look for the file there" - + "If no data dir or train/predict files are specified, will run with tensorflow_datasets.", - ) - parser.add_argument( - "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name" - ) - parser.add_argument( - "--tokenizer_name", - default="", - type=str, - help="Pretrained tokenizer name or path if not the same as model_name", - ) - parser.add_argument( - "--cache_dir", - default="", - type=str, - help="Where do you want to store the pre-trained models downloaded from huggingface.co", - ) - - parser.add_argument( - "--version_2_with_negative", - action="store_true", - help="If true, the SQuAD examples contain some that do not have an answer.", - ) - parser.add_argument( - "--null_score_diff_threshold", - type=float, - default=0.0, - help="If null_score - best_non_null is greater than the threshold predict null.", - ) - - parser.add_argument( - "--max_seq_length", - default=384, - type=int, - help=( - "The maximum total input sequence length after WordPiece tokenization. Sequences " - "longer than this will be truncated, and sequences shorter than this will be padded." - ), - ) - parser.add_argument( - "--doc_stride", - default=128, - type=int, - help="When splitting up a long document into chunks, how much stride to take between chunks.", - ) - parser.add_argument( - "--max_query_length", - default=64, - type=int, - help=( - "The maximum number of tokens for the question. Questions longer than this will " - "be truncated to this length." - ), - ) - parser.add_argument("--do_train", action="store_true", help="Whether to run training.") - parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.") - parser.add_argument( - "--evaluate_during_training", action="store_true", help="Rul evaluation during training at each logging step." - ) - parser.add_argument( - "--do_lower_case", action="store_true", help="Set this flag if you are using an uncased model." - ) - - parser.add_argument("--per_gpu_train_batch_size", default=8, type=int, help="Batch size per GPU/CPU for training.") - parser.add_argument( - "--per_gpu_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation." 
- ) - parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.") - parser.add_argument( - "--gradient_accumulation_steps", - type=int, - default=1, - help="Number of updates steps to accumulate before performing a backward/update pass.", - ) - parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.") - parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.") - parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") - parser.add_argument( - "--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform." - ) - parser.add_argument( - "--max_steps", - default=-1, - type=int, - help="If > 0: set total number of training steps to perform. Override num_train_epochs.", - ) - parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.") - parser.add_argument( - "--n_best_size", - default=20, - type=int, - help="The total number of n-best predictions to generate in the nbest_predictions.json output file.", - ) - parser.add_argument( - "--max_answer_length", - default=30, - type=int, - help=( - "The maximum length of an answer that can be generated. This is needed because the start " - "and end predictions are not conditioned on one another." - ), - ) - parser.add_argument( - "--verbose_logging", - action="store_true", - help=( - "If true, all of the warnings related to data processing will be printed. " - "A number of warnings are expected for a normal SQuAD evaluation." - ), - ) - - parser.add_argument("--logging_steps", type=int, default=50, help="Log every X updates steps.") - parser.add_argument("--save_steps", type=int, default=50, help="Save checkpoint every X updates steps.") - parser.add_argument( - "--eval_all_checkpoints", - action="store_true", - help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number", - ) - parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available") - parser.add_argument( - "--overwrite_output_dir", action="store_true", help="Overwrite the content of the output directory" - ) - parser.add_argument( - "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets" - ) - parser.add_argument("--seed", type=int, default=42, help="random seed for initialization") - - parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus") - parser.add_argument( - "--fp16", - action="store_true", - help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit", - ) - parser.add_argument( - "--fp16_opt_level", - type=str, - default="O1", - help=( - "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. 
" - "See details at https://nvidia.github.io/apex/amp.html" - ), - ) - parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.") - parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.") - - parser.add_argument("--threads", type=int, default=1, help="multiple threads for converting example to features") - args = parser.parse_args() - - if ( - os.path.exists(args.output_dir) - and os.listdir(args.output_dir) - and args.do_train - and not args.overwrite_output_dir - ): - raise ValueError( - "Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format( - args.output_dir - ) - ) - - # Setup distant debugging if needed - if args.server_ip and args.server_port: - # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script - import ptvsd - - print("Waiting for debugger attach") - ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True) - ptvsd.wait_for_attach() - - # Setup CUDA, GPU & distributed training - if args.local_rank == -1 or args.no_cuda: - device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") - args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count() - else: # Initializes the distributed backend which will take care of synchronizing nodes/GPUs - torch.cuda.set_device(args.local_rank) - device = torch.device("cuda", args.local_rank) - torch.distributed.init_process_group(backend="nccl") - args.n_gpu = 1 - args.device = device - - # Setup logging - logging.basicConfig( - format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", - datefmt="%m/%d/%Y %H:%M:%S", - level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN, - ) - logger.warning( - "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", - args.local_rank, - device, - args.n_gpu, - bool(args.local_rank != -1), - args.fp16, - ) - # Set the verbosity to info of the Transformers logger (on main process only): - if is_main_process(args.local_rank): - transformers.utils.logging.set_verbosity_info() - transformers.utils.logging.enable_default_handler() - transformers.utils.logging.enable_explicit_format() - # Set seed - set_seed(args) - - # Load pretrained model and tokenizer - if args.local_rank not in [-1, 0]: - # Make sure only the first process in distributed training will download model & vocab - torch.distributed.barrier() - - args.model_type = args.model_type.lower() - config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type] - config = config_class.from_pretrained( - args.config_name if args.config_name else args.model_name_or_path, - cache_dir=args.cache_dir if args.cache_dir else None, - ) - tokenizer = tokenizer_class.from_pretrained( - args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, - do_lower_case=args.do_lower_case, - cache_dir=args.cache_dir if args.cache_dir else None, - ) - model = model_class.from_pretrained( - args.model_name_or_path, - from_tf=bool(".ckpt" in args.model_name_or_path), - config=config, - cache_dir=args.cache_dir if args.cache_dir else None, - ) - - if args.teacher_type is not None: - assert args.teacher_name_or_path is not None - assert args.alpha_ce > 0.0 - assert args.alpha_ce + args.alpha_squad > 0.0 - assert args.teacher_type != "distilbert", "We constraint teachers not to be of type DistilBERT." 
- teacher_config_class, teacher_model_class, _ = MODEL_CLASSES[args.teacher_type] - teacher_config = teacher_config_class.from_pretrained( - args.teacher_name_or_path, cache_dir=args.cache_dir if args.cache_dir else None - ) - teacher = teacher_model_class.from_pretrained( - args.teacher_name_or_path, config=teacher_config, cache_dir=args.cache_dir if args.cache_dir else None - ) - teacher.to(args.device) - else: - teacher = None - - if args.local_rank == 0: - # Make sure only the first process in distributed training will download model & vocab - torch.distributed.barrier() - - model.to(args.device) - - logger.info("Training/evaluation parameters %s", args) - - # Before we do anything with models, we want to ensure that we get fp16 execution of torch.einsum if args.fp16 is set. - # Otherwise it'll default to "promote" mode, and we'll get fp32 operations. Note that running `--fp16_opt_level="O2"` will - # remove the need for this code, but it is still valid. - if args.fp16: - try: - import apex - - apex.amp.register_half_function(torch, "einsum") - except ImportError: - raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") - - # Training - if args.do_train: - train_dataset = load_and_cache_examples(args, tokenizer, evaluate=False, output_examples=False) - global_step, tr_loss = train(args, train_dataset, model, tokenizer, teacher=teacher) - logger.info(" global_step = %s, average loss = %s", global_step, tr_loss) - - # Save the trained model and the tokenizer - if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0): - logger.info("Saving model checkpoint to %s", args.output_dir) - # Save a trained model, configuration and tokenizer using `save_pretrained()`. - # They can then be reloaded using `from_pretrained()` - model_to_save = ( - model.module if hasattr(model, "module") else model - ) # Take care of distributed/parallel training - model_to_save.save_pretrained(args.output_dir) - tokenizer.save_pretrained(args.output_dir) - - # Good practice: save your training arguments together with the trained model - torch.save(args, os.path.join(args.output_dir, "training_args.bin")) - - # Load a trained model and vocabulary that you have fine-tuned - model = model_class.from_pretrained(args.output_dir) - tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case) - model.to(args.device) - - # Evaluation - we can ask to evaluate all the checkpoints (sub-directories) in a directory - results = {} - if args.do_eval and args.local_rank in [-1, 0]: - if args.do_train: - logger.info("Loading checkpoints saved during training for evaluation") - checkpoints = [args.output_dir] - if args.eval_all_checkpoints: - checkpoints = [ - os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + "/**/" + WEIGHTS_NAME, recursive=True)) - ] - - logger.info("Evaluate the following checkpoints: %s", checkpoints) - - for checkpoint in checkpoints: - # Reload the model - global_step = checkpoint.split("-")[-1] if len(checkpoints) > 1 else "" - model = model_class.from_pretrained(checkpoint) - model.to(args.device) - - # Evaluate - result = evaluate(args, model, tokenizer, prefix=global_step) - - result = {k + ("_{}".format(global_step) if global_step else ""): v for k, v in result.items()} - results.update(result) - - logger.info("Results: {}".format(results)) - - return results - - -if __name__ == "__main__": - main() diff --git a/examples/research_projects/distillation/scripts/binarized_data.py 
b/examples/research_projects/distillation/scripts/binarized_data.py deleted file mode 100644 index 3fc3214acf7..00000000000 --- a/examples/research_projects/distillation/scripts/binarized_data.py +++ /dev/null @@ -1,97 +0,0 @@ -# coding=utf-8 -# Copyright 2019-present, the HuggingFace Inc. team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" -Preprocessing script before distillation. -""" - -import argparse -import logging -import pickle -import random -import time - -import numpy as np - -from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer - - -logging.basicConfig( - format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO -) -logger = logging.getLogger(__name__) - - -def main(): - parser = argparse.ArgumentParser( - description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)." - ) - parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data.") - parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"]) - parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use.") - parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix.") - args = parser.parse_args() - - logger.info(f"Loading Tokenizer ({args.tokenizer_name})") - if args.tokenizer_type == "bert": - tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name) - bos = tokenizer.special_tokens_map["cls_token"] # `[CLS]` - sep = tokenizer.special_tokens_map["sep_token"] # `[SEP]` - elif args.tokenizer_type == "roberta": - tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name) - bos = tokenizer.special_tokens_map["cls_token"] # `` - sep = tokenizer.special_tokens_map["sep_token"] # `` - elif args.tokenizer_type == "gpt2": - tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name) - bos = tokenizer.special_tokens_map["bos_token"] # `<|endoftext|>` - sep = tokenizer.special_tokens_map["eos_token"] # `<|endoftext|>` - - logger.info(f"Loading text from {args.file_path}") - with open(args.file_path, "r", encoding="utf8") as fp: - data = fp.readlines() - - logger.info("Start encoding") - logger.info(f"{len(data)} examples to process.") - - rslt = [] - iter = 0 - interval = 10000 - start = time.time() - for text in data: - text = f"{bos} {text.strip()} {sep}" - token_ids = tokenizer.encode(text, add_special_tokens=False) - rslt.append(token_ids) - - iter += 1 - if iter % interval == 0: - end = time.time() - logger.info(f"{iter} examples processed. 
- {(end-start):.2f}s/{interval}expl") - start = time.time() - logger.info("Finished binarization") - logger.info(f"{len(data)} examples processed.") - - dp_file = f"{args.dump_file}.{args.tokenizer_name}.pickle" - vocab_size = tokenizer.vocab_size - if vocab_size < (1 << 16): - rslt_ = [np.uint16(d) for d in rslt] - else: - rslt_ = [np.int32(d) for d in rslt] - random.shuffle(rslt_) - logger.info(f"Dump to {dp_file}") - with open(dp_file, "wb") as handle: - pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL) - - -if __name__ == "__main__": - main() diff --git a/examples/research_projects/distillation/scripts/extract.py b/examples/research_projects/distillation/scripts/extract.py deleted file mode 100644 index c45821d1873..00000000000 --- a/examples/research_projects/distillation/scripts/extract.py +++ /dev/null @@ -1,106 +0,0 @@ -# coding=utf-8 -# Copyright 2019-present, the HuggingFace Inc. team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" -Preprocessing script before training the distilled model. -Specific to RoBERTa -> DistilRoBERTa and GPT2 -> DistilGPT2. -""" - -import argparse - -import torch - -from transformers import GPT2LMHeadModel, RobertaForMaskedLM - - -if __name__ == "__main__": - parser = argparse.ArgumentParser( - description=( - "Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned" - " Distillation" - ) - ) - parser.add_argument("--model_type", default="roberta", choices=["roberta", "gpt2"]) - parser.add_argument("--model_name", default="roberta-large", type=str) - parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_roberta_048131723.pth", type=str) - parser.add_argument("--vocab_transform", action="store_true") - args = parser.parse_args() - - if args.model_type == "roberta": - model = RobertaForMaskedLM.from_pretrained(args.model_name) - prefix = "roberta" - elif args.model_type == "gpt2": - model = GPT2LMHeadModel.from_pretrained(args.model_name) - prefix = "transformer" - - state_dict = model.state_dict() - compressed_sd = {} - - # Embeddings # - if args.model_type == "gpt2": - for param_name in ["wte.weight", "wpe.weight"]: - compressed_sd[f"{prefix}.{param_name}"] = state_dict[f"{prefix}.{param_name}"] - else: - for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]: - param_name = f"{prefix}.embeddings.{w}.weight" - compressed_sd[param_name] = state_dict[param_name] - for w in ["weight", "bias"]: - param_name = f"{prefix}.embeddings.LayerNorm.{w}" - compressed_sd[param_name] = state_dict[param_name] - - # Transformer Blocks # - std_idx = 0 - for teacher_idx in [0, 2, 4, 7, 9, 11]: - if args.model_type == "gpt2": - for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]: - for w in ["weight", "bias"]: - compressed_sd[f"{prefix}.h.{std_idx}.{layer}.{w}"] = state_dict[ - f"{prefix}.h.{teacher_idx}.{layer}.{w}" - ] - compressed_sd[f"{prefix}.h.{std_idx}.attn.bias"] = state_dict[f"{prefix}.h.{teacher_idx}.attn.bias"] - else: - for layer in [ - 
"attention.self.query", - "attention.self.key", - "attention.self.value", - "attention.output.dense", - "attention.output.LayerNorm", - "intermediate.dense", - "output.dense", - "output.LayerNorm", - ]: - for w in ["weight", "bias"]: - compressed_sd[f"{prefix}.encoder.layer.{std_idx}.{layer}.{w}"] = state_dict[ - f"{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}" - ] - std_idx += 1 - - # Language Modeling Head ###s - if args.model_type == "roberta": - for layer in ["lm_head.decoder.weight", "lm_head.bias"]: - compressed_sd[f"{layer}"] = state_dict[f"{layer}"] - if args.vocab_transform: - for w in ["weight", "bias"]: - compressed_sd[f"lm_head.dense.{w}"] = state_dict[f"lm_head.dense.{w}"] - compressed_sd[f"lm_head.layer_norm.{w}"] = state_dict[f"lm_head.layer_norm.{w}"] - elif args.model_type == "gpt2": - for w in ["weight", "bias"]: - compressed_sd[f"{prefix}.ln_f.{w}"] = state_dict[f"{prefix}.ln_f.{w}"] - compressed_sd["lm_head.weight"] = state_dict["lm_head.weight"] - - print(f"N layers selected for distillation: {std_idx}") - print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}") - - print(f"Save transferred checkpoint to {args.dump_checkpoint}.") - torch.save(compressed_sd, args.dump_checkpoint) diff --git a/examples/research_projects/distillation/scripts/extract_distilbert.py b/examples/research_projects/distillation/scripts/extract_distilbert.py deleted file mode 100644 index 8637970c511..00000000000 --- a/examples/research_projects/distillation/scripts/extract_distilbert.py +++ /dev/null @@ -1,96 +0,0 @@ -# coding=utf-8 -# Copyright 2019-present, the HuggingFace Inc. team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" -Preprocessing script before training DistilBERT. -Specific to BERT -> DistilBERT. 
-""" - -import argparse - -import torch - -from transformers import BertForMaskedLM - - -if __name__ == "__main__": - parser = argparse.ArgumentParser( - description=( - "Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned" - " Distillation" - ) - ) - parser.add_argument("--model_type", default="bert", choices=["bert"]) - parser.add_argument("--model_name", default="bert-base-uncased", type=str) - parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_bert-base-uncased_0247911.pth", type=str) - parser.add_argument("--vocab_transform", action="store_true") - args = parser.parse_args() - - if args.model_type == "bert": - model = BertForMaskedLM.from_pretrained(args.model_name) - prefix = "bert" - else: - raise ValueError('args.model_type should be "bert".') - - state_dict = model.state_dict() - compressed_sd = {} - - for w in ["word_embeddings", "position_embeddings"]: - compressed_sd[f"distilbert.embeddings.{w}.weight"] = state_dict[f"{prefix}.embeddings.{w}.weight"] - for w in ["weight", "bias"]: - compressed_sd[f"distilbert.embeddings.LayerNorm.{w}"] = state_dict[f"{prefix}.embeddings.LayerNorm.{w}"] - - std_idx = 0 - for teacher_idx in [0, 2, 4, 7, 9, 11]: - for w in ["weight", "bias"]: - compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.q_lin.{w}"] = state_dict[ - f"{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}" - ] - compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.k_lin.{w}"] = state_dict[ - f"{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}" - ] - compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.v_lin.{w}"] = state_dict[ - f"{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}" - ] - - compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.out_lin.{w}"] = state_dict[ - f"{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}" - ] - compressed_sd[f"distilbert.transformer.layer.{std_idx}.sa_layer_norm.{w}"] = state_dict[ - f"{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}" - ] - - compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin1.{w}"] = state_dict[ - f"{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}" - ] - compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin2.{w}"] = state_dict[ - f"{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}" - ] - compressed_sd[f"distilbert.transformer.layer.{std_idx}.output_layer_norm.{w}"] = state_dict[ - f"{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}" - ] - std_idx += 1 - - compressed_sd["vocab_projector.weight"] = state_dict["cls.predictions.decoder.weight"] - compressed_sd["vocab_projector.bias"] = state_dict["cls.predictions.bias"] - if args.vocab_transform: - for w in ["weight", "bias"]: - compressed_sd[f"vocab_transform.{w}"] = state_dict[f"cls.predictions.transform.dense.{w}"] - compressed_sd[f"vocab_layer_norm.{w}"] = state_dict[f"cls.predictions.transform.LayerNorm.{w}"] - - print(f"N layers selected for distillation: {std_idx}") - print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}") - - print(f"Save transferred checkpoint to {args.dump_checkpoint}.") - torch.save(compressed_sd, args.dump_checkpoint) diff --git a/examples/research_projects/distillation/scripts/token_counts.py b/examples/research_projects/distillation/scripts/token_counts.py deleted file mode 100644 index 2f80bf31f47..00000000000 --- a/examples/research_projects/distillation/scripts/token_counts.py +++ /dev/null @@ -1,57 
+0,0 @@ -# coding=utf-8 -# Copyright 2019-present, the HuggingFace Inc. team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" -Preprocessing script before training the distilled model. -""" - -import argparse -import logging -import pickle -from collections import Counter - - -logging.basicConfig( - format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO -) -logger = logging.getLogger(__name__) - -if __name__ == "__main__": - parser = argparse.ArgumentParser( - description="Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)" - ) - parser.add_argument( - "--data_file", type=str, default="data/dump.bert-base-uncased.pickle", help="The binarized dataset." - ) - parser.add_argument( - "--token_counts_dump", type=str, default="data/token_counts.bert-base-uncased.pickle", help="The dump file." - ) - parser.add_argument("--vocab_size", default=30522, type=int) - args = parser.parse_args() - - logger.info(f"Loading data from {args.data_file}") - with open(args.data_file, "rb") as fp: - data = pickle.load(fp) - - logger.info("Counting occurrences for MLM.") - counter = Counter() - for tk_ids in data: - counter.update(tk_ids) - counts = [0] * args.vocab_size - for k, v in counter.items(): - counts[k] = v - - logger.info(f"Dump to {args.token_counts_dump}") - with open(args.token_counts_dump, "wb") as handle: - pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL) diff --git a/examples/research_projects/distillation/train.py b/examples/research_projects/distillation/train.py deleted file mode 100644 index 15d98ace09b..00000000000 --- a/examples/research_projects/distillation/train.py +++ /dev/null @@ -1,325 +0,0 @@ -# coding=utf-8 -# Copyright 2019-present, the HuggingFace Inc. team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" -Training the distilled model. -Supported architectures include: BERT -> DistilBERT, RoBERTa -> DistilRoBERTa, GPT2 -> DistilGPT2. 
-""" - -import argparse -import json -import os -import pickle -import shutil - -import numpy as np -import torch -from distiller import Distiller -from lm_seqs_dataset import LmSeqsDataset - -from transformers import ( - BertConfig, - BertForMaskedLM, - BertTokenizer, - DistilBertConfig, - DistilBertForMaskedLM, - DistilBertTokenizer, - GPT2Config, - GPT2LMHeadModel, - GPT2Tokenizer, - RobertaConfig, - RobertaForMaskedLM, - RobertaTokenizer, -) -from utils import git_log, init_gpu_params, logger, set_seed - - -MODEL_CLASSES = { - "distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer), - "roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer), - "bert": (BertConfig, BertForMaskedLM, BertTokenizer), - "gpt2": (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer), -} - - -def sanity_checks(args): - """ - A bunch of args sanity checks to perform even starting... - """ - assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0) - assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0) - if args.mlm: - assert os.path.isfile(args.token_counts) - assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"]) - else: - assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"]) - - assert args.teacher_type == args.student_type or ( - args.student_type == "distilbert" and args.teacher_type == "bert" - ) - assert os.path.isfile(args.student_config) - if args.student_pretrained_weights is not None: - assert os.path.isfile(args.student_pretrained_weights) - - if args.freeze_token_type_embds: - assert args.student_type in ["roberta"] - - assert args.alpha_ce >= 0.0 - assert args.alpha_mlm >= 0.0 - assert args.alpha_clm >= 0.0 - assert args.alpha_mse >= 0.0 - assert args.alpha_cos >= 0.0 - assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0 - - -def freeze_pos_embeddings(student, args): - if args.student_type == "roberta": - student.roberta.embeddings.position_embeddings.weight.requires_grad = False - elif args.student_type == "gpt2": - student.transformer.wpe.weight.requires_grad = False - - -def freeze_token_type_embeddings(student, args): - if args.student_type == "roberta": - student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False - - -def main(): - parser = argparse.ArgumentParser(description="Training") - parser.add_argument("--force", action="store_true", help="Overwrite dump_path if it already exists.") - - parser.add_argument( - "--dump_path", type=str, required=True, help="The output directory (log, checkpoints, parameters, etc.)" - ) - parser.add_argument( - "--data_file", - type=str, - required=True, - help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence.", - ) - - parser.add_argument( - "--student_type", - type=str, - choices=["distilbert", "roberta", "gpt2"], - required=True, - help="The student type (DistilBERT, RoBERTa).", - ) - parser.add_argument("--student_config", type=str, required=True, help="Path to the student configuration.") - parser.add_argument( - "--student_pretrained_weights", default=None, type=str, help="Load student initialization checkpoint." - ) - - parser.add_argument( - "--teacher_type", choices=["bert", "roberta", "gpt2"], required=True, help="Teacher type (BERT, RoBERTa)." 
- ) - parser.add_argument("--teacher_name", type=str, required=True, help="The teacher model.") - - parser.add_argument("--temperature", default=2.0, type=float, help="Temperature for the softmax temperature.") - parser.add_argument( - "--alpha_ce", default=0.5, type=float, help="Linear weight for the distillation loss. Must be >=0." - ) - parser.add_argument( - "--alpha_mlm", - default=0.0, - type=float, - help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.", - ) - parser.add_argument("--alpha_clm", default=0.5, type=float, help="Linear weight for the CLM loss. Must be >=0.") - parser.add_argument("--alpha_mse", default=0.0, type=float, help="Linear weight of the MSE loss. Must be >=0.") - parser.add_argument( - "--alpha_cos", default=0.0, type=float, help="Linear weight of the cosine embedding loss. Must be >=0." - ) - - parser.add_argument( - "--mlm", action="store_true", help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM." - ) - parser.add_argument( - "--mlm_mask_prop", - default=0.15, - type=float, - help="Proportion of tokens for which we need to make a prediction.", - ) - parser.add_argument("--word_mask", default=0.8, type=float, help="Proportion of tokens to mask out.") - parser.add_argument("--word_keep", default=0.1, type=float, help="Proportion of tokens to keep.") - parser.add_argument("--word_rand", default=0.1, type=float, help="Proportion of tokens to randomly replace.") - parser.add_argument( - "--mlm_smoothing", - default=0.7, - type=float, - help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).", - ) - parser.add_argument("--token_counts", type=str, help="The token counts in the data_file for MLM.") - - parser.add_argument( - "--restrict_ce_to_mask", - action="store_true", - help="If true, compute the distillation loss only the [MLM] prediction distribution.", - ) - parser.add_argument( - "--freeze_pos_embs", - action="store_true", - help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.", - ) - parser.add_argument( - "--freeze_token_type_embds", - action="store_true", - help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.", - ) - - parser.add_argument("--n_epoch", type=int, default=3, help="Number of pass on the whole dataset.") - parser.add_argument("--batch_size", type=int, default=5, help="Batch size (for each process).") - parser.add_argument( - "--group_by_size", - action="store_false", - help="If true, group sequences that have similar length into the same batch. 
Default is true.", - ) - - parser.add_argument( - "--gradient_accumulation_steps", - type=int, - default=50, - help="Gradient accumulation for larger training batches.", - ) - parser.add_argument("--warmup_prop", default=0.05, type=float, help="Linear warmup proportion.") - parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.") - parser.add_argument("--learning_rate", default=5e-4, type=float, help="The initial learning rate for Adam.") - parser.add_argument("--adam_epsilon", default=1e-6, type=float, help="Epsilon for Adam optimizer.") - parser.add_argument("--max_grad_norm", default=5.0, type=float, help="Max gradient norm.") - parser.add_argument("--initializer_range", default=0.02, type=float, help="Random initialization range.") - - parser.add_argument( - "--fp16", - action="store_true", - help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit", - ) - parser.add_argument( - "--fp16_opt_level", - type=str, - default="O1", - help=( - "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. " - "See details at https://nvidia.github.io/apex/amp.html" - ), - ) - parser.add_argument("--n_gpu", type=int, default=1, help="Number of GPUs in the node.") - parser.add_argument("--local_rank", type=int, default=-1, help="Distributed training - Local rank") - parser.add_argument("--seed", type=int, default=56, help="Random seed") - - parser.add_argument("--log_interval", type=int, default=500, help="Tensorboard logging interval.") - parser.add_argument("--checkpoint_interval", type=int, default=4000, help="Checkpoint interval.") - args = parser.parse_args() - sanity_checks(args) - - # ARGS # - init_gpu_params(args) - set_seed(args) - if args.is_master: - if os.path.exists(args.dump_path): - if not args.force: - raise ValueError( - f"Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite" - " itUse `--force` if you want to overwrite it" - ) - else: - shutil.rmtree(args.dump_path) - - if not os.path.exists(args.dump_path): - os.makedirs(args.dump_path) - logger.info(f"Experiment will be dumped and logged in {args.dump_path}") - - # SAVE PARAMS # - logger.info(f"Param: {args}") - with open(os.path.join(args.dump_path, "parameters.json"), "w") as f: - json.dump(vars(args), f, indent=4) - git_log(args.dump_path) - - student_config_class, student_model_class, _ = MODEL_CLASSES[args.student_type] - teacher_config_class, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type] - - # TOKENIZER # - tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name) - special_tok_ids = {} - for tok_name, tok_symbol in tokenizer.special_tokens_map.items(): - idx = tokenizer.all_special_tokens.index(tok_symbol) - special_tok_ids[tok_name] = tokenizer.all_special_ids[idx] - logger.info(f"Special tokens {special_tok_ids}") - args.special_tok_ids = special_tok_ids - args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name] - - # DATA LOADER # - logger.info(f"Loading data from {args.data_file}") - with open(args.data_file, "rb") as fp: - data = pickle.load(fp) - - if args.mlm: - logger.info(f"Loading token counts from {args.token_counts} (already pre-computed)") - with open(args.token_counts, "rb") as fp: - counts = pickle.load(fp) - - token_probs = np.maximum(counts, 1) ** -args.mlm_smoothing - for idx in special_tok_ids.values(): - token_probs[idx] = 0.0 # do not predict special tokens - token_probs = 
torch.from_numpy(token_probs) - else: - token_probs = None - - train_lm_seq_dataset = LmSeqsDataset(params=args, data=data) - logger.info("Data loader created.") - - # STUDENT # - logger.info(f"Loading student config from {args.student_config}") - stu_architecture_config = student_config_class.from_pretrained(args.student_config) - stu_architecture_config.output_hidden_states = True - - if args.student_pretrained_weights is not None: - logger.info(f"Loading pretrained weights from {args.student_pretrained_weights}") - student = student_model_class.from_pretrained(args.student_pretrained_weights, config=stu_architecture_config) - else: - student = student_model_class(stu_architecture_config) - - if args.n_gpu > 0: - student.to(f"cuda:{args.local_rank}") - logger.info("Student loaded.") - - # TEACHER # - teacher = teacher_model_class.from_pretrained(args.teacher_name, output_hidden_states=True) - if args.n_gpu > 0: - teacher.to(f"cuda:{args.local_rank}") - logger.info(f"Teacher loaded from {args.teacher_name}.") - - # FREEZING # - if args.freeze_pos_embs: - freeze_pos_embeddings(student, args) - if args.freeze_token_type_embds: - freeze_token_type_embeddings(student, args) - - # SANITY CHECKS # - assert student.config.vocab_size == teacher.config.vocab_size - assert student.config.hidden_size == teacher.config.hidden_size - assert student.config.max_position_embeddings == teacher.config.max_position_embeddings - if args.mlm: - assert token_probs.size(0) == stu_architecture_config.vocab_size - - # DISTILLER # - torch.cuda.empty_cache() - distiller = Distiller( - params=args, dataset=train_lm_seq_dataset, token_probs=token_probs, student=student, teacher=teacher - ) - distiller.train() - logger.info("Let's go get some drinks.") - - -if __name__ == "__main__": - main() diff --git a/examples/research_projects/distillation/training_configs/distilbert-base-cased.json b/examples/research_projects/distillation/training_configs/distilbert-base-cased.json deleted file mode 100644 index d4f524d704c..00000000000 --- a/examples/research_projects/distillation/training_configs/distilbert-base-cased.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "activation": "gelu", - "attention_dropout": 0.1, - "dim": 768, - "dropout": 0.1, - "hidden_dim": 3072, - "initializer_range": 0.02, - "max_position_embeddings": 512, - "n_heads": 12, - "n_layers": 6, - "sinusoidal_pos_embds": true, - "tie_weights_": true, - "vocab_size": 28996 - } - \ No newline at end of file diff --git a/examples/research_projects/distillation/training_configs/distilbert-base-multilingual-cased.json b/examples/research_projects/distillation/training_configs/distilbert-base-multilingual-cased.json deleted file mode 100644 index f76e7febcba..00000000000 --- a/examples/research_projects/distillation/training_configs/distilbert-base-multilingual-cased.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "activation": "gelu", - "attention_dropout": 0.1, - "dim": 768, - "dropout": 0.1, - "hidden_dim": 3072, - "initializer_range": 0.02, - "max_position_embeddings": 512, - "n_heads": 12, - "n_layers": 6, - "sinusoidal_pos_embds": true, - "tie_weights_": true, - "vocab_size": 119547 - } - \ No newline at end of file diff --git a/examples/research_projects/distillation/training_configs/distilbert-base-uncased.json b/examples/research_projects/distillation/training_configs/distilbert-base-uncased.json deleted file mode 100644 index 15d1e7fe00e..00000000000 --- a/examples/research_projects/distillation/training_configs/distilbert-base-uncased.json +++ /dev/null @@ -1,15 +0,0 
@@ -{ - "activation": "gelu", - "attention_dropout": 0.1, - "dim": 768, - "dropout": 0.1, - "hidden_dim": 3072, - "initializer_range": 0.02, - "max_position_embeddings": 512, - "n_heads": 12, - "n_layers": 6, - "sinusoidal_pos_embds": true, - "tie_weights_": true, - "vocab_size": 30522 - } - \ No newline at end of file diff --git a/examples/research_projects/distillation/training_configs/distilgpt2.json b/examples/research_projects/distillation/training_configs/distilgpt2.json deleted file mode 100644 index 9820ac93b8c..00000000000 --- a/examples/research_projects/distillation/training_configs/distilgpt2.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "initializer_range": 0.02, - "layer_norm_epsilon": 0.00001, - "n_embd": 768, - "n_head": 12, - "n_layer": 6, - "n_positions": 1024, - "vocab_size": 50257 -} \ No newline at end of file diff --git a/examples/research_projects/distillation/training_configs/distilroberta-base.json b/examples/research_projects/distillation/training_configs/distilroberta-base.json deleted file mode 100644 index 2d90ef6380a..00000000000 --- a/examples/research_projects/distillation/training_configs/distilroberta-base.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "vocab_size": 50265, - "hidden_size": 768, - "num_hidden_layers": 6, - "num_attention_heads": 12, - "intermediate_size": 3072, - "hidden_act": "gelu", - "hidden_dropout_prob": 0.1, - "attention_probs_dropout_prob": 0.1, - "max_position_embeddings": 514, - "type_vocab_size": 1, - "initializer_range": 0.02, - "layer_norm_eps": 0.00001 -} \ No newline at end of file diff --git a/examples/research_projects/distillation/utils.py b/examples/research_projects/distillation/utils.py deleted file mode 100644 index e86d2593bbd..00000000000 --- a/examples/research_projects/distillation/utils.py +++ /dev/null @@ -1,134 +0,0 @@ -# coding=utf-8 -# Copyright 2019-present, the HuggingFace Inc. team and Facebook, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Utils to train DistilBERT -adapted in part from Facebook, Inc XLM model (https://github.com/facebookresearch/XLM) -""" - -import json -import logging -import os -import socket - -import git -import numpy as np -import torch - - -logging.basicConfig( - format="%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s", - datefmt="%m/%d/%Y %H:%M:%S", - level=logging.INFO, -) -logger = logging.getLogger(__name__) - - -def git_log(folder_path: str): - """ - Log commit info. - """ - repo = git.Repo(search_parent_directories=True) - repo_infos = { - "repo_id": str(repo), - "repo_sha": str(repo.head.object.hexsha), - "repo_branch": str(repo.active_branch), - } - - with open(os.path.join(folder_path, "git_log.json"), "w") as f: - json.dump(repo_infos, f, indent=4) - - -def init_gpu_params(params): - """ - Handle single and multi-GPU / multi-node. 
- """ - if params.n_gpu <= 0: - params.local_rank = 0 - params.master_port = -1 - params.is_master = True - params.multi_gpu = False - return - - assert torch.cuda.is_available() - - logger.info("Initializing GPUs") - if params.n_gpu > 1: - assert params.local_rank != -1 - - params.world_size = int(os.environ["WORLD_SIZE"]) - params.n_gpu_per_node = int(os.environ["N_GPU_NODE"]) - params.global_rank = int(os.environ["RANK"]) - - # number of nodes / node ID - params.n_nodes = params.world_size // params.n_gpu_per_node - params.node_id = params.global_rank // params.n_gpu_per_node - params.multi_gpu = True - - assert params.n_nodes == int(os.environ["N_NODES"]) - assert params.node_id == int(os.environ["NODE_RANK"]) - - # local job (single GPU) - else: - assert params.local_rank == -1 - - params.n_nodes = 1 - params.node_id = 0 - params.local_rank = 0 - params.global_rank = 0 - params.world_size = 1 - params.n_gpu_per_node = 1 - params.multi_gpu = False - - # sanity checks - assert params.n_nodes >= 1 - assert 0 <= params.node_id < params.n_nodes - assert 0 <= params.local_rank <= params.global_rank < params.world_size - assert params.world_size == params.n_nodes * params.n_gpu_per_node - - # define whether this is the master process / if we are in multi-node distributed mode - params.is_master = params.node_id == 0 and params.local_rank == 0 - params.multi_node = params.n_nodes > 1 - - # summary - PREFIX = f"--- Global rank: {params.global_rank} - " - logger.info(PREFIX + "Number of nodes: %i" % params.n_nodes) - logger.info(PREFIX + "Node ID : %i" % params.node_id) - logger.info(PREFIX + "Local rank : %i" % params.local_rank) - logger.info(PREFIX + "World size : %i" % params.world_size) - logger.info(PREFIX + "GPUs per node : %i" % params.n_gpu_per_node) - logger.info(PREFIX + "Master : %s" % str(params.is_master)) - logger.info(PREFIX + "Multi-node : %s" % str(params.multi_node)) - logger.info(PREFIX + "Multi-GPU : %s" % str(params.multi_gpu)) - logger.info(PREFIX + "Hostname : %s" % socket.gethostname()) - - # set GPU device - torch.cuda.set_device(params.local_rank) - - # initialize multi-GPU - if params.multi_gpu: - logger.info("Initializing PyTorch distributed") - torch.distributed.init_process_group( - init_method="env://", - backend="nccl", - ) - - -def set_seed(args): - """ - Set the random seed. - """ - np.random.seed(args.seed) - torch.manual_seed(args.seed) - if args.n_gpu > 0: - torch.cuda.manual_seed_all(args.seed) diff --git a/examples/research_projects/fsner/README.md b/examples/research_projects/fsner/README.md deleted file mode 100644 index 5ebcee07fcb..00000000000 --- a/examples/research_projects/fsner/README.md +++ /dev/null @@ -1,88 +0,0 @@ -

FSNER LOGO
-
- Implemented by sayef. -

- -## Overview - -The FSNER model was proposed in [Example-Based Named Entity Recognition](https://arxiv.org/abs/2008.10570) by Morteza Ziyadi, Yuting Sun, Abhishek Goswami, Jade Huang, Weizhu Chen. To identify entity spans in a new domain, it uses a train-free few-shot learning approach inspired by question-answering. - - - -## Abstract ----- -> We present a novel approach to named entity recognition (NER) in the presence of scarce data that we call example-based NER. Our train-free few-shot learning approach takes inspiration from question-answering to identify entity spans in a new and unseen domain. In comparison with the current state-of-the-art, the proposed method performs significantly better, especially when using a low number of support examples. - - - -## Model Training Details ------ - -| identifier | epochs | datasets | -| ---------- |:----------:| :-----:| -| [sayef/fsner-bert-base-uncased](https://huggingface.co/sayef/fsner-bert-base-uncased) | 10 | ontonotes5, conll2003, wnut2017, and fin (Alvarado et al.). | - - -## Installation and Example Usage ------- - -You can use the FSNER model in 3 ways: - -1. Install directly from PyPI: `pip install fsner` and import the model as shown in the code example below - - or - -2. Install from source: `python setup.py install` and import the model as shown in the code example below - - or - -3. Clone repo and change directory to `src` and import the model as shown in the code example below - - - -```python -from fsner import FSNERModel, FSNERTokenizerUtils - -model = FSNERModel("sayef/fsner-bert-base-uncased") - -tokenizer = FSNERTokenizerUtils("sayef/fsner-bert-base-uncased") - -# size of query and supports must be the same. If you want to find all the entitites in one particular query, just repeat the same query n times where n is equal to the number of supports (or entities). 
- - -query = [ - 'KWE 4000 can reach with a maximum speed from up to 450 P/min an accuracy from 50 mg', - 'I would like to order a computer from eBay.', -] - -# each list in supports are the examples of one entity type -# wrap entities around with [E] and [/E] in the examples - -supports = [ - [ - 'Horizontal flow wrapper [E] Pack 403 [/E] features the new retrofit-kit „paper-ON-form“', - '[E] Paloma Pick-and-Place-Roboter [/E] arranges the bakery products for the downstream tray-forming equipment', - 'Finally, the new [E] Kliklok ACE [/E] carton former forms cartons and trays without the use of glue', - 'We set up our pilot plant with the right [E] FibreForm® [/E] configuration to make prototypes for your marketing tests and package validation', - 'The [E] CAR-T5 [/E] is a reliable, purely mechanically driven cartoning machine for versatile application fields' - ], - [ - "[E] Walmart [/E] is a leading e-commerce company", - "I recently ordered a book from [E] Amazon [/E]", - "I ordered this from [E] ShopClues [/E]", - "[E] Flipkart [/E] started it's journey from zero" - ] - ] - -device = 'cpu' - -W_query = tokenizer.tokenize(query).to(device) -W_supports = tokenizer.tokenize(supports).to(device) - -start_prob, end_prob = model(W_query, W_supports) - -output = tokenizer.extract_entity_from_scores(query, W_query, start_prob, end_prob, thresh=0.50) - -print(output) -``` diff --git a/examples/research_projects/fsner/pyproject.toml b/examples/research_projects/fsner/pyproject.toml deleted file mode 100644 index f00ba2f7a92..00000000000 --- a/examples/research_projects/fsner/pyproject.toml +++ /dev/null @@ -1,7 +0,0 @@ -[build-system] -requires = [ - "setuptools>=57.4.0", - "wheel>=0.37.0", - "transformers>=4.9.2" -] -build-backend = "setuptools.build_meta" \ No newline at end of file diff --git a/examples/research_projects/fsner/requirements.txt b/examples/research_projects/fsner/requirements.txt deleted file mode 100644 index f77cb020b2c..00000000000 --- a/examples/research_projects/fsner/requirements.txt +++ /dev/null @@ -1 +0,0 @@ -transformers>=4.9.2 \ No newline at end of file diff --git a/examples/research_projects/fsner/setup.py b/examples/research_projects/fsner/setup.py deleted file mode 100644 index 8ce34d0f7d9..00000000000 --- a/examples/research_projects/fsner/setup.py +++ /dev/null @@ -1,27 +0,0 @@ -import setuptools - - -with open("README.md", "r", encoding="utf-8") as fh: - long_description = fh.read() - -setuptools.setup( - name="fsner", - version="0.0.1", - author="msi sayef", - author_email="msi.sayef@gmail.com", - description="Few-shot Named Entity Recognition", - long_description=long_description, - long_description_content_type="text/markdown", - url="https://github.com/huggingface/transformers/tree/main/examples/research_projects/fsner", - project_urls={ - "Bug Tracker": "https://github.com/huggingface/transformers/issues", - }, - classifiers=[ - "Programming Language :: Python :: 3", - "Operating System :: OS Independent", - ], - package_dir={"": "src"}, - packages=setuptools.find_packages(where="src"), - python_requires=">=3.6", - install_requires=["torch>=1.9.0", "transformers>=4.9.2"], -) diff --git a/examples/research_projects/fsner/src/fsner/__init__.py b/examples/research_projects/fsner/src/fsner/__init__.py deleted file mode 100644 index 130813cc119..00000000000 --- a/examples/research_projects/fsner/src/fsner/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -from .model import FSNERModel -from .tokenizer_utils import FSNERTokenizerUtils - - -__all__ = ["FSNERModel", 
"FSNERTokenizerUtils"] diff --git a/examples/research_projects/fsner/src/fsner/model.py b/examples/research_projects/fsner/src/fsner/model.py deleted file mode 100644 index 0410340c4a9..00000000000 --- a/examples/research_projects/fsner/src/fsner/model.py +++ /dev/null @@ -1,80 +0,0 @@ -import torch - -from transformers import AutoModel - - -class FSNERModel(torch.nn.Module): - """ - The FSNER model implements a few-shot named entity recognition method from the paper `Example-Based Named Entity Recognition `__ by - Morteza Ziyadi, Yuting Sun, Abhishek Goswami, Jade Huang, Weizhu Chen. To identify entity spans in a new domain, it - uses a train-free few-shot learning approach inspired by question-answering. - """ - - def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"): - super(FSNERModel, self).__init__() - - self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True) - self.cos = torch.nn.CosineSimilarity(3, 1e-08) - self.softmax = torch.nn.Softmax(dim=1) - - def BERT(self, **inputs): - return self.bert(**inputs).last_hidden_state - - def VectorSum(self, token_embeddings): - return token_embeddings.sum(2, keepdim=True) - - def Atten(self, q_rep, S_rep, T=1): - return self.softmax(T * self.cos(q_rep, S_rep)) - - def forward(self, W_query, W_supports): - """ - Find scores of each token being start and end token for an entity. - Args: - W_query (`torch.LongTensor` of shape `(batch_size, sequence_length)`): - Indices of query sequence tokens in the vocabulary. - W_supports (`torch.LongTensor` of shape `(batch_size, sequence_length)`): - Indices of support sequence tokens in the vocabulary. - Returns: - p_start (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): Scores of each token as - being start token of an entity - p_end (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): Scores of each token as - being end token of an entity - """ - - support_sizes = W_supports["sizes"].tolist() - start_token_id = W_supports["start_token_id"].item() - end_token_id = W_supports["end_token_id"].item() - - del W_supports["sizes"] - del W_supports["start_token_id"] - del W_supports["end_token_id"] - - q = self.BERT(**W_query) - S = self.BERT(**W_supports) - - p_starts = None - p_ends = None - - start_token_masks = W_supports["input_ids"] == start_token_id - end_token_masks = W_supports["input_ids"] == end_token_id - - for i, size in enumerate(support_sizes): - if i == 0: - s = 0 - else: - s = support_sizes[i - 1] - - s_start = S[s : s + size][start_token_masks[s : s + size]] - s_end = S[s : s + size][end_token_masks[s : s + size]] - - p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0) - p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0) - - if p_starts is not None: - p_starts = torch.vstack((p_starts, p_start)) - p_ends = torch.vstack((p_ends, p_end)) - else: - p_starts = p_start - p_ends = p_end - - return p_starts, p_ends diff --git a/examples/research_projects/fsner/src/fsner/tokenizer_utils.py b/examples/research_projects/fsner/src/fsner/tokenizer_utils.py deleted file mode 100644 index 7169e23dbe4..00000000000 --- a/examples/research_projects/fsner/src/fsner/tokenizer_utils.py +++ /dev/null @@ -1,102 +0,0 @@ -import torch - -from transformers import AutoTokenizer - - -class FSNERTokenizerUtils: - def __init__(self, pretrained_model_name_or_path): - self.tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path) - - def tokenize(self, x): - """ - Wrapper function for tokenizing query and supports - 
Args: - x (`List[str] or List[List[str]]`): - List of strings for query or list of lists of strings for supports. - Returns: - `transformers.tokenization_utils_base.BatchEncoding` dict with additional keys and values for start_token_id, end_token_id and sizes of example lists for each entity type - """ - - if isinstance(x, list) and all(isinstance(_x, list) for _x in x): - d = None - for l in x: - t = self.tokenizer( - l, - padding="max_length", - max_length=384, - truncation=True, - return_tensors="pt", - ) - t["sizes"] = torch.tensor([len(l)]) - if d is not None: - for k in d.keys(): - d[k] = torch.cat((d[k], t[k]), 0) - else: - d = t - - d["start_token_id"] = torch.tensor(self.tokenizer.convert_tokens_to_ids("[E]")) - d["end_token_id"] = torch.tensor(self.tokenizer.convert_tokens_to_ids("[/E]")) - - elif isinstance(x, list) and all(isinstance(_x, str) for _x in x): - d = self.tokenizer( - x, - padding="max_length", - max_length=384, - truncation=True, - return_tensors="pt", - ) - - else: - raise Exception( - "Type of parameter x was not recognized! Only `list of strings` for query or `list of lists of" - " strings` for supports are supported." - ) - - return d - - def extract_entity_from_scores(self, query, W_query, p_start, p_end, thresh=0.70): - """ - Extracts entities from query and scores given a threshold. - Args: - query (`List[str]`): - List of query strings. - W_query (`torch.LongTensor` of shape `(batch_size, sequence_length)`): - Indices of query sequence tokens in the vocabulary. - p_start (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): - Scores of each token as being start token of an entity - p_end (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): - Scores of each token as being end token of an entity - thresh (`float`): - Score threshold value - Returns: - A list of lists of tuples(decoded entity, score) - """ - - final_outputs = [] - for idx in range(len(W_query["input_ids"])): - start_indexes = end_indexes = range(p_start.shape[1]) - - output = [] - for start_id in start_indexes: - for end_id in end_indexes: - if start_id < end_id: - output.append( - ( - start_id, - end_id, - p_start[idx][start_id].item(), - p_end[idx][end_id].item(), - ) - ) - - output.sort(key=lambda tup: (tup[2] * tup[3]), reverse=True) - temp = [] - for k in range(len(output)): - if output[k][2] * output[k][3] >= thresh: - c_start_pos, c_end_pos = output[k][0], output[k][1] - decoded = self.tokenizer.decode(W_query["input_ids"][idx][c_start_pos:c_end_pos]) - temp.append((decoded, output[k][2] * output[k][3])) - - final_outputs.append(temp) - - return final_outputs diff --git a/examples/research_projects/information-gain-filtration/README.md b/examples/research_projects/information-gain-filtration/README.md deleted file mode 100644 index f685a512509..00000000000 --- a/examples/research_projects/information-gain-filtration/README.md +++ /dev/null @@ -1,100 +0,0 @@ - -# Information Gain Filtration(IGF) - -Authors @Tuko @mraunak - -This folder contains the code how to implement IGF for finetuning on GPT-2. - -## What is IGF? - -Here we present a general fine-tuning method that we call information gain filtration for improving the overall training efficiency and final -performance of language model fine-tuning(see paper below). The method is an alternative fine-tuning method that trains -a secondary model (e.g., a simple convolutional network) to predict the amount of information -gained over a given pre-trained model. 
The secondary model is lightweight and trained to -predict the Information Gain measure. Information Gain is defined as the change in a loss -function for a model before and after an SGD update with a sample (Equation X in the paper). -A small subset of the training set named the “objective” set, is used to measure information -gain on the pre-trained model, and consequently to train the secondary model. After -training, the model is used for filtering samples for the fine-tuning process. Therefore, -a high information gain value would suggest a sample is informative, whereas a low value -would suggest a non-informative sample that should be filtered out. Thus, a thresholding -strategy is defined to select informative samples. With such a strategy, samples are filtered -and once enough samples are selected to form a mini-batch and a usual fine-tuning/optimization -step is applied. The filtration process is repeated until the fine-tuning process is over. - -Paper [Selecting Informative Contexts Improves Language Model Finetuning](https://arxiv.org/abs/2005.00175) - -# Results - -Several experiments were conducted to show the robustness of the IGF method versus the -standard fine-tuning process. For example, we achieve a median perplexity of 54.0 on the -Books dataset compared to 57.3 for standard fine-tuning on GPT-2 Small. The code was -implemented using the Transformers library and Pytorch. While the method may seem more -expensive, we saw enough evidence that it may lead to a performance benefit in the final models. - -![IGF performance](result_igf.png) - -Figure 1: Comparing IGF to Standard Fine-tuning: -IGF with constant (p < 10−3 , t-test) and shifting(p < 10−6 , t-test) thresholding significantly outperform standard fine-tuning. The left-hand figure shows -test-set perplexity after each fine-tuning batch, averaged over 50 runs (error bars denote ± one standard error). The right-hand figure shows the perplexity of each -method after 60 batches. IGF with shifting thresholding (red) clearly improves over standard batched fine-tuning with Adam - -## How to use this project? - -To fine-tune a transformer model with IGF on a language modeling task, use the following script: - -- `model_name_or_path`: Path to pretrained model or model identifier from huggingface.co/models -- `data_file`: A jbl file containing tokenized data which can be split as objective dataset, - train_dataset and test_dataset -- `igf_data_file`: A jbl file containing the context and information gain pairs to train secondary learner. -- `context_len`: The maximum total input sequence length after tokenization. Sequences longer - than this will be truncated, sequences shorter will be padded. 
-- `size_objective_set`: Number of articles that are long enough to be used as our objective set" -- `min_len`: The minimum length of the article to be used as objective set -- `trim`: Truncate the example if it exceeds context length -- `eval_freq`: Secondary model evaluation can be triggered at eval_freq -- `max_steps`: To calculate training epochs -- `number`: The number of examples split to be used as objective_set/test_data -- `secondary_learner_batch_size`: The batch size of training data for secondary learner -- `secondary_learner_max_epochs`: The number of epochs to train secondary learner -- `recopy_model`: Reset the model to the original pretrained GPT-2 weights after each iteration -- `eval_interval`: Decay the selectivity of our secondary learner filter from" - 1 standard deviation above average to 1 below average after eval_interval(10) batches" - - -```python -python run_clm_igf.py\ ---model_name_or_path "openai-community/gpt2" \ ---data_file="data/tokenized_stories_train_wikitext103" \ ---igf_data_file="data/IGF_values" \ ---context_len 32 \ ---size_objective_set 100 \ ---min_len 1026 \ ---trim True \ ---eval_freq 100 \ ---max_steps 1000 \ ---secondary_learner_batch_size 128 \ ---secondary_learner_max_epochs 15 \ ---number 100 \ ---recopy_model \ ---eval_interval 10 \ -``` - -## Citation - -If you find the resource useful, please cite the following paper - -```bibtex -@inproceedings{antonello-etal-2021-selecting, - title = "Selecting Informative Contexts Improves Language Model Fine-tuning", - author = "Antonello, Richard and Beckage, Nicole and Turek, Javier and Huth, Alexander", - booktitle = "Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers)", - month = aug, - year = "2021", - address = "Online", - publisher = "Association for Computational Linguistics", - url = "https://aclanthology.org/2021.acl-long.87", - doi = "10.18653/v1/2021.acl-long.87", - pages = "1072--1085", -} -``` diff --git a/examples/research_projects/information-gain-filtration/igf/__init__.py b/examples/research_projects/information-gain-filtration/igf/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/examples/research_projects/information-gain-filtration/igf/igf.py b/examples/research_projects/information-gain-filtration/igf/igf.py deleted file mode 100644 index 4c5aefd9584..00000000000 --- a/examples/research_projects/information-gain-filtration/igf/igf.py +++ /dev/null @@ -1,416 +0,0 @@ -# Copyright 2022 - Intel Corp. All rights reserved. -# Authors: Mayank Kumar Raunak, Javier Turek, Nicole Backage - -import copy -import logging -import random - -import joblib -import numpy as np -import torch -import torch.nn as nn -from torch.utils.data import DataLoader -from tqdm import tqdm - -from transformers import AdamW, GPT2LMHeadModel, get_linear_schedule_with_warmup - - -logger = logging.getLogger(__name__) - - -def set_seed(seed): - """ - For reproducible training - - Args: - seed: A seed for reproducible training - - """ - random.seed(seed) - np.random.seed(seed) - torch.manual_seed(seed) - torch.cuda.manual_seed_all(seed) - - -def compute_perplexity(model, test_data, context_len): - """ - Computes perplexity of the transformer model on data in test_data - - Args: - model: Pre-trained GPT2 model - test_data: Data on which perplexity calculation is required - context_len: The maximum total input sequence length after tokenization. 
Sequences longer - than this will be truncated, sequences shorter will be padded - - Returns: - Perplexity on input test data - - """ - - model.eval() - device = next(model.parameters()).device - eval_batch_size = 1 - context = torch.zeros((eval_batch_size, context_len), dtype=torch.long, device=device) - eval_dataloader = DataLoader(test_data, shuffle=False, batch_size=eval_batch_size) - eval_loss = torch.zeros(1, device=device) - nb_eval_examples = 0 - for batch in eval_dataloader: - batch.to(device) - # pad - context.zero_() - for i in range(eval_batch_size): - context[i, :] = batch[i] - outputs = model(context, labels=context) - eval_loss += outputs[0].sum().item() - nb_eval_examples += batch.size(0) - eval_loss = eval_loss / nb_eval_examples - perplexity = torch.exp(eval_loss) - model.train() - return perplexity - - -def load_gpt2(model_name="openai-community/gpt2"): - """ - load original openai-community/gpt2 and save off for quicker loading - - Args: - model_name: GPT-2 - - Returns: - GPT-2 model - - """ - - model = GPT2LMHeadModel.from_pretrained(model_name, output_hidden_states=True) - torch.save(model.state_dict(), model_name + "local.pt") - return model - - -def recopy_gpt2(orig_model, device, max_steps): - """ - Reset the model to the original pretrained GPT-2 weights after each iteration - - Args: - orig_model: Original pretrained GPT-2 model imported from Transformers library - device: CPU/GPU - max_steps: number of training steps - - Returns: - Original PreTrained GPT-2 model, - lm_optimizer: Adam optimizer with Decoupled weight decay - lm_scheduler: linear scheduler with the appropriate schedule - - """ - model = copy.deepcopy(orig_model) - model.to(device) - - no_decay = ["bias", "LayerNorm.weight"] - optimizer_grouped_parameters = [ - { - "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], - "weight_decay": 0.0, - }, - {"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0}, - ] - lm_optimizer = AdamW(optimizer_grouped_parameters, lr=5e-5, eps=1e-8) - lm_scheduler = get_linear_schedule_with_warmup(lm_optimizer, 0, max_steps) - torch.cuda.empty_cache() - return model, lm_optimizer, lm_scheduler - - -def intermittent_save(contexts, real_perps, past_perps, filename): - """ - save the perplexity differences to filename - - Args: - contexts: Example on which the perplexity is calculated - real_perps: Perplexity after back-propagating on the selected context - past_perps: Perplexity of model before training on the context - filename: File to store perplexity differences - - Returns: - file with perplexity differences - - """ - # save the perplexity differences to filename - avg = np.array(real_perps).mean() - std = np.array(real_perps).std() - perp_diff = (real_perps - avg) / std - data_final = list(zip(contexts, perp_diff, past_perps)) - joblib.dump(data_final, filename) - - -def collect_objective_set( - model, - orig_perp, - context_len, - train_data, - objective_set, - max_steps, - device, - filename="dev.jbl", - recopy_model=recopy_gpt2, -): - """ - Collect individual IGF values from pre-trained transformer model - max_steps samples of training data to train secondary model - - Args: - model: Pre-trained GPT2 model - orig_perp: Perplexity of original pretrained GPT-2 model - context_len: The maximum total input sequence length after tokenization. 
Sequences longer - than this will be truncated, sequences shorter will be padded - train_data: Data to train model - objective_set: Contexts used to create (X,IG(X)) pairs which is the training data for secondary learner - max_steps: To calculate training epochs of model - device: GPU/CPU - filename: To store intermediate perplexity differences - recopy_model: Reset the model to the original pretrained GPT-2 weights after each iteration - - Returns: - file stored intermediate perplexity differences in intermediate stages - - """ - - # initialize variables to record relevant information - contexts = [] - real_perps = [] - past_perps = [] - - # Initialize the transformer model - orig_model = copy.deepcopy(model) - orig_model.to(device="cpu") - torch.cuda.empty_cache() - - # Compute perplexity of initial transformer model for comparison - model.train() - model, lm_optimizer, lm_scheduler = recopy_model(orig_model, device, max_steps) - - for step in tqdm(range(max_steps)): - context = torch.zeros((1, context_len), dtype=torch.long, device=device) - story = random.choice(train_data) - start = random.randint(0, len(story[0]) - context_len - 1) - context[0, :] = story[0][start : start + context_len] - lm_optimizer.zero_grad() - outputs = model(context, labels=context) - lm_loss = outputs[0] - past_perp = compute_perplexity(model, context, context_len) - model.train() - lm_loss.backward() - # Do LM backprop - torch.nn.utils.clip_grad_norm_(model.parameters(), 3.0) - lm_optimizer.step() - lm_scheduler.step() # Update learning rate schedule - - # Compute perplexity after back-propagating on the selected context - real_perp = compute_perplexity(model, objective_set, context_len) - - # Periodically save the stored (X, IG(X)) pairs - if step % 1000 == 0 and step > 1: - intermittent_save(contexts, real_perps, past_perps, filename) - - # Reset the pretrained model to the original pretrained GPT-2 weights after each iteration - model, lm_optimizer, lm_scheduler = recopy_model(orig_model, device, max_steps) - - past_perps.append(past_perp.item()) - real_perps.append(orig_perp - real_perp.item()) - contexts.append(np.array(context.cpu())) - - intermittent_save(contexts, real_perps, past_perps, filename) - - -def generate_datasets( - context_len, file="data/tokenized_stories_train_wikitext103.jbl", number=100, min_len=1026, trim=True -): - """ - Generate objective set and training set - - Args: - context_len: The maximum total input sequence length after tokenization. 
Sequences longer - than this will be truncated, sequences shorter will be padded - file: Tokenized data split into training set and objective set - number: size of objective dataset - min_len: minimum length of a context in objective set - trim: If True truncate the context if it exceeds context length - - Returns: - Generated objective set and training data - - - """ - # Generate objective set and training set - # Designate the first number (100) articles that are long enough to be used - # as our objective set, rest (that are long enough) are training data for - # secondary learner - - data = joblib.load(file) - print("data loaded") - objective_set = [] - if trim: - for i, example in enumerate(data): - if len(example[0]) > min_len: - start = random.randint(0, len(example[0]) - context_len - 1) - objective_set.append(example[0, start : start + context_len]) - if len(objective_set) >= number: - break - train_data = [] - for j in range(i + 1, len(data)): - if len(data[j][0]) > min_len: - train_data.append(data[j]) - else: - objective_set = data[0:number] - train_data = data[number:] - - joblib.dump(objective_set, "objective_set.jbl") - print("objective set saved") - return train_data, objective_set - - -def train_secondary_learner( - secondary_learner, train_dataset, max_epochs, batch_size, eval_freq=50, igf_model_path="secondary_learner.pt" -): - """ - Train the secondary learner (igf_model) - - Args: - secondary_learner: secondary learner - train_dataset: data to train secondary learner - max_epochs: number of epochs to train secondary learner - batch_size: batch size of training data of secondary learner - eval_freq: secondary model evaluation can be triggered at eval_freq - igf_model_path: path to store trained secondary learner - - Returns: - Trained secondary learner - - """ - device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") - # We will use the first 512 pairs from our dataset as a test set for - # our secondary learner and the rest to train - test_dataset = train_dataset[:512] - train_dataset = train_dataset[512:] - train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size) - test_dataloader = DataLoader(test_dataset, shuffle=False, batch_size=batch_size) - - # secondary learner model set up - loss = nn.MSELoss() - test_loss = nn.MSELoss(reduction="sum") - secondary_learner.to(device) - q_optimizer = torch.optim.Adam(secondary_learner.parameters(), lr=0.00001) - secondary_learner.train() - - # TODO in original code this is written as number of actual batches seen - # not number of items seen but other places it is number of items instead. - # improve consistency! 
changed this to epochs for clarity - best_test_loss = float("inf") - # Iterate through batches until we've used max_steps batches - for epoch in range(int(max_epochs)): - tr_q_loss = 0.0 - secondary_learner.train() - for step, batch in enumerate(train_dataloader): - context = batch[0].to(device) - real_q = batch[1].to(device) - predicted_q = secondary_learner(context) - q_optimizer.zero_grad() - q_loss = loss(predicted_q, real_q.float()) - q_loss.backward() - q_optimizer.step() - tr_q_loss += q_loss.item() - - # model trains fairly quickly so we won't wait for a full epoch - # eval is triggered at eval_freq and end of epochs - if (step % eval_freq == 0 and step > 0) or ((step + 1) == len(train_dataloader)): - tr_loss = tr_q_loss / (step + 1) - - secondary_learner.eval() - q_loss2 = 0.0 - sum_q2 = 0.0 - predicted = [] - actual = [] - # Compute performance of the secondary learner after this batch - for step2, batch2 in enumerate(test_dataloader): - features2 = batch2[0].to(device) - real_q2 = batch2[1].to(device) - predicted_q2 = secondary_learner(features2) - q_loss2 += test_loss(predicted_q2, real_q2).item() - sum_q2 += torch.sum(predicted_q2).item() - for ei, i in enumerate(predicted_q2.cpu().detach().numpy()): - predicted.append(i.item()) - for ei, i in enumerate(real_q2.cpu().detach().numpy()): - actual.append(i.item()) - - q_loss2 /= len(test_dataset) - print( - "Epoch: ", - epoch, - "step: ", - step, - "Avg. q:", - sum_q2 / len(test_dataset), - "Train Loss: ", - tr_loss, - "Test Loss: ", - q_loss2, - ) - if q_loss2 < best_test_loss: - joblib.dump((predicted, actual), "pred_vs_actual.jbl") - torch.save(secondary_learner.state_dict(), igf_model_path) - best_test_loss = q_loss2 - - secondary_learner.train() - return secondary_learner - - -class SecondaryLearner(nn.Module): - """ - Our secondary learner - """ - - def __init__(self, model): - """ - We use a simple convolutional network as our secondary learner - - Args: - model: Pre-trained GPT2 model - """ - # embeddings are from the pretrained model - super(SecondaryLearner, self).__init__() - self.embeddings = model.transformer.wte - self.embeddings.weight = copy.deepcopy(model.transformer.wte.weight) - self.conv = nn.Conv1d(self.embeddings.weight.size(1), 256, 3, padding=1) - self.fc = nn.Sequential(nn.Linear(256, 32), nn.Dropout(p=0.1), nn.Linear(32, 32), nn.Linear(32, 1)) - - def forward(self, context): - """ - Forward pass through the secondary learner - - Args: - context: Context input to the secondary learner - - Returns: - tensor after squeeze operation - - """ - pooled = torch.max(self.conv(self.embeddings(context).squeeze(1).transpose(1, 2)), 2)[0] - qs = self.fc(pooled) - return qs.squeeze(1) - - @classmethod - def from_pretrained(cls, state_path, model): - """ - Load the secondary learner - - Args: - state_path: Path to save secondary learner - model: Pretrained GPT-2 - - Returns: - secondary learner - """ - - secondary_learner = cls(model) # this calls __init__ - state_dict = torch.load(state_path) - secondary_learner.load_state_dict(state_dict) - secondary_learner.embeddings = model.transformer.wte - secondary_learner.embeddings.weight = copy.deepcopy(model.transformer.wte.weight) - return secondary_learner diff --git a/examples/research_projects/information-gain-filtration/requirements.txt b/examples/research_projects/information-gain-filtration/requirements.txt deleted file mode 100644 index 2aa3227637c..00000000000 --- a/examples/research_projects/information-gain-filtration/requirements.txt +++ /dev/null @@ -1,6 +0,0 
@@ -matplotlib -numpy>=1.17.2 -joblib>=0.13.2 -scipy -torch>=1.10.1 -transformers>=3.5 \ No newline at end of file diff --git a/examples/research_projects/information-gain-filtration/result_igf.png b/examples/research_projects/information-gain-filtration/result_igf.png deleted file mode 100644 index 10bb0b7d681630c668d11dec6c6606b9934f168e..0000000000000000000000000000000000000000 GIT binary patch (binary data of the deleted result_igf.png figure omitted)

E<}*{Z8@Nk?`}NtLW+#=}o8G>@q33=J zv_SEIuMg^tB zSb`UO^u45(4&5;X`Y11*gdh38p$#5iZ=`G60UZjxHofZ`rq!U4 z1qa4WxH7e1DeXb?iI%Ne3G%aMfOf$evu%v;WGPOqsd)wOByJ45SdC5$5T85)ME7bR zloFAtCP-U)pFB-(eT8obXG9WxGnk{Q)_gi;wk2^~rj+w1Za-2{-rjJZG6|97%E4Dv z7u4;SCFS;U;rx~dINARiAO?Jd#f6u6%7gtynVpj>3TfSgBpQ&8i?iyNBpgNF0V?qk z-u4^wA~`gDop3dWA|X(}NFbURpwW7=ny3o@s{bj8V4@}H92SI3L`LQ?+J(Y94%lnf zt|{G#_f`shJn5y-nu1u{E)@Z!#wL2fl3%NCPkIf|9sjre^=^wf&=dP_>IMnI*{Pi3 ze)|IaP9SOS=EFBQJN`4Qamdsa}18}f7Tb^)SF}Rpt!#jkFfE1@fQMBa9rl~qmyGsZGGDLF;J@V1e^DeCMOV;I2~6kHog`z3`c?{O3rtM>w*@Dm)tH0= zSp7~ys7p_-?0>G1QTiGMPl9IJ!Y@DJnc(1VmxjtN7t@;OdM_m8ZpEzAbWiepfxdpx zH@ErjY6nE^@gWTO+kr*DWmg69ANPRD2k2&qX1a-a8?)YRUr*5G@uS93Kj3(PS#$jv z0l7`RiIE{8piYX9!KIQg=4!j~T2FXA`<7c=F5Xt*e1GwyOgQpZpG*M9JjMS2*PQ>|K^+;J~6DGLQ#~`|cXs z3TZjVcRzDiirLq8Ut&*ZYg=`xVWk}(xa_2dG4x$j+#@^kIz??%@SC4es_k|>fBdhx z=z>?<$*^ld2B$7$sk#DSC!l zmhtrABaZ;M{a|aVzk$9a!P!0EgC*Cq@8*h|MF8YczPmZmuI2`!*4Fu84j4~CTsBmMip5`wUcql|?mORoYu<6?K}3d0zCz9x@hnf5 z>!#M{fOZ+_p8#wB^-ukBsCw1ipb)N_$djmlf}3JJ(6|IzWXuDtY32$I)J|LG z($v^BlmQ)e0Skb3p1$*j1&>OC1?brOaP<058UeKFbj6VT`Y^-`{Ep&Tw~JpV_o!`F zRPNc}S3&eI2+*U4Wzz)fd-)>g+Oz%dQ3iJ3AoO%u%vW4qe0wSDH*^^;aq-^vch3;q z%=QE@TtRYBGXQnv&bPgWGObba6MI_Q3K+;U=Yyf*UT{)BcJM!u6<9jcm*uW=@RO|?XtE0byvnuOAh&VdiPQK@<7 zS?MzNVCp*}!u<4|>mskhO4o(&MuzT-oMwIp>R^f5OjBw)IG;TAx4c&msdeks?=RqZ zE?Z0#d!PqUv$?_u%nBZRiVtLX59oOjNLW>Vmc7^n#HMLLUr<+e2++D?OCOrCdhAY) zWR04g8crPm^f(OL`u{Hdlvb=T#By@{y5Q7);iPAglUn|cnUD7SIw`Gup>Hnd*WU6B zC&_J<0|>7>lD zpkYI}CjCE#)dsYSMp8J1!8e7qes}$k>mj68|K0b}%(O%FH_+>t>GZQ~z)Nj6;H3q` z4nb+w7TVHuE&vQcER~%v9<4u2g}X=S=>MT#l^G&7v+;E7M$B@sJ{LGEO8*>B+^0P4 z8B6p^)9kPdFCF9d(Kq~miTILfFuyo(AD;5=kg9-QRKKCV$^gYX5sSRAz0nA--;-_Q zKEK5R8myb++@9(F&1lpG-%A7>XS?`3=r^H2OPn}SVbIyELo}y=?%zg`L^^U-SjZ4b zCL~|tsIg^<)X}%H$ulQHS-U@fxqG0HNRIvg?OHbpAIB8NSY*<%;%;qbv~y^BlFaWC z689K=ejG_eiUs~X^);7)%J?Bo4S;XPYZ}Y;#F2ain1QXzs1)i}hSh{k&DaFuMXCFs z7i-Qf2X2?`sb#C0`${%cu6#;MdpwJakNq*wHN zwUfhF{)U-kQEhqAFMmh@SA4drIe5nLq3>&QI56Vhj6*!$?K2H5@|kA7n4qjjc<{(Y z#3O$5-+}4tvwv%u)zG0V5v$f;HeB%RHp{~ORt)mol+J2(1aWSMAXfbF==f(OQsA0ZvoLqF)95TUvy$0gbb)$=xe2ShA^Ru>pHr&zqX~XidvBlz8gXy!!6cRKW1IHN zuEZB8f2-VAZ4Y(Fk)bZSwXT^HnUW(;tMuv97?~8asF7^cYH8I0B0hdBpj1>oG9rQ4 zB0ER;6Nj7W*y6FLT_KSeSisZLIG6H6XQBS){N2e5u)GYADT>FqiM(w7K4g-e*>X9H z#pyEht*B`;#bjj2$RaS+<^Gs1buJ5av7kR73Wr*1AtY*7eK;p;XR_3`n>ZTdVMMpg z6DF-Z#C|L#IVa3Bc`L2Jsu|(d2tI1LyRqDV+vWZtb13`cyxs9&KB#W!nY+`!P}Kpx zSnpn_8d?qDBZj5kI~{ABZ5G0Hop(46TI}VKRMuV~$9T`Ul;XSF>s;H1)QKLXn05NJ zJLA1CFfOANpZ=}KyDhcY1ky#zk0{; z`O;yntOlL02WI4&)|14Mr)Fzc-T`ijdkBDSFDXCbo1; zs-78{1vvGxYCf?34mu<@D7u|TKPGXK(8OG2SA*~ks{RHJ6-{p1^zyF<7MjDuUp}It zC#~rWK;{Ae1wgm?I!&&|4MO^qa`iM2&?R5gdl{R1=JA4G{~z4Yk&}&u4Q&tpZqu_- zH}94XwBbI}1~q41ZuccqA4t(ZjK%f&s5Bu^W+lHva))k8S%P)U6=e@D&1HVpCpdUb#iDR~w-?TmX|r_S9XFz`%`Vc->f@k*Zv z;uEH2QsY$C)MwwjOy^Sx_TsxH5Du%xq7FQ($70gln6VEZ+5HdXv*M{lRmpG|e`}xo z`A?xwBfbKS8EtSsHL(n6;5}XnI#31LG}8@kV+8=q!01g(*Bv9vMA~cfId7m`SiK&% zVyp5?Z(0|K;Yu?2n3K$(d~n6&afI>j31@m})%F5XPbTq+Cq{CA%Q$8+ZXm`Fieyap zI+`$pGrN+i$rd>~Q*pm_0ZdL>d3iDyUXmCCu)fk?4rG7ot63t^i;g#Fnnfs6V1jM{ z01W*!vf^BX`S5$z3OwF%_{$Oj!f>yTYX*u;*~2m-3VuTk1bp8%LcESM^mq8fsVj&a zjm8&6Mq26n?7Q~mdx`HSxz{(6ay<7_yZ<#UVR)@7_>E(6!3fLWuuZmD>pY!ZXBZXPrz_(>CEzrMpQTS+uOb7cKcOsc<&AJl`oY+KW&6d>)m=Li*n{z`fC+R1au+smxwuStvfatNWfwfy?&=u}0>-MSjoh z5cT7!!_kKUOk=TTfLkGYR=KxGCFqn91lj_eSbf0B)>eedNNEvJ$=qrd_1mGQ2-2rxB6M#4YFBYJGcn1T_XD8=&QnUD_;2X0qhMeiuryB#^TT32s`!m97 zs$G8j-t{-HOjL?*z4w)l7pxo_f@gy&0?Pll+kQY76rXKxf?Y1BMj;${1q1M22#?5H zwp*ZQz-zy8d+`45|LN;1qpI4%c8>`PN($1_ASKBe2#DabIm#5=Y0YptkLN1(!z*)u^ax?CSQ?Ph&iIRf2Gx4xFBihe_?k 
zszuAP`XEb~1$wls)*q(u$c?XHBpx;aSsOVhTkCT`O-`fq1$rs_k)kf#vH1Z$RsMES z1`#o6>|l)*aD3cSITCN$Z$ZP6eZ3-qlmaKyg#_%A_a9j+=iFDvc=D6Tx}F?-%UtH@ z1A>SYR?$0YVL+PB7XA&$lW25Oe0{wh-h)m->ZJ1ZFLH!Ghf$A@Ic(FglXcSf(Q?Vy zS#&G~+U1RR*yTB8Q+3qu{*GH-sheY8iA-P0c0?Q;>L;IlS{MJGy~+o1NalAc{%ei$ zL(Ir$!_HoAV-U#3DU7;fq^c*OdeUJDGd3(5u(sL9=qE0Ll^{AkC9x6f?8- zfzFFjw$M9v{)UX;p9Tc{NEzN<>YF)c{A@h=w(8fh6vGlqXF`}rB{?3i7wA)gC58vq zuPXhpSBq-qhbgsje+oU2hF7`BDn4k5V+_~<)(?wTygnoK?j~c58V3`G!}(XkZ&B6F+jUS)z{-w`+?rD-Sv25NGyZj zpe+Db=2B4&*I)YLO&au8DT7u%<<+Xh^|nXY)v=4tZEX-XBXsd1VY^yhWTz|q|X-EzWT?44v-Phj;M2G}AF=R-etCd=d+ zbiaPbdKUVLj8?cCD4&xoWq8C9ZSYyx0AjI!0P$&*^eD7$Jp5WqoKTx(nEFJ>eabdt z=2|YLaQ>rYSQ-y~FxReqxES)}j=Tx+_+ihz#>-+CK+j32VX50;JL2KoXFn*B1V$Jy z=#{bp>OK;tuqKj}-4{O=g_^zBb=3#sckb`$>lGZ_elVh2vR|vMw!&7kn>_u+>fe!1 z%{92K0?DhL@M>q&syIHTTF`Qtl|u)KQv%sDVqQD3AUvj*tKG5D9q#P?U}h&?>$cF> z#yzu+i|?*@-1%);N;D1~6<*>p6S{J4`qRN>Z5``yg=HDC8RwytWxKMx05&Y3w2s7` z1-no;FhwvWy$og+5wS3N6@mYgPfJtR{XpD4niQ3jC4H?~1f#o#JB0y%wU%z9{ak>B z{oC5A!xsq-@xlXH8rd$ijvRpd00$LE1QiW-M%^@m)Vupxyl2m4joVeiq+-^bDm|Xa zKVnsfdWPA6@2LcO3G_2`hdI#y9wNR!QwrO8k(+pYB3jufkTujA)A(sT;Y}t$!6bfM zQf@szC%6@ZUJuTG(MZ9b zl?x%m;Tk}DDE0>WxTuVtCmc0XEVUaH%kO+QJolSX?0^81u*6UaVofO|4ZBzz7wRZ(Do+_(2?R>G2tV^gYT6Y^$VuVd}{ve#)R73vPA%KeQ+e4Q9N zpIFbosNq|ATx%;d&?Ae~x=RKy7hawjp2MW&6Duif(LdZ>F`?0K&$t5*%W}T_@nKn4 z`T62rU_sw$fVh^?-eTLgr;D$_?s~a^DFujP36CB@LZ6X^&eYfwl^A-Vy?=`w32&$3 zZWA-^)RyaB!Rw0c9B!jJhDi|sMzX0G_#D>y@r(ziKq0+Pg%-MM1#7MNn zNo?!>Y0ELb4spJk1^(8_c)OOktV zy0|ce!)8+)En)}15VUg(<=JEvbf@78{`>NVGh_;xzL5g@L#0Mf!%CD)?-aA*;}t0K z@C)1p9C0&c>h!+r|zU_bs2gQl~G6Kbw@*U;f+gUYbMObc*%M3ei+$7p92fN zsm7qo4tcf^t|>=1u0%quS*}A$`CHdM)bM`_Hu!+zk%GQ9E0%hIuO%1(dr$&Bq*+#Z z20$T&%#37DHs6h?G*uwkHHv---QOkATlrpWI9~%Dm zCB|paboD_s#jFJbOlPnfL6vB`T?^S3q4RjTJ{g`9@3o zmY(E5{E4S*^l`%3nA4TN|~qIB~4tRWcM>J`uX>$ zc8<90@+pS%f7hVX4q@u3>n>qwTy07ymAffwlsSdgasSiW^vv`!ud{RN4dR>kwqymQ zN2p87+6UJ%MOGw?FTB zy!a8fG{e6iX^<6A(f@gQj{s8qAcML#`36y8aa{4cq=AOzqnGCQ>vNbJ6#-XYr1D8g z?5tPg5*#&*z}&Ixh>1qBhD&+HJB*#`{jPqFkhLbLPJtF8Zv!d>3N>1-AoxmGbRR~k z=k*25cKIzR1p?zXNYnV=hZWDiFrW97Xn+5b!FOto<*15QE@08f%YS?oa=k5fYGs4S z>OZu+%>HH_Cll#Y;yMsLZH6BAEBPL-4l4HFNuh={3(Q%7=4D!PX4#jOPDp5u_(A91Tczb?KO+Q(X zwZoI*u9jLvnbgy$rVs?W4}q(4_nf@A+u9E0)f+_{cZKzCY|Ats{JUTj0Jft#q$uOv zT}v?U;8?8JwP0=U<2l*L3{O(}1U#1he9S<_fE)Z)1qFk22actD8HI`LD0D#LOoy`q zC;Q_EXsHxmOl6=Yn|rXx9N|Y)Dr&fNkV3~hD6K2x1k2;j8Bu1Vl?ds;Ty`p^iEnV67d2{eIZpXQUpIBblFR_Tl&rz zQBcyz_6vuH%W{eXYlP$H8xWTGQK_ zec?L@d!YfeW7i=w_u`fPu!s83W;H9!645;O7v4C9C$R8b*6X=LTW&5X#DPWG2`KrK z4y`*c$cVbjgy(3P*2^8j(`XdQ9E|SY?5>5)V8Meb+1E3l!FIbL}UWpxnO4)xQ7w(o}$Da9h0KWN%)tU*Jd< zp&Z5t?&EDJVIsy|+J`bo$in7wvDJT8kfPP^3Hy2|X{zl(R&x}A7~(8P&dvnb6O@{R z{`L-QV&b~4OXYmou#xSO4|(8)%}zvTP$Y$+7q02er_N6=$$8c9NOAZv%@_!zXslj(z-78>TrS0piuK?1CIJ5V z9*XSwfLf6z!2uv*E9%7)m?yFs%@}9Ng1#P(Sh;epyb`D)kDli_ugxm}E+KIF2>YQ+ zy|pn1jQ!SHe`a|n`gQ2kIer|ld3{{{4?QnCRR;hP`&>h-O1TV?CTRluKkq^P1l`YU zoPb0sc_Zym$NLZWo|Kuf$Nw~@rD!ky^g4xq5BGAABUdsrSsd?r8IBrSeV!cZ;!da3d3VyY6|- zejF#*CVa60YJOb1pH-$K?YMI44Scr6cHkZs>^olqlGa^s4q?I|Im&ux2Diz2molv7 zrC-5yZX(O0`O58gdbk=bvf_M@P34N`BL8fl2y@+vj05FK{7N^IKXibrg}-=5St-L_ zpP<_AL$}AoD^bWSu7rDCJq6pcHR81mW`{Yu>aMWeOxw{~KEJkRh$eYSRVu}DoF#Y1 zzsGpY*&6%x?f@*x5Mfk(VS!UFMzdb$Xzx35^SnJeAy0bZnC!!Okk*jB8D78%RHI2K zpSuYZ`k>yI7%dq_cHT2pHK8>(FN))J920BG9&A~I&wNzC4`$ROoi5}t!-SN7-KGi9 z^WT_Ql2XfUsbEmyulkvL!?7mRYzswfQ2a9_8blCr54v^FW-21?9AZ}dOt#20l=oj$!O0J_; z0vLh%@wQKz;+HIsde%P%C7UsP z?ys;^_D4W&SstvKEY}%r3im;bn#k^^#VZkUIoh=>OAtPdz5CI>b41Rs2ZhOB`WK!k zb}G?kdoK-Px^LBAilU90G+MpTBgQ{E@vH0QudCI~7YqSb+0wBq!!j(K zyI;_lUwtvCDggu{MBzTNzQ?MO?lrJQ?_> 
z0$&q1Cfwhhkzu?D-MO)9H)T{qp0i#`{>q`sV(JI;0&jCUtf*b4sVg}9B)}!=sE)@a zZw@mNW)sV(Q6cY1$iu&2-!l0ew3juUlTVvXn@_)i_T46)PKzrXz=-#4uX_-6AHJ@) z6MQor%J7{`AA&p>TJ?hfOgM)%$F6U;(#x+1*^npJZ;}Ses94}{IN<)GGSaT^%ZquN z)+fN2BfQ#exc%zN=k!Gks@=+;BHfJ2`OB_Genq?CzLQ9OKR3N%tL z?X8d!YL;UcHJY)rpMnez@G!7VkM$yjLF4@q^ipgJjg(`9UoOpzt%YhVZ`3Fb*MetQ z5Sw?jd^=*cx2NrTIom?t0}-tt#2OAvtymsB#(fpeo>U0c4a5738nuzoZ<3t9IgSL?FBY>Om>!M>^aTY|J@P3g@iRe4H7PCkn;aVUZ!ar3((iV;9Gs z`t>{hi`NY8Y`lZQWMQROfr1H2Q134D;Rp%ENip7J^66ciREfS=vP*fkx!(B`y${62 zjkm_%ET)~rgtl2F#&!i((g=k9!jLe5wh)3&y0!#;N^Df_wirTCApAjET3ll>1%*(d zDR{Cypkd*PzMlQ@pEnKx635 z?TldbB->gix%c64%f}efDknCRPB1GWq$AVq0~mK`VXpO^VOgrGN-b_AHsvozU+H>2 zua|_EuxpdFu~&tB9UJePE^B$P~HqmYZko?UC|{=(A85$;rONgHfG$8aeT9!Vd!}_&kXPfRkh(#yyT`l|7-D+RGHh zIsWjsuUls|%coqyC=o`SBN@b05@T~G5*Q(_#Rk4jm) znesPl-}Hhl(zcaPvbsdCq&E59F?wAjr5-L1QrjV_L}gih=?ff_3?2)k%Qt1Fi2tQ* zzT1la_WeUM!;msS-Pj|6iLGn%JWQC{wN!0e0eUaFLK|MGPr13S=9wJHDXddZ3-%A@6vxAl`e%JC$zMb*@ z{OFW#l%)LK%gS_S>g2H4me99Sk#7%RN1t$lXN|TrdF`&sTeb3#uTWwr^x^+`U4>O2 z9*RJ0bx^#9er3s8>PQ=ldl!Nuti;69Qv<3|b8EA<;?Ec53`nQK;M9ekj80=kKc4LGuwGez| zF`WEQyPv)f@kTx${vc+<#~-WnZ$wpmLe*Z(npZO#{Wq#6>9Mwu=zGk!SL#*`Cu+zl z4~zfCb_zIH&4QK)7~V~x`zi0_!MYx-aIh72lNoL#u{~zD(uS4KUsEhFcyK3QFyFD8 zW}T!*JPzH_j$64R?=+htIlA$^W^wCb(IwJkBGmBNiMw?Pf${ybg1MuWKH*&H@XhUB z7ys~7*@P##K1jYWj!p|2s5TjZC3Tq!;B<#Ds5!XoBFk(6BzOMrtMrWBXUvflT)jGb zcSU4%bMGO#%|MtyDMf98yLZdbYLA@O#eyg-$Ab49(e{Yf^Z19wM73DbfElxREj}yt zz41MnBt0%H35+v%9JBqf)TygP7NU+~J2zRBz?KBb28gqqWU*{7V$&U{alb2jrdWp6 zv(hS`pPQiam)amV2E2*;Ry5vzA`mXzh%!!{fdkx!o99db_84gz{6H6Wj$*fC7O|GxYCFMFfK3^Br10U=wDyj_E zXiIm}xm}YOvnO#tsekw*ijB1ZGdQDecRdGo9WD&Hv}Tgcrhqowzj3l#;rfkBCn)Q0fOqGUblb6wg^-lLki=OgocqzkbqFzT2 zlO9G~$z^YJX-}n0HZyxDEyv&W4IDCxrFI|RWK7rC8v`Rnx8CK~5qhaao-dybp$gv} z`@9FN)P1g3tPwzi@Crhd#TRWUn^QkcJW$vcvyle)rKaaou`QJ`7{qv-Jq z3^83Hl@&(ahkMI^!FWkBt7!j&^-a_0F!!qqXakbkeq+zoJ#gC_!NPwIT>=K^;{cJY zgLGlztiC370tyP0Pbs};Ayg1ECsR2(l_2JJGN-}y8eY2Mwy zEOUq-5nW7MW7IdtsYG7pm5i0SR^ok0+-~EWhgj2--@F<7Bb6O1 z&5bJ&;lKv^*N?pkA%Z&3LU)cO$};$6^FBr|?+w*fYq2VY@Er4Sj!)+v2Hz(2J!b*q z`wW#N5grmL$)DS=b7d0em$Ki?7`W`Lv)Y-5HXm96376H?OZqjO1@Yw6=6D+_B~_&Q z34or=y(zrPjaP>G9;N)QLQPTg@Li@8=*Yobj-b(Or%C3; ztS1djK0L_UrrkX0qrPS75Z&+Z8AQ|uIu6$wQ7lb`o<$*r^tXP6B`O-H_dAzFN!PVW z?4&mDWK&zIrVtCdsK{S$_o7D~H$P^V;_|GX3p^p2{mCu9v8vpVB~MS#oFd+1v@<%n z#+sq|<4rw!TGeD$TGrW($2Bt4kr zG?$aY1p$Xi1U)Mu>Pvi=9`7$n@rD?jEE17BG_?hKE~@f7)3kN&W!}^^dx%HY)V!-i z6HhO7o!)oI!!>~8LjE$F=`vORtE?}$8``fulTM?Fg#ww!RJTQKS_ zkNSyw4=ev|)bzOBc@9%x&&!a7`6pn00D^LFRARFdA*Ma6c{Shyv37JW_i9=Jz?9nS<|-%P zOB2x70e?T*+AGQ7;fyyxrZOxEC*o6iI*@L?>X-Di3h!mu83dg1dG)CW9CI3>`w8j(-@tePC6Q3Dlh~vQv|KE{q9xo;dej# z;U=~MD80~74E-l7v*>?B1%b>J<_AyFD#X2a7;AM;pO8Rd>R2N^^Pu2fC&l8egodUF zEbzq3YH`T9Dr_+UlqSYNFo+<0oxH&R`G`F%0%Eu>ojwS+txwz5hC}KI6=fhI?DH}F zCyXk(Y$6tFdpss%uHU-$vk$|q)j|euN)X!g!zMBH8#oJY9tN_7`$W4r(hYBdR}OLv zL(9g}L_4y!6KW2X=cU%pJLFT?@|h{+9I9R?2^D*u$kjj1LofU^(F|n<90gaLE)@4{BCCnEzj3EJ-?c9UYZ^%zH4HWFo68+%Trm7C$lzQv4w8VUo>B%?eYOcT}b`7ZAhU@mD`1&V>j zHYHM00xB8s#FbHw4sUFiA@c!PuxfQ%jTqXJ9g|7vr+sObk2itKRAU9qaZfYxhwT@4 zwnH3cUCcZ1>K~cztZQhLgYJcOsOnIP;9;lDob*y-dQ)%zi)`LyZuA;dV=4 zV&G9VT14ByO&!`XYwPSU1!MJWb&&THh3fLtb*>7Lu z?liuS{KL7T2jveMJ~Ky+`n>0*Wxjf8M!wA*NEs4+>1zb&`P9`3IM{L2o7YOXUu&r4 zMHJrdQ&Rr)A>Q4Z5x+U{_!eHI$I`+5u>YUm299Wn<)%>_iF0C)9hpp|pNu7HOHB2f zn{VCZw72KqgUo&<6JI;se`NJwoajP+-d~C*%9^Xb&1t0LVuN+Q4?k^0|AkVH&i!x# zm{EWNM^Us|uAXwEFz&!2v;5eB_De`-`o!MI*1aX(mV{ZRDS--hNg>J_!$= zQnxA#%Tb=i16al{$nRj5F4z}ei0UT3(MZ*N(#c}X^K!EYbUneDgj7A^{R~#oCjK2A zQ}&WjfywnQpW|qha57oCwg}06Of`Wtr;=xhBfkS5;M}VRH<|hkwfGrjs!)LXSJpmx~)x&QL7GFoWfgoih*N~w|xD3bpBz)Q3pruJ;Kp9 
zkIo1?DvKfv{V_UAXNguJ168Y|dIO#Lj^RB`Cna%SUe#*yrzI4$IoH&*bxrggrIxg@UyFNgfn)KRqSx;Q20#q)(1^HVb zSc^lh)8w{GD;qy2IHU5M$9ufF+fSzKdUQ-^hilPi^IjNd0@DwlkW&42RgEU~c9qQZ z49$|vrBEtVX6+hhO^?}75lP#+is9Ivm*u-2k@|jIh`rBQZ)veN1}pxB2HW3LFj}@A z=xnNBiN*O?5;rS%*mLui*P}zar!B3iYeHxjVum2Q*XUQZiTDFo$|kab=%=LD4#DR% zYAa7sE9L5!q|bAn@o}Z~JafFZU7*WVJCU_yu6%Cv+53E@U%fsJ zc`m#h!^0@D161QYo0lOWC=T#Txg@EU2viBSr|R?xUp`#KZMKsKWBcf#Uy7vdo!6@n zt0|K19DbFO3G`OmGiu15`DzC<7)-XnrVJd(eZ|*{0FIUVZTMt@4*29mAwb_9Ajf1JXbYPxDSKCC7If`9e9hjPngM zxLdd8&3bFRHal4gV)sh*b2{nLc^EgAsW$if0Ph)9y+32DkaQrC{JUe-4HoEgWRb@Dr#^IdSz~f zSSPP^!|01F4?T*y9-74f_|H(r+S$LNIFugjkUsYN!}V{>$K zfavxcR=&_a0jX}eWEi6^qnj%tE*6;i2C*{s#p~f+PEpMFI?FU%z%u2CC_5$Mm^q<4 z-etx;^xX1plc&F6cDc$AOfS|pm8@)dH|y$4IMT)@oTZ`}2+3(6nb0)2?l4hK7%K zzD+NYMSDfTR+1~Z7et|j$HTr#!XWmPXDsOhGPyxOi-^+<(tc+uj6@zChHN0(A#Jm; zy@16D^1hz~)*na4EJ~ZR3jz-NYNa!|Nkqr(c^)e@*c_5TJao0NpFjgKMWW8XWmtMn z@t7bI)0K@H4syIT15!7Gwh$cN9UOj<$d{poR28(pT zYwMeWLN|nzSc+%bk=##=fKYS#O%m@%nPNM*o4%Yzg&)V=jv-$ zj`+;0U&Oj3XWv=hFW3LFARza2+n9#7%MN3V#p&Iw8xhpH=2`IlkXjmV?^%(P|K5&N z$4BX;efHXWc(*gPgU6AHga+mq)mfb`RvU*!Fd_;0p>Q8_MV=+7oPH7Y7-RhWREIft zONWwnTaRV9HlZ_JA%J6F5RQm6d}ysPh=nk<6!&xAEmX$I?RAqqoMnvYNJ``^JHjb` zU;?^wly0dVS&IUmwQvhN2=x$k+EJXA73()|s{T7xOS&$r&v4Fvvc)!SoNkXRyj1;8 zg(Mbpp1m*$cD}s1+*^$TJILMjbTS9Gh5DnFowEMNXRhTSEuu%GaT!QI--~lov1M*N zyyttaurp)&P=mNeoqg_5^xkrt`0m29aA9&SFEffdKF63;FWRDv`vJ5f=z`GWh;~XM zqDXjCMicI)T3bSHSK%*4g7xC42I=l=l0EM2u7Ojxu>jVbo!aQ_Bn?j1&W5Kx+hiqY%bxltq#M5S{)*(HU6=4;NXETk*jDi3Ji zpwhsFgM!enWFhZ3JIb%?6nyy4pyrSrk?KVKTjykDmAl5JuJ3ts_WihdJ3JdZTxL!c zB?h;(*Kls)-MR0ToQJJqo?n{Ihp&_H4rk=;SY8)S*Q^9qZbZEuX4D`SvJi{Ac@26G zS(=>loQGJF4x2}yBV{U$(x1pKp@pj%4dfV&GAyxSw+*}-V@rNSs~PqKPry^$9LX)% zc#HOIcChn%MlyEY<#DWzPF?ruyjQ&HK+^}Y=ZbHA#_M(GR$@;(nlK7)H9M(8CkgXd z1l;>Ljy1+4x-poop{-uM*P@3X`)SidZ)Am@-+ab0BxHv|2T?g&uAwu@H19c| zehAIsWGmZojov2sRzjku_5DdPH03AWt~x|^p~Lp^U$`{I(Ei!1yOxryv#NAfvO{gc^yRODE?w#|IV{L`z^z9@y z^_CZe=F^acU#UwU)9FLUdV6hYF70p9Y1$L_Uv|F=(-3}J%eX~xk~_+6{u|ASF{Y%$ z%hHN?Y*)*;zRB1!m3kV(GG2)NqEl~K-rs;4uwW5wfjBe_aoK5oGvUwES`^n)(Qk~pWS4%L<6AD8ot1oDF8BS^f;;~H#5 zA@;jd@Uj*(4_L1*o7UR_n?otXIMPaJf=h_u_+FV8w@5!B8~ca07y_q)nxd78&m~>+ zXg6FLJ-vjqusvE2>-!D8U)cb^FbZosV>Xzu#LA3v`JJCl{FYXC?;x+=2OOMh?Sb{) z%N#Kd@px1}K=E%nY{OAVm}ES-*dFKS=Fz>I7!PubYC*aG9Bg-yslX^CuEK;I&4WF& zsPQu)xo!$&uI-@atrYc9yqQcP37Ky7yRfsq6tuWtKWX9Z zsK2NS=kYzEWlEv)&Uwlfu1qntX7muGOOAJs6?H-Aj+$>b$vVIxeF!uOkP9?G)ljU- z19u)yay#TPG93TVKzr*Bh!d7Di^v5-nQJmgg~HcF!RQt5vt?zg9iCN8 z<$}%?1!&z<`pCpF2T^dj|84@%XCjoyFjW}7qB1-ZIIAnYz=s_-`-hapNgXZ{5vvKQ zN{^|!j~C*XT<%j&{{EJ!!9BOYj~Gk=FgQwSCoN)!m2@J2kKII_)v>S_(ZHO-=dOO* zk*;gF=CXsv4GyN&fDy|I5@u|M0SWWDft!_*c1eys62%;KgvuFzwf zNkcC?XorMH^7JVo&c}7No50O}W2A6`4lTOs>#_Idd<@b82klWVx zy7}4HYye0725ug+UnqhTO_?*|?A}O)kyChDyFjQ3^X;W|;{4ao#9CGIPy13Ch=fX@ zVI)a{(}|o;+cb4`h=__kl^9OzdQ<2W_$ns@D!b-l8LF^sp>qK@-|?k;E~I1Sngexa zYkP`G1y|0-iUOm;&oYrC8e7bdw<6*kw(J3UTcEYZt6vAwMIWsz+OhG176qtCO@JB; z@>?i@iri{WpLB0)PUq=B;&QU3!mFA>spqhYH=>loDxR9Uzf-ly#gk; zi__T5Muc9Cg~mG&MW)IYWM0ysR$RUGc%+|qomd%X*6Mwc=5Ij;qyO>W1`R)Ll`iH>~MIv{!NCEv z37pdYSknBT@e~AD0r!?{g>)e{^D)K|U^QWKTB6%9H9DGoHJ`w0DEq~Cce?7YA`_hN z{>PmFG6A3{MWcSl@4lFIoVJ9ypW8Ww{|AuKX?q|A2gmyK704Ljgl~8Mnq|pdE|eFJ z`rm!KyEj`D7!P)Wfgk~7A4O4!2X(IXCC^e)$iy*5h6`OtO2^P46*-OwJ`U4i zu^@aFDmmvQP;YZ4bUn|F=z$6DPf&aV=}H5r zkET%cGBHr4kmY#u=^_va$A^*d*EZF5+Ap;|I*JdH2BLeg<({`$v?6m?0R(agM%M)% zXQ~O;OoN+~{RoIS_5GVzjEqCJFMIw3`1U`PK?Yxfr5mp10GMFqmeXorbmmSBd=NjO za`SO{usGh6XyDVJbV`z~>>6Q-`<+ms%T{GL1d5Gh6rmaHWe9F=HQoFz*bcl91b*>$T^2!704< zNrvTsK9_hMgv|{4&L4nb!XH`dmoD(~oDhETpzu$u$U@@XwGNw0@Q(}XAXAkJBpyRO 
z9xK$_d`f&$qVVuLQc8r!sJ=idHG?v?-SXQX zqu`pSxSf@Q{s0o=+wpkLYT%JD7NaILXt-crp0mVp!56hFVj62wi?5c@{ z;MZ`rOz8PX;0dwjI&mN8XQ(1zFtyI1h<9xP3B+B}cyz}&pxToGqSD!-{Ue|+r6&A| z`mq}5{`$V6DCfws&Sfq>#25f^#^VAx7`H%XhI8@y%8=^fhxNQr4TyH^BP3~Ao<|XR z)AAsr-B=OK`};%L7~HcC$|7U{Wx=ZydQs~;bJU z@Zy!ng@e64h#gk~DD>xJTXe2CKqKkt{Z8TAv|%vU+H0m!WtFOJ-^U48J6quUf%owE zb7JTN+E|;@J0n|QV6?!6?fsX0kE^o-sSBHv-|y?R2+-Jf{mDGl0n0oFoImfC$@cg? zA!->zfgXno-&9NK2mHi3{0E0dR&v-Mso?`u6_9W&RmK(M&th9*VfglRuN6G%*0Fyd zln7wnVF$v~y>19phm5|4mv1ateAxgooj)xu(R8aXKhekDf9R9@VX6I*-{-j|j74k# zA?xI5;N);)fFr+HRVKOM@v4k{IhDuOpzY$*U?YgiUSqx1w{%(f`B~}$Q-1M%Okr4% z)yp4orYMAY?@Q85BPk+WCe781Qlpz$v=?~%I^D)GH@(MG0s{F{JoZ}_gaE~w3i{h7 zX%Y)_dp5|0NYNY+)izs0x#toF{UsA87eQGQDISbRFjDMA)?%bvtduQHfx;Z8gR~9G z!dT?6Wsp__;O{fc=fwG)ys|ljvMFW81CHT-7%9Fs6c_ug7;>`Y;g)J7KDiR%r0s^) zfPYP+s>Abv1gj$va6l6fxsmC38}}w%TV_$lP_W$R@XNT^&7wx`u>-o11pZNMzn$suet=eN4d4cawvr8^ zG_owhy)z&mkzYU1nnNG9{#+t!#9>Blrqh5yGTKA#?h|?}`r^x?;%G$fEF=#_xO6vR zAVZ2fsvpto)H+OD{tUqe2Dh@+Pjf6~vMv^URX!v0EUT~)pOZ6fPQHPU6)N~_&t*)7 zUdlL7h3p?3;dBYGD28-uQcQx^-i=(Qz^wEEyDF}@VhrlHi~>6F3oH?^%z2-O0$uG{PRTk7e3C6ZSkKDjww{6ve3 zAQKa8z|57t?YspSjG6@3ziS|l$OvX;V~yvCI`;ysmBUeG1?qr3Bt&hTMIs{W%w$a` z55xzmxYX?6Kykkxr879TQYz;bbc)Jqq0F>can@aKdvuE&E2>@2Bcn&Nl3;s@a5 zb)SlJht~`hwO|{YO|b`B z*o0mMd_DKlNl;VfN}+5$u;=eMOQ+jNL%M<7j%dy3DB}JCQU+{HhFT=hEww3O*j|Nn zPP1JH!EUcQ=(XV(s)_X{YUeU@E5K%~8MWZOEIH>9n<)<#w^&4CL8{APEf5`Wlqm*- z*qfyKPRFq>dt{7#7;D+7$Zv}K@rV~ru%a9>dSNr#BVy zC!+-K;;TB`1uD|;#N|`y9TxLS`nTWcg0O+>l=SPu4TCTb(DBD1%$Y$Ub^b|Q2hyjE z#tJpy?AYEvBkz;x!{?itOmTJBT?&7>N=Z4;AAUL~P_}y|bzHPT5%c+jV8U{tlvW|Z zRNVmD_6V`{@9(jbIay>L8B0T8>&AJ>LLZco^=JN$jyV8fheep7XXTnY8qJ5T20MXw zqG$D4RJA+|wjp=7nm>q-*Y$7vR_KNq##O$HkM}6-Q|jw%Vl7y&t_p+gvWF7g9kWer zR=#tiQ_w9NNNAH!h@+00oz+kw&r3NWm7NhuYvXv5^z~0@Ztq+!0>um zp_$j2z0>*{oWC`0z>)~g>RgEy8^dmCd~0+sIVK~sLHvj_n% zeIsD_2gd@RFhD>5eotk;hN_BOJ41Ygj}(k1UC$Ae`6Axf z@ParM{{F=9wv3eC{{4&e3DB9``ZOfg?3 zo*7Px(uSGK7O<6y&m<_PG4)SJFcohn zHvuEQmsM;BfVc4=gg&8HK0&AG1rWOCXtX680?K?GIIc<}oj}*wBIvt?;?-wCs9ZOY zIn(};i5Xzg@q$!NG#;-Z<1nCw>7DrpNjy0OFn3o`)tuQWPC6PFE2{2{4*{!av0|p5 zR7A7qjUITCuaQr3vf4Hi_oWJ*RIAGt;F(1OWSn3%jo96e`ugPtGYq2d*JY1&`5Gu=dx(vTCep!9B)Qrh zi!_;%cE*62VbRLgN?rVOHP53{ir0I=v~FwPayyf!mg&W5BMV`E-zLr^5M(>0tOMbr2qf` diff --git a/examples/research_projects/information-gain-filtration/run_clm_igf.py b/examples/research_projects/information-gain-filtration/run_clm_igf.py deleted file mode 100644 index 74973309c4e..00000000000 --- a/examples/research_projects/information-gain-filtration/run_clm_igf.py +++ /dev/null @@ -1,450 +0,0 @@ -# Copyright 2022 - Intel Corp. All rights reserved. -# Authors: Mayank Kumar Raunak, Javier Turek, Nicole Beckage - -""" -Implementation of a new method for fine-tuning transformer models that we call -Information Gain Filtration 'IGF' on WikiText data set and compared the results -with the standard fine-tuning method - -Steps followed in the code: - -1) Generate a objective dataset of pairs (X, IG(X)). IG(X)--Informativeness of context 'X'. -Our IG (information gain) model is learning to predict the ‘informativeness’ of a particular -context. Informativeness is the change in metric between the model’s accuracy on an -objective set before and after seeing that context. For casual language modeling, the -metric is perplexity. - -2) A secondary learner is trained to infer a function approximation for IG using the dataset -created in (1). - -3) The learner created in (2) is used to inform the fine-tuning process and filter out low informative samples. 
- -Last, a plot is generated to compare the performance of IGF to standard fine-tuning without any filtering - -""" - -# Prerequisite libraries: - -import argparse -import random - -import joblib -import numpy as np -import torch -from igf.igf import ( - SecondaryLearner, - collect_objective_set, - compute_perplexity, - generate_datasets, - load_gpt2, - recopy_gpt2, - set_seed, - train_secondary_learner, -) -from torch.utils.data import DataLoader, RandomSampler - -from transformers import GPT2LMHeadModel - - -def generate_n_pairs( - context_len=32, - max_steps=10, - size_objective_set=100, - min_len=1026, - trim=True, - data_file="data/tokenized_stories_train_wikitext103.jbl", - igf_data_file="igf_context_pairs.jbl", -): - """ - Collecting *n* pairs for training the secondary learner - Args: - context_len: The maximum total input sequence length after tokenization. Sequences longer - than this will be truncated, sequences shorter will be padded - max_steps: To calculate training epochs of secondary learner - size_objective_set: size of objective data set used to create (X,IG(X)) pairs which is the training data for secondary learner - min_len: The minimum length of the article to be used as objective set - trim: If True truncate the context if it exceeds context length - data_file: Tokenized data set split for training and evaluation of model - igf_data_file: file to store (I,IG(X)) paired data set to train secondary learner - - Returns: - Data stored in igf_data_file - - """ - # generates same data everytime - set_seed(3) - # generate train_data and objective_set - train_data, objective_set = generate_datasets( - context_len, data_file, number=size_objective_set, min_len=1026, trim=True - ) - # keeps model same across runs - set_seed(4) - # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights - # can we train on GPU? 
- device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") - - # load pretrained model - model = load_gpt2("openai-community/gpt2").to(device) - print("computing perplexity on objective set") - orig_perp = compute_perplexity(model, objective_set, context_len).item() - print("perplexity on objective set:", orig_perp) - - # collect igf pairs and save to file demo.jbl - collect_objective_set(model, orig_perp, context_len, train_data, objective_set, max_steps, device, igf_data_file) - - # clean up, delete model and data we don't need anymore - del model, train_data, objective_set - torch.cuda.empty_cache() - - -def training_secondary_learner( - secondary_learner_train_data, - secondary_learner_max_epochs=15, - secondary_learner_batch_size=128, - eval_freq=100, - igf_model_path="igf_model.pt", -): - """ - Train the secondary learner - - Args: - secondary_learner_train_data: Data set with (X,IG(X)) pairs to train secondary learner where IG(X) - measure of informativeness and X- context - secondary_learner_max_epochs: Number of epochs to train secondary learner - secondary_learner_batch_size: Batch size to train secondary learner - eval_freq (object): secondary model evaluation can be triggered at eval_freq - igf_model_path: path to store trained secondary learner - - Returns: - Trained secondary learner - """ - - set_seed(42) - - # Load pre-trained model - model = GPT2LMHeadModel.from_pretrained("openai-community/gpt2") - - # Initialize secondary learner to use embedding weights of model - secondary_learner = SecondaryLearner(model) - - # Train secondary learner - secondary_learner = train_secondary_learner( - secondary_learner, - secondary_learner_train_data, - max_epochs=secondary_learner_max_epochs, - batch_size=secondary_learner_batch_size, - eval_freq=100, - igf_model_path=igf_model_path, - ) - - del model, secondary_learner_train_data - torch.cuda.empty_cache() - - return secondary_learner - - -def finetune( - model, - train_dataset, - test_dataset, - context_len=32, - max_steps=1000, - batch_size=16, - threshold=1.0, - recopy_model=recopy_gpt2, - secondary_learner=None, - eval_interval=10, - finetuned_model_name="openai-community/gpt2_finetuned.pt", -): - """ - fine-tune with IGF if secondary_learner is not None, else standard fine-tuning - - Args: - model: pre-trained GPT-2 model - train_dataset: Data set to train GPT-2 model - test_dataset: Evaluate GPT-2 model - context_len: The maximum total input sequence length after tokenization. 
Sequences longer - than this will be truncated, sequences shorter will be padded - max_steps: To calculate training epochs - batch_size: Batch size to train GPT-2 model - threshold: The threshold value used by secondary learner to filter the train_data and allow only" - informative data as input to the model - recopy_model: Reset the model to the original pretrained GPT-2 weights after each iteration - secondary_learner: Selection of IGF as fine-tuning method if not None - eval_interval: number of batches after which decay the selectivity of our secondary learner filter from - 1 standard deviation above average to 1 below average - fine-tuned_model_name: name of the final final-tuned GPT-2 model - - Returns: - Fine-tuned GPT-2 model - - """ - - device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") - train_sampler = RandomSampler(train_dataset) - train_dataloader = DataLoader(train_dataset, sampler=train_sampler) - - num_train_epochs = max_steps // (len(train_dataset)) + 1 - global_step = 0 - context = torch.zeros((1, context_len), dtype=torch.long, device=device) - model, lm_optimizer, lm_scheduler = recopy_model(model, device, max_steps) - - model.train() - if secondary_learner is not None: - secondary_learner.to(device) - secondary_learner.eval() - contexts = [] - examples = 0 - - observed_qs = [] - test_perps = [] - - # Compute the performance of the transformer model at the beginning - real_perp = compute_perplexity(model, test_dataset, context_len) - test_perps.append(real_perp) - print("Test perplexity, step", global_step, ":", real_perp) - for epoch in range(int(num_train_epochs)): - for step, example in enumerate(train_dataloader): - torch.cuda.empty_cache() - start = random.randint(0, example.size(2) - context_len - 1) - context[0, :] = example[0, 0, start : start + context_len] - lm_optimizer.zero_grad() - outputs = model(context, labels=context) - do_backprop = True - - if secondary_learner is not None: - predicted_q = secondary_learner.forward( - torch.tensor(context, dtype=torch.long, device=device).unsqueeze(0) - )[0].item() - observed_qs.append(float(predicted_q)) - - # Here we implement the simple non-constant threshold for the predicted IG(X) value - # We will decay the selectivity of our secondary learner filter from - # 1 standard deviation above average to 1 below average after 10 batches. - - if global_step == 10: - threshold = -1 - if predicted_q < threshold: - do_backprop = False - - # If we passed the filter, add the context to the batch! - if do_backprop: - contexts.append(np.array(context.cpu())) - lm_loss = outputs[0] - lm_loss.backward() - examples += 1 - - del outputs - - # Once the batch is filled with enough contexts, backprop on the batch. 
- if examples == batch_size: - torch.cuda.empty_cache() - examples = 0 - # Do LM backprop - torch.nn.utils.clip_grad_norm_(model.parameters(), 3.0) - lm_optimizer.step() - lm_scheduler.step() # Update learning rate schedule - global_step += 1 - # Compute the performance of the transformer model at this batch - if global_step % eval_interval == 0: - real_perp = compute_perplexity(model, test_dataset, context_len) - test_perps.append(real_perp) - - print("Test perplexity, step", global_step, ":", real_perp) - # Break out of the loop after 60 batches - if max_steps > 0 and global_step > 60: - break - if max_steps > 0 and global_step > 60: - break - - # save finetuned transformer model - torch.save(model.state_dict(), finetuned_model_name) - torch.cuda.empty_cache() - # Do some cleaning up so we can reinitialize for the next run of this function - del lm_optimizer - del lm_scheduler - return model - - -def main(): - parser = argparse.ArgumentParser(description="Fine-tune a transformer model with IGF on a language modeling task") - - # Required parameters - parser.add_argument( - "--data_dir", - default=None, - type=str, - required=True, - help="The input data dir. Should contain data files for WikiText.", - ) - parser.add_argument( - "--model_name_or_path", - default=None, - type=str, - required=True, - help="Path to pretrained model or model identifier from huggingface.co/models", - ) - parser.add_argument( - "--data_file", - type=str, - default=None, - help=( - "A jbl file containing tokenized data which can be split as objective dataset, " - "train_dataset and test_dataset." - ), - ) - - parser.add_argument( - "--igf_data_file", - type=str, - default=None, - help="A jbl file containing the context and information gain pairs to train secondary learner.", - ) - - parser.add_argument( - "--output_dir", - default=None, - type=str, - required=True, - help="The output directory where the final fine-tuned model is stored.", - ) - - parser.add_argument( - "--tokenizer_name", - default=None, - type=str, - help="Pretrained tokenizer name or path if not the same as model_name", - ) - parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") - - parser.add_argument( - "--context_len", - default=32, - type=int, - help=( - "The maximum total input sequence length after tokenization. Sequences longer " - "than this will be truncated, sequences shorter will be padded." 
- ), - ) - - parser.add_argument( - "--size_objective_set", - default=100, - type=int, - help="number of articles that are long enough to be used as our objective set", - ) - parser.add_argument( - "--eval_freq", default=100, type=int, help="secondary model evaluation is triggered at eval_freq" - ) - - parser.add_argument("--max_steps", default=1000, type=int, help="To calculate training epochs") - - parser.add_argument( - "--secondary_learner_batch_size", - default=128, - type=int, - help="batch size of training data for secondary learner", - ) - - parser.add_argument( - "--batch_size", - default=16, - type=int, - help="batch size of training data of language model(openai-community/gpt2) ", - ) - - parser.add_argument( - "--eval_interval", - default=10, - type=int, - help=( - "decay the selectivity of our secondary learner filter from " - "1 standard deviation above average to 1 below average after 10 batches" - ), - ) - - parser.add_argument( - "--number", default=100, type=int, help="The number of examples split to be used as objective_set/test_data" - ) - - parser.add_argument( - "--min_len", default=1026, type=int, help="The minimum length of the article to be used as objective set" - ) - - parser.add_argument( - "--secondary_learner_max_epochs", default=15, type=int, help="number of epochs to train secondary learner" - ) - - parser.add_argument("--trim", default=True, type=bool, help="truncate the example if it exceeds context length") - - parser.add_argument( - "--threshold", - default=1.0, - type=float, - help=( - "The threshold value used by secondary learner to filter the train_data and allow only" - " informative data as input to the model" - ), - ) - - parser.add_argument( - "--finetuned_model_name", default="openai-community/gpt2_finetuned.pt", type=str, help="finetuned_model_name" - ) - - parser.add_argument( - "--recopy_model", - default=recopy_gpt2, - type=str, - help="Reset the model to the original pretrained GPT-2 weights after each iteration", - ) - - # function calls - # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner - generate_n_pairs( - context_len=32, - max_steps=10, - size_objective_set=100, - min_len=1026, - trim=True, - data_file="data/tokenized_stories_train_wikitext103.jbl", - igf_data_file="igf_context_pairs.jbl", - ) - - # Load train data for secondary learner - secondary_learner_train_data = joblib.load("data/IGF_values.jbl") - - # Train secondary learner - secondary_learner = training_secondary_learner( - secondary_learner_train_data, - secondary_learner_max_epochs=15, - secondary_learner_batch_size=128, - eval_freq=100, - igf_model_path="igf_model.pt", - ) - - # load pretrained openai-community/gpt2 model - model = GPT2LMHeadModel.from_pretrained("openai-community/gpt2") - set_seed(42) - - # Generate train and test data to train and evaluate openai-community/gpt2 model - train_dataset, test_dataset = generate_datasets( - context_len=32, file="data/tokenized_stories_train_wikitext103.jbl", number=100, min_len=1026, trim=True - ) - - # fine-tuning of the openai-community/gpt2 model using igf (Information Gain Filtration) - finetune( - model, - train_dataset, - test_dataset, - context_len=32, - max_steps=1000, - batch_size=16, - threshold=1.0, - recopy_model=recopy_gpt2, - secondary_learner=secondary_learner, - eval_interval=10, - finetuned_model_name="openai-community/gpt2_finetuned.pt", - ) - - -if __name__ == "__main__": - main() diff --git 
a/examples/research_projects/jax-projects/HOW_TO_PROPOSE_PROJECT.md b/examples/research_projects/jax-projects/HOW_TO_PROPOSE_PROJECT.md deleted file mode 100644 index 08e05f38931..00000000000 --- a/examples/research_projects/jax-projects/HOW_TO_PROPOSE_PROJECT.md +++ /dev/null @@ -1,109 +0,0 @@ -# How to propose a Flax/JAX + Transformers project - -Great that you've opened this document! -While we at 🤗 are proposing a couple of projects, we strongly -believe that the community can come up with much more **creative**, **fun**, and -**impactful** projects on their own. This being said, we are really looking forward -to seeing your project proposal! - -## What a project should be about - -The proposed project should fall into the machine learning fields of **Natural Language Processing (NLP)** and/or **Computer Vision (CV)** (possibly also **Speech Recognition (ASR)** depending on whether Speech Recognition models are available in Flax in due time) and aim at solving a specific task. -Possible tasks can belong to: - - * text classification - * text generation - * image recognition - * image processing - * image captioning - * audio classification - * and other tasks you can think of! - -The clearer a task is defined, the better your project proposal is. -*E.g.* "Using a T5 model to learn grammar correction in French" or "Adapting a pre-trained CLIP model for zero-shot image classification in Spanish" are **well-defined and clear** project proposals, while something like "Train a language model" or "Image classification" are **too vague**. - -There is no limit to your creativity as long as the project is feasible and ethical. -The more creative & specific your project proposal, the more interesting it will be, -and the more likely will you find motivated team members to work on your project! -To get an idea of how to formulate your project proposals, you can browse through -existing project proposals on the [forum](https://discuss.huggingface.co/c/flax-jax-projects/22). - -## How to submit a project proposal - -First, you should make sure that you are [logged in](https://huggingface.co/login?sso=bm9uY2U9OTRlNjZjZmZhYjMwMmJmMWMyYjc5MmFiMTMyMzY5ODYmcmV0dXJuX3Nzb191cmw9aHR0cHMlM0ElMkYlMkZkaXNjdXNzLmh1Z2dpbmdmYWNlLmNvJTJGc2Vzc2lvbiUyRnNzb19sb2dpbg%3D%3D&sig=429ad8924bcb33c40f9823027ea749abb55d393f4f58924f36a2dba3ab0a48da) with your Hugging Face account on the forum. - -Second, make sure that your project idea doesn't already exist by checking [existing projects](https://discuss.huggingface.co/c/flax-jax-projects/22). -If your project already exists - great! This means that you can comment and improve -the existing idea and join the project to form a team! If your project idea already -exists for a different language, feel free to submit the same project idea, just in -a different language. - -Third, having ensured that your project doesn't exist, click on the *"New Topic"* -button on the [Flax/JAX Projects Forum category](https://discuss.huggingface.co/c/flax-jax-projects/22) to create a new project proposal. - -Fourth, make sure that your project proposal includes the following information: - -1. *A clear description of the project* -2. *In which language should the project be conducted?* English, German, Chinese, ...? It can also be a multi-lingual project -3. 
*Which model should be used?* If you want to adapt an existing model, you can add the link to one of the 4000 available checkpoints in JAX [here](https://huggingface.co/models?filter=jax). If you want to train a model from scratch, you can simply state the model architecture to be used, *e.g.* BERT, CLIP, etc. You can also base your project on a model that is not part of transformers. For an overview of libraries based on JAX, you can take a look at [awesome-jax](https://github.com/n2cholas/awesome-jax#awesome-jax-). **Note** that for a project that is not based on Transformers it will be more difficult for the 🤗 team to help you. Also have a look at the section [Quickstart Flax & Jax in Transformers](https://github.com/huggingface/transformers/tree/main/examples/research_projects/jax-projects#quickstart-flax-and-jax-in-transformers) to see what model architectures are currently supported in 🤗 Transformers.
-4. *What data should be used?* It is important to state at least what kind of data you would like to use. Ideally, you can already point to publicly available data or a dataset in the 🤗 Datasets library.
-5. *Are similar training scripts available in Flax/JAX?* It would be important to find similar training scripts that already exist in Flax/JAX. *E.g.* if you are working on a Seq-to-Seq task, you can make use of the [`run_summarization_flax.py`](https://github.com/huggingface/transformers/blob/main/examples/flax/summarization/run_summarization_flax.py) script which is very similar to any seq2seq training. Also have a look at the section [Quickstart Flax & Jax in Transformers](https://github.com/huggingface/transformers/tree/main/examples/research_projects/jax-projects#quickstart-flax-and-jax-in-transformers) to see what training scripts are currently supported in 🤗 Transformers.
-6. *(Optionally) What are possible challenges?* List possible difficulties with your project. *E.g.* If you know that training convergence usually takes a lot of time, it is worth stating this here!
-7. *(Optionally) What is the desired project outcome?* - How would you like to demo your project? One could *e.g.* create a Streamlit application.
-8. *(Optionally) Links to read up on* - Can you provide any links that would help the reader to better understand your project idea?
-
-Feel free to copy-paste the following format for your project proposal and fill out the respective sections:
-
-```
-# <FILL ME: project name>
-
-<FILL ME: 1. a clear description of the project>
-
-## 2. Language
-
-The model will be trained in <FILL ME: which language?>.
-
-## 3. Model
-
-<FILL ME: 3. which model should be used?>
-
-## 4. Datasets
-
-<FILL ME: 4. which data should be used?>
-
-Possible links to publicly available datasets include:
-- <FILL ME: link 1>
-- <FILL ME: link 2>
-- <FILL ME: link 3>
-
-## 5. Training scripts
-
-<FILL ME: 5. are similar training scripts available in Flax/JAX?>
-
-We can make use of <FILL ME: link to training script> to train the model.
-
-## 6. (Optional) Challenges
-
-<(Optionally) FILL ME: 6. What are possible challenges?>
-
-## 7. (Optional) Desired project outcome
-
-<(Optionally) FILL ME: 7. What is the desired project outcome? A demo?>
-
-## 8. (Optional) Reads
-
-The following links can be useful to better understand the project and
-what has previously been done.
-
-- <FILL ME: link 1>
-- <FILL ME: link 2>
-- <FILL ME: link 3>
-```
-
-To see what a proposed project looks like, please have a look at submitted project
-proposals [here](https://discuss.huggingface.co/c/flax-jax-projects/22).
-
-## Will my project proposal be selected?
-
-Having submitted a project proposal, you can now promote your idea in the Slack channel `#flax-jax-community-week` to try to convince other participants to join your project!
-Once other people have joined your project, one of the organizers (`@Suzana, @valhalla, @osanseviero, @patrickvonplaten`) will officially create a team for your project and add your project to [this google sheet](https://docs.google.com/spreadsheets/d/1GpHebL7qrwJOc9olTpIPgjf8vOS0jNb6zR_B8x_Jtik/edit?usp=sharing). diff --git a/examples/research_projects/jax-projects/README.md b/examples/research_projects/jax-projects/README.md deleted file mode 100644 index 88d8d7f9eba..00000000000 --- a/examples/research_projects/jax-projects/README.md +++ /dev/null @@ -1,1295 +0,0 @@ -# Flax/JAX community week 🤗 - -Welcome to the Flax/JAX community week! The goal of this week is to make compute-intensive NLP and CV projects (like pre-training BERT, GPT2, CLIP, ViT) -practicable for a wider audience of engineers and researchers. -To do so, we will try to teach **you** how to effectively use JAX/Flax on TPU and help you to complete a fun NLP and/or CV project in JAX/Flax during the community week. - -Free access to a TPUv3-8 will kindly be provided by the Google Cloud team! - -In this document, we list all the important information that you will need during the Flax/JAX community week. - -Don't forget to sign up [here](https://forms.gle/tVGPhjKXyEsSgUcs8)! - -## Table of Contents - -- [Organization](#organization) -- [Important dates](#important-dates) -- [Communication](#communication) -- [Projects](#projects) - - [How to propose](#how-to-propose-a-project) - - [How to form a team](#how-to-form-a-team-around-a-project) -- [Tips & Tricks for project](#tips-on-how-to-organize-the-project) -- [How to install flax, jax, optax, transformers, datasets](#how-to-install-relevant-libraries) -- [Quickstart Flax/JAX](#quickstart-flax-and-jax) -- [Quickstart Flax/JAX in 🤗 Transformers](#quickstart-flax-and-jax-in-transformers) - - [Flax design philosophy in 🤗 Transformers](#flax-design-philosophy-in-transformers) - - [How to use flax models & scripts](#how-to-use-flax-models-and-example-scripts) -- [Talks](#talks) -- [How to use the 🤗 Hub for training](#how-to-use-the-hub-for-collaboration) -- [How to setup TPU VM](#how-to-setup-tpu-vm) -- [How to build a demo](#how-to-build-a-demo) - - [Using the Hugging Face Widgets](#using-the-hugging-face-widgets) - - [Using a Streamlit demo](#using-a-streamlit-demo) - - [Using a Gradio demo](#using-a-gradio-demo) -- [Project evaluation](#project-evaluation) -- [General Tips & Tricks](#general-tips-and-tricks) -- [FAQ](#faq) - -## Organization - -Participants can propose ideas for an interesting NLP and/or CV project. Teams of 3 to 5 will then be formed around the most promising and interesting projects. Make sure to read through the [Projects](#projects) section on how to propose projects, comment on other participants' project ideas, and create a team. - -To help each team successfully finish their project, we have organized talks by leading scientists and engineers from Google, Hugging Face, and the open-source NLP & CV community. The talks will take place before the community week from June 30th to July 2nd. Make sure to attend the talks to get the most out of your participation! Check out the [Talks](#talks) section to get an overview of the talks, including the speaker and the time of the talk. - -Each team is then given **free access to a TPUv3-8 VM** from July 7th to July 14th. In addition, we will provide training examples in JAX/Flax for a variety of NLP and Vision models to kick-start your project. 
During the week, we'll make sure to answer any questions you might have about JAX/Flax and Transformers and help each team as much as possible to complete their project! - -At the end of the community week, each team should submit a demo of their project. All demonstrations will be evaluated by a jury and the top-3 demos will be awarded a prize. Check out the [How to submit a demo](#how-to-submit-a-demo) section for more information and suggestions on how to submit your project. - -## Important dates - -- **23.06.** Official announcement of the community week. Make sure to sign-up in [this google form](https://forms.gle/tVGPhjKXyEsSgUcs8). -- **23.06. - 30.06.** Participants will be added to an internal Slack channel. Project ideas can be proposed here and groups of 3-5 are formed. Read this document for more information. -- **30.06.** Release of all relevant training scripts in JAX/Flax as well as other documents on how to set up a TPU, how to use the training scripts, how to submit a demo, tips & tricks for JAX/Flax, tips & tricks for efficient use of the hub. -- **30.06. - 2.07.** Talks about JAX/Flax, TPU, Transformers, Computer Vision & NLP will be held. -- **7.07.** Start of the community week! Access to TPUv3-8 will be given to each team. -- **7.07. - 14.07.** The Hugging Face & JAX/Flax & Cloud team will be available for any questions, problems the teams might run into. -- **15.07.** Access to TPU is deactivated and community week officially ends. -- **16.07.** Deadline for each team to submit a demo. - -## Communication - -All important communication will take place in an internal Slack channel, called `#flax-jax-community-week`. -Important announcements of the Hugging Face, Flax/JAX, and Google Cloud team will be posted there. -Such announcements include general information about the community week (Dates, Rules, ...), release of relevant training scripts (Flax/JAX example scripts for NLP and Vision), release of other important documents (How to access the TPU), etc. -The Slack channel will also be the central place for participants to post about their results, share their learning experiences, ask questions, etc. - -For issues with Flax/JAX, Transformers, Datasets or for questions that are specific to your project we would be **very happy** if you could use the following public repositories and forums: - -- Flax: [Issues](https://github.com/google/flax/issues), [Questions](https://github.com/google/flax/discussions) -- JAX: [Issues](https://github.com/google/jax/issues), [Questions](https://github.com/google/jax/discussions) -- 🤗 Transformers: [Issues](https://github.com/huggingface/transformers/issues), [Questions](https://discuss.huggingface.co/c/transformers/9) -- 🤗 Datasets: [Issues](https://github.com/huggingface/datasets/issues), [Questions](https://discuss.huggingface.co/c/datasets/10) -- Project specific questions: [Forum](https://discuss.huggingface.co/c/flax-jax-projects/22) -- TPU related questions: [TODO]() - -Please do **not** post the complete issue/project-specific question in the Slack channel, but instead a link to your issue/question that we will try to answer as soon as possible. -This way, we make sure that the everybody in the community can benefit from your questions - even after the community week - and that the same question is not answered twice. - -To be invited to the Slack channel, please make sure you have signed up [on the Google form](https://forms.gle/tVGPhjKXyEsSgUcs8). 
- -**Note**: If you have signed up on the google form, but you are not in the Slack channel, please leave a message on [(TODO) the official forum announcement]( ) and ping `@Suzana` and `@patrickvonplaten`. - -## Projects - -During the first week after the community week announcement, **23.06. - 30.06.**, teams will be formed around the most promising and interesting project ideas. Each team can consist of 2 to 10 participants. Projects can be accessed [here](https://discuss.huggingface.co/c/flax-jax-projects/22). - -All officially defined projects can be seen [here](https://docs.google.com/spreadsheets/d/1GpHebL7qrwJOc9olTpIPgjf8vOS0jNb6zR_B8x_Jtik/edit?usp=sharing). - -### How to propose a project - -Some default project ideas are given by the organizers. **However, we strongly encourage participants to submit their own project ideas!** -Check out the [HOW_TO_PROPOSE_PROJECT.md](https://github.com/huggingface/transformers/tree/main/examples/research_projects/jax-projects/HOW_TO_PROPOSE_PROJECT.md) for more information on how to propose a new project. - -### How to form a team around a project - -You can check out all existing projects ideas on the forum under [Flax/JAX projects category](https://discuss.huggingface.co/c/flax-jax-projects/22). -Make sure to quickly check out each project idea and leave a ❤️ if you like an idea. -Feel free to leave comments, suggestions for improvement, or questions about more details directly on the discussion thread. -If you have found the project that you ❤️ the most, leave a message "I would like to join this project" on the discussion thread. -We strongly advise you to also shortly state who you are, which time zone you are in and why you would like to work on this project, how you can contribute to the project and what your vision is for the project. -For projects that see a lot of interest and for which enough participants have expressed interest in joining, an official team will be created by the organizers. -One of the organizers (`@Suzana`, `@valhalla`, `@osanseviero`, `@patrickvonplaten`) will leave a message "For this project the team: ``, `` , is officially created" on the thread and note down the teams on [this google sheet](https://docs.google.com/spreadsheets/d/1GpHebL7qrwJOc9olTpIPgjf8vOS0jNb6zR_B8x_Jtik/edit?usp=sharing). - -Once created, the team can start refining their project: - -- What is the goal of the project? *E.g.*, Present a language model that writes poetry in Russian. -- What model will we use? *E.g.*, FlaxGPT2 -- What data will we use? *E.g.* Russian dataset of OSCAR & publicly available book on poetry -- Should we use a pre-trained model or train a model from scratch? E.g. Train a model from scratch -- What training scripts do we need? *E.g.* `transformers/examples/flax/run_clm_flax.py` can be used -- What kind of demo would we like to present? E.g. Text-generation API of the 🤗 Hub in combination with a Streamlit demo that lets the user generate a poem of a given length -- How will the work be divided? *E.g.* Team member 1 works on data preprocessing, Team member 2 works on adapting the Flax script, ... - -We highly recommend that each team discusses all relevant ideas for their project directly on the forum thread. -This way valuable learning experiences are shared and accessible by the whole community in the future. -Additionally, the organizers, other participants, or anybody in the community really can read through your discussions and leave comments/tips for improvement. 
Obviously, you can also create private chats, ... to discuss more sensitive topics, etc. - -**Important**: - -- For project ideas that see a lot of interest, we are more than happy to create more than one team. -- Participants are welcome to join multiple teams, even though we encourage them to only work on a single project. -- Under special circumstances, participants can change/create new teams. Please note that we would like to keep this the exception. If however, you would like to change/leave existing teams, please leave a post on the project's thread where you ping the corresponding organizer that created the group. - - It is often easy to propose/join a project that is done in your native language. Feel free to reach out to existing [language-specific groups](https://discuss.huggingface.co/c/languages-at-hugging-face/15) to look for community members that might be interested in joining your project. - -## Tips on how to organize the project - -This section gives you some tips on how to most efficiently & effectively -work as a team to achieve your goal. It is by no means a strict recipe to follow, -but rather a collection of tips from the 🤗 team. - -Once your team is defined, you can start working on the project as soon as possible. - - -### Communication - -At first, it is always useful to get to know each other and to set up a means of communication. -While we recommend that all technical aspects of work can be discussed directly on the [forum](https://discuss.huggingface.co/c/flax-jax-projects/22) under your project thread, -it can be very helpful to have a more direct way of communicating, *e.g.* in a channel. -For this we have created a discord that you can access [here](https://discord.com/channels/858019234139602994/858019234139602997). -This discord will not be managed by anybody and is just there so that you can communicate more effectively with your team members. -Feel free to create a new channel for you and your team where you can discuss everything. If you and your team have already set up other ways of communicating, it is absolutely not required to make use of the discord. However, we do recommend each team to set up some kind -of channel or group for quick discussions. - -### Project definition - -In the very beginning, you should make sure your project is well-defined and that -everybody in the team understands the goal of the project and the work that needs to be -done in order to achieve the goal. A well-defined project: - -- has defined the task on which the model will be trained -- has defined the model that will be trained -- has defined the datasets that will be used for training -- has defined the type of training scripts that need to be written -- has defined the desired outcome of the project -- has defined the workflows - -By "has defined" we don't meant that the corresponding code already has to be written and ready -to be used, but that everybody in team is on the same page on what type of model, data and training script should be used. 
- -To give an example, a well-defined project would be the following: - -- task: summarization -- model: [google-t5/t5-small](https://huggingface.co/google-t5/t5-small) -- dataset: [CNN/Daily mail](https://huggingface.co/datasets/cnn_dailymail) -- training script: [run_summarization_flax.py](https://github.com/huggingface/transformers/blob/main/examples/flax/summarization/run_summarization_flax.py) -- outcome: t5 model that can summarize news -- work flow: adapt `run_summarization_flax.py` to work with `google-t5/t5-small`. - -This example is a very easy and not the most interesting project since a `google-t5/t5-small` -summarization model exists already for CNN/Daily mail and pretty much no code has to be -written. -A well-defined project does not need to have the dataset be part of -the `datasets` library and the training script already be pre-written, however it should -be clear how the desired dataset can be accessed and how the training script can be -written. - -It is also important to have a clear plan regarding the workflow. Usually, the -data processing is done in a first step. Once the data is in a format that the model can -work with, the training script can be written, etc. These steps should be more detailed -once the team has a clearly defined project. It can be helpful to set deadlines for each step. - -### Workload division - -To effectively work as a team, it is crucial to divide the workload among everybody. -Some team members will be more motivated and experienced than others and -some team members simply want to participate to learn more and cannot contribute that -much to the team. This is totally fine! One cannot expect everybody in the team to have the same level of experience and time/motivation during the community week. - -As a conclusion, being honest about one's expected involvement is crucial so that -the workload can be divided accordingly. If someone doesn't think her/his tasks are feasible - let -the team know early on so that someone else can take care of it! - -It is recommended that the motivated and experienced team members take the lead in dividing the work and are ready to take over the tasks of another team member if necessary. - -The workload can often be divided according to: - -- data preprocessing (load the data and preprocess data in the correct format) -- data tokenization / data collator (process data samples into tokens or images) -- model configuration (writing the code that defines the model) -- model forward pass (make sure input / output work correctly) -- loss function (define the loss function) -- putting the pieces together in a training script - -Many of the steps above require other steps to be finished, so it often makes sense -to use dummy data in the expected format to start, *e.g.*, with the model forward pass -before the data preprocessing is done. - -### Expectations - -It is also very important to stay realistic with the scope of your project. Each team -has access to a TPUv3-8 for only *ca.* 10 days, so it's important to keep the scope of -the project reasonable. While we do want each team to work on interesting projects, each -team should make sure that the project goals can be achieved within the provided compute -time on TPU. For instance, pretraining a 11 billion parameters T5 model is not really a realistic -task with just 10 days of TPUv3-8 compute. -Also, it might be difficult to finish a project where the whole modeling, dataset and training code has to be written from scratch. 
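To make the "dummy data" tip from the workload-division part above concrete: below is a minimal sketch of the kind of forward-pass smoke test a team might run before any real preprocessing exists. It is not part of the original example scripts, and it assumes the team has picked a Flax GPT-2 checkpoint (`openai-community/gpt2` is used purely as an illustration).

```python
import jax.numpy as jnp
from transformers import FlaxGPT2LMHeadModel

# Load any small Flax checkpoint just to exercise the forward pass
# (the checkpoint name here is an assumption, not a requirement).
model = FlaxGPT2LMHeadModel.from_pretrained("openai-community/gpt2")

# Dummy batch: one made-up sequence of 16 token IDs in the expected (batch, seq_len) shape.
dummy_input_ids = jnp.ones((1, 16), dtype="i4")

outputs = model(input_ids=dummy_input_ids)
print(outputs.logits.shape)  # expected: (1, 16, vocab_size)
```

Once a made-up batch flows through the model like this, the data preprocessing and training-loop tasks can be developed and tested largely independently of each other.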

Having defined your project, feel free to reach out on Slack or the forum for feedback from the organizers. We can surely give you our opinion on whether the project is feasible and what can be done to improve it.

### Other tips

Here is a collection of some more tips:

- We strongly recommend working as publicly and collaboratively as possible during the week so that other teams
and the organizers can best help you. This includes publishing important discussions on
the forum and making use of the [🤗 hub](http://huggingface.co/) to have version
control for your models and training logs.
- When debugging, it is important that the debugging cycle is kept as short as possible to
be able to effectively debug. *E.g.* if there is a problem with your training script,
you should run it with just a couple of hundred examples and not the whole dataset. This can be done by either making use of [datasets streaming](https://huggingface.co/docs/datasets/master/dataset_streaming?highlight=streaming) or by selecting just the first
X number of data samples after loading:

```python
datasets["train"] = datasets["train"].select(range(1000))
```
- Ask for help. If you are stuck, use the public Slack channel or the [forum](https://discuss.huggingface.co/c/flax-jax-projects/22) to ask for help.

## How to install relevant libraries

In the following we will explain how to install all relevant libraries on your local computer and on the TPU VM.

It is recommended to install all relevant libraries both on your local machine
and on the TPU virtual machine. This way, quick prototyping and testing can be done on
your local machine and the actual training can be done on the TPU VM.

### Local computer

The following libraries are required to train a JAX/Flax model with 🤗 Transformers and 🤗 Datasets:

- [JAX](https://github.com/google/jax/)
- [Flax](https://github.com/google/flax)
- [Optax](https://github.com/deepmind/optax)
- [Transformers](https://github.com/huggingface/transformers)
- [Datasets](https://github.com/huggingface/datasets)

You should install the above libraries in a [virtual environment](https://docs.python.org/3/library/venv.html).
If you're unfamiliar with Python virtual environments, check out the [user guide](https://packaging.python.org/guides/installing-using-pip-and-virtual-environments/). Create a virtual environment with the version of Python you're going
to use and activate it.

You should be able to run the command:

```bash
python3 -m venv <your-venv-name>
```

You can activate your venv by running

```bash
source ~/<your-venv-name>/bin/activate
```

We strongly recommend making use of the provided JAX/Flax example scripts in [transformers/examples/flax](https://github.com/huggingface/transformers/tree/main/examples/flax) even if you want to train a JAX/Flax model from another GitHub repository that is not integrated into 🤗 Transformers.
In all likelihood, you will need to adapt one of the example scripts, so we recommend forking and cloning the 🤗 Transformers repository as follows.
Doing so will allow you to share your fork of the Transformers library with your team members so that the team effectively works on the same code base. It will also automatically install the newest versions of `flax`, `jax` and `optax`.

1. Fork the [repository](https://github.com/huggingface/transformers) by
   clicking on the 'Fork' button on the repository's page. This creates a copy of the code
   under your GitHub user account.

2. 
Clone your fork to your local disk, and add the base repository as a remote: - - ```bash - $ git clone https://github.com//transformers.git - $ cd transformers - $ git remote add upstream https://github.com/huggingface/transformers.git - ``` - -3. Create a new branch to hold your development changes. This is especially useful to share code changes with your team: - - ```bash - $ git checkout -b a-descriptive-name-for-my-project - ``` - -4. Set up a flax environment by running the following command in a virtual environment: - - ```bash - $ pip install -e ".[flax]" - ``` - - (If transformers was already installed in the virtual environment, remove - it with `pip uninstall transformers` before reinstalling it in editable - mode with the `-e` flag.) - - If you have already cloned that repo, you might need to `git pull` to get the most recent changes in the `datasets` - library. - - Running this command will automatically install `flax`, `jax` and `optax`. - -Next, you should also install the 🤗 Datasets library. We strongly recommend installing the -library from source to profit from the most current additions during the community week. - -Simply run the following steps: - -```bash -$ cd ~/ -$ git clone https://github.com/huggingface/datasets.git -$ cd datasets -$ pip install -e ".[streaming]" -``` - -If you plan on contributing a specific dataset during -the community week, please fork the datasets repository and follow the instructions -[here](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-create-a-pull-request). - -To verify that all libraries are correctly installed, you can run the following command. -It assumes that both `transformers` and `datasets` were installed from main - otherwise -datasets streaming will not work correctly. - -```python -from transformers import FlaxRobertaModel, RobertaTokenizerFast -from datasets import load_dataset -import jax - -dataset = load_dataset('oscar', "unshuffled_deduplicated_en", split='train', streaming=True) - -dummy_input = next(iter(dataset))["text"] - -tokenizer = RobertaTokenizerFast.from_pretrained("FacebookAI/roberta-base") -input_ids = tokenizer(dummy_input, return_tensors="np").input_ids[:, :10] - -model = FlaxRobertaModel.from_pretrained("julien-c/dummy-unknown") - -# run a forward pass, should return an object `FlaxBaseModelOutputWithPooling` -model(input_ids) -``` - -### TPU VM - -**VERY IMPORTANT** - Only one process can access the TPU cores at a time. This means that if multiple team members -are trying to connect to the TPU cores errors, such as: - -``` -libtpu.so already in used by another process. Not attempting to load libtpu.so in this process. -``` - -are thrown. As a conclusion, we recommend every team member to create her/his own virtual environment, but only one -person should run the heavy training processes. Also, please take turns when setting up the TPUv3-8 so that everybody -can verify that JAX is correctly installed. - -The following libraries are required to train a JAX/Flax model with 🤗 Transformers and 🤗 Datasets on TPU VM: - -- [JAX](https://github.com/google/jax/) -- [Flax](https://github.com/google/flax) -- [Optax](https://github.com/deepmind/optax) -- [Transformers](https://github.com/huggingface/transformers) -- [Datasets](https://github.com/huggingface/datasets) - -You should install the above libraries in a [virtual environment](https://docs.python.org/3/library/venv.html). 
-If you're unfamiliar with Python virtual environments, check out the [user guide](https://packaging.python.org/guides/installing-using-pip-and-virtual-environments/). Create a virtual environment with the version of Python you're going -to use and activate it. - -You should be able to run the command: - -```bash -python3 -m venv -``` - -If this doesn't work, you first might to have install `python3-venv`. You can do this as follows: - -```bash -sudo apt-get install python3-venv -``` - -You can activate your venv by running - -```bash -source ~//bin/activate -``` - -Next you should install JAX's TPU version on TPU by running the following command: - -```bash -$ pip install requests -``` - -and then: - -```bash -$ pip install "jax[tpu]>=0.2.16" -f https://storage.googleapis.com/jax-releases/libtpu_releases.html -``` - -**Note**: Running this command might actually throw an error, such as: -``` - Building wheel for jax (setup.py) ... error - ERROR: Command errored out with exit status 1: - command: /home/patrick/patrick/bin/python3 -u -c 'import sys, setuptools, tokenize; sys.argv[0] = '"'"'/tmp/pip-install-lwseckn1/jax/setup.py'"'"'; __file__='"'"'/tmp/pip-install-lwseckn1/jax/setup.py'"'"';f=getattr(tokenize, '"'"'open'"'"', open)(__file__);code=f.read().replace('"'"'\r\n'"'"', '"'"'\n'"'"');f.close();exec(compile(code, __file__, '"'"'exec'"'"'))' bdist_wheel -d /tmp/pip-wheel-pydotzlo - cwd: /tmp/pip-install-lwseckn1/jax/ - Complete output (6 lines): - usage: setup.py [global_opts] cmd1 [cmd1_opts] [cmd2 [cmd2_opts] ...] - or: setup.py --help [cmd1 cmd2 ...] - or: setup.py --help-commands - or: setup.py cmd --help - - error: invalid command 'bdist_wheel' - ---------------------------------------- - ERROR: Failed building wheel for jax -``` -Jax should have been installed correctly nevertheless. - -To verify that JAX was correctly installed, you can run the following command: - -```python -import jax -jax.device_count() -``` - -This should display the number of TPU cores, which should be 8 on a TPUv3-8 VM. - -We strongly recommend to make use of the provided JAX/Flax examples scripts in [transformers/examples/flax](https://github.com/huggingface/transformers/tree/main/examples/flax) even if you want to train a JAX/Flax model of another github repository that is not integrated into 🤗 Transformers. -In all likelihood, you will need to adapt one of the example scripts, so we recommend forking and cloning the 🤗 Transformers repository as follows. -Doing so will allow you to share your fork of the Transformers library with your team members so that the team effectively works on the same code base. It will also automatically install the newest versions of `flax`, `jax` and `optax`. - -1. Fork the [repository](https://github.com/huggingface/transformers) by - clicking on the 'Fork' button on the repository's page. This creates a copy of the code - under your GitHub user account. - -2. Clone your fork to your local disk, and add the base repository as a remote: - - ```bash - $ git clone https://github.com//transformers.git - $ cd transformers - $ git remote add upstream https://github.com/huggingface/transformers.git - ``` - -3. Create a new branch to hold your development changes. This is especially useful to share code changes with your team: - - ```bash - $ git checkout -b a-descriptive-name-for-my-project - ``` - -4. 
Set up a flax environment by running the following command in a virtual environment: - - ```bash - $ pip install -e ".[flax]" - ``` - - (If transformers was already installed in the virtual environment, remove - it with `pip uninstall transformers` before reinstalling it in editable - mode with the `-e` flag.) - - If you have already cloned that repo, you might need to `git pull` to get the most recent changes in the `datasets` - library. - - Running this command will automatically install `flax`, `jax` and `optax`. - -Next, you should also install the 🤗 Datasets library. We strongly recommend installing the -library from source to profit from the most current additions during the community week. - -Simply run the following steps: - -```bash -$ cd ~/ -$ git clone https://github.com/huggingface/datasets.git -$ cd datasets -$ pip install -e ".[streaming]" -``` - -If you plan on contributing a specific dataset during -the community week, please fork the datasets repository and follow the instructions -[here](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-create-a-pull-request). - -To verify that all libraries are correctly installed, you can run the following command. -It assumes that both `transformers` and `datasets` were installed from main - otherwise -datasets streaming will not work correctly. - -```python -from transformers import FlaxRobertaModel, RobertaTokenizerFast -from datasets import load_dataset -import jax - -dataset = load_dataset('oscar', "unshuffled_deduplicated_en", split='train', streaming=True) - -dummy_input = next(iter(dataset))["text"] - -tokenizer = RobertaTokenizerFast.from_pretrained("FacebookAI/roberta-base") -input_ids = tokenizer(dummy_input, return_tensors="np").input_ids[:, :10] - -model = FlaxRobertaModel.from_pretrained("julien-c/dummy-unknown") - -# run a forward pass, should return an object `FlaxBaseModelOutputWithPooling` -model(input_ids) -``` - -## Quickstart flax and jax - -[JAX](https://jax.readthedocs.io/en/latest/index.html) is Autograd and XLA, brought together for high-performance numerical computing and machine learning research. It provides composable transformations of Python+NumPy programs: differentiate, vectorize, parallelize, Just-In-Time compile to GPU/TPU, and more. A great place for getting started with JAX is the [JAX 101 Tutorial](https://jax.readthedocs.io/en/latest/jax-101/index.html). - -[Flax](https://flax.readthedocs.io/en/latest/index.html) is a high-performance neural network library designed for flexibility built on top of JAX. It aims to provide users with full control of their training code and is carefully designed to work well with JAX transformations such as `grad` and `pmap` (see the [Flax philosophy](https://flax.readthedocs.io/en/latest/philosophy.html)). For an introduction to Flax see the [Flax Basics Colab](https://flax.readthedocs.io/en/latest/notebooks/flax_basics.html) or the list of curated [Flax examples](https://flax.readthedocs.io/en/latest/examples.html). - -## Quickstart flax and jax in transformers - -Currently, we support the following models in Flax. -Note that some models are about to be merged to `main` and will -be available in a couple of days. 
- -- [BART](https://github.com/huggingface/transformers/blob/main/src/transformers/models/bart/modeling_flax_bart.py) -- [BERT](https://github.com/huggingface/transformers/blob/main/src/transformers/models/bert/modeling_flax_bert.py) -- [BigBird](https://github.com/huggingface/transformers/blob/main/src/transformers/models/big_bird/modeling_flax_big_bird.py) -- [CLIP](https://github.com/huggingface/transformers/blob/main/src/transformers/models/clip/modeling_flax_clip.py) -- [ELECTRA](https://github.com/huggingface/transformers/blob/main/src/transformers/models/electra/modeling_flax_electra.py) -- [GPT2](https://github.com/huggingface/transformers/blob/main/src/transformers/models/openai-community/gpt2/modeling_flax_gpt2.py) -- [(TODO) MBART](https://github.com/huggingface/transformers/blob/main/src/transformers/models/mbart/modeling_flax_mbart.py) -- [RoBERTa](https://github.com/huggingface/transformers/blob/main/src/transformers/models/roberta/modeling_flax_roberta.py) -- [T5](https://github.com/huggingface/transformers/blob/main/src/transformers/models/t5/modeling_flax_t5.py) -- [ViT](https://github.com/huggingface/transformers/blob/main/src/transformers/models/vit/modeling_flax_vit.py) -- [Wav2Vec2](https://github.com/huggingface/transformers/blob/main/src/transformers/models/wav2vec2/modeling_flax_wav2vec2.py) - -You can find all available training scripts for JAX/Flax under the -official [flax example folder](https://github.com/huggingface/transformers/tree/main/examples/flax). Note that a couple of training scripts will be released in the following week. - -- [Causal language modeling (GPT2)](https://github.com/huggingface/transformers/blob/main/examples/flax/language-modeling/run_clm_flax.py) -- [Masked language modeling (BERT, RoBERTa, ELECTRA, BigBird)](https://github.com/huggingface/transformers/blob/main/examples/flax/language-modeling/run_mlm_flax.py) -- [Text classification (BERT, RoBERTa, ELECTRA, BigBird)](https://github.com/huggingface/transformers/blob/main/examples/flax/text-classification/run_flax_glue.py) -- [Summarization / Seq2Seq (BART, MBART, T5)](https://github.com/huggingface/transformers/blob/main/examples/flax/summarization/run_summarization_flax.py) -- [Masked Seq2Seq pret-training (T5)](https://github.com/huggingface/transformers/blob/main/examples/flax/language-modeling/run_t5_mlm_flax.py) -- [Contrastive Loss pretraining for Wav2Vec2](https://github.com/huggingface/transformers/blob/main/examples/research_projects/jax-projects/wav2vec2) -- [Fine-tuning long-range QA for BigBird](https://github.com/huggingface/transformers/blob/main/examples/research_projects/jax-projects/big_bird) -- [(TODO) Image classification (ViT)]( ) -- [(TODO) CLIP pretraining, fine-tuning (CLIP)]( ) - - -### **Flax design philosophy in Transformers** - -This section will explain how Flax models are implemented in Transformers and how the design differs from PyTorch. - -Let's first go over the difference between Flax and PyTorch. - -In JAX, most transformations (notably `jax.jit`) require functions that are transformed to be stateless so that they have no side effects. This is because any such side-effects will only be executed once when the transformed function is run during compilation and all subsequent calls of the compiled function would re-use the same side-effects of the compiled run instead of the "actual" side-effects (see [Stateful Computations in JAX](https://jax.readthedocs.io/en/latest/jax-101/07-state.html)). 
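
To make this concrete, here is a minimal, self-contained sketch (not taken from the Transformers code base) of how a Python side effect, here a `print` call, only runs while `jax.jit` traces the function and is skipped when the compiled function is called again:

```python
import jax
import jax.numpy as jnp


def forward(x):
    # Side effect: this print only runs while JAX traces the function.
    print("tracing forward")
    return jnp.dot(x, x)


jitted_forward = jax.jit(forward)

x = jnp.ones((4,))
jitted_forward(x)  # prints "tracing forward" once, during tracing/compilation
jitted_forward(x)  # prints nothing: the cached, compiled computation is re-used
```
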
As a consequence, Flax models, which are designed to work well with JAX transformations, are stateless. This means that when running a model in inference, both the inputs and the model weights are passed to the forward pass. In contrast, PyTorch models are very much stateful, with the weights being stored within the model instance and the user just passing the inputs to the forward pass.

Let's illustrate the difference between stateful models in PyTorch and stateless models in Flax.

For simplicity, let's assume the language model consists simply of a single attention layer [`key_proj`, `value_proj`, `query_proj`] and a linear layer `logits_proj` to project the transformed word embeddings to the output logit vectors.

#### **Stateful models in PyTorch**

In PyTorch, the weight matrices would be stored as `torch.nn.Linear` objects alongside the model's config inside the model class `ModelPyTorch`:

```python
class ModelPyTorch:

    def __init__(self, config):
        self.config = config
        self.key_proj = torch.nn.Linear(config)
        self.value_proj = torch.nn.Linear(config)
        self.query_proj = torch.nn.Linear(config)
        self.logits_proj = torch.nn.Linear(config)
```

Instantiating an object `model_pytorch` of the class `ModelPyTorch` would actually allocate memory for the model weights and attach them to the attributes `self.key_proj`, `self.value_proj`, `self.query_proj`, and `self.logits_proj`. We could access the weights via:

```python
key_projection_matrix = model_pytorch.key_proj.weight.data
```

Visually, we would represent an object of `model_pytorch` therefore as follows:

![alt text](https://raw.githubusercontent.com/patrickvonplaten/scientific_images/master/lm_pytorch_def.png)

Executing a forward pass then simply corresponds to passing the `input_ids` to the object `model_pytorch`:

```python
sequences = model_pytorch(input_ids)
```

In a more abstract way, this can be represented as passing the word embeddings to the model function to get the output logits:

![alt text](https://raw.githubusercontent.com/patrickvonplaten/scientific_images/master/lm_pt_inference.png)

This design is called **stateful** because the output logits, the `sequences`, can change even if the word embeddings, the `input_ids`, stay the same. Hence, the function's output does not only depend on its inputs, but also on its **state**, `[self.key_proj, self.value_proj, self.query_proj, self.logits_proj]`, which makes `model_pytorch` stateful.

#### **Stateless models in Flax/JAX**

Now, let's see how the mathematically equivalent model would be written in JAX/Flax. The model class `ModelFlax` would define the self-attention and logits projection weights as [**`flax.linen.Dense`**](https://flax.readthedocs.io/en/latest/_autosummary/flax.linen.Dense.html#flax.linen.Dense) objects:

```python
class ModelFlax:

    def __init__(self, config):
        self.config = config
        self.key_proj = flax.linen.Dense(config)
        self.value_proj = flax.linen.Dense(config)
        self.query_proj = flax.linen.Dense(config)
        self.logits_proj = flax.linen.Dense(config)
```

At first glance the linear layer class `flax.linen.Dense` looks very similar to PyTorch's `torch.nn.Linear` class. However, instantiating an object `model_flax` only defines the linear transformation functions and does **not** allocate memory to store the linear transformation weights.
In a way, the attribute `self.key_proj` tell the instantiated object `model_flax` to perform a linear transformation on some input and force it to expect a weight, called `key_proj`, as an input. - -This time we would illustrate the object `model_flax` without the weight matrices: - -![alt text](https://raw.githubusercontent.com/patrickvonplaten/scientific_images/master/lm_flax_def.png) - - -Accordingly, the forward pass requires both `input_ids` as well as a dictionary consisting of the model's weights (called `state` here) to compute the `sequences`: - -To get the initial `state` we need to explicitly do a forward pass by passing a dummy input: - -```python -state = model_flax.init(rng, dummy_input_ids) -``` - -and then we can do the forward pass. - -```python -sequences = model_flax.apply(state, input_ids) -``` - -Visually, the forward pass would now be represented as passing all tensors required for the computation to the model's object: - -![alt text](https://raw.githubusercontent.com/patrickvonplaten/scientific_images/master/lm_flax_inference.png) - -This design is called **stateless** because the output logits, the `sequences`, **cannot** change if the word embeddings, the `input_ids`, stay the same. Hence, the function's output only depends on its inputs, being the `input_ids` and the `state` dictionary consisting of the weights **state**, `[key_proj, value_proj, query_proj, logits_proj]`. - -Another term which is often used to describe the design difference between Flax/JAX and PyTorch is **immutable** vs **mutable**. A instantiated Flax model, `model_flax`, is **immutable** as a logical consequence of `model_flax`'s output being fully defined by its input: If calling `model_flax` could mutate `model_flax`, then calling `model_flax` twice with the same inputs could lead to different results which would violate the "*statelessness*" of Flax models. - -#### **Flax models in Transformers** - -Now let us see how this is handled in `Transformers.` If you have used a Flax model in Transformers already, you might wonder how come you don't always have to pass the parameters to the function of the forward pass. This is because the `FlaxPreTrainedModel` class abstracts it away. -It is designed this way so that the Flax models in Transformers will have a similar API to PyTorch and Tensorflow models. - -The `FlaxPreTrainedModel` is an abstract class that holds a Flax module, handles weights initialization, and provides a simple interface for downloading and loading pre-trained weights i.e. the `save_pretrained` and `from_pretrained` methods. Each Flax model then defines its own subclass of `FlaxPreTrainedModel`; *e.g.* the BERT model has `FlaxBertPreTrainedModel`. Each such class provides two important methods, `init_weights` and `__call__`. Let's see what each of those methods do: - -- The `init_weights` method takes the expected input shape and a [`PRNGKey`](https://jax.readthedocs.io/en/latest/_autosummary/jax.random.PRNGKey.html) (and any other arguments that are required to get initial weights) and calls `module.init` by passing it a random example to get the initial weights with the given `dtype` (for ex. `fp32` or `bf16` etc). This method is called when we create an instance of the model class, so the weights are already initialized when you create a model i.e., when you do - - model = FlaxBertModel(config) - -- The `__call__` method defines forward pass. It takes all necessary model inputs and parameters (and any other arguments required for the forward pass). 
The parameters are optional; when no parameters are passed, it uses the previously initialized or loaded parameters which can be accessed using `model.params`. It then calls the `module.apply` method, passing it the parameters and inputs to do the actual forward pass. So we can do a forward pass using

      output = model(inputs, params=params)


Let's look at an example to see how this works. We will write a simple two-layer MLP model.

First, write a Flax module that will declare the layers and computation.

```python
import flax.linen as nn
import jax.numpy as jnp


class FlaxMLPModule(nn.Module):
    # MLPConfig is assumed to be a simple config class with a `hidden_dim` attribute
    config: MLPConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.dense1 = nn.Dense(self.config.hidden_dim, dtype=self.dtype)
        self.dense2 = nn.Dense(self.config.hidden_dim, dtype=self.dtype)

    def __call__(self, inputs):
        hidden_states = self.dense1(inputs)
        hidden_states = nn.relu(hidden_states)
        hidden_states = self.dense2(hidden_states)
        return hidden_states
```

Now let's define the `FlaxPreTrainedModel` model class.

```python
from typing import Tuple

import jax
import jax.numpy as jnp
import flax.linen as nn

from transformers.modeling_flax_utils import FlaxPreTrainedModel


class FlaxMLPPreTrainedModel(FlaxPreTrainedModel):
    config_class = MLPConfig
    base_model_prefix = "model"
    module_class: nn.Module = None

    def __init__(self, config: MLPConfig, input_shape: Tuple = (1, 8), seed: int = 0, dtype: jnp.dtype = jnp.float32, **kwargs):
        # initialize the flax module
        module = self.module_class(config=config, dtype=dtype, **kwargs)
        super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype)

    def init_weights(self, rng, input_shape):
        # init input tensors
        inputs = jnp.zeros(input_shape, dtype="i4")

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        params = self.module.init(rngs, inputs)["params"]
        return params

    def __call__(self, inputs, params: dict = None):
        params = {"params": params or self.params}
        outputs = self.module.apply(params, jnp.array(inputs))
        return outputs
```


Now we can define our model class as follows.

```python
class FlaxMLPModel(FlaxMLPPreTrainedModel):
    module_class = FlaxMLPModule
```

Now the `FlaxMLPModel` will have a similar interface to PyTorch or TensorFlow models and allows us to attach loaded or randomly initialized weights to the model instance.

So the important point to remember is that the `model` is not an instance of `nn.Module`; it is rather a container class that holds a Flax module and its parameters and provides convenient methods for initialization and the forward pass. The key take-away here is that an instance of `FlaxMLPModel` is very much stateful now since it holds all the model parameters, whereas the underlying Flax module `FlaxMLPModule` is still stateless. Now to make `FlaxMLPModel` fully compliant with JAX transformations, it is always possible to pass the parameters to `FlaxMLPModel` as well to make it stateless and easier to work with during training. Feel free to take a look at the code to see how exactly this is implemented, for example in [`modeling_flax_bert.py`](https://github.com/huggingface/transformers/blob/main/src/transformers/models/bert/modeling_flax_bert.py#L536).

Another significant difference between Flax and PyTorch models is that we can pass the `labels` directly to PyTorch's forward pass to compute the loss, whereas Flax models never accept `labels` as an input argument.
In PyTorch, gradient backpropagation is performed by simply calling `.backward()` on the computed loss, which makes it very handy for the user to be able to pass the `labels`. In Flax however, gradient backpropagation cannot be done by simply calling `.backward()` on the loss output; instead, the loss function itself has to be transformed by `jax.grad` or `jax.value_and_grad` to return the gradients of all parameters. This transformation cannot happen under the hood when one passes the `labels` to Flax's forward function, so in Flax we simply don't allow `labels` to be passed by design and require the user to implement the loss function themselves. As a result, you will see that all training-related code is decoupled from the modeling code and always defined in the training scripts themselves.

### **How to use flax models and example scripts**


#### **How to do a forward pass**

Let's first see how to load, save and do inference with Flax models. As explained in the above section, all Flax models in Transformers have a similar API to PyTorch models, so we can use the familiar `from_pretrained` and `save_pretrained` methods to load and save Flax models.

Let's use the base `FlaxRobertaModel` without any heads as an example.

```python
from transformers import FlaxRobertaModel, RobertaTokenizerFast
import jax

tokenizer = RobertaTokenizerFast.from_pretrained("FacebookAI/roberta-base")
inputs = tokenizer("JAX/Flax is amazing ", padding="max_length", max_length=128, return_tensors="np")

model = FlaxRobertaModel.from_pretrained("julien-c/dummy-unknown")

@jax.jit
def run_model(input_ids, attention_mask):
    # run a forward pass, should return an object `FlaxBaseModelOutputWithPooling`
    return model(input_ids, attention_mask)

outputs = run_model(**inputs)
```

We use `jax.jit` to compile the function to get maximum performance. Note that in the above example, we set `padding="max_length"` to pad all examples to the same length. We do this because JAX's compiler has to recompile a function every time its input shape changes - in a sense a compiled function is not only defined by its code but also by its input and output shape. It is usually much more effective to pad the input to a fixed static shape than having to recompile the function multiple times.


#### **How to write a training loop**

Now let's see how we can write a simple training loop to train Flax models. We will use `FlaxGPT2ForCausalLM` as an example.

A training loop for Flax models typically consists of
- A loss function that takes the parameters and inputs, runs the forward pass and returns the loss.
- A transformation of the loss function with `jax.grad` or `jax.value_and_grad` so that we get the gradients of all parameters.
- An optimizer to update the parameters using the gradients returned by the transformed loss function.
- A train step function which combines the loss function and optimizer update, does the forward and backward pass and returns the updated parameters.

Let's see what that looks like in code:

First, we initialize our model.

```python
import jax
import jax.numpy as jnp

from transformers import FlaxGPT2ForCausalLM

# `config` is a GPT2Config instance defined beforehand
model = FlaxGPT2ForCausalLM(config)
```

As explained above we don't compute the loss inside the model, but rather in the task-specific training script.
For demonstration purposes, we write a pseudo training script for causal language modeling in the following.

```python
from flax.training.common_utils import onehot

def cross_entropy(logits, labels):
    return -jnp.sum(labels * jax.nn.log_softmax(logits, axis=-1), axis=-1)

# define a function which will run the forward pass and return the loss
def compute_loss(params, input_ids, labels):
    logits = model(input_ids, params=params, train=True).logits
    num_classes = logits.shape[-1]
    loss = cross_entropy(logits, onehot(labels, num_classes)).mean()
    return loss
```

Now we transform the loss function with `jax.value_and_grad`.

```python
# transform the loss function to get the gradients
grad_fn = jax.value_and_grad(compute_loss)
```

We use the [optax](https://github.com/deepmind/optax) library to initialize the optimizer.

```python
import optax

params = model.params
tx = optax.sgd(learning_rate=3e-3)
opt_state = tx.init(params)
```

Now we define a single training step which will do a forward and a backward pass.

```python
def _train_step(params, opt_state, input_ids, labels):
    # do the forward pass and get the loss and gradients
    loss, grads = grad_fn(params, input_ids, labels)

    # use the gradients to update parameters
    updates, opt_state = tx.update(grads, opt_state)
    updated_params = optax.apply_updates(params, updates)

    return updated_params, opt_state, loss

train_step = jax.jit(_train_step)
```

Finally, let's run our training loop.

```python
# train loop
for i in range(10):
    params, opt_state, loss = train_step(params, opt_state, input_ids, labels)
```

Note how we always pass the `params` and `opt_state` to the `train_step`, which then returns the updated `params` and `opt_state`. This is because of the stateless nature of JAX/Flax models: all the state, such as the parameters and the optimizer state, is kept external.

We can now save the model with the trained parameters using

```python
model.save_pretrained("awesome-flax-model", params=params)
```

Note that, as JAX is backed by the [XLA](https://www.tensorflow.org/xla) compiler, any JAX/Flax code can run on all `XLA`-compliant devices without code changes!
That means you could use the same training script on CPUs, GPUs, and TPUs.

To learn more about how to train Flax models on different devices (GPU, multi-GPU, TPU) and how to use the example scripts, please have a look at the [examples README](https://github.com/huggingface/transformers/tree/main/examples/flax).

## Talks

3 days of talks around JAX / Flax, Transformers, large-scale language modeling and other great topics during our community event!
- -### Wednesday, June 30th -- [Watch the talks on YouTube](https://www.youtube.com/watch?v=fuAyUQcVzTY) -- [Chat history](https://docs.google.com/spreadsheets/d/1PZ5xYV2hVwlAVQSqDag65ympv5YNCSDmXyG-eWTaZ_o/edit?usp=sharing) - - Speaker | Topic | Time | Video | -|-------------|---------------------------------|------------------------|------------------------| -| Skye Wanderman-Milne, Google Brain | Intro to JAX on Cloud TPUs | 6.00pm-6.45pm CEST / 9.00am-9.45am PST | [![Youtube](https://www.youtube.com/s/desktop/f506bd45/img/favicon_32.png)](https://www.youtube.com/watch?v=fuAyUQcVzTY) | -| Marc van Zee, Google Brain | Introduction to Flax | 6.45pm-7.30pm CEST / 9.45am-10.30am PST | [![Youtube](https://www.youtube.com/s/desktop/f506bd45/img/favicon_32.png)](https://youtu.be/fuAyUQcVzTY?t=2569) | -| Pablo Castro, Google Brain | Using Jax & Flax for RL with the Dopamine library | 7.30pm-8.00pm CEST / 10.30am-11.00am PST | [![Youtube](https://www.youtube.com/s/desktop/f506bd45/img/favicon_32.png)](https://youtu.be/fuAyUQcVzTY?t=5306) | - -### Thursday, July 1st -- [Watch the talks on YouTube](https://www.youtube.com/watch?v=__eG63ZP_5g) -- [Chat history](https://docs.google.com/spreadsheets/d/1PZ5xYV2hVwlAVQSqDag65ympv5YNCSDmXyG-eWTaZ_o/edit#gid=1515796400) - - Speaker | Topic | Time | Video | -|-------------|---------------------------------|------------------------|------------------------| -| Suraj Patil & Patrick von Platen, Hugging Face | How to use JAX/Flax with Transformers | 5.30pm-6.00pm CEST / 8.30am-9.00am PST | [![Youtube](https://www.youtube.com/s/desktop/f506bd45/img/favicon_32.png)](https://www.youtube.com/watch?v=__eG63ZP_5g) | -| Sabrina J. Mielke, Johns Hopkins University & HuggingFace | From stateful code to purified JAX: how to build your neural net framework | 6.00pm-6.30pm CEST / 9.00am-9.30am PST | [![Youtube](https://www.youtube.com/s/desktop/f506bd45/img/favicon_32.png)](https://youtu.be/__eG63ZP_5g?t=1576) | -| Mostafa Dehghani, Google Brain | Long Range Arena: Benchmarking Efficient Transformers | 6.30pm-7.00pm CEST / 9.30am-10.00am PST | [![Youtube](https://www.youtube.com/s/desktop/f506bd45/img/favicon_32.png)](https://youtu.be/__eG63ZP_5g?t=3695) | -| Rohan Anil, Google Brain | Scalable Second Order Optimization for Deep Learning | 7.00pm-7.30pm CEST / 10.00am-10.30am PST | [![Youtube](https://www.youtube.com/s/desktop/f506bd45/img/favicon_32.png)](https://youtu.be/__eG63ZP_5g?t=5285) | - - -### Friday, July 2nd -- [Watch the talks on YouTube](https://www.youtube.com/watch?v=ZCMOPkcTu3s) -- [Chat history](https://docs.google.com/spreadsheets/d/1PZ5xYV2hVwlAVQSqDag65ympv5YNCSDmXyG-eWTaZ_o/edit#gid=1166061401) - - Speaker | Topic | Time | Video | -|-------------|---------------------------------|------------------------|------------------------| -| Lucas Beyer, Google Brain | Vision Transformer | 5.00pm-5.30 CEST / 8.00am-8.30 PST | [![Youtube](https://www.youtube.com/s/desktop/f506bd45/img/favicon_32.png)](https://www.youtube.com/watch?v=ZCMOPkcTu3s) | -| Ben Wang, EleutherAI | Multihost Training in Mesh Transformer JAX | 5.30pm-6.00 CEST / 8.30am-9.00 PST | [![Youtube](https://www.youtube.com/s/desktop/f506bd45/img/favicon_32.png)](https://youtu.be/ZCMOPkcTu3s?t=1803) | -| Iurii Kemaev, Soňa Mokrá, Junhyuk Oh, DeepMind | DeepMind JAX Ecosystem | 6.00pm-6.30 CEST / 9.00am-9.30am PST | [![Youtube](https://www.youtube.com/s/desktop/f506bd45/img/favicon_32.png)](https://youtu.be/ZCMOPkcTu3s?t=3388) | -| Siddhartha Kamalakara, Joanna Yoo & João G M Araújo, Cohere | 
Training large scale language models | 6:30pm-7.00pm CEST / 9:30am-10.00am PST | [![Youtube](https://www.youtube.com/s/desktop/f506bd45/img/favicon_32.png)](https://youtu.be/ZCMOPkcTu3s?t=5095) | - -### Talks & Speakers - -#### Skye Wanderman-Milne, JAX developer, Google Brain -- Talk: Intro to JAX on Cloud TPUs -- Abstract: JAX is a system for high-performance machine-learning research that combines the familiarity of Python + NumPy together with the power of hardware acceleration on CPUs, GPUs, and TPUs. It offers composable function transformations for automatic differentiation, automatic batching, end-to-end compilation, and both data and model parallelism. This talk will show you how to get up and running with JAX on a Cloud TPU VM. -- Speaker info: Skye Wanderman-Milne is a software engineer working on JAX. She has previously worked on TensorFlow and Apache Impala, a high-performance distributed database. - -#### Marc van Zee, Research SWE, Google Brain (Flax team) -- Talk: Introduction to Flax -- Abstract: In this talk I will provide a high-level introduction to the neural network library Flax. I will discuss the Flax philosophy, talk about the ecosystem around Flax and provide a high-level introduction to the code. I explain the Module abstraction and how to use it to train your models. -- Speaker info: Marc is at Google Research for over 4 years. First he worked on conceptual AI, developing a next generation language understanding and reasoning prototype and he authored the CFQ dataset for compositional generalization. Currently, Marc works as a research software engineer in the Flax team. - -#### Pablo Castro, Staff Research Software Developer; Google Research, Brain Team -- Talk: Using Jax & Flax for RL with the Dopamine library -- Abstract: The Dopamine library was launched with TensorFlow in 2018 and we added a Jax/Flax variant of it last year. Internally, Jax's flexibility has facilitated our RL research tremendously, and we are excited to demonstrate its potential. -- Speaker info: Pablo Samuel has been at Google for over 9 years, and is currently a researcher with the Brain team, focusing on fundamental reinforcement learning, as well as machine learning and creativity. Aside from his research, Pablo Samuel is an active musician (with a channel exploring the intersection of music and computer science), and is helping increase the representation of the LatinX community in the research world. -- Dopamine repo: https://github.com/google/dopamine -- Homepage: https://psc-g.github.io/ -- Twitter: https://twitter.com/pcastr - -#### Suraj Patil & Patrick von Platen, Machine Learning Engineers at Hugging Face -- Talk: How to use JAX/Flax with Transformers -- Abstract: Transformers is one of the most popular open-source ML libraries and supports PyTorch, Tensorflow, and JAX/Flax. In this talk, we will explain how JAX/Flax models should be used in Transformers and compare their design in Transformers with the design of PyTorch models in Transformers. In the second part, we will give you a hands-on presentation of how a model can be trained end-to-end with the official JAX/Flax example scripts using Transformers & Datasets. Along the way, we want to give you some tips and tricks on how to best realize your project. -- Speaker info: Suraj and Patrick are part of Hugging Face’s open source team and lead the integration of JAX/Flax into Transformers. -- GitHub: https://github.com/patil-suraj & https://github.com/patrickvonplaten - -#### Sabrina J. 
Mielke, PhD student at The Johns Hopkins University & Part-time research intern at HuggingFace -- Talk: From stateful code to purified JAX: how to build your neural net framework -- Abstract: Moving from object-oriented (and stateful) PyTorch- or TF2-code with tape-based backprop to JAX isn't easy---and while running grad() on numpy-oneliners is cool and all, you do wonder... how do I build actual big neural nets? Libraries like flax, trax, or haiku make it easy---but how could you build machinery like that yourself? -- Speaker info: Sabrina is a PhD student at the Johns Hopkins University and a part-time research intern at HuggingFace, researching open-vocabulary language models for segmentation and tokenization. She has published and co-organized workshops and shared tasks on these topics as well as on morphology and typological analysis in ACL, NAACL, EMNLP, LREC, and AAAI. You can find her reminisce for a time when formal language theory played a bigger role in NLP on Twitter at @sjmielke. -- Links: The 2020 blogpost this talk will be based on: https://sjmielke.com/jax-purify.htm, leading to our experiment Parallax and eventually Haiku - -#### Mostafa Dehghani, Research Scientist, Google Brain -- Talk: Long Range Arena: Benchmarking Efficient Transformers -- Abstract: Transformers do not scale very well to long sequence lengths largely because of quadratic self-attention complexity. In the recent months, a wide spectrum of efficient, fast Transformers have been proposed to tackle this problem, more often than not claiming superior or comparable model quality to vanilla Transformer models. So, we now need a well-established consensus on how to evaluate this class of models. Moreover, inconsistent benchmarking on a wide spectrum of tasks and datasets makes it difficult to assess relative model quality amongst many models. I'll talk about a systematic and unified benchmark, LRA, specifically focused on evaluating model quality under long-context scenarios. LRA is a suite of tasks consisting of sequences ranging from 1K to 16K tokens, encompassing a wide range of data types and modalities such as text, natural, synthetic images, and mathematical expressions requiring similarity, structural, and visual-spatial reasoning. We systematically evaluate ten well-established long-range Transformer models (Reformers, Linformers, Linear Transformers, Sinkhorn Transformers, Performers, Synthesizers, Sparse Transformers, and Longformers) on LRA. LRA paves the way towards better understanding this class of efficient Transformer models, facilitates more research in this direction, and presents new challenging tasks to tackle. -- Speaker info: https://mostafadehghani.com/ - -#### Rohan Anil, Senior Staff Software Engineer, Google Research, Brain Team -- Talk: Scalable Second Order Optimization for Deep Learning -- Abstract: Optimization in machine learning, both theoretical and applied, is presently dominated by first-order gradient methods such as stochastic gradient descent. Second-order optimization methods, that involve second derivatives and/or second order statistics of the data, are far less prevalent despite strong theoretical properties, due to their prohibitive computation, memory and communication costs. 
In an attempt to bridge this gap between theoretical and practical optimization, we present a scalable implementation of a second-order preconditioned method (concretely, a variant of full-matrix Adagrad), that along with several critical algorithmic and numerical improvements, provides significant convergence and wall-clock time improvements compared to conventional first-order methods on state-of-the-art deep models. Our novel design effectively utilizes the prevalent heterogeneous hardware architecture for training deep models, consisting of a multicore CPU coupled with multiple accelerator units. We demonstrate superior performance compared to state-of-the-art on very large learning tasks such as machine translation with Transformers, language modeling with BERT, click-through rate prediction on Criteo, and image classification on ImageNet with ResNet-50. -- Speaker info: Rohan Anil is a software engineer at Google Research, Mountain View. Lately, he has been working on scalable and practical optimization techniques for efficient training of neural networks in various regimes. -- Resources: - - https://arxiv.org/abs/2002.09018 - - https://arxiv.org/abs/1901.11150 - - https://arxiv.org/abs/2106.06199 - - -#### Lucas Beyer, Senior Research Engineer, Google Brain -- Talk: Vision Transformer -- Abstract: This talk will discuss the learning of general visual representations via large-scale pre-training and few-shot transfer, with a special focus on the Vision Transformer (ViT) architecture, which popularized transformers for the visual domain. -- Speaker info: Lucas Beyer is a self-taught hacker and studied engineer. He went on to do his PhD in robotic perception at RWTH Aachen and is currently on a quest to find the ultimate visual representation at Google Brain in Zürich - -#### Ben Wang, Independent AI Researcher, EleutherAI -- Talk: Multihost Training in Mesh Transformer JAX -- Abstract: As models become larger, training must be scaled across multiple nodes. This talk discusses some design decisions and tradeoffs made for scaling to multiple nodes in Mesh Transformer JAX, a library for running model parallel transformers on TPU pods. -- Speaker info: Ben is an independent AI researcher who contributes to EleutherAI, an open source research collective centered around democratizing access to powerful AI models. Recently he has released GPT-J-6B, a 6 billion parameter transformer which is the most powerful autoregressive language model in terms of zero-shot performance with public weights. -- Website: https://www.eleuther.ai/ - -#### Iurii Kemaev, Research Engineer, Soňa Mokrá, Research Engineer, and Junhyuk Oh, Research Scientist, DeepMind -- Talk: DeepMind JAX Ecosystem -- Abstract: The DeepMind JAX Ecosystem is an effort to build a shared substrate of components to enable all aspects of AGI Research. In this talk, our researchers and engineers will give a high-level overview of our Ecosystem goals and design philosophies, using our Haiku (neural network), Optax (optimization) and RLax (reinforcement learning) libraries as examples. We will then deep dive on two examples of recent DeepMind research that have been enabled by JAX and these libraries: generative models and meta-gradient reinforcement learning. -- Speaker info: - - Iurii Kemaev is a Research Engineer at DeepMind. He has been using JAX for 2 years advancing RL research. Iurii is one of the DM JAX ecosystem leads. - - Soňa Mokrá is a Research Engineer at DeepMind. 
She has a background in machine translation and has been using JAX as the main ML framework for the past 6 months. - - Junhyuk Oh is a Research Scientist at DeepMind, working on reinforcement learning and meta-learning. More information is available at https://junhyuk.com/ - -#### Siddhartha Kamalakara, Joanna Yoo, João G M Araújo, MLE at Cohere -- Talk: Training large scale language models -- Abstract: A journey through Cohere’s experiences with training large scale language models. Join us in our exploration of pipeline and model parallelism as strategies for efficient training of large language models. We will present and motivate our recent transition to JAX+Flax as our choice of internal tech stack. -- Speaker info: - - João G M Araújo is a Brazilian college student with a passion for mathematics and a fascination for Deep Learning. João conducted research on representation learning and spent 3 months in Japan working on NeuroEvolution. João likes reading fantasy books and spending quality time with family and friends, and also runs a YouTube series on theoretical understanding of Deep Learning where researchers talk about their findings - - Joanna Yoo is one of the founding engineers at Cohere, working on scaling language models for the last year and half. Joanna loves live concerts and rock climbing! - - Siddhartha Rao Kamalakara is an MLE at Cohere and a researcher at FOR.ai with research interests at the intersection of efficient training and empirical understanding of DL. -- Website: https://cohere.ai/ - - -## How to use the hub for collaboration - -In this section, we will explain how a team can use the 🤗 hub to collaborate on a project. -The 🤗 hub allows each team to create a repository with integrated git version control that -should be used for their project. -The advantages of using a repository on the 🤗 hub are: - -- easy collaboration - each team member has write access to the model repository -- integrated git version control - code scripts as well as large model files are tracked using git version control -- easy sharing - the hub allows each team to easily share their work during and after the event -- integrated tensorboard functionality - uploaded tensorboard traces are automatically displayed on an integrated tensorboard tab - -We highly recommend each team to make use of the 🤗 hub during the event. -To better understand how the repository and the hub in general functions, please take a look at the documentation and the videos [here](https://huggingface.co/docs/hub). - -Now let's explain in more detail how a project can be created on the hub. Having an officially defined project on [this](https://docs.google.com/spreadsheets/d/1GpHebL7qrwJOc9olTpIPgjf8vOS0jNb6zR_B8x_Jtik/edit?usp=sharing) Google Sheet you should be part of [the Flax Community organization on the hub](https://huggingface.co/flax-community). All repositories should be created under this organization so that write access can be shared and everybody can easily access other participants' -work 🤗. Note that we are giving each team member access to all repositories created under [flax-community](https://huggingface.co/flax-community), but we encourage participants to only clone and edit repositories corresponding to one's teams. If you want to help other teams, please ask them before changing files in their repository! The integrated git version control keeps track of -all changes, so in case a file was deleted by mistake, it is trivial to re-create it. - -Awesome! 
Now, let's go over a simple example: we'll pretrain a RoBERTa model on a low-resource language. To begin with, we create a repository
under [the Flax Community organization on the hub](https://huggingface.co/flax-community) by logging in to the hub and going to [*"Add model"*](https://huggingface.co/new). By default
the username should be displayed under "*Owner*", which we want to change to *flax-community*. Next, we give our repository a fitting name for the project - here we'll just call it
*roberta-base-als* because we'll be pretraining a RoBERTa model on the super low-resource language *Alemannic* (`als`). We make sure that the model is a public repository and create it!
It should then be displayed on [the Flax Community organization on the hub](https://huggingface.co/flax-community).

Great, now we have a project directory with integrated git version control and a public model page, which we can access under [flax-community/roberta-base-als](https://huggingface.co/flax-community/roberta-base-als). Let's create a short README so that other participants know what this model is about. You can create the README.md directly on the model page as a markdown file.
Let's now make use of the repository for training.

We assume that the 🤗 Transformers library and [git-lfs](https://git-lfs.github.com/) are correctly installed on our machine or the TPU assigned to us.
If this is not the case, please refer to the [Installation guide](#how-to-install-relevant-libraries) and the official [git-lfs](https://git-lfs.github.com/) website.

First, we should log in:

```bash
$ huggingface-cli login
```

Next we can clone the repo:

```bash
$ git clone https://huggingface.co/flax-community/roberta-base-als
```

We have now cloned the model's repository and it should be under `roberta-base-als`. As you can see,
we have all the usual git functionalities in this repo - when adding a file, we can do `git add .`, `git commit -m "add file"` and `git push`
as usual. Let's try it out by adding the model's config.

We go into the folder:

```bash
$ cd ./roberta-base-als
```

and run the following commands in a Python shell to save a config.

```python
from transformers import RobertaConfig

config = RobertaConfig.from_pretrained("FacebookAI/roberta-base")
config.save_pretrained("./")
```

Now we've added a `config.json` file and can upload it by running

```bash
$ git add . && git commit -m "add config" && git push
```

Cool! The file is now displayed on the model page under the [files tab](https://huggingface.co/flax-community/roberta-base-als/tree/main).

We encourage you to upload all files except maybe the actual data files to the repository. This includes training scripts, model weights,
model configurations, training logs, etc...

Next, let's create a tokenizer and save it to the model dir by following the instructions of the [official Flax MLM README](https://github.com/huggingface/transformers/tree/main/examples/flax/language-modeling#train-tokenizer). We can again use a simple Python shell.

```python
from datasets import load_dataset
from tokenizers import ByteLevelBPETokenizer

# load dataset
dataset = load_dataset("oscar", "unshuffled_deduplicated_als", split="train")

# Instantiate tokenizer
tokenizer = ByteLevelBPETokenizer()

def batch_iterator(batch_size=1000):
    for i in range(0, len(dataset), batch_size):
        yield dataset[i: i + batch_size]["text"]

# Customized training
tokenizer.train_from_iterator(batch_iterator(), vocab_size=50265, min_frequency=2, special_tokens=[
    "<s>",
    "<pad>",
    "</s>",
    "<unk>",
    "<mask>",
])

# Save files to disk
tokenizer.save("./tokenizer.json")
```

This creates and saves our tokenizer directly in the cloned repository.
Finally, we can start training. For now, we'll simply use the official [`run_mlm_flax`](https://github.com/huggingface/transformers/blob/main/examples/flax/language-modeling/run_mlm_flax.py)
script, but we might make some changes later. So let's copy the script into our model repository.

```bash
$ cp ~/transformers/examples/flax/language-modeling/run_mlm_flax.py ./
```

This way we are certain to have all the code used to train the model tracked in our repository.
Let's start training by running:

```bash
./run_mlm_flax.py \
    --output_dir="./" \
    --model_type="roberta" \
    --config_name="./" \
    --tokenizer_name="./" \
    --dataset_name="oscar" \
    --dataset_config_name="unshuffled_deduplicated_als" \
    --max_seq_length="128" \
    --per_device_train_batch_size="4" \
    --per_device_eval_batch_size="4" \
    --learning_rate="3e-4" \
    --warmup_steps="1000" \
    --overwrite_output_dir \
    --num_train_epochs="8" \
    --push_to_hub
```

Since the dataset is tiny, this command should actually run in less than 5 minutes. Note that we attach
the flag `--push_to_hub` so that both model weights and tensorboard traces are automatically uploaded to the hub.
You can see the tensorboard directly on the model page, under the [Training metrics tab](https://huggingface.co/flax-community/roberta-base-als/tensorboard).

As you can see, it is pretty simple to upload model weights and training logs to the model hub. Since the repository
has git version control, you & your team probably already have the necessary skills to collaborate. Thanks
to `git-lfs` being integrated into the hub, model weights and other larger files can just as easily be uploaded
and changed. Finally, at Hugging Face, we believe that the model hub is a great platform to share your project
while you are still working on it:

- Bugs in training scripts can be found and corrected by anybody participating in the event
- Loss curves can be analyzed directly on the model page
- Model weights can be accessed and analyzed by everybody from the model repository

If you are not using a transformers model, don't worry - you should still be able to make use of the hub's functionalities!
The [huggingface_hub](https://github.com/huggingface/huggingface_hub) library allows you to upload essentially any JAX/Flax model to the hub with
just a couple of lines of code.
*E.g.* assuming you want to call your model simply `flax-model-dummy`, you can upload it to the hub with -just three lines of code: - - -```python -from flax import serialization -from jax import random -from flax import linen as nn -from huggingface_hub import Repository - -model = nn.Dense(features=5) - -key1, key2 = random.split(random.PRNGKey(0)) -x = random.normal(key1, (10,)) -params = model.init(key2, x) - -bytes_output = serialization.to_bytes(params) - -repo = Repository("flax-model", clone_from="flax-community/flax-model-dummy", token=True) -with repo.commit("My cool Flax model :)"): - with open("flax_model.msgpack", "wb") as f: - f.write(bytes_output) - -# Repo is created and available here: https://huggingface.co/flax-community/flax-model-dummy -``` - -**Note**: Make sure to have `huggingface_hub >= 0.0.13` to make this command work. - -For more information, check out [this PR](https://github.com/huggingface/huggingface_hub/pull/143) on how to upload any framework to the hub. - -## How to setup TPU VM - -In this section we will explain how you can ssh into a TPU VM that has been given to your team. -If your username is in one of the officially defined projects [here](https://docs.google.com/spreadsheets/d/1GpHebL7qrwJOc9olTpIPgjf8vOS0jNb6zR_B8x_Jtik/edit?usp=sharing), you should have received two emails: - -- one that states that you have been granted the role "Community Week Participants" for the project hf-flax, and -- one (or more if you are in multiple projects) that gives you the TPU name and the TPU zone for the TPU of your team - -You should click on "Open Cloud Console" on the first mail and agree to the pop up windows that follows. It will allow you to use a TPU VM. Don't worry if you cannot access the actual project `hf-flax` visually on the google cloud console and receive an error: - -``` -You don't have sufficient permission to view this page -``` -- this is expected! - -Great, now you and your team can access your TPU VM! - -In the following, we will describe how to do so using a standard console, but you should also be able to connect to the TPU VM via IDEs, like Visual Studio Code, etc. - -1. You need to install the Google Cloud SDK. Please follow the instructions on [cloud.google.com/sdk](https://cloud.google.com/sdk/docs/install#linux). - -2. Once you've installed the google cloud sdk, you should set your account by running the following command. Make sure that `` corresponds to the gmail address you used to sign up for this event. - -```bash -$ gcloud config set account -``` - -3. Let's also make sure the correct project is set in case your email is used for multiple gcloud projects: - -```bash -$ gcloud config set project hf-flax -``` - -4. Next, you will need to authenticate yourself. You can do so by running: - -```bash -$ gcloud auth login -``` - -This should give you a link to a website, where you can authenticate your gmail account. - -5. Finally, you can ssh into the TPU VM! Please run the following command by setting to either `europe-west4-a` or `us-central1-a` (depending on what is stated in the second email you received) and to the TPU name also sent to you in the second email. - -```bash -$ gcloud alpha compute tpus tpu-vm ssh --zone --project hf-flax -``` - -This should ssh you into the TPU VM! -Now you can follow the steps of the section [How to install relevant libraries](#how-to-install-relevant-libraries) to install all necessary -libraries. 
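Once the libraries are installed, it can help to quickly confirm that JAX actually sees the TPU cores before launching a long training run. A minimal sanity check along these lines (run inside the TPU VM; the exact device count depends on the TPU slice you were assigned) could be:

```python
# Minimal check that JAX can see the TPU cores (assumes JAX has already been installed for TPU).
import jax

print(jax.devices())                                    # should list TpuDevice entries on a TPU VM
print("local device count:", jax.local_device_count())  # e.g. 8 on a TPU v3-8
```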
Make sure to carefully follow the explanations of the "**IMPORTANT**" statement to correctly install JAX on TPU.
-Also feel free to install other `python` or `apt` packages on your machine if it helps you to work more efficiently!
-
-
-## How to build a demo
-
-### Using the Hugging Face Widgets
-
-Hugging Face has over [15 widgets](https://huggingface-widgets.netlify.app/) for different use cases using the 🤗 Transformers library. Some of them also support [3rd party libraries](https://huggingface.co/docs/hub/libraries) such as [Sentence Similarity](https://huggingface.co/sentence-transformers/paraphrase-xlm-r-multilingual-v1) with Sentence Transformers and [Text to Speech](https://huggingface.co/julien-c/ljspeech_tts_train_tacotron2_raw_phn_tacotron_g2p_en_no_space_train) with [ESPnet](https://github.com/espnet/espnet).
-
-All the widgets are open-sourced in the `huggingface_hub` [repo](https://github.com/huggingface/huggingface_hub/tree/main/widgets). Here is a summary of existing widgets:
-
-**NLP**
-* **Conversational:** To have the best conversations! [Example](https://huggingface.co/microsoft/DialoGPT-large?).
-* **Feature Extraction:** Retrieve the input embeddings. [Example](https://huggingface.co/sentence-transformers/distilbert-base-nli-mean-tokens?text=test).
-* **Fill Mask:** Predict potential words for a mask token. [Example](https://huggingface.co/google-bert/bert-base-uncased?).
-* **Question Answering:** Given a context and a question, predict the answer. [Example](https://huggingface.co/google-bert/bert-large-uncased-whole-word-masking-finetuned-squad).
-* **Sentence Similarity:** Predict how similar a set of sentences are. Useful for Sentence Transformers.
-* **Summarization:** Given a text, output a summary of it. [Example](https://huggingface.co/sshleifer/distilbart-cnn-12-6).
-* **Table Question Answering:** Given a table and a question, predict the answer. [Example](https://huggingface.co/google/tapas-base-finetuned-wtq).
-* **Text Generation:** Generate text based on a prompt. [Example](https://huggingface.co/openai-community/gpt2).
-* **Token Classification:** Useful for tasks such as Named Entity Recognition and Part of Speech. [Example](https://huggingface.co/dslim/bert-base-NER).
-* **Zero-Shot Classification:** Too cool to explain with words. Here is an [example](https://huggingface.co/typeform/distilbert-base-uncased-mnli).
-* ([WIP](https://github.com/huggingface/huggingface_hub/issues/99)) **Table to Text Generation**.
-
-**Speech**
-* **Audio to Audio:** For tasks such as audio source separation or speech enhancement.
-* **Automatic Speech Recognition:** Convert audio to text. [Example](https://huggingface.co/facebook/wav2vec2-base-960h).
-* **Text to Speech:** Convert text to audio.
-
-**Image**
-* **Image Classification:** Given an image, predict its class. [Example](https://huggingface.co/osanseviero/llamastic).
-* ([WIP](https://github.com/huggingface/huggingface_hub/issues/100)) **Zero Shot Image Classification**
-* ([WIP](https://github.com/huggingface/huggingface_hub/issues/112)) **Image Captioning**
-* ([WIP](https://github.com/huggingface/huggingface_hub/issues/113)) **Text to Image Generation**
-* ([Proposed](https://github.com/huggingface/huggingface_hub/issues/127)) **Visual Question Answering**
-
-You can propose and implement new widgets by [opening an issue](https://github.com/huggingface/huggingface_hub/issues). Contributions are welcome!
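Many of these widgets correspond directly to a `pipeline` task in 🤗 Transformers, so you can usually reproduce a widget's output locally before building a full demo. A minimal sketch for the Fill Mask task (the checkpoint below is only an example; any fill-mask model, including your own, works the same way):

```python
# Reproduce the Fill Mask widget locally with the pipeline API.
# "FacebookAI/roberta-base" is just an example checkpoint; swap in your own model id.
from transformers import pipeline

fill_mask = pipeline("fill-mask", model="FacebookAI/roberta-base")
for prediction in fill_mask("The goal of life is <mask>."):
    print(f"{prediction['token_str']!r}: {prediction['score']:.3f}")
```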
-
-
-### Using a Streamlit demo
-
-Sometimes you might be using different libraries or a very specific application that is not well supported by the current widgets. In this case, [Streamlit](https://streamlit.io/) can be an excellent option to build a cool visual demo. Setting up a Streamlit application is straightforward and done entirely in Python!
-
-A common use case is loading files from your model repository on the Hub inside the Streamlit demo. The `huggingface_hub` library is here to help you!
-
-```bash
-pip install huggingface_hub
-```
-
-Here is an example downloading (and caching!) a specific file directly from the Hub:
-```python
-from huggingface_hub import hf_hub_download
-filepath = hf_hub_download("flax-community/roberta-base-als", "flax_model.msgpack")
-```
-
-In many cases you will want to download the full repository. Here is an example downloading all the files from a repo. You can even specify specific revisions!
-
-```python
-from huggingface_hub import snapshot_download
-local_path = snapshot_download("flax-community/roberta-base-als")
-```
-
-Note that if you're using the 🤗 Transformers library, you can quickly load the model and tokenizer as follows:
-```python
-from transformers import AutoTokenizer, AutoModelForMaskedLM
-
-tokenizer = AutoTokenizer.from_pretrained("REPO_ID")
-model = AutoModelForMaskedLM.from_pretrained("REPO_ID")
-```
-
-
-We'll provide more examples on Streamlit demos next week. Stay tuned!
-
-### Using a Gradio demo
-
-You can also use [Gradio](https://gradio.app/) to share your demos! [Here](https://huggingface.co/blog/gradio) is an example using the Gradio library to create a GUI for a Hugging Face model.
-
-More to come!
-
-## Project evaluation
-
-For your project to be evaluated, please fill out [this Google form](https://forms.gle/jQaMkj3JJdD4Xcwn9).
-Please make sure that your submitted project includes a demo as well as information about the model, data, training methods, etc.
-
-### Criteria
-
-* **Demo.** All projects are required to have a demo. It’s open-ended, but we provide some ideas on how to build demos in the [How to build a demo](#how-to-build-a-demo) section.
-* **Technical difficulty.** Difficulty has different aspects, such as working with complex architectures, obtaining better evaluation metrics than existing models, or implementing models for low-resource languages.
-* **Social impact.** The project is expected to have a positive social impact, e.g. by tackling an under-explored area of practical interest for minorities or under-represented groups (low-resource languages, a specific focus on bias, fairness or ethical issues in ML) or by tackling general societal challenges, e.g. health or climate related challenges.
-* **Innovativeness.** Projects that propose novel applications or bring new ideas will be rewarded more.
-
-### Jury
-
-* [Niki Parmar](https://research.google/people/NikiParmar/): Staff Research Scientist at Google.
-* [Ross Wightman](https://www.linkedin.com/in/wightmanr/): Angel Investor.
-* [Thomas Wolf](https://www.linkedin.com/in/thomas-wolf-a056857/): Co-founder and CSO at Hugging Face.
-* [Ashish Vaswani](https://research.google/people/AshishVaswani/): Staff Research Scientist at Google Brain.
-
-### Process
-
-* **July 17, 12h00 CEST**: TPU VM access closes.
-* **July 19, 12h00 CEST**: Project completion deadline (including demo).
-* **July 19-21** A group of event organizers (Suraj, Patrick, Suzana, and Omar) will do an initial filter to find the top 15 projects.
-
-* **July 22-26** The jury will go over the 15 projects and pick the top three projects out of them.
-* **July 27.** Winning projects are announced.
-
-
-## General tips and tricks
-
-TODO (will be filled continuously)...
-
-## FAQ
-
-TODO (will be filled continuously)...
diff --git a/examples/research_projects/jax-projects/big_bird/README.md b/examples/research_projects/jax-projects/big_bird/README.md
deleted file mode 100644
index 42586e49580..00000000000
--- a/examples/research_projects/jax-projects/big_bird/README.md
+++ /dev/null
@@ -1,60 +0,0 @@
-
-Author: [@vasudevgupta7](https://github.com/thevasudevgupta/)
-
-## Intro
-
-In this project, we fine-tuned [**BigBird**](https://arxiv.org/abs/2007.14062) on the [**natural-questions**](https://huggingface.co/datasets/natural_questions) dataset for a **question-answering** task on long documents. **BigBird** is a **sparse-attention based transformer** which extends Transformer-based models, such as BERT, to much **longer sequences**.
-
-Read more about BigBird at https://huggingface.co/blog/big-bird
-
-## Fine-tuning
-
-**Setup**
-
-You need to install jax yourself by following the official docs ([refer to this](https://github.com/google/jax#installation)). Other requirements for this project can be installed by running the following command:
-
-```shell
-pip3 install -qr requirements.txt
-```
-
-**Download & prepare dataset**
-
-The Natural Questions corpus contains questions from real users, and it requires QA systems to read and comprehend an entire Wikipedia article that may or may not contain the answer to the question. This corpus takes ~100 GB on disk. We have used HuggingFace datasets to download & process the dataset.
-
-```shell
-# just run the following command
-python3 prepare_natural_questions.py
-
-# this will download the whole dataset from the HuggingFace Hub & will make it ready for training
-# this script takes ~3 hours to process the dataset
-```
-
-**Launch Training**
-
-We trained on a Cloud TPU v3-8. Each epoch took around 4.5 hours and the model converged in just 2 epochs. You can see the complete training arguments in [this script](bigbird_flax.py).
-
-```shell
-# just run the following command
-python3 train.py
-
-# In case you want to try hyperparameter tuning, you can run a wandb sweep
-wandb sweep --project=bigbird sweep_flax.yaml
-wandb agent 
-```
-
-## Evaluation
-
-Our evaluation script is different from the original script and we are evaluating sequences with length up to 4096 for simplicity. We managed to get an **EM score of ~55.2** using our evaluation script.
-
-```shell
-# download the validation dataset first
-mkdir natural-questions-validation
-wget https://huggingface.co/datasets/vasudevgupta/natural-questions-validation/resolve/main/natural_questions-validation.arrow -P natural-questions-validation
-wget https://huggingface.co/datasets/vasudevgupta/natural-questions-validation/resolve/main/dataset_info.json -P natural-questions-validation
-wget https://huggingface.co/datasets/vasudevgupta/natural-questions-validation/resolve/main/state.json -P natural-questions-validation
-
-# simply run the following command
-python3 evaluate.py
-```
-
-You can find our checkpoint on the HuggingFace Hub ([see this](https://huggingface.co/vasudevgupta/flax-bigbird-natural-questions)). In case you are interested in PyTorch BigBird fine-tuning, you can refer to [this repository](https://github.com/thevasudevgupta/bigbird).
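To get a quick feel for the released checkpoint without rerunning the evaluation script, it can be loaded like any other hub model. A minimal inference sketch, assuming the stock `FlaxBigBirdForQuestionAnswering` class is enough for extracting answer spans (the project's own `FlaxBigBirdForNaturalQuestions` in `bigbird_flax.py` additionally adds a small answer-category head):

```python
# Minimal inference sketch for the released checkpoint (see assumptions above).
import jax.numpy as jnp
from transformers import BigBirdTokenizerFast, FlaxBigBirdForQuestionAnswering

model_id = "vasudevgupta/flax-bigbird-natural-questions"
tokenizer = BigBirdTokenizerFast.from_pretrained(model_id)
model = FlaxBigBirdForQuestionAnswering.from_pretrained(model_id)

question = "Who wrote the novel Moby-Dick?"
context = "Moby-Dick is an 1851 novel by the American writer Herman Melville."
inputs = tokenizer(question, context, return_tensors="np")

outputs = model(**inputs)
start = int(jnp.argmax(outputs.start_logits, axis=-1)[0])
end = int(jnp.argmax(outputs.end_logits, axis=-1)[0])
print(tokenizer.decode(inputs["input_ids"][0][start : end + 1].tolist()))
```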
diff --git a/examples/research_projects/jax-projects/big_bird/bigbird_flax.py b/examples/research_projects/jax-projects/big_bird/bigbird_flax.py deleted file mode 100644 index af5e11c83a6..00000000000 --- a/examples/research_projects/jax-projects/big_bird/bigbird_flax.py +++ /dev/null @@ -1,323 +0,0 @@ -import json -import os -from dataclasses import dataclass -from functools import partial -from typing import Callable - -import flax.linen as nn -import jax -import jax.numpy as jnp -import joblib -import optax -import wandb -from flax import jax_utils, struct, traverse_util -from flax.serialization import from_bytes, to_bytes -from flax.training import train_state -from flax.training.common_utils import shard -from tqdm.auto import tqdm - -from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering -from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule - - -class FlaxBigBirdForNaturalQuestionsModule(FlaxBigBirdForQuestionAnsweringModule): - """ - BigBirdForQuestionAnswering with CLS Head over the top for predicting category - - This way we can load its weights with FlaxBigBirdForQuestionAnswering - """ - - config: BigBirdConfig - dtype: jnp.dtype = jnp.float32 - add_pooling_layer: bool = True - - def setup(self): - super().setup() - self.cls = nn.Dense(5, dtype=self.dtype) - - def __call__(self, *args, **kwargs): - outputs = super().__call__(*args, **kwargs) - cls_out = self.cls(outputs[2]) - return outputs[:2] + (cls_out,) - - -class FlaxBigBirdForNaturalQuestions(FlaxBigBirdForQuestionAnswering): - module_class = FlaxBigBirdForNaturalQuestionsModule - - -def calculate_loss_for_nq(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooler_labels): - def cross_entropy(logits, labels, reduction=None): - """ - Args: - logits: bsz, seqlen, vocab_size - labels: bsz, seqlen - """ - vocab_size = logits.shape[-1] - labels = (labels[..., None] == jnp.arange(vocab_size)[None]).astype("f4") - logits = jax.nn.log_softmax(logits, axis=-1) - loss = -jnp.sum(labels * logits, axis=-1) - if reduction is not None: - loss = reduction(loss) - return loss - - cross_entropy = partial(cross_entropy, reduction=jnp.mean) - start_loss = cross_entropy(start_logits, start_labels) - end_loss = cross_entropy(end_logits, end_labels) - pooled_loss = cross_entropy(pooled_logits, pooler_labels) - return (start_loss + end_loss + pooled_loss) / 3 - - -@dataclass -class Args: - model_id: str = "google/bigbird-roberta-base" - logging_steps: int = 3000 - save_steps: int = 10500 - - block_size: int = 128 - num_random_blocks: int = 3 - - batch_size_per_device: int = 1 - max_epochs: int = 5 - - # tx_args - lr: float = 3e-5 - init_lr: float = 0.0 - warmup_steps: int = 20000 - weight_decay: float = 0.0095 - - save_dir: str = "bigbird-roberta-natural-questions" - base_dir: str = "training-expt" - tr_data_path: str = "data/nq-training.jsonl" - val_data_path: str = "data/nq-validation.jsonl" - - def __post_init__(self): - os.makedirs(self.base_dir, exist_ok=True) - self.save_dir = os.path.join(self.base_dir, self.save_dir) - self.batch_size = self.batch_size_per_device * jax.device_count() - - -@dataclass -class DataCollator: - pad_id: int - max_length: int = 4096 # no dynamic padding on TPUs - - def __call__(self, batch): - batch = self.collate_fn(batch) - batch = jax.tree_util.tree_map(shard, batch) - return batch - - def collate_fn(self, features): - input_ids, attention_mask = self.fetch_inputs(features["input_ids"]) - batch = { - "input_ids": 
jnp.array(input_ids, dtype=jnp.int32), - "attention_mask": jnp.array(attention_mask, dtype=jnp.int32), - "start_labels": jnp.array(features["start_token"], dtype=jnp.int32), - "end_labels": jnp.array(features["end_token"], dtype=jnp.int32), - "pooled_labels": jnp.array(features["category"], dtype=jnp.int32), - } - return batch - - def fetch_inputs(self, input_ids: list): - inputs = [self._fetch_inputs(ids) for ids in input_ids] - return zip(*inputs) - - def _fetch_inputs(self, input_ids: list): - attention_mask = [1 for _ in range(len(input_ids))] - while len(input_ids) < self.max_length: - input_ids.append(self.pad_id) - attention_mask.append(0) - return input_ids, attention_mask - - -def get_batched_dataset(dataset, batch_size, seed=None): - if seed is not None: - dataset = dataset.shuffle(seed=seed) - for i in range(len(dataset) // batch_size): - batch = dataset[i * batch_size : (i + 1) * batch_size] - yield dict(batch) - - -@partial(jax.pmap, axis_name="batch") -def train_step(state, drp_rng, **model_inputs): - def loss_fn(params): - start_labels = model_inputs.pop("start_labels") - end_labels = model_inputs.pop("end_labels") - pooled_labels = model_inputs.pop("pooled_labels") - - outputs = state.apply_fn(**model_inputs, params=params, dropout_rng=drp_rng, train=True) - start_logits, end_logits, pooled_logits = outputs - - return state.loss_fn( - start_logits, - start_labels, - end_logits, - end_labels, - pooled_logits, - pooled_labels, - ) - - drp_rng, new_drp_rng = jax.random.split(drp_rng) - grad_fn = jax.value_and_grad(loss_fn) - loss, grads = grad_fn(state.params) - metrics = jax.lax.pmean({"loss": loss}, axis_name="batch") - grads = jax.lax.pmean(grads, "batch") - - state = state.apply_gradients(grads=grads) - return state, metrics, new_drp_rng - - -@partial(jax.pmap, axis_name="batch") -def val_step(state, **model_inputs): - start_labels = model_inputs.pop("start_labels") - end_labels = model_inputs.pop("end_labels") - pooled_labels = model_inputs.pop("pooled_labels") - - outputs = state.apply_fn(**model_inputs, params=state.params, train=False) - start_logits, end_logits, pooled_logits = outputs - - loss = state.loss_fn(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels) - metrics = jax.lax.pmean({"loss": loss}, axis_name="batch") - return metrics - - -class TrainState(train_state.TrainState): - loss_fn: Callable = struct.field(pytree_node=False) - - -@dataclass -class Trainer: - args: Args - data_collator: Callable - train_step_fn: Callable - val_step_fn: Callable - model_save_fn: Callable - logger: wandb - scheduler_fn: Callable = None - - def create_state(self, model, tx, num_train_steps, ckpt_dir=None): - params = model.params - state = TrainState.create( - apply_fn=model.__call__, - params=params, - tx=tx, - loss_fn=calculate_loss_for_nq, - ) - if ckpt_dir is not None: - params, opt_state, step, args, data_collator = restore_checkpoint(ckpt_dir, state) - tx_args = { - "lr": args.lr, - "init_lr": args.init_lr, - "warmup_steps": args.warmup_steps, - "num_train_steps": num_train_steps, - "weight_decay": args.weight_decay, - } - tx, lr = build_tx(**tx_args) - state = train_state.TrainState( - step=step, - apply_fn=model.__call__, - params=params, - tx=tx, - opt_state=opt_state, - ) - self.args = args - self.data_collator = data_collator - self.scheduler_fn = lr - model.params = params - state = jax_utils.replicate(state) - return state - - def train(self, state, tr_dataset, val_dataset): - args = self.args - total = len(tr_dataset) // 
args.batch_size - - rng = jax.random.PRNGKey(0) - drp_rng = jax.random.split(rng, jax.device_count()) - for epoch in range(args.max_epochs): - running_loss = jnp.array(0, dtype=jnp.float32) - tr_dataloader = get_batched_dataset(tr_dataset, args.batch_size, seed=epoch) - i = 0 - for batch in tqdm(tr_dataloader, total=total, desc=f"Running EPOCH-{epoch}"): - batch = self.data_collator(batch) - state, metrics, drp_rng = self.train_step_fn(state, drp_rng, **batch) - running_loss += jax_utils.unreplicate(metrics["loss"]) - i += 1 - if i % args.logging_steps == 0: - state_step = jax_utils.unreplicate(state.step) - tr_loss = running_loss.item() / i - lr = self.scheduler_fn(state_step - 1) - - eval_loss = self.evaluate(state, val_dataset) - logging_dict = { - "step": state_step.item(), - "eval_loss": eval_loss.item(), - "tr_loss": tr_loss, - "lr": lr.item(), - } - tqdm.write(str(logging_dict)) - self.logger.log(logging_dict, commit=True) - - if i % args.save_steps == 0: - self.save_checkpoint(args.save_dir + f"-e{epoch}-s{i}", state=state) - - def evaluate(self, state, dataset): - dataloader = get_batched_dataset(dataset, self.args.batch_size) - total = len(dataset) // self.args.batch_size - running_loss = jnp.array(0, dtype=jnp.float32) - i = 0 - for batch in tqdm(dataloader, total=total, desc="Evaluating ... "): - batch = self.data_collator(batch) - metrics = self.val_step_fn(state, **batch) - running_loss += jax_utils.unreplicate(metrics["loss"]) - i += 1 - return running_loss / i - - def save_checkpoint(self, save_dir, state): - state = jax_utils.unreplicate(state) - print(f"SAVING CHECKPOINT IN {save_dir}", end=" ... ") - self.model_save_fn(save_dir, params=state.params) - with open(os.path.join(save_dir, "opt_state.msgpack"), "wb") as f: - f.write(to_bytes(state.opt_state)) - joblib.dump(self.args, os.path.join(save_dir, "args.joblib")) - joblib.dump(self.data_collator, os.path.join(save_dir, "data_collator.joblib")) - with open(os.path.join(save_dir, "training_state.json"), "w") as f: - json.dump({"step": state.step.item()}, f) - print("DONE") - - -def restore_checkpoint(save_dir, state): - print(f"RESTORING CHECKPOINT FROM {save_dir}", end=" ... 
") - with open(os.path.join(save_dir, "flax_model.msgpack"), "rb") as f: - params = from_bytes(state.params, f.read()) - - with open(os.path.join(save_dir, "opt_state.msgpack"), "rb") as f: - opt_state = from_bytes(state.opt_state, f.read()) - - args = joblib.load(os.path.join(save_dir, "args.joblib")) - data_collator = joblib.load(os.path.join(save_dir, "data_collator.joblib")) - - with open(os.path.join(save_dir, "training_state.json"), "r") as f: - training_state = json.load(f) - step = training_state["step"] - - print("DONE") - return params, opt_state, step, args, data_collator - - -def scheduler_fn(lr, init_lr, warmup_steps, num_train_steps): - decay_steps = num_train_steps - warmup_steps - warmup_fn = optax.linear_schedule(init_value=init_lr, end_value=lr, transition_steps=warmup_steps) - decay_fn = optax.linear_schedule(init_value=lr, end_value=1e-7, transition_steps=decay_steps) - lr = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps]) - return lr - - -def build_tx(lr, init_lr, warmup_steps, num_train_steps, weight_decay): - def weight_decay_mask(params): - params = traverse_util.flatten_dict(params) - mask = {k: (v[-1] != "bias" and v[-2:] != ("LayerNorm", "scale")) for k, v in params.items()} - return traverse_util.unflatten_dict(mask) - - lr = scheduler_fn(lr, init_lr, warmup_steps, num_train_steps) - - tx = optax.adamw(learning_rate=lr, weight_decay=weight_decay, mask=weight_decay_mask) - return tx, lr diff --git a/examples/research_projects/jax-projects/big_bird/evaluate.py b/examples/research_projects/jax-projects/big_bird/evaluate.py deleted file mode 100644 index 3c5123efeba..00000000000 --- a/examples/research_projects/jax-projects/big_bird/evaluate.py +++ /dev/null @@ -1,164 +0,0 @@ -import jax -import jax.numpy as jnp -from bigbird_flax import FlaxBigBirdForNaturalQuestions -from datasets import load_from_disk - -from transformers import BigBirdTokenizerFast - - -CATEGORY_MAPPING = {0: "null", 1: "short", 2: "long", 3: "yes", 4: "no"} -PUNCTUATION_SET_TO_EXCLUDE = set("".join(["‘", "’", "´", "`", ".", ",", "-", '"'])) - - -def get_sub_answers(answers, begin=0, end=None): - return [" ".join(x.split(" ")[begin:end]) for x in answers if len(x.split(" ")) > 1] - - -def expand_to_aliases(given_answers, make_sub_answers=False): - if make_sub_answers: - # if answers are longer than one word, make sure a predictions is correct if it coresponds to the complete 1: or :-1 sub word - # *e.g.* if the correct answer contains a prefix such as "the", or "a" - given_answers = ( - given_answers + get_sub_answers(given_answers, begin=1) + get_sub_answers(given_answers, end=-1) - ) - answers = [] - for answer in given_answers: - alias = answer.replace("_", " ").lower() - alias = "".join(c if c not in PUNCTUATION_SET_TO_EXCLUDE else " " for c in alias) - answers.append(" ".join(alias.split()).strip()) - return set(answers) - - -def get_best_valid_start_end_idx(start_scores, end_scores, top_k=1, max_size=100): - best_start_scores, best_start_idx = jax.lax.top_k(start_scores, top_k) - best_end_scores, best_end_idx = jax.lax.top_k(end_scores, top_k) - - widths = best_end_idx[:, None] - best_start_idx[None, :] - mask = jnp.logical_or(widths < 0, widths > max_size) - scores = (best_end_scores[:, None] + best_start_scores[None, :]) - (1e8 * mask) - best_score = jnp.argmax(scores).item() - - return best_start_idx[best_score % top_k], best_end_idx[best_score // top_k] - - -def format_dataset(sample): - question = sample["question"]["text"] - context = 
sample["document"]["tokens"]["token"] - is_html = sample["document"]["tokens"]["is_html"] - long_answers = sample["annotations"]["long_answer"] - short_answers = sample["annotations"]["short_answers"] - - context_string = " ".join([context[i] for i in range(len(context)) if not is_html[i]]) - - # 0 - No ; 1 - Yes - for answer in sample["annotations"]["yes_no_answer"]: - if answer == 0 or answer == 1: - return { - "question": question, - "context": context_string, - "short": [], - "long": [], - "category": "no" if answer == 0 else "yes", - } - - short_targets = [] - for s in short_answers: - short_targets.extend(s["text"]) - short_targets = list(set(short_targets)) - - long_targets = [] - for s in long_answers: - if s["start_token"] == -1: - continue - answer = context[s["start_token"] : s["end_token"]] - html = is_html[s["start_token"] : s["end_token"]] - new_answer = " ".join([answer[i] for i in range(len(answer)) if not html[i]]) - if new_answer not in long_targets: - long_targets.append(new_answer) - - category = "long_short" if len(short_targets + long_targets) > 0 else "null" - - return { - "question": question, - "context": context_string, - "short": short_targets, - "long": long_targets, - "category": category, - } - - -def main(): - dataset = load_from_disk("natural-questions-validation") - dataset = dataset.map(format_dataset).remove_columns(["annotations", "document", "id"]) - print(dataset) - - short_validation_dataset = dataset.filter(lambda x: (len(x["question"]) + len(x["context"])) < 4 * 4096) - short_validation_dataset = short_validation_dataset.filter(lambda x: x["category"] != "null") - - model_id = "vasudevgupta/flax-bigbird-natural-questions" - model = FlaxBigBirdForNaturalQuestions.from_pretrained(model_id) - tokenizer = BigBirdTokenizerFast.from_pretrained(model_id) - - @jax.jit - def forward(*args, **kwargs): - start_logits, end_logits, pooled_logits = model(*args, **kwargs) - return start_logits, end_logits, jnp.argmax(pooled_logits, axis=-1) - - def evaluate(example): - # encode question and context so that they are separated by a tokenizer.sep_token and cut at max_length - inputs = tokenizer( - example["question"], - example["context"], - return_tensors="np", - max_length=4096, - padding="max_length", - truncation=True, - ) - - start_scores, end_scores, category = forward(**inputs) - - predicted_category = CATEGORY_MAPPING[category.item()] - - example["targets"] = example["long"] + example["short"] - if example["category"] in ["yes", "no", "null"]: - example["targets"] = [example["category"]] - example["has_tgt"] = example["category"] != "null" - # Now target can be: "yes", "no", "null", "list of long & short answers" - - if predicted_category in ["yes", "no", "null"]: - example["output"] = [predicted_category] - example["match"] = example["output"] == example["targets"] - example["has_pred"] = predicted_category != "null" - return example - - max_size = 38 if predicted_category == "short" else 1024 - start_score, end_score = get_best_valid_start_end_idx( - start_scores[0], end_scores[0], top_k=8, max_size=max_size - ) - - input_ids = inputs["input_ids"][0].tolist() - example["output"] = [tokenizer.decode(input_ids[start_score : end_score + 1])] - - answers = expand_to_aliases(example["targets"], make_sub_answers=True) - predictions = expand_to_aliases(example["output"]) - - # some preprocessing to both prediction and answer - answers = {"".join(a.split()) for a in answers} - predictions = {"".join(p.split()) for p in predictions} - predictions = {s for s in 
predictions if s not in ["``", "''", "`", "'"]} - - # if there is a common element, it's a exact match - example["match"] = len(list(answers & predictions)) > 0 - example["has_pred"] = predicted_category != "null" and len(predictions) > 0 - - return example - - short_validation_dataset = short_validation_dataset.map(evaluate) - - total = len(short_validation_dataset) - matched = len(short_validation_dataset.filter(lambda x: x["match"] == 1)) - print("EM score:", (matched / total) * 100, "%") - - -if __name__ == "__main__": - main() diff --git a/examples/research_projects/jax-projects/big_bird/prepare_natural_questions.py b/examples/research_projects/jax-projects/big_bird/prepare_natural_questions.py deleted file mode 100644 index ebbb184ccb6..00000000000 --- a/examples/research_projects/jax-projects/big_bird/prepare_natural_questions.py +++ /dev/null @@ -1,329 +0,0 @@ -import os - -import jsonlines -import numpy as np -from tqdm import tqdm - - -DOC_STRIDE = 2048 -MAX_LENGTH = 4096 -SEED = 42 -PROCESS_TRAIN = os.environ.pop("PROCESS_TRAIN", "false") -CATEGORY_MAPPING = {"null": 0, "short": 1, "long": 2, "yes": 3, "no": 4} - - -def _get_single_answer(example): - def choose_first(answer, is_long_answer=False): - assert isinstance(answer, list) - if len(answer) == 1: - answer = answer[0] - return {k: [answer[k]] for k in answer} if is_long_answer else answer - for a in answer: - if is_long_answer: - a = {k: [a[k]] for k in a} - if len(a["start_token"]) > 0: - break - return a - - answer = {"id": example["id"]} - annotation = example["annotations"] - yes_no_answer = annotation["yes_no_answer"] - if 0 in yes_no_answer or 1 in yes_no_answer: - answer["category"] = ["yes"] if 1 in yes_no_answer else ["no"] - answer["start_token"] = answer["end_token"] = [] - answer["start_byte"] = answer["end_byte"] = [] - answer["text"] = [""] - else: - answer["category"] = ["short"] - out = choose_first(annotation["short_answers"]) - if len(out["start_token"]) == 0: - # answer will be long if short is not available - answer["category"] = ["long"] - out = choose_first(annotation["long_answer"], is_long_answer=True) - out["text"] = [] - answer.update(out) - - # disregard some samples - if len(answer["start_token"]) > 1 or answer["start_token"] == answer["end_token"]: - answer["remove_it"] = True - else: - answer["remove_it"] = False - - cols = ["start_token", "end_token", "start_byte", "end_byte", "text"] - if not all(isinstance(answer[k], list) for k in cols): - raise ValueError("Issue in ID", example["id"]) - - return answer - - -def get_context_and_ans(example, assertion=False): - """Gives new context after removing & new answer tokens as per new context""" - answer = _get_single_answer(example) - # bytes are of no use - del answer["start_byte"] - del answer["end_byte"] - - # handle yes_no answers explicitly - if answer["category"][0] in ["yes", "no"]: # category is list with one element - doc = example["document"]["tokens"] - context = [] - for i in range(len(doc["token"])): - if not doc["is_html"][i]: - context.append(doc["token"][i]) - return { - "context": " ".join(context), - "answer": { - "start_token": -100, # ignore index in cross-entropy - "end_token": -100, # ignore index in cross-entropy - "category": answer["category"], - "span": answer["category"], # extra - }, - } - - # later, help in removing all no answers - if answer["start_token"] == [-1]: - return { - "context": "None", - "answer": { - "start_token": -1, - "end_token": -1, - "category": "null", - "span": "None", # extra - }, - } - - # 
handling normal samples - - cols = ["start_token", "end_token"] - answer.update({k: answer[k][0] if len(answer[k]) > 0 else answer[k] for k in cols}) # e.g. [10] == 10 - - doc = example["document"]["tokens"] - start_token = answer["start_token"] - end_token = answer["end_token"] - - context = [] - for i in range(len(doc["token"])): - if not doc["is_html"][i]: - context.append(doc["token"][i]) - else: - if answer["start_token"] > i: - start_token -= 1 - if answer["end_token"] > i: - end_token -= 1 - new = " ".join(context[start_token:end_token]) - - # checking above code - if assertion: - """checking if above code is working as expected for all the samples""" - is_html = doc["is_html"][answer["start_token"] : answer["end_token"]] - old = doc["token"][answer["start_token"] : answer["end_token"]] - old = " ".join([old[i] for i in range(len(old)) if not is_html[i]]) - if new != old: - print("ID:", example["id"]) - print("New:", new, end="\n") - print("Old:", old, end="\n\n") - - return { - "context": " ".join(context), - "answer": { - "start_token": start_token, - "end_token": end_token - 1, # this makes it inclusive - "category": answer["category"], # either long or short - "span": new, # extra - }, - } - - -def get_strided_contexts_and_ans(example, tokenizer, doc_stride=2048, max_length=4096, assertion=True): - # overlap will be of doc_stride - q_len - - out = get_context_and_ans(example, assertion=assertion) - answer = out["answer"] - - # later, removing these samples - if answer["start_token"] == -1: - return { - "example_id": example["id"], - "input_ids": [[-1]], - "labels": { - "start_token": [-1], - "end_token": [-1], - "category": ["null"], - }, - } - - input_ids = tokenizer(example["question"]["text"], out["context"]).input_ids - q_len = input_ids.index(tokenizer.sep_token_id) + 1 - - # return yes/no - if answer["category"][0] in ["yes", "no"]: # category is list with one element - inputs = [] - category = [] - q_indices = input_ids[:q_len] - doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride) - for i in doc_start_indices: - end_index = i + max_length - q_len - slice = input_ids[i:end_index] - inputs.append(q_indices + slice) - category.append(answer["category"][0]) - if slice[-1] == tokenizer.sep_token_id: - break - - return { - "example_id": example["id"], - "input_ids": inputs, - "labels": { - "start_token": [-100] * len(category), - "end_token": [-100] * len(category), - "category": category, - }, - } - - splitted_context = out["context"].split() - complete_end_token = splitted_context[answer["end_token"]] - answer["start_token"] = len( - tokenizer( - " ".join(splitted_context[: answer["start_token"]]), - add_special_tokens=False, - ).input_ids - ) - answer["end_token"] = len( - tokenizer(" ".join(splitted_context[: answer["end_token"]]), add_special_tokens=False).input_ids - ) - - answer["start_token"] += q_len - answer["end_token"] += q_len - - # fixing end token - num_sub_tokens = len(tokenizer(complete_end_token, add_special_tokens=False).input_ids) - if num_sub_tokens > 1: - answer["end_token"] += num_sub_tokens - 1 - - old = input_ids[answer["start_token"] : answer["end_token"] + 1] # right & left are inclusive - start_token = answer["start_token"] - end_token = answer["end_token"] - - if assertion: - """This won't match exactly because of extra gaps => visaully inspect everything""" - new = tokenizer.decode(old) - if answer["span"] != new: - print("ISSUE IN TOKENIZATION") - print("OLD:", answer["span"]) - print("NEW:", new, end="\n\n") - - if 
len(input_ids) <= max_length: - return { - "example_id": example["id"], - "input_ids": [input_ids], - "labels": { - "start_token": [answer["start_token"]], - "end_token": [answer["end_token"]], - "category": answer["category"], - }, - } - - q_indices = input_ids[:q_len] - doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride) - - inputs = [] - answers_start_token = [] - answers_end_token = [] - answers_category = [] # null, yes, no, long, short - for i in doc_start_indices: - end_index = i + max_length - q_len - slice = input_ids[i:end_index] - inputs.append(q_indices + slice) - assert len(inputs[-1]) <= max_length, "Issue in truncating length" - - if start_token >= i and end_token <= end_index - 1: - start_token = start_token - i + q_len - end_token = end_token - i + q_len - answers_category.append(answer["category"][0]) # ["short"] -> "short" - else: - start_token = -100 - end_token = -100 - answers_category.append("null") - new = inputs[-1][start_token : end_token + 1] - - answers_start_token.append(start_token) - answers_end_token.append(end_token) - if assertion: - """checking if above code is working as expected for all the samples""" - if new != old and new != [tokenizer.cls_token_id]: - print("ISSUE in strided for ID:", example["id"]) - print("New:", tokenizer.decode(new)) - print("Old:", tokenizer.decode(old), end="\n\n") - if slice[-1] == tokenizer.sep_token_id: - break - - return { - "example_id": example["id"], - "input_ids": inputs, - "labels": { - "start_token": answers_start_token, - "end_token": answers_end_token, - "category": answers_category, - }, - } - - -def prepare_inputs(example, tokenizer, doc_stride=2048, max_length=4096, assertion=False): - example = get_strided_contexts_and_ans( - example, - tokenizer, - doc_stride=doc_stride, - max_length=max_length, - assertion=assertion, - ) - - return example - - -def save_to_disk(hf_data, file_name): - with jsonlines.open(file_name, "a") as writer: - for example in tqdm(hf_data, total=len(hf_data), desc="Saving samples ... 
"): - labels = example["labels"] - for ids, start, end, cat in zip( - example["input_ids"], - labels["start_token"], - labels["end_token"], - labels["category"], - ): - if start == -1 and end == -1: - continue # leave waste samples with no answer - if cat == "null" and np.random.rand() < 0.6: - continue # removing 50 % samples - writer.write( - { - "input_ids": ids, - "start_token": start, - "end_token": end, - "category": CATEGORY_MAPPING[cat], - } - ) - - -if __name__ == "__main__": - """Running area""" - from datasets import load_dataset - - from transformers import BigBirdTokenizer - - data = load_dataset("natural_questions") - tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base") - - data = data["train" if PROCESS_TRAIN == "true" else "validation"] - - fn_kwargs = { - "tokenizer": tokenizer, - "doc_stride": DOC_STRIDE, - "max_length": MAX_LENGTH, - "assertion": False, - } - data = data.map(prepare_inputs, fn_kwargs=fn_kwargs) - data = data.remove_columns(["annotations", "document", "id", "question"]) - print(data) - - np.random.seed(SEED) - cache_file_name = "nq-training.jsonl" if PROCESS_TRAIN == "true" else "nq-validation.jsonl" - save_to_disk(data, file_name=cache_file_name) diff --git a/examples/research_projects/jax-projects/big_bird/requirements.txt b/examples/research_projects/jax-projects/big_bird/requirements.txt deleted file mode 100644 index b1bc8a7ace2..00000000000 --- a/examples/research_projects/jax-projects/big_bird/requirements.txt +++ /dev/null @@ -1,6 +0,0 @@ -git+https://github.com/huggingface/transformers@main -datasets -sentencepiece -wandb -flax -jsonlines diff --git a/examples/research_projects/jax-projects/big_bird/sweep_flax.yaml b/examples/research_projects/jax-projects/big_bird/sweep_flax.yaml deleted file mode 100644 index d804f61b3e1..00000000000 --- a/examples/research_projects/jax-projects/big_bird/sweep_flax.yaml +++ /dev/null @@ -1,16 +0,0 @@ -command: - - python3 - - train.py -method: random -parameters: - lr: - values: [4e-5, 3e-5] - warmup_steps: - values: [20000, 15000, 10000, 5000] - weight_decay: - distribution: normal - mu: 1e-2 - sigma: 2e-3 -metric: - name: eval_loss - goal: minimize diff --git a/examples/research_projects/jax-projects/big_bird/train.py b/examples/research_projects/jax-projects/big_bird/train.py deleted file mode 100644 index ce37b7f975b..00000000000 --- a/examples/research_projects/jax-projects/big_bird/train.py +++ /dev/null @@ -1,78 +0,0 @@ -import os -from dataclasses import replace - -import jax -import wandb -from bigbird_flax import Args, DataCollator, FlaxBigBirdForNaturalQuestions, Trainer, build_tx, train_step, val_step -from datasets import load_dataset -from flax import jax_utils - -from transformers import BigBirdTokenizerFast - - -if __name__ == "__main__": - print("#################### AVAILABLE DEVICES ####################") - print(jax.devices()) - print("###########################################################") - - # setup for wandb sweep - args = Args() - logger = wandb.init(project="bigbird-natural-questions", config=args.__dict__) - wandb_args = dict(logger.config) - del wandb_args["batch_size"] - args = replace(args, **wandb_args) - base_dir = args.base_dir + "-" + wandb.run.id - args = replace(args, base_dir=base_dir) - print(args) - - tr_dataset = load_dataset("json", data_files=args.tr_data_path)["train"] - val_dataset = load_dataset("json", data_files=args.val_data_path)["train"] - - # drop extra batch for now - indices = range(len(tr_dataset) - len(tr_dataset) % 
args.batch_size) - tr_dataset = tr_dataset.shuffle().select(indices) - indices = range(len(val_dataset) - len(val_dataset) % args.batch_size) - val_dataset = val_dataset.shuffle().select(indices) - - if os.environ.get("TRAIN_ON_SMALL", "false") == "true": - tr_dataset = tr_dataset.shuffle().select(range(80000)) - val_dataset = val_dataset.shuffle().select(range(8000)) - - print(tr_dataset) - print(val_dataset) - - model = FlaxBigBirdForNaturalQuestions.from_pretrained( - args.model_id, block_size=args.block_size, num_random_blocks=args.num_random_blocks - ) - tokenizer = BigBirdTokenizerFast.from_pretrained(args.model_id) - data_collator = DataCollator(pad_id=tokenizer.pad_token_id, max_length=4096) - - tx_args = { - "lr": args.lr, - "init_lr": args.init_lr, - "warmup_steps": args.warmup_steps, - "num_train_steps": args.max_epochs * (len(tr_dataset) // args.batch_size), - "weight_decay": args.weight_decay, - } - tx, lr = build_tx(**tx_args) - - trainer = Trainer( - args=args, - data_collator=data_collator, - model_save_fn=model.save_pretrained, - train_step_fn=train_step, - val_step_fn=val_step, - logger=logger, - scheduler_fn=lr, - ) - - ckpt_dir = None - state = trainer.create_state(model, tx, num_train_steps=tx_args["num_train_steps"], ckpt_dir=ckpt_dir) - try: - trainer.train(state, tr_dataset, val_dataset) - except KeyboardInterrupt: - print("Oooops; TRAINING STOPPED UNFORTUNATELY") - - print("SAVING WEIGHTS IN `final-weights`") - params = jax_utils.unreplicate(state.params) - model.save_pretrained(os.path.join(args.base_dir, "final-weights"), params=params) diff --git a/examples/research_projects/jax-projects/dataset-streaming/README.md b/examples/research_projects/jax-projects/dataset-streaming/README.md deleted file mode 100644 index bdb6629e509..00000000000 --- a/examples/research_projects/jax-projects/dataset-streaming/README.md +++ /dev/null @@ -1,121 +0,0 @@ - - -# Language model training examples in streaming mode - -The following examples showcase how to train a language model from scratch -using the JAX/Flax backend. - -JAX/Flax allows you to trace pure functions and compile them into efficient, fused accelerator code on both GPU and TPU. -Models written in JAX/Flax are **immutable** and updated in a purely functional -way which enables simple and efficient model parallelism. - -All of the following examples make use of [dataset streaming](https://huggingface.co/docs/datasets/master/dataset_streaming), therefore allowing to train models on massive datasets\ -without ever having to download the full dataset. - -## Masked language modeling - -In the following, we demonstrate how to train a bi-directional transformer model -using masked language modeling objective as introduced in [BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding](https://arxiv.org/abs/1810.04805). -More specifically, we demonstrate how JAX/Flax and dataset streaming can be leveraged -to pre-train [**`FacebookAI/roberta-base`**](https://huggingface.co/FacebookAI/roberta-base) -in English on a single TPUv3-8 pod for 10000 update steps. - -The example script uses the 🤗 Datasets library. You can easily customize them to your needs if you need extra processing on your datasets. - -Let's start by creating a model repository to save the trained model and logs. -Here we call the model `"english-roberta-base-dummy"`, but you can change the model name as you like. 
- -You can do this either directly on [huggingface.co](https://huggingface.co/new) (assuming that -you are logged in) or via the command line: - -```bash -huggingface-cli repo create english-roberta-base-dummy -``` - -Next we clone the model repository to add the tokenizer and model files. - -```bash -git clone https://huggingface.co//english-roberta-base-dummy -``` - -To ensure that all tensorboard traces will be uploaded correctly, we need to -track them. You can run the following command inside your model repo to do so. - -```bash -cd english-roberta-base-dummy -git lfs track "*tfevents*" -``` - -Great, we have set up our model repository. During training, we will automatically -push the training logs and model weights to the repo. - -Next, let's add a symbolic link to the `run_mlm_flax.py`. - -```bash -export MODEL_DIR="./english-roberta-base-dummy" -ln -s ~/transformers/examples/research_projects/jax-projects/dataset-streaming/run_mlm_flax_stream.py ./ -``` - -### Copy config and tokenizer of existing model - -In this example, we will simply copy an existing config and tokenizer in English. -You can run the following code in a Python shell to do so. - -```python -from transformers import RobertaTokenizerFast, RobertaConfig - -model_dir = "./english-roberta-base-dummy" - -tokenizer = RobertaTokenizerFast.from_pretrained("FacebookAI/roberta-base") -config = RobertaConfig.from_pretrained("FacebookAI/roberta-base") - -tokenizer.save_pretrained(model_dir) -config.save_pretrained(model_dir) -``` - -### Train model - -Next we can run the example script to pretrain the model. -Compared to the default [`run_mlm_flax`](https://github.com/huggingface/transformers/blob/main/examples/flax/language-modeling/run_mlm_flax.py), we introduced 4 new training settings: -- `num_train_steps` - how many update steps should be run. -- `num_eval_samples` - how many training samples should be taken for evaluation. -- `logging_steps` - at what rate should the training loss be logged. -- `eval_steps` - at what rate should evaluation be run. -10K update steps - -```bash -./run_mlm_flax_stream.py \ - --output_dir="${MODEL_DIR}" \ - --model_type="roberta" \ - --config_name="${MODEL_DIR}" \ - --tokenizer_name="${MODEL_DIR}" \ - --dataset_name="oscar" \ - --dataset_config_name="unshuffled_deduplicated_en" \ - --max_seq_length="128" \ - --per_device_train_batch_size="128" \ - --per_device_eval_batch_size="128" \ - --learning_rate="3e-4" \ - --warmup_steps="1000" \ - --overwrite_output_dir \ - --adam_beta1="0.9" \ - --adam_beta2="0.98" \ - --num_train_steps="10000" \ - --num_eval_samples="5000" \ - --logging_steps="250" \ - --eval_steps="1000" \ - --push_to_hub -``` diff --git a/examples/research_projects/jax-projects/dataset-streaming/run_mlm_flax_stream.py b/examples/research_projects/jax-projects/dataset-streaming/run_mlm_flax_stream.py deleted file mode 100755 index 8940fab5bda..00000000000 --- a/examples/research_projects/jax-projects/dataset-streaming/run_mlm_flax_stream.py +++ /dev/null @@ -1,637 +0,0 @@ -#!/usr/bin/env python -# coding=utf-8 -# Copyright 2021 The HuggingFace Team All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" -Fine-tuning the library models for masked language modeling (BERT, ALBERT, RoBERTa...) with whole word masking on a -text file or a dataset. - -Here is the full list of checkpoints on the hub that can be fine-tuned by this script: -https://huggingface.co/models?filter=fill-mask -""" - -import logging -import os -import sys -import time -from collections import defaultdict -from dataclasses import dataclass, field - -# You can also adapt this script on your own masked language modeling task. Pointers for this are left as comments. -from pathlib import Path -from typing import Dict, List, Optional, Tuple - -import datasets -import flax -import jax -import jax.numpy as jnp -import numpy as np -import optax -from datasets import load_dataset -from flax import jax_utils, traverse_util -from flax.training import train_state -from flax.training.common_utils import get_metrics, onehot, shard -from tqdm import tqdm - -from transformers import ( - CONFIG_MAPPING, - FLAX_MODEL_FOR_MASKED_LM_MAPPING, - AutoConfig, - AutoTokenizer, - FlaxAutoModelForMaskedLM, - HfArgumentParser, - PreTrainedTokenizerBase, - TensorType, - TrainingArguments, - is_tensorboard_available, - set_seed, -) - - -if datasets.__version__ <= "1.8.0": - raise ValueError("Make sure to upgrade `datasets` to a version >= 1.9.0 to use dataset streaming") - - -MODEL_CONFIG_CLASSES = list(FLAX_MODEL_FOR_MASKED_LM_MAPPING.keys()) -MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) - - -@dataclass -class ModelArguments: - """ - Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch. - """ - - model_name_or_path: Optional[str] = field( - default=None, - metadata={ - "help": ( - "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch." - ) - }, - ) - model_type: Optional[str] = field( - default=None, - metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)}, - ) - config_name: Optional[str] = field( - default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} - ) - tokenizer_name: Optional[str] = field( - default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} - ) - cache_dir: Optional[str] = field( - default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"} - ) - use_fast_tokenizer: bool = field( - default=True, - metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, - ) - dtype: Optional[str] = field( - default="float32", - metadata={ - "help": ( - "Floating-point format in which the model weights should be initialized and trained. Choose one of" - " `[float32, float16, bfloat16]`." - ) - }, - ) - - -@dataclass -class DataTrainingArguments: - """ - Arguments pertaining to what data we are going to input our model for training and eval. 
- """ - - dataset_name: Optional[str] = field( - default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."} - ) - dataset_config_name: Optional[str] = field( - default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} - ) - train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."}) - validation_file: Optional[str] = field( - default=None, - metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."}, - ) - train_ref_file: Optional[str] = field( - default=None, - metadata={"help": "An optional input train ref data file for whole word masking in Chinese."}, - ) - validation_ref_file: Optional[str] = field( - default=None, - metadata={"help": "An optional input validation ref data file for whole word masking in Chinese."}, - ) - overwrite_cache: bool = field( - default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} - ) - validation_split_percentage: Optional[int] = field( - default=5, - metadata={ - "help": "The percentage of the train set used as validation set in case there's no validation split" - }, - ) - max_seq_length: Optional[int] = field( - default=None, - metadata={ - "help": ( - "The maximum total input sequence length after tokenization. Sequences longer " - "than this will be truncated. Default to the max input length of the model." - ) - }, - ) - preprocessing_num_workers: Optional[int] = field( - default=None, - metadata={"help": "The number of processes to use for the preprocessing."}, - ) - mlm_probability: float = field( - default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"} - ) - pad_to_max_length: bool = field( - default=False, - metadata={ - "help": ( - "Whether to pad all samples to `max_seq_length`. " - "If False, will pad the samples dynamically when batching to the maximum length in the batch." - ) - }, - ) - line_by_line: bool = field( - default=False, - metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."}, - ) - text_column_name: str = field( - default="text", metadata={"help": "The name of the column to retrieve the training text."} - ) - shuffle_buffer_size: int = field( - default=10000, metadata={"help": "The number of examples to pre-load for shuffling."} - ) - num_train_steps: int = field(default=50000, metadata={"help": "The number of training steps."}) - num_eval_samples: int = field(default=50000, metadata={"help": "The number of samples to be used for evaluation"}) - - def __post_init__(self): - if self.dataset_name is None and self.train_file is None and self.validation_file is None: - raise ValueError("Need either a dataset name or a training/validation file.") - else: - if self.train_file is not None: - extension = self.train_file.split(".")[-1] - assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file." - if self.validation_file is not None: - extension = self.validation_file.split(".")[-1] - assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file." - - -@flax.struct.dataclass -class FlaxDataCollatorForLanguageModeling: - """ - Data collator used for language modeling. Inputs are dynamically padded to the maximum length of a batch if they - are not all of the same length. 
- - Args: - tokenizer (:class:`~transformers.PreTrainedTokenizer` or :class:`~transformers.PreTrainedTokenizerFast`): - The tokenizer used for encoding the data. - mlm_probability (:obj:`float`, `optional`, defaults to 0.15): - The probability with which to (randomly) mask tokens in the input. - - .. note:: - - For best performance, this data collator should be used with a dataset having items that are dictionaries or - BatchEncoding, with the :obj:`"special_tokens_mask"` key, as returned by a - :class:`~transformers.PreTrainedTokenizer` or a :class:`~transformers.PreTrainedTokenizerFast` with the - argument :obj:`return_special_tokens_mask=True`. - """ - - tokenizer: PreTrainedTokenizerBase - mlm_probability: float = 0.15 - - def __post_init__(self): - if self.tokenizer.mask_token is None: - raise ValueError( - "This tokenizer does not have a mask token which is necessary for masked language modeling. " - "You should pass `mlm=False` to train on causal language modeling instead." - ) - - def __call__(self, examples: List[Dict[str, np.ndarray]]) -> Dict[str, np.ndarray]: - # Handle dict or lists with proper padding and conversion to tensor. - batch = self.tokenizer.pad(examples, return_tensors=TensorType.NUMPY) - - # If special token mask has been preprocessed, pop it from the dict. - special_tokens_mask = batch.pop("special_tokens_mask", None) - - batch["input_ids"], batch["labels"] = self.mask_tokens( - batch["input_ids"], special_tokens_mask=special_tokens_mask - ) - return batch - - def mask_tokens( - self, inputs: np.ndarray, special_tokens_mask: Optional[np.ndarray] - ) -> Tuple[jnp.ndarray, jnp.ndarray]: - """ - Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original. - """ - labels = inputs.copy() - # We sample a few tokens in each sequence for MLM training (with probability `self.mlm_probability`) - probability_matrix = np.full(labels.shape, self.mlm_probability) - special_tokens_mask = special_tokens_mask.astype("bool") - - probability_matrix[special_tokens_mask] = 0.0 - masked_indices = np.random.binomial(1, probability_matrix).astype("bool") - labels[~masked_indices] = -100 # We only compute loss on masked tokens - - # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK]) - indices_replaced = np.random.binomial(1, np.full(labels.shape, 0.8)).astype("bool") & masked_indices - inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token) - - # 10% of the time, we replace masked input tokens with random word - indices_random = np.random.binomial(1, np.full(labels.shape, 0.5)).astype("bool") - indices_random &= masked_indices & ~indices_replaced - - random_words = np.random.randint(self.tokenizer.vocab_size, size=labels.shape, dtype="i4") - inputs[indices_random] = random_words[indices_random] - - # The rest of the time (10% of the time) we keep the masked input tokens unchanged - return inputs, labels - - -def generate_batch_splits(samples_idx: np.ndarray, batch_size: int) -> np.ndarray: - num_samples = len(samples_idx) - samples_to_remove = num_samples % batch_size - - if samples_to_remove != 0: - samples_idx = samples_idx[:-samples_to_remove] - sections_split = num_samples // batch_size - batch_idx = np.split(samples_idx, sections_split) - return batch_idx - - -def advance_iter_and_group_samples(train_iterator, num_samples, max_seq_length): - """ - The training iterator is advanced so that after groupifying the samples, - `num_samples` of length `max_seq_length` are returned. 
- """ - num_total_tokens = max_seq_length * num_samples - samples = defaultdict(list) - - i = 0 - while i < num_total_tokens: - tokenized_samples = next(train_iterator) - i += len(tokenized_samples["input_ids"]) - - # concatenate tokenized samples to list (excluding "id" and "text") - samples = { - k: samples[k] + tokenized_samples[k] for k in ["input_ids", "attention_mask", "special_tokens_mask"] - } - - # Concatenated tokens are split to lists of length `max_seq_length`. - # Note that remainedr of % max_seq_length are thrown away. - def group_texts(examples): - result = { - k: [t[i : i + max_seq_length] for i in range(0, num_total_tokens, max_seq_length)] - for k, t in examples.items() - } - return result - - grouped_samples = group_texts(samples) - return grouped_samples - - -def write_train_metric(summary_writer, train_metrics, train_time, step): - summary_writer.scalar("train_time", train_time, step) - - train_metrics = get_metrics(train_metrics) - for key, vals in train_metrics.items(): - tag = f"train_{key}" - for i, val in enumerate(vals): - summary_writer.scalar(tag, val, step - len(vals) + i + 1) - - -def write_eval_metric(summary_writer, eval_metrics, step): - for metric_name, value in eval_metrics.items(): - summary_writer.scalar(f"eval_{metric_name}", value, step) - - -if __name__ == "__main__": - # See all possible arguments in src/transformers/training_args.py - # or by passing the --help flag to this script. - # We now keep distinct sets of args, for a cleaner separation of concerns. - - parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) - if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): - # If we pass only one argument to the script and it's the path to a json file, - # let's parse it to get our arguments. - model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) - else: - model_args, data_args, training_args = parser.parse_args_into_dataclasses() - - if ( - os.path.exists(training_args.output_dir) - and os.listdir(training_args.output_dir) - and training_args.do_train - and not training_args.overwrite_output_dir - ): - raise ValueError( - f"Output directory ({training_args.output_dir}) already exists and is not empty. " - "Use --overwrite_output_dir to overcome." - ) - - # Setup logging - logging.basicConfig( - format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", - level="INFO", - datefmt="[%X]", - ) - - # Log on each process the small summary: - logger = logging.getLogger(__name__) - logger.warning( - f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" - + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}" - ) - - # Set the verbosity to info of the Transformers logger (on main process only): - logger.info(f"Training/evaluation parameters {training_args}") - - # Set seed before initializing model. - set_seed(training_args.seed) - - # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) - # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ - # (the dataset will be downloaded automatically from the datasets Hub). - # - # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called - # 'text' is found. You can easily tweak this behavior (see below). 
- if data_args.dataset_name is not None: - # Downloading and loading a dataset from the hub. - dataset = load_dataset( - data_args.dataset_name, - data_args.dataset_config_name, - cache_dir=model_args.cache_dir, - streaming=True, - split="train", - ) - - if model_args.config_name: - config = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir) - elif model_args.model_name_or_path: - config = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir) - else: - config = CONFIG_MAPPING[model_args.model_type]() - logger.warning("You are instantiating a new config instance from scratch.") - - if model_args.tokenizer_name: - tokenizer = AutoTokenizer.from_pretrained( - model_args.tokenizer_name, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer - ) - elif model_args.model_name_or_path: - tokenizer = AutoTokenizer.from_pretrained( - model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer - ) - else: - raise ValueError( - "You are instantiating a new tokenizer from scratch. This is not supported by this script. " - "You can do it from another script, save it, and load it from here, using --tokenizer_name." - ) - - # Otherwise, we tokenize every text, then concatenate them together before splitting them in smaller parts. - # We use `return_special_tokens_mask=True` because DataCollatorForLanguageModeling (see below) is more - # efficient when it receives the `special_tokens_mask`. - def tokenize_function(examples): - return tokenizer(examples[data_args.text_column_name], return_special_tokens_mask=True) - - tokenized_datasets = dataset.map(tokenize_function, batched=True, remove_columns=list(dataset.features.keys())) - - shuffle_seed = training_args.seed - tokenized_datasets = tokenized_datasets.shuffle(buffer_size=data_args.shuffle_buffer_size, seed=shuffle_seed) - - has_tensorboard = is_tensorboard_available() - if has_tensorboard and jax.process_index() == 0: - try: - from flax.metrics.tensorboard import SummaryWriter - except ImportError as ie: - has_tensorboard = False - logger.warning( - f"Unable to display metrics through TensorBoard because some package are not installed: {ie}" - ) - - summary_writer = SummaryWriter(log_dir=Path(training_args.output_dir)) - - # Data collator - # This one will take care of randomly masking the tokens. 
- data_collator = FlaxDataCollatorForLanguageModeling(tokenizer=tokenizer, mlm_probability=data_args.mlm_probability) - - # Initialize our training - rng = jax.random.PRNGKey(training_args.seed) - dropout_rngs = jax.random.split(rng, jax.local_device_count()) - - if model_args.model_name_or_path: - model = FlaxAutoModelForMaskedLM.from_pretrained( - model_args.model_name_or_path, config=config, seed=training_args.seed, dtype=getattr(jnp, model_args.dtype) - ) - else: - model = FlaxAutoModelForMaskedLM.from_config( - config, seed=training_args.seed, dtype=getattr(jnp, model_args.dtype) - ) - - # Store some constant - num_epochs = int(training_args.num_train_epochs) - train_batch_size = int(training_args.per_device_train_batch_size) * jax.device_count() - eval_batch_size = int(training_args.per_device_eval_batch_size) * jax.device_count() - - # define number steps per stream epoch - num_train_steps = data_args.num_train_steps - - # Create learning rate schedule - warmup_fn = optax.linear_schedule( - init_value=0.0, end_value=training_args.learning_rate, transition_steps=training_args.warmup_steps - ) - decay_fn = optax.linear_schedule( - init_value=training_args.learning_rate, - end_value=0, - transition_steps=num_train_steps - training_args.warmup_steps, - ) - linear_decay_lr_schedule_fn = optax.join_schedules( - schedules=[warmup_fn, decay_fn], boundaries=[training_args.warmup_steps] - ) - - # We use Optax's "masking" functionality to not apply weight decay - # to bias and LayerNorm scale parameters. decay_mask_fn returns a - # mask boolean with the same structure as the parameters. - # The mask is True for parameters that should be decayed. - # Note that this mask is specifically adapted for FlaxBERT-like models. - # For other models, one should correct the layer norm parameter naming - # accordingly. 
- def decay_mask_fn(params): - flat_params = traverse_util.flatten_dict(params) - flat_mask = {path: (path[-1] != "bias" and path[-2:] != ("LayerNorm", "scale")) for path in flat_params} - return traverse_util.unflatten_dict(flat_mask) - - # create adam optimizer - adamw = optax.adamw( - learning_rate=linear_decay_lr_schedule_fn, - b1=training_args.adam_beta1, - b2=training_args.adam_beta2, - eps=training_args.adam_epsilon, - weight_decay=training_args.weight_decay, - mask=decay_mask_fn, - ) - - # Setup train state - state = train_state.TrainState.create(apply_fn=model.__call__, params=model.params, tx=adamw) - - # Define gradient update step fn - def train_step(state, batch, dropout_rng): - dropout_rng, new_dropout_rng = jax.random.split(dropout_rng) - - def loss_fn(params): - labels = batch.pop("labels") - - logits = state.apply_fn(**batch, params=params, dropout_rng=dropout_rng, train=True)[0] - - # compute loss, ignore padded input tokens - label_mask = jnp.where(labels > 0, 1.0, 0.0) - loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])) * label_mask - - # take average - loss = loss.sum() / label_mask.sum() - - return loss - - grad_fn = jax.value_and_grad(loss_fn) - loss, grad = grad_fn(state.params) - grad = jax.lax.pmean(grad, "batch") - new_state = state.apply_gradients(grads=grad) - - metrics = jax.lax.pmean( - {"loss": loss, "learning_rate": linear_decay_lr_schedule_fn(state.step)}, axis_name="batch" - ) - - return new_state, metrics, new_dropout_rng - - # Create parallel version of the train step - p_train_step = jax.pmap(train_step, "batch", donate_argnums=(0,)) - - # Define eval fn - def eval_step(params, batch): - labels = batch.pop("labels") - - logits = model(**batch, params=params, train=False)[0] - - # compute loss, ignore padded input tokens - label_mask = jnp.where(labels > 0, 1.0, 0.0) - loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])) * label_mask - - # compute accuracy - accuracy = jnp.equal(jnp.argmax(logits, axis=-1), labels) * label_mask - - # summarize metrics - metrics = {"loss": loss.sum(), "accuracy": accuracy.sum(), "normalizer": label_mask.sum()} - metrics = jax.lax.psum(metrics, axis_name="batch") - - return metrics - - p_eval_step = jax.pmap(eval_step, "batch", donate_argnums=(0,)) - - # Replicate the train state on each device - state = jax_utils.replicate(state) - - train_time = 0 - train_start = time.time() - train_metrics = [] - eval_metrics = [] - - training_iter = iter(tokenized_datasets) - - max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length) - eval_samples = advance_iter_and_group_samples(training_iter, data_args.num_eval_samples, max_seq_length) - - steps = tqdm(range(num_train_steps), desc="Training...", position=0) - for step in range(num_train_steps): - # ======================== Training ================================ - try: - samples = advance_iter_and_group_samples(training_iter, train_batch_size, max_seq_length) - except StopIteration: - # Once the end of the dataset stream is reached, the training iterator - # is reinitialized and reshuffled and a new eval dataset is randomly chosen. 
- shuffle_seed += 1 - tokenized_datasets.set_epoch(shuffle_seed) - - training_iter = iter(tokenized_datasets) - - eval_dataset = advance_iter_and_group_samples(training_iter, data_args.num_eval_samples, max_seq_length) - samples = advance_iter_and_group_samples(training_iter, train_batch_size, max_seq_length) - - # process input samples - model_inputs = data_collator(samples) - - # Model forward - model_inputs = shard(model_inputs.data) - state, train_metric, dropout_rngs = p_train_step(state, model_inputs, dropout_rngs) - - train_metrics.append(train_metric) - - if step % training_args.logging_steps == 0 and step > 0: - steps.write( - f"Step... ({step} | Loss: {train_metric['loss'].mean()}, Learning Rate:" - f" {train_metric['learning_rate'].mean()})" - ) - train_time += time.time() - train_start - if has_tensorboard and jax.process_index() == 0: - write_train_metric(summary_writer, train_metrics, train_time, step) - train_metrics = [] - - # ======================== Evaluating ============================== - if step % training_args.eval_steps == 0 and step > 0: - # Avoid using jax.numpy here in case of TPU training - eval_samples_idx = np.arange(data_args.num_eval_samples) - eval_batch_idx = generate_batch_splits(eval_samples_idx, eval_batch_size) - - for i, batch_idx in enumerate(tqdm(eval_batch_idx, desc="Evaluating ...", position=1)): - # process input samples - batch_eval_samples = {k: [v[idx] for idx in batch_idx] for k, v in eval_samples.items()} - model_inputs = data_collator(batch_eval_samples) - - # Model forward - model_inputs = shard(model_inputs.data) - metrics = p_eval_step(state.params, model_inputs) - eval_metrics.append(metrics) - - # normalize eval metrics - eval_metrics = get_metrics(eval_metrics) - eval_metrics = jax.tree_util.tree_map(jnp.sum, eval_metrics) - eval_normalizer = eval_metrics.pop("normalizer") - eval_metrics = jax.tree_util.tree_map(lambda x: x / eval_normalizer, eval_metrics) - - # Update progress bar - steps.desc = ( - f"Step... ({step + 1}/{num_train_steps} | Loss: {eval_metrics['loss']}, Acc:" - f" {eval_metrics['accuracy']})" - ) - - if has_tensorboard and jax.process_index() == 0: - write_eval_metric(summary_writer, eval_metrics, step) - eval_metrics = [] - - # save checkpoint after each epoch and push checkpoint to the hub - if jax.process_index() == 0: - params = jax.device_get(jax.tree_util.tree_map(lambda x: x[0], state.params)) - model.save_pretrained( - training_args.output_dir, - params=params, - push_to_hub=training_args.push_to_hub, - commit_message=f"Saving weights and logs of step {step+1}", - ) - - # update tqdm bar - steps.update(1) diff --git a/examples/research_projects/jax-projects/hybrid_clip/README.md b/examples/research_projects/jax-projects/hybrid_clip/README.md deleted file mode 100644 index 72d3db19358..00000000000 --- a/examples/research_projects/jax-projects/hybrid_clip/README.md +++ /dev/null @@ -1,172 +0,0 @@ - - -# Vision-Text dual encoder model training examples - -> Note: This example is experimental and might not give the best possible results - -The following example showcases how to train a CLIP like vision-text dual encoder model -using a pre-trained vision and text encoder using the JAX/Flax backend. - -Such a model can be used for natural language image search and potentially zero-shot image classification. -The model is inspired by the [CLIP](https://openai.com/blog/clip/) approach, introduced by Alec Radford et al. 
-The idea is to train a vision encoder and a text encoder jointly to project the representation of images and their
-captions into the same embedding space, such that the caption embeddings are located near the embeddings
-of the images they describe.
-
-JAX/Flax allows you to trace pure functions and compile them into efficient, fused accelerator code on both GPU and TPU.
-Models written in JAX/Flax are **immutable** and updated in a purely functional
-way which enables simple and efficient model parallelism.
-
-In this example we will use the vision model from [CLIP](https://huggingface.co/models?filter=clip)
-as the image encoder and [`FacebookAI/roberta-base`](https://huggingface.co/FacebookAI/roberta-base) as the text encoder.
-Note that one can also use the [ViT](https://huggingface.co/models?filter=vit) model as the image encoder and any other BERT or RoBERTa model as the text encoder.
-To train the model on languages other than English one should choose a text encoder trained on the desired
-language and an image-text dataset in that language. One such dataset is [WIT](https://github.com/google-research-datasets/wit).
-
-Let's start by creating a model repository to save the trained model and logs.
-Here we call the model `"clip-roberta-base"`, but you can change the model name as you like.
-
-You can do this either directly on [huggingface.co](https://huggingface.co/new) (assuming that
-you are logged in) or via the command line:
-
-```bash
-huggingface-cli repo create clip-roberta-base
-```
-Next we clone the model repository to add the tokenizer and model files.
-```bash
-git clone https://huggingface.co/<your-username>/clip-roberta-base
-```
-To ensure that all tensorboard traces will be uploaded correctly, we need to
-track them. You can run the following command inside your model repo to do so.
-
-```bash
-cd clip-roberta-base
-git lfs track "*tfevents*"
-```
-
-Great, we have set up our model repository. During training, we will automatically
-push the training logs and model weights to the repo.
-
-Next, let's add a symbolic link to `run_hybrid_clip.py`.
-
-```bash
-export MODEL_DIR="./clip-roberta-base"
-ln -s ~/transformers/examples/research_projects/jax-projects/hybrid_clip/run_hybrid_clip.py run_hybrid_clip.py
-```
-
-## How to use the `FlaxHybridCLIP` model:
-
-The `FlaxHybridCLIP` class lets you load any text and vision encoder model to create a dual encoder.
-Here is an example of how to load the model using pre-trained text and vision models.
-
-```python
-from modeling_hybrid_clip import FlaxHybridCLIP
-
-model = FlaxHybridCLIP.from_text_vision_pretrained("google-bert/bert-base-uncased", "openai/clip-vit-base-patch32")
-
-# save the model
-model.save_pretrained("bert-clip")
-
-# load the saved model
-model = FlaxHybridCLIP.from_pretrained("bert-clip")
-```
-
-If the checkpoints are in PyTorch then one can pass `text_from_pt=True` and `vision_from_pt=True`. This will load the
-PyTorch checkpoints, convert them to Flax, and load the model.
-
-```python
-model = FlaxHybridCLIP.from_text_vision_pretrained("google-bert/bert-base-uncased", "openai/clip-vit-base-patch32", text_from_pt=True, vision_from_pt=True)
-```
-
-This loads both the text and vision encoders using pre-trained weights; the projection layers are randomly
-initialized, except for CLIP's vision model. If you use CLIP to initialize the vision model then the vision projection weights are also
-loaded using the pre-trained weights.
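As a quick sanity check once a dual encoder is loaded, a rough sketch along the following lines can score a caption against an image. It only relies on the `get_text_features` and `get_image_features` methods defined in `modeling_hybrid_clip.py`; the local `"clip-roberta-base"` checkpoint and the randomly generated `pixel_values` (standing in for a properly preprocessed 224x224, channels-last image) are illustrative assumptions, not part of the original example.

```python
# Rough sketch (assumptions: a local "clip-roberta-base" checkpoint exists and
# pixel_values are already preprocessed; random data is used here as a stand-in).
import jax.numpy as jnp
import numpy as np
from transformers import AutoTokenizer

from modeling_hybrid_clip import FlaxHybridCLIP

model = FlaxHybridCLIP.from_pretrained("clip-roberta-base")
tokenizer = AutoTokenizer.from_pretrained("FacebookAI/roberta-base")

# Embed a caption with the text tower.
inputs = tokenizer(["a photo of a dog"], return_tensors="np")
text_emb = model.get_text_features(inputs["input_ids"], attention_mask=inputs["attention_mask"])

# Embed an image with the vision tower (channels-last, 224x224 for CLIP ViT-B/32).
pixel_values = np.random.rand(1, 224, 224, 3).astype("float32")
image_emb = model.get_image_features(pixel_values)

# Cosine similarity between the L2-normalized embeddings.
text_emb = text_emb / jnp.linalg.norm(text_emb, axis=-1, keepdims=True)
image_emb = image_emb / jnp.linalg.norm(image_emb, axis=-1, keepdims=True)
print(jnp.matmul(text_emb, image_emb.T))
```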
-
-## Prepare the dataset
-
-We will use the MS-COCO dataset to train our dual encoder model. MS-COCO contains over 82,000 images, each of which has at least 5 different caption annotations. The dataset is usually used for image captioning tasks, but we can repurpose the image-caption pairs to train our dual encoder model for image search.
-
-### Download and extract the data.
-
-The download consists of two compressed folders: one with the images and the other with the associated image captions. Note that the compressed images folder is 13GB in size.
-
-```bash
-wget http://images.cocodataset.org/annotations/annotations_trainval2014.zip
-wget http://images.cocodataset.org/zips/train2014.zip
-
-unzip annotations_trainval2014.zip
-unzip train2014.zip
-
-mkdir coco_dataset
-mv train2014 coco_dataset/
-mv annotations coco_dataset/
-```
-
-### Prepare dataset files and split the dataset.
-
-```python
-import json
-import collections
-
-images_dir = "coco_dataset/train2014"
-annotation_file = "coco_dataset/annotations/captions_train2014.json"
-with open(annotation_file, "r") as f:
-    annotations = json.load(f)["annotations"]
-
-image_path_to_caption = collections.defaultdict(list)
-for element in annotations:
-    caption = f"{element['caption'].lower().rstrip('.')}"
-    image_path = images_dir + "/COCO_train2014_" + "%012d.jpg" % (element["image_id"])
-    image_path_to_caption[image_path].append(caption)
-
-lines = []
-for image_path, captions in image_path_to_caption.items():
-    lines.append(json.dumps({"image_path": image_path, "captions": captions}))
-
-train_lines = lines[:-8000]
-valid_lines = lines[-8000:]
-with open("coco_dataset/train_dataset.json", "w") as f:
-    f.write("\n".join(train_lines))
-
-with open("coco_dataset/valid_dataset.json", "w") as f:
-    f.write("\n".join(valid_lines))
-```
-
-> Note: The data loading and processing part of this script can still be improved for maximum performance. In particular, one should decode the images beforehand and use those instead of decoding them each time. If the dataset is small or if you have huge disk space, you could also pre-process the whole dataset beforehand and then use it.
-
-## Train the model
-Next we can run the example script to train the model:
-
-```bash
-python run_hybrid_clip.py \
-    --output_dir ${MODEL_DIR} \
-    --text_model_name_or_path="FacebookAI/roberta-base" \
-    --vision_model_name_or_path="openai/clip-vit-base-patch32" \
-    --tokenizer_name="FacebookAI/roberta-base" \
-    --train_file="coco_dataset/train_dataset.json" \
-    --validation_file="coco_dataset/valid_dataset.json" \
-    --do_train --do_eval \
-    --num_train_epochs="40" --max_seq_length 96 \
-    --per_device_train_batch_size="64" \
-    --per_device_eval_batch_size="64" \
-    --learning_rate="5e-5" --warmup_steps="0" --weight_decay 0.1 \
-    --overwrite_output_dir \
-    --preprocessing_num_workers 32 \
-    --push_to_hub
-```
-
-This should finish in ~1h 50min with a minimum validation loss of 2.43.
Training statistics can be accessed on [tfhub.de](https://tensorboard.dev/experiment/RUNPYd1yRgSD5kZSb9hDig/#scalars) diff --git a/examples/research_projects/jax-projects/hybrid_clip/configuration_hybrid_clip.py b/examples/research_projects/jax-projects/hybrid_clip/configuration_hybrid_clip.py deleted file mode 100644 index 5272ac44a1a..00000000000 --- a/examples/research_projects/jax-projects/hybrid_clip/configuration_hybrid_clip.py +++ /dev/null @@ -1,112 +0,0 @@ -import copy - -from transformers.configuration_utils import PretrainedConfig -from transformers.utils import logging - - -logger = logging.get_logger(__name__) - - -class HybridCLIPConfig(PretrainedConfig): - r""" - :class:`HybridCLIPConfig` is the configuration class to store the configuration of a - :class:`~HybridCLIPModel`. It is used to instantiate HybridCLIPModel model according to the specified arguments, - defining the text model and vision model configs. - - Configuration objects inherit from :class:`~transformers.PretrainedConfig` and can be used to control the model - outputs. Read the documentation from :class:`~transformers.PretrainedConfig` for more information. - - Args: - text_config_dict (:obj:`dict`): - Dictionary of configuration options that defines text model config. - vision_config_dict (:obj:`dict`): - Dictionary of configuration options that defines vison model config. - projection_dim (:obj:`int`, `optional`, defaults to 512): - Dimentionality of text and vision projection layers. - kwargs (`optional`): - Dictionary of keyword arguments. - - Examples:: - - >>> from transformers import BertConfig, CLIPConfig, HybridCLIPConfig, FlaxHybridCLIP - - >>> # Initializing a BERT and CLIP configuration - >>> config_text = BertConfig() - >>> config_vision = CLIPConfig() - - >>> config = HybridCLIPConfig.from_text_vision_configs(config_text, config_vision, projection_dim=512) - - >>> # Initializing a BERT and CLIPVision model - >>> model = EncoderDecoderModel(config=config) - - >>> # Accessing the model configuration - >>> config_text = model.config.text_config - >>> config_vision = model.config.vision_config - - >>> # Saving the model, including its configuration - >>> model.save_pretrained('my-model') - - >>> # loading model and config from pretrained folder - >>> encoder_decoder_config = HybridCLIPConfig.from_pretrained('my-model') - >>> model = FlaxHybridCLIP.from_pretrained('my-model', config=encoder_decoder_config) - """ - - model_type = "hybrid-clip" - is_composition = True - - def __init__(self, projection_dim=512, **kwargs): - super().__init__(**kwargs) - - if "text_config" not in kwargs: - raise ValueError("`text_config` can not be `None`.") - - if "vision_config" not in kwargs: - raise ValueError("`vision_config` can not be `None`.") - - text_config = kwargs.pop("text_config") - vision_config = kwargs.pop("vision_config") - - text_model_type = text_config.pop("model_type") - vision_model_type = vision_config.pop("model_type") - - from transformers import AutoConfig - - self.text_config = AutoConfig.for_model(text_model_type, **text_config) - - if vision_model_type == "clip": - self.vision_config = AutoConfig.for_model(vision_model_type, **vision_config).vision_config - elif vision_model_type == "clip_vision_model": - from transformers import CLIPVisionConfig - - self.vision_config = CLIPVisionConfig(**vision_config) - else: - self.vision_config = AutoConfig.for_model(vision_model_type, **vision_config) - - self.projection_dim = projection_dim - self.initializer_factor = 1.0 - - @classmethod - def 
from_text_vision_configs(cls, text_config: PretrainedConfig, vision_config: PretrainedConfig, **kwargs): - r""" - Instantiate a :class:`HybridCLIPConfig` (or a derived class) from text model configuration and - vision model configuration. - - Returns: - :class:`HybridCLIPConfig`: An instance of a configuration object - """ - - return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs) - - def to_dict(self): - """ - Serializes this instance to a Python dictionary. Override the default - :meth:`~transformers.PretrainedConfig.to_dict`. - - Returns: - :obj:`Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance, - """ - output = copy.deepcopy(self.__dict__) - output["text_config"] = self.text_config.to_dict() - output["vision_config"] = self.vision_config.to_dict() - output["model_type"] = self.__class__.model_type - return output diff --git a/examples/research_projects/jax-projects/hybrid_clip/modeling_hybrid_clip.py b/examples/research_projects/jax-projects/hybrid_clip/modeling_hybrid_clip.py deleted file mode 100644 index 08cb3bd0b34..00000000000 --- a/examples/research_projects/jax-projects/hybrid_clip/modeling_hybrid_clip.py +++ /dev/null @@ -1,420 +0,0 @@ -# coding=utf-8 -# Copyright 2021 The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from typing import Optional, Tuple - -import flax.linen as nn -import jax -import jax.numpy as jnp -from configuration_hybrid_clip import HybridCLIPConfig -from flax.core.frozen_dict import FrozenDict - -from transformers import FLAX_MODEL_MAPPING, FlaxCLIPVisionModel -from transformers.modeling_flax_utils import FlaxPreTrainedModel -from transformers.models.clip.modeling_flax_clip import FlaxCLIPOutput -from transformers.utils import logging - - -logger = logging.get_logger(__name__) - - -class FlaxHybridCLIPModule(nn.Module): - config: HybridCLIPConfig - dtype: jnp.dtype = jnp.float32 - - def setup(self): - text_config = self.config.text_config - vision_config = self.config.vision_config - - self.projection_dim = self.config.projection_dim - self.text_embed_dim = text_config.hidden_size - self.vision_embed_dim = vision_config.hidden_size - - text_module = FLAX_MODEL_MAPPING[self.config.text_config.__class__].module_class - vision_module = FLAX_MODEL_MAPPING.get(self.config.vision_config.__class__, FlaxCLIPVisionModel).module_class - - self.text_model = text_module(text_config, dtype=self.dtype) - self.vision_model = vision_module(vision_config, dtype=self.dtype) - - self.visual_projection = nn.Dense( - self.projection_dim, - dtype=self.dtype, - kernel_init=jax.nn.initializers.normal(0.02), - use_bias=False, - ) - self.text_projection = nn.Dense( - self.projection_dim, - dtype=self.dtype, - kernel_init=jax.nn.initializers.normal(0.02), - use_bias=False, - ) - self.logit_scale = self.param("logit_scale", jax.nn.initializers.ones, []) - - def __call__( - self, - input_ids=None, - pixel_values=None, - attention_mask=None, - position_ids=None, - token_type_ids=None, - deterministic: bool = True, - output_attentions=None, - output_hidden_states=None, - return_dict=None, - ): - return_dict = return_dict if return_dict is not None else self.config.return_dict - - vision_outputs = self.vision_model( - pixel_values=pixel_values, - deterministic=deterministic, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - ) - - text_outputs = self.text_model( - input_ids=input_ids, - attention_mask=attention_mask, - token_type_ids=token_type_ids, - position_ids=position_ids, - deterministic=deterministic, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - ) - - image_embeds = vision_outputs[1] - image_embeds = self.visual_projection(image_embeds) - - text_embeds = text_outputs[1] - text_embeds = self.text_projection(text_embeds) - - # normalized features - image_embeds = image_embeds / jnp.linalg.norm(image_embeds, axis=-1, keepdims=True) - text_embeds = text_embeds / jnp.linalg.norm(text_embeds, axis=-1, keepdims=True) - - # cosine similarity as logits - logit_scale = jnp.exp(self.logit_scale) - logits_per_text = jnp.matmul(text_embeds, image_embeds.T) * logit_scale - logits_per_image = logits_per_text.T - - if not return_dict: - return (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs) - - return FlaxCLIPOutput( - logits_per_image=logits_per_image, - logits_per_text=logits_per_text, - text_embeds=text_embeds, - image_embeds=image_embeds, - text_model_output=text_outputs, - vision_model_output=vision_outputs, - ) - - -class FlaxHybridCLIP(FlaxPreTrainedModel): - config_class = HybridCLIPConfig - module_class = FlaxHybridCLIPModule - - def __init__( - self, - config: HybridCLIPConfig, - input_shape: Optional[Tuple] = None, - seed: int = 0, - 
dtype: jnp.dtype = jnp.float32, - **kwargs, - ): - if input_shape is None: - input_shape = ((1, 1), (1, config.vision_config.image_size, config.vision_config.image_size, 3)) - - module = self.module_class(config=config, dtype=dtype, **kwargs) - super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype) - - def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict: - # init input tensor - input_ids = jnp.zeros(input_shape[0], dtype="i4") - position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_shape[0]) - token_type_ids = jnp.ones_like(input_ids) - attention_mask = jnp.ones_like(input_ids) - - pixel_values = jax.random.normal(rng, input_shape[1]) - - params_rng, dropout_rng = jax.random.split(rng) - rngs = {"params": params_rng, "dropout": dropout_rng} - - return self.module.init(rngs, input_ids, pixel_values, attention_mask, position_ids, token_type_ids)["params"] - - def __call__( - self, - input_ids, - pixel_values, - attention_mask=None, - position_ids=None, - token_type_ids=None, - params: dict = None, - dropout_rng: jax.random.PRNGKey = None, - train: bool = False, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, - ): - output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) - return_dict = return_dict if return_dict is not None else self.config.return_dict - - if position_ids is None: - position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape) - - if token_type_ids is None: - token_type_ids = jnp.zeros_like(input_ids) - - if attention_mask is None: - attention_mask = jnp.ones_like(input_ids) - - # Handle any PRNG if needed - rngs = {} - if dropout_rng is not None: - rngs["dropout"] = dropout_rng - - return self.module.apply( - {"params": params or self.params}, - jnp.array(input_ids, dtype="i4"), - jnp.array(pixel_values, dtype=jnp.float32), - jnp.array(attention_mask, dtype="i4"), - jnp.array(position_ids, dtype="i4"), - jnp.array(token_type_ids, dtype="i4"), - not train, - output_attentions, - output_hidden_states, - return_dict, - rngs=rngs, - ) - - def get_text_features( - self, - input_ids, - attention_mask=None, - position_ids=None, - token_type_ids=None, - params: dict = None, - dropout_rng: jax.random.PRNGKey = None, - train=False, - ): - r""" - Args: - input_ids (:obj:`numpy.ndarray` of shape :obj:`(batch_size, sequence_length)`): - Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you - provide it. - - Indices can be obtained using :class:`~transformers.PreTrainedTokenizer`. See - :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` - for details. - - `What are input IDs? <../glossary.html#input-ids>`__ - - Returns: - text_features (:obj:`jnp.ndarray` of shape :obj:`(batch_size, output_dim`): The text embeddings - obtained by applying the projection layer to the pooled output of text model. 
- """ - if position_ids is None: - position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape) - - if token_type_ids is None: - token_type_ids = jnp.zeros_like(input_ids) - - if attention_mask is None: - attention_mask = jnp.ones_like(input_ids) - - # Handle any PRNG if needed - rngs = {} - if dropout_rng is not None: - rngs["dropout"] = dropout_rng - - def _get_features(module, input_ids, attention_mask, position_ids, token_type_ids, deterministic): - text_outputs = module.text_model( - input_ids=input_ids, - attention_mask=attention_mask, - position_ids=position_ids, - token_type_ids=token_type_ids, - deterministic=deterministic, - ) - pooled_output = text_outputs[1] - text_features = module.text_projection(pooled_output) - return text_features - - return self.module.apply( - {"params": params or self.params}, - jnp.array(input_ids, dtype="i4"), - jnp.array(attention_mask, dtype="i4"), - jnp.array(position_ids, dtype="i4"), - jnp.array(token_type_ids, dtype="i4"), - not train, - method=_get_features, - rngs=rngs, - ) - - def get_image_features( - self, pixel_values, params: dict = None, dropout_rng: jax.random.PRNGKey = None, train=False - ): - r""" - Args: - pixel_values (:obj:`numpy.ndarray` of shape :obj:`(batch_size, num_channels, height, width)`): - Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained - using :class:`~transformers.ImageFeatureExtractionMixin`. See - :meth:`transformers.ImageFeatureExtractionMixin.__call__` for details. - - Returns: - image_features (:obj:`jnp.ndarray` of shape :obj:`(batch_size, output_dim`): The image embeddings - obtained by applying the projection layer to the pooled output of vision model. - """ - - # Handle any PRNG if needed - rngs = {} - if dropout_rng is not None: - rngs["dropout"] = dropout_rng - - def _get_features(module, pixel_values, deterministic): - vision_outputs = module.vision_model(pixel_values=pixel_values, deterministic=deterministic) - pooled_output = vision_outputs[1] # pooled_output - image_features = module.visual_projection(pooled_output) - return image_features - - return self.module.apply( - {"params": params or self.params}, - jnp.array(pixel_values, dtype=jnp.float32), - not train, - method=_get_features, - rngs=rngs, - ) - - @classmethod - def from_text_vision_pretrained( - cls, - text_model_name_or_path: str = None, - vision_model_name_or_path: str = None, - *model_args, - **kwargs, - ) -> FlaxPreTrainedModel: - """ - Params: - text_model_name_or_path (:obj: `str`, `optional`): - Information necessary to initiate the text model. Can be either: - - - A string, the `model id` of a pretrained model hosted inside a model repo on huggingface.co. - - A path to a `directory` containing model weights saved using - :func:`~transformers.FlaxPreTrainedModel.save_pretrained`, e.g., ``./my_model_directory/``. - - A path or url to a `PyTorch checkpoint folder` (e.g, ``./pt_model``). In - this case, ``from_pt`` should be set to :obj:`True` and a configuration object should be provided - as ``config`` argument. This loading path is slower than converting the PyTorch checkpoint in - a Flax model using the provided conversion scripts and loading the Flax model afterwards. - - vision_model_name_or_path (:obj: `str`, `optional`, defaults to `None`): - Information necessary to initiate the vision model. Can be either: - - - A string, the `model id` of a pretrained model hosted inside a model repo on huggingface.co. 
- - A path to a `directory` containing model weights saved using - :func:`~transformers.FlaxPreTrainedModel.save_pretrained`, e.g., ``./my_model_directory/``. - - A path or url to a `PyTorch checkpoint folder` (e.g, ``./pt_model``). In - this case, ``from_pt`` should be set to :obj:`True` and a configuration object should be provided - as ``config`` argument. This loading path is slower than converting the PyTorch checkpoint in - a Flax model using the provided conversion scripts and loading the Flax model afterwards. - - model_args (remaining positional arguments, `optional`): - All remaning positional arguments will be passed to the underlying model's ``__init__`` method. - - kwargs (remaining dictionary of keyword arguments, `optional`): - Can be used to update the configuration object (after it being loaded) and initiate the model (e.g., - :obj:`output_attentions=True`). - - - To update the text configuration, use the prefix `text_` for each configuration parameter. - - To update the vision configuration, use the prefix `vision_` for each configuration parameter. - - To update the parent model configuration, do not use a prefix for each configuration parameter. - - Behaves differently depending on whether a :obj:`config` is provided or automatically loaded. - - Example:: - - >>> from transformers import FlaxHybridCLIP - >>> # initialize a model from pretrained BERT and CLIP models. Note that the projection layers will be randomly initialized. - >>> # If using CLIP's vision model the vision projection layer will be initialized using pre-trained weights - >>> model = FlaxHybridCLIP.from_text_vision_pretrained('google-bert/bert-base-uncased', 'openai/clip-vit-base-patch32') - >>> # saving model after fine-tuning - >>> model.save_pretrained("./bert-clip") - >>> # load fine-tuned model - >>> model = FlaxHybridCLIP.from_pretrained("./bert-clip") - """ - - kwargs_text = { - argument[len("text_") :]: value for argument, value in kwargs.items() if argument.startswith("text_") - } - - kwargs_vision = { - argument[len("vision_") :]: value for argument, value in kwargs.items() if argument.startswith("vision_") - } - - # remove text, vision kwargs from kwargs - for key in kwargs_text.keys(): - del kwargs["text_" + key] - for key in kwargs_vision.keys(): - del kwargs["vision_" + key] - - # Load and initialize the text and vision model - text_model = kwargs_text.pop("model", None) - if text_model is None: - assert ( - text_model_name_or_path is not None - ), "If `model` is not defined as an argument, a `text_model_name_or_path` has to be defined" - from transformers import FlaxAutoModel - - if "config" not in kwargs_text: - from transformers import AutoConfig - - text_config = AutoConfig.from_pretrained(text_model_name_or_path) - kwargs_text["config"] = text_config - - text_model = FlaxAutoModel.from_pretrained(text_model_name_or_path, *model_args, **kwargs_text) - - vision_model = kwargs_vision.pop("model", None) - if vision_model is None: - assert ( - vision_model_name_or_path is not None - ), "If `model` is not defined as an argument, a `vision_model_name_or_path` has to be defined" - from transformers import FlaxAutoModel - - if "config" not in kwargs_vision: - from transformers import AutoConfig - - vision_config = AutoConfig.from_pretrained(vision_model_name_or_path) - kwargs_vision["config"] = vision_config - - vision_model = FlaxAutoModel.from_pretrained(vision_model_name_or_path, *model_args, **kwargs_vision) - - # instantiate config with corresponding kwargs - dtype = kwargs.pop("dtype", 
jnp.float32) - config = HybridCLIPConfig.from_text_vision_configs(text_model.config, vision_model.config, **kwargs) - - # init model - model = cls(config, *model_args, dtype=dtype, **kwargs) - - if vision_config.model_type == "clip": - model.params["vision_model"]["vision_model"] = vision_model.params["vision_model"] - model.params["visual_projection"]["kernel"] = vision_model.params["visual_projection"]["kernel"] - else: - model.params["vision_model"] = vision_model.params - - model.params["text_model"] = text_model.params - - return model diff --git a/examples/research_projects/jax-projects/hybrid_clip/requirements.txt b/examples/research_projects/jax-projects/hybrid_clip/requirements.txt deleted file mode 100644 index 7b465dde645..00000000000 --- a/examples/research_projects/jax-projects/hybrid_clip/requirements.txt +++ /dev/null @@ -1,8 +0,0 @@ -jax>=0.2.8 -jaxlib>=0.1.59 -flax>=0.3.5 -optax>=0.0.8 --f https://download.pytorch.org/whl/torch_stable.html -torch==2.2.0 --f https://download.pytorch.org/whl/torch_stable.html -torchvision==0.10.0+cpu \ No newline at end of file diff --git a/examples/research_projects/jax-projects/hybrid_clip/run_hybrid_clip.py b/examples/research_projects/jax-projects/hybrid_clip/run_hybrid_clip.py deleted file mode 100644 index 2020f0a35c4..00000000000 --- a/examples/research_projects/jax-projects/hybrid_clip/run_hybrid_clip.py +++ /dev/null @@ -1,576 +0,0 @@ -#!/usr/bin/env python -# coding=utf-8 -# Copyright 2021 The HuggingFace Team All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" -Training a CLIP like dual encoder models using text and vision encoders in the library. - -The script can be used to train CLIP like models for languages other than english by using -a text encoder pre-trained in the desired language. 
Currently this script support the following vision -and text models: -Vision models: ViT(https://huggingface.co/models?filter=vit), CLIP (https://huggingface.co/models?filter=clip) -Text models: BERT, ROBERTa (https://huggingface.co/models?filter=fill-mask) -""" - -import json -import logging -import os -import sys -import time -from dataclasses import dataclass, field -from pathlib import Path -from typing import Callable, Optional - -import jax -import jax.numpy as jnp -import optax -import torch -from flax import jax_utils -from flax.jax_utils import unreplicate -from flax.training import train_state -from flax.training.common_utils import get_metrics, shard, shard_prng_key -from modeling_hybrid_clip import FlaxHybridCLIP -from torchvision.datasets import VisionDataset -from torchvision.io import ImageReadMode, read_image -from torchvision.transforms import CenterCrop, ConvertImageDtype, Normalize, Resize -from torchvision.transforms.functional import InterpolationMode -from tqdm import tqdm - -import transformers -from transformers import AutoTokenizer, HfArgumentParser, TrainingArguments, is_tensorboard_available, set_seed - - -logger = logging.getLogger(__name__) - -# Cache the result -has_tensorboard = is_tensorboard_available() -if has_tensorboard: - try: - from flax.metrics.tensorboard import SummaryWriter - except ImportError as ie: - has_tensorboard = False - print(f"Unable to display metrics through TensorBoard because some package are not installed: {ie}") - -else: - print( - "Unable to display metrics through TensorBoard because the package is not installed: " - "Please run pip install tensorboard to enable." - ) - - -@dataclass -class ModelArguments: - """ - Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch. - """ - - text_model_name_or_path: str = field( - metadata={ - "help": ( - "The text model checkpoint for weights initialization. " - "Don't set if you want to train a model from scratch." - ) - }, - ) - vision_model_name_or_path: str = field( - metadata={ - "help": ( - "The vision model checkpoint for weights initialization. " - "Don't set if you want to train a model from scratch." - ) - }, - ) - from_pt: bool = field( - default=True, - metadata={"help": "whether to load the text and vision model using PyTorch checkpoints."}, - ) - config_name: Optional[str] = field( - default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} - ) - tokenizer_name: Optional[str] = field( - default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} - ) - cache_dir: Optional[str] = field( - default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"} - ) - use_fast_tokenizer: bool = field( - default=True, - metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, - ) - dtype: Optional[str] = field( - default="float32", - metadata={ - "help": ( - "Floating-point format in which the model weights should be initialized and trained. Choose one of" - " `[float32, float16, bfloat16]`." - ) - }, - ) - - -@dataclass -class DataTrainingArguments: - """ - Arguments pertaining to what data we are going to input our model for training and eval. 
- """ - - data_dir: Optional[str] = field(default=None, metadata={"help": "The data directory containing input files."}) - train_file: Optional[str] = field( - default=None, metadata={"help": "The input training data file (a jsonlines file)."} - ) - validation_file: Optional[str] = field( - default=None, - metadata={"help": "An optional input evaluation data file (a jsonlines file)."}, - ) - max_seq_length: Optional[int] = field( - default=72, - metadata={ - "help": ( - "The maximum total input sequence length after tokenization. Sequences longer " - "than this will be truncated, sequences shorter will be padded." - ) - }, - ) - max_train_samples: Optional[int] = field( - default=None, - metadata={ - "help": ( - "For debugging purposes or quicker training, truncate the number of training examples to this " - "value if set." - ) - }, - ) - max_eval_samples: Optional[int] = field( - default=None, - metadata={ - "help": ( - "For debugging purposes or quicker training, truncate the number of evaluation examples to this " - "value if set." - ) - }, - ) - overwrite_cache: bool = field( - default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} - ) - preprocessing_num_workers: Optional[int] = field( - default=None, - metadata={"help": "The number of processes to use for the preprocessing."}, - ) - - def __post_init__(self): - if self.train_file is None and self.validation_file is None: - raise ValueError("Need either a dataset name or a training/validation file.") - else: - if self.train_file is not None: - extension = self.train_file.split(".")[-1] - assert extension == "json", "`train_file` should be a json file." - if self.validation_file is not None: - extension = self.validation_file.split(".")[-1] - assert extension == "json", "`validation_file` should be a json file." - - -# We use torchvision for faster image pre-processing. -# We need to ensure faster processing speed as it can become a bottleneck on TPU -class Transform(torch.nn.Module): - def __init__(self, image_size): - super().__init__() - self.transforms = torch.nn.Sequential( - Resize([image_size], interpolation=InterpolationMode.BICUBIC), - CenterCrop(image_size), - ConvertImageDtype(torch.float), - Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)), - ) - - def forward(self, x: torch.Tensor) -> torch.Tensor: - with torch.no_grad(): - x = self.transforms(x) - return x - - -class ImageTextDataset(VisionDataset): - """ - Dtaset for loading image-text data for tasks like CLIP training, Image Captioning. - - Args: - root: (string): The root path where the dataset is stored - file_path: (string): Path to the file containing the image_paths and associated captions. - The expected format is jsonlines where each line is a json object containing to keys. - `image_path`: The path to the image. - `captions`: An `array` of captions. - transform (callable, optional): A function/transform that takes in an PIL image - and returns a transformed version. E.g, ``transforms.ToTensor`` - target_transform (callable, optional): A function/transform that takes in the - target and transforms it. - transforms (callable, optional): A function/transform that takes input sample and its target as entry - and returns a transformed version. 
- """ - - def __init__( - self, - root: str, - file_path: str, - captions_per_image=2, - transform: Optional[Callable] = None, - target_transform: Optional[Callable] = None, - transforms: Optional[Callable] = None, - ): - super().__init__(root, transforms, transform, target_transform) - - with open(file_path, "r") as f: - examples = [json.loads(line) for line in f.readlines()] - - self.captions = [] - self.image_paths = [] - - for example in examples: - captions_subset = example["captions"][:captions_per_image] - self.captions.extend(captions_subset) - self.image_paths.extend([example["image_path"]] * len(captions_subset)) - - def _load_image(self, idx: int): - path = self.image_paths[idx] - return read_image(path, mode=ImageReadMode.RGB) - - def _load_target(self, idx): - return self.captions[idx] - - def __getitem__(self, index: int): - image = self._load_image(index) - target = self._load_target(index) - - if self.transforms is not None: - image, target = self.transforms(image, target) - - return image, target - - def __len__(self) -> int: - return len(self.captions) - - -class TrainState(train_state.TrainState): - dropout_rng: jnp.ndarray - - def replicate(self): - return jax_utils.replicate(self).replace(dropout_rng=shard_prng_key(self.dropout_rng)) - - -def write_metric(summary_writer, train_metrics, eval_metrics, train_time, step): - summary_writer.scalar("train_time", train_time, step) - - train_metrics = get_metrics(train_metrics) - for key, vals in train_metrics.items(): - tag = f"train_{key}" - for i, val in enumerate(vals): - summary_writer.scalar(tag, val, step - len(vals) + i + 1) - - for metric_name, value in eval_metrics.items(): - summary_writer.scalar(f"eval_{metric_name}", value, step) - - -def create_learning_rate_fn( - train_ds_size: int, train_batch_size: int, num_train_epochs: int, num_warmup_steps: int, learning_rate: float -) -> Callable[[int], jnp.ndarray]: - """Returns a linear warmup, linear_decay learning rate function.""" - steps_per_epoch = train_ds_size // train_batch_size - num_train_steps = steps_per_epoch * num_train_epochs - warmup_fn = optax.linear_schedule(init_value=0.0, end_value=learning_rate, transition_steps=num_warmup_steps) - decay_fn = optax.linear_schedule( - init_value=learning_rate, end_value=0, transition_steps=num_train_steps - num_warmup_steps - ) - schedule_fn = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[num_warmup_steps]) - return schedule_fn - - -def main(): - parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) - if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): - # If we pass only one argument to the script and it's the path to a json file, - # let's parse it to get our arguments. - model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) - else: - model_args, data_args, training_args = parser.parse_args_into_dataclasses() - - if ( - os.path.exists(training_args.output_dir) - and os.listdir(training_args.output_dir) - and training_args.do_train - and not training_args.overwrite_output_dir - ): - raise ValueError( - f"Output directory ({training_args.output_dir}) already exists and is not empty. " - "Use --overwrite_output_dir to overcome." - ) - - # Make one log on every process with the configuration for debugging. 
- logging.basicConfig( - format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", - datefmt="%m/%d/%Y %H:%M:%S", - level=logging.INFO, - ) - # Setup logging, we only want one process per machine to log things on the screen. - logger.setLevel(logging.INFO if jax.process_index() == 0 else logging.ERROR) - if jax.process_index() == 0: - transformers.utils.logging.set_verbosity_info() - else: - transformers.utils.logging.set_verbosity_error() - - # Set the verbosity to info of the Transformers logger (on main process only): - logger.info(f"Training/evaluation parameters {training_args}") - - if model_args.tokenizer_name: - tokenizer = AutoTokenizer.from_pretrained( - model_args.tokenizer_name, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer - ) - elif model_args.text_model_name_or_path: - tokenizer = AutoTokenizer.from_pretrained( - model_args.text_model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer - ) - else: - raise ValueError( - "You are instantiating a new tokenizer from scratch. This is not supported by this script. " - "You can do it from another script, save it, and load it from here, using --tokenizer_name." - ) - - model = FlaxHybridCLIP.from_text_vision_pretrained( - model_args.text_model_name_or_path, - model_args.vision_model_name_or_path, - seed=training_args.seed, - dtype=getattr(jnp, model_args.dtype), - text_from_pt=model_args.from_pt, - vision_from_pt=model_args.from_pt, - ) - config = model.config - # set seed for torch dataloaders - set_seed(training_args.seed) - - # Initialize torchvision transforms and jit them for faster processing - preprocess = Transform(config.vision_config.image_size) - preprocess = torch.jit.script(preprocess) - - # Initialize the image-text dataset - train_dataset = ImageTextDataset( - data_args.data_dir, - data_args.train_file, - captions_per_image=2, - transform=preprocess, - ) - - eval_dataset = ImageTextDataset( - data_args.data_dir, - data_args.validation_file, - captions_per_image=1, - transform=preprocess, - ) - - # Store some constant - num_epochs = int(training_args.num_train_epochs) - train_batch_size = int(training_args.per_device_train_batch_size) * jax.device_count() - eval_batch_size = int(training_args.per_device_eval_batch_size) * jax.device_count() - steps_per_epoch = len(train_dataset) // train_batch_size - total_train_steps = steps_per_epoch * num_epochs - - # Use collate function to tokenizer the text and convert the processed images to numpy - def collate_fn(examples): - pixel_values = torch.stack([example[0] for example in examples]).permute(0, 2, 3, 1).numpy() - captions = [example[1] for example in examples] - inputs = tokenizer( - captions, max_length=data_args.max_seq_length, padding="max_length", truncation=True, return_tensors="np" - ) - - batch = { - "pixel_values": pixel_values, - "input_ids": inputs["input_ids"], - "attention_mask": inputs["attention_mask"], - } - - return batch - - # Create data loaders - train_loader = torch.utils.data.DataLoader( - train_dataset, - batch_size=train_batch_size, - shuffle=True, - num_workers=data_args.preprocessing_num_workers, - persistent_workers=True, - drop_last=True, - collate_fn=collate_fn, - ) - - eval_loader = torch.utils.data.DataLoader( - eval_dataset, - batch_size=eval_batch_size, - shuffle=False, - num_workers=data_args.preprocessing_num_workers, - persistent_workers=True, - drop_last=True, - collate_fn=collate_fn, - ) - - # Enable tensorboard only on the master node - if has_tensorboard and 
jax.process_index() == 0: - summary_writer = SummaryWriter(log_dir=Path(training_args.output_dir).joinpath("logs").as_posix()) - - # Initialize our training - rng = jax.random.PRNGKey(training_args.seed) - rng, dropout_rng = jax.random.split(rng) - - # Create learning rate schedule - linear_decay_lr_schedule_fn = create_learning_rate_fn( - len(train_dataset), - train_batch_size, - training_args.num_train_epochs, - training_args.warmup_steps, - training_args.learning_rate, - ) - - # create adam optimizer - adamw = optax.adamw( - learning_rate=linear_decay_lr_schedule_fn, - b1=training_args.adam_beta1, - b2=training_args.adam_beta2, - eps=training_args.adam_epsilon, - weight_decay=training_args.weight_decay, - ) - - # Setup train state - state = TrainState.create(apply_fn=model.__call__, params=model.params, tx=adamw, dropout_rng=dropout_rng) - - def cross_entropy(logits, axis): - logprobs = jax.nn.log_softmax(logits, axis=axis) - nll = jnp.diag(logprobs) - ce = -jnp.mean(nll) - return ce - - def clip_loss(similarity): - loss = (cross_entropy(similarity, axis=0) + cross_entropy(similarity, axis=1)) / 2 - return loss - - # Define gradient update step fn - def train_step(state, batch): - dropout_rng, new_dropout_rng = jax.random.split(state.dropout_rng) - - def compute_loss(params): - logits = state.apply_fn(**batch, params=params, dropout_rng=dropout_rng, train=True)[0] - loss = clip_loss(logits) - return loss - - grad_fn = jax.value_and_grad(compute_loss) - loss, grad = grad_fn(state.params) - grad = jax.lax.pmean(grad, "batch") - - new_state = state.apply_gradients(grads=grad, dropout_rng=new_dropout_rng) - - metrics = {"loss": loss, "learning_rate": linear_decay_lr_schedule_fn(state.step)} - metrics = jax.lax.pmean(metrics, axis_name="batch") - - return new_state, metrics - - # Define eval fn - def eval_step(params, batch): - logits = model(**batch, params=params, train=False)[0] - loss = clip_loss(logits) - - # summarize metrics - metrics = {"loss": loss} - metrics = jax.lax.pmean(metrics, axis_name="batch") - return metrics - - # Create parallel version of the train and eval step - p_train_step = jax.pmap(train_step, "batch", donate_argnums=(0,)) - p_eval_step = jax.pmap(eval_step, "batch") - - # Replicate the train state on each device - state = state.replicate() - - logger.info("***** Running training *****") - logger.info(f" Num examples = {len(train_dataset)}") - logger.info(f" Num Epochs = {num_epochs}") - logger.info(f" Instantaneous batch size per device = {training_args.per_device_train_batch_size}") - logger.info(f" Total train batch size (w. parallel & distributed) = {train_batch_size}") - logger.info(f" Total optimization steps = {total_train_steps}") - - train_time = 0 - # Create sampling rng - rng, input_rng = jax.random.split(rng) - - epochs = tqdm(range(num_epochs), desc=f"Epoch ... 
(1/{num_epochs})", position=0) - for epoch in epochs: - # ======================== Training ================================ - train_start = time.time() - - # Create sampling rng - rng, input_rng = jax.random.split(rng) - train_metrics = [] - - steps_per_epoch = len(train_dataset) // train_batch_size - train_step_progress_bar = tqdm(total=steps_per_epoch, desc="Training...", position=1, leave=False) - # train - for batch in train_loader: - batch = shard(batch) - state, train_metric = p_train_step(state, batch) - train_metrics.append(train_metric) - - train_step_progress_bar.update(1) - - train_time += time.time() - train_start - - train_metric = unreplicate(train_metric) - - train_step_progress_bar.close() - epochs.write( - f"Epoch... ({epoch + 1}/{num_epochs} | Loss: {train_metric['loss']}, Learning Rate:" - f" {train_metric['learning_rate']})" - ) - - # ======================== Evaluating ============================== - eval_metrics = [] - eval_steps = len(eval_dataset) // eval_batch_size - eval_step_progress_bar = tqdm(total=eval_steps, desc="Evaluating...", position=2, leave=False) - for batch in eval_loader: - # Model forward - batch = shard(batch) - metrics = p_eval_step(state.params, batch) - eval_metrics.append(metrics) - - eval_step_progress_bar.update(1) - - # normalize eval metrics - eval_metrics = get_metrics(eval_metrics) - - eval_metrics = jax.tree_util.tree_map(jnp.mean, eval_metrics) - - # Print metrics and update progress bar - eval_step_progress_bar.close() - desc = f"Epoch... ({epoch + 1}/{num_epochs} | Eval Loss: {eval_metrics['loss']})" - epochs.write(desc) - epochs.desc = desc - - # Save metrics - if has_tensorboard and jax.process_index() == 0: - cur_step = epoch * (len(train_dataset) // train_batch_size) - write_metric(summary_writer, train_metrics, eval_metrics, train_time, cur_step) - - # save checkpoint after each epoch and push checkpoint to the hub - if jax.process_index() == 0: - params = jax.device_get(unreplicate(state.params)) - model.save_pretrained( - training_args.output_dir, - params=params, - push_to_hub=training_args.push_to_hub, - commit_message=f"Saving weights and logs of epoch {epoch+1}", - ) - - -if __name__ == "__main__": - main() diff --git a/examples/research_projects/jax-projects/model_parallel/README.md b/examples/research_projects/jax-projects/model_parallel/README.md deleted file mode 100644 index 393c9e89375..00000000000 --- a/examples/research_projects/jax-projects/model_parallel/README.md +++ /dev/null @@ -1,67 +0,0 @@ - - -# Model parallel language model training example - -The following example showcases how to train/fine-tune GPTNeo model with model parallelism using -the JAX/Flax backend and the [`pjit`](https://jax.readthedocs.io/en/latest/jax.experimental.pjit.html) transformation. - -> Note: The example is experimental and might have bugs. Also currently it only supports single V3-8. - -The `partition.py` file defines the `PyTree` of `ParitionSpec` for the GPTNeo model which describes how the model will be sharded. -The actual sharding is auto-matically handled by `pjit`. The weights are sharded across all local devices. -To adapt the script for other models, we need to also change the `ParitionSpec` accordingly. - -TODO: Add more explantion. - -Before training, let's prepare our model first. To be able to shard the model, the sharded dimension needs to be a multiple of devices it'll be sharded on. But GPTNeo's vocab size is 50257, so we need to resize the embeddings accordingly. 
- -```python -from transformers import FlaxGPTNeoForCausalLM, GPTNeoConfig -import jax.numpy as jnp -model = FlaxGPTNeoForCausalLM.from_pretrained("EleutherAI/gpt-neo-1.3B") - -emb = jnp.zeros((50264, model.config.hidden_size)) -# update the first 50257 weights using pre-trained weights -emb = emb.at[:50257, :].set(model.params["transformer"]["wte"]["embedding"]) -params = model.params -params["transformer"]["wte"]["embedding"] = emb - -# initialize a random model with the right vocab_size -config = GPTNeoConfig.from_pretrained("EleutherAI/gpt-neo-1.3B", vocab_size=50264) -model = FlaxGPTNeoForCausalLM(config) - -# assign the pre-trained weights and save the model. -model.params = params -model.save_pretrained("gpt-neo-1.3B") -``` - - -### Train Model - -```bash -python run_clm_mp.py \ - --model_name_or_path gpt-neo-1.3B \ - --tokenizer_name openai-community/gpt2 \ - --dataset_name wikitext --dataset_config_name wikitext-2-raw-v1 \ - --do_train --do_eval \ - --block_size 1024 \ - --num_train_epochs 5 \ - --learning_rate 4e-6 \ - --per_device_train_batch_size 3 --per_device_eval_batch_size 3 \ - --overwrite_output_dir --output_dir ~/tmp/flax-clm \ - --cache_dir ~/datasets_cache/wikitext --dtype bfloat16 \ - --logging_steps 96 --eval_steps 96 -``` \ No newline at end of file diff --git a/examples/research_projects/jax-projects/model_parallel/partitions.py b/examples/research_projects/jax-projects/model_parallel/partitions.py deleted file mode 100644 index 86e54ad6702..00000000000 --- a/examples/research_projects/jax-projects/model_parallel/partitions.py +++ /dev/null @@ -1,85 +0,0 @@ -#!/usr/bin/env python -# coding=utf-8 -# Copyright 2021 The Google Research Authors and The HuggingFace Team All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License.
-"""Utilities for constructing PyTrees of PartitionSpecs.""" - -# utils adapted from https://github.com/google-research/google-research/blob/master/flax_models/t5x/partitions.py - -import re - -from flax.core.frozen_dict import freeze -from flax.traverse_util import flatten_dict, unflatten_dict -from jax.experimental import PartitionSpec as P - - -# Sentinels -_unmatched = object() - -# For specifying empty leaf dict `{}` -empty_dict = object() - - -def _match(qs, ks): - """Return True if regexes in qs match any window of strings in tuple ks.""" - # compile regexes and force complete match - qts = tuple((re.compile(x + "$") for x in qs)) - for i in range(len(ks) - len(qs) + 1): - matches = [x.match(y) for x, y in zip(qts, ks[i:])] - if matches and all(matches): - return True - return False - - -def _replacement_rules(rules): - def replace(key, val): - for rule, replacement in rules: - if _match(rule, key): - return replacement - return val - - return replace - - -# PartitionSpec for GPTNeo -# replicate the hidden dim and shard feed-forward and head dim -def _get_partition_rules(): - return [ - # embeddings - (("transformer", "wpe", "embedding"), P("mp", None)), - (("transformer", "wte", "embedding"), P("mp", None)), - # atention - (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")), - (("attention", "out_proj", "kernel"), P("mp", None)), - (("attention", "out_proj", "bias"), None), - # mlp - (("mlp", "c_fc", "kernel"), P(None, "mp")), - (("mlp", "c_fc", "bias"), P("mp")), - (("mlp", "c_proj", "kernel"), P("mp", None)), - (("mlp", "c_proj", "bias"), None), - # layer norms - ((r"ln_\d+", "bias"), None), - ((r"\d+", r"ln_\d+", "scale"), None), - (("ln_f", "bias"), None), - (("ln_f", "scale"), None), - ] - - -def set_partitions(in_dict): - rules = _get_partition_rules() - replace = _replacement_rules(rules) - initd = {k: _unmatched for k in flatten_dict(in_dict)} - result = {k: replace(k, v) for k, v in initd.items()} - assert _unmatched not in result.values(), "Incomplete partition spec." - return freeze(unflatten_dict(result)) diff --git a/examples/research_projects/jax-projects/model_parallel/run_clm_mp.py b/examples/research_projects/jax-projects/model_parallel/run_clm_mp.py deleted file mode 100644 index 067f7cb2b18..00000000000 --- a/examples/research_projects/jax-projects/model_parallel/run_clm_mp.py +++ /dev/null @@ -1,662 +0,0 @@ -#!/usr/bin/env python -# coding=utf-8 -# Copyright 2021 The HuggingFace Team All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" -Pre-training/Fine-tuning the GPTNeo model for causal language modeling on a text file or a dataset using model parallelism. 
-""" - -import logging -import math -import os -import sys -import time -from dataclasses import dataclass, field -from itertools import chain -from pathlib import Path -from typing import Callable, Optional - -import datasets -import jax -import jax.numpy as jnp -import numpy as np -import optax -from datasets import Dataset, load_dataset -from flax.core.frozen_dict import freeze, unfreeze -from flax.training.common_utils import onehot, stack_forest -from jax.experimental.maps import mesh -from jax.experimental.pjit import pjit -from partitions import set_partitions -from tqdm import tqdm - -import transformers -from transformers import ( - CONFIG_MAPPING, - FLAX_MODEL_FOR_CAUSAL_LM_MAPPING, - AutoConfig, - AutoTokenizer, - FlaxAutoModelForCausalLM, - HfArgumentParser, - TrainingArguments, - is_tensorboard_available, -) -from transformers.testing_utils import CaptureLogger - - -logger = logging.getLogger(__name__) - -MODEL_CONFIG_CLASSES = list(FLAX_MODEL_FOR_CAUSAL_LM_MAPPING.keys()) -MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) - - -@dataclass -class ModelArguments: - """ - Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch. - """ - - model_name_or_path: Optional[str] = field( - default=None, - metadata={ - "help": ( - "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch." - ) - }, - ) - model_type: Optional[str] = field( - default=None, - metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)}, - ) - config_name: Optional[str] = field( - default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} - ) - tokenizer_name: Optional[str] = field( - default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} - ) - cache_dir: Optional[str] = field( - default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"} - ) - use_fast_tokenizer: bool = field( - default=True, - metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, - ) - dtype: Optional[str] = field( - default="float32", - metadata={ - "help": ( - "Floating-point format in which the model weights should be initialized and trained. Choose one of" - " `[float32, float16, bfloat16]`." - ) - }, - ) - - -@dataclass -class DataTrainingArguments: - """ - Arguments pertaining to what data we are going to input our model for training and eval. - """ - - dataset_name: Optional[str] = field( - default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."} - ) - dataset_config_name: Optional[str] = field( - default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} - ) - train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."}) - validation_file: Optional[str] = field( - default=None, - metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."}, - ) - max_train_samples: Optional[int] = field( - default=None, - metadata={ - "help": ( - "For debugging purposes or quicker training, truncate the number of training examples to this " - "value if set." 
- ) - }, - ) - max_eval_samples: Optional[int] = field( - default=None, - metadata={ - "help": ( - "For debugging purposes or quicker training, truncate the number of evaluation examples to this " - "value if set." - ) - }, - ) - overwrite_cache: bool = field( - default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} - ) - validation_split_percentage: Optional[int] = field( - default=5, - metadata={ - "help": "The percentage of the train set used as validation set in case there's no validation split" - }, - ) - block_size: Optional[int] = field( - default=None, - metadata={ - "help": ( - "Optional input sequence length after tokenization. " - "The training dataset will be truncated in block of this size for training. " - "Default to the model max input length for single sentence inputs (take into account special tokens)." - ) - }, - ) - preprocessing_num_workers: Optional[int] = field( - default=None, - metadata={"help": "The number of processes to use for the preprocessing."}, - ) - - def __post_init__(self): - if self.dataset_name is None and self.train_file is None and self.validation_file is None: - raise ValueError("Need either a dataset name or a training/validation file.") - else: - if self.train_file is not None: - extension = self.train_file.split(".")[-1] - assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file." - if self.validation_file is not None: - extension = self.validation_file.split(".")[-1] - assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file." - - -def data_loader(rng: jax.random.PRNGKey, dataset: Dataset, batch_size: int, shuffle: bool = False): - """ - Returns batches of size `batch_size` from truncated `dataset`, sharded over all local devices. - Shuffle batches if `shuffle` is `True`. - """ - steps_per_epoch = len(dataset) // batch_size - - if shuffle: - batch_idx = jax.random.permutation(rng, len(dataset)) - else: - batch_idx = jnp.arange(len(dataset)) - - batch_idx = batch_idx[: steps_per_epoch * batch_size] # Skip incomplete batch. 
- batch_idx = batch_idx.reshape((steps_per_epoch, batch_size)) - - for idx in batch_idx: - batch = dataset[idx] - batch = {k: jnp.array(v) for k, v in batch.items()} - yield batch - - -def write_train_metric(summary_writer, train_metrics, train_time, step): - summary_writer.scalar("train_time", train_time, step) - - train_metrics = stack_forest(train_metrics) - for key, vals in train_metrics.items(): - tag = f"train_{key}" - for i, val in enumerate(vals): - summary_writer.scalar(tag, val, step - len(vals) + i + 1) - - -def write_eval_metric(summary_writer, eval_metrics, step): - for metric_name, value in eval_metrics.items(): - summary_writer.scalar(f"eval_{metric_name}", value, step) - - -def create_learning_rate_fn( - train_ds_size: int, train_batch_size: int, num_train_epochs: int, num_warmup_steps: int, learning_rate: float -) -> Callable[[int], jnp.ndarray]: - """Returns a linear warmup, linear_decay learning rate function.""" - steps_per_epoch = train_ds_size // train_batch_size - num_train_steps = steps_per_epoch * num_train_epochs - warmup_fn = optax.linear_schedule(init_value=0.0, end_value=learning_rate, transition_steps=num_warmup_steps) - decay_fn = optax.linear_schedule( - init_value=learning_rate, end_value=0, transition_steps=num_train_steps - num_warmup_steps - ) - schedule_fn = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[num_warmup_steps]) - return schedule_fn - - -def main(): - # See all possible arguments in src/transformers/training_args.py - # or by passing the --help flag to this script. - # We now keep distinct sets of args, for a cleaner separation of concerns. - - parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) - if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): - # If we pass only one argument to the script and it's the path to a json file, - # let's parse it to get our arguments. - model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) - else: - model_args, data_args, training_args = parser.parse_args_into_dataclasses() - - if ( - os.path.exists(training_args.output_dir) - and os.listdir(training_args.output_dir) - and training_args.do_train - and not training_args.overwrite_output_dir - ): - raise ValueError( - f"Output directory ({training_args.output_dir}) already exists and is not empty. " - "Use --overwrite_output_dir to overcome." - ) - - # Make one log on every process with the configuration for debugging. - logging.basicConfig( - format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", - datefmt="%m/%d/%Y %H:%M:%S", - level=logging.INFO, - ) - # Setup logging, we only want one process per machine to log things on the screen. - logger.setLevel(logging.INFO if jax.process_index() == 0 else logging.ERROR) - if jax.process_index() == 0: - datasets.utils.logging.set_verbosity_warning() - transformers.utils.logging.set_verbosity_info() - else: - datasets.utils.logging.set_verbosity_error() - transformers.utils.logging.set_verbosity_error() - - # Set the verbosity to info of the Transformers logger (on main process only): - logger.info(f"Training/evaluation parameters {training_args}") - - # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) - # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ - # (the dataset will be downloaded automatically from the datasets Hub). 
- # - # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called - # 'text' is found. You can easily tweak this behavior (see below). - if data_args.dataset_name is not None: - # Downloading and loading a dataset from the hub. - dataset = load_dataset( - data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir, keep_in_memory=False - ) - - if "validation" not in dataset.keys(): - dataset["validation"] = load_dataset( - data_args.dataset_name, - data_args.dataset_config_name, - split=f"train[:{data_args.validation_split_percentage}%]", - cache_dir=model_args.cache_dir, - ) - dataset["train"] = load_dataset( - data_args.dataset_name, - data_args.dataset_config_name, - split=f"train[{data_args.validation_split_percentage}%:]", - cache_dir=model_args.cache_dir, - ) - else: - data_files = {} - if data_args.train_file is not None: - data_files["train"] = data_args.train_file - extension = data_args.train_file.split(".")[-1] - if data_args.validation_file is not None: - data_files["validation"] = data_args.validation_file - extension = data_args.validation_file.split(".")[-1] - if extension == "txt": - extension = "text" - dataset = load_dataset(extension, data_files=data_files, cache_dir=model_args.cache_dir) - # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at - # https://huggingface.co/docs/datasets/loading_datasets. - - # Load pretrained config and tokenizer - if model_args.config_name: - config = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir) - elif model_args.model_name_or_path: - config = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir) - else: - config = CONFIG_MAPPING[model_args.model_type]() - logger.warning("You are instantiating a new config instance from scratch.") - - if model_args.tokenizer_name: - tokenizer = AutoTokenizer.from_pretrained( - model_args.tokenizer_name, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer - ) - elif model_args.model_name_or_path: - tokenizer = AutoTokenizer.from_pretrained( - model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer - ) - else: - raise ValueError( - "You are instantiating a new tokenizer from scratch. This is not supported by this script. " - "You can do it from another script, save it, and load it from here, using --tokenizer_name." - ) - - if training_args.do_train: - column_names = dataset["train"].column_names - else: - column_names = dataset["validation"].column_names - text_column_name = "text" if "text" in column_names else column_names[0] - - # since this will be pickled to avoid _LazyModule error in Hasher force logger loading before tokenize_function - tok_logger = transformers.utils.logging.get_logger("transformers.tokenization_utils_base") - - def tokenize_function(examples): - with CaptureLogger(tok_logger) as cl: - output = tokenizer(examples[text_column_name]) - # clm input could be much much longer than block_size - if "Token indices sequence length is longer than the" in cl.out: - tok_logger.warning( - "^^^^^^^^^^^^^^^^ Please ignore the warning above - this long input will be chunked into smaller bits" - " before being passed to the model." 
- ) - return output - - tokenized_datasets = dataset.map( - tokenize_function, - batched=True, - num_proc=data_args.preprocessing_num_workers, - remove_columns=column_names, - load_from_cache_file=not data_args.overwrite_cache, - ) - - if data_args.block_size is None: - block_size = tokenizer.model_max_length - if block_size > config.max_position_embeddings: - logger.warning( - f"The tokenizer picked seems to have a very large `model_max_length` ({tokenizer.model_max_length}). " - f"Using block_size={min(1024, config.max_position_embeddings)} instead. You can change that default value by passing --block_size xxx." - ) - block_size = min(1024, config.max_position_embeddings) - else: - if data_args.block_size > tokenizer.model_max_length: - logger.warning( - f"The block_size passed ({data_args.block_size}) is larger than the maximum length for the model " - f"({tokenizer.model_max_length}). Using block_size={tokenizer.model_max_length}." - ) - block_size = min(data_args.block_size, tokenizer.model_max_length) - - # Main data processing function that will concatenate all texts from our dataset and generate chunks of block_size. - def group_texts(examples): - # Concatenate all texts. - concatenated_examples = {k: list(chain(*examples[k])) for k in examples.keys()} - total_length = len(concatenated_examples[list(examples.keys())[0]]) - # We drop the small remainder, we could add padding if the model supported it instead of this drop, you can - # customize this part to your needs. - if total_length >= block_size: - total_length = (total_length // block_size) * block_size - # Split by chunks of max_len. - result = { - k: [t[i : i + block_size] for i in range(0, total_length, block_size)] - for k, t in concatenated_examples.items() - } - result["labels"] = result["input_ids"].copy() - return result - - # Note that with `batched=True`, this map processes 1,000 texts together, so group_texts throws away a remainder - # for each of those groups of 1,000 texts. You can adjust that batch_size here but a higher value might be slower - # to preprocess. - # - # To speed up this part, we use multiprocessing. 
See the documentation of the map method for more information: - # https://huggingface.co/docs/datasets/process#map - - lm_datasets = tokenized_datasets.map( - group_texts, - batched=True, - num_proc=data_args.preprocessing_num_workers, - load_from_cache_file=not data_args.overwrite_cache, - ) - - if training_args.do_train: - if "train" not in tokenized_datasets: - raise ValueError("--do_train requires a train dataset") - train_dataset = lm_datasets["train"] - if data_args.max_train_samples is not None: - max_train_samples = min(len(train_dataset), data_args.max_train_samples) - train_dataset = train_dataset.select(range(max_train_samples)) - - if training_args.do_eval: - if "validation" not in tokenized_datasets: - raise ValueError("--do_eval requires a validation dataset") - eval_dataset = lm_datasets["validation"] - if data_args.max_eval_samples is not None: - max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples) - eval_dataset = eval_dataset.select(range(max_eval_samples)) - - # Enable tensorboard only on the master node - has_tensorboard = is_tensorboard_available() - if has_tensorboard and jax.process_index() == 0: - try: - from flax.metrics.tensorboard import SummaryWriter - - summary_writer = SummaryWriter(log_dir=Path(training_args.output_dir)) - except ImportError as ie: - has_tensorboard = False - logger.warning( - f"Unable to display metrics through TensorBoard because some package are not installed: {ie}" - ) - else: - logger.warning( - "Unable to display metrics through TensorBoard because the package is not installed: " - "Please run pip install tensorboard to enable." - ) - - # Initialize our training - rng = jax.random.PRNGKey(training_args.seed) - rng, dropout_rng = jax.random.split(rng) - - # Store some constant - num_epochs = int(training_args.num_train_epochs) - train_batch_size = int(training_args.per_device_train_batch_size) * jax.device_count() - eval_batch_size = int(training_args.per_device_eval_batch_size) * jax.device_count() - steps_per_epoch = len(train_dataset) // train_batch_size - total_train_steps = steps_per_epoch * num_epochs - - # TODO: weights should be initialized in pjitted fun, this won't work for REALLY large models - # TODO: when loading from pre-trained model we need to make sure the vocab is divisible by num_partitions - # GPT2's vocab is odd, we need to resize it for fine-tuning - model = FlaxAutoModelForCausalLM.from_pretrained( - model_args.model_name_or_path, seed=training_args.seed, dtype=getattr(jnp, model_args.dtype) - ) - - # Create learning rate schedule - linear_decay_lr_schedule_fn = create_learning_rate_fn( - len(train_dataset), - train_batch_size, - training_args.num_train_epochs, - training_args.warmup_steps, - training_args.learning_rate, - ) - - optimizer = optax.adamw( - learning_rate=linear_decay_lr_schedule_fn, - b1=training_args.adam_beta1, - b2=training_args.adam_beta2, - eps=training_args.adam_epsilon, - weight_decay=training_args.weight_decay, - ) - - def get_initial_state(params): - state = optimizer.init(params) - return tuple(state), params - - # Get PartitionSpec for model params - param_spec = set_partitions(unfreeze(model.params)) - - # Get the PyTree for opt_state, we don't actually initialize the opt_state yet. 
- params_shapes = jax.tree_util.tree_map(lambda x: x.shape, model.params) - state_shapes = jax.eval_shape(get_initial_state, params_shapes) - - # get PartitionSpec for opt_state, this is very specific to adamw - # TODO: optax returns different state for different optimizers, how can we handle this generically ? - # or maybe we don't since in our examples we just use adamw or adafactor - def get_opt_spec(x): - if isinstance(x, dict): - return param_spec - return None - - opt_state_spec, param_spec = jax.tree_util.tree_map( - get_opt_spec, state_shapes, is_leaf=lambda x: isinstance(x, (dict, optax.EmptyState)) - ) - - # pjit the get_initial_state function to shard params and init - # optimizer state in sharded way - p_get_initial_state = pjit( - get_initial_state, - in_axis_resources=None, - out_axis_resources=(opt_state_spec, param_spec), - ) - - # hack: move the inital params to CPU to free up device memory - # TODO: allow loading weights on CPU in pre-trained model - model.params = jax.tree_util.tree_map(lambda x: np.asarray(x), model.params) - - # mesh defination - mesh_devices = np.array(jax.devices()).reshape(1, jax.local_device_count()) - - # actually initialize the opt_state - with mesh(mesh_devices, ("dp", "mp")): - opt_state, params = p_get_initial_state(freeze(model.params)) - - # cross-entropy with z loss - def loss_fn(logits, labels, z_loss=0): - shift_logits = logits[..., :-1, :] - shift_labels = labels[..., 1:] - - shift_labels = onehot(shift_labels, shift_logits.shape[-1]) - - shift_logits = shift_logits - jax.lax.stop_gradient(shift_logits.max(axis=-1, keepdims=True)) - log_z = jnp.log(jnp.sum(jnp.exp(shift_logits), axis=-1, keepdims=True)) - log_softmax = shift_logits - log_z - loss = -jnp.sum(shift_labels * log_softmax, axis=-1) - - loss += (1e-4 * jnp.square(log_z.squeeze(-1))) * z_loss - - return loss.mean() - - # Define gradient update step fn - # TODO: try to use TrainState instead of passing params and opt_state individually - def train_step(params, opt_state, dropout_rng, batch, step): - dropout_rng, new_dropout_rng = jax.random.split(dropout_rng) - - def compute_loss(params): - labels = batch.pop("labels") - logits = model(**batch, params=params, dropout_rng=dropout_rng, train=True)[0] - loss = loss_fn(logits, labels, z_loss=1.0) - return loss - - grad_fn = jax.value_and_grad(compute_loss) - loss, grads = grad_fn(params) - - updates, new_opt_state = optimizer.update(grads, opt_state, params) - new_params = optax.apply_updates(params, updates) - - metrics = {"loss": loss, "learning_rate": linear_decay_lr_schedule_fn(step)} - return new_params, tuple(new_opt_state), new_dropout_rng, metrics, step + 1 - - # Define eval fn - def eval_step(input_ids, labels, params): - logits = model(input_ids=input_ids, params=params, train=False)[0] - loss = loss_fn(logits, labels) - # metrics - return {"loss": loss} - - p_train_step = pjit( - train_step, - in_axis_resources=(param_spec, opt_state_spec, None, None, None), - out_axis_resources=(param_spec, opt_state_spec, None, None, None), - donate_argnums=(0, 1), - ) - - p_eval_step = pjit( - eval_step, - in_axis_resources=(None, None, param_spec), - out_axis_resources=None, - ) - - logger.info("***** Running training *****") - logger.info(f" Num examples = {len(train_dataset)}") - logger.info(f" Num Epochs = {num_epochs}") - logger.info(f" Instantaneous batch size per device = {training_args.per_device_train_batch_size}") - logger.info(f" Total train batch size (w. 
parallel & distributed) = {train_batch_size}") - logger.info(f" Total optimization steps = {total_train_steps}") - - train_time = 0 - train_metrics = [] - epochs = tqdm(range(num_epochs), desc=f"Epoch ... (1/{num_epochs})", position=0) - global_step = 0 - # we are not doing 2D parallelism (yet!), this just does model parallelism - with mesh(mesh_devices, ("dp", "mp")): - for _ in epochs: - # ======================== Training ================================ - train_start = time.time() - - # Create sampling rng - rng, input_rng = jax.random.split(rng) - - # Generate an epoch by shuffling sampling indices from the train dataset - train_metrics = [] - train_loader = data_loader(input_rng, train_dataset, train_batch_size, shuffle=True) - steps_per_epoch = len(train_dataset) // train_batch_size - - # train - for _ in tqdm(range(steps_per_epoch), desc="Training...", position=1, leave=False): - batch = next(train_loader) - params, opt_state, dropout_rng, train_metric, global_step = p_train_step( - params, - opt_state, - dropout_rng, - batch, - global_step, - ) - train_metrics.append(train_metric) - - cur_step = global_step - - if cur_step % training_args.logging_steps == 0 and cur_step > 0: - # Save metrics - train_time += time.time() - train_start - if has_tensorboard and jax.process_index() == 0: - write_train_metric(summary_writer, train_metrics, train_time, cur_step) - - epochs.write( - f"Step... ({cur_step} | Loss: {train_metric['loss']}, Learning Rate:" - f" {train_metric['learning_rate']})" - ) - - train_metrics = [] - - if cur_step % training_args.eval_steps == 0 and cur_step > 0: - # ======================== Evaluating ============================== - eval_metrics = [] - eval_loader = data_loader(input_rng, eval_dataset, eval_batch_size) - eval_steps = len(eval_dataset) // eval_batch_size - - for _ in tqdm(range(eval_steps), desc="Evaluating...", position=2, leave=False): - batch = next(eval_loader) - metrics = p_eval_step(batch["input_ids"], batch["labels"], params) - eval_metrics.append(metrics) - - # normalize eval metrics - eval_metrics = stack_forest(eval_metrics) - eval_metrics = jax.tree_util.tree_map(jnp.mean, eval_metrics) - - try: - eval_metrics["perplexity"] = math.exp(eval_metrics["loss"]) - except OverflowError: - eval_metrics["perplexity"] = float("inf") - - logger.info( - f"Step... ({cur_step} | Eval loss: {eval_metrics['loss']} | Eval Perplexity:" - f" {eval_metrics['perplexity']}" - ) - - if cur_step % training_args.save_steps == 0 and cur_step > 0: - # save checkpoint after each epoch and push checkpoint to the hub - if jax.process_index() == 0: - params = jax.device_get(params) - model.save_pretrained( - training_args.output_dir, - params=params, - push_to_hub=training_args.push_to_hub, - commit_message=f"Saving weights and logs of step {cur_step}", - ) - - -if __name__ == "__main__": - main() diff --git a/examples/research_projects/jax-projects/wav2vec2/README.md b/examples/research_projects/jax-projects/wav2vec2/README.md deleted file mode 100644 index 5f8e14f47c5..00000000000 --- a/examples/research_projects/jax-projects/wav2vec2/README.md +++ /dev/null @@ -1,120 +0,0 @@ -# Wav2Vec2 Contrastive Loss PreTraining examples - -The following example showcases how to pretrain a wav2vec2 model using the JAX/Flax backend. -Pretraining Wav2Vec2 is rather complex, so it is highly recommended to read the -[official paper](https://arxiv.org/abs/2006.11477). 
- -JAX/Flax allows you to trace pure functions and compile them into efficient, fused accelerator code on both GPU and TPU. -Models written in JAX/Flax are **immutable** and updated in a purely functional -way which enables simple and efficient model parallelism. - -`run_wav2vec2_pretrain_flax.py` is a lightweight example of how to download and preprocess a dataset from the 🤗 Datasets library or use your own files (jsonlines or csv), then pretrain the wav2vec2 architectures above on it. - -For custom datasets in `jsonlines` format please see: [the Datasets documentation](https://huggingface.co/docs/datasets/loading_datasets#json-files) and you also will find examples of these below. - -Let's start by creating a model repository to save the trained model and logs. -Here we call the model `"wav2vec2-base-robust"`, but you can change the model name as you like. - -You can do this either directly on [huggingface.co](https://huggingface.co/new) (assuming that -you are logged in) or via the command line: - -```bash -huggingface-cli repo create wav2vec2-base-robust -``` - -Next we clone the model repository to add the tokenizer and model files. - -```bash -git clone https://huggingface.co//wav2vec2-base-robust -``` - -To ensure that all tensorboard traces will be uploaded correctly, we need to -track them. You can run the following command inside your model repo to do so. - -```bash -cd wav2vec2-base-robust -git lfs track "*tfevents*" -``` - -Great, we have set up our model repository. During training, we will automatically -push the training logs and model weights to the repo. - -Next, let's add a symbolic link to the `run_wav2vec2_pretrain_flax`. - -```bash -export MODEL_DIR="./wav2vec2-base-robust" -ln -s ~/transformers/examples/research_projects/jax-projects/wav2vec2/run_wav2vec2_pretrain_flax.py ./ -``` - -### Create the model configuration - -Let's first create the model configuration and store it in the model repository. -Note that many training parameters can be set in the model configuration including -the configuration about the masking distribution (`mask_time_length`, `mask_time_prob`), -dropout (`attention_dropout`, ...), the trade-off between the contrastive loss and -the diversity loss, etc... -Mostly likely you will need to change these parameters depending on your use case. -Again, we highly recommend to read the [official paper](https://arxiv.org/abs/2006.11477) -to better understand which parameters can be set for pretraining. - -For this example, we will be using a `"base"`-sized model of Wav2Vec2 with robust -layer norm and keep most of the default settings. - -```python -model_dir="./wav2vec2-base-robust" - -from transformers import Wav2Vec2Config -config = Wav2Vec2Config.from_pretrained( - "facebook/wav2vec2-base", - mask_time_length=10, - mask_time_prob=0.05, - diversity_loss_weight=0.1, - num_negatives=100, - do_stable_layer_norm=True, - feat_extract_norm="layer", -) -config.save_pretrained(model_dir) -``` - -### Create a feature extractor configuration - -Before we can start the training, we need to define -a feature extractor that takes care of normalization, etc... - -Here we can also re-use the feature extractor of [wav2vec2-base-960h](https://huggingface.co/facebook/wav2vec2-base) while making sure that padding is allowed. 
- - -```python -model_dir="./wav2vec2-base-robust" - -from transformers import Wav2Vec2FeatureExtractor -config = Wav2Vec2FeatureExtractor.from_pretrained("facebook/wav2vec2-base", return_attention_mask=True) -config.save_pretrained(model_dir) -``` - -### Train the model -Finally, we can run the example script to train the model: - -```bash -./run_wav2vec2_pretrain_flax.py \ - --output_dir=${MODEL_DIR} \ - --num_train_epochs="5" \ - --per_device_train_batch_size="32" \ - --per_device_eval_batch_size="32" \ - --learning_rate="5e-4" \ - --weight_decay="0.01" \ - --warmup_steps="2000" \ - --model_name_or_path=${MODEL_DIR} \ - --dataset_name="librispeech_asr" \ - --dataset_config_name="clean" \ - --train_split_name="train.100" \ - --preprocessing_num_workers="4" \ - --max_duration_in_seconds="10.0" \ - --adam_beta1="0.9" \ - --adam_beta2="0.98" \ - --pad_to_multiple_of="16384" \ - --push_to_hub -``` - -Note that this script is not fully tested yet, so we cannot ensure that -the above script leads to satisfying results. diff --git a/examples/research_projects/jax-projects/wav2vec2/run_wav2vec2_pretrain_flax.py b/examples/research_projects/jax-projects/wav2vec2/run_wav2vec2_pretrain_flax.py deleted file mode 100755 index 017e910db0a..00000000000 --- a/examples/research_projects/jax-projects/wav2vec2/run_wav2vec2_pretrain_flax.py +++ /dev/null @@ -1,614 +0,0 @@ -#!/usr/bin/env python3 -import logging -import sys -import time -from dataclasses import field -from pathlib import Path -from typing import Dict, List, Optional, Union - -import flax -import jax -import jax.numpy as jnp -import librosa -import numpy as np -import optax -from datasets import DatasetDict, load_dataset -from flax import jax_utils, traverse_util -from flax.training import train_state -from flax.training.common_utils import get_metrics, onehot, shard -from tqdm import tqdm - -from transformers import ( - FlaxWav2Vec2ForPreTraining, - HfArgumentParser, - TrainingArguments, - Wav2Vec2Config, - Wav2Vec2FeatureExtractor, - is_tensorboard_available, -) -from transformers.models.wav2vec2.modeling_flax_wav2vec2 import _compute_mask_indices, _sample_negative_indices - - -logger = logging.getLogger(__name__) - - -@flax.struct.dataclass -class ModelArguments: - """ - Arguments pertaining to which model/config/tokenizer we are going to fine-tune from. - """ - - model_name_or_path: str = field( - metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} - ) - cache_dir: Optional[str] = field( - default=None, - metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, - ) - freeze_feature_extractor: Optional[bool] = field( - default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."} - ) - verbose_logging: Optional[bool] = field( - default=False, - metadata={"help": "Whether to log verbose messages or not."}, - ) - max_gumbel_temperature: Optional[float] = field( - default=2.0, metadata={"help": "Maximum temperature for gumbel softmax."} - ) - min_gumbel_temperature: Optional[float] = field( - default=0.1, metadata={"help": "Minimum temperature for gumbel softmax."} - ) - gumbel_temperature_decay: Optional[float] = field( - default=0.999995, metadata={"help": "Decay of gumbel temperature during training."} - ) - dtype: Optional[str] = field( - default="float32", - metadata={ - "help": ( - "Floating-point format in which the model weights should be initialized and trained. Choose one of" - " `[float32, float16, bfloat16]`." 
- ) - }, - ) - - -@flax.struct.dataclass -class DataTrainingArguments: - """ - Arguments pertaining to what data we are going to input our model for training and eval. - - Using `HfArgumentParser` we can turn this class - into argparse arguments to be able to specify them on - the command line. - """ - - dataset_name: str = field( - default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."} - ) - dataset_config_name: Optional[str] = field( - default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} - ) - train_split_name: Optional[str] = field( - default="train", - metadata={ - "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'" - }, - ) - validation_split_name: Optional[str] = field( - default="validation", - metadata={ - "help": ( - "The name of the validation data set split to use (via the datasets library). Defaults to 'validation'" - ) - }, - ) - speech_file_column: Optional[str] = field( - default="file", - metadata={"help": "Column in the dataset that contains speech file path. Defaults to 'file'"}, - ) - overwrite_cache: bool = field( - default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."} - ) - validation_split_percentage: Optional[int] = field( - default=5, - metadata={ - "help": "The percentage of the train set used as validation set in case there's no validation split" - }, - ) - preprocessing_num_workers: Optional[int] = field( - default=None, - metadata={"help": "The number of processes to use for the preprocessing."}, - ) - max_duration_in_seconds: Optional[float] = field( - default=20.0, metadata={"help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"} - ) - pad_to_multiple_of: Optional[int] = field( - default=1024, - metadata={ - "help": ( - "If set will pad the sequence to a multiple of the provided value. This is important to avoid" - " triggering recompilations on TPU" - ) - }, - ) - - -@flax.struct.dataclass -class FlaxDataCollatorForWav2Vec2Pretraining: - """ - Data collator that will dynamically pad the inputs received and prepare masked indices - for self-supervised pretraining. - - Args: - model (:class:`~transformers.FlaxWav2Vec2ForPreTraining`): - The Wav2Vec2 model used for pretraining. The data collator needs to have access - to config and ``_get_feat_extract_output_lengths`` function for correct padding. - feature_extractor (:class:`~transformers.Wav2Vec2FeatureExtractor`): - The processor used for processing the data. - padding (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults to :obj:`True`): - Select a strategy to pad the returned sequences (according to the model's padding side and padding index) - among: - * :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single - sequence if provided). - * :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the - maximum acceptable input length for the model if that argument is not provided. - * :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of - different lengths). - max_length (:obj:`int`, `optional`): - Maximum length of the ``input_values`` of the returned list and optionally padding length (see above). 
- pad_to_multiple_of (:obj:`int`, `optional`): - If set will pad the sequence to a multiple of the provided value. - This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >= - 7.5 (Volta). - """ - - model: FlaxWav2Vec2ForPreTraining - feature_extractor: Wav2Vec2FeatureExtractor - padding: Union[bool, str] = "longest" - pad_to_multiple_of: Optional[int] = None - max_length: Optional[int] = None - - def __call__(self, features: List[Dict[str, Union[List[int], np.ndarray]]]) -> Dict[str, np.ndarray]: - # reformat list to dict and set to pytorch format - batch = self.feature_extractor.pad( - features, - max_length=self.max_length, - padding=self.padding, - pad_to_multiple_of=self.pad_to_multiple_of, - return_tensors="np", - ) - mask_indices_seq_length = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1]) - - batch_size = batch["input_values"].shape[0] - - attention_mask = None - if batch["attention_mask"] is not None: - output_lengths = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1)) - attention_mask = np.zeros((batch_size, mask_indices_seq_length), dtype=np.int8) - - # these two operations makes sure that all values - # before the output lengths indices are attended to - attention_mask[(np.arange(attention_mask.shape[0]), output_lengths - 1)] = 1 - attention_mask = jnp.flip(jnp.flip(attention_mask, -1).cumsum(-1), -1).astype("bool") - - # sample randomly masked indices - batch["mask_time_indices"] = _compute_mask_indices( - (batch_size, mask_indices_seq_length), - self.model.config.mask_time_prob, - self.model.config.mask_time_length, - attention_mask=attention_mask, - min_masks=2, - ) - - # sample indices to take for negative vectors - batch["sampled_negative_indices"] = _sample_negative_indices( - (batch["mask_time_indices"].shape + (self.model.config.proj_codevector_dim,)), - self.model.config.num_negatives, - attention_mask=attention_mask, - ) - - return batch - - -def configure_logger(model_args: ModelArguments, training_args: TrainingArguments): - logging.basicConfig( - format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", - datefmt="%m/%d/%Y %H:%M:%S", - handlers=[logging.StreamHandler(sys.stdout)], - ) - logging_level = logging.WARNING - if model_args.verbose_logging: - logging_level = logging.DEBUG - logger.setLevel(logging_level) - - -def write_train_metric(summary_writer, train_metrics, train_time, step): - summary_writer.scalar("train_time", train_time, step) - - train_metrics = get_metrics(train_metrics) - for key, vals in train_metrics.items(): - tag = f"train_{key}" - for i, val in enumerate(vals): - summary_writer.scalar(tag, val, step - len(vals) + i + 1) - - -def write_eval_metric(summary_writer, eval_metrics, step): - for metric_name, value in eval_metrics.items(): - summary_writer.scalar(f"eval_{metric_name}", value, step) - - -def generate_batch_splits(samples_idx: np.ndarray, batch_size: int) -> np.ndarray: - num_samples = len(samples_idx) - samples_to_remove = num_samples % batch_size - - if samples_to_remove != 0: - samples_idx = samples_idx[:-samples_to_remove] - sections_split = num_samples // batch_size - batch_idx = np.split(samples_idx, sections_split) - return batch_idx - - -def compute_contrastive_loss( - quantized_features, transformer_features, negative_indices, mask_time_indices, logits_temp, num_negatives -): - batch_size, sequence_length, hidden_size = quantized_features.shape - - # take negative vectors from sampled indices - quantized_negatives 
= quantized_features.reshape(-1, hidden_size)[negative_indices.reshape(-1)] - quantized_negatives = quantized_negatives.reshape( - batch_size, sequence_length, num_negatives, hidden_size - ).transpose(2, 0, 1, 3) - - target_features = jnp.concatenate([quantized_features[None, :], quantized_negatives], axis=0) - loss_logits = optax.cosine_similarity(transformer_features, target_features) - loss_logits = loss_logits / logits_temp - - neg_is_pos = (quantized_features == quantized_negatives).all(-1) - neg_is_pos = jnp.concatenate([jnp.full((1,) + loss_logits.shape[1:], False), neg_is_pos], axis=0) - - # make sure incorrectly sampled vectors don't contribute to loss - loss_logits = jnp.where(neg_is_pos, -1e9, loss_logits) - - predictions = loss_logits.transpose(2, 1, 0).reshape(-1, loss_logits.shape[0]) - targets = ((1 - mask_time_indices) * -100).transpose(1, 0).flatten() - - target_mask = jnp.where(targets >= 0, 1.0, 0.0) - contrastive_loss = optax.softmax_cross_entropy(predictions, onehot(targets, predictions.shape[-1])) * target_mask - - contrastive_loss = contrastive_loss.sum() - - return contrastive_loss - - -def main(): - # See all possible arguments in src/transformers/training_args.py - # or by passing the --help flag to this script. - # We now keep distinct sets of args, for a cleaner separation of concerns. - - parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) - - model_args, data_args, training_args = parser.parse_args_into_dataclasses() - configure_logger(model_args, training_args) - - # Downloading and loading a dataset from the hub. - datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir) - - if "validation" not in datasets.keys(): - # make sure only "validation" and "train" keys remain" - datasets = DatasetDict() - datasets["validation"] = load_dataset( - data_args.dataset_name, - data_args.dataset_config_name, - split=f"{data_args.train_split_name}[:{data_args.validation_split_percentage}%]", - cache_dir=model_args.cache_dir, - ) - datasets["train"] = load_dataset( - data_args.dataset_name, - data_args.dataset_config_name, - split=f"{data_args.train_split_name}[{data_args.validation_split_percentage}%:]", - cache_dir=model_args.cache_dir, - ) - else: - # make sure only "validation" and "train" keys remain" - datasets = DatasetDict() - datasets["validation"] = load_dataset( - data_args.dataset_name, - data_args.dataset_config_name, - split="validation", - cache_dir=model_args.cache_dir, - ) - datasets["train"] = load_dataset( - data_args.dataset_name, - data_args.dataset_config_name, - split=f"{data_args.train_split_name}", - cache_dir=model_args.cache_dir, - ) - - # only normalized-inputs-training is supported - feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained( - model_args.model_name_or_path, cache_dir=model_args.cache_dir, do_normalize=True - ) - - def prepare_dataset(batch): - # check that all files have the correct sampling rate - batch["speech"], _ = librosa.load(batch[data_args.speech_file_column], sr=feature_extractor.sampling_rate) - return batch - - # load audio files into numpy arrays - vectorized_datasets = datasets.map( - prepare_dataset, num_proc=data_args.preprocessing_num_workers, remove_columns=datasets["train"].column_names - ) - - # filter audio files that are too long - vectorized_datasets = vectorized_datasets.filter( - lambda data: len(data["speech"]) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate) - ) - - def normalize(batch): 
- return feature_extractor(batch["speech"], sampling_rate=feature_extractor.sampling_rate) - - # normalize and transform to `BatchFeatures` - vectorized_datasets = vectorized_datasets.map( - normalize, - batched=True, - num_proc=data_args.preprocessing_num_workers, - load_from_cache_file=not data_args.overwrite_cache, - remove_columns=vectorized_datasets["train"].column_names, - ) - - # pretraining is only supported for "newer" stable layer norm architecture - # apply_spec_augment has to be True, mask_feature_prob has to be 0.0 - config = Wav2Vec2Config.from_pretrained( - model_args.model_name_or_path, - cache_dir=model_args.cache_dir, - ) - - if not config.do_stable_layer_norm or config.feat_extract_norm != "layer": - raise ValueError( - "PreTraining is only supported for ``config.do_stable_layer_norm=True`` and" - " ``config.feat_extract_norm='layer'" - ) - - model = FlaxWav2Vec2ForPreTraining(config, seed=training_args.seed, dtype=getattr(jnp, model_args.dtype)) - - # Activate gradient checkpointing if needed - if training_args.gradient_checkpointing: - model.gradient_checkpointing_enable() - - data_collator = FlaxDataCollatorForWav2Vec2Pretraining( - model=model, feature_extractor=feature_extractor, pad_to_multiple_of=data_args.pad_to_multiple_of - ) - - # Enable tensorboard only on the master node - has_tensorboard = is_tensorboard_available() - if has_tensorboard and jax.process_index() == 0: - try: - from flax.metrics.tensorboard import SummaryWriter - - summary_writer = SummaryWriter(log_dir=Path(training_args.output_dir)) - except ImportError as ie: - has_tensorboard = False - logger.warning( - f"Unable to display metrics through TensorBoard because some package are not installed: {ie}" - ) - else: - logger.warning( - "Unable to display metrics through TensorBoard because the package is not installed: " - "Please run pip install tensorboard to enable." - ) - - # Initialize our training - rng = jax.random.PRNGKey(training_args.seed) - dropout_rngs = jax.random.split(rng, jax.local_device_count()) - gumbel_rngs = jax.random.split(rng, jax.local_device_count()) - - num_epochs = int(training_args.num_train_epochs) - train_batch_size = int(training_args.per_device_train_batch_size) * jax.device_count() - eval_batch_size = int(training_args.per_device_eval_batch_size) * jax.device_count() - - num_train_steps = len(vectorized_datasets["train"]) // train_batch_size * num_epochs - - # Create learning rate schedule - warmup_fn = optax.linear_schedule( - init_value=0.0, end_value=training_args.learning_rate, transition_steps=training_args.warmup_steps - ) - decay_fn = optax.linear_schedule( - init_value=training_args.learning_rate, - end_value=0, - transition_steps=num_train_steps - training_args.warmup_steps, - ) - linear_decay_lr_schedule_fn = optax.join_schedules( - schedules=[warmup_fn, decay_fn], boundaries=[training_args.warmup_steps] - ) - - # We use Optax's "masking" functionality to not apply weight decay - # to bias and LayerNorm scale parameters. decay_mask_fn returns a - # mask boolean with the same structure as the parameters. - # The mask is True for parameters that should be decayed. 
- def decay_mask_fn(params): - flat_params = traverse_util.flatten_dict(params) - flat_mask = { - path: (path[-1] != "bias" and path[-2:] not in [("layer_norm", "scale"), ("final_layer_norm", "scale")]) - for path in flat_params - } - return traverse_util.unflatten_dict(flat_mask) - - # create adam optimizer - adamw = optax.adamw( - learning_rate=linear_decay_lr_schedule_fn, - b1=training_args.adam_beta1, - b2=training_args.adam_beta2, - eps=training_args.adam_epsilon, - weight_decay=training_args.weight_decay, - mask=decay_mask_fn, - ) - - # Setup train state and define training hyper-parameters - state = train_state.TrainState.create(apply_fn=model.__call__, params=model.params, tx=adamw) - num_negatives = model.config.num_negatives - contrastive_logits_temperature = model.config.contrastive_logits_temperature - num_codevectors = model.config.num_codevectors_per_group * model.config.num_codevector_groups - diversity_loss_weight = model.config.diversity_loss_weight - - # Define gradient update step fn - def train_step(state, batch, dropout_rng, gumbel_rng): - dropout_rng, new_dropout_rng = jax.random.split(dropout_rng) - gumbel_rng, new_gumbel_rng = jax.random.split(gumbel_rng) - - def loss_fn(params): - negative_indices = batch.pop("sampled_negative_indices") - - gumbel_temperature = jnp.clip( - model_args.max_gumbel_temperature * model_args.gumbel_temperature_decay**state.step, - a_min=model_args.min_gumbel_temperature, - ) - - outputs = state.apply_fn( - **batch, - gumbel_temperature=gumbel_temperature, - params=params, - dropout_rng=dropout_rng, - gumbel_rng=gumbel_rng, - train=True, - ) - - contrastive_loss = compute_contrastive_loss( - outputs.projected_quantized_states, - outputs.projected_states, - negative_indices, - batch["mask_time_indices"], - contrastive_logits_temperature, - num_negatives, - ) - - diversity_loss = (num_codevectors - outputs.codevector_perplexity) / num_codevectors - loss = contrastive_loss + diversity_loss_weight * diversity_loss - - return loss - - grad_fn = jax.value_and_grad(loss_fn) - loss, grad = grad_fn(state.params) - grad = jax.lax.pmean(grad, "batch") - new_state = state.apply_gradients(grads=grad) - - metrics = jax.lax.pmean( - {"loss": loss, "learning_rate": linear_decay_lr_schedule_fn(state.step)}, axis_name="batch" - ) - - return new_state, metrics, new_dropout_rng, new_gumbel_rng - - # Create parallel version of the train step - p_train_step = jax.pmap(train_step, "batch", donate_argnums=(0,)) - - # Define eval fn - def eval_step(params, batch): - negative_indices = batch.pop("sampled_negative_indices") - - outputs = model(**batch, params=params, train=False) - - contrastive_loss = compute_contrastive_loss( - outputs.projected_quantized_states, - outputs.projected_states, - negative_indices, - batch["mask_time_indices"], - contrastive_logits_temperature, - num_negatives, - ) - - diversity_loss = (num_codevectors - outputs.codevector_perplexity) / num_codevectors - loss = contrastive_loss + diversity_loss_weight * diversity_loss - - # summarize metrics - metrics = {"loss": loss.mean(), "codevector_perplexity": outputs.codevector_perplexity} - metrics = jax.lax.pmean(metrics, axis_name="batch") - - return metrics - - p_eval_step = jax.pmap(eval_step, "batch", donate_argnums=(0,)) - - # Replicate the train state on each device - state = jax_utils.replicate(state) - - train_time = 0 - train_metrics = [] - epochs = tqdm(range(num_epochs), desc=f"Epoch ... 
(1/{num_epochs})", position=0) - for epoch in epochs: - # ======================== Training ================================ - train_start = time.time() - - # Create sampling rng - rng, input_rng = jax.random.split(rng) - - # Generate an epoch by shuffling sampling indices from the train dataset - num_train_samples = len(vectorized_datasets["train"]) - # Avoid using jax.numpy here in case of TPU training - train_samples_idx = np.random.permutation(np.arange(num_train_samples)) - train_batch_idx = generate_batch_splits(train_samples_idx, train_batch_size) - - # Gather the indexes for creating the batch and do a training step - for step, batch_idx in enumerate(tqdm(train_batch_idx, desc="Training...", position=1)): - samples = [vectorized_datasets["train"][int(idx)] for idx in batch_idx] - model_inputs = data_collator(samples) - model_inputs = shard(model_inputs.data) - - # Model forward - state, train_metric, dropout_rngs, gumbel_rngs = p_train_step( - state, model_inputs, dropout_rngs, gumbel_rngs - ) - train_metrics.append(train_metric) - - cur_step = epoch * (num_train_samples // train_batch_size) + step - - if cur_step % training_args.logging_steps == 0 and cur_step > 0: - # Save metrics - train_metric = jax_utils.unreplicate(train_metric) - train_time += time.time() - train_start - if has_tensorboard and jax.process_index() == 0: - write_train_metric(summary_writer, train_metrics, train_time, cur_step) - - epochs.write( - f"Step... ({cur_step} | Loss: {train_metric['loss'].mean()}, Learning Rate:" - f" {train_metric['learning_rate'].mean()})" - ) - - train_metrics = [] - - # ======================== Evaluating ============================== - num_eval_samples = len(vectorized_datasets["validation"]) - # Avoid using jax.numpy here in case of TPU training - eval_samples_idx = np.arange(num_eval_samples) - eval_batch_idx = generate_batch_splits(eval_samples_idx, eval_batch_size) - - eval_metrics = [] - for i, batch_idx in enumerate(tqdm(eval_batch_idx, desc="Evaluating ...", position=2)): - samples = [vectorized_datasets["validation"][int(idx)] for idx in batch_idx] - model_inputs = data_collator(samples) - - # Model forward - model_inputs = shard(model_inputs.data) - metrics = p_eval_step(state.params, model_inputs) - eval_metrics.append(metrics) - - # get eval metrics - eval_metrics = get_metrics(eval_metrics) - eval_metrics = jax.tree_util.tree_map(jnp.mean, eval_metrics) - - # Update progress bar - epochs.write( - f"Epoch... 
({epoch + 1}/{num_epochs} | Loss: {eval_metrics['loss']}, Perplexity:" - f" {eval_metrics['codevector_perplexity']})" - ) - - # Save metrics - if has_tensorboard and jax.process_index() == 0: - cur_step = epoch * (len(vectorized_datasets["train"]) // train_batch_size) - write_eval_metric(summary_writer, eval_metrics, cur_step) - - # save checkpoint after each epoch and push checkpoint to the hub - if jax.process_index() == 0: - params = jax.device_get(jax.tree_util.tree_map(lambda x: x[0], state.params)) - model.save_pretrained(training_args.output_dir, params=params, push_to_hub=training_args.push_to_hub) - - -if __name__ == "__main__": - main() diff --git a/examples/research_projects/layoutlmv3/README.md b/examples/research_projects/layoutlmv3/README.md deleted file mode 100644 index 2cc0fb75bd2..00000000000 --- a/examples/research_projects/layoutlmv3/README.md +++ /dev/null @@ -1,69 +0,0 @@ - - -# Token classification with LayoutLMv3 (PyTorch version) - -This directory contains a script, `run_funsd_cord.py`, that can be used to fine-tune (or evaluate) LayoutLMv3 on form understanding datasets, such as [FUNSD](https://guillaumejaume.github.io/FUNSD/) and [CORD](https://github.com/clovaai/cord). - -The script `run_funsd_cord.py` leverages the 🤗 Datasets library and the Trainer API. You can easily customize it to your needs. - -## Fine-tuning on FUNSD - -Fine-tuning LayoutLMv3 for token classification on [FUNSD](https://guillaumejaume.github.io/FUNSD/) can be done as follows: - -```bash -python run_funsd_cord.py \ - --model_name_or_path microsoft/layoutlmv3-base \ - --dataset_name funsd \ - --output_dir layoutlmv3-test \ - --do_train \ - --do_eval \ - --max_steps 1000 \ - --eval_strategy steps \ - --eval_steps 100 \ - --learning_rate 1e-5 \ - --load_best_model_at_end \ - --metric_for_best_model "eval_f1" \ - --push_to_hub \ - --push_to_hub_model_id layoutlmv3-finetuned-funsd -``` - -👀 The resulting model can be found here: https://huggingface.co/nielsr/layoutlmv3-finetuned-funsd. By specifying the `push_to_hub` flag, the model gets uploaded automatically to the hub (regularly), together with a model card, which includes metrics such as precision, recall and F1. Note that you can easily update the model card, as it's just a README file of the respective repo on the hub. - -There's also the "Training metrics" [tab](https://huggingface.co/nielsr/layoutlmv3-finetuned-funsd/tensorboard), which shows Tensorboard logs over the course of training. Pretty neat, huh? - -## Fine-tuning on CORD - -Fine-tuning LayoutLMv3 for token classification on [CORD](https://github.com/clovaai/cord) can be done as follows: - -```bash -python run_funsd_cord.py \ - --model_name_or_path microsoft/layoutlmv3-base \ - --dataset_name cord \ - --output_dir layoutlmv3-test \ - --do_train \ - --do_eval \ - --max_steps 1000 \ - --eval_strategy steps \ - --eval_steps 100 \ - --learning_rate 5e-5 \ - --load_best_model_at_end \ - --metric_for_best_model "eval_f1" \ - --push_to_hub \ - --push_to_hub_model_id layoutlmv3-finetuned-cord -``` - -👀 The resulting model can be found here: https://huggingface.co/nielsr/layoutlmv3-finetuned-cord. Note that a model card gets generated automatically in case you specify the `push_to_hub` flag.
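> Editor's note: the sketch below shows roughly how a checkpoint produced by the commands above could be loaded back for inference. It is a minimal, untested example: the repo id is the `nielsr/layoutlmv3-finetuned-funsd` checkpoint mentioned in the README, while the image path, words and boxes are placeholder values, since this script creates its processor with `apply_ocr=False` and expects OCR results to be supplied by the caller.

```python
from PIL import Image
from transformers import AutoModelForTokenClassification, AutoProcessor

checkpoint = "nielsr/layoutlmv3-finetuned-funsd"  # repo produced by the FUNSD command above
processor = AutoProcessor.from_pretrained(checkpoint, apply_ocr=False)
model = AutoModelForTokenClassification.from_pretrained(checkpoint)

image = Image.open("form.png").convert("RGB")  # placeholder document image
words = ["Invoice", "Date:", "2022-05-12"]  # placeholder OCR words
boxes = [[48, 84, 142, 100], [150, 84, 210, 100], [218, 84, 330, 100]]  # boxes normalized to 0-1000

encoding = processor(image, words, boxes=boxes, return_tensors="pt")
outputs = model(**encoding)
# one prediction per token position (includes special tokens and subword pieces)
predictions = outputs.logits.argmax(-1).squeeze().tolist()
print([model.config.id2label[p] for p in predictions])
```

Passing `apply_ocr=True` instead (which requires Tesseract) would let the processor extract the words and boxes from the image itself.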
\ No newline at end of file diff --git a/examples/research_projects/layoutlmv3/requirements.txt b/examples/research_projects/layoutlmv3/requirements.txt deleted file mode 100644 index c4fa0075733..00000000000 --- a/examples/research_projects/layoutlmv3/requirements.txt +++ /dev/null @@ -1,3 +0,0 @@ -datasets -seqeval -pillow diff --git a/examples/research_projects/layoutlmv3/run_funsd_cord.py b/examples/research_projects/layoutlmv3/run_funsd_cord.py deleted file mode 100644 index ad83fbdef9d..00000000000 --- a/examples/research_projects/layoutlmv3/run_funsd_cord.py +++ /dev/null @@ -1,533 +0,0 @@ -#!/usr/bin/env python -# coding=utf-8 -# Copyright 2022 The HuggingFace Team All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" -Fine-tuning LayoutLMv3 for token classification on FUNSD or CORD. -""" -# You can also adapt this script on your own token classification task and datasets. Pointers for this are left as -# comments. - -import logging -import os -import sys -from dataclasses import dataclass, field -from typing import Optional - -import datasets -import numpy as np -from datasets import ClassLabel, load_dataset, load_metric - -import transformers -from transformers import ( - AutoConfig, - AutoModelForTokenClassification, - AutoProcessor, - HfArgumentParser, - Trainer, - TrainingArguments, - set_seed, -) -from transformers.data.data_collator import default_data_collator -from transformers.trainer_utils import get_last_checkpoint -from transformers.utils import check_min_version -from transformers.utils.versions import require_version - - -# Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.19.0.dev0") - -require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/token-classification/requirements.txt") - -logger = logging.getLogger(__name__) - - -@dataclass -class ModelArguments: - """ - Arguments pertaining to which model/config/tokenizer we are going to fine-tune from. - """ - - model_name_or_path: str = field( - default="microsoft/layoutlmv3-base", - metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}, - ) - config_name: Optional[str] = field( - default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} - ) - processor_name: Optional[str] = field( - default=None, metadata={"help": "Name or path to the processor files if not the same as model_name"} - ) - cache_dir: Optional[str] = field( - default=None, - metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, - ) - model_revision: str = field( - default="main", - metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, - ) - use_auth_token: bool = field( - default=False, - metadata={ - "help": ( - "Will use the token generated when running `huggingface-cli login` (necessary to use this script " - "with private models)." 
- ) - }, - ) - - -@dataclass -class DataTrainingArguments: - """ - Arguments pertaining to what data we are going to input our model for training and eval. - """ - - task_name: Optional[str] = field(default="ner", metadata={"help": "The name of the task (ner, pos...)."}) - dataset_name: Optional[str] = field( - default="nielsr/funsd-layoutlmv3", - metadata={"help": "The name of the dataset to use (via the datasets library)."}, - ) - dataset_config_name: Optional[str] = field( - default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} - ) - train_file: Optional[str] = field( - default=None, metadata={"help": "The input training data file (a csv or JSON file)."} - ) - validation_file: Optional[str] = field( - default=None, - metadata={"help": "An optional input evaluation data file to evaluate on (a csv or JSON file)."}, - ) - test_file: Optional[str] = field( - default=None, - metadata={"help": "An optional input test data file to predict on (a csv or JSON file)."}, - ) - text_column_name: Optional[str] = field( - default=None, metadata={"help": "The column name of text to input in the file (a csv or JSON file)."} - ) - label_column_name: Optional[str] = field( - default=None, metadata={"help": "The column name of label to input in the file (a csv or JSON file)."} - ) - overwrite_cache: bool = field( - default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} - ) - preprocessing_num_workers: Optional[int] = field( - default=None, - metadata={"help": "The number of processes to use for the preprocessing."}, - ) - max_seq_length: int = field( - default=512, - metadata={ - "help": ( - "The maximum total input sequence length after tokenization. If set, sequences longer " - "than this will be truncated, sequences shorter will be padded." - ) - }, - ) - max_train_samples: Optional[int] = field( - default=None, - metadata={ - "help": ( - "For debugging purposes or quicker training, truncate the number of training examples to this " - "value if set." - ) - }, - ) - max_eval_samples: Optional[int] = field( - default=None, - metadata={ - "help": ( - "For debugging purposes or quicker training, truncate the number of evaluation examples to this " - "value if set." - ) - }, - ) - max_predict_samples: Optional[int] = field( - default=None, - metadata={ - "help": ( - "For debugging purposes or quicker training, truncate the number of prediction examples to this " - "value if set." - ) - }, - ) - label_all_tokens: bool = field( - default=False, - metadata={ - "help": ( - "Whether to put the label for one word on all tokens of generated by that word or just on the " - "one (in which case the other tokens will have a padding index)." - ) - }, - ) - return_entity_level_metrics: bool = field( - default=False, - metadata={"help": "Whether to return all the entity levels during evaluation or just the overall ones."}, - ) - - def __post_init__(self): - if self.dataset_name is None and self.train_file is None and self.validation_file is None: - raise ValueError("Need either a dataset name or a training/validation file.") - else: - if self.train_file is not None: - extension = self.train_file.split(".")[-1] - assert extension in ["csv", "json"], "`train_file` should be a csv or a json file." - if self.validation_file is not None: - extension = self.validation_file.split(".")[-1] - assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file." 
- self.task_name = self.task_name.lower() - - -def main(): - # See all possible arguments in src/transformers/training_args.py - # or by passing the --help flag to this script. - # We now keep distinct sets of args, for a cleaner separation of concerns. - - parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) - if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): - # If we pass only one argument to the script and it's the path to a json file, - # let's parse it to get our arguments. - model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) - else: - model_args, data_args, training_args = parser.parse_args_into_dataclasses() - - # Setup logging - logging.basicConfig( - format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", - datefmt="%m/%d/%Y %H:%M:%S", - handlers=[logging.StreamHandler(sys.stdout)], - ) - - log_level = training_args.get_process_log_level() - logger.setLevel(log_level) - datasets.utils.logging.set_verbosity(log_level) - transformers.utils.logging.set_verbosity(log_level) - transformers.utils.logging.enable_default_handler() - transformers.utils.logging.enable_explicit_format() - - # Log on each process the small summary: - logger.warning( - f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" - + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}" - ) - logger.info(f"Training/evaluation parameters {training_args}") - - # Detecting last checkpoint. - last_checkpoint = None - if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: - last_checkpoint = get_last_checkpoint(training_args.output_dir) - if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: - raise ValueError( - f"Output directory ({training_args.output_dir}) already exists and is not empty. " - "Use --overwrite_output_dir to overcome." - ) - elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: - logger.info( - f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " - "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." - ) - - # Set seed before initializing model. - set_seed(training_args.seed) - - # Get the datasets - # In distributed training, the load_dataset function guarantee that only one local process can concurrently - # download the dataset. - if data_args.dataset_name == "funsd": - # Downloading and loading a dataset from the hub. - dataset = load_dataset( - "nielsr/funsd-layoutlmv3", - data_args.dataset_config_name, - cache_dir=model_args.cache_dir, - token=True if model_args.use_auth_token else None, - ) - elif data_args.dataset_name == "cord": - # Downloading and loading a dataset from the hub. 
- dataset = load_dataset( - "nielsr/cord-layoutlmv3", - data_args.dataset_config_name, - cache_dir=model_args.cache_dir, - token=True if model_args.use_auth_token else None, - ) - else: - raise ValueError("This script only supports either FUNSD or CORD out-of-the-box.") - - if training_args.do_train: - column_names = dataset["train"].column_names - features = dataset["train"].features - else: - column_names = dataset["test"].column_names - features = dataset["test"].features - - image_column_name = "image" - text_column_name = "words" if "words" in column_names else "tokens" - boxes_column_name = "bboxes" - label_column_name = ( - f"{data_args.task_name}_tags" if f"{data_args.task_name}_tags" in column_names else column_names[1] - ) - - remove_columns = column_names - - # In the event the labels are not a `Sequence[ClassLabel]`, we will need to go through the dataset to get the - # unique labels. - def get_label_list(labels): - unique_labels = set() - for label in labels: - unique_labels = unique_labels | set(label) - label_list = list(unique_labels) - label_list.sort() - return label_list - - # If the labels are of type ClassLabel, they are already integers and we have the map stored somewhere. - # Otherwise, we have to get the list of labels manually. - if isinstance(features[label_column_name].feature, ClassLabel): - label_list = features[label_column_name].feature.names - # No need to convert the labels since they are already ints. - id2label = dict(enumerate(label_list)) - label2id = {v: k for k, v in enumerate(label_list)} - else: - label_list = get_label_list(dataset["train"][label_column_name]) - id2label = dict(enumerate(label_list)) - label2id = {v: k for k, v in enumerate(label_list)} - num_labels = len(label_list) - - # Load pretrained model and processor - # - # Distributed training: - # The .from_pretrained methods guarantee that only one local process can concurrently - # download model & vocab.
- config = AutoConfig.from_pretrained( - model_args.config_name if model_args.config_name else model_args.model_name_or_path, - num_labels=num_labels, - finetuning_task=data_args.task_name, - cache_dir=model_args.cache_dir, - revision=model_args.model_revision, - token=True if model_args.use_auth_token else None, - ) - - processor = AutoProcessor.from_pretrained( - model_args.processor_name if model_args.processor_name else model_args.model_name_or_path, - cache_dir=model_args.cache_dir, - use_fast=True, - revision=model_args.model_revision, - token=True if model_args.use_auth_token else None, - add_prefix_space=True, - apply_ocr=False, - ) - - model = AutoModelForTokenClassification.from_pretrained( - model_args.model_name_or_path, - from_tf=bool(".ckpt" in model_args.model_name_or_path), - config=config, - cache_dir=model_args.cache_dir, - revision=model_args.model_revision, - token=True if model_args.use_auth_token else None, - ) - - # Set the correspondences label/ID inside the model config - model.config.label2id = label2id - model.config.id2label = id2label - - # Preprocessing the dataset - # The processor does everything for us (prepare the image using LayoutLMv3ImageProcessor - # and prepare the words, boxes and word-level labels using LayoutLMv3TokenizerFast) - def prepare_examples(examples): - images = examples[image_column_name] - words = examples[text_column_name] - boxes = examples[boxes_column_name] - word_labels = examples[label_column_name] - - encoding = processor( - images, - words, - boxes=boxes, - word_labels=word_labels, - truncation=True, - padding="max_length", - max_length=data_args.max_seq_length, - ) - - return encoding - - if training_args.do_train: - if "train" not in dataset: - raise ValueError("--do_train requires a train dataset") - train_dataset = dataset["train"] - if data_args.max_train_samples is not None: - train_dataset = train_dataset.select(range(data_args.max_train_samples)) - with training_args.main_process_first(desc="train dataset map pre-processing"): - train_dataset = train_dataset.map( - prepare_examples, - batched=True, - remove_columns=remove_columns, - num_proc=data_args.preprocessing_num_workers, - load_from_cache_file=not data_args.overwrite_cache, - ) - - if training_args.do_eval: - validation_name = "test" - if validation_name not in dataset: - raise ValueError("--do_eval requires a validation dataset") - eval_dataset = dataset[validation_name] - if data_args.max_eval_samples is not None: - eval_dataset = eval_dataset.select(range(data_args.max_eval_samples)) - with training_args.main_process_first(desc="validation dataset map pre-processing"): - eval_dataset = eval_dataset.map( - prepare_examples, - batched=True, - remove_columns=remove_columns, - num_proc=data_args.preprocessing_num_workers, - load_from_cache_file=not data_args.overwrite_cache, - ) - - if training_args.do_predict: - if "test" not in datasets: - raise ValueError("--do_predict requires a test dataset") - predict_dataset = datasets["test"] - if data_args.max_predict_samples is not None: - max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples) - predict_dataset = predict_dataset.select(range(max_predict_samples)) - with training_args.main_process_first(desc="prediction dataset map pre-processing"): - predict_dataset = predict_dataset.map( - prepare_examples, - batched=True, - remove_columns=remove_columns, - num_proc=data_args.preprocessing_num_workers, - load_from_cache_file=not data_args.overwrite_cache, - ) - - # Metrics - metric = 
load_metric("seqeval") - - def compute_metrics(p): - predictions, labels = p - predictions = np.argmax(predictions, axis=2) - - # Remove ignored index (special tokens) - true_predictions = [ - [label_list[p] for (p, l) in zip(prediction, label) if l != -100] - for prediction, label in zip(predictions, labels) - ] - true_labels = [ - [label_list[l] for (p, l) in zip(prediction, label) if l != -100] - for prediction, label in zip(predictions, labels) - ] - - results = metric.compute(predictions=true_predictions, references=true_labels) - if data_args.return_entity_level_metrics: - # Unpack nested dictionaries - final_results = {} - for key, value in results.items(): - if isinstance(value, dict): - for n, v in value.items(): - final_results[f"{key}_{n}"] = v - else: - final_results[key] = value - return final_results - else: - return { - "precision": results["overall_precision"], - "recall": results["overall_recall"], - "f1": results["overall_f1"], - "accuracy": results["overall_accuracy"], - } - - # Initialize our Trainer - trainer = Trainer( - model=model, - args=training_args, - train_dataset=train_dataset if training_args.do_train else None, - eval_dataset=eval_dataset if training_args.do_eval else None, - tokenizer=processor, - data_collator=default_data_collator, - compute_metrics=compute_metrics, - ) - - # Training - if training_args.do_train: - checkpoint = None - if training_args.resume_from_checkpoint is not None: - checkpoint = training_args.resume_from_checkpoint - elif last_checkpoint is not None: - checkpoint = last_checkpoint - train_result = trainer.train(resume_from_checkpoint=checkpoint) - metrics = train_result.metrics - trainer.save_model() # Saves the tokenizer too for easy upload - - max_train_samples = ( - data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset) - ) - metrics["train_samples"] = min(max_train_samples, len(train_dataset)) - - trainer.log_metrics("train", metrics) - trainer.save_metrics("train", metrics) - trainer.save_state() - - # Evaluation - if training_args.do_eval: - logger.info("*** Evaluate ***") - - metrics = trainer.evaluate() - - max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset) - metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset)) - - trainer.log_metrics("eval", metrics) - trainer.save_metrics("eval", metrics) - - # Predict - if training_args.do_predict: - logger.info("*** Predict ***") - - predictions, labels, metrics = trainer.predict(predict_dataset, metric_key_prefix="predict") - predictions = np.argmax(predictions, axis=2) - - # Remove ignored index (special tokens) - true_predictions = [ - [label_list[p] for (p, l) in zip(prediction, label) if l != -100] - for prediction, label in zip(predictions, labels) - ] - - trainer.log_metrics("predict", metrics) - trainer.save_metrics("predict", metrics) - - # Save predictions - output_predictions_file = os.path.join(training_args.output_dir, "predictions.txt") - if trainer.is_world_process_zero(): - with open(output_predictions_file, "w") as writer: - for prediction in true_predictions: - writer.write(" ".join(prediction) + "\n") - - kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "token-classification"} - if data_args.dataset_name is not None: - kwargs["dataset_tags"] = data_args.dataset_name - if data_args.dataset_config_name is not None: - kwargs["dataset_args"] = data_args.dataset_config_name - kwargs["dataset"] = f"{data_args.dataset_name} 
{data_args.dataset_config_name}" - else: - kwargs["dataset"] = data_args.dataset_name - - if training_args.push_to_hub: - trainer.push_to_hub(**kwargs) - else: - trainer.create_model_card(**kwargs) - - -def _mp_fn(index): - # For xla_spawn (TPUs) - main() - - -if __name__ == "__main__": - main() diff --git a/examples/research_projects/longform-qa/README.md b/examples/research_projects/longform-qa/README.md deleted file mode 100644 index eaa29d45422..00000000000 --- a/examples/research_projects/longform-qa/README.md +++ /dev/null @@ -1,7 +0,0 @@ -# Long Form Question Answering - -Author: @yjernite - -This folder contains the code for the Long Form Question answering [demo](http://35.226.96.115:8080/) as well as methods to train and use a fully end-to-end Long Form Question Answering system using the [🤗transformers](https://github.com/huggingface/transformers) and [🤗datasets](https://github.com/huggingface/datasets) libraries. - -You can use these methods to train your own system by following along the associate [notebook](https://github.com/huggingface/notebooks/blob/master/longform-qa/Long_Form_Question_Answering_with_ELI5_and_Wikipedia.ipynb) or [blog post](https://yjernite.github.io/lfqa.html). diff --git a/examples/research_projects/longform-qa/eli5_app.py b/examples/research_projects/longform-qa/eli5_app.py deleted file mode 100644 index 6b1b15cc9cb..00000000000 --- a/examples/research_projects/longform-qa/eli5_app.py +++ /dev/null @@ -1,349 +0,0 @@ -import datasets -import faiss -import numpy as np -import streamlit as st -import torch -from elasticsearch import Elasticsearch -from eli5_utils import ( - embed_questions_for_retrieval, - make_qa_s2s_model, - qa_s2s_generate, - query_es_index, - query_qa_dense_index, -) - -import transformers -from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer - - -MODEL_TYPE = "bart" -LOAD_DENSE_INDEX = True - - -@st.cache(allow_output_mutation=True) -def load_models(): - if LOAD_DENSE_INDEX: - qar_tokenizer = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased") - qar_model = AutoModel.from_pretrained("yjernite/retribert-base-uncased").to("cuda:0") - _ = qar_model.eval() - else: - qar_tokenizer, qar_model = (None, None) - if MODEL_TYPE == "bart": - s2s_tokenizer = AutoTokenizer.from_pretrained("yjernite/bart_eli5") - s2s_model = AutoModelForSeq2SeqLM.from_pretrained("yjernite/bart_eli5").to("cuda:0") - save_dict = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth") - s2s_model.load_state_dict(save_dict["model"]) - _ = s2s_model.eval() - else: - s2s_tokenizer, s2s_model = make_qa_s2s_model( - model_name="google-t5/t5-small", from_file="seq2seq_models/eli5_t5_model_1024_4.pth", device="cuda:0" - ) - return (qar_tokenizer, qar_model, s2s_tokenizer, s2s_model) - - -@st.cache(allow_output_mutation=True) -def load_indexes(): - if LOAD_DENSE_INDEX: - faiss_res = faiss.StandardGpuResources() - wiki40b_passages = datasets.load_dataset(path="wiki_snippets", name="wiki40b_en_100_0")["train"] - wiki40b_passage_reps = np.memmap( - "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat", - dtype="float32", - mode="r", - shape=(wiki40b_passages.num_rows, 128), - ) - wiki40b_index_flat = faiss.IndexFlatIP(128) - wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat) - wiki40b_gpu_index_flat.add(wiki40b_passage_reps) # TODO fix for larger GPU - else: - wiki40b_passages, wiki40b_gpu_index_flat = (None, None) - es_client = Elasticsearch([{"host": "localhost", "port": "9200"}]) - return (wiki40b_passages, 
wiki40b_gpu_index_flat, es_client) - - -@st.cache(allow_output_mutation=True) -def load_train_data(): - eli5 = datasets.load_dataset("eli5", name="LFQA_reddit") - eli5_train = eli5["train_eli5"] - eli5_train_q_reps = np.memmap( - "eli5_questions_reps.dat", dtype="float32", mode="r", shape=(eli5_train.num_rows, 128) - ) - eli5_train_q_index = faiss.IndexFlatIP(128) - eli5_train_q_index.add(eli5_train_q_reps) - return (eli5_train, eli5_train_q_index) - - -passages, gpu_dense_index, es_client = load_indexes() -qar_tokenizer, qar_model, s2s_tokenizer, s2s_model = load_models() -eli5_train, eli5_train_q_index = load_train_data() - - -def find_nearest_training(question, n_results=10): - q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model) - D, I = eli5_train_q_index.search(q_rep, n_results) - nn_examples = [eli5_train[int(i)] for i in I[0]] - return nn_examples - - -def make_support(question, source="wiki40b", method="dense", n_results=10): - if source == "none": - support_doc, hit_lst = ("
<P>
".join(["" for _ in range(11)]).strip(), []) - else: - if method == "dense": - support_doc, hit_lst = query_qa_dense_index( - question, qar_model, qar_tokenizer, passages, gpu_dense_index, n_results - ) - else: - support_doc, hit_lst = query_es_index( - question, - es_client, - index_name="english_wiki40b_snippets_100w", - n_results=n_results, - ) - support_list = [ - (res["article_title"], res["section_title"].strip(), res["score"], res["passage_text"]) for res in hit_lst - ] - question_doc = "question: {} context: {}".format(question, support_doc) - return question_doc, support_list - - -@st.cache( - hash_funcs={ - torch.Tensor: (lambda _: None), - transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _: None), - } -) -def answer_question( - question_doc, s2s_model, s2s_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8 -): - with torch.no_grad(): - answer = qa_s2s_generate( - question_doc, - s2s_model, - s2s_tokenizer, - num_answers=1, - num_beams=n_beams, - min_len=min_len, - max_len=max_len, - do_sample=sampling, - temp=temp, - top_p=top_p, - top_k=None, - max_input_length=1024, - device="cuda:0", - )[0] - return (answer, support_list) - - -st.title("Long Form Question Answering with ELI5") - -# Start sidebar -header_html = "" -header_full = """ - - - - - - - %s - - - -""" % (header_html,) -st.sidebar.markdown( - header_full, - unsafe_allow_html=True, -) - -# Long Form QA with ELI5 and Wikipedia -description = """ -This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html). -First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset, -a pre-processed fixed snapshot of Wikipedia. -""" -st.sidebar.markdown(description, unsafe_allow_html=True) - -action_list = [ - "Answer the question", - "View the retrieved document only", - "View the most similar ELI5 question and answer", - "Show me everything, please!", -] -demo_options = st.sidebar.checkbox("Demo options") -if demo_options: - action_st = st.sidebar.selectbox( - "", - action_list, - index=3, - ) - action = action_list.index(action_st) - show_type = st.sidebar.selectbox( - "", - ["Show full text of passages", "Show passage section titles"], - index=0, - ) - show_passages = show_type == "Show full text of passages" -else: - action = 3 - show_passages = True - -retrieval_options = st.sidebar.checkbox("Retrieval options") -if retrieval_options: - retriever_info = """ - ### Information retriever options - - The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding - trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs. - The answer is then generated by sequence to sequence model which takes the question and retrieved document as input. 
- """ - st.sidebar.markdown(retriever_info) - wiki_source = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"]) - index_type = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"]) -else: - wiki_source = "wiki40b" - index_type = "dense" - -sampled = "beam" -n_beams = 2 -min_len = 64 -max_len = 256 -top_p = None -temp = None -generate_options = st.sidebar.checkbox("Generation options") -if generate_options: - generate_info = """ - ### Answer generation options - - The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large) - weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with - **beam** search, or **sample** from the decoder's output probabilities. - """ - st.sidebar.markdown(generate_info) - sampled = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"]) - min_len = st.sidebar.slider( - "Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None - ) - max_len = st.sidebar.slider( - "Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None - ) - if sampled == "beam": - n_beams = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None) - else: - top_p = st.sidebar.slider( - "Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None - ) - temp = st.sidebar.slider( - "Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None - ) - n_beams = None - -# start main text -questions_list = [ - "", - "How do people make chocolate?", - "Why do we get a fever when we are sick?", - "How can different animals perceive different colors?", - "What is natural language processing?", - "What's the best way to treat a sunburn?", - "What exactly are vitamins ?", - "How does nuclear energy provide electricity?", - "What's the difference between viruses and bacteria?", - "Why are flutes classified as woodwinds when most of them are made out of metal ?", - "Why do people like drinking coffee even though it tastes so bad?", - "What happens when wine ages? How does it make the wine taste better?", - "If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?", - "How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?", - "How does New Zealand have so many large bird predators?", -] -question_s = st.selectbox( - "What would you like to ask? ---- select to enter a new query", - questions_list, - index=1, -) -if question_s == "": - question = st.text_input("Enter your question here:", "") -else: - question = question_s - -if st.button("Show me!"): - if action in [0, 1, 3]: - if index_type == "mixed": - _, support_list_dense = make_support(question, source=wiki_source, method="dense", n_results=10) - _, support_list_sparse = make_support(question, source=wiki_source, method="sparse", n_results=10) - support_list = [] - for res_d, res_s in zip(support_list_dense, support_list_sparse): - if tuple(res_d) not in support_list: - support_list += [tuple(res_d)] - if tuple(res_s) not in support_list: - support_list += [tuple(res_s)] - support_list = support_list[:10] - question_doc = "

<P> " + " <P>

".join([res[-1] for res in support_list]) - else: - question_doc, support_list = make_support(question, source=wiki_source, method=index_type, n_results=10) - if action in [0, 3]: - answer, support_list = answer_question( - question_doc, - s2s_model, - s2s_tokenizer, - min_len=min_len, - max_len=int(max_len), - sampling=(sampled == "sampled"), - n_beams=n_beams, - top_p=top_p, - temp=temp, - ) - st.markdown("### The model generated answer is:") - st.write(answer) - if action in [0, 1, 3] and wiki_source != "none": - st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:") - for i, res in enumerate(support_list): - wiki_url = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_")) - sec_titles = res[1].strip() - if sec_titles == "": - sections = "[{}]({})".format(res[0], wiki_url) - else: - sec_list = sec_titles.split(" & ") - sections = " & ".join( - ["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list] - ) - st.markdown( - "{0:02d} - **Article**: {1:<18}
_Section_: {2}".format(i + 1, res[0], sections), - unsafe_allow_html=True, - ) - if show_passages: - st.write( - '> ' + res[-1] + "", unsafe_allow_html=True - ) - if action in [2, 3]: - nn_train_list = find_nearest_training(question) - train_exple = nn_train_list[0] - st.markdown( - "--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"]) - ) - answers_st = [ - "{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""])) - for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"])) - if i == 0 or sc > 2 - ] - st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st))) - - -disclaimer = """ ---- - -**Disclaimer** - -*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system. -Evaluating biases of such a model and ensuring factual generations are still very much open research problems. -Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.* -""" -st.sidebar.markdown(disclaimer, unsafe_allow_html=True) diff --git a/examples/research_projects/longform-qa/eli5_utils.py b/examples/research_projects/longform-qa/eli5_utils.py deleted file mode 100644 index d4b235fdbaa..00000000000 --- a/examples/research_projects/longform-qa/eli5_utils.py +++ /dev/null @@ -1,688 +0,0 @@ -import functools -import math -import os # noqa: F401 -from random import choice, randint -from time import time - -import datasets # noqa: F401 -import faiss # noqa: F401 -import numpy as np -import pandas as pd -import torch -import torch.utils.checkpoint as checkpoint -from elasticsearch import Elasticsearch # noqa: F401 -from elasticsearch.helpers import bulk, streaming_bulk # noqa: F401 -from torch import nn -from torch.utils.data import DataLoader, Dataset, RandomSampler, SequentialSampler -from tqdm import tqdm - -from transformers import AdamW, AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer, get_linear_schedule_with_warmup - - -pd.set_option("display.max_colwidth", None) - - -############### -# Sparse index -############### -def make_es_index_snippets(es_client, passages_dset, index_name="english_wiki_kilt_snippets_100w"): - index_config = { - "settings": { - "number_of_shards": 1, - "analysis": {"analyzer": {"stop_standard": {"type": "standard", " stopwords": "_english_"}}}, - }, - "mappings": { - "properties": { - "article_title": {"type": "text", "analyzer": "standard", "similarity": "BM25"}, - "section_title": {"type": "text", "analyzer": "standard", "similarity": "BM25"}, - "passage_text": {"type": "text", "analyzer": "standard", "similarity": "BM25"}, - } - }, - } - es_client.indices.create(index=index_name, body=index_config) - number_of_docs = passages_dset.num_rows - progress = tqdm(unit="docs", total=number_of_docs) - successes = 0 - - def passage_generator(): - for passage in passages_dset: - yield passage - - # create the ES index - for ok, action in streaming_bulk( - client=es_client, - index=index_name, - actions=passage_generator(), - ): - progress.update(1) - successes += ok - print("Indexed %d documents" % (successes,)) - - -def query_es_index(question, es_client, index_name="english_wiki_kilt_snippets_100w", n_results=10, min_length=20): - q = question.lower() - banned = ["how", "why", "what", "where", "which", "do", "does", "is", "?", "eli5", "eli5:"] - q = " ".join([w for w in q.split() if w not in banned]) - response = 
es_client.search( - index=index_name, - body={ - "query": { - "multi_match": { - "query": q, - "fields": ["article_title", "section_title", "passage_text^2"], - "type": "cross_fields", - } - }, - "size": 2 * n_results, - }, - ) - hits = response["hits"]["hits"] - support_doc = "

<P> " + " <P>

".join([hit["_source"]["passage_text"] for hit in hits]) - res_list = [{k: hit["_source"][k] for k in hit["_source"] if k != "passage_text"} for hit in hits] - for r, hit in zip(res_list, hits): - r["passage_id"] = hit["_id"] - r["score"] = hit["_score"] - r["passage_text"] = hit["_source"]["passage_text"] - res_list = [res for res in res_list if len(res["passage_text"].split()) > min_length][:n_results] - return support_doc, res_list - - -############### -# ELI5 retriever training -############### -class ELI5DatasetQARetriver(Dataset): - def __init__(self, examples_array, extra_answer_threshold=3, min_answer_length=64, training=True, n_samples=None): - self.data = examples_array - self.answer_thres = extra_answer_threshold - self.min_length = min_answer_length - self.training = training - self.n_samples = self.data.num_rows if n_samples is None else n_samples - - def __len__(self): - return self.n_samples - - def make_example(self, idx): - example = self.data[idx] - question = example["title"] - if self.training: - answers = [a for i, (a, sc) in enumerate(zip(example["answers"]["text"], example["answers"]["score"]))] - answer_tab = choice(answers).split(" ") - start_idx = randint(0, max(0, len(answer_tab) - self.min_length)) - answer_span = " ".join(answer_tab[start_idx:]) - else: - answer_span = example["answers"]["text"][0] - return (question, answer_span) - - def __getitem__(self, idx): - return self.make_example(idx % self.data.num_rows) - - -class RetrievalQAEmbedder(nn.Module): - def __init__(self, sent_encoder, dim): - super(RetrievalQAEmbedder, self).__init__() - self.sent_encoder = sent_encoder - self.output_dim = 128 - self.project_q = nn.Linear(dim, self.output_dim, bias=False) - self.project_a = nn.Linear(dim, self.output_dim, bias=False) - self.ce_loss = nn.CrossEntropyLoss(reduction="mean") - - def embed_sentences_checkpointed(self, input_ids, attention_mask, checkpoint_batch_size=-1): - # reproduces BERT forward pass with checkpointing - if checkpoint_batch_size < 0 or input_ids.shape[0] < checkpoint_batch_size: - return self.sent_encoder(input_ids, attention_mask=attention_mask)[1] - else: - # prepare implicit variables - device = input_ids.device - input_shape = input_ids.size() - token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) - head_mask = [None] * self.sent_encoder.config.num_hidden_layers - extended_attention_mask: torch.Tensor = self.sent_encoder.get_extended_attention_mask( - attention_mask, input_shape - ) - - # define function for checkpointing - def partial_encode(*inputs): - encoder_outputs = self.sent_encoder.encoder( - inputs[0], - attention_mask=inputs[1], - head_mask=head_mask, - ) - sequence_output = encoder_outputs[0] - pooled_output = self.sent_encoder.pooler(sequence_output) - return pooled_output - - # run embedding layer on everything at once - embedding_output = self.sent_encoder.embeddings( - input_ids=input_ids, position_ids=None, token_type_ids=token_type_ids, inputs_embeds=None - ) - # run encoding and pooling on one mini-batch at a time - pooled_output_list = [] - for b in range(math.ceil(input_ids.shape[0] / checkpoint_batch_size)): - b_embedding_output = embedding_output[b * checkpoint_batch_size : (b + 1) * checkpoint_batch_size] - b_attention_mask = extended_attention_mask[b * checkpoint_batch_size : (b + 1) * checkpoint_batch_size] - pooled_output = checkpoint.checkpoint(partial_encode, b_embedding_output, b_attention_mask) - pooled_output_list.append(pooled_output) - return torch.cat(pooled_output_list, dim=0) - - 
def embed_questions(self, q_ids, q_mask, checkpoint_batch_size=-1): - q_reps = self.embed_sentences_checkpointed(q_ids, q_mask, checkpoint_batch_size) - return self.project_q(q_reps) - - def embed_answers(self, a_ids, a_mask, checkpoint_batch_size=-1): - a_reps = self.embed_sentences_checkpointed(a_ids, a_mask, checkpoint_batch_size) - return self.project_a(a_reps) - - def forward(self, q_ids, q_mask, a_ids, a_mask, checkpoint_batch_size=-1): - device = q_ids.device - q_reps = self.embed_questions(q_ids, q_mask, checkpoint_batch_size) - a_reps = self.embed_answers(a_ids, a_mask, checkpoint_batch_size) - compare_scores = torch.mm(q_reps, a_reps.t()) - loss_qa = self.ce_loss(compare_scores, torch.arange(compare_scores.shape[1]).to(device)) - loss_aq = self.ce_loss(compare_scores.t(), torch.arange(compare_scores.shape[0]).to(device)) - loss = (loss_qa + loss_aq) / 2 - return loss - - -def make_qa_retriever_model(model_name="google/bert_uncased_L-8_H-512_A-8", from_file=None, device="cuda:0"): - tokenizer = AutoTokenizer.from_pretrained(model_name) - bert_model = AutoModel.from_pretrained(model_name).to(device) - # run bert_model on a dummy batch to get output dimension - d_ids = torch.LongTensor( - [[bert_model.config.bos_token_id if bert_model.config.bos_token_id is not None else 1]] - ).to(device) - d_mask = torch.LongTensor([[1]]).to(device) - sent_dim = bert_model(d_ids, attention_mask=d_mask)[1].shape[-1] - qa_embedder = RetrievalQAEmbedder(bert_model, sent_dim).to(device) - if from_file is not None: - param_dict = torch.load(from_file) # has model weights, optimizer, and scheduler states - qa_embedder.load_state_dict(param_dict["model"]) - return tokenizer, qa_embedder - - -def make_qa_retriever_batch(qa_list, tokenizer, max_len=64, device="cuda:0"): - q_ls = [q for q, a in qa_list] - a_ls = [a for q, a in qa_list] - q_toks = tokenizer(q_ls, max_length=max_len, padding="max_length", truncation=True) - q_ids, q_mask = ( - torch.LongTensor(q_toks["input_ids"]).to(device), - torch.LongTensor(q_toks["attention_mask"]).to(device), - ) - a_toks = tokenizer(a_ls, max_length=max_len, padding="max_length", truncation=True) - a_ids, a_mask = ( - torch.LongTensor(a_toks["input_ids"]).to(device), - torch.LongTensor(a_toks["attention_mask"]).to(device), - ) - return (q_ids, q_mask, a_ids, a_mask) - - -def train_qa_retriever_epoch(model, dataset, tokenizer, optimizer, scheduler, args, e=0): - model.train() - # make iterator - train_sampler = RandomSampler(dataset) - model_collate_fn = functools.partial( - make_qa_retriever_batch, tokenizer=tokenizer, max_len=args.max_length, device="cuda:0" - ) - data_loader = DataLoader(dataset, batch_size=args.batch_size, sampler=train_sampler, collate_fn=model_collate_fn) - epoch_iterator = tqdm(data_loader, desc="Iteration", disable=True) - # accumulate loss since last print - loc_steps = 0 - loc_loss = 0.0 - st_time = time() - for step, batch in enumerate(epoch_iterator): - q_ids, q_mask, a_ids, a_mask = batch - pre_loss = model(q_ids, q_mask, a_ids, a_mask, checkpoint_batch_size=args.checkpoint_batch_size) - loss = pre_loss.sum() - # optimizer - loss.backward() - optimizer.step() - scheduler.step() - model.zero_grad() - # some printing within the epoch - loc_loss += loss.item() - loc_steps += 1 - if step % args.print_freq == 0 or step == 1: - print( - "{:2d} {:5d} of {:5d} \t L: {:.3f} \t -- {:.3f}".format( - e, - step, - len(dataset) // args.batch_size, - loc_loss / loc_steps, - time() - st_time, - ) - ) - loc_loss = 0 - loc_steps = 0 - - -def 
train_qa_retriever_joint_epoch(model, dataset_list, tokenizer, optimizer, scheduler, args, e=0): - model.train() - model_collate_fn = functools.partial( - make_qa_retriever_batch, tokenizer=tokenizer, max_len=args.max_length, device="cuda:0" - ) - # make iterator - train_samplers = [RandomSampler(dataset) for dataset in dataset_list] - data_loaders = [ - DataLoader(dataset, batch_size=args.batch_size, sampler=train_sampler, collate_fn=model_collate_fn) - for dataset, train_sampler in zip(dataset_list, train_samplers) - ] - iterators = [iter(dloader) for dloader in data_loaders] - joint_iter = zip(*iterators) - # accumulate loss since last print - loc_steps = 0 - loc_loss = 0.0 - st_time = time() - for step, (batches,) in enumerate(zip(joint_iter)): - for batch in batches: - q_ids, q_mask, a_ids, a_mask = batch - loss = model(q_ids, q_mask, a_ids, a_mask, checkpoint_batch_size=args.checkpoint_batch_size) - # optimizer - loss.backward() - optimizer.step() - scheduler.step() - model.zero_grad() - # some printing within the epoch - loc_loss += loss.item() - loc_steps += 1 - if step % args.print_freq == 0: - print( - "{:2d} {:5d} of {:5d} \t L: {:.3f} \t -- {:.3f}".format( - e, - step, - len(dataset_list[0]) // args.batch_size, - loc_loss / loc_steps, - time() - st_time, - ) - ) - loc_loss = 0 - loc_steps = 0 - - -def evaluate_qa_retriever(model, dataset, tokenizer, args): - model.eval() - # make iterator - eval_sampler = SequentialSampler(dataset) - model_collate_fn = functools.partial( - make_qa_retriever_batch, tokenizer=tokenizer, max_len=args.max_length, device="cuda:0" - ) - data_loader = DataLoader(dataset, batch_size=args.batch_size, sampler=eval_sampler, collate_fn=model_collate_fn) - epoch_iterator = tqdm(data_loader, desc="Iteration", disable=True) - tot_loss = 0.0 - with torch.no_grad(): - for step, batch in enumerate(epoch_iterator): - q_ids, q_mask, a_ids, a_mask = batch - loss = model(q_ids, q_mask, a_ids, a_mask) - tot_loss += loss.item() - return tot_loss / (step + 1) - - -def train_qa_retriever(qar_model, qar_tokenizer, qar_train_dset, qar_valid_dset, qar_args): - qar_optimizer = AdamW(qar_model.parameters(), lr=qar_args.learning_rate, eps=1e-8) - qar_scheduler = get_linear_schedule_with_warmup( - qar_optimizer, - num_warmup_steps=100, - num_training_steps=(qar_args.num_epochs + 1) * math.ceil(len(qar_train_dset) / qar_args.batch_size), - ) - for e in range(qar_args.num_epochs): - train_qa_retriever_epoch(qar_model, qar_train_dset, qar_tokenizer, qar_optimizer, qar_scheduler, qar_args, e) - m_save_dict = { - "model": qar_model.state_dict(), - "optimizer": qar_optimizer.state_dict(), - "scheduler": qar_scheduler.state_dict(), - } - print("Saving model {}".format(qar_args.model_save_name)) - torch.save(m_save_dict, "{}_{}.pth".format(qar_args.model_save_name, e)) - eval_loss = evaluate_qa_retriever(qar_model, qar_valid_dset, qar_tokenizer, qar_args) - print("Evaluation loss epoch {:4d}: {:.3f}".format(e, eval_loss)) - - -############### -# ELI5 seq2seq model training -############### -class ELI5DatasetS2S(Dataset): - def __init__( - self, examples_array, make_doc_fun=None, extra_answer_threshold=3, document_cache=None, training=True - ): - self.training = training - self.data = examples_array - self.make_doc_function = make_doc_fun - self.document_cache = {} if document_cache is None else document_cache - assert not (make_doc_fun is None and document_cache is None) - # make index of specific question-answer pairs from multi-answers - if self.training: - self.qa_id_list = [ - (i, 
j) - for i, qa in enumerate(self.data) - for j, (a, sc) in enumerate(zip(qa["answers"]["text"], qa["answers"]["score"])) - if j == 0 or sc >= extra_answer_threshold - ] - else: - self.qa_id_list = [(i, 0) for i in range(self.data.num_rows)] - - def __len__(self): - return len(self.qa_id_list) - - def make_example(self, idx): - i, j = self.qa_id_list[idx] - example = self.data[i] - question = example["title"] + " " + example["selftext"] - answer = example["answers"]["text"][j] - q_id = example["q_id"] - if self.make_doc_function is not None: - self.document_cache[q_id] = self.document_cache.get(q_id, self.make_doc_function(example["title"])) - document = self.document_cache[q_id] - in_st = "question: {} context: {}".format( - question.lower().replace(" --t--", "").strip(), - document.lower().strip(), - ) - out_st = answer - return (in_st, out_st) - - def __getitem__(self, idx): - return self.make_example(idx) - - -def make_qa_s2s_model(model_name="facebook/bart-large", from_file=None, device="cuda:0"): - tokenizer = AutoTokenizer.from_pretrained(model_name) - model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to(device) - if from_file is not None: - param_dict = torch.load(from_file) # has model weights, optimizer, and scheduler states - model.load_state_dict(param_dict["model"]) - return tokenizer, model - - -def make_qa_s2s_batch(qa_list, tokenizer, max_len=64, max_a_len=360, device="cuda:0"): - q_ls = [q for q, a in qa_list] - a_ls = [a for q, a in qa_list] - q_toks = tokenizer(q_ls, max_length=max_len, padding="max_length", truncation=True) - q_ids, q_mask = ( - torch.LongTensor(q_toks["input_ids"]).to(device), - torch.LongTensor(q_toks["attention_mask"]).to(device), - ) - a_toks = tokenizer(a_ls, max_length=min(max_len, max_a_len), padding="max_length", truncation=True) - a_ids, a_mask = ( - torch.LongTensor(a_toks["input_ids"]).to(device), - torch.LongTensor(a_toks["attention_mask"]).to(device), - ) - lm_labels = a_ids[:, 1:].contiguous().clone() - lm_labels[a_mask[:, 1:].contiguous() == 0] = -100 - model_inputs = { - "input_ids": q_ids, - "attention_mask": q_mask, - "decoder_input_ids": a_ids[:, :-1].contiguous(), - "lm_labels": lm_labels, - } - return model_inputs - - -def train_qa_s2s_epoch(model, dataset, tokenizer, optimizer, scheduler, args, e=0, curriculum=False): - model.train() - # make iterator - if curriculum: - train_sampler = SequentialSampler(dataset) - else: - train_sampler = RandomSampler(dataset) - model_collate_fn = functools.partial( - make_qa_s2s_batch, tokenizer=tokenizer, max_len=args.max_length, device="cuda:0" - ) - data_loader = DataLoader(dataset, batch_size=args.batch_size, sampler=train_sampler, collate_fn=model_collate_fn) - epoch_iterator = tqdm(data_loader, desc="Iteration", disable=True) - # accumulate loss since last print - loc_steps = 0 - loc_loss = 0.0 - st_time = time() - for step, batch_inputs in enumerate(epoch_iterator): - pre_loss = model(**batch_inputs)[0] - loss = pre_loss.sum() / pre_loss.shape[0] - loss.backward() - # optimizer - if step % args.backward_freq == 0: - optimizer.step() - scheduler.step() - model.zero_grad() - # some printing within the epoch - loc_loss += loss.item() - loc_steps += 1 - if step % args.print_freq == 0 or step == 1: - print( - "{:2d} {:5d} of {:5d} \t L: {:.3f} \t -- {:.3f}".format( - e, - step, - len(dataset) // args.batch_size, - loc_loss / loc_steps, - time() - st_time, - ) - ) - loc_loss = 0 - loc_steps = 0 - - -def eval_qa_s2s_epoch(model, dataset, tokenizer, args): - model.eval() - # make iterator - 
train_sampler = SequentialSampler(dataset) - model_collate_fn = functools.partial( - make_qa_s2s_batch, tokenizer=tokenizer, max_len=args.max_length, device="cuda:0" - ) - data_loader = DataLoader(dataset, batch_size=args.batch_size, sampler=train_sampler, collate_fn=model_collate_fn) - epoch_iterator = tqdm(data_loader, desc="Iteration", disable=True) - # accumulate loss since last print - loc_steps = 0 - loc_loss = 0.0 - st_time = time() - with torch.no_grad(): - for step, batch_inputs in enumerate(epoch_iterator): - pre_loss = model(**batch_inputs)[0] - loss = pre_loss.sum() / pre_loss.shape[0] - loc_loss += loss.item() - loc_steps += 1 - if step % args.print_freq == 0: - print( - "{:5d} of {:5d} \t L: {:.3f} \t -- {:.3f}".format( - step, - len(dataset) // args.batch_size, - loc_loss / loc_steps, - time() - st_time, - ) - ) - print( - "Total \t L: {:.3f} \t -- {:.3f}".format( - loc_loss / loc_steps, - time() - st_time, - ) - ) - - -def train_qa_s2s(qa_s2s_model, qa_s2s_tokenizer, s2s_train_dset, s2s_valid_dset, s2s_args): - s2s_optimizer = AdamW(qa_s2s_model.parameters(), lr=s2s_args.learning_rate, eps=1e-8) - s2s_scheduler = get_linear_schedule_with_warmup( - s2s_optimizer, - num_warmup_steps=400, - num_training_steps=(s2s_args.num_epochs + 1) * math.ceil(len(s2s_train_dset) / s2s_args.batch_size), - ) - for e in range(s2s_args.num_epochs): - train_qa_s2s_epoch( - qa_s2s_model, - s2s_train_dset, - qa_s2s_tokenizer, - s2s_optimizer, - s2s_scheduler, - s2s_args, - e, - curriculum=(e == 0), - ) - m_save_dict = { - "model": qa_s2s_model.state_dict(), - "optimizer": s2s_optimizer.state_dict(), - "scheduler": s2s_scheduler.state_dict(), - } - print("Saving model {}".format(s2s_args.model_save_name)) - eval_qa_s2s_epoch(qa_s2s_model, s2s_valid_dset, qa_s2s_tokenizer, s2s_args) - torch.save(m_save_dict, "{}_{}.pth".format(s2s_args.model_save_name, e)) - - -# generate answer from input "question: ... context:
<P>
..." -def qa_s2s_generate( - question_doc, - qa_s2s_model, - qa_s2s_tokenizer, - num_answers=1, - num_beams=None, - min_len=64, - max_len=256, - do_sample=False, - temp=1.0, - top_p=None, - top_k=None, - max_input_length=512, - device="cuda:0", -): - model_inputs = make_qa_s2s_batch( - [(question_doc, "A")], - qa_s2s_tokenizer, - max_input_length, - device=device, - ) - n_beams = num_answers if num_beams is None else max(num_beams, num_answers) - generated_ids = qa_s2s_model.generate( - input_ids=model_inputs["input_ids"], - attention_mask=model_inputs["attention_mask"], - min_length=min_len, - max_length=max_len, - do_sample=do_sample, - early_stopping=True, - num_beams=1 if do_sample else n_beams, - temperature=temp, - top_k=top_k, - top_p=top_p, - eos_token_id=qa_s2s_tokenizer.eos_token_id, - no_repeat_ngram_size=3, - num_return_sequences=num_answers, - decoder_start_token_id=qa_s2s_tokenizer.bos_token_id, - ) - return [qa_s2s_tokenizer.decode(ans_ids, skip_special_tokens=True).strip() for ans_ids in generated_ids] - - -############### -# ELI5-trained retrieval model usage -############### -def embed_passages_for_retrieval(passages, tokenizer, qa_embedder, max_length=128, device="cuda:0"): - a_toks = tokenizer(passages, max_length=max_length, padding="max_length", truncation=True) - a_ids, a_mask = ( - torch.LongTensor(a_toks["input_ids"]).to(device), - torch.LongTensor(a_toks["attention_mask"]).to(device), - ) - with torch.no_grad(): - a_reps = qa_embedder.embed_answers(a_ids, a_mask).cpu().type(torch.float) - return a_reps.numpy() - - -def embed_questions_for_retrieval(q_ls, tokenizer, qa_embedder, device="cuda:0"): - q_toks = tokenizer(q_ls, max_length=128, padding="max_length", truncation=True) - q_ids, q_mask = ( - torch.LongTensor(q_toks["input_ids"]).to(device), - torch.LongTensor(q_toks["attention_mask"]).to(device), - ) - with torch.no_grad(): - q_reps = qa_embedder.embed_questions(q_ids, q_mask).cpu().type(torch.float) - return q_reps.numpy() - - -def make_qa_dense_index( - qa_embedder, - tokenizer, - passages_dset, - batch_size=512, - max_length=128, - index_name="kilt_passages_reps.dat", - dtype="float32", - device="cuda:0", -): - st_time = time() - fp = np.memmap(index_name, dtype=dtype, mode="w+", shape=(passages_dset.num_rows, 128)) - n_batches = math.ceil(passages_dset.num_rows / batch_size) - for i in range(n_batches): - passages = list(passages_dset[i * batch_size : (i + 1) * batch_size]["passage_text"]) - reps = embed_passages_for_retrieval(passages, tokenizer, qa_embedder, max_length, device) - fp[i * batch_size : (i + 1) * batch_size] = reps - if i % 50 == 0: - print(i, time() - st_time) - - -def evaluate_retriever(qa_list, retriever_func, scoring_func, n_ret=10, verbose=False): - total_retriever_time = 0.0 - total_retriever_score = 0.0 - st_time = time() - for i, (question, answer) in enumerate(qa_list): - r_time = time() - retrieved_passages = retriever_func(question, n_ret) - total_retriever_time += time() - r_time - total_retriever_score += scoring_func(retrieved_passages, answer) - if verbose and ((i + 1) % 500 == 0 or i <= 1): - print( - "{:03d}: S-{:.4f} T-{:.4f} | {:.2f}".format( - i + 1, total_retriever_score / (i + 1), total_retriever_time / (i + 1), time() - st_time - ) - ) - return {"idf_recall": total_retriever_score / (i + 1), "retrieval_time": total_retriever_time / (i + 1)} - - -# build a support document for the question out of Wikipedia snippets -def query_qa_dense_index( - question, qa_embedder, tokenizer, wiki_passages, wiki_index, n_results=10, 
min_length=20, device="cuda:0" -): - q_rep = embed_questions_for_retrieval([question], tokenizer, qa_embedder, device=device) - D, I = wiki_index.search(q_rep, 2 * n_results) - res_passages = [wiki_passages[int(i)] for i in I[0]] - support_doc = "\n\n" + "\n\n".join([p["passage_text"] for p in res_passages]) - res_list = [{k: p[k] for k in wiki_passages.column_names} for p in res_passages] - res_list = [res for res in res_list if len(res["passage_text"].split()) > min_length][:n_results] - for r, sc in zip(res_list, D[0]): - r["score"] = float(sc) - return support_doc, res_list
- - -def batch_query_qa_dense_index(questions, qa_embedder, tokenizer, wiki_passages, wiki_index, n_results=10): - q_rep = embed_questions_for_retrieval(questions, tokenizer, qa_embedder) - D, I = wiki_index.search(q_rep, n_results) - res_passages_lst = [[wiki_passages[int(i)] for i in i_lst] for i_lst in I] - support_doc_lst = [ - "\n\n" + "\n\n".join([p["passage_text"] for p in res_passages]) for res_passages in res_passages_lst - ] - all_res_lists = [] - for res_passages, dl in zip(res_passages_lst, D): - res_list = [{k: p[k] for k in wiki_passages.column_names} for p in res_passages] - for r, sc in zip(res_list, dl): - r["score"] = float(sc) - all_res_lists += [res_list[:]] - return support_doc_lst, all_res_lists
- - -# find nearest neighbors of an answer or declarative text in Wikipedia snippets -def query_qa_dense_index_nn(passage, qa_embedder, tokenizer, wiki_passages, wiki_index, n_results=10, min_length=20): - a_rep = embed_passages_for_retrieval([passage], tokenizer, qa_embedder) - D, I = wiki_index.search(a_rep, 2 * n_results) - res_passages = [wiki_passages[int(i)] for i in I[0]] - support_doc = "\n\n" + "\n\n".join([p["passage_text"] for p in res_passages]) - res_list = [{k: p[k] for k in wiki_passages.column_names} for p in res_passages] - res_list = [res for res in res_list if len(res["passage_text"].split()) > min_length][:n_results] - for r, sc, i in zip(res_list, D[0], I[0]): - r["passage_id"] = int(i) - r["score"] = float(sc) - return support_doc, res_list
- - -def batch_query_qa_dense_index_nn(passages, qa_embedder, tokenizer, wiki_passages, wiki_index, n_results=10): - a_reps = embed_passages_for_retrieval(passages, tokenizer, qa_embedder) - D, I = wiki_index.search(a_reps, n_results) - res_passages_lst = [[wiki_passages[int(i)] for i in i_lst] for i_lst in I] - support_doc_lst = [ - "\n\n" + "\n\n".join([p["passage_text"] for p in res_passages]) for res_passages in res_passages_lst - ] - all_res_lists = [] - for res_passages, dl, il in zip(res_passages_lst, D, I): - res_list = [{k: p[k] for k in wiki_passages.column_names} for p in res_passages] - for r, sc, i in zip(res_list, dl, il): - r["passage_id"] = int(i) - r["score"] = float(sc) - all_res_lists += [res_list[:]] - return support_doc_lst, all_res_lists
diff --git a/examples/research_projects/longform-qa/requirements.txt b/examples/research_projects/longform-qa/requirements.txt deleted file mode 100644 index a21b64d33df..00000000000 --- a/examples/research_projects/longform-qa/requirements.txt +++ /dev/null @@ -1,4 +0,0 @@ -datasets >= 1.1.3 -faiss-cpu -streamlit -elasticsearch
diff --git a/examples/research_projects/luke/README.md b/examples/research_projects/luke/README.md deleted file mode 100644 index 703eb0b4e42..00000000000 --- a/examples/research_projects/luke/README.md +++ /dev/null @@ -1,71 +0,0 @@ -# Token classification - -## PyTorch version, no Trainer - -Fine-tuning (m)LUKE for token classification tasks such as Named Entity Recognition (NER), part-of-speech -tagging (POS) or phrase extraction (CHUNKS). You can easily -customize it to your needs if you need extra processing on your datasets. - -It will run either on a dataset hosted on our [hub](https://huggingface.co/datasets) or on your own text files for -training and validation; you might just need to add some tweaks in the data preprocessing. - -The script can be run in a distributed setup, on TPU, and supports mixed precision by -means of the [🤗 `Accelerate`](https://github.com/huggingface/accelerate) library. You can use the script normally -after installing it: - -```bash -pip install git+https://github.com/huggingface/accelerate -``` - -then to train English LUKE on CoNLL2003: - -```bash -export TASK_NAME=ner - -python run_luke_ner_no_trainer.py \ - --model_name_or_path studio-ousia/luke-base \ - --dataset_name conll2003 \ - --task_name $TASK_NAME \ - --max_length 128 \ - --per_device_train_batch_size 32 \ - --learning_rate 2e-5 \ - --num_train_epochs 3 \ - --output_dir /tmp/$TASK_NAME/ -``` - -You can then use your usual launchers to run it in a distributed environment, but the easiest way is to run - -```bash -accelerate config -``` - -and reply to the questions asked. Then - -```bash -accelerate test -``` - -which will check that everything is ready for training. Finally, you can launch training with - -```bash -export TASK_NAME=ner - -accelerate launch run_luke_ner_no_trainer.py \ - --model_name_or_path studio-ousia/luke-base \ - --dataset_name conll2003 \ - --task_name $TASK_NAME \ - --max_length 128 \ - --per_device_train_batch_size 32 \ - --learning_rate 2e-5 \ - --num_train_epochs 3 \ - --output_dir /tmp/$TASK_NAME/ -``` - -This command is the same and will work for: - -- a CPU-only setup -- a setup with one GPU -- a distributed training with several GPUs (single or multi node) -- a training on TPUs - -Note that this library is in alpha release, so your feedback is more than welcome if you encounter any problems using it.
diff --git a/examples/research_projects/luke/luke_utils.py b/examples/research_projects/luke/luke_utils.py deleted file mode 100644 index aec4133f21b..00000000000 --- a/examples/research_projects/luke/luke_utils.py +++ /dev/null @@ -1,115 +0,0 @@ -import unicodedata -from dataclasses import dataclass -from typing import Optional, Union - -import numpy as np - -from transformers.data.data_collator import DataCollatorMixin -from transformers.file_utils import PaddingStrategy -from transformers.tokenization_utils_base import PreTrainedTokenizerBase - - -def padding_tensor(sequences, padding_value, padding_side, sequence_length): - if isinstance(padding_value, tuple): - out_tensor = np.full((len(sequences), sequence_length, 2), padding_value) - else: - out_tensor = np.full((len(sequences), sequence_length), padding_value) - - for i, tensor in enumerate(sequences): - if padding_side == "right": - if isinstance(padding_value, tuple): - out_tensor[i, : len(tensor[:sequence_length]), :2] = tensor[:sequence_length] - else: - out_tensor[i, : len(tensor[:sequence_length])] = tensor[:sequence_length] - else: - if isinstance(padding_value, tuple): - out_tensor[i, len(tensor[:sequence_length]) - 1 :, :2] = tensor[:sequence_length] - else: - out_tensor[i, len(tensor[:sequence_length]) - 1 :] = tensor[:sequence_length] - - return out_tensor.tolist() - - -def is_punctuation(char): - cp = ord(char) - if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126): - return True - cat = unicodedata.category(char) - if cat.startswith("P"): - return True - return False - - -@dataclass -class DataCollatorForLukeTokenClassification(DataCollatorMixin): - """ - Data collator that will dynamically pad the inputs received, as well as the labels. - - Args: - tokenizer ([`PreTrainedTokenizer`] or [`PreTrainedTokenizerFast`]): - The tokenizer used for encoding the data. - padding (`bool`, `str` or [`~file_utils.PaddingStrategy`], *optional*, defaults to `True`): - Select a strategy to pad the returned sequences (according to the model's padding side and padding index) - among: - - - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single - sequence if provided). - - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the - maximum acceptable input length for the model if that argument is not provided. - - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of - different lengths). - max_length (`int`, *optional*): - Maximum length of the returned list and optionally padding length (see above). - pad_to_multiple_of (`int`, *optional*): - If set will pad the sequence to a multiple of the provided value. - - This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >= - 7.5 (Volta). - label_pad_token_id (`int`, *optional*, defaults to -100): - The id to use when padding the labels (-100 will be automatically ignore by PyTorch loss functions). - return_tensors (`str`): - The type of Tensor to return. Allowable values are "np", "pt" and "tf". 
- """ - - tokenizer: PreTrainedTokenizerBase - padding: Union[bool, str, PaddingStrategy] = True - max_length: Optional[int] = None - pad_to_multiple_of: Optional[int] = None - label_pad_token_id: int = -100 - return_tensors: str = "pt" - - def torch_call(self, features): - import torch - - label_name = "label" if "label" in features[0].keys() else "labels" - labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None - batch = self.tokenizer.pad( - features, - padding=self.padding, - max_length=self.max_length, - pad_to_multiple_of=self.pad_to_multiple_of, - # Conversion to tensors will fail if we have labels as they are not of the same length yet. - return_tensors="pt" if labels is None else None, - ) - - if labels is None: - return batch - - sequence_length = torch.tensor(batch["entity_ids"]).shape[1] - padding_side = self.tokenizer.padding_side - if padding_side == "right": - batch[label_name] = [ - list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels - ] - else: - batch[label_name] = [ - [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels - ] - - ner_tags = [feature["ner_tags"] for feature in features] - batch["ner_tags"] = padding_tensor(ner_tags, -1, padding_side, sequence_length) - original_entity_spans = [feature["original_entity_spans"] for feature in features] - batch["original_entity_spans"] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length) - batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()} - - return batch diff --git a/examples/research_projects/luke/run_luke_ner_no_trainer.py b/examples/research_projects/luke/run_luke_ner_no_trainer.py deleted file mode 100644 index 1552acbd42c..00000000000 --- a/examples/research_projects/luke/run_luke_ner_no_trainer.py +++ /dev/null @@ -1,720 +0,0 @@ -#!/usr/bin/env python -# coding=utf-8 -# Copyright 2022 The HuggingFace Inc. team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" -Fine-tuning (m)LUKE model on token classification tasks (NER, POS, CHUNKS) relying on the accelerate library 🤗 -without using a Trainer. 
-""" - -import argparse -import logging -import math -import os -import random -from pathlib import Path - -import datasets -import torch -from accelerate import Accelerator, DistributedDataParallelKwargs -from datasets import ClassLabel, load_dataset, load_metric -from huggingface_hub import Repository, create_repo -from luke_utils import DataCollatorForLukeTokenClassification, is_punctuation, padding_tensor -from torch.utils.data import DataLoader -from tqdm.auto import tqdm - -import transformers -from transformers import ( - AdamW, - LukeConfig, - LukeForEntitySpanClassification, - LukeTokenizer, - SchedulerType, - default_data_collator, - get_scheduler, - set_seed, -) -from transformers.utils.versions import require_version - - -logger = logging.getLogger(__name__) -require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/token-classification/requirements.txt") - - -def parse_args(): - parser = argparse.ArgumentParser( - description="Finetune (m)LUKE on a token classification task (such as NER) with the accelerate library" - ) - parser.add_argument( - "--dataset_name", - type=str, - default=None, - help="The name of the dataset to use (via the datasets library).", - ) - parser.add_argument( - "--dataset_config_name", - type=str, - default=None, - help="The configuration name of the dataset to use (via the datasets library).", - ) - parser.add_argument( - "--train_file", type=str, default=None, help="A csv or a json file containing the training data." - ) - parser.add_argument( - "--validation_file", type=str, default=None, help="A csv or a json file containing the validation data." - ) - parser.add_argument( - "--text_column_name", - type=str, - default=None, - help="The column name of text to input in the file (a csv or JSON file).", - ) - parser.add_argument( - "--label_column_name", - type=str, - default=None, - help="The column name of label to input in the file (a csv or JSON file).", - ) - parser.add_argument( - "--max_length", - type=int, - default=128, - help=( - "The maximum total input sequence length after tokenization. Sequences longer than this will be truncated," - " sequences shorter will be padded if `--pad_to_max_length` is passed." - ), - ) - parser.add_argument( - "--max_entity_length", - type=int, - default=32, - help=( - "The maximum total input entity length after tokenization (Used only for (M)Luke models). Sequences longer" - " than this will be truncated, sequences shorter will be padded if `--pad_to_max_length` is passed." - ), - ) - parser.add_argument( - "--max_mention_length", - type=int, - default=30, - help=( - "The maximum total input mention length after tokenization (Used only for (M)Luke models). Sequences" - " longer than this will be truncated, sequences shorter will be padded if `--pad_to_max_length` is passed." - ), - ) - parser.add_argument( - "--pad_to_max_length", - action="store_true", - help="If passed, pad all samples to `max_length`. 
Otherwise, dynamic padding is used.", - ) - parser.add_argument( - "--model_name_or_path", - type=str, - help="Path to pretrained model or model identifier from huggingface.co/models.", - required=True, - ) - parser.add_argument( - "--config_name", - type=str, - default=None, - help="Pretrained config name or path if not the same as model_name", - ) - parser.add_argument( - "--tokenizer_name", - type=str, - default=None, - help="Pretrained tokenizer name or path if not the same as model_name", - ) - parser.add_argument( - "--per_device_train_batch_size", - type=int, - default=8, - help="Batch size (per device) for the training dataloader.", - ) - parser.add_argument( - "--per_device_eval_batch_size", - type=int, - default=8, - help="Batch size (per device) for the evaluation dataloader.", - ) - parser.add_argument( - "--learning_rate", - type=float, - default=5e-5, - help="Initial learning rate (after the potential warmup period) to use.", - ) - parser.add_argument("--weight_decay", type=float, default=0.0, help="Weight decay to use.") - parser.add_argument("--num_train_epochs", type=int, default=3, help="Total number of training epochs to perform.") - parser.add_argument( - "--max_train_steps", - type=int, - default=None, - help="Total number of training steps to perform. If provided, overrides num_train_epochs.", - ) - parser.add_argument( - "--gradient_accumulation_steps", - type=int, - default=1, - help="Number of updates steps to accumulate before performing a backward/update pass.", - ) - parser.add_argument( - "--lr_scheduler_type", - type=SchedulerType, - default="linear", - help="The scheduler type to use.", - choices=["linear", "cosine", "cosine_with_restarts", "polynomial", "constant", "constant_with_warmup"], - ) - parser.add_argument( - "--num_warmup_steps", type=int, default=0, help="Number of steps for the warmup in the lr scheduler." - ) - parser.add_argument("--output_dir", type=str, default=None, help="Where to store the final model.") - parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") - parser.add_argument( - "--label_all_tokens", - action="store_true", - help="Setting labels of all special tokens to -100 and thus PyTorch will ignore them.", - ) - parser.add_argument( - "--return_entity_level_metrics", - action="store_true", - help="Indication whether entity level metrics are to be returner.", - ) - parser.add_argument( - "--task_name", - type=str, - default="ner", - choices=["ner", "pos", "chunk"], - help="The name of the task.", - ) - parser.add_argument( - "--debug", - action="store_true", - help="Activate debug mode and run training only with a subset of data.", - ) - parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") - parser.add_argument( - "--hub_model_id", type=str, help="The name of the repository to keep in sync with the local `output_dir`." - ) - parser.add_argument("--hub_token", type=str, help="The token to use to push to the Model Hub.") - args = parser.parse_args() - - # Sanity checks - if args.task_name is None and args.train_file is None and args.validation_file is None: - raise ValueError("Need either a task name or a training/validation file.") - else: - if args.train_file is not None: - extension = args.train_file.split(".")[-1] - assert extension in ["csv", "json"], "`train_file` should be a csv or a json file." 
- if args.validation_file is not None: - extension = args.validation_file.split(".")[-1] - assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file." - - if args.push_to_hub: - assert args.output_dir is not None, "Need an `output_dir` to create a repo when `--push_to_hub` is passed." - - return args - - -def main(): - args = parse_args() - - # Initialize the accelerator. We will let the accelerator handle device placement for us in this example. - handler = DistributedDataParallelKwargs(find_unused_parameters=True) - accelerator = Accelerator(kwargs_handlers=[handler]) - # Make one log on every process with the configuration for debugging. - logging.basicConfig( - format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", - datefmt="%m/%d/%Y %H:%M:%S", - level=logging.INFO, - ) - logger.info(accelerator.state) - - # Setup logging, we only want one process per machine to log things on the screen. - # accelerator.is_local_main_process is only True for one process per machine. - logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR) - if accelerator.is_local_main_process: - datasets.utils.logging.set_verbosity_warning() - transformers.utils.logging.set_verbosity_info() - else: - datasets.utils.logging.set_verbosity_error() - transformers.utils.logging.set_verbosity_error() - - # If passed along, set the training seed now. - if args.seed is not None: - set_seed(args.seed) - - # Handle the repository creation - if accelerator.is_main_process: - if args.push_to_hub: - # Retrieve of infer repo_name - repo_name = args.hub_model_id - if repo_name is None: - repo_name = Path(args.output_dir).absolute().name - # Create repo and retrieve repo_id - repo_id = create_repo(repo_name, exist_ok=True, token=args.hub_token).repo_id - # Clone repo locally - repo = Repository(args.output_dir, clone_from=repo_id, token=args.hub_token) - elif args.output_dir is not None: - os.makedirs(args.output_dir, exist_ok=True) - accelerator.wait_for_everyone() - - # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) - # or just provide the name of one of the public datasets for token classification task available on the hub at https://huggingface.co/datasets/ - # (the dataset will be downloaded automatically from the datasets Hub). - # - # For CSV/JSON files, this script will use the column called 'tokens' or the first column if no column called - # 'tokens' is found. You can easily tweak this behavior (see below). - # - # In distributed training, the load_dataset function guarantee that only one local process can concurrently - # download the dataset. - if args.dataset_name is not None: - # Downloading and loading a dataset from the hub. - raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name) - else: - data_files = {} - if args.train_file is not None: - data_files["train"] = args.train_file - extension = args.train_file.split(".")[-1] - if args.validation_file is not None: - data_files["validation"] = args.validation_file - extension = args.validation_file.split(".")[-1] - raw_datasets = load_dataset(extension, data_files=data_files) - # Trim a number of training examples - if args.debug: - for split in raw_datasets.keys(): - raw_datasets[split] = raw_datasets[split].select(range(100)) - # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at - # https://huggingface.co/docs/datasets/loading_datasets. 
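For readers following the label handling that comes next in `run_luke_ner_no_trainer.py`: when the data is loaded through 🤗 Datasets, a `ner_tags` column typed as a sequence of `ClassLabel` already carries its label names, which is what lets the script read `label_list` straight from the features instead of scanning the data. Below is a small standalone sketch, not part of this patch, using the public `conll2003` dataset only as an example.

```python
# Standalone sketch (not part of this patch): how label_list falls out of a ClassLabel feature.
from datasets import ClassLabel, load_dataset

raw_datasets = load_dataset("conll2003")
ner_feature = raw_datasets["train"].features["ner_tags"].feature

if isinstance(ner_feature, ClassLabel):
    label_list = ner_feature.names  # e.g. ["O", "B-PER", "I-PER", "B-ORG", ...]
else:
    # Fallback mirroring get_label_list(): collect and sort the unique string labels.
    label_list = sorted({tag for tags in raw_datasets["train"]["ner_tags"] for tag in tags})

print(len(label_list), label_list)
```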
- - if raw_datasets["train"] is not None: - column_names = raw_datasets["train"].column_names - features = raw_datasets["train"].features - else: - column_names = raw_datasets["validation"].column_names - features = raw_datasets["validation"].features - - if args.text_column_name is not None: - text_column_name = args.text_column_name - elif "tokens" in column_names: - text_column_name = "tokens" - else: - text_column_name = column_names[0] - - if args.label_column_name is not None: - label_column_name = args.label_column_name - elif f"{args.task_name}_tags" in column_names: - label_column_name = f"{args.task_name}_tags" - else: - label_column_name = column_names[1] - - # In the event the labels are not a `Sequence[ClassLabel]`, we will need to go through the dataset to get the - # unique labels. - def get_label_list(labels): - unique_labels = set() - for label in labels: - unique_labels = unique_labels | set(label) - label_list = list(unique_labels) - label_list.sort() - return label_list - - if isinstance(features[label_column_name].feature, ClassLabel): - label_list = features[label_column_name].feature.names - # No need to convert the labels since they are already ints. - else: - label_list = get_label_list(raw_datasets["train"][label_column_name]) - num_labels = len(label_list) - - # Map that sends B-Xxx label to its I-Xxx counterpart - b_to_i_label = [] - - for idx, label in enumerate(label_list): - if label.startswith("B-") and label.replace("B-", "I-") in label_list: - b_to_i_label.append(label_list.index(label.replace("B-", "I-"))) - else: - b_to_i_label.append(idx) - - # Load pretrained model and tokenizer - # - # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently - # download model & vocab. - if args.config_name: - config = LukeConfig.from_pretrained(args.config_name, num_labels=num_labels) - elif args.model_name_or_path: - config = LukeConfig.from_pretrained(args.model_name_or_path, num_labels=num_labels) - else: - logger.warning("You are instantiating a new config instance from scratch.") - - tokenizer_name_or_path = args.tokenizer_name if args.tokenizer_name else args.model_name_or_path - if not tokenizer_name_or_path: - raise ValueError( - "You are instantiating a new tokenizer from scratch. This is not supported by this script. " - "You can do it from another script, save it, and load it from here, using --tokenizer_name." - ) - - tokenizer = LukeTokenizer.from_pretrained( - tokenizer_name_or_path, - use_fast=False, - task="entity_span_classification", - max_entity_length=args.max_entity_length, - max_mention_length=args.max_mention_length, - ) - - if args.model_name_or_path: - model = LukeForEntitySpanClassification.from_pretrained( - args.model_name_or_path, - from_tf=bool(".ckpt" in args.model_name_or_path), - config=config, - ) - else: - logger.info("Training new model from scratch") - model = LukeForEntitySpanClassification.from_config(config) - - model.resize_token_embeddings(len(tokenizer)) - - # Preprocessing the datasets. - # First we tokenize all the texts. 
- padding = "max_length" if args.pad_to_max_length else False - - def compute_sentence_boundaries_for_luke(examples): - sentence_boundaries = [] - - for tokens in examples[text_column_name]: - sentence_boundaries.append([0, len(tokens)]) - - examples["sentence_boundaries"] = sentence_boundaries - - return examples - - def compute_entity_spans_for_luke(examples): - all_entity_spans = [] - texts = [] - all_labels_entity_spans = [] - all_original_entity_spans = [] - - for labels, tokens, sentence_boundaries in zip( - examples[label_column_name], examples[text_column_name], examples["sentence_boundaries"] - ): - subword_lengths = [len(tokenizer.tokenize(token)) for token in tokens] - total_subword_length = sum(subword_lengths) - _, context_end = sentence_boundaries - - if total_subword_length > args.max_length - 2: - cur_length = sum(subword_lengths[:context_end]) - idx = context_end - 1 - - while cur_length > args.max_length - 2: - cur_length -= subword_lengths[idx] - context_end -= 1 - idx -= 1 - - text = "" - sentence_words = tokens[:context_end] - sentence_subword_lengths = subword_lengths[:context_end] - word_start_char_positions = [] - word_end_char_positions = [] - labels_positions = {} - - for word, label in zip(sentence_words, labels): - if word[0] == "'" or (len(word) == 1 and is_punctuation(word)): - text = text.rstrip() - - word_start_char_positions.append(len(text)) - text += word - word_end_char_positions.append(len(text)) - text += " " - labels_positions[(word_start_char_positions[-1], word_end_char_positions[-1])] = label - - text = text.rstrip() - texts.append(text) - entity_spans = [] - labels_entity_spans = [] - original_entity_spans = [] - - for word_start in range(len(sentence_words)): - for word_end in range(word_start, len(sentence_words)): - if ( - sum(sentence_subword_lengths[word_start:word_end]) <= tokenizer.max_mention_length - and len(entity_spans) < tokenizer.max_entity_length - ): - entity_spans.append((word_start_char_positions[word_start], word_end_char_positions[word_end])) - original_entity_spans.append((word_start, word_end + 1)) - if ( - word_start_char_positions[word_start], - word_end_char_positions[word_end], - ) in labels_positions: - labels_entity_spans.append( - labels_positions[ - (word_start_char_positions[word_start], word_end_char_positions[word_end]) - ] - ) - else: - labels_entity_spans.append(0) - - all_entity_spans.append(entity_spans) - all_labels_entity_spans.append(labels_entity_spans) - all_original_entity_spans.append(original_entity_spans) - - examples["entity_spans"] = all_entity_spans - examples["text"] = texts - examples["labels_entity_spans"] = all_labels_entity_spans - examples["original_entity_spans"] = all_original_entity_spans - - return examples - - def tokenize_and_align_labels(examples): - entity_spans = [] - - for v in examples["entity_spans"]: - entity_spans.append(list(map(tuple, v))) - - tokenized_inputs = tokenizer( - examples["text"], - entity_spans=entity_spans, - max_length=args.max_length, - padding=padding, - truncation=True, - ) - - if padding == "max_length": - tokenized_inputs["labels"] = padding_tensor( - examples["labels_entity_spans"], -100, tokenizer.padding_side, tokenizer.max_entity_length - ) - tokenized_inputs["original_entity_spans"] = padding_tensor( - examples["original_entity_spans"], (-1, -1), tokenizer.padding_side, tokenizer.max_entity_length - ) - tokenized_inputs[label_column_name] = padding_tensor( - examples[label_column_name], -1, tokenizer.padding_side, tokenizer.max_entity_length - ) - else: 
- tokenized_inputs["labels"] = [ex[: tokenizer.max_entity_length] for ex in examples["labels_entity_spans"]] - tokenized_inputs["original_entity_spans"] = [ - ex[: tokenizer.max_entity_length] for ex in examples["original_entity_spans"] - ] - tokenized_inputs[label_column_name] = [ - ex[: tokenizer.max_entity_length] for ex in examples[label_column_name] - ] - - return tokenized_inputs - - with accelerator.main_process_first(): - raw_datasets = raw_datasets.map( - compute_sentence_boundaries_for_luke, - batched=True, - desc="Adding sentence boundaries", - ) - raw_datasets = raw_datasets.map( - compute_entity_spans_for_luke, - batched=True, - desc="Adding sentence spans", - ) - - processed_raw_datasets = raw_datasets.map( - tokenize_and_align_labels, - batched=True, - remove_columns=raw_datasets["train"].column_names, - desc="Running tokenizer on dataset", - ) - - train_dataset = processed_raw_datasets["train"] - eval_dataset = processed_raw_datasets["validation"] - - # Log a few random samples from the training set: - for index in random.sample(range(len(train_dataset)), 3): - logger.info(f"Sample {index} of the training set: {train_dataset[index]}.") - - # DataLoaders creation: - if args.pad_to_max_length: - # If padding was already done ot max length, we use the default data collator that will just convert everything - # to tensors. - data_collator = default_data_collator - else: - # Otherwise, `DataCollatorForTokenClassification` will apply dynamic padding for us (by padding to the maximum length of - # the samples passed). When using mixed precision, we add `pad_to_multiple_of=8` to pad all tensors to multiple - # of 8s, which will enable the use of Tensor Cores on NVIDIA hardware with compute capability >= 7.5 (Volta). - # For fp8, we pad to multiple of 16. - if accelerator.mixed_precision == "fp8": - pad_to_multiple_of = 16 - elif accelerator.mixed_precision != "no": - pad_to_multiple_of = 8 - else: - pad_to_multiple_of = None - data_collator = DataCollatorForLukeTokenClassification(tokenizer, pad_to_multiple_of=pad_to_multiple_of) - - train_dataloader = DataLoader( - train_dataset, shuffle=True, collate_fn=data_collator, batch_size=args.per_device_train_batch_size - ) - eval_dataloader = DataLoader(eval_dataset, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size) - - # Optimizer - # Split weights in two groups, one with weight decay and the other not. - no_decay = ["bias", "LayerNorm.weight"] - optimizer_grouped_parameters = [ - { - "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], - "weight_decay": args.weight_decay, - }, - { - "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], - "weight_decay": 0.0, - }, - ] - optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate) - - # Use the device given by the `accelerator` object. - device = accelerator.device - model.to(device) - - # Prepare everything with our `accelerator`. - model, optimizer, train_dataloader, eval_dataloader = accelerator.prepare( - model, optimizer, train_dataloader, eval_dataloader - ) - - # Note -> the training dataloader needs to be prepared before we grab his length below (cause its length will be - # shorter in multiprocess) - - # Scheduler and math around the number of training steps. 
- num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) - if args.max_train_steps is None: - args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch - else: - args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) - - lr_scheduler = get_scheduler( - name=args.lr_scheduler_type, - optimizer=optimizer, - num_warmup_steps=args.num_warmup_steps, - num_training_steps=args.max_train_steps, - ) - - # Metrics - metric = load_metric("seqeval") - - def get_luke_labels(outputs, ner_tags, original_entity_spans): - true_predictions = [] - true_labels = [] - - for output, original_spans, tags in zip(outputs.logits, original_entity_spans, ner_tags): - true_tags = [val for val in tags if val != -1] - true_original_spans = [val for val in original_spans if val != (-1, -1)] - max_indices = torch.argmax(output, axis=1) - max_logits = torch.max(output, axis=1).values - predictions = [] - - for logit, index, span in zip(max_logits, max_indices, true_original_spans): - if index != 0: - predictions.append((logit, span, label_list[index])) - - predicted_sequence = [label_list[0]] * len(true_tags) - - for _, span, label in sorted(predictions, key=lambda o: o[0], reverse=True): - if all(o == label_list[0] for o in predicted_sequence[span[0] : span[1]]): - predicted_sequence[span[0]] = label - if span[1] - span[0] > 1: - predicted_sequence[span[0] + 1 : span[1]] = [label] * (span[1] - span[0] - 1) - - true_predictions.append(predicted_sequence) - true_labels.append([label_list[tag_id] for tag_id in true_tags]) - - return true_predictions, true_labels - - def compute_metrics(): - results = metric.compute() - if args.return_entity_level_metrics: - # Unpack nested dictionaries - final_results = {} - for key, value in results.items(): - if isinstance(value, dict): - for n, v in value.items(): - final_results[f"{key}_{n}"] = v - else: - final_results[key] = value - return final_results - else: - return { - "precision": results["overall_precision"], - "recall": results["overall_recall"], - "f1": results["overall_f1"], - "accuracy": results["overall_accuracy"], - } - - # Train! - total_batch_size = args.per_device_train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps - - logger.info("***** Running training *****") - logger.info(f" Num examples = {len(train_dataset)}") - logger.info(f" Num Epochs = {args.num_train_epochs}") - logger.info(f" Instantaneous batch size per device = {args.per_device_train_batch_size}") - logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}") - logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") - logger.info(f" Total optimization steps = {args.max_train_steps}") - # Only show the progress bar once on each machine. 
- progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process) - completed_steps = 0 - - for epoch in range(args.num_train_epochs): - model.train() - for step, batch in enumerate(train_dataloader): - _ = batch.pop("original_entity_spans") - outputs = model(**batch) - loss = outputs.loss - loss = loss / args.gradient_accumulation_steps - accelerator.backward(loss) - if step % args.gradient_accumulation_steps == 0 or step == len(train_dataloader) - 1: - optimizer.step() - lr_scheduler.step() - optimizer.zero_grad() - progress_bar.update(1) - completed_steps += 1 - - if completed_steps >= args.max_train_steps: - break - - model.eval() - for step, batch in enumerate(eval_dataloader): - original_entity_spans = batch.pop("original_entity_spans") - with torch.no_grad(): - outputs = model(**batch) - - preds, refs = get_luke_labels(outputs, batch[label_column_name], original_entity_spans) - - metric.add_batch( - predictions=preds, - references=refs, - ) # predictions and preferences are expected to be a nested list of labels, not label_ids - - eval_metric = compute_metrics() - accelerator.print(f"epoch {epoch}:", eval_metric) - - if args.push_to_hub and epoch < args.num_train_epochs - 1: - accelerator.wait_for_everyone() - unwrapped_model = accelerator.unwrap_model(model) - unwrapped_model.save_pretrained(args.output_dir, save_function=accelerator.save) - if accelerator.is_main_process: - tokenizer.save_pretrained(args.output_dir) - repo.push_to_hub( - commit_message=f"Training in progress epoch {epoch}", blocking=False, auto_lfs_prune=True - ) - - if args.output_dir is not None: - accelerator.wait_for_everyone() - unwrapped_model = accelerator.unwrap_model(model) - unwrapped_model.save_pretrained(args.output_dir, save_function=accelerator.save) - if accelerator.is_main_process: - tokenizer.save_pretrained(args.output_dir) - if args.push_to_hub: - repo.push_to_hub(commit_message="End of training", auto_lfs_prune=True) - - -if __name__ == "__main__": - main() diff --git a/examples/research_projects/lxmert/README.md b/examples/research_projects/lxmert/README.md deleted file mode 100644 index 2ec1aaebbb0..00000000000 --- a/examples/research_projects/lxmert/README.md +++ /dev/null @@ -1,5 +0,0 @@ -# LXMERT DEMO - -1. make a virtualenv: ``virtualenv venv`` and activate ``source venv/bin/activate`` -2. install reqs: ``pip install -r ./requirements.txt`` -3. 
usage is as shown in demo.ipynb diff --git a/examples/research_projects/lxmert/demo.ipynb b/examples/research_projects/lxmert/demo.ipynb deleted file mode 100644 index 576a4b7631c..00000000000 --- a/examples/research_projects/lxmert/demo.ipynb +++ /dev/null @@ -1,264 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [], - "source": [ - "# %pip install-r requirements.txt" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "PyTorch version 1.6.0 available.\n" - ] - } - ], - "source": [ - "import io\n", - "\n", - "import numpy as np\n", - "import PIL.Image\n", - "from IPython.display import Image, display\n", - "from modeling_frcnn import GeneralizedRCNN\n", - "from processing_image import Preprocess\n", - "from visualizing_image import SingleImageViz\n", - "\n", - "import utils\n", - "from transformers import LxmertForQuestionAnswering, LxmertTokenizer\n", - "from utils import Config\n", - "\n", - "\n", - "# URL = \"https://raw.githubusercontent.com/airsplay/py-bottom-up-attention/master/demo/data/images/input.jpg\",\n", - "URL = \"https://vqa.cloudcv.org/media/test2014/COCO_test2014_000000262567.jpg\"\n", - "OBJ_URL = \"https://raw.githubusercontent.com/airsplay/py-bottom-up-attention/master/demo/data/genome/1600-400-20/objects_vocab.txt\"\n", - "ATTR_URL = \"https://raw.githubusercontent.com/airsplay/py-bottom-up-attention/master/demo/data/genome/1600-400-20/attributes_vocab.txt\"\n", - "GQA_URL = \"https://raw.githubusercontent.com/airsplay/lxmert/master/data/gqa/trainval_label2ans.json\"\n", - "VQA_URL = \"https://raw.githubusercontent.com/airsplay/lxmert/master/data/vqa/trainval_label2ans.json\"\n", - "\n", - "\n", - "# for visualizing output\n", - "def showarray(a, fmt=\"jpeg\"):\n", - " a = np.uint8(np.clip(a, 0, 255))\n", - " f = io.BytesIO()\n", - " PIL.Image.fromarray(a).save(f, fmt)\n", - " display(Image(data=f.getvalue()))" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [], - "source": [ - "# load object, attribute, and answer labels\n", - "\n", - "objids = utils.get_data(OBJ_URL)\n", - "attrids = utils.get_data(ATTR_URL)\n", - "gqa_answers = utils.get_data(GQA_URL)\n", - "vqa_answers = utils.get_data(VQA_URL)" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "loading configuration file cache\n", - "loading weights file https://cdn.huggingface.co/unc-nlp/frcnn-vg-finetuned/pytorch_model.bin from cache at /home/eltoto/.cache/torch/transformers/57f6df6abe353be2773f2700159c65615babf39ab5b48114d2b49267672ae10f.77b59256a4cf8343ae0f923246a81489fc8d82f98d082edc2d2037c977c0d9d0\n", - "All model checkpoint weights were used when initializing GeneralizedRCNN.\n", - "\n", - "All the weights of GeneralizedRCNN were initialized from the model checkpoint at unc-nlp/frcnn-vg-finetuned.\n", - "If your task is similar to the task the model of the checkpoint was trained on, you can already use GeneralizedRCNN for predictions without further training.\n" - ] - } - ], - "source": [ - "# load models and model components\n", - "frcnn_cfg = Config.from_pretrained(\"unc-nlp/frcnn-vg-finetuned\")\n", - "\n", - "frcnn = GeneralizedRCNN.from_pretrained(\"unc-nlp/frcnn-vg-finetuned\", config=frcnn_cfg)\n", - "\n", - "image_preprocess = Preprocess(frcnn_cfg)\n", - "\n", - "lxmert_tokenizer = 
LxmertTokenizer.from_pretrained(\"unc-nlp/lxmert-base-uncased\")\n", - "lxmert_gqa = LxmertForQuestionAnswering.from_pretrained(\"unc-nlp/lxmert-gqa-uncased\")\n", - "lxmert_vqa = LxmertForQuestionAnswering.from_pretrained(\"unc-nlp/lxmert-vqa-uncased\")" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [ - { - "data": { - "image/jpeg": "/9j/4AAQSkZJRgABAQAAAQABAAD/2wBDAAgGBgcGBQgHBwcJCQgKDBQNDAsLDBkSEw8UHRofHh0aHBwgJC4nICIsIxwcKDcpLDAxNDQ0Hyc5PTgyPC4zNDL/2wBDAQkJCQwLDBgNDRgyIRwhMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjL/wAARCAGPAlgDASIAAhEBAxEB/8QAHwAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoL/8QAtRAAAgEDAwIEAwUFBAQAAAF9AQIDAAQRBRIhMUEGE1FhByJxFDKBkaEII0KxwRVS0fAkM2JyggkKFhcYGRolJicoKSo0NTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uHi4+Tl5ufo6erx8vP09fb3+Pn6/8QAHwEAAwEBAQEBAQEBAQAAAAAAAAECAwQFBgcICQoL/8QAtREAAgECBAQDBAcFBAQAAQJ3AAECAxEEBSExBhJBUQdhcRMiMoEIFEKRobHBCSMzUvAVYnLRChYkNOEl8RcYGRomJygpKjU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6goOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4uPk5ebn6Onq8vP09fb3+Pn6/9oADAMBAAIRAxEAPwDA1q3ik8VajNKu9V8pQvHUoDn9KbHZWxCgwpl84+UcVpz6Ne3/AIvvjbywqrxoxEhPZVHTBrTi8HaoRgXFp/303/xNdrnCPKpLov636r7iDn1srXA/cJnbn7op4srXk+RHjGcbR6/SumTwPqpx/pFn0x99un/fNWI/Auq4P+kWfTA+dv8A4miNam3Zr+vvCx55qOmW0944WJQ4ij2YAAGWbP6CmTaZZxwtttFO+ZfLyQMDZnk4zjOfyrtrr4da1Lq0Zi1CziZ4tpGGYEcnutOPwr19txbWLNt3qrHB9RxweTyKzVak3Ll31X9a+noZxfM3Z7M4w6RaQy4URqxRkYIwIPBBwDyP1rF162gJ8qNcDbGm44z2H4cV6efhVr7bd2sWZK9G2tn8TjJrG8R/CnWbXRrm7a/tZ2Tb8q7gT8wHGRinKUJSSpx3f9ItK2rZxV9Z211HeWwREFrMFQiILsX5sjI5bgZ59Kj0SCGOZEEgNvJliDApLEYBUknK9uR612a/Dnxnf21tOYrXBAkBDoN+R1YZ54P61Inwy8ax7vKgs4wc4Csnyk9SCTkH8at1YKrzdvLz/pDtocbZWkUcUiuIzAFZ5N0I3PnPBbqGyDwPSs+30W1lklhDF5hB5qKFwM4BxnPpn/PFehR/DHxtHbrbiK0MSqVCsY269TknOaU/CvxfBOsltDarIqIolEik8KOOTjqPSo56b5ey3/ry6BY4+LQbSy1OCaLcVS5gWMk9Tvwx/MfrTU0WwuLwTWv2iMLcPHJj72euQR0Fdmfhl43aKOMRWo8tw6sJFzuBBzyfUUifC7xnG+5be0ALmQr5i4Lnq33s5/Stfb0dktN/61FZnHS6HYywafAyGKTY2WBHzAFyeuME46k8cCqF5pun2tutwkUchZthi88OF685XFdrefDnxRp1nF9qn0+zgSX928txGgDcnaGZvqcfWqLeENSlGJtV0CRePlN7AoyO/wArConUhKOi1/4C/rzuO2pjixt/tX9lJCgtmt9+4qN24jOc9fbHSo9KsrVXlmWK1jVcIJTlwrZHBDZ5PqB61vHwrrBi8v8AtzRfvbt32+Dd1zjO7pnnFOXwrqaODHqnh9F43Ri9g2t06gv7VXtYcydvw/rYLGNNaJb37SRW0EYZsyFkBCqAMtznaDntz0ra8N+HbC8068uDEHHnFo9wOSCAcde2G/KsKe3137ZcQxXdgV8xhhWikVyuckE5zj2NaWhXmvabBFBA1lLtle4CqyHzA3BBAP3eD0x1NKVWN9G1v/Wn9XGovqdUfDOkCBYHgiVhctGHCZJOF6nPTNNt/DWlN5az2se3E3CpyCq565BP/wBb3rGtL7Wp0hBv7EML5V+aWLJZ/X5hwNvt160y31nW5r6KGO601mV5Dh54wrBh8wY7uAAD0IqfbO+7/EfKbUPhHT50V47QkOSIyIyRx/eOeP1qC10DSmaR5LJGWNC+3JGeg9fes6bWNUtkXfJpE0UoLwsJ1IQZwQPnB69mz+tQpf6vp/lzvqemyeZHu8hpozvU8YOMY/MGl7V3Vm/xDlNe60DSlMTpZIqyRh9uScHJHr7VNceH9HaDdb2NsVULuKs+4HHOcnHX0rFm1HVriKe5W90tkRFXy0nQeSCRjGTz6dW6n601ta1i4hWK3/s+PewUtDIpMhHblj+QxR7WWvvfmHKdTZ+HNFl1qxENhbNbm8jQlGfOCw4OT9eleof8IP4a/wCgTB+bf414lZaxrM9zE8Eun2YgdZ/3Tod7KwH8THOM9B+VdMPHmugzJJr0KyoBhBDEeSQPm446+h5wO9N4iqn7s2vmw5E+qPR/+EI8Nf8AQIg/Nv8AGj/hCPDX/QIg/Nv8a89uvGPiW1vmtG1y2eQEgeXHGQ4Hdfl5FVf+Fha75vl/8JBaB87dpWHOfT7tH1mv/wA/H97H7Nd0emf8IP4a/wCgRB+bf41h+HPDOjyatrkTWS7IrgKih2G0Zb39q5q98Z67ZR7/APhKtLmAfy38kxHY/ocqPfkZHB5rN0DxHrTTXt1H4l0yB7u4ZY0laPdMy5Jx8pA+91OAa1hiq3JNOo+nV9xOmr7o9Z/4RPQ/+fEf9/H/AMaX/hE9D/58R/38f/GvNoPG2tXFs0kfizTDKqM5gIQPtXJJzs29BnGa7bSvEcV5p9mz+ItKe4khRnXz4927aCeB+NZqtXf/AC8f3spUovqv6+Rp/wDCJ6H/AM+A/wC/j/41Q1zwtosegai62QDLaykHzH4O0+9XP7S/6jOn/wDf1ar31yL2wubQ61YDz4mjyJFJ+YEdPxqufEfzv73/AJBKlFJvmX9fI4a08GaJd28Oy02ve
rG9ufNb5Quzzu/TLMef7tRad4W0a9JDWFvHFctKbcmWYyhVzjbjK4H+11rTj8KyRCIL4vtovKDKilgCgOcgfNxnJ/Onw+GZreEww+NLaOIncUR8Ln1wGrJqr3f4k04xlFO6M3+z7X+y9vlcf2Js+8en2nOPzrMsLZdN8Pareaarw3qtEoliYh1QhycHqAWCA/l3rov+ETO3b/wl9nt2eXjcMbc52/e6Z5x61DL4Yk0+0ubi08X2sUqwuQYWCseOmQ1EYVW0r/n/AJF8i7ox7+MQ6tKsCBHEmSkYxtkPLAAdMNnitG/vZbnR7O7X7R5sNwV864m8xy2AflOB8ox05wTUeheGGl0aCT/hK7WLe5kKFhkMGOGPzdeTz71qTeGp55o5pvGtvJLGco7yZZT7EtxRUoTjNp/1+AKKt8SMXxBI82oxSysXke0t2ZmPJJiTJq/ZaPaXWmRziEmS4hMMQDH/AI+BvOevOQijHT56t3Xhqe+2fa/GlvcbM7fOk37c9cZbjoKZH4VaJY1j8YWiCN/MQKwG1uPmHzcHgc+1R7GX9X/yHyL+ZGdql/Y6RCLT+z1u4W1MQAGVlAG0KzDHOTjI7c9Kh0+8gsNRtgtgJXN3qNqWeRvmWKJSvAx13Efj9Kp6j4YM/iSLS/8AhI4ZRJALhHTkK4Ylm+912q3PWqz6DPNNpc0HiBUkvbhriEnqjYUMy/N95mBHHUqKwd1JoOUu6XeaXd6daXt3BaW6Xc0iSIXuGaFVxny9iMC2DnDnnI+tZWg6441C72Wf+kRWUs9vh8lmC5BAx125I7jFaU+jalZztMviS8inura4kuPMUwu5RSQXUOefc1zd9ozaffWssOspFKLeGVXT5GUlAcgg+/WhJvZisaPiDX5xaaPd3Fk5ubq0MkrM5y37x1Uk45JUKfpiuhvdVeXS7yxijlkuYLK1d7Zvlt4QTH88b92O4ZyB95uTisLVdCvozql5Prrsl46pBM5P+lITkYO75htAz17CnaTpNzcLBpVz4q8tlnMZsZzIfLC9lXoDnIxxjHvRZ2vcfKb91fT6fpVlLb2lvc2tjqiBTBeRuZBhcv8AKSeT26gYz0rU/wCFlN/0BW/8CP8A7CuT0LwvdQnTpI9QaeGC9MrNGh8mErjDuCehAzzjj16U7RbaW7vzZuY2NxE8cZYdHxlSOOOQB+NS03e0hqKOth+Ik1xPHBFoZaSRgij7TjJJwOq0i/EWV5vK/sYBs4+a6CgfiVxVMw263thd2qQqtxfQxRhVHyrG5DH6keWT9TVW0T7dPaSzQWwIvniISIAMm0EA8c4Pc889am0v5h8qNVviNKiozaG6q4ypM5AYZIyPk55BH4Uz/hZLf9AY/wDgR/8AYVm26S3NvoS3SxGyWNkkkEC/6wPJtUtgdfl4yM5z3zWfr0DRpagRSxT/AD72ltFtw4424VSRxzzxnimou9uYOVeR0/8Awn919m+0/wDCPTeRnHm+adufTOzFFv49vLxmW18OzzsoyRFKWI/JKxhHc+ULrd/xL/7LMW7+HzNhG36+Zzj8axdO064n1O3guSY4mxJISuCIgNxbp/dBNCjKz94OVeR2h8bag872y+GrkzoMtEHbco9xsyK09F1yTWbOSc2LQFJTGU37ugB9B61x1nJNqa6pNJDNc+dPG32W1ba4GWwc4PyqOMY7jpiuo0JbtzqjJcRMDfyHKjg8L9f5mhQlJ257DUY36fibJmf/AJ4tTTK//PFqQx3v/PZPy/8ArUwx3n/PVPy/+tVfV5/8/fwX+RfJDuvvf+Q4yt/zyamGVv8AnkaQx3n/AD1T8v8A61MKXf8Az1T8v/rUvq8/+fv4L/IOSHdfe/8AIguXYzQZQ/e/wqYu39w1WuVuPOhzIpO7jj6VKUuf+eifl/8AWrGnQnzz/edu3b0OajCHtamq3XV9vQUu39w0wsf7ppClz/z0X8v/AK1MK3H/AD0X8q2+rz/5+/l/kdPJDuvvf+QpY/3aYTntSFZ/761GRMP4l/Kj6vP/AJ+/l/kHJDuvvf8AkK1RsPelKy/31/Ko2En94Uvq8/8An7+X+Q+SHdfe/wDIfEv75eaglQeY/wAw6mpIdwuUDEHrTJR+8b6mnOjUjFLn/BHLJKFfZP3V37shMa/3xRQV+YfWiuDEVKtJpKX4I6acYzWxl6af+Kru/wDriP8A2WuvgPSuOsDjxXd/9cR/7LXXWx6V7WI+KP8AhX5HIjTi6VbjqnFV2PoKwAhb/kLwf7h/rWkBWc//ACF4P9w/1rSA4rGjvP1/RHNQ+Kf+L9ELWR4pH/FNXn0X/wBCFbNZHir/AJFq8+i/+hiu3Dfxoeq/M3exd0kf8Sex/wCveP8A9BFXRVPSf+QPZf8AXvH/AOgirtRV+OXq/wA2NBS0UuKzGGKMUUtAHnvxgGfCNoMdb9P/AECSvNG061VV3Wah2bCgSt+p/CvU/ivaT3nha1jt03uL1GIyBxscd/rXF6v4K12DRbuSa1crHEzbmmjyuB7GuuE6cKSc+77/AKee/lsLW5z32KzJVFsxvLFSDIcAgZ60R2Vk5QmzCq+QD5hzkdf5Gku/BNnYyXobVZpFsbgQzlbUZYtnBQb+funOSMds1D/whXkTyx3l28SrdtawvHFvDsMEseRtXDL6nnpXP/aWGlHRP/yb8Pw+RXIzLtZreB5ikZDpMXjHULuIyM/QUjw2EsrjHyJGscWSwHH075zWnaeASyQx3M7R3s8ssKxLCGUOhx8zbhgE9CAT7VY07wfBJZxyajFLtbTZJo/Jt03RuJmXnDLvOBkEnoQOwqpZhhI1OZRvZ+euvTv+o3CWxkGS3a4MoTINzHN8uT90e/1qpbxW0F0ZHGFcOhIzwGBUn9a1k8BySWy7ZGF3JC88MLQrtKLnAZt3ysQpIABHTJqHU/BclhDbiItcXEsMUzx+UqpGHQMRuLZJBOOmCOc9qn65h5LljH3vn9/p2/UXKyhLb2jQwW4ZHRNx3fMBk9h39OtSKIRZvDJceahTasJ3HYc9eeBjnpW8ng6I20dgdLT7Q+nvdG8835llCs4QLnbtwoXpnJzmsrSPDdwl8BdafYNG4I3XbuI075PlHd7d+tSsZCpFe78P3tf8Fpvv+Actioy26WDQNMzxnBjg3NhTnOeeB36etLaNa2scy42OuHjxk/Pgr+H3s/hW+3hOCDxJdW4srBrFtpilvpJQqggE7fLO7BzwWHTGar2/hAw+OIYUs1fT11FVCz7CWi8wcMO/FKpj4zjLljo/e6LS3+fS4KBnyPbM8nl5QNCyADPVm3H+eKrt9neB04JeBIxnP8JBx+ldNYeBo4tdtLiMx3dqLsxTxSQqqg4JGBk7lODjODx0FZ1p8Pbq70qW5VZ1mjjeTabceVheSPMDcHAz93HbNaf2jhV9nTS979X+H/DBySM92tJLm5Y+WUuDuYtuAxnocc9SOnpUEqxNfrdbFYptYFA20hQPU57d63LfwMtpfWCzuZbkywvLB5KmMKxBKli3JweRtx71Brfgeazu5MIvnPK7fZ0VQIkz8uTnqR2A
4GOe1OOOw82ocvR2311/qz2/AOV7mZLHZeRLHG6us0yuSQw24zjP/fR6UWElnaqYt6IEkLOGQsxDAfcOPlOOO1dHa+C7U21pYy6aGnu7WSZrvzSGicb9ihQdpX5RnIJ+Y4IxXO6Nocn2+4WXSI75lPlrFI5A3Z4PysCehGAe9ZxxSqUXGMdV6d3rfz212t5hy2Yy1NvDbyqZWWJ1IaFWbDkjj29Oa7TwBpdhda/YieHzIihG0sRz5ZJ6fh+VUZvBtnZz6jdDTEuEhWBUtHmPlrK65cblYMQpDAc+nJ79J4K8EPD4wmeGGVNPjWOTPmKTGJImYL6nBOPwprMee6px1aXZaqzfnfVfeDhbc9G/4RrQf+fFf+/j/wCNIfDuhINy2QDDkHzH6/nUl5olvaRLKkkpO8Dk9jUdof3Df7xrKOLr+0UJ6X877EziuRtAvh/RJhvlslZz1PmN/jU0fhXQX6WK/wDfx/8AGnxn5BV61NdDrVLv3n95nRS9nH0RS/4RHQv+fAf9/H/xqrqnhTRE0m8dbEBlgcg+Y/XafeukHSqmrf8AIGvv+veT/wBBNVTrVOePvPddfNGlkc14Z8LaLP4dtJJLIM7BsnzG/vH3rW/4RHQv+fAf9/H/AMaXwn/yLFl9G/8AQzWzWmJrVPbT957vr5gkrGJ/wiOhf8+A/wC/j/40f8IjoX/PgP8Av4/+NbdFYe2qfzP7x2Rw2q/Drw/qWrws8U8X7rbtil46k55z61EfhL4a9b3/AL+j/wCJrsZP+QnF/uH+tWzWFOrNud29/wDI0klZehwR+E3hv1vP+/o/+JpD8JvDfre/9/R/8TXdmm1pzy7kWOF/4VN4b9bz/v6P/iaxPE/w50TStNintXuw7TKh3SA8EH29q9UrmvHI/wCJJD/18r/Jq2oScqsU2J7FY/D7Sf8An4vf++0/+Jo/4V9pP/Pxe/8Afaf/ABNdbiiuco5L/hX2k/8APxe/99p/8TSf8K+0n/n4vf8AvtP/AImutpKAOS/4V9pP/Pxe/wDfaf8AxNJ/wr/Sv+fi9/77X/4mutpDQByX/Cv9K/5+L3/vtf8A4mk/4V/pX/Pxe/8Afa//ABNdZRQBx8ngHSgf+Pi8/wC+1/8Aia1tJ0i30W0e2tnkdGcyEyEE5IA7Aelakn3qiNADDTTTjTDQAw0w080w0gKd1/r7f/e/wqY1Dd/663/3v8KmNYU/4k/Vfkc9H+LU9V+RGajNSNUbVsdIwjFMIp5phFAEZqNgKlNRsKQEcfN2n0NMlH7xvqafH/x+J9D/AFpJRl2+proqfBEVT+Mv8K/NlfHzCin4+YUV42O+KJ10NmYFtNHF4ruvMkRP3I+8cf3a6i2vrXvcw/8AfwV5X4ltluPGcxc4jS2Qt/IVDFp9qw3EYUttU88/rXvVYczi/Jfkec6lrr/M9thv7Pj/AEuD/v4Kux6hZf8AP5b/APf0f414emmW3AZMMSRjJ7fjUiaZaHGE4Oe57fjUexf9WJ9sv6ue0NqFn/a0Dfa4MBDz5g9/etIajY/8/tt/39X/ABrwQ6Zam4UiP5dpPU/41ONNs8cxdvU/41nSoWcvN36GNOXLzPu/8j3b+0bH/n9tv+/q/wCNZPii/sm8OXird27EhcASg/xD3ryD+zLTH+p/U/41U1TT7VNOmKxYIx3PqK6KUHTqRk11RsqnNome76VqNiukWQN5bgiBMgyr/dHvV3+0rD/n9tv+/q/414Fb6bafZogYQTsXJ3H0+tSjTbP/AJ4j/vo/41NSC9o7vdv8wjNuN0j3n+0rD/n9tv8Av6v+NH9pWH/P7bf9/V/xrwcabZ5/1I/76P8AjR/Ztn/zxH/fR/xqHTX42Gqj7dLnvP8AaVh/z+23/f1f8aP7SsP+f22/7+r/AI14N/Zlp/zxH/fR/wAaU6daFi3kjOfU4p+xdrk+21t/mep+N76zk0WFY7uBz9pU4WQHs1aHiK9tZ/D2oxRXMMkj27hUSQEscdAB1rw3V9OtvsybI9sjSgDGeSe3XirU+nWqRMyxYI9zTxFN+wt2uOFW8kjT1XWJreK4ku7VlN9MJX2Qnhhk4GT0+Y1VPjdjNJI+nNLuk85VeE4R8YyPm9hwcjiszUNLtJbOIeT8zsq7txypPGevrXIw+Ur5aESeikkDP4Vz/VKPKrr+rL9LFKrNt2O5j8X3VvNBMbGSRopGlQshySxyd3/1qSPxpdRCFRpRkSOFodr7gGVmLHOOep7Y6CubawtvMZmREEUId0Z22qxIGCRz3+varOl28cc9yGjQAhGUIxK4IPIzzWtTB0lq0N1pSlZG1/wm18sOxNIjDqjRpKS25EbOVHbueTk89apXXinUbqRXbT1UrGkYxu6KoUdvQVL5UP8Ac/U0GKL+7+pqI0KMXdLX+vML1H2E/wCEz1gWvkjT4N4jMQmKMXCHqvXHcjpnHGarxeKdRick6TaSLtC7XSXGR34YHJ+uPapzFF/d/WozHH/d/WkqFFXst/67jvU8hD4y1d7iWa4060nL7QqvDIBGFGAF2sOMYHOelVD4r1/+1F1BgrSrMJtphO0kHOMA9KsMkY/h/WomVP7tJUKK2XS3y+8L1PIkfxxr3nwSxW1vF5UplZI4X2yORjLZY/kMD2qOLxrrsNksH2eF5FgktxO0T7wjhgf4tuRuJBxnpnI4qFgoPSomx6VP1bD7W/r7x81TyJG8X60yW5e2ja5gKYuSjh2CYwGAbaeABnGcd6qX/iXXNQhVJy+9HZllVSHCnnZnuoPTOSPXFOYioy1UqNFO6W39dxXqeRJF4s1uHTha+WHlSN4orpkbzY0bO5Rzt7tyQSMnBFVtH8R3+k30lzDYQyu0JhbzPN6k8vlXBD44yCB6DvSsxqtGx8yXnvWsMNRlTqadr/f6i5ql+hbi8R30FxcPHp0X2W4RVks284xcHIIJfeDn/a7kdOKj/tvV7i9urya5nieZgxCEooA6AD0A4FIjZBJNMmOYXPtWcKNNPmjv/XmJymrXSPqSKQz+FLKUnJMUZJ/Cqdqf3Df7xp+jv5vgPT39bdD+tR2v+pb/AHjWE/48Pn+hpL4Jf13LafdFXrQ9qz0PFXrM10S+JkUf4UfRGiOlVNW/5A19/wBe8n/oJq2OlVNW/wCQNff9e8n/AKCaqn8cfVfmiyj4T/5Fiy+jf+hmtqsbwn/yLFl9G/8AQzWzWmJ/jT9X+YlsJRS0VgMpSj/iZxf7h/rVo1Vl/wCQnF/uH+tWjWFLefr+iNJ7L0GmmmnEU2tiBK5nxz/yBIf+vlf5NXT1zHjn/kCQ/wDXyv8AJq3w38aPqJ7HS0UtFYFDaQ9KdTSKAENNp1JQA00lONYHiy+v7DTYX05mWd5wnyoHJG1jjBB9KTdlcDWbqajNeb/8JN4kZUcTSlXVnVvs64ZVzuI+XkDBye2Kc2v+Jo1heeSeKKYgJI9qoVs+hK81PM+wHoZphrhdR1nWrCKV/wC0/M2Xktrj7Ogzs2/N077ulJb6p4hubRJ11KFXlV3hhZF3yqudxX5cdj1Izg4zRzPsB3BphrgLLxB
rl/M6LqCRrGhkkkkjUKijqThSfToD1qz/AGhrwnkV9Vt0gSJZjcsg8so3CkYTdyTjGM9fSjmfYZ1N1/roP97/AAqZq8+v9c1y31JLOScvOHAQRxo27OCpXA5yCCPrSjX/ABAwQhpyJGZUIt1+Yr1A+XkjvWMLqcnbe35GNONqk33a/I701Gxrjr/U9fsoY5xNPJbNDFIZxbKEUuobbnGMjOKUXfiOazuru3N08NsyK4a0Af5lLZwARgAcnPRlPeteZ9jY6w0w1w8uua9FbR3Mv2hLeT7krW6hW+hxg1YsNS1jUITL/aMFvH5giRp1ADueijCn8zgDjJo5n2A60mmk1xUeua3LqAsFlb7SZPK2GNBhs4OTjilutbv4Z44odUhvHc4H2eLOD6fMgz+GRS5n2GdhHj7WmPQ0kgPmN9TWbpMt5b+JzY6pMk5FqJR5WMAnGOQBngkelb7yWW5swydfX/69a1aj5Irlf9fMmetVf4V+bM0jkUVoK1i8iqIXyxA6/wD16K8fHVPeV00dVF2TPLtYtLy58cSraxCUNbqrKWAB79yParMOgawDgWKYzkDzF4P/AH1WhF/yPr/9cf8A2UV1kZwwr6HEScHCz+yjgjGMm7rqchF4a1xtp+xA4Of9an/xVWofCmudPsIwAf8Alqnp/vV3Ns3StOE8VjGrJO9xypxatY8xPhTXft0afYRkofl81Pf/AGquDwdr5/5h4/7/ACf/ABVegk/8TaD/AHD/AFrWWs6Vabctev6IwpRi3O62f+R4BFqDzuI4rC6kfBO1ApOAMno3oKo6nqQfT5R9lnGccnZ6j/aru/Ddrb20dhstDLJcWk87XG5sqQJFwBnGBtAORnJqrrOn6XD4ckWWWESvZLOrgTGQyEBguAuzbn5fbrntXSpLnS8zp5YrZHL2N+8sMEUVhdSSFAAECknj03U/+1R/z53H/jn/AMVXb6VaWYutOnsIoktVcRFyZBKpMZIEgbjPB+7xxWPq9tFZ3KW8MZ2KgInJz54P8Y7AegH481Mql5Xf9fgJQglaxgf2sP8Anzuf/HP/AIqj+1R/z53P/jn/AMVXcTtIYbm0Of7Pj02KWNf4Q5VDuH+0XJBPuRVPw3b5u/tavAZopEWJJJUQ5J5bDEZwAencipc0/wDhylGK2Ryn9rD/AJ87j/xz/wCKo/tYf8+dz/45/wDFV2UNzNp3iW5RvtTRyXJBS1nChzuyAcAhhg/d96oxW0TeJTaXKRiNrloWCEhVJJUEewPP4VTqt73JVOC2Rxmr6rm0TFpOGEgILbeDz0w1W5tTDxEC0uBn/c/+KrqvEei2Nv4fknki2yW6JDJlz/x8Eocnnsrvx0+Srtxo9jcXM9l5BtUgvobbz95JkVn2knJxnA3DGBilVqc1K3r/AFsNQgndI4K61Jm02REtZxIE+Vjs4Pr96uXt3urMQXX2A7SGVJHB2uR1IOeoyOnTivYrfTLLUIYy+ntbASyxG3V2zKFjLBeSTuyApx/eHArlk02x1PT9EE9nHYwJHqE4gLSmOQpt6EbpNvGTjJ4bGOxGpZLy/rsDhDXTc4lryRSGW0UI6bXjzlWHB/vZ6gd6sWOpGOSV54mXcFVVTGABkY610U1n4fW2vb6K3t7xbfTFn8mF7hIUmNykfyl9rlSrcgnuQD0I57X7S0tNZgFvH5FtcW1vceWGLeX5kaswBPJAJOM84qpVL6MuUYqV7F3+2IP7sv5D/Gk/tiD+5J+Q/wAa2fE8txNbeJ7e7z9ksNQjj05SPlhXc4VY/RTGM4HXANZfhK+v7FWu5L2W30O2lEl1Gp+W5bj9zt6OWAxg5AGSajmj2DQhOrwf3JPyH+NXbcG6t1mTAVs4DdeuK1/C0oXTdJtxLLCb+4uTHaQx5guhgKEuGzwAR6NgHOF61yunP/xL4h9f5mmpQ6r8Rq3Y1GtZD/En51G1pL/eT86rM+aiZqvmp/y/j/wB+72LTWUp/iT86iawm/vJ+dVmaoi1Lmp/y/j/AMAV49i02nzH+KP86jOmzf3o/wA6qk0w0c1P+X8f+AK8exaOmT/3o/zP+FVYNPmeaYBo/lbB5+tRmoI/vyfWt6Uqfs6nu9F18/QTcbrQ0DpU/Z4/++j/AIU06bNtMZePJHrVPGeKe67I2ArKMqf8v4/8Aio42WnVH0R4e1WGPwJYWzrIZEgCkqMjIP1p1vqESLsKvkn0p/gV/N+GNgfSFh+tTWn+pP1/pXLOVP28Pd6Pr6eRcnHklp/Wo06nCjFSkhI9v/r1btdZtl6pL/3yP8aF6VfsetdDlTv8P4/8Aii4+yjp0QDXrXH+rm/75H+NVdU1y1bSL1RHNkwOPuj+6fet4dKq6sP+JNff9e8n/oJq6cqfPH3eq6+a8i2422Of8L63bR+HLRGSYkBuij+8fetj+3rX/nnP/wB8j/GovCf/ACLFl9G/9DNbVXiZU/bT93q+vn6Ci422Mn+3rX/nnP8A98j/ABo/t61/55z/APfI/wAa1aSsOal/L+P/AACrx7GE+sW5vo5Qku0Lj7oz396n/t21/wCec3/fI/xq1L/yE4v9w/1qyawoyp3n7vXv5LyLm42WnQyjrlr/AM85v++R/jSf25a/885v++R/jWoaK35qX8v4/wDAIvHsZf8Ablr/AM85v++R/jXN+NNXt5tGhVUlBFwp5Ueje9dvXMeOf+QJD/18r/Jq2w8qftY+7+P/AABScbbGn/blt/zzm/75H+NH9uWv/POb/vkf41p0lYc1L+X8f+AO8exmf23bf885v++R/jSHW7b+5N/3yP8AGtQ0ho5qX8v4/wDAHePYy/7btv8AnnN/3yP8aT+2rb/nnN/3yP8AGtM0lPmpfy/j/wAALx7GZ/bVt/cm/wC+R/jXP+K9cijs7GeJJN8F7HLyBzgMfWuzxUM9pb3cfl3NvFMgO4LKgYA+uDWVadNQbUfx/wCAF49jy7UPEWkCwvra0ldhAohsf3ZG5H2+Yeen3Dwcffpk+saHDpc8NtMhMjQOgCzGQ7T82/d8uRk/dH416C2i6V/0DLP/AL8L/hUZ0bS/+gbZ/wDfhf8ACnePYWh5lrWtWN1bzrBIzltTuJwNhGY2C7W59cHjrVuw1GxEOmXklxsmsIXj8goxMpLOykEDGMvg5I6d67XUdH0xbGUjTrQHjpAvqPanQaPphtoidOtPuD/lgvp9Kcork5l3G1pc890ae2srl5JLiFXltmWN3jZ0icnGHXHzDaD0DD5h7ir13qNhfNd2rXkaedBCDc+UwiMkfYKBlVweML1HQCu1Oj6Z/wBA2z/78L/hTDpGmf8AQOtP+/C/4VmSeZa1fJPrMU9ozbIVijjcjBby0VQ2Pfbmulvdf0pkvVt5DhIme0Gwj95LvEg6cYEvU/8APMe1btzpOmiaDGn2nLf88V9vapTpGm/9A60/78r/AIVlCV5yXp+RlTlec12t+RxU2r20jzKbgmM6RHaqCGx5gVMr0/vA89KL6/sb6DVIVu0jMrWssbOj4fy4mVl4UkHLcZwOOtdkdJ03/oHWn/flf8KYdJ03tp
9r/wB+V/wrS5scnrOs2d3Z3j2v2JDdrGGj2z+cu0ggckxjGMZHbsM1n6fNYz6VHZXd2LUwXf2gMUZt6lQGA2g/MNoxnA56iu5Ok6cOmn2n/flf8KadK04f8w+1/wC/K/4UXCxxdrq8EXir+25CoSW7ldodpZkV884xtP3jxnt9Ksy63awSWT3Nw+q3UHnE3UbNGyhgoQBnXJK4YjI4LDHSuoOlad/z4Wv/AH5X/Cm/2Vp3/Pha/wDflf8ACi4WMjSrzT7zxPbNYLMFj01I38x92CoUY+6Onc9633++31NQ2tlaW9+rQW0MTFSMpGFOPwqeT75+tbVF7kSZq1Zf4V+bEh4uIv8AfH86KWH/AI+Yv98fzorwcz+OPp+p1UtmcdF/yPj/APXH/wBlFdWtcpD/AMj4/wD1x/8AZRXVrX0WL3h/hRx0936mhav0rVhPSsSBsNWtbtwK5kWybP8AxNof9w/1rXQ1jA/8TWH/AHD/AFrYjPFZUd5+v6I5aHxT/wAX6I4TRPBd5/Zzxxa/PFF5hzGkZCk4HON9R694Mu7LwzdomvT/AGcAZhEZCnLDtvxXZ6D/AMeT/wDXU/yFR+K/+RZvPov/AKGK9JRX1lR6XX6HTU0bOdtPBl/eafYzzeI7l2EKshdGYplR0JenN8O5HjSNtaYomdimAkLnrgb+K67Sf+QNY/8AXvH/AOgirtc9RWm15v8ANiucQfAFy1sLU69KbdTkRGE7QfXG/FNb4dyvKJW1t2kGMOYCSMdOd/bAruaWoA4uLwNfQeZ5PiK4j805k2RsNx98PzVcfDYhgw1cgg5yLfp/4/Xe0HPagDyrxf4LuLfSA8mtSzCa6VnVoz8zYb5j83J68+9a+o+A7l7ALLr00scQGxHiJC89hv4rX8d/8gOD/r5T/wBBat7UP+PKT8P5iniNMNzLf3gW5xq+Bbu6EEs3iCeSRFGxnjLFfoS/FeQ+LrDVLDxbcwz61dzS28u+GV3bcpIHK5bg8Dp6CvpK34giP+yP5V4n8XLT7P4tjnAwLiEHPqRx/SohrFBfU4vU01G5jjlutYurh7iIJK0rsxdQQwUktyMgHB7jNZ0tjLcMrTXjyMqKilwThQMADJ6AAACte/b/AESz/wCuf9BVEPxW1RJSsvL8jeskp2Xl+SEmXULmG3in1a6litv9QjuzCL/dBPy/hVi2vdcshKLXxDqMAlkMsgindd7nqxw3JOBz1qHfS76gzGwtqVtbzW8GsXcUE5JmjSRlWQnruAbB/Glt4/IgWLdu255xjvRu4ppagZKWpjNUZamlqBDmNRk0E0wmkICaYaU0lAhpqvH/AKyT61YNQR/fk+tdFL+FU9F+Ynuh4OCDTj86N25pv0pwOIz9ayhuyKmy9UfQvw0fzPhnbD+75i/yq7af6n8azPhQ/mfDlR/dlkH6CtO0/wBRj3/pXLP+PD5/oXL4Jf13LI/Sr9j96s9TV6xPz10S+JkUf4UfRGuvSqurf8ga+/695P8A0E1aXpVXVv8AkDX3/XvJ/wCgmqp/HH1X5oroUvCf/IsWX0b/ANDNbVYvhP8A5Fiy+jf+hmtqtMT/ABp+r/MFsJQaKSsBlOT/AJCcX+4f61aqrL/yE4v9w/1qzWFHefr+iNJ7L0ENJS0lbECVzPjn/kCQf9fK/wAmrp65jxz/AMgSH/r5X+TVvhv40fUT2OlopaSsBiGkzSmkNACUlKaSgBKDR3pD978Kyr/w5DW5UaozUjVG1aAUdS/48Jfw/mKdB/x7Rf7g/lTdS/48Jfw/mKWD/j2i/wBwfyrV/wAFev6F/ZHGmGnmmViSVLr/AF0H+9/hUpqG6/10H+9/hUxrCn/En6r8jno/xanqvyGHvUbU8jmmGtjpGn60w08mmGgBhxTTinHGaacUANT/AI/E+lK/32+tJH/x+J9D/Wlfh2+tdFT4Iiqfxl/hX5sIf+PiL/eH86KIOLiP/fH86K8DM/jj6fqdFLZnhdje+IJZbjU5bp0kW18xGEi7jyAMrnIBBOOmanOueMERWN1J8xAwHBYE9MgHIz71Auq2T288v2s5lsliEJZdqsNoOOc/w+g/Grt3r1pMu+O6A8yRGKgRALhgeo+bt3r15SqN6r+vvPKjKrfb+v66j7fXPFG+dZ9SYGOB5F8uVW5XscE/lULeLfGNvII31CVXwDt3DIz688H2qX+3rCC7ScTrJIsUgLMUGScYGFOOx96yL29tWvvOhuFKthgGcfKfTr0+tEHK/vL+vvLpuo5e8tDo7zxB4stQ0ya3JJLAwjlXGNpOeh3HIzkcgVLpnirxjfkqNZulfOBtQMoHqx3ggfgaxrvUtOK3TQ3ILXkiswLLhBkk459T3xUdjd6bbzGaS6JaJsqqlfnHbndx+tTHmUHpr/X9ehEVP2b7+nkv1/A29J8SeMJreZYdYuUZGbO1AYwQO7buOnoaoXXjPxne2U8UmpSyRBdzqW7Aj3qtp2oaavl3ElwqNG7MYgV2nJyBnOcduhqKyv7KK6HnzR+RIGjkAcfdYY/rW3PJTckv6+80fPeT3NiPxb48tokhGoTKI1RQoYcAj5R1qRvGHj9JEQ6nKWckLtkDDI6jIPaqr+IbN1t5fOj8wTh5PnHKgkr/AOhGmpq+n25jjF2JFLuWZnXIDKV4wT6//WFZ89TrFX/rz/4chSrW1j/X3/8ADlz/AITHx95oj/tSQll3AiQFceu7djH41Pc+LfHEbwJFq0xZoBJIWkG1TkjrnGOn51mLq1girb/awV8p0Mpdd2SQR3x29akGtWCShBdJt8hU3koxyGJ6E471LlUvpH+vv/ATlVvov6/roWl8X/EBi4GpyjyyFYtIoAJ6clu+Kjj8beO5Ltbb+1pllL7MM3Q5xzzVC61i2eK4CXSbmkjw25ASFVh/Ccdx0qOTVbRdbN2k8RQTB8FxyM5qlKdndL+vmXF1GnddPxLviDxR4qexiL61Lcwebj5htIcD03Hsf/rCr0HjHxddW53a9MZdrN5TKCvAJwTuz0HpXO6ldae1nFaQ3QKNceYzFkJAxgfxY/Mj8KtW99p0ensv2wRyuGEjDYxI7KPmyPyond0+Vrv/AEw9/l63v/VzaXxX4ya1Drr0om8nzhEAMBeuM7s5xz0x71L4zs/EU+gaLq+qTPKs8KFXL5+8ob+8cda5+31m2t9NK/a1eVozEFOwbQevzZyRz045r1LxXLY3vwd0ry7u3aaC0t2CLKpP+rXtmrw8+W/NFP1v/mhwjNt8x5Fdx3q29sXkbaU+X5u3HvVXZdf3z+f/ANetXUpF+x2HzDBj9fYVnCZe7L+dddStBS+BdO/b1OutStPr0/JEe26/vn8//r0uy6/vn8//AK9S+an98fnR5yd2X86j28P+fcfx/wAzL2b8yHbdf3z+f/16Cl1/fP5//XqbzU/vr+dJ5y/3h+dHtof8+4/j/mP2b8yHZc/3z+f/ANemE3COmSTk9zVrzE/vL+dQzOhki+YYz61Mq0LaQX4/5kuDXcdvn/uL+dJum/uL+dPLqP41/Ok8xP7y/nWn1mP/AD7j+P8Am
VyPzGbpv7i0hab+4Kk8xP7y/nSb0/vD86PrMf8An3H8f8w5H5keZf7oqKMyb5MKOvNWN6f3l/OoI3UPJyOvrXRSxEfZz/dx2Xfv6kuDuhxeRBkqMVKeIz9aimdTGPmHX1p5dfKY7h19azqOEqUZqKTfNt5W8yJp7eaPe/g62/wFMv8AduHH/jorYtP9R+Ncv8G9StIPCl7DPdwRkXBIDyBTyvvXQW19ZqmxrqAMW4BkGTXlzf7+Hz/Q1knyS0/rU0B7VdsM+YayDqNkjYa8t1I7GVQf51bsdU08SHN9bD6zL/jW8n7zIoxfso6dEdKvSqurf8ga+/695P8A0E01NX03H/IRtP8Av8v+NVtV1bTm0e9Av7Uk28gAEy8/Kferpv34+q/NFcrsN8J/8ixZfRv/AENq2q5rwrqmnp4as1e/tVYBsgzKD94+9bH9r6Z/0EbT/v8AL/jV4l/vp+r/ADBRdti5SGqn9r6Z/wBBG0/7/L/jSf2vpv8A0EbT/v8AL/jWNx8r7BJ/yFIv9w/1q1WXJqdgdQjkF7bbAuC3mrgdfepzq+m/9BC0/wC/y/41hR3n6/ojSadlp0LZpKqHVtN/6CFp/wB/l/xpP7X03/oIWn/f5f8AGtiOV9i3XM+Of+QJD/18r/Jq2v7X03/oIWn/AH+X/GsLxpNHPoFvJFIkiNcrhkOQeG71vhv40fUmSaR1NNpeKaTWIC5ptBNQT3EdvGXc4ApATGkNVIL+KdcowNTeaP7woGS5ppP7z8KaJFPQg03fmY/Ssq/8NjW5AxqM04mmE1oBT1L/AI8Jfw/mKWD/AI9Yv9wfypupf8eMv4fzFLB/x7Rf7g/lWr/hL1/Qv7Ip60005qjJrEkq3P8AroP97/CpjUNz/roP97/CpSawp/xJ/L8jno/xanqvyGseKYaeaYeK2OkYaaTz0p5NRmgBD9KaRSkU2gBqf8fkefT/ABok++31oT/j7j+h/rRJ/rGHvW9T4Iiqfxl/hX5sWD/j4j/3x/OilhwLiP8A3h/OivBzP44+n6nRS2Z8+6bpNpvCzDfJ5O5l5AGSMc5681q/2FYDrb/XLN/jTNCs9QvZYY0tkZ3hI37gCQDj1x2rox4f1Y/8uYz3PmL/AI17VeM4yV+yKjPDuNkvz/q5hLoen5I+z547s3+NKmiabuwbcH/gbf410I8Pav2tAP8Atov+NOXw5q5b/j0/8iJ/jWcb31FVlScbQ3MNNC00yqv2bg8n52/xqZvD+lg/8e2OM8O3+Nb8fhnWTcIos+SP+eqe/vWing7XX6WR6YOJo+f1qI3aaXcUK1F1XLovL+umn9XOOtPD2mOFLW2csRje3p9adqPh3S47GRo7TDAdfMbj9a6zTvB2uzwl0szgOeksfXHuak1fwnrtrpU872ICptyTKh/iH+1XWoS9rZLW/wDkTOULxu9Ounn+OhyMGgaT9nTfZ/MEyx8xuwHvUp8PaSuQ1jg8YAlc5zx6111l4J12WyhlFgSJIV/5bR8jH+9U/wDwguv4ObFiTxkzx8f+PVjKM02mac+HvotPn/X+fQ4r/hHtJHymxG/OMea+P500aBpIB3Wag7sDMrAD8c13H/CC6/j/AI8WznO7z48/zoHgTXwOLFs5znz485/OptManQSs/wBf6t+JxI8P6Q23ZYgkg/8ALZu3404+HtI8gyCz/hyAZH/xrth4H8QBgxsCSARzNH/8VSjwNr4j8v8As/K4xzNH/wDFU0pdTOU6N1ZdV93X9DzfVtA063t12W/JYc729/ertx4e0pB+7tcbTyfMb/Gui8ReD9bstPjlmsjgzKuWmQ9jxwa07vwProgaRtNAIwc+bH/8VTqpqjf1HGpS9peWunbr3scUPD2lM0X+icHg/vG5OM+tes3Pg7SJ/h6BFZhZf7PRlbex52D3rnU8Ca60cb/2aOgbPmx88f71eo2Fo8ehWlnOu2RbVInXOcEKARRTTtqZ1qkWo+z001Pl7UYV+xWCleRHj9BWb5Mf939a6PxJbfZrpbfGPLZ1/IisPYc1vVS5/u/JFVpy59+i/JEHlR/3f1p3lR/3f1qXZS+WaysjLnl3IPJj/u/rSiGL0x+NTbDUdw3kQNJtzt7dO9FkCnJ9Q8mLHC/rUEsUYePC9T61B/aR/wCeB/76/wDrUn2uSaQbLfOwFyN3Ydal2sNqo9F+Zd8mM/w/rSmCPqF/WqsV9JLIEjtiWPQbv/rU6W+kgfbJbYJGQRICCPUEcGneOxXLVtzdCbyY/wC7+tVry1MwhhhTMkkgVRnuc1N505haZYIyiruOJ1JA+nXvUE11c24hulgQiKRX4kDfgQORmtKMoe0jfuOVOty63FbR4zZw+TJFLK87L5iOdoAUE5zjGOTnFV10dmfKzwtDsL+cC20AHB4xnPI7d6sx6qbe1imtLNokinOd0pLEsuDyAMcDrTH1J28q8cXzIGaNd14TIrcEkHbwOfSu9VqXK9e39bGPs59vMamlLLYsYijus+0zbiECbcknPQfhmoLKzjuJ2jdz5SKzuy9Sqgk4z64qd9fkYSr9jHlTSBpELcOoXGDx14zn15qva3D24kvEg3Qq3lNGzclXVhjOPQHn6Up16doWemtyeSb2Op8P6Fp97ZNOPPCN0VnBKnJBGQBnpnp3q6uiWUv70iUMmOd3HX6VleH9cmihkhtNKmkhTAADkkck5Y7ff26VoSaxeRKI/wCxpyRzlJNwJ/Ba53VXtb8y2f36baadynTm4N20JpNBsJt0rCYMSB9/j+VEPhqxc8+b1x9+oW1bUPs6v/Y8oUdvM5/LGafHrV/Egb+xpmD9AHyR9QBkU/rEXNJvS3Tvv/wAjRqKF7dC6PC2nY/5bf8AfdMn8MactvIwMoIQnJfgcfStoWviA/8AMs6n/wCA7/8AxNR3NtrotZjJ4a1MJsbcTA4wMc87a1VWlzaSf4/5bfiHJMxbLwvYm0jMok385w/HWrB8L6aSSBKM9g/Sr1hFrs1jHJH4c1F0OdrJA5GMn/Zqz9l8Qf8AQs6n/wCA7/8AxNXKrS5mm3v5/wCX9dRckzI/4RbTv+m3/fdJ/wAItp3/AE2/77rY+y+IP+hZ1P8A8B3/APiaT7L4g/6FnU//AAHf/wCJrD20eZWf9W9O4+SRjHw9YqwhHm7G5Pzc0v8Awi+nf9Nv++//AK1abWuvfaVz4b1PdjgeQ/8A8TUhtfEH/Qtan/34f/4mohWXNN82l/8AL+vuKlCWhU0rwlpk+s2MLibbJcRqcP2LAelepf8ACsPDvpd/9/v/AK1cFpaa7b6vZTN4a1ILHOjnMLgYDA9dtemf8JJqX/QvXf5n/wCJq62KcZ/upWX9eQRpTa/4P/BMfUPhtoFvp11NH9q3xwu65l7gEjtXP3nhixTwTp9yJbje8+CN4x1f29q63UvEOoPpd2p0C7UGBwWJPHynn7tcpeaxeN4KsITpE4RZ8iTJweX46UUMZV9rG8nb+vIt0JOD/wA1/mdifBGmY/195/38H+FNPgrTB/y3vP8Av4P8KU+JNRH/ADL93+Z/+JqJ/EuoDOdB
uh7kn/4ms/rtb+d/18iPq8u34r/MyvEml6H4Z0aXUruS/dFYIqowJZjnA+7x0rw+/wDEd/PftcxyMsecLAWyNvoff3rZ8deNbzxTqTQqzQ6dA2I4A2dzDqzeprjWI7mh42t/O/6+Q/q8u34r/M34fGEbJmG0mVx1zMMZ/Klj8XT+cDPbs0XcJLhv5VzQAV+CMGn4B70vrtb+d/18g+ry7fiv8z13S206/so7y2ublo3H3S4yp7g8V2nh/RrCT/SI5bgsyYILg46e1eEeG9WOm3jQyORbzcH/AGT2Ney+E78rdFM8eWT+orKviq7pyTkzNRSZqDwfpxH+uu/+/g/woPg7Tv8Antd/99j/AArahl3oGHcZqbPArf63X/mYuVHK33hGwSzkYTXWRjq49fpTovCOntbxkzXWSgP3x6fSt3Uf+PGX8P5ilg/49ov9wfyrR4qv7JPme/6F8q5djBPhDT/+e11/32P8KYfCVh/z2uv++x/hXRNUbVj9br/zsXKjnJPDVlCyIstwRIcHLj/ClPhOw/563P8A32P8K2Ln/XQcfxf4VMawpYquqk3zPdfkc9GK9rU9V+Rz58J2A/5bXP8A32P8KafClh/z2uf++x/hW+3SmE5rf63X/nZ08qOa0m1Sx8TXdtEzsiQDBc5PO010JPHWsW2/5HC+/wCuA/8AZa2feni25TTe9l+QR2EzTTnFKcfjSE1ylDE/4/E+n+NLIMO31pE/4/E+n+NK5+dvrW9T4Ik1P4y/wr82EGftEf8AvD+dFLD/AMfEX+8P50V4OZfHH0/U6KWzPL/BX/H/AGn/AFwf/wBCNd70bFcH4J/5CFp/1wf/ANCNd64wc19Jj/4kfRHHR6+pKpqZTVdDxUymuG6WrNi7bP8A6ZF/u/410MDdK5i3fF5GSeAK6C3mQYy6/nWVCcbz1W/6I5KKfNP/ABfoibQD/ob/APXQ/wAhR4p/5Fm8+i/+hiotCljS0cM6j94ep9hTvE8sb+GrxUkVmIXAByfvCvUjUh9bWq+JdV5eZ0VFqzT0j/kDWP8A17x/+girtZ+kzxLo9kGlQEW8YILDj5RV37RD/wA9o/8AvoVzVJw55ard9V3ZKRJS1F9oh/57R/8AfQo+0Qf89o/++hUc8e6+9DsSilqL7RD/AM9o/wDvoUfaIP8AntH/AN9Clzx7r70FjnfHf/ICg/6+k/k1b+of8eMv4fzFc744kSXRYVjdXP2lThTns1b1/cQtZSASxnp0YeoqsTOH1Xdfa6oIr3izB/x6xf7g/lStxg+lQwXEItYgZo/uD+IelOa4hI/10f8A30KinOPKtVsuq7A0eAfEG08nxRcoBhfMcj865PyOelegfEtAPEEcwIKybxkeuRXGhQDXRVacrry/I1rfH935IqC3p32aryqvWpViBHAqDEzTbmqWpQhLCQsOAVz+YrofI46VU1C2V7KRXXKnGR+IpPYqGs0c/JukvEJulFvvJi2uPlHbA/h7CnvMEuISk+2V4ZELGYE57ZYYFaaaRaeSh8nkqP4j/jVO7022SeACLgt/ePtWTp6HW8Y4t6de7/r07GTbHbdSCVxudHTeWyMkEZz9e9Jcp+6ghDozRIxYhwQMknAPf8PWtSW0skbb5ZLeikk1A1pD2tW/Fj/jV8utzBVvccLf1e5Q3LFpxVWBeZ/mAPIVen5k/pSy7otMljklhwdpQRlSzHPfHOMZ6+1WjawjrbsPxNUroWqYXy2z16n/ABq6UVzq7tqONV7JdLf1+ZNZySLYSJBcJFKZlPzOFJXBzyacZLeSUCOZYk+1yMCpAIGFwfYEjrVUvaf88n/P/wCvUINtub923Xjn/wCvUxhFxk7/ANXNlWqKKjy7f18jTmmi8yJhMouTC6h3mVyrZ4yw4zjOD9OaZbzqkFwLuZHmMqbXLhwDtbDHHUDj/PFZrNb9o2/P/wCvT99t5RHltnPXP/16fs4+zXvdxyxNXm5uT+np9/mdP4LLtJqSu298qSQ27Jye/et6PgN9TWb8NtU8Pabe351ixubiN0XyxCeQQTnPzCurGt+DkZt2lX/3iRg9v+/lYSt7SPzMrzVNx5d7/wBfgjHJqSBv3lara34NzzpGoA+h/wD3lTQ674Kil/eaPqPHUf5kq3a+5UHP2aTg9rff/Wx7dGflFV9W/wCQNff9e8n/AKCa4lPi5oCgf6Hqf/fpP/i6h1H4saFLpt1CLPUw8kLKN0SAcqcfx1tCUVNa9V+aOf2U7bHX+E/+RYsvo3/oZrZry3QPinodlodtbyWuol0DZKxpjlif7/vWl/wt7QP+fPU/+/Sf/F1WInF1pNPq/wAwVKdtjv8ANITXn5+Lugf8+ep/9+k/+LqxYfE7R9RnaGG2v1ZV3fPGuMZA7MfWseZDVGbdkjq5WH9pxf7h/rVgvXIy+MLA3qP5NzgL/cHv71HN4pt3kMga7SM9AFH8s1z05pOXr+iNp4erp7vQ68v700ye9cd/wk9r/wA9b3/vgf8AxVV5fFtmhw0l8PrH/wDZVr7VE/Vav8p1mrS/8Se+/wCveT/0E1xd+/8AxQGmj/p4/q9LeeKbV7CcB7pgYm4KjB4+tYWoa5D/AMItZw7ZRmVXAwMD73v71th6sVWi2N4eqoW5ep6jLNgE1geJdUGn+HtRus4Mdu+D7kYH6ms+bxhZkHEVx/3yP8a4T4ieJlvNA+x2/mp50g3luMqOcdfUCs+ePcX1at/KzzEEk7ic7uT9TUM4weh/CpYlYoMkU7y8rhiPejnj3D6tW/lZmyTtkALgZqa3dyfnx7U+UxohRVHmZ64BGKas8hbb5cQB9EGab2Ippqok+4NMoYgmvU/hpqr3d1tkJJSMruP8XIryRgTIR0yfpXpvgJ/J1VYgQQsB5HTqtZVv4bE/iZ7FZSZiUe1Xwaw9OkBhj57CthGz1rdmaItR/wCPGT8P5inQf8e0X+4P5UzUD/oMv4fzFPg/49Yv9wfyrR/wl6/oafZBqYae1MNYCKlz/roP97/Cpj0qK5/10H+9/hUp61hT/iT+X5GFH+LU9V+Qw0z6U9v0pnetjoMG2/5HC9x/zwX/ANlraPSsW248YX3/AFwX/wBlra78HiujE/FH/CvyFESmmlJzSdq5yhif8fkf0/xpX++31NIn/H4n0/xpz/fbnvW9T4Ik1P4y/wAK/NhDjz4/94fzooh/4+I/94fzorwcz+OPp+p0UtmeYeCf+Qhaf9cH/wDQjXfydDXn3go/6faf9cH/APQjXY6xqP8AZmnPdeX5u0gbd23OTjrX0uOi5VYpdl+pyUE5Npdy4j1OrAjrXF/8JZcC2F1/ZZEJcxhvP/iABIxjPQihfHBXJbTyFAyf33/2NediKM3RkvLyOn2cl/SO5jdRcJk8Y5ratvs7EA4J+tebweO0+zx3h0cG3LeUGN0PmYjP92pT42laVfs+jS/MQFUTEnPt8tRSwaTleC38vI56NKpzTv37+S8z0vRYIZbVi65O89z6CneI7aKHw7dyRptcBcHJ/vCuA0/4h3elhrW40GZZQ27a8pRhx6FKl134jXV
z4cui3h+aO3O0NOZiVX5h1OzFelHC0vrKfIrXXReRtUpVLu35npWl2Vu+k2btHlmgQk5PXaKu/YLX/nl+pryyD4rz6bpNmZ/DtzHB5SKk0jsqPhR0JSp7f4wNdLut9I8wf7Nzn/2SsKmEhzv3Fu+3cn2VT+memfYLX/nl+ppf7Ptf+eQ/M150PitdDr4fc/8Abwf/AIinf8LYn7+HJP8AwJP/AMRUfVIfyr8A9nP+mv8AM9C+wWv/ADy/8eNch8RryfQfD9vc6Y/k3El2sRO0PlSrHGGz3ArFuPjG8EkcZ8NXDvIGIWO4ycKMk/c6Ac/hXK+KPi9baqtlFNos0Atr5JXbzg5AUMCAMDnnPXtThQoqaUor8BWknq/xGa7qniyPS4Xnv8O9wEEX2ZAemQfu+9W5NX8Y7jC95l8Z8sWy7iPpsrmpPH+mpFZq0U0xjuJHfg4UMhUEdCTk57dODSzePE8qJEsI0jkjdYyRKY3GRkZI3Hkduh+taVKdBw5eWP3IE9ToLvxH4nsLFZZdT/fGUQpbLaqXYkZAA25zVWDVvG+qyIsupLaLnPlmNAwA5O4heOlY1n420awNwq212zTTA+a3LRDYASuegByMdcHrmorTx7bwXf8ApFhMsLq6GRWz1UjIBA9aXs6KsuWP3Id2S+MLvXGiglur6K4TeSjxxgA5yD/CD1B61i3cOoWlnFPJqdr5kkayrbhG37W6c7Nv/j1WdX8TWt9ZQWKQSDGXMpBA3FicKMZPGOuOc0HUDDp1xZX91eTK9uqw2zxH903ysGG4/LxnkdQfQ1cJRiuWy/AHqyvfLfaaqiTVbV5yqMYI0bcoZQwySgXoR0NNtrvWp/L8lnk8xtibIQdzeg45PI4qaXUopNJntbu+vLwtsFuHTPk7TyQSeOOMDjn2qXRdZs9NsrhW+1ecp8y2YIPlfayHv6MD9VFX7SN+lvkK3oRRXGuzNGkRkdpM7AsIJbHXHHNRSNr1w8lqkc0sy/eiSDLDHqAM1sXev6XNLcxQC6igltjFG3lj5C0vmMDz05K/TFV7/WtOutOntFkuo3ZLdfPZPv8Alggg45x8wx/ujpRKcbNXX4DirNMoWR1u7YRReawTAkKwbvLHTLccfjTLux1ma/nijMslvaSsklyIPlUA4ycA46ZrdGt6dqF1CsP9orL9tWdfLjBeY7VUcDPzZUnH+0eafJ4m0+6vrVrexZ5oLuSaMSLJnBIOVCMATgHIYdPxpOcLWbX4EySOdFveRWzTxwXktuF3GZYcJjJGc46ZBH4UlksmoSyKG8mOJDJLNI3yooIGTgE9SBwDya2Itf0hXsVaO98uCzmgZRjAL+Zxj/gQ5+npVOwvtDhgvLeS3u1S5iCFiQcEOrDp2+Wn7SN1t+A7ehRvLc2d39nlvShKq6SbdyOrDIIPXBB9KgvNMMIluXurbUEjhVgIC+AS2Pm+VTwMnj861tQ1DQLl4gLa5ZYIEiUscFsf/XJ69qqale6eyRLb314vlbVhUFiI1y2TkkY69B71EpqWisaUpKEuZ7fIo2Vmkrq7i0a3lmEQwZcqcZwvGc98nIqG7aOLTIYfKjJEsqiTJzwR74/SugtLrTpZ4o5r+9m3MdwjUgY2n1cc5xxW1oOhafqAvgfN8gzcIynk9ckbvpWaVrttdDo9vT9nyRWrur+76/p/kcTYtJHBYrB0muSkwHRx8vyt7YJ/WqVpcR2V6Zo1Z0QtjY2DjBGQe1eoan4WtbSwzbboQ7gPhCoYYPB55pg0XTV1WBYbhFRFPCrjBOeg3fSqbjy3bjrcidde4k7OLXVfhr835/ect4eWaW+eaKeUtPbBh9obey4YgDPccH866BYbtlDRzIAPv5HU/l6YraTStNilZ1uQsrcMfKGT/wCPUkWmxQlo2upDvbIPlev/AAKuZVIxqxu47PsXXrqVKUFbXXdf5vV/lpczGju2UiOSNcgbTjnPfPFIEvVckSRFCc9OQPyrcXTYAxVbl2I/6Y//AGVWIdDE+dsz8f8ATH/69XCLl7sZRubQxFOpJPlvfXeP9fqc4EvwmBNEW9WHT0xxUV2t79jmCzJ/qznI9ucceua6w+HW/wCez/8Afn/69VL7QCljcEztxGx/1XsfeumGHrc6em/deXmDT5JRUVqu8dLX89fn8uhy2nJenTYds0Y5J5HbP0qxImolm23EYXPAI7flWrpOjbtKgb7QRkH/AJZ+596mk0jH/L0f+/f/ANerqUKsastFu+3+ZHLOdKEbbW6x/DXru/MwpPtny7JUA/izyT/47W94SbU/7YkNrcxRH7Mcl+c/Mv8As1Uk0ls8XRz/ANc//r1Z0mN9KvHuDIZd0ZTG3HUg56n0rOFGpBp2jp6f5mtRVJxkrb26x6dd/wCu7Oxd9a/ivYDL2baMAf8AfNQh9a3krew+b/E23gj8qzhq7G3eUoeDj71WNP1J7iVfLgaRj1VTzgfhWdOVXml7sfi/u9l5nPPD1NNF96/zIdd8Sav4aht/Mu0nurptsNvDGC7Y6nkduPzrF1fxpqi6jaafcOZIroDE/lAKrEcr06jvW/qkEk+qR6pJpEk8tpCRboSQQ5Iyc49BXPeIZdT8U2sdvb6VJp7W7B4WdNzbz948Ada7Oat/LH7o/wCZH1ar2X3r/MnuLnV4bO4iN3EU8tsjA6Y+lZupXF7/AMI1YhbhAhZeMd8H2qXXZWs7YiRTGrRlAX4ycVi6neovh/T13rzg4z7UUZVfbwTjH7l/mE6FRQ1S+9f5nUT3uqlTuvYsfQf4V5/46uJ5ILb7Y/mgyYG3jBwfpXRS6rCf+W8f/fQrmPEv2e/hgBu41KSZzuB7Uuat/LH7o/5h9Wq9l96/zObSSFFA2sKHnhEirtbJIzSyWcABxqEQz9P8ab9ig84N9viJGOOP8acpVbP3Y/dH/McMPV5lovvX+Y2WWESldjZJA601XhLgBHz9Klexhkm/4/YwxIwvfP50ySAx3LlpC3l4HHGSRmspTqRpXcY29EVUpVIVuZpW5vIY5gDEMjZ69a7X4fz+drEgXOVgYc/Va4V3BkYkZOa7j4UKJPFDoeAbdjj15WsK2Ik6TXKtuyOSUveZ6XpiazJaxOl3AAVGMr/9atiK31/tfW//AHz/APY1HpsYigVOy1swngYrpeJl/LH/AMBRmpGRe22viyctfW5XjIC+/wDu06G31/7PHi+t8bRgbfb/AHa1705sJfoP5inw/wDHvEP9gfyqniZeyT5Y79l2Neb3TAvf7dsrWS4kvICqYyFQZ5OP7tbFrI0tlBI5yzxqzH3Iqtr3/IFufov/AKEKnsf+Qdbf9ck/kKzqy56Ck0k7taK3RCewy5/10H+9/hUx6YqG5H76D/e/wqY15tP+JP1X5HLR/i1PVfkRtzTT7089eaYeK2OkwLYf8Vhff9cF/wDZa2j04rFtv+Rwvv8ArgP/AGWto10Yn4o/4V+QoiU0+1O6dqb+tc5QxP8Aj8j+n+NK/Ejc96RP+PxPof60r/fPrmt6nwRJqfxl/hX5sWH/AI+I/wDfH86KIc/aI8/3x/OivB
zL44+n6nRS2Z5X4NOL61/64P8A+hGut1QW80cMV2R9naeISbjgY3jqewrjvCLbby2/64v/AOhGuh1hEu7NrdywVyM7TzxzX1WIV8RBeS/JmOBV6qS7/oRrEWt7CPV7SK13Xk37tYljDYjGzKggctgdRkd+9Qtbq13Gn9lXbXBgkDE6bGh6rtcQ5Ktjkds5HpWHPpVmjECWb/vof4VZC+H7VvsUE9zc3QjLvtZQo/HHvXm1ayjBtrY05/I1YLewuNOubKYW0k0F0ohQxLHE0vlhgGUcAjONvTcMHis/RtRJ1WS1t4XlulinjD7NiLLsYKueADuwOOhrE8PRWmrW01xqFldXCpIT5Nu6qAvHQEc11l+3htPCNxqGktqDXceI47SVQrBj0428j3FKOJpyk1fVM5o14uT6WZi6vZX/APZ2nW888kNzBDI80EXEiR7iRnuABk/Q1p31rpFx4f8AtRtR5UWnxiO9di37wAZQEnGS2cjrkk1xsvh+SS+EkguGygycd8n2q5LoEUdk7kTgjHX6/SuiNWP1jkT3aXQ6JOKu7r70drBdfYfDLC6g+yqEjxI6sDMc9FycHrnKjtVfTG0K5jn1BoITNYMJ2fy8FwQQAf7w3bOD61iWnh63a3iJNxygPUen0q4vhy1x964/Mf4VlKvFTeu1/wBSVKCad196Na4uW0u6ms7eNNRngtPNhTYDJKzSgjA6NiJgQOehqaPVLaV7hLW1EmpRpC0tosYdo9wJfCc8g7c/3cnpWKfDNo3G64OeOSP8KytU8AQRzY066lSQDLqwyN3oCBR9Yg/6Q5VILqvvR1Fy1lN4hgjtzEu37Yk7feES/Z+vHQbtwz7VlHS9KLXzXNtGqWzqu24SSUShif3h8vJAIHBGB8w5PfNt/DM7SQJqEt3psgDK06rlZFYYbbjB5GQQeuat3fhpbDypbHXdT3xfu4zGmzYnJIBDZHPP41zcrrVmoSSvZb/16HPKUZTbjJfeMGk6LqEot7WGMwRkTTS+WQfIIfcw3AH5GUDOATuFLpVtY6np+lQTaWuL150Rtx/cKZRwvuoOTnOQtZ7WYij1WeJ76Rrn/RRcztgupILnb3yVxknpnjvV2HwzfR2zQLq95DbsCpgjJ2kHqCM4P5VlOlNxTc9Omvlq9H3ehNub7S+/y9SI6dZeXBavpiln017p7wZ+R1VjjH3cZUKcjOT17VLJo0MXh26luLKzt7m3SCUlPMZ/nZV+fcNnR8/Ke2Klj8IzizNoNYvFtmOTCM7CfUrnFSSeFLqS1Fs+t3rW4XYImJKBcg4xnGMgHHsKVp3uprf+uv8AwCuXW/Mvv/4JHqMOk6dd6rJHoK7LG8VFR2Y+YC5G/r0GMDHHzDOal1rVbGDxOLS40+NAPIV96KSoKJwT7A/pUEfhXUYrq9vdL1G7n1WOaLzAoPmNG6lmcnOSMgAn35rUufCUc2p6mz3d3e3cV6YpEt7AXTeUFXacNICFPIyM42jkU6VKV1Lmvpbr2X6pv5j9naWr6f5FfVtcVrLUDeWSZtL1beFZIh8vD7lX2AVf09al0PWbZpbawKxWjtPi4g8jInQ7e65GAM53EADmqzaFfXukzTi81CKKyEr2891bbo9qMcKJd5Mb8ABR1IHNN8RaReWFncJYarPNbuyf2goBUo7KNuV3fdIx83c8HHApPDydLkuv6Vv+D6/eQ6a5eXQdYa1KNNup/s5OlJvRIVgBEzEHt0wMgknp9SKydOni1yWfTY7WH7RNCxgxCo+dfm/UKw/GtDQdM1m60nTo9N1a7WGO9dLuONmCQRnYQ7/NgIfn64GQfWuctdC1e+8QQw6H9qV7ieRLOdFaMMF+8Qw6YU5OOgq1SnaVmk3/AF/w/ctK17WuztUSy0/WdKlt7MJHdX8MVsrRgMqqSshz1znaa5GO20/WJ7C7bTvswlubiCSFZGwwSJXVuvB+bnGAcDgU7U7HVNJ1C203brEcNvGVt5ntpIpeXBaWNCQRyOOhwBnBp99omo2+u2GnQzyW1oJH+zz+XhXOGHy8/MSir35yAacMJVtfnu9uvn/wPmZ+zfcei2erzeHbGayhjU6a0zTK0m5ghmOw4LcMy84XOTx2FYWtDTAbRtOEEksgZZY7VZjGGB42+aA3IPI56e9dgmkTwPp9jFe3UYntprlLaS22FJE7CIMQGILdOePesCwt9R1vUYv7QuLl5XhnR5ZlLNBKNwEfJypPGBxy3StYYapTmtdNdNfN/r+BUabT3Ll48ogudMlidNMj0WOdY2XCrIUQ78dm80lc9eorM8MwzWOkapfMbuxiCwqLu2hzMNzE/LyvykDk7h/D16VPqularbaJbabJNeNbRWTXM0JVtsMgJIBGeufyBzila21DTtFu9VTUriF5XjhgmiYiR4F2DduByR8wGOcbSOKlYaSi4adH93fzfX9RKFk0atwE0iTUr5YL2Ce41JIQ2nkRvsZMr8xXuScqAMkY4xUWn67caBHrdpdWzX0C3zQrexyhHSUbuR1yCATgjFTPpl1Y3d7dJql5FFcvbx29ysexbje68Blcl/l3Zz0wR3rP0MXUvg2d7HTku7gaoq7DbrIdnlnnaeD7nqM9e9a4bDzoykr3uvyt/lpqOnDleo661/W7nShdbbxrQSlfMEnAYAHnHThhzVSfXZdYuQLixlvXx0PzMf610y2cMn9mwaUyNYw6jeJISvmKBjhf9rIwB65FU59IUXlpcxWHm3rWE0iWs9osHmSK+BuhU4ztJ4HXaOOa7XzuCjpZX79dzZ2fQ5SXTby4MtxZWd1GkUixyJ99lZgSOM5HCn24rWj8T3h1iHy2cBSim3kbliD0Hoe1b9jYTanFfpqVskMwmtH+ypH5aI4jlAR1GMDkHHHUA4GSOf0ee8fx20t1araSl2XZIu0xvsYJngAfNt9K53Tjzxk1/TB2tqa+oeILsaq0c9hcW9w4BELuQxGOwIz2qSbVL+C1W4n026S3HBlywUn0ztxWVrsGpwwaXCY5jfRSTyeUEJdYsIRkdQMrIfoc1oae+pnTLuaeyEEUtq7LeMr+gIQc7SSRjpkZ9qJxpuorx79+wQkkopLb/ImTxJE1s0w0W4lhRgrSPdPtBPQHaAAa0Lq4tHtZv9BljJQ8pcMe3vmqUzRDwtdxQ3tvPDF5J2bZAzOd24nK4yTwOeiilvrqM20wjSUAoSAyHPToauM6UJxvFO9uvn6myqU0nzP8SO0lgS0iUzFDz/rGx39aluImhK+azx7xlSTgN9PWudkluWtIxHFIwGeiH1qFbnUmtjbmGYx5yAUPyn2qsTWo+2neC3fV9/USxNNJLT7zoWhB581vruqJrUHnzjz7/wD16x431IKCqS49ChqVJb8cmGTHptNc/tqP8i/8Cf8AmaLFUvL71/mbiQAaTNFklWcEv2HStPQtTh8N/Zb+Rg8Y3xlS23O4N3+tc/Ff3S+HrpjbyFlmGF2HJGVp1hZ2+r6Hq11K8NlLBdWqLNd7wEV0l3DCgk5Kp2OMdhmsaVWk5StBaPu+3qOpXpSSSa+86a9+JdxcSCKyN
lECcAud5/nVefxprGn3piuZrHIAJjMW04I+v41yEeiXTaxPpUt3YW19FP8AZ1imL/vXzgBWVSoBPdiBz1o1OwFr4WstVN1CL6aWaOS0dJC/yMq4XCbQRkk5bpjHORXR7aN/gVvV/wCZi6lNdPxOh8Ra/ZeItKaNvKjn3CTcsm4DAx07Vg6vbW50nTc3EYITAzj0HvRotha3Flpr6mLpZdVvGtLc2+AsAXYPMcEEsN0g+UFeFPPSsjStOXUfE8Om6wTDboZVkZJVh+ZUYgb3BVcsoGSO9a0qtNVYy5Fp5sTrR5bJfiXTptow4vIvyH+NUrnSLUjnUIUx3wP8alutKtLDxDbW95pepW9jKgYKl7FO0mSQGSVYwhXPoD0NZ2v6X/Z3iDU9PgWZobW7lhjZxliquVBJAxnAqfa0l9hfexuvFqzS+8l/sGybG/WreM56MB/8VR/YVgr7hrtqxByFAHP/AI9VuX4f607MYzbybb2KyXazfvGkAKuvy/c+dMnr868VPN4J+0aNo8tlPZpeS21zI8bPJuumimlBKfKQPkQY3bc9snNZ1ornfK7ISnBSWn4lCLRbJrqOU6zbq+4HyyBn6feq3d6Xaf6Q/wDa0G7cp8vjPTHrUmneDor2C4uL66tdP8rSo76B185g5NwseZMI56EghcclD03VTl8J332aS4Fxaeabf7WLPL+c0IH3x8u3GAWwWDY5xihP9y436/odDrw10+1fcYNEsHAY65bKTyRgcf8Aj1dr8PdKsrHVpblNXt3KQMOMDuPeuD0XR7Wax1LVdTivHtbMxIIbZhG8ryE4+ZlYAAKxPB6Ad81dbRxo3im4tEMstt5KSxOy4YxyKki57Z2sM+9YVItwa5vyOV1IX2/E9l8LarDqUEiJfQXLxnkxkdPzrp45kVwu4bj2zXjGh+GLIy6Rc2ry2a6hci3MMl9DdsVIUh/3arsPONjDOTVuO4tETS9ag03VIbV9Qa2dJgJJFKbG3DAGQQxGMcFSMmtNf5vyJ54dl956/eOTYyfKe386njLfZojsP3B/KuUsvFmn6gr2cdwXZvuFo2XP5gV1ttNGbSMeYmQg6sPSrcl7Fe91fbsP21Ll6ff/AMEzNeY/2Lc/Kei/+hCp7An+zrX5f+WSfyFQa86NpFwqupJ28A5/iFT2MiDT7YF1BES8E+wqpSX1Ze99p9uyH7albp9//BEuSfOg4/i/wqZs+lQzsrTQbWBw3Y/SpzXDSTc52fb8jChOLqVGl1XXyIzk0w09hTDW9n3Ormj/AC/mYNsP+Kxvuf8Algv/ALLW0QO5rEtv+Rwvv+uA/wDZa2j+tdGJT5o6/Zj+RMZR7BgetJtHqKbkGkJxzXNyvuVzR/lBAPtic9v8aV1Bc/NzmmJ/x+Rn2/xpW/1p/wB6uiqn7OOvQmbj7dafZX5sdF8tzGDz8w/nRSx/8fSf74orwsxd5QfkbwSTaR5B4WcLc25P/PFv/QjW1f3QBPzcDk9q5PTNRi06JLiV9qrEw+uWPAqGO/udUvWeYlINp2RZ/U19ZW/3qn6L9THLleuvX9CK91Wa+do7U7YujS+v0qz4etES+kwOsRyT1PIqjGoUAAAAdAKswXFxbzRi1DtNKwiVUXczE9AB9a8LEJzpuKNYwSVx+j3Fxpco8iTCHOVwOcitCG6n+YGTOT12j/CqZ03UFvFtBFG0xVnPl3MLKgX7xdgxCY77iKmj03VnuLiIW+026JJK7zRLGqN91t5baVPqDiudujzc7S19CHSoyd3BX9ESw67dXt4sMSKrFCfmfjgEn+H0FVbjxLNJaOjRZBx/EPX6Vo6XZ6rDpNxcTzGC0g0551jWWLJZmAVnTk4YNwxA7YNUNPmju9Bupry/kt9LgjjhdY0WQmV8kYjO3n5WbdnOB36V3Qnh1U9pFbNbLr0sYOjT/lX3I0NM8Q3Ny0FrHAoYqAC0mBwP932p6+Kpym42uF/vF+P5Vn315d6dr8dpe6iB5EwUFWDRqhXh1XHygqQQPeo9WWCW1sdRTVZ5tMaVrfH2RUeEqATiPfhuGHO4E98cVE/YN83Le/l31F7Gn/KvuR0jeJbmCwFzHpAYbQxkkuAxAJwG2AAgZ7nIqmPF2tX42QCCEpzuMkcZP4uOfwrO1horGxtNRtdUmmF/alBFNapE6opCKcBm4JRvT7tH9jalDYabqWniOXzrNriUTNC3KySBtiNywCopOAcZ7VLeGsny6PTbsH1el/KvuReTU9a1D7RFLFI8kZ2N5kqrtY54GQBnjoPSsOTVtTgfbJPM0IODEwUEH0ztrT1C2u7maGCzvEZp4o76R7maKJdzjBIZtoxnGB15qmLbW5ddu5WiERguCLh53jhRS2Rjc5C5IzgDr2qqLw0J86S/DzWwlQp3+Bfcia/8UefYxxJZ7FVwRiT2PtWg3i2Qn/jzYe3m/wD1qq6lbXP9hWUttIZW+wyCYkxqqRrMykhuAAdnckktgdQKjsbqTUUu7nUNRCabZtHKNkKylQW2qqpxjOeRkDgnnFOfsJUkraJ/r+ti4U6cZXUV9y/yL48WS/8APm//AH9/+tU8niS4jtYZzaHbKWCgTHI24zn5fesjUrrULLV4baa/S7l3LLbTbFOUkQMhxjgHcpx25rZ1OK90gWa3t9cPcK7KZZbIBM9ykhyXweM4HtWap4XRcu+39XLuv5V/4DH/ACMrUdQhvHFzJa3AdwN22YY4AH932qibi2/597n/AL/D/wCIrpzNLaa/AiX7XUdzaLJ5jwqjNySOmeQV657Cr11DbSS7zFG7siF/lBJOBnJ9a1pUaTgnFaDlNX2X3L/I5K4tmtYVlms7hUJA4uVJUkZAYBcqcdjiktoftYPkW0pOcBWu0VmPoAVyfwrtrlICtw/7phNIGGMZ6k5P/wBemWsKxXImH2dQp2ljsyPp/wDWrX6vTvaxHP5L7l/kcULZ5LQ3K27rECwzJeRoSR1wCAT1HSqgtVmsRceXPsPuCOuOu2vRINoDBzF5BZjhtu7nv656V2tmqf8ACqREFGQDkYGOZjSdGmlexVOSc1ot+yPCJtIL2o3w3IRwMcYz39Kbf+GPK/s/ybe9YXEeVBxndnnHy+4r6P1MrJ4Gibk7IIjyeQflHFUdTIFz4TkPTMYyDz/BXQ6VNYdSS1v+iJqSsnZLfsjwaLwbc3lncMllqL3Fu4VgFzwex+XrUMHgq/uIlmSwvzC3/LXZ8oHrnbX1em2OZ/70pJyp64GOfypm1DA1uAMgDI/h/KsuSHYnm8l9yPl6b4f6pJO5t9I1Vo/4SY85GOv3azofDJMpWS3uywJAQEAkjPH3fUGvrctsjwM4A9TXlPimytbfXILiKILJLyzZPP36unThKpGNt2jWk0+a6Wz6Hl6fDvXriaRF0fUWeMgONn3c9P4ap2vgfVLsXMkOm3pjtz+9YD7vXr8vsa+rI3HmTPtA+brzziuT8PMv2jxESoGZuQfcvV06VN05trZL8zLn1Wi+5HgMPhCW7tTcQQXLwq4QuGGAxGcfd9K0I/AV61wUezvhIAx4IyNuc8bfY17N
rljZaf4YgS0tIoA90rNsQAE7TVi6m2a/ayYUE20hzjrkPVexp+xjK2/N+FiZ1Gnol06I8hHw8D2BmNlqbcgCeN12gnsV2fTvTP8AhXmpaTPG80dx5LSqrPj3+lerxata2ul7JriKJjNnaxA7CuL8U+LlvtQg02GQPGLhTvTofmHSuOnyuKujXERipSSS6/kcXq+mQ2uqyxG4kUDGN2PQe1RTWltFDE3nv8y5+8v+FN8VvdS6vMgLOqkY3HOPlFVLyOU2tn93cI+/0FRVjFVFp1f5GFNRcIadP0GyRqchLpse5FSyXt3IjA6gvIPG0c1mtFL/ABsMe1SoQsbbSucelbU4U3ON4rdfmhyo03vFfcWIrq8jiUC+UL/d2ilN9d5yNRA9ii1V83bECwBHsKgnAddyNwe1XiqdL28/cW7/ADJWHpNX5V9yNaK/us836t/wFatRXs7Hi8Rh3G0VzkD7Wwe/pVohoysqHaR1I/rXP7Kl/IhrD0v5V9yN1ri6OjXKx3eWMow+0Hb04rHl1O+XRtR05kaR7m5gm87pt8pZFxjHOfM9eNvfNX7R1bRblgcDzh/7LWHNduZZN2OGIBxUUqdP3mopa/oi3Rpxs1Fbdjo7b4mT2Wp3d3/Z91G818LtRbXph3AADy5CEy6cZwNvU1g6j4q/tHSmtZrOZLhLqae3mSYBUEjKWV1KHdjbwQV69DWNO3zZzyarElia25V2FyR7HT6L4y/s+0tYbzT2vJLG5a6s3E3lhHO3IcbTvTKKcAqevPNZkeqwPcRyahb3dzlna4CXCxmQn7pU+WdpBOTndn2rKVvmA96lVd9xtzWtCEZVYxsN048l7HQS+K4Li/00Pp9yul2ERjjt0ulErZZn3NKYyM7m7IBgY461Dr+vrrGu3WoWdrcWi3MjTPFLMsxDsxJwQi8c8DBPuasaXFZQ/LLEr7ThiRyKxrtBFqcqIMKGOMCseWPYHTjbY7WH4h3totnt03d9n042py5G+X5Ak33eCvlQ8d9nXnjAsvGVxaT6ITal/wCzYJoCPMx5vmNIc/d+XHmYxznHvURG4BSe1Zs6RpOoHXcK3xUFCrKKWxoqcXI3rbxkVlitbmwlktm01dNlSKcI7YmEodWKMFOQowQeM+vFm88e30ul/wBlSPqyPHbfZVWHUTHAyYwN8Oz5jtODhgDjkdc8kONUj/66L/Spr1c6vIc9x/6DUqEfYOduv6G/sI6v+9b8y7pHiP7BBe2N9ayXdldBN8cUoidWQ5VlYqwHVhyp4Y/WtXT9Yn1/xFd3UtlKXcJ5UUL/ACRwoAuwjaSflCANkYweDnjlBsEjZPeuq8Bf8hqcD/n2b+a1z1ElBuxyunFS2NS3uo7qwhsdO0q9i08XS3MzvP5ssrKCAqsI1CgBm7E5I54rX8Q6zf3OlHyYNQhS0DSrLdStPJvwOS5UDACjAxgc+tYWhaibe2CSSrHFGOpOAKh17xQl9YS2Vpu2ycPM3AI7gCteWO1iFCPVFTwtq97L4ksElnLI0uCNo54PtX0baW8BhjJTOVHc+lfNvhK2H9u2s7Z2KxKn1ODX0XY3Ae1iIPO0fyq3RpeyXurft5E/V6XL8K+4Zr9tFDpFw8aYIC4Of9oVJZW0L2FuxTJMSknJ9BS66wfQLn1AX/0IVLYD/iXWv/XJP5CqlQpfV0+VfE+nkh/V6PL8K+5ALaFSGCYI6cmnmpGqJuKwjGMfhVi4U4w0irEZPFRtUjd6iJFM0MG2/wCRwvf+uA/9lraNYtt/yOF7n/ngv/stbRPSujE/FH/CvyJiN/Ck47ilNI3Nc5Q1P+PyP6f405v9afrTI8/a48+n+NOb/Wn/AHq3rfw4+n+Ypfx4/wCFf+lMdH/x9p/vCiiP/j7QY/jFFfP5hvD0OmO8vU8Mn0dWtNF+zndPeWskz+fPHHGm2V04ZyoAwo6nrUlnoesfbbuBLTY9rGjzPJPEqIj42tvLbSpyMEHHI5qa213R4xpUd9aSSfZLCWDe1uk4jkaZnVxG7BXADYw2OT7Cto6zpOu6frcrJc29oljZ27PFbxq4ZZPvCNWCY/2QRj8K+mlO9RO5y4e6acbmInhy9GlapeTtFbzafPFC8E08SFt6O2QWcZ4QYAzu3fLnBqJItT8P3Om6zPY7o4bmJzH5ybxuBKhlBLJuGcFgKnvPEOlajaanZSQ3UMEgsxauqLIx+zRPEokG4Y3BskgnHoanl13SdQvbieK0me+1WaATxzxr5UGHVmKNuJbJXA4XAJHNcsowlFxbWvkV73n95m79O0OW7jJ1B7K9ga1ndo4vMgbcrrhVkIJyg6lcjPSrT6vY3uktYQvcRpcww2Nm0iDc5jfeWkwflBZ8ADdj8M0/xRNpMV3ren6ZaO1xcak0kpmhVVgVGcbUIYlgS3UheABg9ar6Rq2iWltp66havPd6dctPGttArR3AO0iORiQQAy9QG4YjHesvq1Jvmcrv+vL5DvIeutaRcm+leLUlv77To7F40iVo0ZfLG4HcC2fLHGBjPeodPs7aK0vNEvVvRBcrHeo8UAMyPHvUKYyw5Ku3f0NVrLRU1G5jeaW7SKQuZ2ht0Ijb+HaC43e4+XHvW5qPh7R5Le1je6uoLa0tvLa4S2Rpp3MhbLLvAAAbaBuJwBz2rSOEpp8qlu18rbWJbZgaxcafqOvT3cxureN5418oxLvWELtOfm++AAAOh5OR0rQ8RQ2180EelG8+y28v2WG1kt0jWInn7wkbexPJYgZ+mAH66ulG9thBDLsjtIo1d413yALjc2D1P49uTXRnxRodtKI5NIlDpYeWw+zxn/TAABJ1+78o/M0vYQi01Lbp/SCzOY1+0/tO6gubKN0s47VY4klZQypHlegJ5IXccZ5Y81tWUtglpplxtuFvdLsGiWNmQRSl3kIO4sCAPMyeOenHUwQa5pEc2ms9pOy29hNbyjyE+eRvN2t97nHmJyefl9hUz6tou2S0ubW6hD2ENuZI7WN2SRWViwG8A5AI6g80pYanKKhfRf8ADBaRAl3ZAJPcW7DZp8MEcr28VwY2U/M3lO2Cp6bj09Kj1fV9I1qPUTcvdWtm1xBLG8MKO4cRbCpQMq4O0kEEYx05wLt1rtlf6OYYLWaOVbRbd1i0i3YHChd5m++uQMn3zg1y+m3Glm0u9I1CO7P2iWKWOW1t1klR03DAUsMhg5zz1APOKmGEpe05r2YK5f1DW7BdAt/DV4lz9mtlY+dEo3CXzJHRgNw3qVcAg4xnI961g+l2el3MF1cXX2XUcJvS3XfG0bBg+3fhhyRjI6+2DX19oNS1O5lgs7qCeSVRDZi2HEQUgZIOd2Av8POScjvNrd5ptzeWdslvPaW1tbxwtm3USZ6yOV3DJLFiMnpgZqnhqSp2T3d/ne9xpPmL0rWl/wCLrZ4XmEcUNsLVGjHKRxKAXO7g/KDwDkk9K7TU20u8hvfkuWa9uVuZhKeIyN3C/Nz9488cDFc5DdaBLrenz2H9oELbpBIJLKNM7Itu4YkbJYgk9Me9b32vTnJDJeYB4Atk/wDiqqODoOzc7W9f8u+
oWm9vzRf0fTtB1LxTpcMaXEohiNuyzJ5YwFcg5Vyepr0E+ENAEoX7APmBJzPJ+nzV5XaX1pb3ck0L3scynKmOBQR+IathNcu54ZZ0vtXIgALtgfKCcdPM55I6V0Rw9GmuVVPwf+RVSnLm/wCCjvF8HeHyzIbAADHBnkz/ADpB4R0Bo2Y6eNyk4HnPkf8Aj1cB/wAJJMH3DUNYyep29f8Ax+kHiOUZUX+sBD229f8Ax+q9lS/5+L7mR7Kf9NHoLeEfD4jDrYjPGT50nH/j1Gu2ttpnhWe0s0EUAxhdxbq4PU8964RNYv5LV51n117dD80iwkop9zvwKp3uq6hqdjLbW02t3U5AIjSIueozwGP8qTpUrfxF9zKhTmpJ2/FHot+wHgsRkgL9lhIX8Vqlfvvk8LgkErIuB6cR1w8t9qc+nCwj/t2S5WNVa3EBLDGONu7PH0pqS3+oXulQxXWpxyQyKrLKhUqxwOBuPp7VtONL6uo+0W/Z9kKdOTXz7o9mMpa1LhxvAILkcj1pZJgFSRWABYbiBya4ldH11iwGo6js6Yw2P50v9ka43y/2hqWQfRv8ay9nS/5+L7mP2FTt+K/zOzupTHbSN7V514qAN/YktjIHH/fVXLvStcEW19T1Ebj33c/rXJeKtN1S2urVZru7ZioKlwc/xe9XSp01Whad9V0ZrSo1FzNro+q7ep7Lsiit5Faf5m3cY9a5TRjFHL4h/e9J1xkdeXrI1HT9atbf97ql/GGOMvu/xrlLgzwi4WPUZGMpBdicZPPvz1qb06dOaUrt26Pv5mSw1Rvb8V/mdn411uwt9Dgi+0JvWVW25x2avPtd8ePeTIbNSixx+UTnk9c4/OsDWI22ZkvhId38XJFUnto2b/j9i9d6gcfrTb/2eH/b36EVKE0/muq/zLWqSvJMHLt90E7jiqdtOralaAsc+cnT6ip9RtYZLlWk1FEAQfKce/vT9KsIJdQt2ivbdwJV7ZPUf7VedR+GJ1YqjPmn8+q7eoa5Iia5cZOMlev+6Kde2yS21q6vyE45znpV3W9KjfWp3+1IpOMqw/2R702exi8m3Buo48JgHsenvSrfxY+r/I5aVGfJT9O67eph+WFbBYg+pHFQypBySoDY4I5FbyWMTDi/ib2wP8aqNoMCiV2ul6Egdv51vS+OPqvzRvKhO3T71/mZqkCFcyDGOBiqshBzhh9RXSW3hiC8s0k+27c54A6c1Fc+E44eftqge4/+vWmKf7+fq/zFGhUcVt96/wAzll+/1GK04ZBsxlT6CrI0O2U83sePoP8AGnx6Tbwtn7fDn3H/ANesB/V6n9Nf5lmzjQaROpO3dMPp/DXL3wCSuNwxuPT612UdjE+g3BW8jYCUcgfT3rnn060JZjqcGQT97HBz9aijtL/F+iHOjNtLy7r/ADOfkACjBJz6jFQZ5NbsunWkgwdWg+px/jUI0a0xk6tAB9B/jWlyPq9Ty+9f5mOv3xircZ8uQnHPvWnDoVuVZ01GJyq7sAD/ABqFrCAIH/tCIsTgpxkfrW2Gf7+JUqE1Tf8Amv8AMbNfMWDxbUkHX3qOWZboq5QrIBhsdD71eTSbMj/kKQH8B/jUq6XaqMHUoPwx/jWIvq9T+mv8yJBmYcHpTLu0kaRXONuR0ra07SoJ7hSb5BngLgc/rVvU9Ot7M7Hv03HkIQB/Wt8d/Hnbua06EnNN/mv8zlEgUX0ZxzvX+lNv4caq7e4/lWzFZ2pnRzfRBtw+U4z/ADov7GA3Ekn22Pdx8uB6fWsY831Z+q/JnfKlo/8AEu3n5mI9jKtuLhkj8thuG6RQxGcZC5yRnvitWw0/UdPvZDhrd1UxuYpQWBz0IU5GccZ64qNHtJrD7NLcb3KbUDxL+6Oc5D53Y68dOa6JIYbbXdQlikdpZZt7K4G1drZ455yfpitpxhytt/19xlKlTs3e23b/AC19Djri2vYTGksY+dti/vVIDehIOFPscVcstGllkcXi7IxC0ibJFcPgeoyMZrUEEMs8YL+fCJfMMItI05wQMlfvYz3rViWaJoXjictFG6gmBUBJ6fKOKtcl9xRpUb3ctPkYkFnfpdKqO8ZjUHPmhdgPQdeD7da3be51qOIsNUvI0QlT+/IwR261EkRh8791IiSMJCWiWTa3II+bqOevWorm6k8nDo+4ys+5lABBAA6fSh8qhuRKnTULp3evb/I02vNSmsJDLrt8IiQpHmM3OfTdVlZ9ahRY/wC274bQFwJnwMenNc8mow/ZHinLBdwYGPBJ9RgkVuW+ofaEW4SIsWOcdQKG17JepDScI6q/Xb5/pYstLrIZV/t+/wCQSf3z8Y6/xe1Ng1LVIy5OrXsqHgFpmBBBII60hu/uf6OwABDYz3z/AI1RS5BkkjjjcqjHJYYJJJzUy5HF2/rb/gmlWFNwly26fp/wb+ZryahqaqGW/uyNoY/v2/xpr3+pGQqmoXXQHm4I6j61U+1RnHyuH8vbgjjp1p3nR7mYqckLg7QegwetJqmTKnQva9r26laC71M6xczC7uQQoUuZiCTxx1rQfUNSChlv7vGwMSZm4/Wsz7TFNqU6MHXDM6kAHIYLwfyFWXmWSFYyCNo4I9auqod+iM/Z0EpJO/b1u/LT9Swmo6gys7aldqoIGRKxOT+PtTJNS1KORkOoXXBx/rm/xqvG6eW0cm4AkHKjPI//AF0jSgzGXHIbIUjjFY2jyohxpumtdf8Ah7/pYttfahFLCW1K6y2c4lb5f1rS0S8vG8SW0Ml7cSxtkkPISDlCemaw3mSRok2bSCSSMnr9TWtoLxv4ntGjLHgg7hjohHr7VpWUeTQqrGlzXhbaNu/W+/8AVz0BP+PqP/fFFEf/AB9p/viivncw3h6CjvL1PD7+2gEseIYx+7H8I9TTbS3gMh/cxn5f7orr9N8InWbqz866EUUsZ+6MtwW/wrq5PBOi6RapJHE80pcKXlbPHPavp5Qvi16/5meVTjzRT7/oeT2mmSX8nl2diZ39Iot38q6rTvhhq14ym6toLKI9TIBux9K9igghtoxHBEkSDoqLgVKK4VBCdZ9EcNb/AAs0RVRLovLGvPloAgY/7RHJ+mcV0dl4V8P2EYS30WwUDu0Cs35kZrXpaoycmzlvCuk6dJpcpewtWPnsMtCp7D2qz4j0jTE8P3bJp1orALgiFQfvD2pPCkqppM2eT57cD6CrXiSVW8PXQ5yQv/oQrrh/vS9V+gp9Tl7rSbXaHsdJs7i9Fra5ja2R8RlX3MFI9QoLdvUZq1Jo+lf2jqLRaZFNMt6VeKHT47nEWBjgkbQTu+Yc8Dkd7S+G7PWrm2muZZ1ZbKJR5bADGAe4PrVkeAdK/wCfi9/77X/4muep8cvV/mzSorNei/I5v+x7KXSLn7NpUdtDH5zedPZRusgDHA83qj4woA6n60eItK05IpHsrC1YZT7U5hXfG20bQBj5VPqOpyD2FdMPAGlf8/F7/wB9r/8AE07/AIV/pP8Az8Xv/fa//E1BmcXY6RHd22jtZ2ULeRqDPdkRr8iHy9rP/s4D8njr61yviLw79r1CO5
0vTDKJZ3WKKOIgSqMtgYxnA7DnmvUL/wACaXHd2Sie8w0mDl19R/s1U8UeBdLt9LRlnuyWmCkM69MH/ZqsMuetyre/6BH4jzrX9JuZ5NFhg8PQLdJAWfSYYpNwUSMfnG4yfNnpnIHTioPGFrHHqFmGsrW0k+xx77SJMfZyCw2tkkk4Abk5+YA9Km8XeEYtBvXti8xTeDGxI+ZTn2/Ck1DQLdEuWiluN8Vr5qgFTubzY0x09HP6U5xfsVLzNINc9vJndWCaATaF7XT4ZLm3XVfliVSsMYQSKPQZ8/j/AGBUGlahY3emW12sFo2mSQTSXt0Ih+5lDPgFv4CAE2rxnPfNeXHwxqE1zKghBdGCNvkjX5iOFyerf7I59qjg0C7aW3jELBrjPlj5BnBIOfTBB649ax9oV7Jdz1Yy2cenTXX2e3GkGwSWK9Ea5achcjf1Lbyy7M8AdO9aN3c21vYaqz28UGlAwCC5SIDzIjIvzBh9/jBJ5weOOleZXfh24h063uLeSRoWt0mcFo8jPUhcZ2j1x+NVJtGv7e0F05dYsKxwULKG+6So5APYkc01VT6FyprqeuNLpw1nT4HsJvKl1KKKCRrFI4XQk/KHDHzQeDnnp15rmr3XrG48P2t+Y4o2N1LBmKILlQsbAHHXG48nn1ri20TUle3QMXa4lEMflyxMN56KSOFPPQ4p8Wh6m8/llzhWVWxNFwTn5f8Af4Py9fan7RdvxEqUT0jT9UiltNMvot502CznW6kA+SNsyblf0ZgVwO+RiuXF1PeL5VtpV1eu0YmW38l/3se4DICkMRnuD2rnZNPkfxHLpVvdygLcvCrybeFDEZOB6DNVtVtUgsIru2v5rm2lZowXiVGDrtJBHPZlI570e0XYapxuen3N4t3FqFo9lcTzGwtlfTLE4lT7vygkMfkwM5DHnnpkQS+I7ex8YLvWVzFNASF5K4UfKTzlh0PuDXn8ej+dYRL/AGhL9smtWuY4PKBUooYkFuzYQkDGOnNLpy2406e7h1K6SW2iWR1ktE2btwUKG355J/u9AfSm6i5Nuoeyj1Pdf+Fi2SE4s71wfwqP/hYlsrFhYXhz2LV4fBqus3as8EjOBIkZwEzufO0Y98Gr1iNZuNTe1ujImPtEfy7MmWKJn29DnkLz0wetHtor7P4/8A2VPDvo/vZ63P8AEC2mIJ0y7OOg8zpXH+LvFo1G8tXFlLGI1Aw0hJP3v8a4ea41mKR0mTfi3adWiliK7RxuDAEMAQcgHPFN1e11a3v4bVJba6d4UlBWWH5AY1clsE7VG77zYBxkVdOvFVIvl2a6lxhQSlZPZnqWr+Km1KEL9mMRHQtISa5WQSysSBEc+prhL+/1KymMV2FD4DcbGDA8ggjII9xWtcpcWMF0Ir8Pc2JRbuHyFUIW4+Vud2G4PA56ZqHVi94fj/wDFKh0X5k2taZM0avJ5f3gMA/Wql3oF7M/kK0MeRlcE4qSwV9RtBLc3jRK1wlvGsdqsrF2BwSMjA9+T7Vbso0vdSaxuNSmiuIzIGZLNJI1VASW3FwcYB7Vu8RD2UY8j05uvexEoUpP5oim8Ganqcysk1tsC4wWwc1qaX8NL23uIZ3mhDRyK2B3wc9ayLJrmeJpmZmcOVBAA4wP8au2vie+0+6gtWnnYSSqpDNkcmsaThGKTjr6ixE6UuaSj36+Ru3/AIdkGpzPcFGV8cBvYVWm8MXd6mLfywsfBDDrWJqniCWPxBcBn6Feozj5RWy3jB9LsFdbna0oB29c/wCc1NZw9pHTq+vkc1KVPkh7vTv5ehWt9CkjcxlIi44IOaffaTILJyEVMISQD14rS0TxFBrYkY2yROozvJ5Jqa8gN/ZS+XvQBGLMe/Hat6bp88fd6rr5o6Oenb4fx/4Bzul2l0ttE8RTByCMnnmr11a3UseAIs/7X/6qsaZoKy6fCwkZWYHPPXk1ox6escDC4CnH3cCrxMqftp+71fXzCEqdl7v4/wDAOJudCudwfdFnOTzgfypn9g3Eh3kxn0AY/wCFb1/pcRUlVwc8VlSQrbw+WnGDg/Wsean/AC/j/wAAfNS/l/H/AIBag0ySPQriJvKGZB0J9q5Q+HXeWVXliRixKtuOME9xiujZlj0GZu5lH9K8/kuvJvZX2BmDtgntzWdGUPf0+138kE5U9Pd6dzWPg3UZJ1SNoGLHC4Y8/pXWa38L103wnazmZv7VDHzs5MTAngD0IFcfpfi270/UoLgEhYzyFPOK63VfiJf69G1na6hJ5BHzQz8b/bNa81P+X8SVKl/L+P8AwDjz4c1H5T58AXgH5z+XSo5tAu4PneSEgnHDH/CvRrDWr+98MvpV7otlPbbSSXADZ/vggckVVtPC+k2enxX7L5jzSBdrDITr/gK1w8qfto2X4jlKn7N+7+P/AADjoPCepzNiERyH0Usf6VbbwhqtuwFwkaZ7MWH9K+h7GKztF8m3hijC8jYoGRWH400VNaslKKDNGcof6VlzU/5fxFzUv5fx/wCAeceF9G3T+ZMsbxZ2kZzg1d8XeGZrmMTW8w2oN2G4IqXw/bvaRPuQoS/ysfuk+lJrXiJpIJbWeHZKMq6np7EVtjZU1Wndde5rBwc0uX8f+AcNHoV8LiKQvBguCAXOev0o1DSL1buSQvDsGP4jnoPapfLilmjYRgHcOfxqG/tF+0yOOvH8qxVWH1Z6dV18j0PZU7PT7Xf1LVho1xcXZZni8pk2nk5/lXpnhKzM2qEMwBMZWQBiehHIry+Fdm4/3QX4613/AMP5Vtr37ajbmEZEi+2RzUYmUOSWn4+hw4n2arS93r3PQ7T7HYQLDawGNAP4VGT9T3qU3kfo35U+0vIL62W4t5A8bDqD09jS3EQngeIkgOpUkdqrmp/y/j/wDFSpfy/j/wAAy7zVLN4XjEo3nHH41Pb3sL2yAHeAoBAwRXF6h4c1Czl3KDJCDneh/mKTSbPVYrtGhilDbsliMDHvWrdP2S069/Irmp8vw/j/AMA2PEel6Zc6bNMLbyphjDxjb3HUDg03wybjQDGkp8yxnUEMOCpI9K2dbJ/sWfPXC5/76FWbNUm0u3jcAq0Kgj8BWjlD6stPtPr5IV6bXw/j/wAAuSXsR5Cvg+1YOmXKC/1M4bmX092q7pOqI88+lySAzW5wpPcVX0s/8TDVP+u39WrFShZ+7+JpTlT5J+726+foPluFN4jYOAv+NPNwno35Usv/AB/R/wC7/jTyeaz5qf8AL+P/AACqsqdo+707+vkc/bTL/wAJdenB/wBQP/Za2TOvoaybY/8AFX3v/XAf+y1tZ9a6MTKnzR937K6+RzxlT/l/H/gEJnXrg0hmX0NTZwODTDjmufmp/wAv4/8AAK5qX8v4/wDAGwsHu0POKe3+tP8AvUsR/eimsf3p/wB6nVkpQVlsc7mpYjRWsl+bHp/x9x4/viikT/j7j/3xRXg5hvD0OuO8vUxfC3+t0r/rlJ/N66fW/wDjyT/roP5GuZ8Lf6zSv+uUn83rptb/AOPJP+ug/ka+rf8AvS9f8zkyv+JH1NQU6kFVbjVdOs5fK
ur+1gkxnZLMqnH0JrgAuClxWd/wkGi/9Bew/wDAlP8AGl/4SDRv+gvYf+BKf40AZfhaEnTJSvP78jH4Cr3iSNU8O3ZA5wv/AKEKyvC2t6TDpkiy6nZIxnY4adAeg96vavqek6hpU9rFrOmq8gGC1ymOCD6+1dKko4lSe11+gS6lnResP/XpH/6Ctbgrzy2v7uC8McfibR1VIwqkzJjAAAHStBdT1AnjxVon/f6P/Cqlh4yk2qkd33/yNK0rtadF+R2opwrjlvtQPXxdoQ/7eI/8Kf8A2ndR8/8ACXaEx9poz/7LS+qr/n5H8f8AIx5vI6W8sHvmiKSNG0ZJBAz6f4Vh+JtLuYNMieW8kcGZRtbPoeetVxrupfw+K9CUe80f+FY3iTU7+fT4/N8UaROPOB2xzJxweeBV4fAUnXjKU1v3a/RCVnLYk+J2kpZ6JDeSXH2qRJgu1x0BBPvXFX+uDRrszJHulktjHGMZGfMjbnuBhTyOc4+ta/j3xEraKLSTxFpl8xkB8q0KuRweSQtczfXBa4Ux6ha42D+Me9KWCpRoJKa37v8AyFTTVRO3chttb0+C3e1iEsUImM8byWkNwykqAV+f02jDAjPcVLDqDroOoXtxG5kkmdbSZgF3NKCJen+yvbgE+9QefL/0EbT/AL6X/Cgzy/8AQRtP++h/hXJ9Th/z8j97/wAjtXp/X3kz6xYrYwXCC5NylkbQRlV2ElWUtnOeAx4x1x0qC41uxkS6nSOdrm8ijiljYARoFKEkNnJzsGBgYz3q3Jcz/wBmxA6pZkBz8u9cjr7VAv2yRAyXUDKehXBB/SksJD/n5H73/kU1/X9MnbxRpUMlt5EEoii1KC72rbRR7I03ZTKnLnkYLHn2741lrFiLQQXy3AEV19pjMKhi+QAVOSMdBzzjnirskV6RzPF+X/1qqPFd/wDPRPy/+tR9Uh/z8j97/wAgtfp/X3kH9vRR+KJdUSBmhe5eXYxAYoxOR3wcGmX2paa1lb6fb/afssckkzyyRLvLsFAAUNjA2DnPcn2qQxXefvr+X/1qimiuRGxZhj6UpYWCV+eP3v8AyLjTu7Wev9dy1YeJo7DSNiyzyXAhkhSIwIFQPkcSZ345ztwBmsx9WhTQ47CKJvNluPNnY4AIUYQD1xucn6irPl3HlDJG3A7VBNDP5kOSOTxxWrwkFQU+db932XkTUhZdf6+ZPoOvwaTcXLzwO6vD+7CY4lVg0bc9gy8+xNXZPFlo89i4t5gIrKaObgZaeSExlhz0OEyevXiqJgucdR+X/wBaoZUuEGeCf92s1hIN/wASP3v/ACBrlX9f5liLxHaQ6dDbtBIXSyntzgDG53LDv055qRPFWmx3n2sRSlriyjtLlXto5BHsWMBlDEh8mMHBC9evesOWOdyco5/4CaYYpBCR5bZJ6bTW0MDDniudatdX/kYwm5N+Sf4E+ta3Ff3kZjzNFHGI0L28cGACTgImQBknvWne+JdPu47+W2gnF3qTI1yJFUJHhtzbSDlssAeQMD1rCFrMWDeW/wCRpqxzK5/dyDPX5TR9Sh/z8j97/wAjHnfY63S/EFnpls8KXF9bgXIkEtvGu6dMcI43DA79WHJ4NQxatbxnU2jgaKa7GyIKBtjjLbmH14VenQmqUP2m4tkClioOMlcY/SupgtdRS8izdW4YqeWUHHX2rb+zqfs1L2i1v1fS3kTKo9rGZpd40Fi7CFmxIfmH0FINRgnv7dfsg3mVRuIHqPatHUY9TRSWubeQHqFUZ/lXPw293HfwOj4bzVPsOa5/7Ppt39ovvl/kKaXK9C3rfkrq1xuto3bK8nqflHtXOapO0wjAQIsWRgHI7f4Vr6yLo6pceY6s2RllXr8o9qxZhM67SjnPbbT+oU1K6qR++X+RFJJQjp0Lfh7WBaXwEuRGcDg16tFqMI06YKdwaJxuXt8pxXiDwyAnEEvH+ya6TwlNqFxeNZwTlcg7o5T1HetY4eMZJupHT1/yNebyPWdCjE2m2sgB+6wP/fRq1dW+6LyySN2RmrehWRttIihcgumc46ckmn3cO6VQOxzWGIkpVZSWzbKjsctEfPkuLaTHmwnp7CsDW7cwXa4+7KuR9RWne3CW3jSRgcCUcj8Kj8RoZRGVx+7YHP1rAowbn/kA3HtKD/6DXnVxue6kABJLngD3r1YWYuNIu0xzvz/Krvwx8JaDqGq3TXyGa8iYkxzcKOeoHeoo7S/xfoh1Onocd4c+F2v67MjNam3txgs8hxkHpiu/h/Z+TyFabW2SUnO2ODt9c9a9mstOtLGFY7ePao9WJ/nV0dK0bIPIp/hh/YmkzvYapcsiQsWW5w2cA9MYxXGahKbTR7WKSZMs+dufdq9z8YRXD+FdU+ycT/ZpNuBk/dNfJVx9vmvfs8iytL6EEmtcLrWiW/4T9T2221lzcAo/8OPrW3DdXToWLhl6gEVwWmeH9c0fTopdUiESkgYJ+YfWt2616HTLAu7huOOeTXO20wSTRS/tO3g0iZJCNxl2kfUcH8xXI62/m3aOWVn8oCTBzk//AKsVmT3UtzI3zHBOetSBTsOTniujHS/fzXmdVOnaaYkKneh/2h/OnXw+eT8P6U6AfNGff+tF/wANJx6VjH/dn6r8md72f+L/ADKxYp5jcfcGK6TwROz390kWdqQlwPbI4/KuUkkBjki/i+8PpW74EnaDWblwM/6MxP8A30uaWJ+GR5uJX76Xqa+j6vf6d5c1lcr0/eRNyCOx9x1H4V29h45s5VCX8T20ndl+ZD/hXkcLXNvfRLbAmFF3pnoUOMg/Q8j8a0ZNUcOwGCAcAgda0aORM9im1KzvLFzb3UUmcfdb3qzbE+SmWGNo714paamr3aKVwTnkDHanv4gdJXQT3C4YjiQitGv3S9f0Lv7p63rzqNFuMso4Hf8A2hWbN4psNM0yBEk8+4ES4jTscdzXmFxrJniZGlmfPZpCajXVI0UKBjArRr/Zl/if5IL+6bQ1O6j1QagJGEu/c20dR6V3vh2+jv3vrmM8SOGI9OteUHVC27GcAZJ56V1fgKaf+1JjGSLdo/nz/Ee1Yr4WXT+Cfy/M72U/6dH/ALp/rUhNV3ObxP8Ad/xqb61kXV2h6fqzEtv+Rvvf+uA/9lraP61iWx/4q69/64j/ANlrZOetdOJ+KP8AhX5GEQPrSHNHNNOfU5rnGSRf65aacmY/WlhP75e1MbiY88bv61T+Axh/vHyX5kkf/H2n++KKRP8Aj8jP+2KK8TMN4eh3R3l6mP4W/wBbpX/XKT+b10+t/wDHkn/XQfyNcz4W/wBbpX/XKT+b102t/wDHkn/XQfyNfVv/AHpev+ZyZX/Ej6moK878XQ2z6/eySjdIsESqDGGAzu9T7de1eiDnpXDeINF1bVPFFzHY2gljaCMsS6qQVz6kf3q4o9QOXuNJszcyu4ESGTYoVc8/TIwKjXQ7fescmFldyiKMkEg45OeOa6oeEvEe52l02Ngzb8GVMA+o+amr4c12M/8AHgskgYsrGVCVJ64+aq0EcVY6Rbpp0UrEvJMC4XbgKMkYJz7V
qQaNZFIUkgBaYE7tzfLyQMc47VZ0jw/r95patHYKUhJjUiRMnHPduvNXV8MeLRBmSwgt0XO2SWZNy564+YD881UtJO4Pc5qG0sY79hJGhQL/ABs2B09Dmr8tjZJcqsVrHIHUYyz4JPpyDWdeC30i4LXkaXJA2YjkD7269m69uPSqU/iTWJplksdMji2DCmZs49MDI+vWs+dLdm0qc3ay6I2bvT7MXEmyFVjU4+8ccdTyaZfx6NHfXKJIrSJMEeKNT8m5to6HHU47VxtxaazeHN3LI+f4Q4A/Q1oz3eozXDzixtYpZZlmlaPjzCDkA5bp9MZ70vaoXsX1LV1PZn7QYSYYIZfI85oi5Z+ei7vu4GcnB56VQ/4R+7u7oQ3OoAYmkichflRlxt79GLAUkLaghmD2UE0c0nmmOQ/KG5wRhge57/Wmy3GsLb3aMik3UqyO5I3Bgd3HPHOPyFXRkpVEkX7NrZEMmiwR2bzT3TQrHFHK4WHc3znAUDI5xg9utPurDy9WSz8wNuKBXxjIbBBx9CKNWm1O4W7kmt4l+1sgcKRhdvQLzx0qe/s9SlvVmeAJIEQDa442gAd/ak2vZXW1yeV89mLfW1m1rdPb2/km1uFizvJ8xTu5OT1+Xtgc9Kj0qK1uJPIntFaMAtNcF2BjT1GDjj3ByeKnuTqN0NrWMCq0nmyhDjzW9W+b3PTHU0sP2yGze1OlW0kbybzudgT6AlXGQO2ay51e9y+R22GWMFrPpzCW1ULHG7Pc72BU87eM45OBjHNXdKVf7MhyPX/0I0z7PfpoEMD6ZbvFvYh2kIJY55OHAJHbIqSwjkt7GOOQbXGcjOe5rKU9NGawhZ6omkCgcDNVZMjvVh2461UlJyeay5pdzoUY9iJmbsar3Dt5Lc1Ix55zVeckwtzScpdzWEY8y0H72MA54wKhndvMt+eh/wAKX/liOewqCckPD7Gulyl9VWv2n+SM60Y2en9XLUlwyZO7isue/mL4D8fQVJdy7Iz3NUY03Zd84qabe7ZyYlq9kh32q5PPmYH0FONzNtLF8YPBwKjHUscYHSnrH+5y/O411UW3Vj11RlSaTd3a6Y5Ly6m+VG4XvgU5jeDlpAR7AVoRRL5fljaOD2pohY+YRjgDPvWHtJeZXsY/zr8f8h8OpoLZULN5gYfeGK6H+0YWuI2JBXb0FcpPFsRX9aUNJu2vJ5fON1dbqP2ENH9r9DKdFX+Nbrv/AJHTXV/CynY2PrWZDJLLc22dwUzKM46/NWVM6W4LRz+bnqKZZar5d3AA5RPNU5b7q89a5FOV+ppOiuV++vx/yNPxFbX8WqTshUxAqAARn7orAknvgcFmBB4+UV2ssEeoyvc/2vZHJ+7uHpii38GXF/eLAt9bvIeRk8gVblO+39fec9P2agk5rbz/APkTiY7i6adA7kgsB0HrViwupNO8SLdRkgowP14GRXXap4Mm0va13d2yYPyk8ZNZEmlWbT731WzSQdi4zQ3Jx1X9feX+7351+P8A8ieyaVqcdzZRXEePLdcn2NS3dwoaNwQVbqa810meSyVooNcsxE4wRvBGfWrU91eqiIuu2g2nI5FQ2+35f5lqMH9tfdL/AORKGp6jDcaqLgHMnmAfQZrSub2OdGGcnIrCubCG4uBM+q2KuG3EqwGTTfsfBA1q0H/AhU3fb8v8zTlp/wA6+6X/AMidTYyQjSrt2OP3g5P4VEuoJZ6mmr6U8SXcCASx5z5i9Dmsqz0xxpcwXWLVozJyd3fiqcGkRpcM66vaBsn+Os6bklLTr5dl5lSjSdvf6dpf/ImjrnxN8RLfi7sLp44nUDyuqqe9el/Dn4jp4nVNMuIpft0Ue55T0f1NeP3GhxucHVLTbnOA3erWhLJ4d1D7ZYaxZxykYbkcj8a05n2/L/MydOHSa+6X/wAifSWrYOiX/wD17yf+gmvPrzTbNPBOn3i28YuJJwGk28kAv3qtZ+Lb3VNNuLc6/p+8wsGUlQcEc/w0tzaay/hGyQ6ham0E2UYAYJy3fH171th7qrF2/r7yZOmoNOa/8m/+RPQryzjnjIdFZAOhFfPfxI0qbTvEjny1S3k5jCHj8uxr2trbxSet7b/98f8A2NcR420e5v7Vv7T1GzWVTlCcA/yFYuMu39feOnOkn8a/H/5E8mgXJq0R8h+lW7XSY5EJOo2qYOMM1WTo6EYOq2f/AH1WuMUnXm7dfL/M7PaUoy1mvx/+RMuD70f1H86ZqJw0n4VuQ6NaLs3arb7gegwf61X1XSrRUlcapAzDHyjGe3vSUGsO15r8mWsVSaaTfxLo/PyOTumKTK46gfnXQ+DFD6zckdBaOw/Nar/2PZzqHk1aCI4+6wH+NdB4N0ezgv7pk1e3lPkFcKBwMj3rPERfLL+uxx4utD20l59n/kctJct5cYC4BjFVTubqK0xotkljCp1+2+ZmbPHPT/aqP+xrL/oYLf8AT/4qt+RnB9Yp+f3P/Ih04H+0IvlPft7GorgH7VNwfvnt71qadpFmt/ERr0DHnjjng/7VEukWhuZf+J/bj5zxxxz/AL1auL9kl5/oX9Yp8vX7n/kZA3bhwfyppZsnr19K2F0e03D/AIn8B9uP/iqa2j2m4/8AFQW/X/Z/+Kq3F/V0v7z/ACQvrFPl6/c/8jJaV1t5sbjlQP1r1HwMnl2uSMHavb61xlh4etLq4ii/tuCQvIoCgDnHPrXpWiW0UV3eosqgK4UfrWKi+VmtKvDkn6Lo+/obDH/S0/3f8amz71H5SC4Q+cvA6VN5aD/lqtZckiquJp2jvt2fn5GDbf8AI23v/XEf+y1tE8nrWbb20Q8S3cguULGIAp3H3fetbyl/56rXRiItyj/hX5GCxFPz+5/5EXJ780hNS+Wn/PVaTyk/57LXPyMf1in5/c/8hsP+tA701v8AXH/e/rUqIqOG81TjtUJIMpI/vU5K0LMVKanXuuy/MfHn7ZH/AL4ooT/j9j/31orw8w3h6HoR3l6md4RgeaTR9veGT+b11uu2Qi0+Nnf/AJagfoa43wLeNPNYQRtiSBHBwOcHcf611/iGNl09JZ2wPNHzSNgdD619a1/tS9Tjyx2qxXmbYltYuIl3kf3Rn9axoLiVvFN4UATMI9/7tZeq/ETwvpBMf21r6cf8srRS/wD490/WuHvPiPqs19NeaRZx2RlXZmfDso45x0zxXDzRVxqnOR7E0bFDJM52DktI2FH9K5nVPiF4X0gtE2oC7nX/AJY2nznPocdK8c1K/wBT1l92r6pcXX+wznYPw6VXjjhhXEaKBWTq9jaOHX2mdenj7WNLs2stJtbeMO5k8+YbmXOBgDp29K5vUtR1TWHL6vq11c5/5Z7yif8AfK4FJNLslH+7VORgW5NGIk/aSN4QitbFz9zBpkSxRqAHOAB9ahExPanyN/xLIv8AfP8AWqgb3rnudVXdeiLJfPWgNzUAfJqRWpGRMG9qq3l5An7tnwwIyMH0qcHnrQtzFa6PrcwFytwTDGJIZxGQGB4+6TjI5GeRxxW1CbhVjKO9wKWoX9vLbqqPk7wcbTWreatZNOC
s2RtH8B/wqtr1nY/2leXd+bpklv8A7Oi27BSuFBLHIOfvDA4zzyKkbQLSO/ttPuZZ2ubq4kt4pImASMq+wFgQS2W7AjA9arnfsOXzMH/FTGf2paf89f8Ax0/4Uv8Aaln/AM9f/HT/AIVl39rZWem2LL9oe7uYPOZi42J87LjGMnhfXj37aumQweXo1k1tC8eopIZ5HjBcHeyDa3VdoUNxjrzmsLOxqp6l2XWrBtJhjE/zhySNje/tVA6paH/lr/46f8KybS4W2WCVioXdhmaBZsDnna3BP1rRvp7W11mC4hRYrWe2RjI1lFJu4wXERO1cspGM8c4qVFobqJitqVr/AM9f/HT/AIVXe+tj0k/8dP8AhUOupbx643lxPHaOI5FVQFLIyKdwHIXOc47Zx2rVl8KW0IbzLic+VNK8m1hzbqJNrDjqfKb/AL6Xiiw1MyjeQdpD+RqCe5iaMgOc/SrTaPaC3NuJLn7cLEXvmbh5WCofZtxn7p6568Yp1x4ftXku7C2muFvLN4klkkYGOTe6odoABXDMO5yM9KXKXGq0yj9piEPL9h2NQT3ETPDh+/oa6GLTLK90qXTLGS5j36xb27yXDBs/JMNwwBjv8vOMDnmqn/CPaZLfWS/anjjfz/MijvYbiQBIi6uCnABIxg+nXnjfm/cqHm3+CIqTcl/XcxZTE7ctkD261BKynCpnHrVnWLW3gtLG7sjcLFdIx8uZw7KysVPzADIPB6VqSxQrC+l/Z4RENHF4J/LHmeb5Yk3b/vYydmM49s1MboxqWluc7y0qjGEWrJK/ZwM/Nmq+hX4tb5RPKEiYYd2tI7kj6LIQPxzmuolvNN07UNWtmRbNp7iFrV5LGO6CRlWJyrn5QdyHjJGMY4rooznComjDlg0Z1nFJPC8kakgPjOfxq9DaTq5LRnaRg8iqsVtc2U19ZzSYnguXR/LOF3A4OMdqvNIy5RnbBxg56Vj7/Ror911T/ApXNlcGPaIzw3qKbe2NwYGxDuYdORUl5JIGx5jjJGPmNE0suwjzHyPeuxqr7CGq+138jKbo32e67GYNHvZEDGILnryK2/Dng+5vJlWZAsO7Dlhkgeoqbwva3et6gljDMGmByQ7HpX0XoXh2003T44XgiZwOSVB5+priSqd0bN0V0f4Hnlp8PvD9pdW1wtwDtX94u04LevSte80+wsrv7fa3C5GFKhCMjv2r0FrGzxxawf8AfsVVutPgkhcJawZI/uCn7/kTel2f4Hn/AIsFlqPhqfy9jyou+MFD1rw26064kunbyB25BFe1+J9TTS9Km0+SzVZGUgSbQDzXjsrSm5f97J/30ambqKO6OijGk3s/wEt9NmUcx4/EVYaynP8AB+op0RlA++/5mpAZD/y0b8zXN7/kdsVT6JlU2E//ADz/AFFINOn5/d/qKtEyY5d/++jQN+Pvvj6mj3/Id4dmXLO1kTRZ0K4Yyg4z9KzPsU6ysQnf1FbVtu/sS4O458wYJPPash3k3sd79fU1NPn5Zev6Im9Pm2exMLaU9U/UU9bFjyY/5VWEkm7iRv8AvqniaVeTIf8Avqq98b9n2ZYOmjy3YxgttOOla8evavbeGbbTUbMUcu9VODjr/jWE93IInG9h8p71Vku5P7PjHmPnd/e+tdGF9p7aNmtzOqqbpu6Z6ZqPxB1u7t1iUpHxhigwTXB6jPqF9MTK8kme7Nk1DNdyY4dufeqwlkY58x/++jWLlUfUIwpR2T/AlgtpWThe/qKcRtJUjpTrBn81QWOCTxn2pZR++f6murFyf1qafcUox5VJDF5kX6iq+oj5pPwqymN6/UVBqAG6T8Kpf7u/VfkzWn/D/wC3l+RkXQ+cfStjwYWXWLjHQ2zZH4rWbOivg7wOK1PCo8jVJWUeaTAw2r25HNZ4iEmpNfmjOvRlKcmvzRzFx8kdvGeNsQ4+pNQZHrV6SFXYEzLwAOKj+zx/89hW9n/Vji+qVPL71/mLpJH9pw8+v8jUc7hb6fPTzG/nVzToUS/iYSBiCeB9DUF1BGbuYmZQS549Oa1al7Jev6D+qz5bfqv8wT/WCmsrM7ALnntSxKRKo+1Agdqs2wKTZ8/POcA1q4v6ul/ef5Ih4edrfqv8zc8FWpk8RWquP9UrSsD+AFelaX/x+Xx7eZ/U1yugSeTILhbQvIV27wOSPyrqNJEnnXMkkTR+YwIDD61z2ai7lRpuEJc3l1Xc18/6Sn0qxmqoP+kJ9KnrEVXaHp+rMe2P/FWXn/XEf+y1tZrEtuPFt5z/AMsR/Ja2s4rpxPxR/wAK/IwiO64pp60hz2pDn0rnGGcNg0L99frSbgRSqRvAI5zSew47olT/AI/Y/wDfWikQ/wCmxgj+Nf50V4+Ybw9DeO79TzOwuNTsb2G80y7FvIE2bs/XPH41JcQ6vqN00upapJdEjgPJwPwArjLJiLVOeh/rWvazFm5P8NfUKpGWIjdfj6meXU5qpFRa37eRoR6S0IxGIR+P/wBantp8+0fPH+f/ANasQy+9PaT/AEdTnvXEpU9fd/H/AIA3Gpp734f8E1P7NnJ+/H/30f8ACkOm3H/PSL8//rVjeYfWjec8moc6f8v4/wDANFGp/N+H/BN6606dpRh4/u+v/wBaq50u4P8AHH+Z/wAKp6g2J15/gH8zVXfx96tK8qftZXj+P/ACMalvi/D/AIJ0D6bOdOiTfHkMT976+1VhpU/9+P8AM/4VWkb/AIlEBz/Gf61UDe9YuVP+X8f+AbVY1br3ui6f8E1l0ucfxx/99f8A1qkGmTf3o/8Avr/61ZKvj+Knh896Oan/AC/j/wAAy5an834f8E1hpsw/jj/76P8AhVK90XUXhnjingWCdkLqTySoOO3uaiDcdakuT/oMf+//AI1th3TdeEeXd9/XyKjTqST97p2/4I+S28QwvLOt7aGSaQSOXRWG8dGAKYVh6jBpHXWbLMMd5FhsvuIDMpbhirFcqT3IIqlqDf6OvP8AGP61eDAdDmio4exjyrdv8DKFOXPeUr2M+bT764SFZZ4mEMflx9sLknHT1JqxAmr2tmbWG6hER3YyoLLuGG2sVyuR1wRmrO/1o3gmuW7NuVCR2Go2mnW9xG9ljmMB4lcEZJ5BUg/U5PSoc6sbprh57WR2QJiSFHQKOgClSoA9hW1Mw/sG25/5aH/2as3eM9eKlNspwRl3djf3lw9xczxySv1Yk9uB26YHSrEs2tP5u++QiW3W1fgcxLjC9Pbr1/M1YZ896hLj1qtRcqKzPq32D7H9qi8nZ5f3Rv2Zzt37d23PbOKivJ9XnsvIlu4yi7clVCs23hdzBctjtknFW2bjrUE7fuWyaltmkIRckJc3+t3Vuqy3kYAlW4zGioTIAcOSqglvmPPU9+gqvNe6o1zDN5tskih13R28aZ3rtbO1RkkE8nmrSO2xeR0HaoJ2Jmh6fe/wrenWpqHLOF/nb9CKkUr/ANdSnPBdzW0EDyoYoNwjXH3cnJ7c806SbVX0/wDs83KfZ9uz7i79md2zfjdtzztzj2rSDc9KQqTnir9vQ/59v/wL/gD9lFmXaS
ahaXLPF9i+ZVVg9rG6/KMA4KEZ9+p7mpv7Q1aCeWfz7eSeWQSNJNAkjBuxUsp2n6Yq0I8HIWqV4P3e4Kc7h/KurC1MNOrGLpvX+9/wDGrS5VdFi2kkRJHncvK0m9mzkknHJPrU88+8BhmqCFwrqQ2HHHHQ9qky/wBnCFW4as/bYf8Akf8A4F/wDL2T7DribzYlH8QbIp80u9SV71SlRwnKkEHvVqOPzIypV1BI7V2OtQ9hD3H9r7Xp5GU6avquqGafeX+k6xFf6fMYpVwCR3HcV774f+KFnfQJDcWF5HOqgHADBjjrnIry3RfDtlPIrvJqLueoghA/Ug1674V0/T7UCO03CUff3zF3/H0rjjVoP7D/APAv+AbTjGK2NYeNNNwN0N0D6bB/jTT4y0/nEV3/AN+x/jW5KluTGJHUvn5cnkmkZ1UtGCCw64PShzoL7D/8C/4BkuXseSfEXUbTVoIpIIpgydS6gZH515iQvmlueTXsPxM1MC1isoJVeXdmQJztHoa8v8pyf9Wx98VE62H5fgf/AIF/wDroQfYpq6Ds30qQSp/darflP/zzP5UeTLwfKbH0rH2uF/kf/gX/AADq5ZFQyp2B/Km+auMYNXDFL/zzP5UnkTHpE35Ue2wv8j/8C/4AOMh8N1GukzxlWyZARx9Ky3dST1roYYpRolwChz5g7fSslrafJPlHFTTrYXll7j3/AJvJeRLi+b5FEuvvTC/XrV421wf+WRqFrWfvGar2uF/kf/gX/AHyyKsj5RwM8g1WfP2VF96vy204hc7ONpqnJbS/YY328bvX61vhauG9tG0Hv/N/wCakX7N6E+4e5pysB61KLWfOCo/Opfsky9uvvWHtsL/I/wDwL/gD5JdhbAfvVz6n+VLL/rHx2Y0+2ikW4XIH5+1RzRymV8AdT3rCtWVWvKptcpwl7PbqIg/eL65FQ3/WT8P6VKiSCRen3h3qG/Vt0h4xxW6kvq79V+TNKcX7Pb7S/Izpl+UfStjwYp/tqb/r3b+a1Q+zGSMfMBxWz4QtCmsSnfn/AEdv5rXJVkuRnPOL1OPZc1ERV42f+2fyqNrUd2P5VspIzcWGlj/iZw/U/wAjVa8X/TJ/+ujfzrR023C6jEdx7/yNQXVuv2uYknl2/nXQ5fuV6v8AIlxdihCP3y1oWkJaYY9ahSBRIOTW3pFsG5zk5rdO+GX+J/kjnqxsrnY6G22NR0wK6uBvl965TTVKBa6W3bt1rBmKLuf9IT6VOD6VWzmdT7VYB5qToq7Q9P1ZkW3/ACNV5/1xH/stbWaw7bjxXef9cR/7LW1k4rpxPxR/wr8jCI7JI+lNPFHQZFJnrXMMOlC/eX60n86FPI+ooew1uiwhzeR9OHWimJ/x/wAZ/wBtf6UV4+Ybw9DeO8vU8Q06ENYxHA5z1+prRggZX+XaOPSqWmg/2fEcev8AM1pQM28g56V7dOpL60o+f+Z24PDwcoPVXts/Ir/Zs9An5U8wERgYXj2pUZuwNSEkr3rljVlZ6LbsRKhHTV/eQC3OeQn5UvksOML9cVOMntSrnJGKj20uy+4v6vDu/vGzROZBuKk49KYIDn+D8qvMIX5dyD04FAS3/wCejf5/CuuupSqNpx/AmnQjyrV/eRPBL9jj5Tbu4GPr7VEbdh/zz/KtNhbiyj/eNt3cH86gxbZ/1rflWXLN9Y/gbVKEbrV7LqUfs7Kf4Pypwhk7BPyq4fs2MeYaX/Rto/emjln3j96I9hDu/vKqxyg/wflS3SOLNMlfvdh9atZtRj94aivDD9lTDnG//GtcNCf1indx36NdmXGjFKWr27leexknQISgGc5xSDTrkjH2pvzNXw9vj/WmnLJAD/rG/KoU68VyqUbfIj6rTbu2/vZltYXAP/Hy3606PT7g/wDL0361ouYOpkb8qYrwZyJW/Kn7Wv8AzR/8l/yD6pS8/vYsmlXo06JjfZjLnC5PB5qqNMuMf8fWPzrceWP+yYCzHZvOD+dVhJBtOGNR7XEfzQ/8l/yL+qUu8vvZlnTbgHBuj+tL/Z1w3/L3/OtAyQ45kP5U4NCB/rT+VL22I/mh/wCS/wCQfU6Xn97M3+zLj/n6/nUV1p06WzsbkkDHHPrWsJIc/wCtb8qhvjELOTEjE8fzpOrXtrKP/kv+RcMJSUk7v72VINMu3gjK3JAKjjn0qvd6ddJc2oNxks+Ac9Olblq0Qtov3hzsHf2qrqDRfa7Ihv8Alpz+Ype1r94/+S/5GdbC01Fu7+99yMaVeY/4/P1NNbTLsHBvf1Na3mQ5yHpxaI85P5UvbYj+aH/kv+Rr9Upd5fezJ/sy725F6xHtmqeoafPHbqz3LMC4GOfeukV4wOCaz9adTZpj/noP5GujCVa7rwTcd/7vn5GdbC0lTbvL72QDSrsj/j9P5mm/2Rdk/wDH2fzNbW5dvX9KQSoBgs35Vz+2xHeH/kv+Rp9Tpd5feznNQ02eOAF7kuCwGOfera6Nc7wzXhB9ec1Y1dk+yJhmJ8wdvY1faRMcsfyrqqVMQsNTalHVy/l8vIzjhaPtHe+lurIY9KmCfvdVnC/3FB5/WrcBvYIfKh1m6hj7JFkAf+PUxXRh94n8KTcg53NXIquJW0o/+S/5G8sPSlun+JPFJf283nQ6tdedjHmsx3fnu4FV5JdVMrMNcustyTuPP60eYn9400SRg/eP5U3Wxb+3H/yX/ISw1BdPzKEtndMzM2ozNnk5zz+tMWwnZci/lXPYZ4/WtGSSIqcE9KZG8YiByeBRzYhw+KN7/wB3/IOSmppeXn/mUTp06nnUZfyP+NPbT7kJn+0psfj/AI1aaSI8lzQZoQuPM/OoviP5o/8Akv8Akaezp+f4/wCZRGnTkH/iYzfr/jSDT5zx/aEw/A/41bM0J/5a/lTPPhGf31UniP5o/wDkv+RPLT8/vf8AmNOnXI0uZv7RmIDD5ecHp71QNnPt5vpf1/xrZE0LaNORLkbx/Sso3VkBg3QB+tOnKu4tc0d/7vZeRjy0+d+nd/5kH2Gb/n9l/X/GoJLSbP8Ax+Sf5/GrpvdPUHN4v51Cb3TM5N4Pz/8ArVoniP5o/wDkv+Q3Gn/Tf+ZSltZRC+buQ/Kf89abbWLS2ke64YjOdpGe/wBanuL3SzE4F6CdpwPfH0plnqOlx2qLJeBWGcj8fpVqWIW0o/8Akv8AkTy0uv5/8EvYGelB4GMVWOraOD/x/D8j/hTH1nRv+f8AP/fJ/wAK5vqlTuvvLdWHcuQn9+tQSsfNfnuait9V0h7lFjvGZznA2n0+lRy6xoyyuGu3DBiCAh6/lSWFnz2utu4OrDk36kwPzr9RUF+RmT8P6UxdW0Z5UC3UhJYADYf8KjvtQ0vzJI/Pk83jjacfyrsWGmsO9VuuvkyoVYez36r8hUcCNfXFbfhM51eX/r3b+YrnRqmjKgVp5gwGDhD1/Kt3whqGlS6xMIppi32duqnplfauarhZqm3dfejkqVYaq5zzNxUDuBTmv9HP/LxP/wB8n/CmG80c/wDLef8A75P+FbrCz7r70ZurHuWNObOoxcev8jVe6b/Sp
v8Afb+dPg1HSreZZUmlJXplT/hWXPqSyXUrBDsZyQc9s1pOny0lG636PyM3Uj3LiNlxW7ohJIyOM8VzlvIZHOAAB711OixgFc+lbJWwy/xP8kc9WaklY6yzHT3rct+grHswNorYh+6PcVzsyRbB/eqfarIORVRT++X6VYB680joq7R9P1Zl2x/4qq87/uR/7LWxnjNYtv8A8jVeEf8APEf+y1sg/nXTifij/hX5HPEcTz703PAGMUGgmucYnSlQ/OPrTc+1Kv31+opPYcdydP8Aj+j/AN9f6UU2P/j/AI/99f6UV4+Ybw9DeO79T54ju5o4wizMFHQA1JDO11d28NxMzRtKoIz15ro7CHTksdEhRVkkuobmaZZbOM7iqSAfvCSwwVGABg9eDxXPNpTW8Vu6XTvqPkC9FusGVWMAuCXz12jdjbjHevZcuZt2NIRjSmtW7FR4YPJmnF1KI0kEagx8kkE/3unHrThp80sJaKSbIjVwGhI3EsF455HPX2qS6s52jurZI9ksDJLcRLE2I8kLwST3cA5A5PFSXM6W+oahazyCOUxmGSTyWXdIrjIIyTjAOen09aoRvNKW2v5Dc6T/AK9fP0KVvbXRuJIQsxYIcq0XI9wOfzpPKuixEW+QKu44TkD39OlaEktuXniZRm1jWPdIJNp5w2cc+gFOS5i1G8McJxi4EqtsfLDGT07jnr610xw8O/5EScWrJlCOOVrhbcS/viwUoVAwatT28sAj/wBJjZHzhgo7deoqNLpF1y5naHIjkdi6gk7c4z6dDTRJE/kWkLq+0yyM6o20ZX3Gf4ahwjyy7q//AAPv1FzQ11JEikmj3x3cTYUuUK4OB1PTH61OInEOfPg3iMSFeNwX16YxVe1G2yObpzbNFJmFUcFjjv24JB61OGieJZVz509uIUjMR3EBcHnpjCk1ahTcVff1/H/gfgCmt79Bsbs3KywOCQvQdT0HA61JDE8kjqXjGN2AyDOVGSP5VBpkHkCYvGfmVZIAEb5pB939SatNKjXUXllmkmilx+7bmUrg4/SlCnSaUpP5BCSsrsi8uUSvG8iq6jOwxrkfUY460lzY3kUojY5ZhkAR9fpxz1FQJcIyjeXeZbJt42nrvLgfkRViW4i8x3ZVKXlsD86vhCAuc4wccds1SpU9Ne3YIyjZ3ZFc7g75mjVlOGTao2/hio7y1khjkf7QjyCXY4jA+QnoCOAPwplzN54luPLj2KEQSIrhWIxx83NXLwRquobAu551dg6uPLJOTuP1I6VmqcLSXb+vnroP2id9Sl9kkFuZnuliQPs/eIwycZ4wDxT7FGnjRRPH5khOEOd2B6YGPzNOsWnj1NbWSdIT9p2yW6pIfN5xjGMHPTnFP0rKMHhuyLZnYPEEcE4BOBjg8dz+VONKD5br11/Hf+uzBThffQs2U0LWcAuJEIy+QxPXDYzjnrikeLy5JbnCMiQ+YsaOdj/MFzyc9T+lR6fpxmt9PwrM97K0NqrwMRO2cckNx8xwMZ9xU0Wn6pNAb1Yj5As2ZYvIbY2C5aPOc8eVI2c5+U1xuLvoEK1JRSk/w/P0NvwrHa3fivR7eVcQXk0CvHvIJVnUFc5z6+9aUls1q0cranZPavM0DzJOxWGQclWyoPTuMg4OCawtJjm0XxNaatfNIBp2oxRGAQFRvjKsUyT8uCCD1PfBqfTfFEFvrtrZ2uk+RBBeS3Nyj3HmF5NjL8h2jaF5K5DEE5JOKpQXU5qsoym2m7fd+p1kGnh3Ux6paPbNbtci6WR/L2K21jjG7hsDG3v6Uj2cFxYyuNb08Q+YIUmedtkjnB2jgnoR1AAyM4rFvPGttqL6XqMlvqyraieBHTVGNxuyjBvNKk9GIxjH0q9b+OY54NQCre2iyP8AaFj0+9kgmZ1RVJZgpVshQWJAOQTxmnyR7GaSvu/vf+Zf0y3hm1OPTbm+SGWLcJ0EgLR7FJcYz1AU/lVXX1s44tKv7GaQ29zI6ASyhyjJtz8wABGGU9BXL2+sC0u49bS33z/aX83zpWczK6ncGYnkkMQT15q/dahbahHpVpbWcsFhbK5SJpw0hdwPmL7MHkJxtHC46nNJxXYrlg95P7/+CdPaRWd1o11cG4eKS3jLGQ30ZDNkYURY34OfvZIqLVoIrLSY7myF1cDyYXluE1CN1jZwCQ0SruUZOASayo7mHTrOX7PY/wCnS2727TPc7owGG1mCbAc4J6sQM9KhS+hi0u5tbKzaK4uolhnlmufMG0MrHYoQbclR1LUcnkXyU/5n/XzLmmQ6hqiK8E6qpuI7dt8rDYXDEMePu4Vsn26VoxaLcSx3cOoSopSK52b5mAjeLALnHbJPrnB46Z5vSby80W31OOPy5vttq0Kbnx5LngSDjkhSwx/te1P1TxldyxSyT2axuNPNiQsuQXblpenUszNj3xmqhFqSaWpE4wW0n9//AATqPJuYhc5vIZYltFulnEr7TGZFTcvAOcnBDD14zirOo2P2fVLm3t9ZiNvbqHllkkceUOAN3yjJJIwFBrztPF0z6c1p5ABOmDT95k7i4E2/G3224/HPatSLxvLHqNxe28V3CbyFY7sW995b7l24aJgmU+70O8cmi3katR/mZc8QJercQ2H2kySSSRmNkkJVg4yrD2IIq3rUEMVrfSWF5fF9PultpjNKCJd24b1AA2jKHg56jmuI1nXru71U3qXV1uUrsN1P58gwOMuVGenoKt6t4ujv4p4rewa2+2XS3V8VuA3mMN3yx5T5F+djg7jkjnjFXLmdNJrTUEoX+JnReHGtNRvFtL86n/HLJPBehFjiVSzHaY2yQAe4zwKs6VpA1KPTYftGptdaoZRA8c37uEqSAHGMt0ycFcAg81wdp4gmsbPVYYIn33sYhSVnBMMe8Mw4Xknaozxxnjni1ovjSbQrJ0hN6bskspF5ttw2PlZognzMvUfNjIGQayt5DfL0kzqtLMkmmRO5Z2OcknJPzGp2V/7h/OuBtNdvIrVI0ucAZwNgPf6VIfEF+G5uCo7koo/pWbUux1KGGa1m/wADtHV+fkaoSj4+5J+Rrkk1++d1xdDlgPur/hT7jWtSSVgJmb6Rr/hT9/l2D2eF/wCfj/A0tQjuzbSrDHPu3gjapzWM1hq0pH7i+Y9vlar+l6tqVxcrGbjBJ/iRf8K3o7rVIpAUu0EnZto/wrmqVeSXvI78NhKVWm/ZzelzO0LTtQW0kWSyuQQ/8cTZ/WtNtOvP+fOX/v0f8KuWmrakrOLm9JzypjRP8KsHW7kH/j6nP/AY60VWm1ucksLWT0K8Wn3Q0adTay7vMHHln29q4C80DU2vp9mnXTDzDyIm9a9KOr37afLJHdSBFYA7lTOePasaTV9Y8xmW8UA+qLn+VTGpCMXbv+iEsLUlK0mtjhz4d1X/AKBl1/35b/Ck/wCEc1X/AKBlz/35b/Cu1bWNaA5vl/74X/Cmf2zrWM/bVx/uL/hT9ui/qb7o44eHNUHP9mXPH/TE/wCFKPDmqvyNNuSD/wBMj/hXXPrWtCJm+2r0P8C/4U2DW9ZeBW+2rj3Rf8KP
bIPqjta6/r5HKf8ACNarn/kHXH/fs/4Uv/CN6r/0D5/+/Z/wrrW1nWgMm+H/AHwv+FRHXNZ6/bR/37X/AApqqiXhGuq/r5GFp3h7U476J2sZwATzsPofaobjw7qbXUpFjNguSPkPr9K6e11zWGuU3XmRz/yzX0+lRza5rImfF5xuP/LNfX6VKqLnKeGfJa63/roc5b+HdTW4jY2cvDg/cPr9KnvNA1E30kgtJscf8s29K2E17WTIv+mHlh/Av+FLPr2srMym8/8AHF/wrp9p+5a8/wBGXCg4091uvy9DnW8Nai3zfZpeeceU1b/gzQr631mZpLaVQbdhkxsO60v/AAkWqAYNyeP9hf8ACtnwrrmoT6rKklyWUQMcbFHce1c1Sa5Hc5J0t2efnw7qPe0n/wC/LUn/AAj2o97O4H/bFv8ACt7/AISTVf8An8P/AHwv+FNPiXVf+fw/98L/AIVtzEOmYf8Awj2of8+lz/35b/Cj+wL8f8ulz/35b/Cto+JdW/5/D/3wv+FN/wCEm1bH/H4f++F/wp3J5DPttIv1uEH2K5wO/kt/hXYaTp94qgG0mHuYzWDZeJNXlnJN4cD/AKZr/hXXabrGotGpa4J4/ur/AIV26/Vl6v8AJHPI1reCSJR5iMhPTcMVqQgcZNJdM0lvZO5yxTJ+uBQg6VzMETqf3qmrCn0qsp/eD6VYU8UjertH0/VmXbH/AIqm7/64j/2WtjisW2P/ABVN5/1xH/stbNdOJ+KP+GP5HPEcOCOaT2ozjGaQ56VzjDJxSo3zrn1FJn1oTh1+tJ7DW5Mn/H/H/vr/ADopEP8AxMI/99f5iivHzDeHobx3l6nz42sX1lNYhYYc2cMkce4E5Em7OeevzHH4VJa63evbfZhbWxmFsbYXWD5vk/3PvbenGcZxxnFUtRBNwP8AdFJYfJOxP9w/zFe0mubU6XSaxfL0uT3HiS9nhaP7NarPN5az3CKfMnCEFQ3zY6qpOAMkDOauT21zf3sl5NBGJbl3mfGMZZixxk9MmsWzh8y9j3cqDk4rrpbiBjCUVwqrgZHPetKKi7+jOFxm9yC8lurq2MJ02zjZiplljQB5dowM84HvtAyeTUVubq2+2fZ7OGJbpdhVTnyxuB+XLE9sZOTgn1qybuPnh/yqNrtB0DflUqSWzJ5al72H2BfTo5kbRbG681dpactkLxwNrjHTr196r2MdxYX63UdjbuyhgElUMnzAjpnnrVm5vFRwMN09KZHeIHDEN+VErJsFGppoX5I7trdTHpNlDEYmhCIowN3Vslid3uSaaYdZ0+xtU/sawfAYwTyKpcK3UcNg9T1GRngirMmqwmwjAWTO70+tX9av1GnaUcN80Pp7LU6GtWE01bsjCtLzWLRLQDRtNka0EnltIgJO8knd82Gx2z0wKjs11izisSukWLm0cukkqqWYEgkN82COMdM4zzVs6xawgYV2b3AqvLr+4khG/Gn7ply1DHb+0LPUY5/sVruC7GidVKOu3aQRnuP8Rit42Orahp6zLoFgkRi8iMxtjYvXjL5znucn1rEW93zPPKpMh6Hrge1dXpOqm38NoZQ/MhKDH1rWiozrQi+rt+ZpSpTaaa6EWo2OppElhL4Y0wMsaplZDnAOc8S4ye5xzTLWLVry+eOTw9pjYeOS4OMeYFIAz8+MeoXGe+aunxAr3r3DiQs+c5UH+tPtddW3vp5mjYmSPGNv09/apiou/kSo1He6OX1SW60vxCNVks7VrmC7Fx5ciKY2YNuwVUj5fYY4rPm8STrcWxh0nTIFi80+VFCdsryJsZmyxOcdACFU9AOaueLrlLvWWljLqjxq6owxz0NYwUK24f6wj7x/h+lZOSuONKXI3bsb1hrOo6J4bsGWxtJntrl5LOeYEtayHOWTDAE5UH5gQCMgZqPSPEmtafp2n28MFqbawvTeK06n52Ixsbn5kwX4GPvtzzSyXcEXhm0hWPzJFlJy4+Ufe96xJ5ZLg7mYuVIPHQf4URkKdKStZHRabrN7d3c9olsk8n2t9Rklc4JlOM9+n60ukarrN/4lfV44Ihtme6faoCqeScZPI5xg9RxTPCkfmaxPI4wGiY7R9RWlocsUdtPEPNLTyRxgKowF3Bj+gpxa5mc8YTdSSt2/U7nQ9A1u9sYWh0WzS0Uu0UEEm2PL9W+aQsT079gOgxW9D4e8Q29u0cOi26StGYjOJV3lSMHq+3kEjOM1raV4s0uyso4Vt7vCrjiMf41of8Jzpn/PC8/79j/4qtLxL9nPscu+ga8NK+wHQrbaG3+b5/z7sYz/AKzHT2xXVW6yW2j2kEo2yR26Iy5zghQCKrT+OdNxxBd/9+x/jWPd+NLBs4huv++B/jRzIPZT7CarLnPNc0bho5jtNLqHia0kJxHP+Kj/ABrCk1u3L52S/wDfI/xqHJFKlPsdVFPuGc151rN2b6x1ubOR9sCL7Bdo/pXQjxDbxwudk3Cn+Een1rjoJQ/hS/kIOWudx/Eqa1oSXtYeqMMRTkoq66r8yhHCpTJz+FTxRBTzuU9uaSK6hOAUcAdxU4u4QclH/EVldHUqcuxHdPKIlBldgD0ODVe53t95nbJqW5uUZFwrDnJyOtNlmQnofyrqqtfV6frL9CVTk5PQo+WVHAwfrSB5s/eJ/CrBlRVwoP4imtMAMBSBj0rkuinTl2K7PNgkuRjnoKYskwGfNP4jIqUyBlwQenpSAqFy4OBQ2hKnPsOChDbTLGoaR8EgcDB9KgvizXcnJI47+1WLaZTOA6ny3YDaP4fQim3uEvJRjnj+VO65Rezle1ix4eONSjDZxk117FfPXGelcVpUwhvUY569q6H7cpG/5sDivNxavNNHv5VeMJJ9n+RqsyK5/nULOgJJ61nfbl7lvypjXsfq/wCQrFQZs5HRRODoc+Cf9YP6VlNMoP6GprS5R9CuDlv9aO30rGku4gzD5uvpVU4+4/8AE/yRztvnfoX3mXHFRGcdc9aom7h9G/KkF1B3D/kK0UQbZclnXyX4HIPeo7eUfZkGMf8A66rvcxFWAD5IxT4HAt045/8Ar0coXdiy8w9/yphmx0/lTScjoM+tRsTnpimkiG2WbaYm7jGPX+VV7iVvOk/3jT7T/j7j/H+VV7g/v5P94/zoS9/5BJvk+YRSN5yem4fzp94x+0vz6fyqKL/XR4/vD+dOvP8Aj6f8P5V2L/d36r8mH/Ll+q/IryOw71u+DWJ1mYk/8u7fzWufkJArd8F5/tib/r3b+a1y1P4bOSZzZJppPag59abzW5mwPSmOcKTSkGmMCSq56mqSM5uyNHTI8IWx1rtNNX92n0FctZR7Y8D0rrtNH7pCPQV3P/dl/if5I5vsnWzjNnY/9c/6Ckj5wD0zT7kYsrL/AK5/0FRJ93JrkY0TrzIB7VYBFVlOZBUwoN620fT9WZlsf+Kou/8AriP/AGWtkfWsS2/5Gi7/AOuI/wDZa2c85rpxPxR/wr8jmiOznvRnIpvSjJzXOULuyPelU/vFx6imbsGnLjzF9M0nsC3RMh/4mMY/6aL/AEopE/5CMf8A10X
+lFePmG8PQ6I7y9T531Bf34/3RTdO/wCPlv8AcP8AMVNfr+9B/wBkVDp//Hy3+4f5ivcj8fz/AMzp/wCY/wCf6Gn4J09r7XVwdqpjLeldprlmsOuJGgx8mTz7Gs74SWf2nWJQegYV6g+lWlx8RIYJFXy/s+SCM/wmumlH3fkzy0/efoeXSW0mSAjH6DNVpLabH+pk/wC+TXpGsaeuk6lLBJbP5LHMUoXhh6exriNX1bUba4zb22FBI2bCeK5ybmNdo6yAhGPHpUCSuTgDmr11q8+BGbORs85VTgn8qbDDf3XI0y5wehCU57saLjMw0iHc2PnP9am8R3D/ANlaUsIyTAe+M8LxSS6Drl7pkMFtZMkgfJErBcDnmti/8GancabpkUlxBE0UWH5Lc4Xpx7VKN6+69F+RwUMs6hnuxGgx8qKQT+NVJ/Mlb5JHwei16PZfDnzMbpJ5h3baEUfrmup0rwZpukkSCESz+rchatJs5m0cBoHge5uFS61SRoIcBhEPvMPf0rZ1Ro9kCIu2AOAi+wFdbqSSSKsCkh5HEage/X9M1zniG08y9MUSnbGyoMewrWgrYin/AIv0Zvh3dy9P8gltla8uLm4QLCn7z6+grHhuHutXmduPkyB7cVd1O5UP9gW4V0RvmfeDuNYV7qcWkvczrh5SgSJV5y3HP0ohCXvaPbs/8iIJ2ZmeNJIZr2JYzmW2gIbHbJ4H865d7meDGyQKpHT1/CpT5s9vLNLuaWWbLE98D/69QSblJIjLHoBtzWDhPmWj+5/5GkU/Zy+X6m1cyb/CFiZfmJuG69Or1mRTmVtsjjCnCoeFH4VqTRbvBlmjAeb5zHHpy1RaeIZrY+dYwtIhH3sgsKUKc7PR/c/8hTTuvRHT+FI1FyzKVZfKI+XqORXb/Dm2gm1KeLA3Q7ZkHUdNv/s1cXpNnpaXpeyE0NwYiHgD715I5B4xWt4Ia70Lw3repG5T7WGijCmQZ2EjIHr71Uac+d6Pp0ZzL+LLTov1Pf4NvlAqQR6g0skm0Vg+Hb+0t/D9tHJeQAgEjdKM4PPPPqTV2bUrExFvttuT2Hmr/jWvJPs/uf8AkVZjbu4yDXP30/B5o1HW7WIfLPDJ/uyCsS41S2kGRcRc9t4qHCfZ/c/8i1Fle9ferY6j+VYkgJbIq9Lewb8iaPj/AGhWRrt8tnYStbujOeAQ2doI61Hs59n9z/yKsxupT+TpF22eRGa4+0B/4Q+7zn/XL1/4DWldXpuPCkzs+ZGUKQepOfSs61D/APCH3YP/AD3Xgf8AAa1oQkqsLp7rozDEp8q9UNhMeA289fTrT12s5BY896rxgbQcgY7mpo5VRic7m9xwKjkn/K/uf+R0KI28ZDFwMndk7u1TuFfndj29Kgu2R41bCg7ux61KXQA/Lye4NdNSnP6vT0e8uj8vIFpJlKRirHnC471GWcjJPB6CrU21QWGG9P8A69QCR9xAAY471y+zn2f3P/IdiNxxubJX+FQeWP8AhULAuA8hA9FHYVbmjErGWM5LYymeU9h6iqjqRng8eoo9nPs/uf8AkJpjY8eagTIUsOtTXSLPK6ceag+T/aGORTI0JlQkEfMOlOuSyXTFQcjnOO+Kfs58uz+5/wCQrO5DpzgX0Z6DdXQeYNpPTHvWHjbqEbKMBiGPHQ4rVyDG33etefiqU+ZaP7n/AJHt5U7RkvJ/kSGYHim+aM9P1qLeuMcUxpEHSs1Rn/K/uf8AkaORuWkg/sO4x2lHf6VjSSDzD06mrFlqklupt0RCjtuORz0/+tUjeIrpGKCKEgcfcP8AjQqNaMXaF7vz7LyMXJc/yM/zfm60hce9Xx4ku8/6mH/vg/40v/CR3f8Azyg/75P+NLkxH/Pv8/8AIfMu5nq496UMM96v/wDCR3n/ADxg/wC+T/jTh4ivP+eUH/fJ/wAaOTEf8+/z/wAguu5nEgnHNNLAds1p/wDCQ3f/ADzg/wC+T/jSHxFef88oP++T/jT5MR/z7/P/ACBtFKzYfak49f5VXnJNxJx/Ef51sW2vXktyiNFAFOein/Gmya/epK6iO3wGIHyn/GpUa/N8H5/5DdnBepkx5E0eR/EKW9OLqTj0/lWpH4gvTKmUgxuGflP+NOuvEF2twwWOAj/dPp9a6lHEewa9n1X5PyHp7J+v6HOsd3AyK6DwZu/tiYf9O7fzWoT4hvAP9VD/AN8H/Gtvwlrt3Pq0qNFEAIGOQp9R71zVI1+R3h/X3HLJK25whBB6U09O1bp8R3v/ADxg/wC+D/jTT4kvv+eEH/fB/wAa25cR/wA+/wCvuIaj3ME9aWFd9yo9K2j4kv8A/nhB/wB+z/jVqHXb0SHMUGMD+A/41pGOI/59/wBfcY1VG1r/AIDbZMQk11WnL+6j9NorNh1u4MR+WDPptP8AjXRWOoTPChKx5IHQV0zddYdJwt7z6+S8jG0bb/gbd1xaWQ9Y/wCgqBT3q5d3Ti2szheU9PYVAt0+BwvPPSuVyq/y/j/wBpR7gpw4qwCM5qNbhyw4X8qmE7Y6ClzVf5fx/wCAbVVG0den+Zj2x/4qe7/65D/2WtkH8qzbe8kbxDcwkJtWMEHHP8NannN6CunETrXjeC2XXy9DBKHf8BtJnjg08SsSelHmtnoK5+ar/L+P/AHaHf8AAYeQaWP76/UU7zj6ChJm8xQQME0nOrb4fx/4AJQutfwJU/5CMf8A10X+lFKspW/jHGN6/wBKK87FwqT5bq2nc1vGMmfPl+P3g/3ag0//AI+X/wBw/wAxVq+XLD/dqrYD/SmH+wf5ivbj8fz/AMzp/wCY/wCf6HpnwPhD395IeisP5Cuztr5H+KwkdtqyRMqn+78pxVP4W+GH0nw685BN5dAuyjsMcD8qy9QdoPGynBVkT8Rwa7Kbs7PseTHWT9Gex+UjxhJgrg+oBBrOutC0u4z59oRno68VQ0TxDBcxGG5lVSB/EeG/+vXRROpAaNjtI71nKCZlscZoXhe0ntnlEpBEhUAgEdBXQRaOkOFfy2x6Ej+tR+HCy6dJt4/fHoPYVrbiDzQ4q4Ns56XTLOXUpkdXChQcBiPSrnk28SIscS4AwM81DMxfV5yR/COB+FSykhEI9KhHRX3XovyGkb2CHgHpiq8sDR++KeZcjBFXIv8ASYAWUh14Pv71RznMyFRr1uSPlijeQ/XBrKvVRftElwm55BuAzjFdNcaaV1Pz2X92Ewff2rltVk8/VJdxG0cfpWU5OLi1vf8AzOihtL0/VHKzWdvHIXkQKmC7HJ6VwOuXYuLx5IgUjP3F9BXVeKdSDym0iPAA8zH6CuI1JS+zGM5710Rr1bP3nt3HCTsyuLi4a3OGwA/JxUgaZnG1yB9BUEcnlo8bx/Kw4x2PrV6CASSqvmPjvsXP86xderzL3n95pFv2cvl+ptvAn/CKWk0jsGMrZb2+alsNNuL1EffHbwN/y2bnj0A7mtV9PM3hezjWFUjablpnGQMn3/zmrkls1uiLsVY1XqMAf/qqqdetZ3m/vZM5O69EW9HsbSC5b7ODxEVMjnLPyOT2H0GK7P4faTotz4
6VvCcVJ3as/Mh4Ws18D+5nvUsezUZQvZRx6dKjv1JhhZey/4VxNv46dwDJ5bS7QCSGrSuvGKNBb+UI3bZ84KsMHinzRXVFVcJXk01F7LozTf96mU69xVac+RCQgy546ViN4lYsGVIwc9s1G/iBnDfLGpPcA8fSn7SHdfeZfUcR/Ky7BM5nLzEFcHqOgqrN5m9wjlgf4u2KpnVIQwaRRIQMAHOPyqOfVUuBjcIweyA1jVqR9136nRRwVf3ly7r/IhuIVlO0N0OP8AePpTTbq1yxRcBVJ9qGnhYAeawxjpmgzxH/l4cc9h/wDWpfWKfcpZdiF9kkmYOY432qFG5t3f0rOtC6wM6yA5c9TV83UO52D8vjccHmqlnp32qzYNu27z0IB7VDrQutTeOBrqDXL2KkpLO2Byo556VF/Z11cYWKPAxkM3ANbkOlwwsWEG85z8zVNNBPNwWKp/dXFDrw7mTwGJ/lMFfDUpG6eYKBzlanj0jTkljDSb3Y4Ck9TWiNOG3afMb6vSrYxxtGyQImw5BAGan20O4v7PxH8oWnhqCzuPOeNxIjY2uOMfT0pLW7t7e+aPTl+yurZzyVcnqKtXj3d2CGuJRkAAgjIxTLW3aBQqgkg5Z+NzH3PWn7WHcP7PxP8AKYOstcXmqXXlqGMJ6N90e59q6VfDs1r4etrV5DIb2Tc0qDG0kdB7cVWS0jj2I9ukqq2/a/IZvVh3P1rbtdb1CGF0jCYMu8f7IxjA9qPa0+4v7PxP8pR1SCHS9HTS7dibyOECGIf8tWYfe/OuX03Vf7LnNpfbnnXPmOHG1PpxzW7qNrLfapHflnikjxsCNwPpWLdeEIJZzMZJxk5I3g0OrT7h/Z+J/lLn9swai4gMs5izzJvALe3Sqd/4ajG6S3eRUb5iXbdj+VaNhpdpZ9LKOU9mkJJX6c1bMDbCgUkN1DHP86XtYdx/UMT/ACnFvo7ouY3EmDyAefwqCW3niTmEqnXOOa7eezjnQK1tErgcOgANVf7KdcYkfA9SDR7aHcP7PxP8pwxDo3KsPrTCwD8fdHt1rvG0pZBiRQynsQKrHw1Z7TtjZW7MG5FP20O4/qGI/lOLdnZtzAqO1IM54XLV2H/CLxBtwlmzjHJB/pTH8LxsuDLKBnPBXml7WHcf1DEfynJFQW5c59hxTShz1Bx+Ga6v/hEoP+es35r/AIUHwlCcDzpvzWn7aHcX9n4j+U5WIHz48g/eHI6dakvSBcucZ6dfpXTJ4ThR1YTTfKc4yMUs/hSKeRnaWUE+hFP29O24LLsS38Jx6yYnRm4HrV4zKbOQg8A/4VunwfDkfvpeP92pE8JBx9nR5CH5zlf89q5qs4Sasd2Ewlakpcyto/yOQeUdjUDOCa7c+AG/vzf99JTf+FfN/wA9Jv8AvpKtSRyOjN9V95yNi4+2R9e/8qjncfaJf98/zrtofAbwyrIHlJHYstRSeBS8jsXmBJJ+8tU6kVGxKw829196OK3e1G6uy/4QP/ppN/30tH/CBD/npN/30tT7SJX1afdfejjM0ZA4rs/+ED/6aT/99LR/wgQ/vzf99LR7SIvq0+6+9HGbs0m73Fdp/wAIF/00m/76Wj/hAh/z0m/NKPaRD6tPuvvRy+lv/wATGH6n+RqC7b/S5+n+sb+ddpbeCTbXCShpiV7Fl9KpT+Ela4kY+fksT99fWlFqU212KlSkqaTa37o5ItSbzXVf8Ignrcf99rR/wiCes/8A32ta2MfZPuvvOVDmuo8CsTrc+f8An2b+a07/AIRBP+m//fa10HhDwyltq0r/AL7mBl5ZfUVnWX7ti9k11X3nmu406Plq7f8A4Vzef88J/wDv7H/jUkfw7vAf9RP/AN/Y/wDGtFKPcmVGT6r70c5ZIMrXR2gxHmtG28CXiEfuJf8Av6n+Nasfg++VMeQ//fxP8afPHuT7CXdfejJhP/16tRsBx+NaieFb4f8ALB/+/if41Kvhi+DZ+zt/38T/ABo5o9x+wfdfeiS+b/QtOz/zy/oKgjPpWrfaRcG1s4/LO5EwRuXjgVXj0m6AH7o/99L/AI0nUguoKg31X3jEOVqRTxUy6XeAYEP/AI8P8aeumXgP+p/8eH+NL2kO46ytyrsv8yIHpT+pqYabef8APH/x4f40/wDs67z/AKn/AMeH+NHtIdzArZwM+9KT1HrVj+zrvH+q/wDHh/jSHTbz/nl/48P8aPaQ7oLFbOCKjZqt/wBm3n/PH/x4f40w6bebv9T/AOPD/Gj2kO4EFq3+mQD/AKaL/OirEGnXaXUTNDhVcEncOmfrRXHiZJtWZSP/2Q==\n", - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "# image viz\n", - "frcnn_visualizer = SingleImageViz(URL, id2obj=objids, id2attr=attrids)\n", - "# run frcnn\n", - "images, sizes, scales_yx = image_preprocess(URL)\n", - "output_dict = frcnn(\n", - " images,\n", - " sizes,\n", - " scales_yx=scales_yx,\n", - " padding=\"max_detections\",\n", - " max_detections=frcnn_cfg.max_detections,\n", - " return_tensors=\"pt\",\n", - ")\n", - "# add boxes and labels to the image\n", - "\n", - "frcnn_visualizer.draw_boxes(\n", - " output_dict.get(\"boxes\"),\n", - " output_dict.pop(\"obj_ids\"),\n", - " output_dict.pop(\"obj_probs\"),\n", - " output_dict.pop(\"attr_ids\"),\n", - " output_dict.pop(\"attr_probs\"),\n", - ")\n", - "showarray(frcnn_visualizer._get_buffer())" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Question: ['Where is the cat?']\n", - "prediction from LXMERT GQA: desk\n", - "prediction from LXMERT VQA: desk\n", - "Question: ['What is near the disk?']\n", - "prediction from LXMERT GQA: can\n", - "prediction from LXMERT VQA: cat\n", - "Question: 
['What is the color of the table?']\n", - "prediction from LXMERT GQA: brown\n", - "prediction from LXMERT VQA: brown\n", - "Question: ['What is the color of the cat?']\n", - "prediction from LXMERT GQA: black\n", - "prediction from LXMERT VQA: black and white\n", - "Question: ['What is the shape of the monitor?']\n", - "prediction from LXMERT GQA: square\n", - "prediction from LXMERT VQA: rectangle\n" - ] - } - ], - "source": [ - "test_questions_for_url1 = [\n", - " \"Where is this scene?\",\n", - " \"what is the man riding?\",\n", - " \"What is the man wearing?\",\n", - " \"What is the color of the horse?\",\n", - "]\n", - "test_questions_for_url2 = [\n", - " \"Where is the cat?\",\n", - " \"What is near the disk?\",\n", - " \"What is the color of the table?\",\n", - " \"What is the color of the cat?\",\n", - " \"What is the shape of the monitor?\",\n", - "]\n", - "\n", - "# Very important that the boxes are normalized\n", - "normalized_boxes = output_dict.get(\"normalized_boxes\")\n", - "features = output_dict.get(\"roi_features\")\n", - "\n", - "for test_question in test_questions_for_url2:\n", - " # run lxmert\n", - " test_question = [test_question]\n", - "\n", - " inputs = lxmert_tokenizer(\n", - " test_question,\n", - " padding=\"max_length\",\n", - " max_length=20,\n", - " truncation=True,\n", - " return_token_type_ids=True,\n", - " return_attention_mask=True,\n", - " add_special_tokens=True,\n", - " return_tensors=\"pt\",\n", - " )\n", - "\n", - " # run lxmert(s)\n", - " output_gqa = lxmert_gqa(\n", - " input_ids=inputs.input_ids,\n", - " attention_mask=inputs.attention_mask,\n", - " visual_feats=features,\n", - " visual_pos=normalized_boxes,\n", - " token_type_ids=inputs.token_type_ids,\n", - " output_attentions=False,\n", - " )\n", - " output_vqa = lxmert_vqa(\n", - " input_ids=inputs.input_ids,\n", - " attention_mask=inputs.attention_mask,\n", - " visual_feats=features,\n", - " visual_pos=normalized_boxes,\n", - " token_type_ids=inputs.token_type_ids,\n", - " output_attentions=False,\n", - " )\n", - " # get prediction\n", - " pred_vqa = output_vqa[\"question_answering_score\"].argmax(-1)\n", - " pred_gqa = output_gqa[\"question_answering_score\"].argmax(-1)\n", - " print(\"Question:\", test_question)\n", - " print(\"prediction from LXMERT GQA:\", gqa_answers[pred_gqa])\n", - " print(\"prediction from LXMERT VQA:\", vqa_answers[pred_vqa])" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.2" - } - }, - "nbformat": 4, - "nbformat_minor": 4 -} \ No newline at end of file diff --git a/examples/research_projects/lxmert/extracting_data.py b/examples/research_projects/lxmert/extracting_data.py deleted file mode 100644 index 6b1342c9b11..00000000000 --- a/examples/research_projects/lxmert/extracting_data.py +++ /dev/null @@ -1,149 +0,0 @@ -import getopt -import json -import os - -# import numpy as np -import sys -from collections import OrderedDict - -import datasets -import numpy as np -import torch -from modeling_frcnn import GeneralizedRCNN -from 
processing_image import Preprocess - -from utils import Config - - -""" -USAGE: -``python extracting_data.py -i -o .datasets `` -""" - - -TEST = False -CONFIG = Config.from_pretrained("unc-nlp/frcnn-vg-finetuned") -DEFAULT_SCHEMA = datasets.Features( - OrderedDict( - { - "attr_ids": datasets.Sequence(length=CONFIG.MAX_DETECTIONS, feature=datasets.Value("float32")), - "attr_probs": datasets.Sequence(length=CONFIG.MAX_DETECTIONS, feature=datasets.Value("float32")), - "boxes": datasets.Array2D((CONFIG.MAX_DETECTIONS, 4), dtype="float32"), - "img_id": datasets.Value("int32"), - "obj_ids": datasets.Sequence(length=CONFIG.MAX_DETECTIONS, feature=datasets.Value("float32")), - "obj_probs": datasets.Sequence(length=CONFIG.MAX_DETECTIONS, feature=datasets.Value("float32")), - "roi_features": datasets.Array2D((CONFIG.MAX_DETECTIONS, 2048), dtype="float32"), - "sizes": datasets.Sequence(length=2, feature=datasets.Value("float32")), - "preds_per_image": datasets.Value(dtype="int32"), - } - ) -) - - -class Extract: - def __init__(self, argv=sys.argv[1:]): - inputdir = None - outputfile = None - subset_list = None - batch_size = 1 - opts, args = getopt.getopt(argv, "i:o:b:s", ["inputdir=", "outfile=", "batch_size=", "subset_list="]) - for opt, arg in opts: - if opt in ("-i", "--inputdir"): - inputdir = arg - elif opt in ("-o", "--outfile"): - outputfile = arg - elif opt in ("-b", "--batch_size"): - batch_size = int(arg) - elif opt in ("-s", "--subset_list"): - subset_list = arg - - assert inputdir is not None # and os.path.isdir(inputdir), f"{inputdir}" - assert outputfile is not None and not os.path.isfile(outputfile), f"{outputfile}" - if subset_list is not None: - with open(os.path.realpath(subset_list)) as f: - self.subset_list = {self._vqa_file_split()[0] for x in tryload(f)} - else: - self.subset_list = None - - self.config = CONFIG - if torch.cuda.is_available(): - self.config.model.device = "cuda" - self.inputdir = os.path.realpath(inputdir) - self.outputfile = os.path.realpath(outputfile) - self.preprocess = Preprocess(self.config) - self.model = GeneralizedRCNN.from_pretrained("unc-nlp/frcnn-vg-finetuned", config=self.config) - self.batch = batch_size if batch_size != 0 else 1 - self.schema = DEFAULT_SCHEMA - - def _vqa_file_split(self, file): - img_id = int(file.split(".")[0].split("_")[-1]) - filepath = os.path.join(self.inputdir, file) - return (img_id, filepath) - - @property - def file_generator(self): - batch = [] - for i, file in enumerate(os.listdir(self.inputdir)): - if self.subset_list is not None and i not in self.subset_list: - continue - batch.append(self._vqa_file_split(file)) - if len(batch) == self.batch: - temp = batch - batch = [] - yield list(map(list, zip(*temp))) - - for i in range(1): - yield list(map(list, zip(*batch))) - - def __call__(self): - # make writer - if not TEST: - writer = datasets.ArrowWriter(features=self.schema, path=self.outputfile) - # do file generator - for i, (img_ids, filepaths) in enumerate(self.file_generator): - images, sizes, scales_yx = self.preprocess(filepaths) - output_dict = self.model( - images, - sizes, - scales_yx=scales_yx, - padding="max_detections", - max_detections=self.config.MAX_DETECTIONS, - pad_value=0, - return_tensors="np", - location="cpu", - ) - output_dict["boxes"] = output_dict.pop("normalized_boxes") - if not TEST: - output_dict["img_id"] = np.array(img_ids) - batch = self.schema.encode_batch(output_dict) - writer.write_batch(batch) - if TEST: - break - # finalizer the writer - if not TEST: - num_examples, num_bytes = 
writer.finalize() - print(f"Success! You wrote {num_examples} entry(s) and {num_bytes >> 20} mb") - - -def tryload(stream): - try: - data = json.load(stream) - try: - data = list(data.keys()) - except Exception: - data = [d["img_id"] for d in data] - except Exception: - try: - data = eval(stream.read()) - except Exception: - data = stream.read().split("\n") - return data - - -if __name__ == "__main__": - extract = Extract(sys.argv[1:]) - extract() - if not TEST: - dataset = datasets.Dataset.from_file(extract.outputfile) - # wala! - # print(np.array(dataset[0:2]["roi_features"]).shape) diff --git a/examples/research_projects/lxmert/modeling_frcnn.py b/examples/research_projects/lxmert/modeling_frcnn.py deleted file mode 100644 index c7c3bf376ce..00000000000 --- a/examples/research_projects/lxmert/modeling_frcnn.py +++ /dev/null @@ -1,1920 +0,0 @@ -""" -coding=utf-8 -Copyright 2018, Antonio Mendoza Hao Tan, Mohit Bansal -Adapted From Facebook Inc, Detectron2 && Huggingface Co. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License.import copy -""" - -import itertools -import math -import os -from abc import ABCMeta, abstractmethod -from collections import OrderedDict, namedtuple -from typing import Dict, List, Tuple - -import numpy as np -import torch -from torch import nn -from torch.nn.modules.batchnorm import BatchNorm2d -from torchvision.ops import RoIPool -from torchvision.ops.boxes import batched_nms, nms - -from utils import WEIGHTS_NAME, Config, cached_path, hf_bucket_url, is_remote_url, load_checkpoint - - -# other: -def norm_box(boxes, raw_sizes): - if not isinstance(boxes, torch.Tensor): - normalized_boxes = boxes.copy() - else: - normalized_boxes = boxes.clone() - normalized_boxes[:, :, (0, 2)] /= raw_sizes[:, 1] - normalized_boxes[:, :, (1, 3)] /= raw_sizes[:, 0] - return normalized_boxes - - -def pad_list_tensors( - list_tensors, - preds_per_image, - max_detections=None, - return_tensors=None, - padding=None, - pad_value=0, - location=None, -): - """ - location will always be cpu for np tensors - """ - if location is None: - location = "cpu" - assert return_tensors in {"pt", "np", None} - assert padding in {"max_detections", "max_batch", None} - new = [] - if padding is None: - if return_tensors is None: - return list_tensors - elif return_tensors == "pt": - if not isinstance(list_tensors, torch.Tensor): - return torch.stack(list_tensors).to(location) - else: - return list_tensors.to(location) - else: - if not isinstance(list_tensors, list): - return np.array(list_tensors.to(location)) - else: - return list_tensors.to(location) - if padding == "max_detections": - assert max_detections is not None, "specify max number of detections per batch" - elif padding == "max_batch": - max_detections = max(preds_per_image) - for i in range(len(list_tensors)): - too_small = False - tensor_i = list_tensors.pop(0) - if tensor_i.ndim < 2: - too_small = True - tensor_i = tensor_i.unsqueeze(-1) - assert isinstance(tensor_i, torch.Tensor) - tensor_i = nn.functional.pad( - input=tensor_i, - pad=(0, 0, 0, max_detections - 
preds_per_image[i]), - mode="constant", - value=pad_value, - ) - if too_small: - tensor_i = tensor_i.squeeze(-1) - if return_tensors is None: - if location == "cpu": - tensor_i = tensor_i.cpu() - tensor_i = tensor_i.tolist() - if return_tensors == "np": - if location == "cpu": - tensor_i = tensor_i.cpu() - tensor_i = tensor_i.numpy() - else: - if location == "cpu": - tensor_i = tensor_i.cpu() - new.append(tensor_i) - if return_tensors == "np": - return np.stack(new, axis=0) - elif return_tensors == "pt" and not isinstance(new, torch.Tensor): - return torch.stack(new, dim=0) - else: - return list_tensors - - -def do_nms(boxes, scores, image_shape, score_thresh, nms_thresh, mind, maxd): - scores = scores[:, :-1] - num_bbox_reg_classes = boxes.shape[1] // 4 - # Convert to Boxes to use the `clip` function ... - boxes = boxes.reshape(-1, 4) - _clip_box(boxes, image_shape) - boxes = boxes.view(-1, num_bbox_reg_classes, 4) # R x C x 4 - - # Select max scores - max_scores, max_classes = scores.max(1) # R x C --> R - num_objs = boxes.size(0) - boxes = boxes.view(-1, 4) - idxs = torch.arange(num_objs).to(boxes.device) * num_bbox_reg_classes + max_classes - max_boxes = boxes[idxs] # Select max boxes according to the max scores. - - # Apply NMS - keep = nms(max_boxes, max_scores, nms_thresh) - keep = keep[:maxd] - if keep.shape[-1] >= mind and keep.shape[-1] <= maxd: - max_boxes, max_scores = max_boxes[keep], max_scores[keep] - classes = max_classes[keep] - return max_boxes, max_scores, classes, keep - else: - return None - - -# Helper Functions -def _clip_box(tensor, box_size: Tuple[int, int]): - assert torch.isfinite(tensor).all(), "Box tensor contains infinite or NaN!" - h, w = box_size - tensor[:, 0].clamp_(min=0, max=w) - tensor[:, 1].clamp_(min=0, max=h) - tensor[:, 2].clamp_(min=0, max=w) - tensor[:, 3].clamp_(min=0, max=h) - - -def _nonempty_boxes(box, threshold: float = 0.0) -> torch.Tensor: - widths = box[:, 2] - box[:, 0] - heights = box[:, 3] - box[:, 1] - keep = (widths > threshold) & (heights > threshold) - return keep - - -def get_norm(norm, out_channels): - if isinstance(norm, str): - if len(norm) == 0: - return None - norm = { - "BN": BatchNorm2d, - "GN": lambda channels: nn.GroupNorm(32, channels), - "nnSyncBN": nn.SyncBatchNorm, # keep for debugging - "": lambda x: x, - }[norm] - return norm(out_channels) - - -def _create_grid_offsets(size: List[int], stride: int, offset: float, device): - grid_height, grid_width = size - shifts_x = torch.arange( - offset * stride, - grid_width * stride, - step=stride, - dtype=torch.float32, - device=device, - ) - shifts_y = torch.arange( - offset * stride, - grid_height * stride, - step=stride, - dtype=torch.float32, - device=device, - ) - - shift_y, shift_x = torch.meshgrid(shifts_y, shifts_x) - shift_x = shift_x.reshape(-1) - shift_y = shift_y.reshape(-1) - return shift_x, shift_y - - -def build_backbone(cfg): - input_shape = ShapeSpec(channels=len(cfg.MODEL.PIXEL_MEAN)) - norm = cfg.RESNETS.NORM - stem = BasicStem( - in_channels=input_shape.channels, - out_channels=cfg.RESNETS.STEM_OUT_CHANNELS, - norm=norm, - caffe_maxpool=cfg.MODEL.MAX_POOL, - ) - freeze_at = cfg.BACKBONE.FREEZE_AT - - if freeze_at >= 1: - for p in stem.parameters(): - p.requires_grad = False - - out_features = cfg.RESNETS.OUT_FEATURES - depth = cfg.RESNETS.DEPTH - num_groups = cfg.RESNETS.NUM_GROUPS - width_per_group = cfg.RESNETS.WIDTH_PER_GROUP - bottleneck_channels = num_groups * width_per_group - in_channels = cfg.RESNETS.STEM_OUT_CHANNELS - out_channels = 
cfg.RESNETS.RES2_OUT_CHANNELS - stride_in_1x1 = cfg.RESNETS.STRIDE_IN_1X1 - res5_dilation = cfg.RESNETS.RES5_DILATION - assert res5_dilation in {1, 2}, "res5_dilation cannot be {}.".format(res5_dilation) - - num_blocks_per_stage = {50: [3, 4, 6, 3], 101: [3, 4, 23, 3], 152: [3, 8, 36, 3]}[depth] - - stages = [] - out_stage_idx = [{"res2": 2, "res3": 3, "res4": 4, "res5": 5}[f] for f in out_features] - max_stage_idx = max(out_stage_idx) - for idx, stage_idx in enumerate(range(2, max_stage_idx + 1)): - dilation = res5_dilation if stage_idx == 5 else 1 - first_stride = 1 if idx == 0 or (stage_idx == 5 and dilation == 2) else 2 - stage_kargs = { - "num_blocks": num_blocks_per_stage[idx], - "first_stride": first_stride, - "in_channels": in_channels, - "bottleneck_channels": bottleneck_channels, - "out_channels": out_channels, - "num_groups": num_groups, - "norm": norm, - "stride_in_1x1": stride_in_1x1, - "dilation": dilation, - } - - stage_kargs["block_class"] = BottleneckBlock - blocks = ResNet.make_stage(**stage_kargs) - in_channels = out_channels - out_channels *= 2 - bottleneck_channels *= 2 - - if freeze_at >= stage_idx: - for block in blocks: - block.freeze() - stages.append(blocks) - - return ResNet(stem, stages, out_features=out_features) - - -def find_top_rpn_proposals( - proposals, - pred_objectness_logits, - images, - image_sizes, - nms_thresh, - pre_nms_topk, - post_nms_topk, - min_box_side_len, - training, -): - """Args: - proposals (list[Tensor]): (L, N, Hi*Wi*A, 4). - pred_objectness_logits: tensors of length L. - nms_thresh (float): IoU threshold to use for NMS - pre_nms_topk (int): before nms - post_nms_topk (int): after nms - min_box_side_len (float): minimum proposal box side - training (bool): True if proposals are to be used in training, - Returns: - results (List[Dict]): stores post_nms_topk object proposals for image i. - """ - num_images = len(images) - device = proposals[0].device - - # 1. Select top-k anchor for every level and every image - topk_scores = [] # #lvl Tensor, each of shape N x topk - topk_proposals = [] - level_ids = [] # #lvl Tensor, each of shape (topk,) - batch_idx = torch.arange(num_images, device=device) - for level_id, proposals_i, logits_i in zip(itertools.count(), proposals, pred_objectness_logits): - Hi_Wi_A = logits_i.shape[1] - num_proposals_i = min(pre_nms_topk, Hi_Wi_A) - - # sort is faster than topk (https://github.com/pytorch/pytorch/issues/22812) - # topk_scores_i, topk_idx = logits_i.topk(num_proposals_i, dim=1) - logits_i, idx = logits_i.sort(descending=True, dim=1) - topk_scores_i = logits_i[batch_idx, :num_proposals_i] - topk_idx = idx[batch_idx, :num_proposals_i] - - # each is N x topk - topk_proposals_i = proposals_i[batch_idx[:, None], topk_idx] # N x topk x 4 - - topk_proposals.append(topk_proposals_i) - topk_scores.append(topk_scores_i) - level_ids.append(torch.full((num_proposals_i,), level_id, dtype=torch.int64, device=device)) - - # 2. Concat all levels together - topk_scores = torch.cat(topk_scores, dim=1) - topk_proposals = torch.cat(topk_proposals, dim=1) - level_ids = torch.cat(level_ids, dim=0) - - # if I change to batched_nms, I wonder if this will make a difference - # 3. For each image, run a per-level NMS, and choose topk results. 
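# [Editor's note - annotation, not part of the original file] torchvision's
# batched_nms treats its third argument (here the per-proposal level ids `lvl`)
# as category labels, so in the loop below suppression only ever happens between
# boxes that come from the same feature-pyramid level.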
- results = [] - for n, image_size in enumerate(image_sizes): - boxes = topk_proposals[n] - scores_per_img = topk_scores[n] - # I will have to take a look at the boxes clip method - _clip_box(boxes, image_size) - # filter empty boxes - keep = _nonempty_boxes(boxes, threshold=min_box_side_len) - lvl = level_ids - if keep.sum().item() != len(boxes): - boxes, scores_per_img, lvl = ( - boxes[keep], - scores_per_img[keep], - level_ids[keep], - ) - - keep = batched_nms(boxes, scores_per_img, lvl, nms_thresh) - keep = keep[:post_nms_topk] - - res = (boxes[keep], scores_per_img[keep]) - results.append(res) - - # I wonder if it would be possible for me to pad all these things. - return results - - -def subsample_labels(labels, num_samples, positive_fraction, bg_label): - """ - Returns: - pos_idx, neg_idx (Tensor): - 1D vector of indices. The total length of both is `num_samples` or fewer. - """ - positive = torch.nonzero((labels != -1) & (labels != bg_label)).squeeze(1) - negative = torch.nonzero(labels == bg_label).squeeze(1) - - num_pos = int(num_samples * positive_fraction) - # protect against not enough positive examples - num_pos = min(positive.numel(), num_pos) - num_neg = num_samples - num_pos - # protect against not enough negative examples - num_neg = min(negative.numel(), num_neg) - - # randomly select positive and negative examples - perm1 = torch.randperm(positive.numel(), device=positive.device)[:num_pos] - perm2 = torch.randperm(negative.numel(), device=negative.device)[:num_neg] - - pos_idx = positive[perm1] - neg_idx = negative[perm2] - return pos_idx, neg_idx - - -def add_ground_truth_to_proposals(gt_boxes, proposals): - raise NotImplementedError() - - -def add_ground_truth_to_proposals_single_image(gt_boxes, proposals): - raise NotImplementedError() - - -def _fmt_box_list(box_tensor, batch_index: int): - repeated_index = torch.full( - (len(box_tensor), 1), - batch_index, - dtype=box_tensor.dtype, - device=box_tensor.device, - ) - return torch.cat((repeated_index, box_tensor), dim=1) - - -def convert_boxes_to_pooler_format(box_lists: List[torch.Tensor]): - pooler_fmt_boxes = torch.cat( - [_fmt_box_list(box_list, i) for i, box_list in enumerate(box_lists)], - dim=0, - ) - return pooler_fmt_boxes - - -def assign_boxes_to_levels( - box_lists: List[torch.Tensor], - min_level: int, - max_level: int, - canonical_box_size: int, - canonical_level: int, -): - box_sizes = torch.sqrt(torch.cat([boxes.area() for boxes in box_lists])) - # Eqn.(1) in FPN paper - level_assignments = torch.floor(canonical_level + torch.log2(box_sizes / canonical_box_size + 1e-8)) - # clamp level to (min, max), in case the box size is too large or too small - # for the available feature maps - level_assignments = torch.clamp(level_assignments, min=min_level, max=max_level) - return level_assignments.to(torch.int64) - min_level - - -# Helper Classes -class _NewEmptyTensorOp(torch.autograd.Function): - @staticmethod - def forward(ctx, x, new_shape): - ctx.shape = x.shape - return x.new_empty(new_shape) - - @staticmethod - def backward(ctx, grad): - shape = ctx.shape - return _NewEmptyTensorOp.apply(grad, shape), None - - -class ShapeSpec(namedtuple("_ShapeSpec", ["channels", "height", "width", "stride"])): - def __new__(cls, *, channels=None, height=None, width=None, stride=None): - return super().__new__(cls, channels, height, width, stride) - - -class Box2BoxTransform: - """ - This R-CNN transformation scales the box's width and height - by exp(dw), exp(dh) and shifts a box's center by the offset - (dx * width, dy 
* height). - """ - - def __init__(self, weights: Tuple[float, float, float, float], scale_clamp: float = None): - """ - Args: - weights (4-element tuple): Scaling factors that are applied to the - (dx, dy, dw, dh) deltas. In Fast R-CNN, these were originally set - such that the deltas have unit variance; now they are treated as - hyperparameters of the system. - scale_clamp (float): When predicting deltas, the predicted box scaling - factors (dw and dh) are clamped such that they are <= scale_clamp. - """ - self.weights = weights - if scale_clamp is not None: - self.scale_clamp = scale_clamp - else: - """ - Value for clamping large dw and dh predictions. - The heuristic is that we clamp such that dw and dh are no larger - than what would transform a 16px box into a 1000px box - (based on a small anchor, 16px, and a typical image size, 1000px). - """ - self.scale_clamp = math.log(1000.0 / 16) - - def get_deltas(self, src_boxes, target_boxes): - """ - Get box regression transformation deltas (dx, dy, dw, dh) that can be used - to transform the `src_boxes` into the `target_boxes`. That is, the relation - ``target_boxes == self.apply_deltas(deltas, src_boxes)`` is true (unless - any delta is too large and is clamped). - Args: - src_boxes (Tensor): source boxes, e.g., object proposals - target_boxes (Tensor): target of the transformation, e.g., ground-truth - boxes. - """ - assert isinstance(src_boxes, torch.Tensor), type(src_boxes) - assert isinstance(target_boxes, torch.Tensor), type(target_boxes) - - src_widths = src_boxes[:, 2] - src_boxes[:, 0] - src_heights = src_boxes[:, 3] - src_boxes[:, 1] - src_ctr_x = src_boxes[:, 0] + 0.5 * src_widths - src_ctr_y = src_boxes[:, 1] + 0.5 * src_heights - - target_widths = target_boxes[:, 2] - target_boxes[:, 0] - target_heights = target_boxes[:, 3] - target_boxes[:, 1] - target_ctr_x = target_boxes[:, 0] + 0.5 * target_widths - target_ctr_y = target_boxes[:, 1] + 0.5 * target_heights - - wx, wy, ww, wh = self.weights - dx = wx * (target_ctr_x - src_ctr_x) / src_widths - dy = wy * (target_ctr_y - src_ctr_y) / src_heights - dw = ww * torch.log(target_widths / src_widths) - dh = wh * torch.log(target_heights / src_heights) - - deltas = torch.stack((dx, dy, dw, dh), dim=1) - assert (src_widths > 0).all().item(), "Input boxes to Box2BoxTransform are not valid!" - return deltas - - def apply_deltas(self, deltas, boxes): - """ - Apply transformation `deltas` (dx, dy, dw, dh) to `boxes`. - Args: - deltas (Tensor): transformation deltas of shape (N, k*4), where k >= 1. - deltas[i] represents k potentially different class-specific - box transformations for the single box boxes[i]. 
- boxes (Tensor): boxes to transform, of shape (N, 4) - """ - boxes = boxes.to(deltas.dtype) - - widths = boxes[:, 2] - boxes[:, 0] - heights = boxes[:, 3] - boxes[:, 1] - ctr_x = boxes[:, 0] + 0.5 * widths - ctr_y = boxes[:, 1] + 0.5 * heights - - wx, wy, ww, wh = self.weights - dx = deltas[:, 0::4] / wx - dy = deltas[:, 1::4] / wy - dw = deltas[:, 2::4] / ww - dh = deltas[:, 3::4] / wh - - # Prevent sending too large values into torch.exp() - dw = torch.clamp(dw, max=self.scale_clamp) - dh = torch.clamp(dh, max=self.scale_clamp) - - pred_ctr_x = dx * widths[:, None] + ctr_x[:, None] - pred_ctr_y = dy * heights[:, None] + ctr_y[:, None] - pred_w = torch.exp(dw) * widths[:, None] - pred_h = torch.exp(dh) * heights[:, None] - - pred_boxes = torch.zeros_like(deltas) - pred_boxes[:, 0::4] = pred_ctr_x - 0.5 * pred_w # x1 - pred_boxes[:, 1::4] = pred_ctr_y - 0.5 * pred_h # y1 - pred_boxes[:, 2::4] = pred_ctr_x + 0.5 * pred_w # x2 - pred_boxes[:, 3::4] = pred_ctr_y + 0.5 * pred_h # y2 - return pred_boxes - - -class Matcher: - """ - This class assigns to each predicted "element" (e.g., a box) a ground-truth - element. Each predicted element will have exactly zero or one matches; each - ground-truth element may be matched to zero or more predicted elements. - The matching is determined by the MxN match_quality_matrix, that characterizes - how well each (ground-truth, prediction)-pair match each other. For example, - if the elements are boxes, this matrix may contain box intersection-over-union - overlap values. - The matcher returns (a) a vector of length N containing the index of the - ground-truth element m in [0, M) that matches to prediction n in [0, N). - (b) a vector of length N containing the labels for each prediction. - """ - - def __init__( - self, - thresholds: List[float], - labels: List[int], - allow_low_quality_matches: bool = False, - ): - """ - Args: - thresholds (list): a list of thresholds used to stratify predictions - into levels. - labels (list): a list of values to label predictions belonging at - each level. A label can be one of {-1, 0, 1} signifying - {ignore, negative class, positive class}, respectively. - allow_low_quality_matches (bool): if True, produce additional matches or predictions with maximum match quality lower than high_threshold. - For example, thresholds = [0.3, 0.5] labels = [0, -1, 1] All predictions with iou < 0.3 will be marked with 0 and - thus will be considered as false positives while training. All predictions with 0.3 <= iou < 0.5 will be marked with -1 and - thus will be ignored. All predictions with 0.5 <= iou will be marked with 1 and thus will be considered as true positives. - """ - thresholds = thresholds[:] - assert thresholds[0] > 0 - thresholds.insert(0, -float("inf")) - thresholds.append(float("inf")) - assert all(low <= high for (low, high) in zip(thresholds[:-1], thresholds[1:])) - assert all(label_i in [-1, 0, 1] for label_i in labels) - assert len(labels) == len(thresholds) - 1 - self.thresholds = thresholds - self.labels = labels - self.allow_low_quality_matches = allow_low_quality_matches - - def __call__(self, match_quality_matrix): - """ - Args: - match_quality_matrix (Tensor[float]): an MxN tensor, containing the pairwise quality between M ground-truth elements and N predicted - elements. All elements must be >= 0 (due to the us of `torch.nonzero` for selecting indices in :meth:`set_low_quality_matches_`). 
- Returns: - matches (Tensor[int64]): a vector of length N, where matches[i] is a matched ground-truth index in [0, M) - match_labels (Tensor[int8]): a vector of length N, where pred_labels[i] indicates true or false positive or ignored - """ - assert match_quality_matrix.dim() == 2 - if match_quality_matrix.numel() == 0: - default_matches = match_quality_matrix.new_full((match_quality_matrix.size(1),), 0, dtype=torch.int64) - # When no gt boxes exist, we define IOU = 0 and therefore set labels - # to `self.labels[0]`, which usually defaults to background class 0 - # To choose to ignore instead, - # can make labels=[-1,0,-1,1] + set appropriate thresholds - default_match_labels = match_quality_matrix.new_full( - (match_quality_matrix.size(1),), self.labels[0], dtype=torch.int8 - ) - return default_matches, default_match_labels - - assert torch.all(match_quality_matrix >= 0) - - # match_quality_matrix is M (gt) x N (predicted) - # Max over gt elements (dim 0) to find best gt candidate for each prediction - matched_vals, matches = match_quality_matrix.max(dim=0) - - match_labels = matches.new_full(matches.size(), 1, dtype=torch.int8) - - for l, low, high in zip(self.labels, self.thresholds[:-1], self.thresholds[1:]): - low_high = (matched_vals >= low) & (matched_vals < high) - match_labels[low_high] = l - - if self.allow_low_quality_matches: - self.set_low_quality_matches_(match_labels, match_quality_matrix) - - return matches, match_labels - - def set_low_quality_matches_(self, match_labels, match_quality_matrix): - """ - Produce additional matches for predictions that have only low-quality matches. - Specifically, for each ground-truth G find the set of predictions that have - maximum overlap with it (including ties); for each prediction in that set, if - it is unmatched, then match it to the ground-truth G. - This function implements the RPN assignment case (i) - in Sec. 3.1.2 of Faster R-CNN. - """ - # For each gt, find the prediction with which it has highest quality - highest_quality_foreach_gt, _ = match_quality_matrix.max(dim=1) - # Find the highest quality match available, even if it is low, including ties. - # Note that the matches qualities must be positive due to the use of - # `torch.nonzero`. - of_quality_inds = match_quality_matrix == highest_quality_foreach_gt[:, None] - if of_quality_inds.dim() == 0: - (_, pred_inds_with_highest_quality) = of_quality_inds.unsqueeze(0).nonzero().unbind(1) - else: - (_, pred_inds_with_highest_quality) = of_quality_inds.nonzero().unbind(1) - match_labels[pred_inds_with_highest_quality] = 1 - - -class RPNOutputs: - def __init__( - self, - box2box_transform, - anchor_matcher, - batch_size_per_image, - positive_fraction, - images, - pred_objectness_logits, - pred_anchor_deltas, - anchors, - boundary_threshold=0, - gt_boxes=None, - smooth_l1_beta=0.0, - ): - """ - Args: - box2box_transform (Box2BoxTransform): :class:`Box2BoxTransform` instance for anchor-proposal transformations. - anchor_matcher (Matcher): :class:`Matcher` instance for matching anchors to ground-truth boxes; used to determine training labels. - batch_size_per_image (int): number of proposals to sample when training - positive_fraction (float): target fraction of sampled proposals that should be positive - images (ImageList): :class:`ImageList` instance representing N input images - pred_objectness_logits (list[Tensor]): A list of L elements. Element i is a tensor of shape (N, A, Hi, W) - pred_anchor_deltas (list[Tensor]): A list of L elements. 
Element i is a tensor of shape (N, A*4, Hi, Wi) - anchors (list[torch.Tensor]): nested list of boxes. anchors[i][j] at (n, l) stores anchor array for feature map l - boundary_threshold (int): if >= 0, then anchors that extend beyond the image boundary by more than boundary_thresh are not used in training. - gt_boxes (list[Boxes], optional): A list of N elements. - smooth_l1_beta (float): The transition point between L1 and L2 lossn. When set to 0, the loss becomes L1. When +inf, it is ignored - """ - self.box2box_transform = box2box_transform - self.anchor_matcher = anchor_matcher - self.batch_size_per_image = batch_size_per_image - self.positive_fraction = positive_fraction - self.pred_objectness_logits = pred_objectness_logits - self.pred_anchor_deltas = pred_anchor_deltas - - self.anchors = anchors - self.gt_boxes = gt_boxes - self.num_feature_maps = len(pred_objectness_logits) - self.num_images = len(images) - self.boundary_threshold = boundary_threshold - self.smooth_l1_beta = smooth_l1_beta - - def _get_ground_truth(self): - raise NotImplementedError() - - def predict_proposals(self): - # pred_anchor_deltas: (L, N, ? Hi, Wi) - # anchors:(N, L, -1, B) - # here we loop over specific feature map, NOT images - proposals = [] - anchors = self.anchors.transpose(0, 1) - for anchors_i, pred_anchor_deltas_i in zip(anchors, self.pred_anchor_deltas): - B = anchors_i.size(-1) - N, _, Hi, Wi = pred_anchor_deltas_i.shape - anchors_i = anchors_i.flatten(start_dim=0, end_dim=1) - pred_anchor_deltas_i = pred_anchor_deltas_i.view(N, -1, B, Hi, Wi).permute(0, 3, 4, 1, 2).reshape(-1, B) - proposals_i = self.box2box_transform.apply_deltas(pred_anchor_deltas_i, anchors_i) - # Append feature map proposals with shape (N, Hi*Wi*A, B) - proposals.append(proposals_i.view(N, -1, B)) - proposals = torch.stack(proposals) - return proposals - - def predict_objectness_logits(self): - """ - Returns: - pred_objectness_logits (list[Tensor]) -> (N, Hi*Wi*A). - """ - pred_objectness_logits = [ - # Reshape: (N, A, Hi, Wi) -> (N, Hi, Wi, A) -> (N, Hi*Wi*A) - score.permute(0, 2, 3, 1).reshape(self.num_images, -1) - for score in self.pred_objectness_logits - ] - return pred_objectness_logits - - -# Main Classes -class Conv2d(nn.Conv2d): - def __init__(self, *args, **kwargs): - norm = kwargs.pop("norm", None) - activation = kwargs.pop("activation", None) - super().__init__(*args, **kwargs) - - self.norm = norm - self.activation = activation - - def forward(self, x): - if x.numel() == 0 and self.training: - assert not isinstance(self.norm, nn.SyncBatchNorm) - if x.numel() == 0: - assert not isinstance(self.norm, nn.GroupNorm) - output_shape = [ - (i + 2 * p - (di * (k - 1) + 1)) // s + 1 - for i, p, di, k, s in zip( - x.shape[-2:], - self.padding, - self.dilation, - self.kernel_size, - self.stride, - ) - ] - output_shape = [x.shape[0], self.weight.shape[0]] + output_shape - empty = _NewEmptyTensorOp.apply(x, output_shape) - if self.training: - _dummy = sum(x.view(-1)[0] for x in self.parameters()) * 0.0 - return empty + _dummy - else: - return empty - - x = super().forward(x) - if self.norm is not None: - x = self.norm(x) - if self.activation is not None: - x = self.activation(x) - return x - - -class LastLevelMaxPool(nn.Module): - """ - This module is used in the original FPN to generate a downsampled P6 feature from P5. 
- """ - - def __init__(self): - super().__init__() - self.num_levels = 1 - self.in_feature = "p5" - - def forward(self, x): - return [nn.functional.max_pool2d(x, kernel_size=1, stride=2, padding=0)] - - -class LastLevelP6P7(nn.Module): - """ - This module is used in RetinaNet to generate extra layers, P6 and P7 from C5 feature. - """ - - def __init__(self, in_channels, out_channels): - super().__init__() - self.num_levels = 2 - self.in_feature = "res5" - self.p6 = nn.Conv2d(in_channels, out_channels, 3, 2, 1) - self.p7 = nn.Conv2d(out_channels, out_channels, 3, 2, 1) - - def forward(self, c5): - p6 = self.p6(c5) - p7 = self.p7(nn.functional.relu(p6)) - return [p6, p7] - - -class BasicStem(nn.Module): - def __init__(self, in_channels=3, out_channels=64, norm="BN", caffe_maxpool=False): - super().__init__() - self.conv1 = Conv2d( - in_channels, - out_channels, - kernel_size=7, - stride=2, - padding=3, - bias=False, - norm=get_norm(norm, out_channels), - ) - self.caffe_maxpool = caffe_maxpool - # use pad 1 instead of pad zero - - def forward(self, x): - x = self.conv1(x) - x = nn.functional.relu_(x) - if self.caffe_maxpool: - x = nn.functional.max_pool2d(x, kernel_size=3, stride=2, padding=0, ceil_mode=True) - else: - x = nn.functional.max_pool2d(x, kernel_size=3, stride=2, padding=1) - return x - - @property - def out_channels(self): - return self.conv1.out_channels - - @property - def stride(self): - return 4 # = stride 2 conv -> stride 2 max pool - - -class ResNetBlockBase(nn.Module): - def __init__(self, in_channels, out_channels, stride): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.stride = stride - - def freeze(self): - for p in self.parameters(): - p.requires_grad = False - return self - - -class BottleneckBlock(ResNetBlockBase): - def __init__( - self, - in_channels, - out_channels, - bottleneck_channels, - stride=1, - num_groups=1, - norm="BN", - stride_in_1x1=False, - dilation=1, - ): - super().__init__(in_channels, out_channels, stride) - - if in_channels != out_channels: - self.shortcut = Conv2d( - in_channels, - out_channels, - kernel_size=1, - stride=stride, - bias=False, - norm=get_norm(norm, out_channels), - ) - else: - self.shortcut = None - - # The original MSRA ResNet models have stride in the first 1x1 conv - # The subsequent fb.torch.resnet and Caffe2 ResNe[X]t implementations have - # stride in the 3x3 conv - stride_1x1, stride_3x3 = (stride, 1) if stride_in_1x1 else (1, stride) - - self.conv1 = Conv2d( - in_channels, - bottleneck_channels, - kernel_size=1, - stride=stride_1x1, - bias=False, - norm=get_norm(norm, bottleneck_channels), - ) - - self.conv2 = Conv2d( - bottleneck_channels, - bottleneck_channels, - kernel_size=3, - stride=stride_3x3, - padding=1 * dilation, - bias=False, - groups=num_groups, - dilation=dilation, - norm=get_norm(norm, bottleneck_channels), - ) - - self.conv3 = Conv2d( - bottleneck_channels, - out_channels, - kernel_size=1, - bias=False, - norm=get_norm(norm, out_channels), - ) - - def forward(self, x): - out = self.conv1(x) - out = nn.functional.relu_(out) - - out = self.conv2(out) - out = nn.functional.relu_(out) - - out = self.conv3(out) - - if self.shortcut is not None: - shortcut = self.shortcut(x) - else: - shortcut = x - - out += shortcut - out = nn.functional.relu_(out) - return out - - -class Backbone(nn.Module, metaclass=ABCMeta): - def __init__(self): - super().__init__() - - @abstractmethod - def forward(self): - pass - - @property - def size_divisibility(self): - """ - Some 
backbones require the input height and width to be divisible by a specific integer. This is - typically true for encoder / decoder type networks with lateral connection (e.g., FPN) for which feature maps need to match - dimension in the "bottom up" and "top down" paths. Set to 0 if no specific input size divisibility is required. - """ - return 0 - - def output_shape(self): - return { - name: ShapeSpec( - channels=self._out_feature_channels[name], - stride=self._out_feature_strides[name], - ) - for name in self._out_features - } - - @property - def out_features(self): - """deprecated""" - return self._out_features - - @property - def out_feature_strides(self): - """deprecated""" - return {f: self._out_feature_strides[f] for f in self._out_features} - - @property - def out_feature_channels(self): - """deprecated""" - return {f: self._out_feature_channels[f] for f in self._out_features} - - -class ResNet(Backbone): - def __init__(self, stem, stages, num_classes=None, out_features=None): - """ - Args: - stem (nn.Module): a stem module - stages (list[list[ResNetBlock]]): several (typically 4) stages, each contains multiple :class:`ResNetBlockBase`. - num_classes (None or int): if None, will not perform classification. - out_features (list[str]): name of the layers whose outputs should be returned in forward. Can be anything in: - "stem", "linear", or "res2" ... If None, will return the output of the last layer. - """ - super(ResNet, self).__init__() - self.stem = stem - self.num_classes = num_classes - - current_stride = self.stem.stride - self._out_feature_strides = {"stem": current_stride} - self._out_feature_channels = {"stem": self.stem.out_channels} - - self.stages_and_names = [] - for i, blocks in enumerate(stages): - for block in blocks: - assert isinstance(block, ResNetBlockBase), block - curr_channels = block.out_channels - stage = nn.Sequential(*blocks) - name = "res" + str(i + 2) - self.add_module(name, stage) - self.stages_and_names.append((stage, name)) - self._out_feature_strides[name] = current_stride = int( - current_stride * np.prod([k.stride for k in blocks]) - ) - self._out_feature_channels[name] = blocks[-1].out_channels - - if num_classes is not None: - self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) - self.linear = nn.Linear(curr_channels, num_classes) - - # Sec 5.1 in "Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour": - # "The 1000-way fully-connected layer is initialized by - # drawing weights from a zero-mean Gaussian with std of 0.01." 
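# [Editor's note - annotation, not part of the original file] torch.nn.init.normal_
# accepts the keyword `std`, not `stddev`, so the call below would raise a TypeError
# if this classification branch (num_classes is not None) were ever exercised.
# A corrected sketch of the intended initialization:
#     nn.init.normal_(self.linear.weight, std=0.01)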
- nn.init.normal_(self.linear.weight, stddev=0.01) - name = "linear" - - if out_features is None: - out_features = [name] - self._out_features = out_features - assert len(self._out_features) - children = [x[0] for x in self.named_children()] - for out_feature in self._out_features: - assert out_feature in children, "Available children: {}".format(", ".join(children)) - - def forward(self, x): - outputs = {} - x = self.stem(x) - if "stem" in self._out_features: - outputs["stem"] = x - for stage, name in self.stages_and_names: - x = stage(x) - if name in self._out_features: - outputs[name] = x - if self.num_classes is not None: - x = self.avgpool(x) - x = self.linear(x) - if "linear" in self._out_features: - outputs["linear"] = x - return outputs - - def output_shape(self): - return { - name: ShapeSpec( - channels=self._out_feature_channels[name], - stride=self._out_feature_strides[name], - ) - for name in self._out_features - } - - @staticmethod - def make_stage( - block_class, - num_blocks, - first_stride=None, - *, - in_channels, - out_channels, - **kwargs, - ): - """ - Usually, layers that produce the same feature map spatial size - are defined as one "stage". - Under such definition, stride_per_block[1:] should all be 1. - """ - if first_stride is not None: - assert "stride" not in kwargs and "stride_per_block" not in kwargs - kwargs["stride_per_block"] = [first_stride] + [1] * (num_blocks - 1) - blocks = [] - for i in range(num_blocks): - curr_kwargs = {} - for k, v in kwargs.items(): - if k.endswith("_per_block"): - assert ( - len(v) == num_blocks - ), f"Argument '{k}' of make_stage should have the same length as num_blocks={num_blocks}." - newk = k[: -len("_per_block")] - assert newk not in kwargs, f"Cannot call make_stage with both {k} and {newk}!" - curr_kwargs[newk] = v[i] - else: - curr_kwargs[k] = v - - blocks.append(block_class(in_channels=in_channels, out_channels=out_channels, **curr_kwargs)) - in_channels = out_channels - - return blocks - - -class ROIPooler(nn.Module): - """ - Region of interest feature map pooler that supports pooling from one or more - feature maps. - """ - - def __init__( - self, - output_size, - scales, - sampling_ratio, - canonical_box_size=224, - canonical_level=4, - ): - super().__init__() - # assumption that stride is a power of 2. 
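# [Editor's note - illustration, not part of the original file] A minimal standalone
# sketch of the level computation that follows, assuming hypothetical four-level FPN
# scales 1/4 ... 1/32 (any other power-of-two scales behave the same way):
import math

fpn_scales = (1 / 4, 1 / 8, 1 / 16, 1 / 32)     # hypothetical example values
example_min_level = -math.log2(fpn_scales[0])   # 2.0 -> pooling starts at P2
example_max_level = -math.log2(fpn_scales[-1])  # 5.0 -> pooling ends at P5
# the "not pyramid" assertion below then holds: 4 levels == 5 - 2 + 1
assert len(fpn_scales) == int(example_max_level - example_min_level) + 1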
- min_level = -math.log2(scales[0]) - max_level = -math.log2(scales[-1]) - - # a bunch of testing - assert math.isclose(min_level, int(min_level)) and math.isclose(max_level, int(max_level)) - assert len(scales) == max_level - min_level + 1, "not pyramid" - assert 0 < min_level and min_level <= max_level - if isinstance(output_size, int): - output_size = (output_size, output_size) - assert len(output_size) == 2 and isinstance(output_size[0], int) and isinstance(output_size[1], int) - if len(scales) > 1: - assert min_level <= canonical_level and canonical_level <= max_level - assert canonical_box_size > 0 - - self.output_size = output_size - self.min_level = int(min_level) - self.max_level = int(max_level) - self.level_poolers = nn.ModuleList(RoIPool(output_size, spatial_scale=scale) for scale in scales) - self.canonical_level = canonical_level - self.canonical_box_size = canonical_box_size - - def forward(self, feature_maps, boxes): - """ - Args: - feature_maps: List[torch.Tensor(N,C,W,H)] - box_lists: list[torch.Tensor]) - Returns: - A tensor of shape(N*B, Channels, output_size, output_size) - """ - x = list(feature_maps.values()) - num_level_assignments = len(self.level_poolers) - assert len(x) == num_level_assignments and len(boxes) == x[0].size(0) - - pooler_fmt_boxes = convert_boxes_to_pooler_format(boxes) - - if num_level_assignments == 1: - return self.level_poolers[0](x[0], pooler_fmt_boxes) - - level_assignments = assign_boxes_to_levels( - boxes, - self.min_level, - self.max_level, - self.canonical_box_size, - self.canonical_level, - ) - - num_boxes = len(pooler_fmt_boxes) - num_channels = x[0].shape[1] - output_size = self.output_size[0] - - dtype, device = x[0].dtype, x[0].device - output = torch.zeros( - (num_boxes, num_channels, output_size, output_size), - dtype=dtype, - device=device, - ) - - for level, (x_level, pooler) in enumerate(zip(x, self.level_poolers)): - inds = torch.nonzero(level_assignments == level).squeeze(1) - pooler_fmt_boxes_level = pooler_fmt_boxes[inds] - output[inds] = pooler(x_level, pooler_fmt_boxes_level) - - return output - - -class ROIOutputs: - def __init__(self, cfg, training=False): - self.smooth_l1_beta = cfg.ROI_BOX_HEAD.SMOOTH_L1_BETA - self.box2box_transform = Box2BoxTransform(weights=cfg.ROI_BOX_HEAD.BBOX_REG_WEIGHTS) - self.training = training - self.score_thresh = cfg.ROI_HEADS.SCORE_THRESH_TEST - self.min_detections = cfg.MIN_DETECTIONS - self.max_detections = cfg.MAX_DETECTIONS - - nms_thresh = cfg.ROI_HEADS.NMS_THRESH_TEST - if not isinstance(nms_thresh, list): - nms_thresh = [nms_thresh] - self.nms_thresh = nms_thresh - - def _predict_boxes(self, proposals, box_deltas, preds_per_image): - num_pred = box_deltas.size(0) - B = proposals[0].size(-1) - K = box_deltas.size(-1) // B - box_deltas = box_deltas.view(num_pred * K, B) - proposals = torch.cat(proposals, dim=0).unsqueeze(-2).expand(num_pred, K, B) - proposals = proposals.reshape(-1, B) - boxes = self.box2box_transform.apply_deltas(box_deltas, proposals) - return boxes.view(num_pred, K * B).split(preds_per_image, dim=0) - - def _predict_objs(self, obj_logits, preds_per_image): - probs = nn.functional.softmax(obj_logits, dim=-1) - probs = probs.split(preds_per_image, dim=0) - return probs - - def _predict_attrs(self, attr_logits, preds_per_image): - attr_logits = attr_logits[..., :-1].softmax(-1) - attr_probs, attrs = attr_logits.max(-1) - return attr_probs.split(preds_per_image, dim=0), attrs.split(preds_per_image, dim=0) - - @torch.no_grad() - def inference( - self, - obj_logits, - 
attr_logits, - box_deltas, - pred_boxes, - features, - sizes, - scales=None, - ): - # only the pred boxes is the - preds_per_image = [p.size(0) for p in pred_boxes] - boxes_all = self._predict_boxes(pred_boxes, box_deltas, preds_per_image) - obj_scores_all = self._predict_objs(obj_logits, preds_per_image) # list of length N - attr_probs_all, attrs_all = self._predict_attrs(attr_logits, preds_per_image) - features = features.split(preds_per_image, dim=0) - - # fun for each image too, also I can experiment and do multiple images - final_results = [] - zipped = zip(boxes_all, obj_scores_all, attr_probs_all, attrs_all, sizes) - for i, (boxes, obj_scores, attr_probs, attrs, size) in enumerate(zipped): - for nms_t in self.nms_thresh: - outputs = do_nms( - boxes, - obj_scores, - size, - self.score_thresh, - nms_t, - self.min_detections, - self.max_detections, - ) - if outputs is not None: - max_boxes, max_scores, classes, ids = outputs - break - - if scales is not None: - scale_yx = scales[i] - max_boxes[:, 0::2] *= scale_yx[1] - max_boxes[:, 1::2] *= scale_yx[0] - - final_results.append( - ( - max_boxes, - classes, - max_scores, - attrs[ids], - attr_probs[ids], - features[i][ids], - ) - ) - boxes, classes, class_probs, attrs, attr_probs, roi_features = map(list, zip(*final_results)) - return boxes, classes, class_probs, attrs, attr_probs, roi_features - - def training(self, obj_logits, attr_logits, box_deltas, pred_boxes, features, sizes): - pass - - def __call__( - self, - obj_logits, - attr_logits, - box_deltas, - pred_boxes, - features, - sizes, - scales=None, - ): - if self.training: - raise NotImplementedError() - return self.inference( - obj_logits, - attr_logits, - box_deltas, - pred_boxes, - features, - sizes, - scales=scales, - ) - - -class Res5ROIHeads(nn.Module): - """ - ROIHeads perform all per-region computation in an R-CNN. - It contains logic of cropping the regions, extract per-region features - (by the res-5 block in this case), and make per-region predictions. - """ - - def __init__(self, cfg, input_shape): - super().__init__() - self.batch_size_per_image = cfg.RPN.BATCH_SIZE_PER_IMAGE - self.positive_sample_fraction = cfg.ROI_HEADS.POSITIVE_FRACTION - self.in_features = cfg.ROI_HEADS.IN_FEATURES - self.num_classes = cfg.ROI_HEADS.NUM_CLASSES - self.proposal_append_gt = cfg.ROI_HEADS.PROPOSAL_APPEND_GT - self.feature_strides = {k: v.stride for k, v in input_shape.items()} - self.feature_channels = {k: v.channels for k, v in input_shape.items()} - self.cls_agnostic_bbox_reg = cfg.ROI_BOX_HEAD.CLS_AGNOSTIC_BBOX_REG - self.stage_channel_factor = 2**3 # res5 is 8x res2 - self.out_channels = cfg.RESNETS.RES2_OUT_CHANNELS * self.stage_channel_factor - - # self.proposal_matcher = Matcher( - # cfg.ROI_HEADS.IOU_THRESHOLDS, - # cfg.ROI_HEADS.IOU_LABELS, - # allow_low_quality_matches=False, - # ) - - pooler_resolution = cfg.ROI_BOX_HEAD.POOLER_RESOLUTION - pooler_scales = (1.0 / self.feature_strides[self.in_features[0]],) - sampling_ratio = cfg.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO - res5_halve = cfg.ROI_BOX_HEAD.RES5HALVE - use_attr = cfg.ROI_BOX_HEAD.ATTR - num_attrs = cfg.ROI_BOX_HEAD.NUM_ATTRS - - self.pooler = ROIPooler( - output_size=pooler_resolution, - scales=pooler_scales, - sampling_ratio=sampling_ratio, - ) - - self.res5 = self._build_res5_block(cfg) - if not res5_halve: - """ - Modifications for VG in RoI heads: - 1. Change the stride of conv1 and shortcut in Res5.Block1 from 2 to 1 - 2. 
Modifying all conv2 with (padding: 1 --> 2) and (dilation: 1 --> 2) - """ - self.res5[0].conv1.stride = (1, 1) - self.res5[0].shortcut.stride = (1, 1) - for i in range(3): - self.res5[i].conv2.padding = (2, 2) - self.res5[i].conv2.dilation = (2, 2) - - self.box_predictor = FastRCNNOutputLayers( - self.out_channels, - self.num_classes, - self.cls_agnostic_bbox_reg, - use_attr=use_attr, - num_attrs=num_attrs, - ) - - def _build_res5_block(self, cfg): - stage_channel_factor = self.stage_channel_factor # res5 is 8x res2 - num_groups = cfg.RESNETS.NUM_GROUPS - width_per_group = cfg.RESNETS.WIDTH_PER_GROUP - bottleneck_channels = num_groups * width_per_group * stage_channel_factor - out_channels = self.out_channels - stride_in_1x1 = cfg.RESNETS.STRIDE_IN_1X1 - norm = cfg.RESNETS.NORM - - blocks = ResNet.make_stage( - BottleneckBlock, - 3, - first_stride=2, - in_channels=out_channels // 2, - bottleneck_channels=bottleneck_channels, - out_channels=out_channels, - num_groups=num_groups, - norm=norm, - stride_in_1x1=stride_in_1x1, - ) - return nn.Sequential(*blocks) - - def _shared_roi_transform(self, features, boxes): - x = self.pooler(features, boxes) - return self.res5(x) - - def forward(self, features, proposal_boxes, gt_boxes=None): - if self.training: - """ - see https://github.com/airsplay/py-bottom-up-attention/\ - blob/master/detectron2/modeling/roi_heads/roi_heads.py - """ - raise NotImplementedError() - - assert not proposal_boxes[0].requires_grad - box_features = self._shared_roi_transform(features, proposal_boxes) - feature_pooled = box_features.mean(dim=[2, 3]) # pooled to 1x1 - obj_logits, attr_logits, pred_proposal_deltas = self.box_predictor(feature_pooled) - return obj_logits, attr_logits, pred_proposal_deltas, feature_pooled - - -class AnchorGenerator(nn.Module): - """ - For a set of image sizes and feature maps, computes a set of anchors. - """ - - def __init__(self, cfg, input_shape: List[ShapeSpec]): - super().__init__() - sizes = cfg.ANCHOR_GENERATOR.SIZES - aspect_ratios = cfg.ANCHOR_GENERATOR.ASPECT_RATIOS - self.strides = [x.stride for x in input_shape] - self.offset = cfg.ANCHOR_GENERATOR.OFFSET - assert 0.0 <= self.offset < 1.0, self.offset - - """ - sizes (list[list[int]]): sizes[i] is the list of anchor sizes for feat map i - 1. given in absolute lengths in units of the input image; - 2. they do not dynamically scale if the input image size changes. - aspect_ratios (list[list[float]]) - strides (list[int]): stride of each input feature. - """ - - self.num_features = len(self.strides) - self.cell_anchors = nn.ParameterList(self._calculate_anchors(sizes, aspect_ratios)) - self._spacial_feat_dim = 4 - - def _calculate_anchors(self, sizes, aspect_ratios): - # If one size (or aspect ratio) is specified and there are multiple feature - # maps, then we "broadcast" anchors of that single size (or aspect ratio) - if len(sizes) == 1: - sizes *= self.num_features - if len(aspect_ratios) == 1: - aspect_ratios *= self.num_features - assert self.num_features == len(sizes) - assert self.num_features == len(aspect_ratios) - - cell_anchors = [self.generate_cell_anchors(s, a).float() for s, a in zip(sizes, aspect_ratios)] - - return cell_anchors - - @property - def box_dim(self): - return self._spacial_feat_dim - - @property - def num_cell_anchors(self): - """ - Returns: - list[int]: Each int is the number of anchors at every pixel location, on that feature map. 
- """ - return [len(cell_anchors) for cell_anchors in self.cell_anchors] - - def grid_anchors(self, grid_sizes): - anchors = [] - for size, stride, base_anchors in zip(grid_sizes, self.strides, self.cell_anchors): - shift_x, shift_y = _create_grid_offsets(size, stride, self.offset, base_anchors.device) - shifts = torch.stack((shift_x, shift_y, shift_x, shift_y), dim=1) - - anchors.append((shifts.view(-1, 1, 4) + base_anchors.view(1, -1, 4)).reshape(-1, 4)) - - return anchors - - def generate_cell_anchors(self, sizes=(32, 64, 128, 256, 512), aspect_ratios=(0.5, 1, 2)): - """ - anchors are continuous geometric rectangles - centered on one feature map point sample. - We can later build the set of anchors - for the entire feature map by tiling these tensors - """ - - anchors = [] - for size in sizes: - area = size**2.0 - for aspect_ratio in aspect_ratios: - w = math.sqrt(area / aspect_ratio) - h = aspect_ratio * w - x0, y0, x1, y1 = -w / 2.0, -h / 2.0, w / 2.0, h / 2.0 - anchors.append([x0, y0, x1, y1]) - return nn.Parameter(torch.tensor(anchors)) - - def forward(self, features): - """ - Args: - features List[torch.Tensor]: list of feature maps on which to generate anchors. - Returns: - torch.Tensor: a list of #image elements. - """ - num_images = features[0].size(0) - grid_sizes = [feature_map.shape[-2:] for feature_map in features] - anchors_over_all_feature_maps = self.grid_anchors(grid_sizes) - anchors_over_all_feature_maps = torch.stack(anchors_over_all_feature_maps) - return anchors_over_all_feature_maps.unsqueeze(0).repeat_interleave(num_images, dim=0) - - -class RPNHead(nn.Module): - """ - RPN classification and regression heads. Uses a 3x3 conv to produce a shared - hidden state from which one 1x1 conv predicts objectness logits for each anchor - and a second 1x1 conv predicts bounding-box deltas specifying how to deform - each anchor into an object proposal. - """ - - def __init__(self, cfg, input_shape: List[ShapeSpec]): - super().__init__() - - # Standard RPN is shared across levels: - in_channels = [s.channels for s in input_shape] - assert len(set(in_channels)) == 1, "Each level must have the same channel!" 
- in_channels = in_channels[0] - - anchor_generator = AnchorGenerator(cfg, input_shape) - num_cell_anchors = anchor_generator.num_cell_anchors - box_dim = anchor_generator.box_dim - assert len(set(num_cell_anchors)) == 1, "Each level must have the same number of cell anchors" - num_cell_anchors = num_cell_anchors[0] - - if cfg.PROPOSAL_GENERATOR.HIDDEN_CHANNELS == -1: - hid_channels = in_channels - else: - hid_channels = cfg.PROPOSAL_GENERATOR.HIDDEN_CHANNELS - # Modifications for VG in RPN (modeling/proposal_generator/rpn.py) - # Use hidden dim instead fo the same dim as Res4 (in_channels) - - # 3x3 conv for the hidden representation - self.conv = nn.Conv2d(in_channels, hid_channels, kernel_size=3, stride=1, padding=1) - # 1x1 conv for predicting objectness logits - self.objectness_logits = nn.Conv2d(hid_channels, num_cell_anchors, kernel_size=1, stride=1) - # 1x1 conv for predicting box2box transform deltas - self.anchor_deltas = nn.Conv2d(hid_channels, num_cell_anchors * box_dim, kernel_size=1, stride=1) - - for layer in [self.conv, self.objectness_logits, self.anchor_deltas]: - nn.init.normal_(layer.weight, std=0.01) - nn.init.constant_(layer.bias, 0) - - def forward(self, features): - """ - Args: - features (list[Tensor]): list of feature maps - """ - pred_objectness_logits = [] - pred_anchor_deltas = [] - for x in features: - t = nn.functional.relu(self.conv(x)) - pred_objectness_logits.append(self.objectness_logits(t)) - pred_anchor_deltas.append(self.anchor_deltas(t)) - return pred_objectness_logits, pred_anchor_deltas - - -class RPN(nn.Module): - """ - Region Proposal Network, introduced by the Faster R-CNN paper. - """ - - def __init__(self, cfg, input_shape: Dict[str, ShapeSpec]): - super().__init__() - - self.min_box_side_len = cfg.PROPOSAL_GENERATOR.MIN_SIZE - self.in_features = cfg.RPN.IN_FEATURES - self.nms_thresh = cfg.RPN.NMS_THRESH - self.batch_size_per_image = cfg.RPN.BATCH_SIZE_PER_IMAGE - self.positive_fraction = cfg.RPN.POSITIVE_FRACTION - self.smooth_l1_beta = cfg.RPN.SMOOTH_L1_BETA - self.loss_weight = cfg.RPN.LOSS_WEIGHT - - self.pre_nms_topk = { - True: cfg.RPN.PRE_NMS_TOPK_TRAIN, - False: cfg.RPN.PRE_NMS_TOPK_TEST, - } - self.post_nms_topk = { - True: cfg.RPN.POST_NMS_TOPK_TRAIN, - False: cfg.RPN.POST_NMS_TOPK_TEST, - } - self.boundary_threshold = cfg.RPN.BOUNDARY_THRESH - - self.anchor_generator = AnchorGenerator(cfg, [input_shape[f] for f in self.in_features]) - self.box2box_transform = Box2BoxTransform(weights=cfg.RPN.BBOX_REG_WEIGHTS) - self.anchor_matcher = Matcher( - cfg.RPN.IOU_THRESHOLDS, - cfg.RPN.IOU_LABELS, - allow_low_quality_matches=True, - ) - self.rpn_head = RPNHead(cfg, [input_shape[f] for f in self.in_features]) - - def training(self, images, image_shapes, features, gt_boxes): - pass - - def inference(self, outputs, images, image_shapes, features, gt_boxes=None): - outputs = find_top_rpn_proposals( - outputs.predict_proposals(), - outputs.predict_objectness_logits(), - images, - image_shapes, - self.nms_thresh, - self.pre_nms_topk[self.training], - self.post_nms_topk[self.training], - self.min_box_side_len, - self.training, - ) - - results = [] - for img in outputs: - im_boxes, img_box_logits = img - img_box_logits, inds = img_box_logits.sort(descending=True) - im_boxes = im_boxes[inds] - results.append((im_boxes, img_box_logits)) - - (proposal_boxes, logits) = tuple(map(list, zip(*results))) - return proposal_boxes, logits - - def forward(self, images, image_shapes, features, gt_boxes=None): - """ - Args: - images (torch.Tensor): input images of 
length `N` - features (dict[str: Tensor]) - gt_instances - """ - # features is dict, key = block level, v = feature_map - features = [features[f] for f in self.in_features] - pred_objectness_logits, pred_anchor_deltas = self.rpn_head(features) - anchors = self.anchor_generator(features) - outputs = RPNOutputs( - self.box2box_transform, - self.anchor_matcher, - self.batch_size_per_image, - self.positive_fraction, - images, - pred_objectness_logits, - pred_anchor_deltas, - anchors, - self.boundary_threshold, - gt_boxes, - self.smooth_l1_beta, - ) - # For RPN-only models, the proposals are the final output - - if self.training: - raise NotImplementedError() - return self.training(outputs, images, image_shapes, features, gt_boxes) - else: - return self.inference(outputs, images, image_shapes, features, gt_boxes) - - -class FastRCNNOutputLayers(nn.Module): - """ - Two linear layers for predicting Fast R-CNN outputs: - (1) proposal-to-detection box regression deltas - (2) classification scores - """ - - def __init__( - self, - input_size, - num_classes, - cls_agnostic_bbox_reg, - box_dim=4, - use_attr=False, - num_attrs=-1, - ): - """ - Args: - input_size (int): channels, or (channels, height, width) - num_classes (int) - cls_agnostic_bbox_reg (bool) - box_dim (int) - """ - super().__init__() - - if not isinstance(input_size, int): - input_size = np.prod(input_size) - - # (do + 1 for background class) - self.cls_score = nn.Linear(input_size, num_classes + 1) - num_bbox_reg_classes = 1 if cls_agnostic_bbox_reg else num_classes - self.bbox_pred = nn.Linear(input_size, num_bbox_reg_classes * box_dim) - - self.use_attr = use_attr - if use_attr: - """ - Modifications for VG in RoI heads - Embedding: {num_classes + 1} --> {input_size // 8} - Linear: {input_size + input_size // 8} --> {input_size // 4} - Linear: {input_size // 4} --> {num_attrs + 1} - """ - self.cls_embedding = nn.Embedding(num_classes + 1, input_size // 8) - self.fc_attr = nn.Linear(input_size + input_size // 8, input_size // 4) - self.attr_score = nn.Linear(input_size // 4, num_attrs + 1) - - nn.init.normal_(self.cls_score.weight, std=0.01) - nn.init.normal_(self.bbox_pred.weight, std=0.001) - for item in [self.cls_score, self.bbox_pred]: - nn.init.constant_(item.bias, 0) - - def forward(self, roi_features): - if roi_features.dim() > 2: - roi_features = torch.flatten(roi_features, start_dim=1) - scores = self.cls_score(roi_features) - proposal_deltas = self.bbox_pred(roi_features) - if self.use_attr: - _, max_class = scores.max(-1) # [b, c] --> [b] - cls_emb = self.cls_embedding(max_class) # [b] --> [b, 256] - roi_features = torch.cat([roi_features, cls_emb], -1) # [b, 2048] + [b, 256] --> [b, 2304] - roi_features = self.fc_attr(roi_features) - roi_features = nn.functional.relu(roi_features) - attr_scores = self.attr_score(roi_features) - return scores, attr_scores, proposal_deltas - else: - return scores, proposal_deltas - - -class GeneralizedRCNN(nn.Module): - def __init__(self, cfg): - super().__init__() - - self.device = torch.device(cfg.MODEL.DEVICE) - self.backbone = build_backbone(cfg) - self.proposal_generator = RPN(cfg, self.backbone.output_shape()) - self.roi_heads = Res5ROIHeads(cfg, self.backbone.output_shape()) - self.roi_outputs = ROIOutputs(cfg) - self.to(self.device) - - @classmethod - def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): - config = kwargs.pop("config", None) - state_dict = kwargs.pop("state_dict", None) - cache_dir = kwargs.pop("cache_dir", None) - from_tf = 
kwargs.pop("from_tf", False) - force_download = kwargs.pop("force_download", False) - resume_download = kwargs.pop("resume_download", False) - proxies = kwargs.pop("proxies", None) - local_files_only = kwargs.pop("local_files_only", False) - use_cdn = kwargs.pop("use_cdn", True) - - # Load config if we don't provide a configuration - if not isinstance(config, Config): - config_path = config if config is not None else pretrained_model_name_or_path - # try: - config = Config.from_pretrained( - config_path, - cache_dir=cache_dir, - force_download=force_download, - resume_download=resume_download, - proxies=proxies, - local_files_only=local_files_only, - ) - - # Load model - if pretrained_model_name_or_path is not None: - if os.path.isdir(pretrained_model_name_or_path): - if os.path.isfile(os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)): - # Load from a PyTorch checkpoint - archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME) - else: - raise EnvironmentError( - "Error no file named {} found in directory {} ".format( - WEIGHTS_NAME, - pretrained_model_name_or_path, - ) - ) - elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path): - archive_file = pretrained_model_name_or_path - elif os.path.isfile(pretrained_model_name_or_path + ".index"): - assert from_tf, "We found a TensorFlow checkpoint at {}, please set from_tf to True to load from this checkpoint".format( - pretrained_model_name_or_path + ".index" - ) - archive_file = pretrained_model_name_or_path + ".index" - else: - archive_file = hf_bucket_url( - pretrained_model_name_or_path, - filename=WEIGHTS_NAME, - use_cdn=use_cdn, - ) - - try: - # Load from URL or cache if already cached - resolved_archive_file = cached_path( - archive_file, - cache_dir=cache_dir, - force_download=force_download, - proxies=proxies, - resume_download=resume_download, - local_files_only=local_files_only, - ) - if resolved_archive_file is None: - raise EnvironmentError - except EnvironmentError: - msg = f"Can't load weights for '{pretrained_model_name_or_path}'." - raise EnvironmentError(msg) - - if resolved_archive_file == archive_file: - print("loading weights file {}".format(archive_file)) - else: - print("loading weights file {} from cache at {}".format(archive_file, resolved_archive_file)) - else: - resolved_archive_file = None - - # Instantiate model. - model = cls(config) - - if state_dict is None: - try: - try: - state_dict = torch.load(resolved_archive_file, map_location="cpu") - except Exception: - state_dict = load_checkpoint(resolved_archive_file) - - except Exception: - raise OSError( - "Unable to load weights from pytorch checkpoint file. " - "If you tried to load a PyTorch model from a TF 2.0 checkpoint, please set from_tf=True. 
" - ) - - missing_keys = [] - unexpected_keys = [] - error_msgs = [] - - # Convert old format to new format if needed from a PyTorch state_dict - old_keys = [] - new_keys = [] - for key in state_dict.keys(): - new_key = None - if "gamma" in key: - new_key = key.replace("gamma", "weight") - if "beta" in key: - new_key = key.replace("beta", "bias") - if new_key: - old_keys.append(key) - new_keys.append(new_key) - for old_key, new_key in zip(old_keys, new_keys): - state_dict[new_key] = state_dict.pop(old_key) - - # copy state_dict so _load_from_state_dict can modify it - metadata = getattr(state_dict, "_metadata", None) - state_dict = state_dict.copy() - if metadata is not None: - state_dict._metadata = metadata - - model_to_load = model - model_to_load.load_state_dict(state_dict) - - if model.__class__.__name__ != model_to_load.__class__.__name__: - base_model_state_dict = model_to_load.state_dict().keys() - head_model_state_dict_without_base_prefix = [ - key.split(cls.base_model_prefix + ".")[-1] for key in model.state_dict().keys() - ] - missing_keys.extend(head_model_state_dict_without_base_prefix - base_model_state_dict) - - if len(unexpected_keys) > 0: - print( - f"Some weights of the model checkpoint at {pretrained_model_name_or_path} were not used when" - f" initializing {model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are" - f" initializing {model.__class__.__name__} from the checkpoint of a model trained on another task or" - " with another architecture (e.g. initializing a BertForSequenceClassification model from a" - " BertForPreTraining model).\n- This IS NOT expected if you are initializing" - f" {model.__class__.__name__} from the checkpoint of a model that you expect to be exactly identical" - " (initializing a BertForSequenceClassification model from a BertForSequenceClassification model)." - ) - else: - print(f"All model checkpoint weights were used when initializing {model.__class__.__name__}.\n") - if len(missing_keys) > 0: - print( - f"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at" - f" {pretrained_model_name_or_path} and are newly initialized: {missing_keys}\nYou should probably" - " TRAIN this model on a down-stream task to be able to use it for predictions and inference." - ) - else: - print( - f"All the weights of {model.__class__.__name__} were initialized from the model checkpoint at" - f" {pretrained_model_name_or_path}.\nIf your task is similar to the task the model of the checkpoint" - f" was trained on, you can already use {model.__class__.__name__} for predictions without further" - " training." 
- ) - if len(error_msgs) > 0: - raise RuntimeError( - "Error(s) in loading state_dict for {}:\n\t{}".format( - model.__class__.__name__, "\n\t".join(error_msgs) - ) - ) - # Set model in evaluation mode to deactivate DropOut modules by default - model.eval() - - return model - - def forward( - self, - images, - image_shapes, - gt_boxes=None, - proposals=None, - scales_yx=None, - **kwargs, - ): - """ - kwargs: - max_detections (int), return_tensors {"np", "pt", None}, padding {None, - "max_detections"}, pad_value (int), location = {"cuda", "cpu"} - """ - if self.training: - raise NotImplementedError() - return self.inference( - images=images, - image_shapes=image_shapes, - gt_boxes=gt_boxes, - proposals=proposals, - scales_yx=scales_yx, - **kwargs, - ) - - @torch.no_grad() - def inference( - self, - images, - image_shapes, - gt_boxes=None, - proposals=None, - scales_yx=None, - **kwargs, - ): - # run images through backbone - original_sizes = image_shapes * scales_yx - features = self.backbone(images) - - # generate proposals if none are available - if proposals is None: - proposal_boxes, _ = self.proposal_generator(images, image_shapes, features, gt_boxes) - else: - assert proposals is not None - - # pool object features from either gt_boxes, or from proposals - obj_logits, attr_logits, box_deltas, feature_pooled = self.roi_heads(features, proposal_boxes, gt_boxes) - - # prepare FRCNN Outputs and select top proposals - boxes, classes, class_probs, attrs, attr_probs, roi_features = self.roi_outputs( - obj_logits=obj_logits, - attr_logits=attr_logits, - box_deltas=box_deltas, - pred_boxes=proposal_boxes, - features=feature_pooled, - sizes=image_shapes, - scales=scales_yx, - ) - - # will we pad??? - subset_kwargs = { - "max_detections": kwargs.get("max_detections", None), - "return_tensors": kwargs.get("return_tensors", None), - "pad_value": kwargs.get("pad_value", 0), - "padding": kwargs.get("padding", None), - } - preds_per_image = torch.tensor([p.size(0) for p in boxes]) - boxes = pad_list_tensors(boxes, preds_per_image, **subset_kwargs) - classes = pad_list_tensors(classes, preds_per_image, **subset_kwargs) - class_probs = pad_list_tensors(class_probs, preds_per_image, **subset_kwargs) - attrs = pad_list_tensors(attrs, preds_per_image, **subset_kwargs) - attr_probs = pad_list_tensors(attr_probs, preds_per_image, **subset_kwargs) - roi_features = pad_list_tensors(roi_features, preds_per_image, **subset_kwargs) - subset_kwargs["padding"] = None - preds_per_image = pad_list_tensors(preds_per_image, None, **subset_kwargs) - sizes = pad_list_tensors(image_shapes, None, **subset_kwargs) - normalized_boxes = norm_box(boxes, original_sizes) - return OrderedDict( - { - "obj_ids": classes, - "obj_probs": class_probs, - "attr_ids": attrs, - "attr_probs": attr_probs, - "boxes": boxes, - "sizes": sizes, - "preds_per_image": preds_per_image, - "roi_features": roi_features, - "normalized_boxes": normalized_boxes, - } - ) diff --git a/examples/research_projects/lxmert/processing_image.py b/examples/research_projects/lxmert/processing_image.py deleted file mode 100644 index 65f8f6cd377..00000000000 --- a/examples/research_projects/lxmert/processing_image.py +++ /dev/null @@ -1,151 +0,0 @@ -""" -coding=utf-8 -Copyright 2018, Antonio Mendoza Hao Tan, Mohit Bansal -Adapted From Facebook Inc, Detectron2 - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License.import copy -""" - -import sys -from typing import Tuple - -import numpy as np -import torch -from PIL import Image -from torch import nn - -from transformers.image_utils import PILImageResampling -from utils import img_tensorize - - -class ResizeShortestEdge: - def __init__(self, short_edge_length, max_size=sys.maxsize): - """ - Args: - short_edge_length (list[min, max]) - max_size (int): maximum allowed longest edge length. - """ - self.interp_method = "bilinear" - self.max_size = max_size - self.short_edge_length = short_edge_length - - def __call__(self, imgs): - img_augs = [] - for img in imgs: - h, w = img.shape[:2] - # later: provide list and randomly choose index for resize - size = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1) - if size == 0: - return img - scale = size * 1.0 / min(h, w) - if h < w: - newh, neww = size, scale * w - else: - newh, neww = scale * h, size - if max(newh, neww) > self.max_size: - scale = self.max_size * 1.0 / max(newh, neww) - newh = newh * scale - neww = neww * scale - neww = int(neww + 0.5) - newh = int(newh + 0.5) - - if img.dtype == np.uint8: - pil_image = Image.fromarray(img) - pil_image = pil_image.resize((neww, newh), PILImageResampling.BILINEAR) - img = np.asarray(pil_image) - else: - img = img.permute(2, 0, 1).unsqueeze(0) # 3, 0, 1) # hw(c) -> nchw - img = nn.functional.interpolate( - img, (newh, neww), mode=self.interp_method, align_corners=False - ).squeeze(0) - img_augs.append(img) - - return img_augs - - -class Preprocess: - def __init__(self, cfg): - self.aug = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST) - self.input_format = cfg.INPUT.FORMAT - self.size_divisibility = cfg.SIZE_DIVISIBILITY - self.pad_value = cfg.PAD_VALUE - self.max_image_size = cfg.INPUT.MAX_SIZE_TEST - self.device = cfg.MODEL.DEVICE - self.pixel_std = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1) - self.pixel_mean = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1) - self.normalizer = lambda x: (x - self.pixel_mean) / self.pixel_std - - def pad(self, images): - max_size = tuple(max(s) for s in zip(*[img.shape for img in images])) - image_sizes = [im.shape[-2:] for im in images] - images = [ - nn.functional.pad( - im, - [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]], - value=self.pad_value, - ) - for size, im in zip(image_sizes, images) - ] - - return torch.stack(images), torch.tensor(image_sizes) - - def __call__(self, images, single_image=False): - with torch.no_grad(): - if not isinstance(images, list): - images = [images] - if single_image: - assert len(images) == 1 - for i in range(len(images)): - if isinstance(images[i], torch.Tensor): - images.insert(i, images.pop(i).to(self.device).float()) - elif not isinstance(images[i], torch.Tensor): - images.insert( - i, - torch.as_tensor(img_tensorize(images.pop(i), input_format=self.input_format)) - .to(self.device) - .float(), - ) - # resize smallest edge - raw_sizes = torch.tensor([im.shape[:2] for im in images]) - images = self.aug(images) - # transpose images 
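The resize rule in `ResizeShortestEdge.__call__` above can be stated on its own: scale the image so its shortest edge reaches the sampled `size`, then shrink again if the longest edge would exceed `max_size`. A small numeric sketch of that rule (image sizes are illustrative):

```python
def resized_hw(h, w, size, max_size):
    # Same arithmetic as ResizeShortestEdge above, for a single (h, w) pair.
    scale = size / min(h, w)
    newh, neww = (size, scale * w) if h < w else (scale * h, size)
    if max(newh, neww) > max_size:
        rescale = max_size / max(newh, neww)
        newh, neww = newh * rescale, neww * rescale
    return int(newh + 0.5), int(neww + 0.5)

print(resized_hw(480, 640, 800, 1333))   # (800, 1067): the shortest edge is scaled up to 800
print(resized_hw(480, 1920, 800, 1333))  # (333, 1333): capped because the long edge would exceed 1333
```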
and convert to torch tensors - # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images] - # now normalize before pad to avoid useless arithmetic - images = [self.normalizer(x) for x in images] - # now pad them to do the following operations - images, sizes = self.pad(images) - # Normalize - - if self.size_divisibility > 0: - raise NotImplementedError() - # pad - scales_yx = torch.true_divide(raw_sizes, sizes) - if single_image: - return images[0], sizes[0], scales_yx[0] - else: - return images, sizes, scales_yx - - -def _scale_box(boxes, scale_yx): - boxes[:, 0::2] *= scale_yx[:, 1] - boxes[:, 1::2] *= scale_yx[:, 0] - return boxes - - -def _clip_box(tensor, box_size: Tuple[int, int]): - assert torch.isfinite(tensor).all(), "Box tensor contains infinite or NaN!" - h, w = box_size - tensor[:, 0].clamp_(min=0, max=w) - tensor[:, 1].clamp_(min=0, max=h) - tensor[:, 2].clamp_(min=0, max=w) - tensor[:, 3].clamp_(min=0, max=h) diff --git a/examples/research_projects/lxmert/requirements.txt b/examples/research_projects/lxmert/requirements.txt deleted file mode 100644 index e2778663a53..00000000000 --- a/examples/research_projects/lxmert/requirements.txt +++ /dev/null @@ -1,98 +0,0 @@ -appdirs==1.4.3 -argon2-cffi==20.1.0 -async-generator==1.10 -attrs==20.2.0 -backcall==0.2.0 -CacheControl==0.12.6 -certifi==2024.7.4 -cffi==1.14.2 -chardet==3.0.4 -click==7.1.2 -colorama==0.4.3 -contextlib2==0.6.0 -cycler==0.10.0 -datasets==1.0.0 -decorator==4.4.2 -defusedxml==0.6.0 -dill==0.3.2 -distlib==0.3.0 -distro==1.4.0 -entrypoints==0.3 -filelock==3.0.12 -future==0.18.3 -html5lib==1.0.1 -idna==3.7 -ipaddr==2.2.0 -ipykernel==5.3.4 -ipython -ipython-genutils==0.2.0 -ipywidgets==7.5.1 -jedi==0.17.2 -Jinja2>=2.11.3 -joblib==1.2.0 -jsonschema==3.2.0 -jupyter==1.0.0 -jupyter-client==6.1.7 -jupyter-console==6.2.0 -jupyter-core==4.11.2 -jupyterlab-pygments==0.1.1 -kiwisolver==1.2.0 -lockfile==0.12.2 -MarkupSafe==1.1.1 -matplotlib==3.3.1 -mistune==2.0.3 -msgpack==0.6.2 -nbclient==0.5.0 -nbconvert==6.5.1 -nbformat==5.0.7 -nest-asyncio==1.4.0 -notebook==6.4.12 -numpy==1.22.0 -opencv-python==4.8.1.78 -packaging==20.3 -pandas==1.1.2 -pandocfilters==1.4.2 -parso==0.7.1 -pep517==0.8.2 -pexpect==4.8.0 -pickleshare==0.7.5 -Pillow>=8.1.1 -progress==1.5 -prometheus-client==0.8.0 -prompt-toolkit==3.0.7 -ptyprocess==0.6.0 -pyaml==20.4.0 -pyarrow==15.0.0 -pycparser==2.20 -Pygments>=2.7.4 -pyparsing==2.4.6 -pyrsistent==0.16.0 -python-dateutil==2.8.1 -pytoml==0.1.21 -pytz==2020.1 -PyYAML>=5.4 -pyzmq==19.0.2 -qtconsole==4.7.7 -QtPy==1.9.0 -regex==2020.7.14 -requests==2.32.2 -retrying==1.3.3 -sacremoses==0.0.43 -Send2Trash==1.5.0 -sentencepiece==0.1.91 -six==1.14.0 -terminado==0.8.3 -testpath==0.4.4 -tokenizers==0.8.1rc2 -torch==2.2.0 -torchvision==0.7.0 -tornado==6.4.2 -tqdm==4.66.3 -traitlets -git+https://github.com/huggingface/transformers.git -urllib3==1.26.19 -wcwidth==0.2.5 -webencodings==0.5.1 -wget==3.2 -widgetsnbextension==3.5.1 -xxhash==2.0.0 diff --git a/examples/research_projects/lxmert/utils.py b/examples/research_projects/lxmert/utils.py deleted file mode 100644 index 995fbd2c19a..00000000000 --- a/examples/research_projects/lxmert/utils.py +++ /dev/null @@ -1,554 +0,0 @@ -""" -coding=utf-8 -Copyright 2018, Antonio Mendoza Hao Tan, Mohit Bansal, Huggingface team :) -Adapted From Facebook Inc, Detectron2 - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
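For orientation, the deleted LXMERT demo wired `Config`, `GeneralizedRCNN`, and `Preprocess` together roughly as below to turn an image into pooled RoI features. This is a hedged reconstruction rather than the removed demo verbatim: the checkpoint name `unc-nlp/frcnn-vg-finetuned` and the image path are placeholders, and 36 is simply the number of regions LXMERT consumes.

```python
from modeling_frcnn import GeneralizedRCNN   # the deleted module above
from processing_image import Preprocess
from utils import Config

frcnn_cfg = Config.from_pretrained("unc-nlp/frcnn-vg-finetuned")  # placeholder checkpoint name
frcnn = GeneralizedRCNN.from_pretrained("unc-nlp/frcnn-vg-finetuned", config=frcnn_cfg)
image_preprocess = Preprocess(frcnn_cfg)

# Resize, normalize and pad the image, keeping the scale factors so boxes can be mapped back.
images, sizes, scales_yx = image_preprocess("input.jpg")  # placeholder image path

output_dict = frcnn(
    images,
    sizes,
    scales_yx=scales_yx,
    padding="max_detections",
    max_detections=36,        # assumed value: the number of regions LXMERT expects
    return_tensors="pt",
)
roi_features = output_dict.get("roi_features")          # roughly (1, 36, 2048) pooled visual features
normalized_boxes = output_dict.get("normalized_boxes")  # boxes rescaled to the original image size
```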
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License.import copy -""" - -import copy -import fnmatch -import json -import os -import pickle as pkl -import shutil -import sys -import tarfile -import tempfile -from collections import OrderedDict -from contextlib import contextmanager -from functools import partial -from io import BytesIO -from pathlib import Path -from urllib.parse import urlparse -from zipfile import ZipFile, is_zipfile - -import cv2 -import numpy as np -import requests -import wget -from filelock import FileLock -from huggingface_hub.utils import insecure_hashlib -from PIL import Image -from tqdm.auto import tqdm -from yaml import Loader, dump, load - - -try: - import torch - - _torch_available = True -except ImportError: - _torch_available = False - - -try: - from torch.hub import _get_torch_home - - torch_cache_home = _get_torch_home() -except ImportError: - torch_cache_home = os.path.expanduser( - os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch")) - ) - -default_cache_path = os.path.join(torch_cache_home, "transformers") - -CLOUDFRONT_DISTRIB_PREFIX = "https://cdn.huggingface.co" -S3_BUCKET_PREFIX = "https://s3.amazonaws.com/models.huggingface.co/bert" -PATH = "/".join(str(Path(__file__).resolve()).split("/")[:-1]) -CONFIG = os.path.join(PATH, "config.yaml") -ATTRIBUTES = os.path.join(PATH, "attributes.txt") -OBJECTS = os.path.join(PATH, "objects.txt") -PYTORCH_PRETRAINED_BERT_CACHE = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path) -PYTORCH_TRANSFORMERS_CACHE = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE) -TRANSFORMERS_CACHE = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE) -WEIGHTS_NAME = "pytorch_model.bin" -CONFIG_NAME = "config.yaml" - - -def load_labels(objs=OBJECTS, attrs=ATTRIBUTES): - vg_classes = [] - with open(objs) as f: - for object in f.readlines(): - vg_classes.append(object.split(",")[0].lower().strip()) - - vg_attrs = [] - with open(attrs) as f: - for object in f.readlines(): - vg_attrs.append(object.split(",")[0].lower().strip()) - return vg_classes, vg_attrs - - -def load_checkpoint(ckp): - r = OrderedDict() - with open(ckp, "rb") as f: - ckp = pkl.load(f)["model"] - for k in copy.deepcopy(list(ckp.keys())): - v = ckp.pop(k) - if isinstance(v, np.ndarray): - v = torch.tensor(v) - else: - assert isinstance(v, torch.tensor), type(v) - r[k] = v - return r - - -class Config: - _pointer = {} - - def __init__(self, dictionary: dict, name: str = "root", level=0): - self._name = name - self._level = level - d = {} - for k, v in dictionary.items(): - if v is None: - raise ValueError() - k = copy.deepcopy(k) - v = copy.deepcopy(v) - if isinstance(v, dict): - v = Config(v, name=k, level=level + 1) - d[k] = v - setattr(self, k, v) - - self._pointer = d - - def __repr__(self): - return str(list((self._pointer.keys()))) - - def __setattr__(self, key, val): - self.__dict__[key] = val - self.__dict__[key.upper()] = val - levels = key.split(".") - last_level = len(levels) - 1 - pointer = self._pointer - if len(levels) > 1: - for i, l in enumerate(levels): - if hasattr(self, l) and isinstance(getattr(self, l), Config): - 
setattr(getattr(self, l), ".".join(levels[i:]), val) - if l == last_level: - pointer[l] = val - else: - pointer = pointer[l] - - def to_dict(self): - return self._pointer - - def dump_yaml(self, data, file_name): - with open(f"{file_name}", "w") as stream: - dump(data, stream) - - def dump_json(self, data, file_name): - with open(f"{file_name}", "w") as stream: - json.dump(data, stream) - - @staticmethod - def load_yaml(config): - with open(config) as stream: - data = load(stream, Loader=Loader) - return data - - def __str__(self): - t = " " - if self._name != "root": - r = f"{t * (self._level-1)}{self._name}:\n" - else: - r = "" - level = self._level - for i, (k, v) in enumerate(self._pointer.items()): - if isinstance(v, Config): - r += f"{t * (self._level)}{v}\n" - self._level += 1 - else: - r += f"{t * (self._level)}{k}: {v} ({type(v).__name__})\n" - self._level = level - return r[:-1] - - @classmethod - def from_pretrained(cls, pretrained_model_name_or_path: str, **kwargs): - config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs) - return cls(config_dict) - - @classmethod - def get_config_dict(cls, pretrained_model_name_or_path: str, **kwargs): - cache_dir = kwargs.pop("cache_dir", None) - force_download = kwargs.pop("force_download", False) - resume_download = kwargs.pop("resume_download", False) - proxies = kwargs.pop("proxies", None) - local_files_only = kwargs.pop("local_files_only", False) - - if os.path.isdir(pretrained_model_name_or_path): - config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME) - elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path): - config_file = pretrained_model_name_or_path - else: - config_file = hf_bucket_url(pretrained_model_name_or_path, filename=CONFIG_NAME, use_cdn=False) - - try: - # Load from URL or cache if already cached - resolved_config_file = cached_path( - config_file, - cache_dir=cache_dir, - force_download=force_download, - proxies=proxies, - resume_download=resume_download, - local_files_only=local_files_only, - ) - # Load config dict - if resolved_config_file is None: - raise EnvironmentError - - config_file = Config.load_yaml(resolved_config_file) - - except EnvironmentError: - msg = "Can't load config for" - raise EnvironmentError(msg) - - if resolved_config_file == config_file: - print("loading configuration file from path") - else: - print("loading configuration file cache") - - return Config.load_yaml(resolved_config_file), kwargs - - -# quick compare tensors -def compare(in_tensor): - out_tensor = torch.load("dump.pt", map_location=in_tensor.device) - n1 = in_tensor.numpy() - n2 = out_tensor.numpy()[0] - print(n1.shape, n1[0, 0, :5]) - print(n2.shape, n2[0, 0, :5]) - assert np.allclose(n1, n2, rtol=0.01, atol=0.1), ( - f"{sum([1 for x in np.isclose(n1, n2, rtol=0.01, atol=0.1).flatten() if x is False])/len(n1.flatten())*100:.4f} %" - " element-wise mismatch" - ) - raise Exception("tensors are all good") - - # Hugging face functions below - - -def is_remote_url(url_or_filename): - parsed = urlparse(url_or_filename) - return parsed.scheme in ("http", "https") - - -def hf_bucket_url(model_id: str, filename: str, use_cdn=True) -> str: - endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX - legacy_format = "/" not in model_id - if legacy_format: - return f"{endpoint}/{model_id}-{filename}" - else: - return f"{endpoint}/{model_id}/{filename}" - - -def http_get( - url, - temp_file, - proxies=None, - resume_size=0, - user_agent=None, 
-): - ua = "python/{}".format(sys.version.split()[0]) - if _torch_available: - ua += "; torch/{}".format(torch.__version__) - if isinstance(user_agent, dict): - ua += "; " + "; ".join("{}/{}".format(k, v) for k, v in user_agent.items()) - elif isinstance(user_agent, str): - ua += "; " + user_agent - headers = {"user-agent": ua} - if resume_size > 0: - headers["Range"] = "bytes=%d-" % (resume_size,) - response = requests.get(url, stream=True, proxies=proxies, headers=headers) - if response.status_code == 416: # Range not satisfiable - return - content_length = response.headers.get("Content-Length") - total = resume_size + int(content_length) if content_length is not None else None - progress = tqdm( - unit="B", - unit_scale=True, - total=total, - initial=resume_size, - desc="Downloading", - ) - for chunk in response.iter_content(chunk_size=1024): - if chunk: # filter out keep-alive new chunks - progress.update(len(chunk)) - temp_file.write(chunk) - progress.close() - - -def get_from_cache( - url, - cache_dir=None, - force_download=False, - proxies=None, - etag_timeout=10, - resume_download=False, - user_agent=None, - local_files_only=False, -): - if cache_dir is None: - cache_dir = TRANSFORMERS_CACHE - if isinstance(cache_dir, Path): - cache_dir = str(cache_dir) - - os.makedirs(cache_dir, exist_ok=True) - - etag = None - if not local_files_only: - try: - response = requests.head(url, allow_redirects=True, proxies=proxies, timeout=etag_timeout) - if response.status_code == 200: - etag = response.headers.get("ETag") - except (EnvironmentError, requests.exceptions.Timeout): - # etag is already None - pass - - filename = url_to_filename(url, etag) - - # get cache path to put the file - cache_path = os.path.join(cache_dir, filename) - - # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible. - # try to get the last downloaded one - if etag is None: - if os.path.exists(cache_path): - return cache_path - else: - matching_files = [ - file - for file in fnmatch.filter(os.listdir(cache_dir), filename + ".*") - if not file.endswith(".json") and not file.endswith(".lock") - ] - if len(matching_files) > 0: - return os.path.join(cache_dir, matching_files[-1]) - else: - # If files cannot be found and local_files_only=True, - # the models might've been found if local_files_only=False - # Notify the user about that - if local_files_only: - raise ValueError( - "Cannot find the requested files in the cached path and outgoing traffic has been" - " disabled. To enable model look-ups and downloads online, set 'local_files_only'" - " to False." - ) - return None - - # From now on, etag is not None. - if os.path.exists(cache_path) and not force_download: - return cache_path - - # Prevent parallel downloads of the same file with a lock. - lock_path = cache_path + ".lock" - with FileLock(lock_path): - # If the download just completed while the lock was activated. - if os.path.exists(cache_path) and not force_download: - # Even if returning early like here, the lock will be released. 
- return cache_path - - if resume_download: - incomplete_path = cache_path + ".incomplete" - - @contextmanager - def _resumable_file_manager(): - with open(incomplete_path, "a+b") as f: - yield f - - temp_file_manager = _resumable_file_manager - if os.path.exists(incomplete_path): - resume_size = os.stat(incomplete_path).st_size - else: - resume_size = 0 - else: - temp_file_manager = partial(tempfile.NamedTemporaryFile, dir=cache_dir, delete=False) - resume_size = 0 - - # Download to temporary file, then copy to cache dir once finished. - # Otherwise you get corrupt cache entries if the download gets interrupted. - with temp_file_manager() as temp_file: - print( - "%s not found in cache or force_download set to True, downloading to %s", - url, - temp_file.name, - ) - - http_get( - url, - temp_file, - proxies=proxies, - resume_size=resume_size, - user_agent=user_agent, - ) - - os.replace(temp_file.name, cache_path) - - meta = {"url": url, "etag": etag} - meta_path = cache_path + ".json" - with open(meta_path, "w") as meta_file: - json.dump(meta, meta_file) - - return cache_path - - -def url_to_filename(url, etag=None): - url_bytes = url.encode("utf-8") - url_hash = insecure_hashlib.sha256(url_bytes) - filename = url_hash.hexdigest() - - if etag: - etag_bytes = etag.encode("utf-8") - etag_hash = insecure_hashlib.sha256(etag_bytes) - filename += "." + etag_hash.hexdigest() - - if url.endswith(".h5"): - filename += ".h5" - - return filename - - -def cached_path( - url_or_filename, - cache_dir=None, - force_download=False, - proxies=None, - resume_download=False, - user_agent=None, - extract_compressed_file=False, - force_extract=False, - local_files_only=False, -): - if cache_dir is None: - cache_dir = TRANSFORMERS_CACHE - if isinstance(url_or_filename, Path): - url_or_filename = str(url_or_filename) - if isinstance(cache_dir, Path): - cache_dir = str(cache_dir) - - if is_remote_url(url_or_filename): - # URL, so get it from the cache (downloading if necessary) - output_path = get_from_cache( - url_or_filename, - cache_dir=cache_dir, - force_download=force_download, - proxies=proxies, - resume_download=resume_download, - user_agent=user_agent, - local_files_only=local_files_only, - ) - elif os.path.exists(url_or_filename): - # File, and it exists. - output_path = url_or_filename - elif urlparse(url_or_filename).scheme == "": - # File, but it doesn't exist. - raise EnvironmentError("file {} not found".format(url_or_filename)) - else: - # Something unknown - raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename)) - - if extract_compressed_file: - if not is_zipfile(output_path) and not tarfile.is_tarfile(output_path): - return output_path - - # Path where we extract compressed archives - # We avoid '.' 
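The cache layout produced by `url_to_filename`/`get_from_cache` above is simple to restate: the on-disk name is the SHA-256 hex digest of the URL, with a dot plus the SHA-256 of the ETag appended when the server provided one (the code above routes this through `huggingface_hub.utils.insecure_hashlib`, which computes the same SHA-256 digest). A standalone sketch:

```python
from hashlib import sha256

def cache_filename(url, etag=None):
    # Mirrors url_to_filename above: sha256(url), plus "." + sha256(etag) when present,
    # plus a ".h5" suffix for TensorFlow weight files.
    name = sha256(url.encode("utf-8")).hexdigest()
    if etag:
        name += "." + sha256(etag.encode("utf-8")).hexdigest()
    if url.endswith(".h5"):
        name += ".h5"
    return name

print(cache_filename("https://cdn.huggingface.co/some-model/pytorch_model.bin", etag='"0123abc"'))
# prints <64 hex chars>.<64 hex chars>
```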
in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/" - output_dir, output_file = os.path.split(output_path) - output_extract_dir_name = output_file.replace(".", "-") + "-extracted" - output_path_extracted = os.path.join(output_dir, output_extract_dir_name) - - if os.path.isdir(output_path_extracted) and os.listdir(output_path_extracted) and not force_extract: - return output_path_extracted - - # Prevent parallel extractions - lock_path = output_path + ".lock" - with FileLock(lock_path): - shutil.rmtree(output_path_extracted, ignore_errors=True) - os.makedirs(output_path_extracted) - if is_zipfile(output_path): - with ZipFile(output_path, "r") as zip_file: - zip_file.extractall(output_path_extracted) - zip_file.close() - elif tarfile.is_tarfile(output_path): - tar_file = tarfile.open(output_path) - tar_file.extractall(output_path_extracted) - tar_file.close() - else: - raise EnvironmentError("Archive format of {} could not be identified".format(output_path)) - - return output_path_extracted - - return output_path - - -def get_data(query, delim=","): - assert isinstance(query, str) - if os.path.isfile(query): - with open(query) as f: - data = eval(f.read()) - else: - req = requests.get(query) - try: - data = requests.json() - except Exception: - data = req.content.decode() - assert data is not None, "could not connect" - try: - data = eval(data) - except Exception: - data = data.split("\n") - req.close() - return data - - -def get_image_from_url(url): - response = requests.get(url) - img = np.array(Image.open(BytesIO(response.content))) - return img - - -# to load legacy frcnn checkpoint from detectron -def load_frcnn_pkl_from_url(url): - fn = url.split("/")[-1] - if fn not in os.listdir(os.getcwd()): - wget.download(url) - with open(fn, "rb") as stream: - weights = pkl.load(stream) - model = weights.pop("model") - new = {} - for k, v in model.items(): - new[k] = torch.from_numpy(v) - if "running_var" in k: - zero = torch.tensor([0]) - k2 = k.replace("running_var", "num_batches_tracked") - new[k2] = zero - return new - - -def get_demo_path(): - print(f"{os.path.abspath(os.path.join(PATH, os.pardir))}/demo.ipynb") - - -def img_tensorize(im, input_format="RGB"): - assert isinstance(im, str) - if os.path.isfile(im): - img = cv2.imread(im) - else: - img = get_image_from_url(im) - assert img is not None, f"could not connect to: {im}" - img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) - if input_format == "RGB": - img = img[:, :, ::-1] - return img - - -def chunk(images, batch=1): - return (images[i : i + batch] for i in range(0, len(images), batch)) diff --git a/examples/research_projects/lxmert/visualizing_image.py b/examples/research_projects/lxmert/visualizing_image.py deleted file mode 100644 index dcfd8426ff4..00000000000 --- a/examples/research_projects/lxmert/visualizing_image.py +++ /dev/null @@ -1,500 +0,0 @@ -""" -coding=utf-8 -Copyright 2018, Antonio Mendoza Hao Tan, Mohit Bansal -Adapted From Facebook Inc, Detectron2 - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License.import copy -""" - -import colorsys -import io - -import cv2 -import matplotlib as mpl -import matplotlib.colors as mplc -import matplotlib.figure as mplfigure -import numpy as np -import torch -from matplotlib.backends.backend_agg import FigureCanvasAgg - -from utils import img_tensorize - - -_SMALL_OBJ = 1000 - - -class SingleImageViz: - def __init__( - self, - img, - scale=1.2, - edgecolor="g", - alpha=0.5, - linestyle="-", - saveas="test_out.jpg", - rgb=True, - pynb=False, - id2obj=None, - id2attr=None, - pad=0.7, - ): - """ - img: an RGB image of shape (H, W, 3). - """ - if isinstance(img, torch.Tensor): - img = img.numpy().astype("np.uint8") - if isinstance(img, str): - img = img_tensorize(img) - assert isinstance(img, np.ndarray) - - width, height = img.shape[1], img.shape[0] - fig = mplfigure.Figure(frameon=False) - dpi = fig.get_dpi() - width_in = (width * scale + 1e-2) / dpi - height_in = (height * scale + 1e-2) / dpi - fig.set_size_inches(width_in, height_in) - ax = fig.add_axes([0.0, 0.0, 1.0, 1.0]) - ax.axis("off") - ax.set_xlim(0.0, width) - ax.set_ylim(height) - - self.saveas = saveas - self.rgb = rgb - self.pynb = pynb - self.img = img - self.edgecolor = edgecolor - self.alpha = 0.5 - self.linestyle = linestyle - self.font_size = int(np.sqrt(min(height, width)) * scale // 3) - self.width = width - self.height = height - self.scale = scale - self.fig = fig - self.ax = ax - self.pad = pad - self.id2obj = id2obj - self.id2attr = id2attr - self.canvas = FigureCanvasAgg(fig) - - def add_box(self, box, color=None): - if color is None: - color = self.edgecolor - (x0, y0, x1, y1) = box - width = x1 - x0 - height = y1 - y0 - self.ax.add_patch( - mpl.patches.Rectangle( - (x0, y0), - width, - height, - fill=False, - edgecolor=color, - linewidth=self.font_size // 3, - alpha=self.alpha, - linestyle=self.linestyle, - ) - ) - - def draw_boxes(self, boxes, obj_ids=None, obj_scores=None, attr_ids=None, attr_scores=None): - if len(boxes.shape) > 2: - boxes = boxes[0] - if len(obj_ids.shape) > 1: - obj_ids = obj_ids[0] - if len(obj_scores.shape) > 1: - obj_scores = obj_scores[0] - if len(attr_ids.shape) > 1: - attr_ids = attr_ids[0] - if len(attr_scores.shape) > 1: - attr_scores = attr_scores[0] - if isinstance(boxes, torch.Tensor): - boxes = boxes.numpy() - if isinstance(boxes, list): - boxes = np.array(boxes) - assert isinstance(boxes, np.ndarray) - areas = np.prod(boxes[:, 2:] - boxes[:, :2], axis=1) - sorted_idxs = np.argsort(-areas).tolist() - boxes = boxes[sorted_idxs] if boxes is not None else None - obj_ids = obj_ids[sorted_idxs] if obj_ids is not None else None - obj_scores = obj_scores[sorted_idxs] if obj_scores is not None else None - attr_ids = attr_ids[sorted_idxs] if attr_ids is not None else None - attr_scores = attr_scores[sorted_idxs] if attr_scores is not None else None - - assigned_colors = [self._random_color(maximum=1) for _ in range(len(boxes))] - assigned_colors = [assigned_colors[idx] for idx in sorted_idxs] - if obj_ids is not None: - labels = self._create_text_labels_attr(obj_ids, obj_scores, attr_ids, attr_scores) - for i in range(len(boxes)): - color = assigned_colors[i] - self.add_box(boxes[i], color) - self.draw_labels(labels[i], boxes[i], color) - - def draw_labels(self, label, box, color): - x0, y0, x1, y1 = box - text_pos = (x0, y0) - instance_area = (y1 - y0) * (x1 - x0) - small = _SMALL_OBJ * self.scale - if instance_area < small or y1 - y0 < 40 * self.scale: 
- if y1 >= self.height - 5: - text_pos = (x1, y0) - else: - text_pos = (x0, y1) - - height_ratio = (y1 - y0) / np.sqrt(self.height * self.width) - lighter_color = self._change_color_brightness(color, brightness_factor=0.7) - font_size = np.clip((height_ratio - 0.02) / 0.08 + 1, 1.2, 2) - font_size *= 0.75 * self.font_size - - self.draw_text( - text=label, - position=text_pos, - color=lighter_color, - ) - - def draw_text( - self, - text, - position, - color="g", - ha="left", - ): - rotation = 0 - font_size = self.font_size - color = np.maximum(list(mplc.to_rgb(color)), 0.2) - color[np.argmax(color)] = max(0.8, np.max(color)) - bbox = { - "facecolor": "black", - "alpha": self.alpha, - "pad": self.pad, - "edgecolor": "none", - } - x, y = position - self.ax.text( - x, - y, - text, - size=font_size * self.scale, - family="sans-serif", - bbox=bbox, - verticalalignment="top", - horizontalalignment=ha, - color=color, - zorder=10, - rotation=rotation, - ) - - def save(self, saveas=None): - if saveas is None: - saveas = self.saveas - if saveas.lower().endswith(".jpg") or saveas.lower().endswith(".png"): - cv2.imwrite( - saveas, - self._get_buffer()[:, :, ::-1], - ) - else: - self.fig.savefig(saveas) - - def _create_text_labels_attr(self, classes, scores, attr_classes, attr_scores): - labels = [self.id2obj[i] for i in classes] - attr_labels = [self.id2attr[i] for i in attr_classes] - labels = [ - f"{label} {score:.2f} {attr} {attr_score:.2f}" - for label, score, attr, attr_score in zip(labels, scores, attr_labels, attr_scores) - ] - return labels - - def _create_text_labels(self, classes, scores): - labels = [self.id2obj[i] for i in classes] - if scores is not None: - if labels is None: - labels = ["{:.0f}%".format(s * 100) for s in scores] - else: - labels = ["{} {:.0f}%".format(li, s * 100) for li, s in zip(labels, scores)] - return labels - - def _random_color(self, maximum=255): - idx = np.random.randint(0, len(_COLORS)) - ret = _COLORS[idx] * maximum - if not self.rgb: - ret = ret[::-1] - return ret - - def _get_buffer(self): - if not self.pynb: - s, (width, height) = self.canvas.print_to_buffer() - if (width, height) != (self.width, self.height): - img = cv2.resize(self.img, (width, height)) - else: - img = self.img - else: - buf = io.BytesIO() # works for cairo backend - self.canvas.print_rgba(buf) - width, height = self.width, self.height - s = buf.getvalue() - img = self.img - - buffer = np.frombuffer(s, dtype="uint8") - img_rgba = buffer.reshape(height, width, 4) - rgb, alpha = np.split(img_rgba, [3], axis=2) - - try: - import numexpr as ne # fuse them with numexpr - - visualized_image = ne.evaluate("img * (1 - alpha / 255.0) + rgb * (alpha / 255.0)") - except ImportError: - alpha = alpha.astype("float32") / 255.0 - visualized_image = img * (1 - alpha) + rgb * alpha - - return visualized_image.astype("uint8") - - def _change_color_brightness(self, color, brightness_factor): - assert brightness_factor >= -1.0 and brightness_factor <= 1.0 - color = mplc.to_rgb(color) - polygon_color = colorsys.rgb_to_hls(*mplc.to_rgb(color)) - modified_lightness = polygon_color[1] + (brightness_factor * polygon_color[1]) - modified_lightness = 0.0 if modified_lightness < 0.0 else modified_lightness - modified_lightness = 1.0 if modified_lightness > 1.0 else modified_lightness - modified_color = colorsys.hls_to_rgb(polygon_color[0], modified_lightness, polygon_color[2]) - return modified_color - - -# Color map -_COLORS = ( - np.array( - [ - 0.000, - 0.447, - 0.741, - 0.850, - 0.325, - 0.098, - 0.929, - 0.694, 
- 0.125, - 0.494, - 0.184, - 0.556, - 0.466, - 0.674, - 0.188, - 0.301, - 0.745, - 0.933, - 0.635, - 0.078, - 0.184, - 0.300, - 0.300, - 0.300, - 0.600, - 0.600, - 0.600, - 1.000, - 0.000, - 0.000, - 1.000, - 0.500, - 0.000, - 0.749, - 0.749, - 0.000, - 0.000, - 1.000, - 0.000, - 0.000, - 0.000, - 1.000, - 0.667, - 0.000, - 1.000, - 0.333, - 0.333, - 0.000, - 0.333, - 0.667, - 0.000, - 0.333, - 1.000, - 0.000, - 0.667, - 0.333, - 0.000, - 0.667, - 0.667, - 0.000, - 0.667, - 1.000, - 0.000, - 1.000, - 0.333, - 0.000, - 1.000, - 0.667, - 0.000, - 1.000, - 1.000, - 0.000, - 0.000, - 0.333, - 0.500, - 0.000, - 0.667, - 0.500, - 0.000, - 1.000, - 0.500, - 0.333, - 0.000, - 0.500, - 0.333, - 0.333, - 0.500, - 0.333, - 0.667, - 0.500, - 0.333, - 1.000, - 0.500, - 0.667, - 0.000, - 0.500, - 0.667, - 0.333, - 0.500, - 0.667, - 0.667, - 0.500, - 0.667, - 1.000, - 0.500, - 1.000, - 0.000, - 0.500, - 1.000, - 0.333, - 0.500, - 1.000, - 0.667, - 0.500, - 1.000, - 1.000, - 0.500, - 0.000, - 0.333, - 1.000, - 0.000, - 0.667, - 1.000, - 0.000, - 1.000, - 1.000, - 0.333, - 0.000, - 1.000, - 0.333, - 0.333, - 1.000, - 0.333, - 0.667, - 1.000, - 0.333, - 1.000, - 1.000, - 0.667, - 0.000, - 1.000, - 0.667, - 0.333, - 1.000, - 0.667, - 0.667, - 1.000, - 0.667, - 1.000, - 1.000, - 1.000, - 0.000, - 1.000, - 1.000, - 0.333, - 1.000, - 1.000, - 0.667, - 1.000, - 0.333, - 0.000, - 0.000, - 0.500, - 0.000, - 0.000, - 0.667, - 0.000, - 0.000, - 0.833, - 0.000, - 0.000, - 1.000, - 0.000, - 0.000, - 0.000, - 0.167, - 0.000, - 0.000, - 0.333, - 0.000, - 0.000, - 0.500, - 0.000, - 0.000, - 0.667, - 0.000, - 0.000, - 0.833, - 0.000, - 0.000, - 1.000, - 0.000, - 0.000, - 0.000, - 0.167, - 0.000, - 0.000, - 0.333, - 0.000, - 0.000, - 0.500, - 0.000, - 0.000, - 0.667, - 0.000, - 0.000, - 0.833, - 0.000, - 0.000, - 1.000, - 0.000, - 0.000, - 0.000, - 0.143, - 0.143, - 0.143, - 0.857, - 0.857, - 0.857, - 1.000, - 1.000, - 1.000, - ] - ) - .astype(np.float32) - .reshape(-1, 3) -) diff --git a/examples/research_projects/mlm_wwm/README.md b/examples/research_projects/mlm_wwm/README.md deleted file mode 100644 index bf5aa941082..00000000000 --- a/examples/research_projects/mlm_wwm/README.md +++ /dev/null @@ -1,98 +0,0 @@ - - -## Whole Word Mask Language Model - - -These scripts leverage the 🤗 Datasets library and the Trainer API. You can easily customize them to your needs if you -need extra processing on your datasets. - -The following examples, will run on a datasets hosted on our [hub](https://huggingface.co/datasets) or with your own -text files for training and validation. We give examples of both below. - - - -The BERT authors released a new version of BERT using Whole Word Masking in May 2019. Instead of masking randomly -selected tokens (which may be part of words), they mask randomly selected words (masking all the tokens corresponding -to that word). This technique has been refined for Chinese in [this paper](https://arxiv.org/abs/1906.08101). - -To fine-tune a model using whole word masking, use the following script: -```bash -python run_mlm_wwm.py \ - --model_name_or_path FacebookAI/roberta-base \ - --dataset_name wikitext \ - --dataset_config_name wikitext-2-raw-v1 \ - --do_train \ - --do_eval \ - --output_dir /tmp/test-mlm-wwm -``` - -For Chinese models, we need to generate a reference files (which requires the ltp library), because it's tokenized at -the character level. - -**Q :** Why a reference file? 
- -**A :** Suppose we have a Chinese sentence like: `我喜欢你`. The original Chinese-BERT will tokenize it as -`['我','喜','欢','你']` (character level). But `喜欢` is a whole word. As a proxy for whole word masking, we need a result -like `['我','喜','##欢','你']`, so we need a reference file that tells the model which positions of the original BERT tokens -should be prefixed with `##`. - -**Q :** Why LTP? - -**A :** Because the best-known Chinese WWM BERT is [Chinese-BERT-wwm](https://github.com/ymcui/Chinese-BERT-wwm) by HIT. -It works well on many Chinese tasks such as CLUE (the Chinese GLUE). They use LTP, so if we want to fine-tune their model, -we need LTP. - -You could run the following: - - -```bash -export TRAIN_FILE=/path/to/train/file -export LTP_RESOURCE=/path/to/ltp/tokenizer -export BERT_RESOURCE=/path/to/bert/tokenizer -export SAVE_PATH=/path/to/data/ref.txt - -python run_chinese_ref.py \ - --file_name=$TRAIN_FILE \ - --ltp=$LTP_RESOURCE \ - --bert=$BERT_RESOURCE \ - --save_path=$SAVE_PATH -``` - -Then you can run the script like this: - - -```bash -export TRAIN_FILE=/path/to/train/file -export VALIDATION_FILE=/path/to/validation/file -export TRAIN_REF_FILE=/path/to/train/chinese_ref/file -export VALIDATION_REF_FILE=/path/to/validation/chinese_ref/file -export OUTPUT_DIR=/tmp/test-mlm-wwm - -python run_mlm_wwm.py \ - --model_name_or_path FacebookAI/roberta-base \ - --train_file $TRAIN_FILE \ - --validation_file $VALIDATION_FILE \ - --train_ref_file $TRAIN_REF_FILE \ - --validation_ref_file $VALIDATION_REF_FILE \ - --do_train \ - --do_eval \ - --output_dir $OUTPUT_DIR -``` - -**Note1:** On TPU, you should use the flag `--pad_to_max_length` to make sure all your batches have the same length. - -**Note2:** If you have any questions or something goes wrong when running this code, don't hesitate to ping @wlhgtc. diff --git a/examples/research_projects/mlm_wwm/requirements.txt b/examples/research_projects/mlm_wwm/requirements.txt deleted file mode 100644 index 2d0f26bd4dc..00000000000 --- a/examples/research_projects/mlm_wwm/requirements.txt +++ /dev/null @@ -1,4 +0,0 @@ -datasets >= 1.1.3 -sentencepiece != 0.1.92 -protobuf -ltp diff --git a/examples/research_projects/mlm_wwm/run_chinese_ref.py b/examples/research_projects/mlm_wwm/run_chinese_ref.py deleted file mode 100644 index eca89df9798..00000000000 --- a/examples/research_projects/mlm_wwm/run_chinese_ref.py +++ /dev/null @@ -1,164 +0,0 @@ -import argparse -import json -from typing import List - -from ltp import LTP - -from transformers.models.bert.tokenization_bert import BertTokenizer - - -def _is_chinese_char(cp): - """Checks whether CP is the codepoint of a CJK character.""" - # This defines a "chinese character" as anything in the CJK Unicode block: - # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) - # - # Note that the CJK Unicode block is NOT all Japanese and Korean characters, - # despite its name. The modern Korean Hangul alphabet is a different block, - # as is Japanese Hiragana and Katakana. Those alphabets are used to write - # space-separated words, so they are not treated specially and handled - # like all of the other languages. 
- if ( - (cp >= 0x4E00 and cp <= 0x9FFF) - or (cp >= 0x3400 and cp <= 0x4DBF) # - or (cp >= 0x20000 and cp <= 0x2A6DF) # - or (cp >= 0x2A700 and cp <= 0x2B73F) # - or (cp >= 0x2B740 and cp <= 0x2B81F) # - or (cp >= 0x2B820 and cp <= 0x2CEAF) # - or (cp >= 0xF900 and cp <= 0xFAFF) - or (cp >= 0x2F800 and cp <= 0x2FA1F) # - ): # - return True - - return False - - -def is_chinese(word: str): - # word like '180' or '身高' or '神' - for char in word: - char = ord(char) - if not _is_chinese_char(char): - return 0 - return 1 - - -def get_chinese_word(tokens: List[str]): - word_set = set() - - for token in tokens: - chinese_word = len(token) > 1 and is_chinese(token) - if chinese_word: - word_set.add(token) - word_list = list(word_set) - return word_list - - -def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set()): - if not chinese_word_set: - return bert_tokens - max_word_len = max([len(w) for w in chinese_word_set]) - - bert_word = bert_tokens - start, end = 0, len(bert_word) - while start < end: - single_word = True - if is_chinese(bert_word[start]): - l = min(end - start, max_word_len) - for i in range(l, 1, -1): - whole_word = "".join(bert_word[start : start + i]) - if whole_word in chinese_word_set: - for j in range(start + 1, start + i): - bert_word[j] = "##" + bert_word[j] - start = start + i - single_word = False - break - if single_word: - start += 1 - return bert_word - - -def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer): - ltp_res = [] - - for i in range(0, len(lines), 100): - res = ltp_tokenizer.pipeline(lines[i : i + 100], tasks=["cws"]).cws - res = [get_chinese_word(r) for r in res] - ltp_res.extend(res) - assert len(ltp_res) == len(lines) - - bert_res = [] - for i in range(0, len(lines), 100): - res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512) - bert_res.extend(res["input_ids"]) - assert len(bert_res) == len(lines) - - ref_ids = [] - for input_ids, chinese_word in zip(bert_res, ltp_res): - input_tokens = [] - for id in input_ids: - token = bert_tokenizer._convert_id_to_token(id) - input_tokens.append(token) - input_tokens = add_sub_symbol(input_tokens, chinese_word) - ref_id = [] - # We only save pos of chinese subwords start with ##, which mean is part of a whole word. 
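Tying this back to the `我喜欢你` example in the README above: `add_sub_symbol` marks every non-initial character of an LTP-detected whole word with `##`, and `prepare_ref` then records the positions of those `##` tokens. A small usage sketch, assuming the helper functions defined above are in scope:

```python
bert_tokens = ["我", "喜", "欢", "你"]   # character-level BERT tokenization
chinese_word_set = {"喜欢"}              # whole words found by the LTP segmenter
print(add_sub_symbol(list(bert_tokens), chinese_word_set))
# ['我', '喜', '##欢', '你'] -> the reference file records the position of the '##' token
# (shifted by one in the real script because of the leading [CLS] token).
```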
- for i, token in enumerate(input_tokens): - if token[:2] == "##": - clean_token = token[2:] - # save chinese tokens' pos - if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)): - ref_id.append(i) - ref_ids.append(ref_id) - - assert len(ref_ids) == len(bert_res) - - return ref_ids - - -def main(args): - # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm) - # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp) - with open(args.file_name, "r", encoding="utf-8") as f: - data = f.readlines() - data = [line.strip() for line in data if len(line) > 0 and not line.isspace()] # avoid delimiter like '\u2029' - ltp_tokenizer = LTP(args.ltp) # faster in GPU device - bert_tokenizer = BertTokenizer.from_pretrained(args.bert) - - ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer) - - with open(args.save_path, "w", encoding="utf-8") as f: - data = [json.dumps(ref) + "\n" for ref in ref_ids] - f.writelines(data) - - -if __name__ == "__main__": - parser = argparse.ArgumentParser(description="prepare_chinese_ref") - parser.add_argument( - "--file_name", - required=False, - type=str, - default="./resources/chinese-demo.txt", - help="file need process, same as training data in lm", - ) - parser.add_argument( - "--ltp", - required=False, - type=str, - default="./resources/ltp", - help="resources for LTP tokenizer, usually a path", - ) - parser.add_argument( - "--bert", - required=False, - type=str, - default="./resources/robert", - help="resources for Bert tokenizer", - ) - parser.add_argument( - "--save_path", - required=False, - type=str, - default="./resources/ref.txt", - help="path to save res", - ) - - args = parser.parse_args() - main(args) diff --git a/examples/research_projects/mlm_wwm/run_mlm_wwm.py b/examples/research_projects/mlm_wwm/run_mlm_wwm.py deleted file mode 100644 index 629026bdb20..00000000000 --- a/examples/research_projects/mlm_wwm/run_mlm_wwm.py +++ /dev/null @@ -1,435 +0,0 @@ -# coding=utf-8 -# Copyright 2020 The HuggingFace Team All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" -Fine-tuning the library models for masked language modeling (BERT, ALBERT, RoBERTa...) with whole word masking on a -text file or a dataset. - -Here is the full list of checkpoints on the hub that can be fine-tuned by this script: -https://huggingface.co/models?filter=fill-mask -""" -# You can also adapt this script on your own masked language modeling task. Pointers for this are left as comments. 
- -import json -import logging -import math -import os -import sys -from dataclasses import dataclass, field -from typing import Optional - -from datasets import Dataset, load_dataset - -import transformers -from transformers import ( - CONFIG_MAPPING, - MODEL_FOR_MASKED_LM_MAPPING, - AutoConfig, - AutoModelForMaskedLM, - AutoTokenizer, - DataCollatorForWholeWordMask, - HfArgumentParser, - Trainer, - TrainingArguments, - set_seed, -) -from transformers.trainer_utils import get_last_checkpoint, is_main_process - - -logger = logging.getLogger(__name__) -MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys()) -MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) - - -@dataclass -class ModelArguments: - """ - Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch. - """ - - model_name_or_path: Optional[str] = field( - default=None, - metadata={ - "help": ( - "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch." - ) - }, - ) - model_type: Optional[str] = field( - default=None, - metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)}, - ) - config_overrides: Optional[str] = field( - default=None, - metadata={ - "help": ( - "Override some existing default config settings when a model is trained from scratch. Example: " - "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index" - ) - }, - ) - config_name: Optional[str] = field( - default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} - ) - tokenizer_name: Optional[str] = field( - default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} - ) - cache_dir: Optional[str] = field( - default=None, - metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, - ) - use_fast_tokenizer: bool = field( - default=True, - metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, - ) - model_revision: str = field( - default="main", - metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, - ) - use_auth_token: bool = field( - default=False, - metadata={ - "help": ( - "Will use the token generated when running `huggingface-cli login` (necessary to use this script " - "with private models)." - ) - }, - ) - - def __post_init__(self): - if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None): - raise ValueError( - "--config_overrides can't be used in combination with --config_name or --model_name_or_path" - ) - - -@dataclass -class DataTrainingArguments: - """ - Arguments pertaining to what data we are going to input our model for training and eval. 
- """ - - dataset_name: Optional[str] = field( - default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."} - ) - dataset_config_name: Optional[str] = field( - default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} - ) - train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."}) - validation_file: Optional[str] = field( - default=None, - metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."}, - ) - train_ref_file: Optional[str] = field( - default=None, - metadata={"help": "An optional input train ref data file for whole word masking in Chinese."}, - ) - validation_ref_file: Optional[str] = field( - default=None, - metadata={"help": "An optional input validation ref data file for whole word masking in Chinese."}, - ) - overwrite_cache: bool = field( - default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} - ) - validation_split_percentage: Optional[int] = field( - default=5, - metadata={ - "help": "The percentage of the train set used as validation set in case there's no validation split" - }, - ) - max_seq_length: Optional[int] = field( - default=None, - metadata={ - "help": ( - "The maximum total input sequence length after tokenization. Sequences longer " - "than this will be truncated. Default to the max input length of the model." - ) - }, - ) - preprocessing_num_workers: Optional[int] = field( - default=None, - metadata={"help": "The number of processes to use for the preprocessing."}, - ) - mlm_probability: float = field( - default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"} - ) - pad_to_max_length: bool = field( - default=False, - metadata={ - "help": ( - "Whether to pad all samples to `max_seq_length`. " - "If False, will pad the samples dynamically when batching to the maximum length in the batch." - ) - }, - ) - - def __post_init__(self): - if self.train_file is not None: - extension = self.train_file.split(".")[-1] - assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file." - if self.validation_file is not None: - extension = self.validation_file.split(".")[-1] - assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file." - - -def add_chinese_references(dataset, ref_file): - with open(ref_file, "r", encoding="utf-8") as f: - refs = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())] - assert len(dataset) == len(refs) - - dataset_dict = {c: dataset[c] for c in dataset.column_names} - dataset_dict["chinese_ref"] = refs - return Dataset.from_dict(dataset_dict) - - -def main(): - # See all possible arguments in src/transformers/training_args.py - # or by passing the --help flag to this script. - # We now keep distinct sets of args, for a cleaner separation of concerns. - - parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) - if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): - # If we pass only one argument to the script and it's the path to a json file, - # let's parse it to get our arguments. - model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) - else: - model_args, data_args, training_args = parser.parse_args_into_dataclasses() - - # Detecting last checkpoint. 
- last_checkpoint = None - if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: - last_checkpoint = get_last_checkpoint(training_args.output_dir) - if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: - raise ValueError( - f"Output directory ({training_args.output_dir}) already exists and is not empty. " - "Use --overwrite_output_dir to overcome." - ) - elif last_checkpoint is not None: - logger.info( - f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " - "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." - ) - - # Setup logging - logging.basicConfig( - format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", - datefmt="%m/%d/%Y %H:%M:%S", - handlers=[logging.StreamHandler(sys.stdout)], - ) - logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN) - - # Log on each process the small summary: - logger.warning( - f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" - + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}" - ) - # Set the verbosity to info of the Transformers logger (on main process only): - if is_main_process(training_args.local_rank): - transformers.utils.logging.set_verbosity_info() - transformers.utils.logging.enable_default_handler() - transformers.utils.logging.enable_explicit_format() - logger.info("Training/evaluation parameters %s", training_args) - - # Set seed before initializing model. - set_seed(training_args.seed) - - # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) - # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ - # (the dataset will be downloaded automatically from the datasets Hub). - # - # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called - # 'text' is found. You can easily tweak this behavior (see below). - # - # In distributed training, the load_dataset function guarantee that only one local process can concurrently - # download the dataset. - if data_args.dataset_name is not None: - # Downloading and loading a dataset from the hub. - datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name) - if "validation" not in datasets.keys(): - datasets["validation"] = load_dataset( - data_args.dataset_name, - data_args.dataset_config_name, - split=f"train[:{data_args.validation_split_percentage}%]", - ) - datasets["train"] = load_dataset( - data_args.dataset_name, - data_args.dataset_config_name, - split=f"train[{data_args.validation_split_percentage}%:]", - ) - else: - data_files = {} - if data_args.train_file is not None: - data_files["train"] = data_args.train_file - extension = data_args.train_file.split(".")[-1] - if data_args.validation_file is not None: - data_files["validation"] = data_args.validation_file - extension = data_args.validation_file.split(".")[-1] - if extension == "txt": - extension = "text" - datasets = load_dataset(extension, data_files=data_files) - # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at - # https://huggingface.co/docs/datasets/loading_datasets. 
- - # Load pretrained model and tokenizer - # - # Distributed training: - # The .from_pretrained methods guarantee that only one local process can concurrently - # download model & vocab. - config_kwargs = { - "cache_dir": model_args.cache_dir, - "revision": model_args.model_revision, - "use_auth_token": True if model_args.use_auth_token else None, - } - if model_args.config_name: - config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs) - elif model_args.model_name_or_path: - config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs) - else: - config = CONFIG_MAPPING[model_args.model_type]() - logger.warning("You are instantiating a new config instance from scratch.") - if model_args.config_overrides is not None: - logger.info(f"Overriding config: {model_args.config_overrides}") - config.update_from_string(model_args.config_overrides) - logger.info(f"New config: {config}") - - tokenizer_kwargs = { - "cache_dir": model_args.cache_dir, - "use_fast": model_args.use_fast_tokenizer, - "revision": model_args.model_revision, - "use_auth_token": True if model_args.use_auth_token else None, - } - if model_args.tokenizer_name: - tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs) - elif model_args.model_name_or_path: - tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs) - else: - raise ValueError( - "You are instantiating a new tokenizer from scratch. This is not supported by this script. " - "You can do it from another script, save it, and load it from here, using --tokenizer_name." - ) - - if model_args.model_name_or_path: - model = AutoModelForMaskedLM.from_pretrained( - model_args.model_name_or_path, - from_tf=bool(".ckpt" in model_args.model_name_or_path), - config=config, - cache_dir=model_args.cache_dir, - revision=model_args.model_revision, - token=True if model_args.use_auth_token else None, - ) - else: - logger.info("Training new model from scratch") - model = AutoModelForMaskedLM.from_config(config) - - model.resize_token_embeddings(len(tokenizer)) - - # Preprocessing the datasets. - # First we tokenize all the texts. 
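    # A rough sketch of the downstream behaviour, assuming the collator's Chinese-reference support
    # in the installed version of transformers: DataCollatorForWholeWordMask accepts an optional
    # "chinese_ref" field per example and prefixes those token positions with "##" before sampling
    # whole-word masks, which is why remove_unused_columns is forced to False further below whenever
    # reference files are provided.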
- if training_args.do_train: - column_names = datasets["train"].column_names - else: - column_names = datasets["validation"].column_names - text_column_name = "text" if "text" in column_names else column_names[0] - - padding = "max_length" if data_args.pad_to_max_length else False - - def tokenize_function(examples): - # Remove empty lines - examples["text"] = [line for line in examples["text"] if len(line) > 0 and not line.isspace()] - return tokenizer(examples["text"], padding=padding, truncation=True, max_length=data_args.max_seq_length) - - tokenized_datasets = datasets.map( - tokenize_function, - batched=True, - num_proc=data_args.preprocessing_num_workers, - remove_columns=[text_column_name], - load_from_cache_file=not data_args.overwrite_cache, - ) - - # Add the chinese references if provided - if data_args.train_ref_file is not None: - tokenized_datasets["train"] = add_chinese_references(tokenized_datasets["train"], data_args.train_ref_file) - if data_args.validation_ref_file is not None: - tokenized_datasets["validation"] = add_chinese_references( - tokenized_datasets["validation"], data_args.validation_ref_file - ) - # If we have ref files, need to avoid it removed by trainer - has_ref = data_args.train_ref_file or data_args.validation_ref_file - if has_ref: - training_args.remove_unused_columns = False - - # Data collator - # This one will take care of randomly masking the tokens. - data_collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm_probability=data_args.mlm_probability) - - # Initialize our Trainer - trainer = Trainer( - model=model, - args=training_args, - train_dataset=tokenized_datasets["train"] if training_args.do_train else None, - eval_dataset=tokenized_datasets["validation"] if training_args.do_eval else None, - tokenizer=tokenizer, - data_collator=data_collator, - ) - - # Training - if training_args.do_train: - if last_checkpoint is not None: - checkpoint = last_checkpoint - elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path): - checkpoint = model_args.model_name_or_path - else: - checkpoint = None - train_result = trainer.train(resume_from_checkpoint=checkpoint) - trainer.save_model() # Saves the tokenizer too for easy upload - - output_train_file = os.path.join(training_args.output_dir, "train_results.txt") - if trainer.is_world_process_zero(): - with open(output_train_file, "w") as writer: - logger.info("***** Train results *****") - for key, value in sorted(train_result.metrics.items()): - logger.info(f" {key} = {value}") - writer.write(f"{key} = {value}\n") - - # Need to save the state, since Trainer.save_model saves only the tokenizer with the model - trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json")) - - # Evaluation - results = {} - if training_args.do_eval: - logger.info("*** Evaluate ***") - - eval_output = trainer.evaluate() - - perplexity = math.exp(eval_output["eval_loss"]) - results["perplexity"] = perplexity - - output_eval_file = os.path.join(training_args.output_dir, "eval_results_mlm_wwm.txt") - if trainer.is_world_process_zero(): - with open(output_eval_file, "w") as writer: - logger.info("***** Eval results *****") - for key, value in sorted(results.items()): - logger.info(f" {key} = {value}") - writer.write(f"{key} = {value}\n") - - return results - - -def _mp_fn(index): - # For xla_spawn (TPUs) - main() - - -if __name__ == "__main__": - main() diff --git a/examples/research_projects/mm-imdb/README.md b/examples/research_projects/mm-imdb/README.md 
deleted file mode 100644 index 68b2f15159e..00000000000 --- a/examples/research_projects/mm-imdb/README.md +++ /dev/null @@ -1,23 +0,0 @@ -## MM-IMDb - -Based on the script [`run_mmimdb.py`](https://github.com/huggingface/transformers/blob/main/examples/research_projects/mm-imdb/run_mmimdb.py). - -[MM-IMDb](http://lisi1.unal.edu.co/mmimdb/) is a Multimodal dataset with around 26,000 movies including images, plots and other metadata. - -### Training on MM-IMDb - -```bash -python run_mmimdb.py \ - --data_dir /path/to/mmimdb/dataset/ \ - --model_type bert \ - --model_name_or_path google-bert/bert-base-uncased \ - --output_dir /path/to/save/dir/ \ - --do_train \ - --do_eval \ - --max_seq_len 512 \ - --gradient_accumulation_steps 20 \ - --num_image_embeds 3 \ - --num_train_epochs 100 \ - --patience 5 -``` - diff --git a/examples/research_projects/mm-imdb/run_mmimdb.py b/examples/research_projects/mm-imdb/run_mmimdb.py deleted file mode 100644 index 686691e0b9c..00000000000 --- a/examples/research_projects/mm-imdb/run_mmimdb.py +++ /dev/null @@ -1,575 +0,0 @@ -# coding=utf-8 -# Copyright (c) Facebook, Inc. and its affiliates. -# Copyright (c) HuggingFace Inc. team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Finetuning the library models for multimodal multiclass prediction on MM-IMDB dataset.""" - -import argparse -import glob -import json -import logging -import os -import random - -import numpy as np -import torch -from sklearn.metrics import f1_score -from torch import nn -from torch.utils.data import DataLoader, RandomSampler, SequentialSampler -from torch.utils.data.distributed import DistributedSampler -from tqdm import tqdm, trange -from utils_mmimdb import ImageEncoder, JsonlDataset, collate_fn, get_image_transforms, get_mmimdb_labels - -import transformers -from transformers import ( - WEIGHTS_NAME, - AdamW, - AutoConfig, - AutoModel, - AutoTokenizer, - MMBTConfig, - MMBTForClassification, - get_linear_schedule_with_warmup, -) -from transformers.trainer_utils import is_main_process - - -try: - from torch.utils.tensorboard import SummaryWriter -except ImportError: - from tensorboardX import SummaryWriter - - -logger = logging.getLogger(__name__) - - -def set_seed(args): - random.seed(args.seed) - np.random.seed(args.seed) - torch.manual_seed(args.seed) - if args.n_gpu > 0: - torch.cuda.manual_seed_all(args.seed) - - -def train(args, train_dataset, model, tokenizer, criterion): - """Train the model""" - if args.local_rank in [-1, 0]: - tb_writer = SummaryWriter() - - args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu) - train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset) - train_dataloader = DataLoader( - train_dataset, - sampler=train_sampler, - batch_size=args.train_batch_size, - collate_fn=collate_fn, - num_workers=args.num_workers, - ) - - if args.max_steps > 0: - t_total = args.max_steps - args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1 - 
else: - t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs - - # Prepare optimizer and schedule (linear warmup and decay) - no_decay = ["bias", "LayerNorm.weight"] - optimizer_grouped_parameters = [ - { - "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], - "weight_decay": args.weight_decay, - }, - {"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0}, - ] - - optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) - scheduler = get_linear_schedule_with_warmup( - optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total - ) - if args.fp16: - try: - from apex import amp - except ImportError: - raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") - model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level) - - # multi-gpu training (should be after apex fp16 initialization) - if args.n_gpu > 1: - model = nn.DataParallel(model) - - # Distributed training (should be after apex fp16 initialization) - if args.local_rank != -1: - model = nn.parallel.DistributedDataParallel( - model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True - ) - - # Train! - logger.info("***** Running training *****") - logger.info(" Num examples = %d", len(train_dataset)) - logger.info(" Num Epochs = %d", args.num_train_epochs) - logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size) - logger.info( - " Total train batch size (w. parallel, distributed & accumulation) = %d", - args.train_batch_size - * args.gradient_accumulation_steps - * (torch.distributed.get_world_size() if args.local_rank != -1 else 1), - ) - logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps) - logger.info(" Total optimization steps = %d", t_total) - - global_step = 0 - tr_loss, logging_loss = 0.0, 0.0 - best_f1, n_no_improve = 0, 0 - model.zero_grad() - train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0]) - set_seed(args) # Added here for reproducibility - for _ in train_iterator: - epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0]) - for step, batch in enumerate(epoch_iterator): - model.train() - batch = tuple(t.to(args.device) for t in batch) - labels = batch[5] - inputs = { - "input_ids": batch[0], - "input_modal": batch[2], - "attention_mask": batch[1], - "modal_start_tokens": batch[3], - "modal_end_tokens": batch[4], - } - outputs = model(**inputs) - logits = outputs[0] # model outputs are always tuple in transformers (see doc) - loss = criterion(logits, labels) - - if args.n_gpu > 1: - loss = loss.mean() # mean() to average on multi-gpu parallel training - if args.gradient_accumulation_steps > 1: - loss = loss / args.gradient_accumulation_steps - - if args.fp16: - with amp.scale_loss(loss, optimizer) as scaled_loss: - scaled_loss.backward() - else: - loss.backward() - - tr_loss += loss.item() - if (step + 1) % args.gradient_accumulation_steps == 0: - if args.fp16: - nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm) - else: - nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm) - - optimizer.step() - scheduler.step() # Update learning rate schedule - model.zero_grad() - global_step += 1 - - if args.local_rank in [-1, 0] and args.logging_steps > 0 and 
global_step % args.logging_steps == 0: - logs = {} - if ( - args.local_rank == -1 and args.evaluate_during_training - ): # Only evaluate when single GPU otherwise metrics may not average well - results = evaluate(args, model, tokenizer, criterion) - for key, value in results.items(): - eval_key = "eval_{}".format(key) - logs[eval_key] = value - - loss_scalar = (tr_loss - logging_loss) / args.logging_steps - learning_rate_scalar = scheduler.get_lr()[0] - logs["learning_rate"] = learning_rate_scalar - logs["loss"] = loss_scalar - logging_loss = tr_loss - - for key, value in logs.items(): - tb_writer.add_scalar(key, value, global_step) - print(json.dumps({**logs, **{"step": global_step}})) - - if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0: - # Save model checkpoint - output_dir = os.path.join(args.output_dir, "checkpoint-{}".format(global_step)) - if not os.path.exists(output_dir): - os.makedirs(output_dir) - model_to_save = ( - model.module if hasattr(model, "module") else model - ) # Take care of distributed/parallel training - torch.save(model_to_save.state_dict(), os.path.join(output_dir, WEIGHTS_NAME)) - torch.save(args, os.path.join(output_dir, "training_args.bin")) - logger.info("Saving model checkpoint to %s", output_dir) - - if args.max_steps > 0 and global_step > args.max_steps: - epoch_iterator.close() - break - if args.max_steps > 0 and global_step > args.max_steps: - train_iterator.close() - break - - if args.local_rank == -1: - results = evaluate(args, model, tokenizer, criterion) - if results["micro_f1"] > best_f1: - best_f1 = results["micro_f1"] - n_no_improve = 0 - else: - n_no_improve += 1 - - if n_no_improve > args.patience: - train_iterator.close() - break - - if args.local_rank in [-1, 0]: - tb_writer.close() - - return global_step, tr_loss / global_step - - -def evaluate(args, model, tokenizer, criterion, prefix=""): - # Loop to handle MNLI double evaluation (matched, mis-matched) - eval_output_dir = args.output_dir - eval_dataset = load_examples(args, tokenizer, evaluate=True) - - if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]: - os.makedirs(eval_output_dir) - - args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu) - # Note that DistributedSampler samples randomly - eval_sampler = SequentialSampler(eval_dataset) - eval_dataloader = DataLoader( - eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size, collate_fn=collate_fn - ) - - # multi-gpu eval - if args.n_gpu > 1 and not isinstance(model, nn.DataParallel): - model = nn.DataParallel(model) - - # Eval! 
- logger.info("***** Running evaluation {} *****".format(prefix)) - logger.info(" Num examples = %d", len(eval_dataset)) - logger.info(" Batch size = %d", args.eval_batch_size) - eval_loss = 0.0 - nb_eval_steps = 0 - preds = None - out_label_ids = None - for batch in tqdm(eval_dataloader, desc="Evaluating"): - model.eval() - batch = tuple(t.to(args.device) for t in batch) - - with torch.no_grad(): - batch = tuple(t.to(args.device) for t in batch) - labels = batch[5] - inputs = { - "input_ids": batch[0], - "input_modal": batch[2], - "attention_mask": batch[1], - "modal_start_tokens": batch[3], - "modal_end_tokens": batch[4], - } - outputs = model(**inputs) - logits = outputs[0] # model outputs are always tuple in transformers (see doc) - tmp_eval_loss = criterion(logits, labels) - eval_loss += tmp_eval_loss.mean().item() - nb_eval_steps += 1 - if preds is None: - preds = torch.sigmoid(logits).detach().cpu().numpy() > 0.5 - out_label_ids = labels.detach().cpu().numpy() - else: - preds = np.append(preds, torch.sigmoid(logits).detach().cpu().numpy() > 0.5, axis=0) - out_label_ids = np.append(out_label_ids, labels.detach().cpu().numpy(), axis=0) - - eval_loss = eval_loss / nb_eval_steps - result = { - "loss": eval_loss, - "macro_f1": f1_score(out_label_ids, preds, average="macro"), - "micro_f1": f1_score(out_label_ids, preds, average="micro"), - } - - output_eval_file = os.path.join(eval_output_dir, prefix, "eval_results.txt") - with open(output_eval_file, "w") as writer: - logger.info("***** Eval results {} *****".format(prefix)) - for key in sorted(result.keys()): - logger.info(" %s = %s", key, str(result[key])) - writer.write("%s = %s\n" % (key, str(result[key]))) - - return result - - -def load_examples(args, tokenizer, evaluate=False): - path = os.path.join(args.data_dir, "dev.jsonl" if evaluate else "train.jsonl") - transforms = get_image_transforms() - labels = get_mmimdb_labels() - dataset = JsonlDataset(path, tokenizer, transforms, labels, args.max_seq_length - args.num_image_embeds - 2) - return dataset - - -def main(): - parser = argparse.ArgumentParser() - - # Required parameters - parser.add_argument( - "--data_dir", - default=None, - type=str, - required=True, - help="The input data dir. Should contain the .jsonl files for MMIMDB.", - ) - parser.add_argument( - "--model_name_or_path", - default=None, - type=str, - required=True, - help="Path to pretrained model or model identifier from huggingface.co/models", - ) - parser.add_argument( - "--output_dir", - default=None, - type=str, - required=True, - help="The output directory where the model predictions and checkpoints will be written.", - ) - - # Other parameters - parser.add_argument( - "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name" - ) - parser.add_argument( - "--tokenizer_name", - default="", - type=str, - help="Pretrained tokenizer name or path if not the same as model_name", - ) - parser.add_argument( - "--cache_dir", - default=None, - type=str, - help="Where do you want to store the pre-trained models downloaded from huggingface.co", - ) - parser.add_argument( - "--max_seq_length", - default=128, - type=int, - help=( - "The maximum total input sequence length after tokenization. Sequences longer " - "than this will be truncated, sequences shorter will be padded." 
- ), - ) - parser.add_argument( - "--num_image_embeds", default=1, type=int, help="Number of Image Embeddings from the Image Encoder" - ) - parser.add_argument("--do_train", action="store_true", help="Whether to run training.") - parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.") - parser.add_argument( - "--evaluate_during_training", action="store_true", help="Rul evaluation during training at each logging step." - ) - parser.add_argument( - "--do_lower_case", action="store_true", help="Set this flag if you are using an uncased model." - ) - - parser.add_argument("--per_gpu_train_batch_size", default=8, type=int, help="Batch size per GPU/CPU for training.") - parser.add_argument( - "--per_gpu_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation." - ) - parser.add_argument( - "--gradient_accumulation_steps", - type=int, - default=1, - help="Number of updates steps to accumulate before performing a backward/update pass.", - ) - parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.") - parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.") - parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.") - parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") - parser.add_argument( - "--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform." - ) - parser.add_argument("--patience", default=5, type=int, help="Patience for Early Stopping.") - parser.add_argument( - "--max_steps", - default=-1, - type=int, - help="If > 0: set total number of training steps to perform. Override num_train_epochs.", - ) - parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.") - - parser.add_argument("--logging_steps", type=int, default=50, help="Log every X updates steps.") - parser.add_argument("--save_steps", type=int, default=50, help="Save checkpoint every X updates steps.") - parser.add_argument( - "--eval_all_checkpoints", - action="store_true", - help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number", - ) - parser.add_argument("--no_cuda", action="store_true", help="Avoid using CUDA when available") - parser.add_argument("--num_workers", type=int, default=8, help="number of worker threads for dataloading") - parser.add_argument( - "--overwrite_output_dir", action="store_true", help="Overwrite the content of the output directory" - ) - parser.add_argument( - "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets" - ) - parser.add_argument("--seed", type=int, default=42, help="random seed for initialization") - - parser.add_argument( - "--fp16", - action="store_true", - help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit", - ) - parser.add_argument( - "--fp16_opt_level", - type=str, - default="O1", - help=( - "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. 
" - "See details at https://nvidia.github.io/apex/amp.html" - ), - ) - parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") - parser.add_argument("--server_ip", type=str, default="", help="For distant debugging.") - parser.add_argument("--server_port", type=str, default="", help="For distant debugging.") - args = parser.parse_args() - - if ( - os.path.exists(args.output_dir) - and os.listdir(args.output_dir) - and args.do_train - and not args.overwrite_output_dir - ): - raise ValueError( - "Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format( - args.output_dir - ) - ) - - # Setup distant debugging if needed - if args.server_ip and args.server_port: - # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script - import ptvsd - - print("Waiting for debugger attach") - ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True) - ptvsd.wait_for_attach() - - # Setup CUDA, GPU & distributed training - if args.local_rank == -1 or args.no_cuda: - device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") - args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count() - else: # Initializes the distributed backend which will take care of synchronizing nodes/GPUs - torch.cuda.set_device(args.local_rank) - device = torch.device("cuda", args.local_rank) - torch.distributed.init_process_group(backend="nccl") - args.n_gpu = 1 - - args.device = device - - # Setup logging - logging.basicConfig( - format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", - datefmt="%m/%d/%Y %H:%M:%S", - level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN, - ) - logger.warning( - "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", - args.local_rank, - device, - args.n_gpu, - bool(args.local_rank != -1), - args.fp16, - ) - # Set the verbosity to info of the Transformers logger (on main process only): - if is_main_process(args.local_rank): - transformers.utils.logging.set_verbosity_info() - transformers.utils.logging.enable_default_handler() - transformers.utils.logging.enable_explicit_format() - # Set seed - set_seed(args) - - # Load pretrained model and tokenizer - if args.local_rank not in [-1, 0]: - torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab - - # Setup model - labels = get_mmimdb_labels() - num_labels = len(labels) - transformer_config = AutoConfig.from_pretrained(args.config_name if args.config_name else args.model_name_or_path) - tokenizer = AutoTokenizer.from_pretrained( - args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, - do_lower_case=args.do_lower_case, - cache_dir=args.cache_dir, - ) - transformer = AutoModel.from_pretrained( - args.model_name_or_path, config=transformer_config, cache_dir=args.cache_dir - ) - img_encoder = ImageEncoder(args) - config = MMBTConfig(transformer_config, num_labels=num_labels) - model = MMBTForClassification(config, transformer, img_encoder) - - if args.local_rank == 0: - torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab - - model.to(args.device) - - logger.info("Training/evaluation parameters %s", args) - - # Training - if args.do_train: - train_dataset = load_examples(args, tokenizer, evaluate=False) - label_frequences = train_dataset.get_label_frequencies() - 
label_frequences = [label_frequences[l] for l in labels] - label_weights = ( - torch.tensor(label_frequences, device=args.device, dtype=torch.float) / len(train_dataset) - ) ** -1 - criterion = nn.BCEWithLogitsLoss(pos_weight=label_weights) - global_step, tr_loss = train(args, train_dataset, model, tokenizer, criterion) - logger.info(" global_step = %s, average loss = %s", global_step, tr_loss) - - # Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained() - if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0): - logger.info("Saving model checkpoint to %s", args.output_dir) - # Save a trained model, configuration and tokenizer using `save_pretrained()`. - # They can then be reloaded using `from_pretrained()` - model_to_save = ( - model.module if hasattr(model, "module") else model - ) # Take care of distributed/parallel training - torch.save(model_to_save.state_dict(), os.path.join(args.output_dir, WEIGHTS_NAME)) - tokenizer.save_pretrained(args.output_dir) - - # Good practice: save your training arguments together with the trained model - torch.save(args, os.path.join(args.output_dir, "training_args.bin")) - - # Load a trained model and vocabulary that you have fine-tuned - model = MMBTForClassification(config, transformer, img_encoder) - model.load_state_dict(torch.load(os.path.join(args.output_dir, WEIGHTS_NAME))) - tokenizer = AutoTokenizer.from_pretrained(args.output_dir) - model.to(args.device) - - # Evaluation - results = {} - if args.do_eval and args.local_rank in [-1, 0]: - checkpoints = [args.output_dir] - if args.eval_all_checkpoints: - checkpoints = [ - os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + "/**/" + WEIGHTS_NAME, recursive=True)) - ] - - logger.info("Evaluate the following checkpoints: %s", checkpoints) - for checkpoint in checkpoints: - global_step = checkpoint.split("-")[-1] if len(checkpoints) > 1 else "" - prefix = checkpoint.split("/")[-1] if checkpoint.find("checkpoint") != -1 else "" - model = MMBTForClassification(config, transformer, img_encoder) - model.load_state_dict(torch.load(checkpoint)) - model.to(args.device) - result = evaluate(args, model, tokenizer, criterion, prefix=prefix) - result = {k + "_{}".format(global_step): v for k, v in result.items()} - results.update(result) - - return results - - -if __name__ == "__main__": - main() diff --git a/examples/research_projects/mm-imdb/utils_mmimdb.py b/examples/research_projects/mm-imdb/utils_mmimdb.py deleted file mode 100644 index df8e38d5974..00000000000 --- a/examples/research_projects/mm-imdb/utils_mmimdb.py +++ /dev/null @@ -1,146 +0,0 @@ -# coding=utf-8 -# Copyright (c) Facebook, Inc. and its affiliates. -# Copyright (c) HuggingFace Inc. team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import json -import os -from collections import Counter - -import torch -import torchvision -import torchvision.transforms as transforms -from PIL import Image -from torch import nn -from torch.utils.data import Dataset - - -POOLING_BREAKDOWN = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)} - - -class ImageEncoder(nn.Module): - def __init__(self, args): - super().__init__() - model = torchvision.models.resnet152(pretrained=True) - modules = list(model.children())[:-2] - self.model = nn.Sequential(*modules) - self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds]) - - def forward(self, x): - # Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048 - out = self.pool(self.model(x)) - out = torch.flatten(out, start_dim=2) - out = out.transpose(1, 2).contiguous() - return out # BxNx2048 - - -class JsonlDataset(Dataset): - def __init__(self, data_path, tokenizer, transforms, labels, max_seq_length): - self.data = [json.loads(l) for l in open(data_path)] - self.data_dir = os.path.dirname(data_path) - self.tokenizer = tokenizer - self.labels = labels - self.n_classes = len(labels) - self.max_seq_length = max_seq_length - - self.transforms = transforms - - def __len__(self): - return len(self.data) - - def __getitem__(self, index): - sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]["text"], add_special_tokens=True)) - start_token, sentence, end_token = sentence[0], sentence[1:-1], sentence[-1] - sentence = sentence[: self.max_seq_length] - - label = torch.zeros(self.n_classes) - label[[self.labels.index(tgt) for tgt in self.data[index]["label"]]] = 1 - - image = Image.open(os.path.join(self.data_dir, self.data[index]["img"])).convert("RGB") - image = self.transforms(image) - - return { - "image_start_token": start_token, - "image_end_token": end_token, - "sentence": sentence, - "image": image, - "label": label, - } - - def get_label_frequencies(self): - label_freqs = Counter() - for row in self.data: - label_freqs.update(row["label"]) - return label_freqs - - -def collate_fn(batch): - lens = [len(row["sentence"]) for row in batch] - bsz, max_seq_len = len(batch), max(lens) - - mask_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long) - text_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long) - - for i_batch, (input_row, length) in enumerate(zip(batch, lens)): - text_tensor[i_batch, :length] = input_row["sentence"] - mask_tensor[i_batch, :length] = 1 - - img_tensor = torch.stack([row["image"] for row in batch]) - tgt_tensor = torch.stack([row["label"] for row in batch]) - img_start_token = torch.stack([row["image_start_token"] for row in batch]) - img_end_token = torch.stack([row["image_end_token"] for row in batch]) - - return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor - - -def get_mmimdb_labels(): - return [ - "Crime", - "Drama", - "Thriller", - "Action", - "Comedy", - "Romance", - "Documentary", - "Short", - "Mystery", - "History", - "Family", - "Adventure", - "Fantasy", - "Sci-Fi", - "Western", - "Horror", - "Sport", - "War", - "Music", - "Musical", - "Animation", - "Biography", - "Film-Noir", - ] - - -def get_image_transforms(): - return transforms.Compose( - [ - transforms.Resize(256), - transforms.CenterCrop(224), - transforms.ToTensor(), - transforms.Normalize( - mean=[0.46777044, 0.44531429, 0.40661017], - std=[0.12221994, 0.12145835, 0.14380469], - ), - ] - ) diff --git a/examples/research_projects/movement-pruning/README.md 
b/examples/research_projects/movement-pruning/README.md deleted file mode 100644 index 575ec1a9b49..00000000000 --- a/examples/research_projects/movement-pruning/README.md +++ /dev/null @@ -1,185 +0,0 @@ -# Movement Pruning: Adaptive Sparsity by Fine-Tuning - -Author: @VictorSanh - -*Magnitude pruning is a widely used strategy for reducing model size in pure supervised learning; however, it is less effective in the transfer learning regime that has become standard for state-of-the-art natural language processing applications. We propose the use of *movement pruning*, a simple, deterministic first-order weight pruning method that is more adaptive to pretrained model fine-tuning. Experiments show that when pruning large pretrained language models, movement pruning shows significant improvements in high-sparsity regimes. When combined with distillation, the approach achieves minimal accuracy loss with down to only 3% of the model parameters:* - -| Fine-pruning+Distillation
(Teacher=BERT-base fine-tuned) | BERT base
fine-tuned | Remaining
Weights (%) | Magnitude Pruning | L0 Regularization | Movement Pruning | Soft Movement Pruning | -| :---: | :---: | :---: | :---: | :---: | :---: | :---: | -| SQuAD - Dev
EM/F1 | 80.4/88.1 | 10%
3% | 70.2/80.1
45.5/59.6 | 72.4/81.9
64.3/75.8 | 75.6/84.3
67.5/78.0 | **76.6/84.9**
**72.7/82.3** | -| MNLI - Dev
acc/MM acc | 84.5/84.9 | 10%
3% | 78.3/79.3
69.4/70.6 | 78.7/79.7
76.0/76.2 | 80.1/80.4
76.5/77.4 | **81.2/81.8**
**79.5/80.1** | -| QQP - Dev
acc/F1 | 91.4/88.4 | 10%
3% | 79.8/65.0
72.4/57.8 | 88.1/82.8
87.0/81.9 | 89.7/86.2
86.1/81.5 | **90.2/86.8**
**89.1/85.5** | - -This page contains information on how to fine-prune pre-trained models such as `BERT` to obtain extremely sparse models with movement pruning. In contrast to magnitude pruning which selects weights that are far from 0, movement pruning retains weights that are moving away from 0. - -For more information, we invite you to check out [our paper](https://arxiv.org/abs/2005.07683). -You can also have a look at this fun *Explain Like I'm Five* introductory [slide deck](https://www.slideshare.net/VictorSanh/movement-pruning-explain-like-im-five-234205241). - -
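To make the contrast with magnitude pruning concrete, here is a minimal sketch of the movement-pruning importance score; it is a simplified view of the idea from the paper, not the `MaskedBert*` implementation shipped in this folder, and the function names and `keep_ratio` argument are illustrative only:

```python
import torch


def update_scores(scores: torch.Tensor, weight: torch.Tensor, weight_grad: torch.Tensor, lr: float = 1.0) -> torch.Tensor:
    # Accumulate the first-order movement importance S <- S - lr * (dL/dW) * W over training:
    # weights moving away from zero gain score, weights shrinking towards zero lose it.
    return scores - lr * weight_grad * weight


def topk_mask(scores: torch.Tensor, keep_ratio: float) -> torch.Tensor:
    # Keep only the `keep_ratio` fraction of weights with the highest movement scores (hard top-k variant).
    k = max(1, int(keep_ratio * scores.numel()))
    threshold = scores.flatten().kthvalue(scores.numel() - k + 1).values
    return (scores >= threshold).float()


# After fine-tuning, pruning to 15% remaining weights then amounts to:
#   mask = topk_mask(scores, keep_ratio=0.15)
#   pruned_weight = weight * mask
```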

- -
- -## Extreme sparsity and efficient storage - -One promise of extreme pruning is to obtain extremely small models that can be easily sent (and stored) on edge devices. By setting weights to 0., we reduce the amount of information we need to store, and thus decreasing the memory size. We are able to obtain extremely sparse fine-pruned models with movement pruning: ~95% of the dense performance with ~5% of total remaining weights in the BERT encoder. - -In [this notebook](https://github.com/huggingface/transformers/blob/main/examples/research_projects/movement-pruning/Saving_PruneBERT.ipynb), we showcase how we can leverage standard tools that exist out-of-the-box to efficiently store an extremely sparse question answering model (only 6% of total remaining weights in the encoder). We are able to reduce the memory size of the encoder **from the 340MB (the original dense BERT) to 11MB**, without any additional training of the model (every operation is performed *post fine-pruning*). It is sufficiently small to store it on a [91' floppy disk](https://en.wikipedia.org/wiki/Floptical) 📎! - -While movement pruning does not directly optimize for memory footprint (but rather the number of non-null weights), we hypothetize that further memory compression ratios can be achieved with specific quantization aware trainings (see for instance [Q8BERT](https://arxiv.org/abs/1910.06188), [And the Bit Goes Down](https://arxiv.org/abs/1907.05686) or [Quant-Noise](https://arxiv.org/abs/2004.07320)). - -## Fine-pruned models - -As examples, we release two English PruneBERT checkpoints (models fine-pruned from a pre-trained `BERT` checkpoint), one on SQuAD and the other on MNLI. - -- **`prunebert-base-uncased-6-finepruned-w-distil-squad`**
-Pre-trained `BERT-base-uncased` fine-pruned with soft movement pruning on SQuAD v1.1. We use an additional distillation signal from `BERT-base-uncased` finetuned on SQuAD. The encoder counts 6% of total non-null weights and reaches 83.8 F1 score. The model can be accessed with: `pruned_bert = BertForQuestionAnswering.from_pretrained("huggingface/prunebert-base-uncased-6-finepruned-w-distil-squad")` -- **`prunebert-base-uncased-6-finepruned-w-distil-mnli`**
-Pre-trained `BERT-base-uncased` fine-pruned with soft movement pruning on MNLI. We use an additional distillation signal from `BERT-base-uncased` finetuned on MNLI. The encoder counts 6% of total non-null weights and reaches 80.7 (matched) accuracy. The model can be accessed with: `pruned_bert = BertForSequenceClassification.from_pretrained("huggingface/prunebert-base-uncased-6-finepruned-w-distil-mnli")` - -## How to fine-prune? - -### Setup - -The code relies on the 🤗 Transformers library. In addition to the dependencies listed in the [`examples`](https://github.com/huggingface/transformers/tree/main/examples) folder, you should install a few additional dependencies listed in the `requirements.txt` file: `pip install -r requirements.txt`. - -Note that we built our experiments on top of a stabilized version of the library (commit https://github.com/huggingface/transformers/commit/352d5472b0c1dec0f420d606d16747d851b4bda8): we do not guarantee that everything is still compatible with the latest version of the main branch. - -### Fine-pruning with movement pruning - -Below, we detail how to reproduce the results reported in the paper. We use SQuAD as a running example. Commands (and scripts) can be easily adapted for other tasks. - -The following command fine-prunes a pre-trained `BERT-base` on SQuAD using movement pruning towards 15% of remaining weights (85% sparsity). Note that we freeze all the embeddings modules (from their pre-trained value) and only prune the Fully Connected layers in the encoder (12 layers of Transformer Block). - -```bash -SERIALIZATION_DIR= -SQUAD_DATA= - -python examples/movement-pruning/masked_run_squad.py \ - --output_dir $SERIALIZATION_DIR \ - --data_dir $SQUAD_DATA \ - --train_file train-v1.1.json \ - --predict_file dev-v1.1.json \ - --do_train --do_eval --do_lower_case \ - --model_type masked_bert \ - --model_name_or_path google-bert/bert-base-uncased \ - --per_gpu_train_batch_size 16 \ - --warmup_steps 5400 \ - --num_train_epochs 10 \ - --learning_rate 3e-5 --mask_scores_learning_rate 1e-2 \ - --initial_threshold 1 --final_threshold 0.15 \ - --initial_warmup 1 --final_warmup 2 \ - --pruning_method topK --mask_init constant --mask_scale 0. -``` - -### Fine-pruning with other methods - -We can also explore other fine-pruning methods by changing the `pruning_method` parameter: - -Soft movement pruning -```bash -python examples/movement-pruning/masked_run_squad.py \ - --output_dir $SERIALIZATION_DIR \ - --data_dir $SQUAD_DATA \ - --train_file train-v1.1.json \ - --predict_file dev-v1.1.json \ - --do_train --do_eval --do_lower_case \ - --model_type masked_bert \ - --model_name_or_path google-bert/bert-base-uncased \ - --per_gpu_train_batch_size 16 \ - --warmup_steps 5400 \ - --num_train_epochs 10 \ - --learning_rate 3e-5 --mask_scores_learning_rate 1e-2 \ - --initial_threshold 0 --final_threshold 0.1 \ - --initial_warmup 1 --final_warmup 2 \ - --pruning_method sigmoied_threshold --mask_init constant --mask_scale 0. \ - --regularization l1 --final_lambda 400. -``` - -L0 regularization -```bash -python examples/movement-pruning/masked_run_squad.py \ - --output_dir $SERIALIZATION_DIR \ - --data_dir $SQUAD_DATA \ - --train_file train-v1.1.json \ - --predict_file dev-v1.1.json \ - --do_train --do_eval --do_lower_case \ - --model_type masked_bert \ - --model_name_or_path google-bert/bert-base-uncased \ - --per_gpu_train_batch_size 16 \ - --warmup_steps 5400 \ - --num_train_epochs 10 \ - --learning_rate 3e-5 --mask_scores_learning_rate 1e-1 \ - --initial_threshold 1. 
--final_threshold 1. \ - --initial_warmup 1 --final_warmup 1 \ - --pruning_method l0 --mask_init constant --mask_scale 2.197 \ - --regularization l0 --final_lambda 125. -``` - -Iterative Magnitude Pruning -```bash -python examples/movement-pruning/masked_run_squad.py \ - --output_dir ./dbg \ - --data_dir examples/distillation/data/squad_data \ - --train_file train-v1.1.json \ - --predict_file dev-v1.1.json \ - --do_train --do_eval --do_lower_case \ - --model_type masked_bert \ - --model_name_or_path google-bert/bert-base-uncased \ - --per_gpu_train_batch_size 16 \ - --warmup_steps 5400 \ - --num_train_epochs 10 \ - --learning_rate 3e-5 \ - --initial_threshold 1 --final_threshold 0.15 \ - --initial_warmup 1 --final_warmup 2 \ - --pruning_method magnitude -``` - -### After fine-pruning - -**Counting parameters** - -Regularization based pruning methods (soft movement pruning and L0 regularization) rely on the penalty to induce sparsity. The multiplicative coefficient controls the sparsity level. -To obtain the effective sparsity level in the encoder, we simply count the number of activated (non-null) weights: - -```bash -python examples/movement-pruning/counts_parameters.py \ - --pruning_method sigmoied_threshold \ - --threshold 0.1 \ - --serialization_dir $SERIALIZATION_DIR -``` - -**Pruning once for all** - -Once the model has been fine-pruned, the pruned weights can be set to 0. once for all (reducing the amount of information to store). In our running experiments, we can convert a `MaskedBertForQuestionAnswering` (a BERT model augmented to enable on-the-fly pruning capabilities) to a standard `BertForQuestionAnswering`: - -```bash -python examples/movement-pruning/bertarize.py \ - --pruning_method sigmoied_threshold \ - --threshold 0.1 \ - --model_name_or_path $SERIALIZATION_DIR -``` - -## Hyper-parameters - -For reproducibility purposes, we share the detailed results presented in the paper. These [tables](https://docs.google.com/spreadsheets/d/17JgRq_OFFTniUrz6BZWW_87DjFkKXpI1kYDSsseT_7g/edit?usp=sharing) exhaustively describe the individual hyper-parameters used for each data point. - -## Inference speed - -Early experiments show that even though models fine-pruned with (soft) movement pruning are extremely sparse, they do not benefit from significant improvement in terms of inference speed when using the standard PyTorch inference. -We are currently benchmarking and exploring inference setups specifically for sparse architectures. -In particular, hardware manufacturers are announcing devices that will speedup inference for sparse networks considerably. - -## Citation - -If you find this resource useful, please consider citing the following paper: - -```bibtex -@article{sanh2020movement, - title={Movement Pruning: Adaptive Sparsity by Fine-Tuning}, - author={Victor Sanh and Thomas Wolf and Alexander M. 
Rush}, - year={2020}, - eprint={2005.07683}, - archivePrefix={arXiv}, - primaryClass={cs.CL} -} -``` diff --git a/examples/research_projects/movement-pruning/Saving_PruneBERT.ipynb b/examples/research_projects/movement-pruning/Saving_PruneBERT.ipynb deleted file mode 100644 index 0c27bd02a7d..00000000000 --- a/examples/research_projects/movement-pruning/Saving_PruneBERT.ipynb +++ /dev/null @@ -1,645 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Saving PruneBERT\n", - "\n", - "\n", - "This notebook aims at showcasing how we can leverage standard tools to save (and load) an extremely sparse model fine-pruned with [movement pruning](https://arxiv.org/abs/2005.07683) (or any other unstructured pruning mehtod).\n", - "\n", - "In this example, we used BERT (base-uncased, but the procedure described here is not specific to BERT and can be applied to a large variety of models.\n", - "\n", - "We first obtain an extremely sparse model by fine-pruning with movement pruning on SQuAD v1.1. We then used the following combination of standard tools:\n", - "- We reduce the precision of the model with Int8 dynamic quantization using [PyTorch implementation](https://pytorch.org/tutorials/intermediate/dynamic_quantization_bert_tutorial.html). We only quantized the Fully Connected Layers.\n", - "- Sparse quantized matrices are converted into the [Compressed Sparse Row format](https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.html).\n", - "- We use HDF5 with `gzip` compression to store the weights.\n", - "\n", - "We experiment with a question answering model with only 6% of total remaining weights in the encoder (previously obtained with movement pruning). **We are able to reduce the memory size of the encoder from 340MB (original dense BERT) to 11MB**, which fits on a [91' floppy disk](https://en.wikipedia.org/wiki/Floptical)!\n", - "\n", - "\n", - "\n", - "*Note: this notebook is compatible with `torch>=1.5.0` If you are using, `torch==1.4.0`, please refer to [this previous version of the notebook](https://github.com/huggingface/transformers/commit/b11386e158e86e62d4041eabd86d044cd1695737).*" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [], - "source": [ - "# Includes\n", - "\n", - "import json\n", - "import os\n", - "from collections import OrderedDict\n", - "\n", - "import h5py\n", - "import numpy as np\n", - "import torch\n", - "from scipy import sparse\n", - "from torch import nn\n", - "\n", - "from transformers import BertForQuestionAnswering\n", - "\n", - "\n", - "os.chdir(\"../../\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Saving" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Dynamic quantization induces little or no loss of performance while significantly reducing the memory footprint." 
- ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [], - "source": [ - "# Load fine-pruned model and quantize the model\n", - "\n", - "model = BertForQuestionAnswering.from_pretrained(\"huggingface/prunebert-base-uncased-6-finepruned-w-distil-squad\")\n", - "model.to(\"cpu\")\n", - "\n", - "quantized_model = torch.quantization.quantize_dynamic(\n", - " model=model,\n", - " qconfig_spec={\n", - " nn.Linear: torch.quantization.default_dynamic_qconfig,\n", - " },\n", - " dtype=torch.qint8,\n", - ")\n", - "# print(quantized_model)\n", - "\n", - "qtz_st = quantized_model.state_dict()" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [], - "source": [ - "# Saving the original (encoder + classifier) in the standard torch.save format\n", - "\n", - "dense_st = {\n", - " name: param for name, param in model.state_dict().items() if \"embedding\" not in name and \"pooler\" not in name\n", - "}\n", - "torch.save(\n", - " dense_st,\n", - " \"dbg/dense_squad.pt\",\n", - ")\n", - "dense_mb_size = os.path.getsize(\"dbg/dense_squad.pt\")" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": { - "scrolled": true - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Decompose quantization for bert.encoder.layer.0.attention.self.query._packed_params.weight\n", - "Decompose quantization for bert.encoder.layer.0.attention.self.key._packed_params.weight\n", - "Decompose quantization for bert.encoder.layer.0.attention.self.value._packed_params.weight\n", - "Decompose quantization for bert.encoder.layer.0.attention.output.dense._packed_params.weight\n", - "Decompose quantization for bert.encoder.layer.0.intermediate.dense._packed_params.weight\n", - "Decompose quantization for bert.encoder.layer.0.output.dense._packed_params.weight\n", - "Decompose quantization for bert.encoder.layer.1.attention.self.query._packed_params.weight\n", - "Decompose quantization for bert.encoder.layer.1.attention.self.key._packed_params.weight\n", - "Decompose quantization for bert.encoder.layer.1.attention.self.value._packed_params.weight\n", - "Decompose quantization for bert.encoder.layer.1.attention.output.dense._packed_params.weight\n", - "Decompose quantization for bert.encoder.layer.1.intermediate.dense._packed_params.weight\n", - "Decompose quantization for bert.encoder.layer.1.output.dense._packed_params.weight\n", - "Decompose quantization for bert.encoder.layer.2.attention.self.query._packed_params.weight\n", - "Decompose quantization for bert.encoder.layer.2.attention.self.key._packed_params.weight\n", - "Decompose quantization for bert.encoder.layer.2.attention.self.value._packed_params.weight\n", - "Decompose quantization for bert.encoder.layer.2.attention.output.dense._packed_params.weight\n", - "Decompose quantization for bert.encoder.layer.2.intermediate.dense._packed_params.weight\n", - "Decompose quantization for bert.encoder.layer.2.output.dense._packed_params.weight\n", - "Decompose quantization for bert.encoder.layer.3.attention.self.query._packed_params.weight\n", - "Decompose quantization for bert.encoder.layer.3.attention.self.key._packed_params.weight\n", - "Decompose quantization for bert.encoder.layer.3.attention.self.value._packed_params.weight\n", - "Decompose quantization for bert.encoder.layer.3.attention.output.dense._packed_params.weight\n", - "Decompose quantization for bert.encoder.layer.3.intermediate.dense._packed_params.weight\n", - "Decompose quantization for 
bert.encoder.layer.3.output.dense._packed_params.weight\n", - "Decompose quantization for bert.encoder.layer.4.attention.self.query._packed_params.weight\n", - "Decompose quantization for bert.encoder.layer.4.attention.self.key._packed_params.weight\n", - "Decompose quantization for bert.encoder.layer.4.attention.self.value._packed_params.weight\n", - "Decompose quantization for bert.encoder.layer.4.attention.output.dense._packed_params.weight\n", - "Decompose quantization for bert.encoder.layer.4.intermediate.dense._packed_params.weight\n", - "Decompose quantization for bert.encoder.layer.4.output.dense._packed_params.weight\n", - "Decompose quantization for bert.encoder.layer.5.attention.self.query._packed_params.weight\n", - "Decompose quantization for bert.encoder.layer.5.attention.self.key._packed_params.weight\n", - "Decompose quantization for bert.encoder.layer.5.attention.self.value._packed_params.weight\n", - "Decompose quantization for bert.encoder.layer.5.attention.output.dense._packed_params.weight\n", - "Decompose quantization for bert.encoder.layer.5.intermediate.dense._packed_params.weight\n", - "Decompose quantization for bert.encoder.layer.5.output.dense._packed_params.weight\n", - "Decompose quantization for bert.encoder.layer.6.attention.self.query._packed_params.weight\n", - "Decompose quantization for bert.encoder.layer.6.attention.self.key._packed_params.weight\n", - "Decompose quantization for bert.encoder.layer.6.attention.self.value._packed_params.weight\n", - "Decompose quantization for bert.encoder.layer.6.attention.output.dense._packed_params.weight\n", - "Decompose quantization for bert.encoder.layer.6.intermediate.dense._packed_params.weight\n", - "Decompose quantization for bert.encoder.layer.6.output.dense._packed_params.weight\n", - "Decompose quantization for bert.encoder.layer.7.attention.self.query._packed_params.weight\n", - "Decompose quantization for bert.encoder.layer.7.attention.self.key._packed_params.weight\n", - "Decompose quantization for bert.encoder.layer.7.attention.self.value._packed_params.weight\n", - "Decompose quantization for bert.encoder.layer.7.attention.output.dense._packed_params.weight\n", - "Decompose quantization for bert.encoder.layer.7.intermediate.dense._packed_params.weight\n", - "Decompose quantization for bert.encoder.layer.7.output.dense._packed_params.weight\n", - "Decompose quantization for bert.encoder.layer.8.attention.self.query._packed_params.weight\n", - "Decompose quantization for bert.encoder.layer.8.attention.self.key._packed_params.weight\n", - "Decompose quantization for bert.encoder.layer.8.attention.self.value._packed_params.weight\n", - "Decompose quantization for bert.encoder.layer.8.attention.output.dense._packed_params.weight\n", - "Decompose quantization for bert.encoder.layer.8.intermediate.dense._packed_params.weight\n", - "Decompose quantization for bert.encoder.layer.8.output.dense._packed_params.weight\n", - "Decompose quantization for bert.encoder.layer.9.attention.self.query._packed_params.weight\n", - "Decompose quantization for bert.encoder.layer.9.attention.self.key._packed_params.weight\n", - "Decompose quantization for bert.encoder.layer.9.attention.self.value._packed_params.weight\n", - "Decompose quantization for bert.encoder.layer.9.attention.output.dense._packed_params.weight\n", - "Decompose quantization for bert.encoder.layer.9.intermediate.dense._packed_params.weight\n", - "Decompose quantization for bert.encoder.layer.9.output.dense._packed_params.weight\n", - "Decompose quantization 
for bert.encoder.layer.10.attention.self.query._packed_params.weight\n", - "Decompose quantization for bert.encoder.layer.10.attention.self.key._packed_params.weight\n", - "Decompose quantization for bert.encoder.layer.10.attention.self.value._packed_params.weight\n", - "Decompose quantization for bert.encoder.layer.10.attention.output.dense._packed_params.weight\n", - "Decompose quantization for bert.encoder.layer.10.intermediate.dense._packed_params.weight\n", - "Decompose quantization for bert.encoder.layer.10.output.dense._packed_params.weight\n", - "Decompose quantization for bert.encoder.layer.11.attention.self.query._packed_params.weight\n", - "Decompose quantization for bert.encoder.layer.11.attention.self.key._packed_params.weight\n", - "Decompose quantization for bert.encoder.layer.11.attention.self.value._packed_params.weight\n", - "Decompose quantization for bert.encoder.layer.11.attention.output.dense._packed_params.weight\n", - "Decompose quantization for bert.encoder.layer.11.intermediate.dense._packed_params.weight\n", - "Decompose quantization for bert.encoder.layer.11.output.dense._packed_params.weight\n", - "Decompose quantization for bert.pooler.dense._packed_params.weight\n", - "Decompose quantization for qa_outputs._packed_params.weight\n" - ] - } - ], - "source": [ - "# Elementary representation: we decompose the quantized tensors into (scale, zero_point, int_repr).\n", - "# See https://pytorch.org/docs/stable/quantization.html\n", - "\n", - "# We further leverage the fact that int_repr is sparse matrix to optimize the storage: we decompose int_repr into\n", - "# its CSR representation (data, indptr, indices).\n", - "\n", - "elementary_qtz_st = {}\n", - "for name, param in qtz_st.items():\n", - " if \"dtype\" not in name and param.is_quantized:\n", - " print(\"Decompose quantization for\", name)\n", - " # We need to extract the scale, the zero_point and the int_repr for the quantized tensor and modules\n", - " scale = param.q_scale() # torch.tensor(1,) - float32\n", - " zero_point = param.q_zero_point() # torch.tensor(1,) - int32\n", - " elementary_qtz_st[f\"{name}.scale\"] = scale\n", - " elementary_qtz_st[f\"{name}.zero_point\"] = zero_point\n", - "\n", - " # We assume the int_repr is sparse and compute its CSR representation\n", - " # Only the FCs in the encoder are actually sparse\n", - " int_repr = param.int_repr() # torch.tensor(nb_rows, nb_columns) - int8\n", - " int_repr_cs = sparse.csr_matrix(int_repr) # scipy.sparse.csr.csr_matrix\n", - "\n", - " elementary_qtz_st[f\"{name}.int_repr.data\"] = int_repr_cs.data # np.array int8\n", - " elementary_qtz_st[f\"{name}.int_repr.indptr\"] = int_repr_cs.indptr # np.array int32\n", - " assert max(int_repr_cs.indices) < 65535 # If not, we shall fall back to int32\n", - " elementary_qtz_st[f\"{name}.int_repr.indices\"] = np.uint16(int_repr_cs.indices) # np.array uint16\n", - " elementary_qtz_st[f\"{name}.int_repr.shape\"] = int_repr_cs.shape # tuple(int, int)\n", - " else:\n", - " elementary_qtz_st[name] = param" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [], - "source": [ - "# Create mapping from torch.dtype to string description (we could also used an int8 instead of string)\n", - "str_2_dtype = {\"qint8\": torch.qint8}\n", - "dtype_2_str = {torch.qint8: \"qint8\"}" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": { - "scrolled": true - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Encoder Size (MB) - Sparse & 
Quantized - `torch.save`: 21.29\n" - ] - } - ], - "source": [ - "# Saving the pruned (encoder + classifier) in the standard torch.save format\n", - "\n", - "dense_optimized_st = {\n", - " name: param for name, param in elementary_qtz_st.items() if \"embedding\" not in name and \"pooler\" not in name\n", - "}\n", - "torch.save(\n", - " dense_optimized_st,\n", - " \"dbg/dense_squad_optimized.pt\",\n", - ")\n", - "print(\n", - " \"Encoder Size (MB) - Sparse & Quantized - `torch.save`:\",\n", - " round(os.path.getsize(\"dbg/dense_squad_optimized.pt\") / 1e6, 2),\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Skip bert.embeddings.word_embeddings.weight\n", - "Skip bert.embeddings.position_embeddings.weight\n", - "Skip bert.embeddings.token_type_embeddings.weight\n", - "Skip bert.embeddings.LayerNorm.weight\n", - "Skip bert.embeddings.LayerNorm.bias\n", - "Skip bert.pooler.dense.scale\n", - "Skip bert.pooler.dense.zero_point\n", - "Skip bert.pooler.dense._packed_params.weight.scale\n", - "Skip bert.pooler.dense._packed_params.weight.zero_point\n", - "Skip bert.pooler.dense._packed_params.weight.int_repr.data\n", - "Skip bert.pooler.dense._packed_params.weight.int_repr.indptr\n", - "Skip bert.pooler.dense._packed_params.weight.int_repr.indices\n", - "Skip bert.pooler.dense._packed_params.weight.int_repr.shape\n", - "Skip bert.pooler.dense._packed_params.bias\n", - "Skip bert.pooler.dense._packed_params.dtype\n", - "\n", - "Encoder Size (MB) - Dense: 340.26\n", - "Encoder Size (MB) - Sparse & Quantized: 11.28\n" - ] - } - ], - "source": [ - "# Save the decomposed state_dict with an HDF5 file\n", - "# Saving only the encoder + QA Head\n", - "\n", - "with h5py.File(\"dbg/squad_sparse.h5\", \"w\") as hf:\n", - " for name, param in elementary_qtz_st.items():\n", - " if \"embedding\" in name:\n", - " print(f\"Skip {name}\")\n", - " continue\n", - "\n", - " if \"pooler\" in name:\n", - " print(f\"Skip {name}\")\n", - " continue\n", - "\n", - " if isinstance(param, torch.Tensor):\n", - " if param.numel() == 1:\n", - " # module scale\n", - " # module zero_point\n", - " hf.attrs[name] = param\n", - " continue\n", - "\n", - " if param.requires_grad:\n", - " # LayerNorm\n", - " param = param.detach().numpy()\n", - " hf.create_dataset(name, data=param, compression=\"gzip\", compression_opts=9)\n", - "\n", - " elif isinstance(param, (float, int, tuple)):\n", - " # float - tensor _packed_params.weight.scale\n", - " # int - tensor _packed_params.weight.zero_point\n", - " # tuple - tensor _packed_params.weight.shape\n", - " hf.attrs[name] = param\n", - "\n", - " elif isinstance(param, torch.dtype):\n", - " # dtype - tensor _packed_params.dtype\n", - " hf.attrs[name] = dtype_2_str[param]\n", - "\n", - " else:\n", - " hf.create_dataset(name, data=param, compression=\"gzip\", compression_opts=9)\n", - "\n", - "\n", - "with open(\"dbg/metadata.json\", \"w\") as f:\n", - " f.write(json.dumps(qtz_st._metadata))\n", - "\n", - "size = os.path.getsize(\"dbg/squad_sparse.h5\") + os.path.getsize(\"dbg/metadata.json\")\n", - "print(\"\")\n", - "print(\"Encoder Size (MB) - Dense: \", round(dense_mb_size / 1e6, 2))\n", - "print(\"Encoder Size (MB) - Sparse & Quantized:\", round(size / 1e6, 2))" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\n", - "Size (MB): 99.41\n" - ] - } - ], - "source": [ - 
"# Save the decomposed state_dict to HDF5 storage\n", - "# Save everything in the architecutre (embedding + encoder + QA Head)\n", - "\n", - "with h5py.File(\"dbg/squad_sparse_with_embs.h5\", \"w\") as hf:\n", - " for name, param in elementary_qtz_st.items():\n", - " # if \"embedding\" in name:\n", - " # print(f\"Skip {name}\")\n", - " # continue\n", - "\n", - " # if \"pooler\" in name:\n", - " # print(f\"Skip {name}\")\n", - " # continue\n", - "\n", - " if isinstance(param, torch.Tensor):\n", - " if param.numel() == 1:\n", - " # module scale\n", - " # module zero_point\n", - " hf.attrs[name] = param\n", - " continue\n", - "\n", - " if param.requires_grad:\n", - " # LayerNorm\n", - " param = param.detach().numpy()\n", - " hf.create_dataset(name, data=param, compression=\"gzip\", compression_opts=9)\n", - "\n", - " elif isinstance(param, (float, int, tuple)):\n", - " # float - tensor _packed_params.weight.scale\n", - " # int - tensor _packed_params.weight.zero_point\n", - " # tuple - tensor _packed_params.weight.shape\n", - " hf.attrs[name] = param\n", - "\n", - " elif isinstance(param, torch.dtype):\n", - " # dtype - tensor _packed_params.dtype\n", - " hf.attrs[name] = dtype_2_str[param]\n", - "\n", - " else:\n", - " hf.create_dataset(name, data=param, compression=\"gzip\", compression_opts=9)\n", - "\n", - "\n", - "with open(\"dbg/metadata.json\", \"w\") as f:\n", - " f.write(json.dumps(qtz_st._metadata))\n", - "\n", - "size = os.path.getsize(\"dbg/squad_sparse_with_embs.h5\") + os.path.getsize(\"dbg/metadata.json\")\n", - "print(\"\\nSize (MB):\", round(size / 1e6, 2))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Loading" - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "metadata": {}, - "outputs": [], - "source": [ - "# Reconstruct the elementary state dict\n", - "\n", - "reconstructed_elementary_qtz_st = {}\n", - "\n", - "hf = h5py.File(\"dbg/squad_sparse_with_embs.h5\", \"r\")\n", - "\n", - "for attr_name, attr_param in hf.attrs.items():\n", - " if \"shape\" in attr_name:\n", - " attr_param = tuple(attr_param)\n", - " elif \".scale\" in attr_name:\n", - " if \"_packed_params\" in attr_name:\n", - " attr_param = float(attr_param)\n", - " else:\n", - " attr_param = torch.tensor(attr_param)\n", - " elif \".zero_point\" in attr_name:\n", - " if \"_packed_params\" in attr_name:\n", - " attr_param = int(attr_param)\n", - " else:\n", - " attr_param = torch.tensor(attr_param)\n", - " elif \".dtype\" in attr_name:\n", - " attr_param = str_2_dtype[attr_param]\n", - " reconstructed_elementary_qtz_st[attr_name] = attr_param\n", - " # print(f\"Unpack {attr_name}\")\n", - "\n", - "# Get the tensors/arrays\n", - "for data_name, data_param in hf.items():\n", - " if \"LayerNorm\" in data_name or \"_packed_params.bias\" in data_name:\n", - " reconstructed_elementary_qtz_st[data_name] = torch.from_numpy(np.array(data_param))\n", - " elif \"embedding\" in data_name:\n", - " reconstructed_elementary_qtz_st[data_name] = torch.from_numpy(np.array(data_param))\n", - " else: # _packed_params.weight.int_repr.data, _packed_params.weight.int_repr.indices and _packed_params.weight.int_repr.indptr\n", - " data_param = np.array(data_param)\n", - " if \"indices\" in data_name:\n", - " data_param = np.array(data_param, dtype=np.int32)\n", - " reconstructed_elementary_qtz_st[data_name] = data_param\n", - " # print(f\"Unpack {data_name}\")\n", - "\n", - "\n", - "hf.close()" - ] - }, - { - "cell_type": "code", - "execution_count": 10, - "metadata": {}, - "outputs": [], - 
"source": [ - "# Sanity checks\n", - "\n", - "for name, param in reconstructed_elementary_qtz_st.items():\n", - " assert name in elementary_qtz_st\n", - "for name, param in elementary_qtz_st.items():\n", - " assert name in reconstructed_elementary_qtz_st, name\n", - "\n", - "for name, param in reconstructed_elementary_qtz_st.items():\n", - " assert isinstance(param, type(elementary_qtz_st[name])), name\n", - " if isinstance(param, torch.Tensor):\n", - " assert torch.all(torch.eq(param, elementary_qtz_st[name])), name\n", - " elif isinstance(param, np.ndarray):\n", - " assert (param == elementary_qtz_st[name]).all(), name\n", - " else:\n", - " assert param == elementary_qtz_st[name], name" - ] - }, - { - "cell_type": "code", - "execution_count": 11, - "metadata": {}, - "outputs": [], - "source": [ - "# Re-assemble the sparse int_repr from the CSR format\n", - "\n", - "reconstructed_qtz_st = {}\n", - "\n", - "for name, param in reconstructed_elementary_qtz_st.items():\n", - " if \"weight.int_repr.indptr\" in name:\n", - " prefix_ = name[:-16]\n", - " data = reconstructed_elementary_qtz_st[f\"{prefix_}.int_repr.data\"]\n", - " indptr = reconstructed_elementary_qtz_st[f\"{prefix_}.int_repr.indptr\"]\n", - " indices = reconstructed_elementary_qtz_st[f\"{prefix_}.int_repr.indices\"]\n", - " shape = reconstructed_elementary_qtz_st[f\"{prefix_}.int_repr.shape\"]\n", - "\n", - " int_repr = sparse.csr_matrix(arg1=(data, indices, indptr), shape=shape)\n", - " int_repr = torch.tensor(int_repr.todense())\n", - "\n", - " scale = reconstructed_elementary_qtz_st[f\"{prefix_}.scale\"]\n", - " zero_point = reconstructed_elementary_qtz_st[f\"{prefix_}.zero_point\"]\n", - " weight = torch._make_per_tensor_quantized_tensor(int_repr, scale, zero_point)\n", - "\n", - " reconstructed_qtz_st[f\"{prefix_}\"] = weight\n", - " elif (\n", - " \"int_repr.data\" in name\n", - " or \"int_repr.shape\" in name\n", - " or \"int_repr.indices\" in name\n", - " or \"weight.scale\" in name\n", - " or \"weight.zero_point\" in name\n", - " ):\n", - " continue\n", - " else:\n", - " reconstructed_qtz_st[name] = param" - ] - }, - { - "cell_type": "code", - "execution_count": 12, - "metadata": {}, - "outputs": [], - "source": [ - "# Sanity checks\n", - "\n", - "for name, param in reconstructed_qtz_st.items():\n", - " assert name in qtz_st\n", - "for name, param in qtz_st.items():\n", - " assert name in reconstructed_qtz_st, name\n", - "\n", - "for name, param in reconstructed_qtz_st.items():\n", - " assert isinstance(param, type(qtz_st[name])), name\n", - " if isinstance(param, torch.Tensor):\n", - " assert torch.all(torch.eq(param, qtz_st[name])), name\n", - " elif isinstance(param, np.ndarray):\n", - " assert (param == qtz_st[name]).all(), name\n", - " else:\n", - " assert param == qtz_st[name], name" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Sanity checks" - ] - }, - { - "cell_type": "code", - "execution_count": 13, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "" - ] - }, - "execution_count": 13, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "# Load the re-constructed state dict into a model\n", - "\n", - "dummy_model = BertForQuestionAnswering.from_pretrained(\"bert-base-uncased\")\n", - "dummy_model.to(\"cpu\")\n", - "\n", - "reconstructed_qtz_model = torch.quantization.quantize_dynamic(\n", - " model=dummy_model,\n", - " qconfig_spec=None,\n", - " dtype=torch.qint8,\n", - ")\n", - "\n", - "reconstructed_qtz_st = 
OrderedDict(reconstructed_qtz_st)\n",
- "with open(\"dbg/metadata.json\", \"r\") as read_file:\n",
- " metadata = json.loads(read_file.read())\n",
- "reconstructed_qtz_st._metadata = metadata\n",
- "\n",
- "reconstructed_qtz_model.load_state_dict(reconstructed_qtz_st)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 14,
- "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Sanity check passed\n"
- ]
- }
- ],
- "source": [
- "# Sanity checks on the inference\n",
- "\n",
- "N = 32\n",
- "\n",
- "for _ in range(25):\n",
- " inputs = torch.randint(low=0, high=30000, size=(N, 128))\n",
- " mask = torch.ones(size=(N, 128))\n",
- "\n",
- " y_reconstructed = reconstructed_qtz_model(input_ids=inputs, attention_mask=mask)[0]\n",
- " y = quantized_model(input_ids=inputs, attention_mask=mask)[0]\n",
- "\n",
- " assert torch.all(torch.eq(y, y_reconstructed))\n",
- "print(\"Sanity check passed\")"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": []
- }
- ],
- "metadata": {
- "kernelspec": {
- "display_name": "Python 3",
- "language": "python",
- "name": "python3"
- },
- "language_info": {
- "codemirror_mode": {
- "name": "ipython",
- "version": 3
- },
- "file_extension": ".py",
- "mimetype": "text/x-python",
- "name": "python",
- "nbconvert_exporter": "python",
- "pygments_lexer": "ipython3",
- "version": "3.6.8"
- }
- },
- "nbformat": 4,
- "nbformat_minor": 4
-}
diff --git a/examples/research_projects/movement-pruning/bertarize.py b/examples/research_projects/movement-pruning/bertarize.py
deleted file mode 100644
index da7534f4a6f..00000000000
--- a/examples/research_projects/movement-pruning/bertarize.py
+++ /dev/null
@@ -1,136 +0,0 @@
-# Copyright 2020-present, the HuggingFace Inc. team.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""
-Once a model has been fine-pruned, the weights that are masked during the forward pass can be pruned once and for all.
-For instance, once a model from the :class:`~emmental.MaskedBertForSequenceClassification` is trained, it can be saved (and then loaded)
-as a standard :class:`~transformers.BertForSequenceClassification`.
-""" - -import argparse -import os -import shutil - -import torch -from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer - - -def main(args): - pruning_method = args.pruning_method - threshold = args.threshold - - model_name_or_path = args.model_name_or_path.rstrip("/") - target_model_path = args.target_model_path - - print(f"Load fine-pruned model from {model_name_or_path}") - model = torch.load(os.path.join(model_name_or_path, "pytorch_model.bin")) - pruned_model = {} - - for name, tensor in model.items(): - if "embeddings" in name or "LayerNorm" in name or "pooler" in name: - pruned_model[name] = tensor - print(f"Copied layer {name}") - elif "classifier" in name or "qa_output" in name: - pruned_model[name] = tensor - print(f"Copied layer {name}") - elif "bias" in name: - pruned_model[name] = tensor - print(f"Copied layer {name}") - else: - if pruning_method == "magnitude": - mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold) - pruned_model[name] = tensor * mask - print(f"Pruned layer {name}") - elif pruning_method == "topK": - if "mask_scores" in name: - continue - prefix_ = name[:-6] - scores = model[f"{prefix_}mask_scores"] - mask = TopKBinarizer.apply(scores, threshold) - pruned_model[name] = tensor * mask - print(f"Pruned layer {name}") - elif pruning_method == "sigmoied_threshold": - if "mask_scores" in name: - continue - prefix_ = name[:-6] - scores = model[f"{prefix_}mask_scores"] - mask = ThresholdBinarizer.apply(scores, threshold, True) - pruned_model[name] = tensor * mask - print(f"Pruned layer {name}") - elif pruning_method == "l0": - if "mask_scores" in name: - continue - prefix_ = name[:-6] - scores = model[f"{prefix_}mask_scores"] - l, r = -0.1, 1.1 - s = torch.sigmoid(scores) - s_bar = s * (r - l) + l - mask = s_bar.clamp(min=0.0, max=1.0) - pruned_model[name] = tensor * mask - print(f"Pruned layer {name}") - else: - raise ValueError("Unknown pruning method") - - if target_model_path is None: - target_model_path = os.path.join( - os.path.dirname(model_name_or_path), f"bertarized_{os.path.basename(model_name_or_path)}" - ) - - if not os.path.isdir(target_model_path): - shutil.copytree(model_name_or_path, target_model_path) - print(f"\nCreated folder {target_model_path}") - - torch.save(pruned_model, os.path.join(target_model_path, "pytorch_model.bin")) - print("\nPruned model saved! See you later!") - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - - parser.add_argument( - "--pruning_method", - choices=["l0", "magnitude", "topK", "sigmoied_threshold"], - type=str, - required=True, - help=( - "Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning," - " sigmoied_threshold = Soft movement pruning)" - ), - ) - parser.add_argument( - "--threshold", - type=float, - required=False, - help=( - "For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model. " - "For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared. 
" - "Not needed for `l0`" - ), - ) - parser.add_argument( - "--model_name_or_path", - type=str, - required=True, - help="Folder containing the model that was previously fine-pruned", - ) - parser.add_argument( - "--target_model_path", - default=None, - type=str, - required=False, - help="Folder containing the model that was previously fine-pruned", - ) - - args = parser.parse_args() - - main(args) diff --git a/examples/research_projects/movement-pruning/counts_parameters.py b/examples/research_projects/movement-pruning/counts_parameters.py deleted file mode 100644 index c0ac53fb785..00000000000 --- a/examples/research_projects/movement-pruning/counts_parameters.py +++ /dev/null @@ -1,97 +0,0 @@ -# Copyright 2020-present, the HuggingFace Inc. team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" -Count remaining (non-zero) weights in the encoder (i.e. the transformer layers). -Sparsity and remaining weights levels are equivalent: sparsity % = 100 - remaining weights %. -""" - -import argparse -import os - -import torch -from emmental.modules import ThresholdBinarizer, TopKBinarizer - - -def main(args): - serialization_dir = args.serialization_dir - pruning_method = args.pruning_method - threshold = args.threshold - - st = torch.load(os.path.join(serialization_dir, "pytorch_model.bin"), map_location="cpu") - - remaining_count = 0 # Number of remaining (not pruned) params in the encoder - encoder_count = 0 # Number of params in the encoder - - print("name".ljust(60, " "), "Remaining Weights %", "Remaining Weight") - for name, param in st.items(): - if "encoder" not in name: - continue - - if "mask_scores" in name: - if pruning_method == "topK": - mask_ones = TopKBinarizer.apply(param, threshold).sum().item() - elif pruning_method == "sigmoied_threshold": - mask_ones = ThresholdBinarizer.apply(param, threshold, True).sum().item() - elif pruning_method == "l0": - l, r = -0.1, 1.1 - s = torch.sigmoid(param) - s_bar = s * (r - l) + l - mask = s_bar.clamp(min=0.0, max=1.0) - mask_ones = (mask > 0.0).sum().item() - else: - raise ValueError("Unknown pruning method") - remaining_count += mask_ones - print(name.ljust(60, " "), str(round(100 * mask_ones / param.numel(), 3)).ljust(20, " "), str(mask_ones)) - else: - encoder_count += param.numel() - if "bias" in name or "LayerNorm" in name: - remaining_count += param.numel() - - print("") - print("Remaining Weights (global) %: ", 100 * remaining_count / encoder_count) - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - - parser.add_argument( - "--pruning_method", - choices=["l0", "topK", "sigmoied_threshold"], - type=str, - required=True, - help=( - "Pruning Method (l0 = L0 regularization, topK = Movement pruning, sigmoied_threshold = Soft movement" - " pruning)" - ), - ) - parser.add_argument( - "--threshold", - type=float, - required=False, - help=( - "For `topK`, it is the level of remaining weights (in %) in the fine-pruned model. 
" - "For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared. " - "Not needed for `l0`" - ), - ) - parser.add_argument( - "--serialization_dir", - type=str, - required=True, - help="Folder containing the model that was previously fine-pruned", - ) - - args = parser.parse_args() - - main(args) diff --git a/examples/research_projects/movement-pruning/emmental/__init__.py b/examples/research_projects/movement-pruning/emmental/__init__.py deleted file mode 100644 index 6646667ea88..00000000000 --- a/examples/research_projects/movement-pruning/emmental/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -from .configuration_bert_masked import MaskedBertConfig -from .modeling_bert_masked import ( - MaskedBertForMultipleChoice, - MaskedBertForQuestionAnswering, - MaskedBertForSequenceClassification, - MaskedBertForTokenClassification, - MaskedBertModel, -) -from .modules import * diff --git a/examples/research_projects/movement-pruning/emmental/configuration_bert_masked.py b/examples/research_projects/movement-pruning/emmental/configuration_bert_masked.py deleted file mode 100644 index 9c7459f27a7..00000000000 --- a/examples/research_projects/movement-pruning/emmental/configuration_bert_masked.py +++ /dev/null @@ -1,70 +0,0 @@ -# coding=utf-8 -# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. -# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Masked BERT model configuration. It replicates the class `~transformers.BertConfig` -and adapts it to the specificities of MaskedBert (`pruning_method`, `mask_init` and `mask_scale`.""" - -import logging - -from transformers.configuration_utils import PretrainedConfig - - -logger = logging.getLogger(__name__) - - -class MaskedBertConfig(PretrainedConfig): - """ - A class replicating the `~transformers.BertConfig` with additional parameters for pruning/masking configuration. 
- """ - - model_type = "masked_bert" - - def __init__( - self, - vocab_size=30522, - hidden_size=768, - num_hidden_layers=12, - num_attention_heads=12, - intermediate_size=3072, - hidden_act="gelu", - hidden_dropout_prob=0.1, - attention_probs_dropout_prob=0.1, - max_position_embeddings=512, - type_vocab_size=2, - initializer_range=0.02, - layer_norm_eps=1e-12, - pad_token_id=0, - pruning_method="topK", - mask_init="constant", - mask_scale=0.0, - **kwargs, - ): - super().__init__(pad_token_id=pad_token_id, **kwargs) - - self.vocab_size = vocab_size - self.hidden_size = hidden_size - self.num_hidden_layers = num_hidden_layers - self.num_attention_heads = num_attention_heads - self.hidden_act = hidden_act - self.intermediate_size = intermediate_size - self.hidden_dropout_prob = hidden_dropout_prob - self.attention_probs_dropout_prob = attention_probs_dropout_prob - self.max_position_embeddings = max_position_embeddings - self.type_vocab_size = type_vocab_size - self.initializer_range = initializer_range - self.layer_norm_eps = layer_norm_eps - self.pruning_method = pruning_method - self.mask_init = mask_init - self.mask_scale = mask_scale diff --git a/examples/research_projects/movement-pruning/emmental/modeling_bert_masked.py b/examples/research_projects/movement-pruning/emmental/modeling_bert_masked.py deleted file mode 100644 index 8c0b091c7de..00000000000 --- a/examples/research_projects/movement-pruning/emmental/modeling_bert_masked.py +++ /dev/null @@ -1,1019 +0,0 @@ -# coding=utf-8 -# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. -# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Masked Version of BERT. It replaces the `torch.nn.Linear` layers with -:class:`~emmental.MaskedLinear` and add an additional parameters in the forward pass to -compute the adaptive mask. 
-Built on top of `transformers.models.bert.modeling_bert`""" - -import logging -import math - -import torch -from torch import nn -from torch.nn import CrossEntropyLoss, MSELoss - -from emmental import MaskedBertConfig -from emmental.modules import MaskedLinear -from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward -from transformers.modeling_utils import PreTrainedModel, prune_linear_layer -from transformers.models.bert.modeling_bert import ACT2FN, load_tf_weights_in_bert - - -logger = logging.getLogger(__name__) - - -class BertEmbeddings(nn.Module): - """Construct the embeddings from word, position and token_type embeddings.""" - - def __init__(self, config): - super().__init__() - self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=0) - self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) - self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) - - # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load - # any TensorFlow checkpoint file - self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) - self.dropout = nn.Dropout(config.hidden_dropout_prob) - - def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None): - if input_ids is not None: - input_shape = input_ids.size() - else: - input_shape = inputs_embeds.size()[:-1] - - seq_length = input_shape[1] - device = input_ids.device if input_ids is not None else inputs_embeds.device - if position_ids is None: - position_ids = torch.arange(seq_length, dtype=torch.long, device=device) - position_ids = position_ids.unsqueeze(0).expand(input_shape) - if token_type_ids is None: - token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) - - if inputs_embeds is None: - inputs_embeds = self.word_embeddings(input_ids) - position_embeddings = self.position_embeddings(position_ids) - token_type_embeddings = self.token_type_embeddings(token_type_ids) - - embeddings = inputs_embeds + position_embeddings + token_type_embeddings - embeddings = self.LayerNorm(embeddings) - embeddings = self.dropout(embeddings) - return embeddings - - -class BertSelfAttention(nn.Module): - def __init__(self, config): - super().__init__() - if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): - raise ValueError( - "The hidden size (%d) is not a multiple of the number of attention heads (%d)" - % (config.hidden_size, config.num_attention_heads) - ) - self.output_attentions = config.output_attentions - - self.num_attention_heads = config.num_attention_heads - self.attention_head_size = int(config.hidden_size / config.num_attention_heads) - self.all_head_size = self.num_attention_heads * self.attention_head_size - - self.query = MaskedLinear( - config.hidden_size, - self.all_head_size, - pruning_method=config.pruning_method, - mask_init=config.mask_init, - mask_scale=config.mask_scale, - ) - self.key = MaskedLinear( - config.hidden_size, - self.all_head_size, - pruning_method=config.pruning_method, - mask_init=config.mask_init, - mask_scale=config.mask_scale, - ) - self.value = MaskedLinear( - config.hidden_size, - self.all_head_size, - pruning_method=config.pruning_method, - mask_init=config.mask_init, - mask_scale=config.mask_scale, - ) - - self.dropout = nn.Dropout(config.attention_probs_dropout_prob) - - def transpose_for_scores(self, x): - new_x_shape = 
x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) - x = x.view(*new_x_shape) - return x.permute(0, 2, 1, 3) - - def forward( - self, - hidden_states, - attention_mask=None, - head_mask=None, - encoder_hidden_states=None, - encoder_attention_mask=None, - threshold=None, - ): - mixed_query_layer = self.query(hidden_states, threshold=threshold) - - # If this is instantiated as a cross-attention module, the keys - # and values come from an encoder; the attention mask needs to be - # such that the encoder's padding tokens are not attended to. - if encoder_hidden_states is not None: - mixed_key_layer = self.key(encoder_hidden_states, threshold=threshold) - mixed_value_layer = self.value(encoder_hidden_states, threshold=threshold) - attention_mask = encoder_attention_mask - else: - mixed_key_layer = self.key(hidden_states, threshold=threshold) - mixed_value_layer = self.value(hidden_states, threshold=threshold) - - query_layer = self.transpose_for_scores(mixed_query_layer) - key_layer = self.transpose_for_scores(mixed_key_layer) - value_layer = self.transpose_for_scores(mixed_value_layer) - - # Take the dot product between "query" and "key" to get the raw attention scores. - attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) - attention_scores = attention_scores / math.sqrt(self.attention_head_size) - if attention_mask is not None: - # Apply the attention mask is (precomputed for all layers in BertModel forward() function) - attention_scores = attention_scores + attention_mask - - # Normalize the attention scores to probabilities. - attention_probs = nn.functional.softmax(attention_scores, dim=-1) - - # This is actually dropping out entire tokens to attend to, which might - # seem a bit unusual, but is taken from the original Transformer paper. 
- attention_probs = self.dropout(attention_probs) - - # Mask heads if we want to - if head_mask is not None: - attention_probs = attention_probs * head_mask - - context_layer = torch.matmul(attention_probs, value_layer) - - context_layer = context_layer.permute(0, 2, 1, 3).contiguous() - new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) - context_layer = context_layer.view(*new_context_layer_shape) - - outputs = (context_layer, attention_probs) if self.output_attentions else (context_layer,) - return outputs - - -class BertSelfOutput(nn.Module): - def __init__(self, config): - super().__init__() - self.dense = MaskedLinear( - config.hidden_size, - config.hidden_size, - pruning_method=config.pruning_method, - mask_init=config.mask_init, - mask_scale=config.mask_scale, - ) - self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) - self.dropout = nn.Dropout(config.hidden_dropout_prob) - - def forward(self, hidden_states, input_tensor, threshold): - hidden_states = self.dense(hidden_states, threshold=threshold) - hidden_states = self.dropout(hidden_states) - hidden_states = self.LayerNorm(hidden_states + input_tensor) - return hidden_states - - -class BertAttention(nn.Module): - def __init__(self, config): - super().__init__() - self.self = BertSelfAttention(config) - self.output = BertSelfOutput(config) - self.pruned_heads = set() - - def prune_heads(self, heads): - if len(heads) == 0: - return - mask = torch.ones(self.self.num_attention_heads, self.self.attention_head_size) - heads = set(heads) - self.pruned_heads # Convert to set and remove already pruned heads - for head in heads: - # Compute how many pruned heads are before the head and move the index accordingly - head = head - sum(1 if h < head else 0 for h in self.pruned_heads) - mask[head] = 0 - mask = mask.view(-1).contiguous().eq(1) - index = torch.arange(len(mask))[mask].long() - - # Prune linear layers - self.self.query = prune_linear_layer(self.self.query, index) - self.self.key = prune_linear_layer(self.self.key, index) - self.self.value = prune_linear_layer(self.self.value, index) - self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) - - # Update hyper params and store pruned heads - self.self.num_attention_heads = self.self.num_attention_heads - len(heads) - self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads - self.pruned_heads = self.pruned_heads.union(heads) - - def forward( - self, - hidden_states, - attention_mask=None, - head_mask=None, - encoder_hidden_states=None, - encoder_attention_mask=None, - threshold=None, - ): - self_outputs = self.self( - hidden_states, - attention_mask, - head_mask, - encoder_hidden_states, - encoder_attention_mask, - threshold=threshold, - ) - attention_output = self.output(self_outputs[0], hidden_states, threshold=threshold) - outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them - return outputs - - -class BertIntermediate(nn.Module): - def __init__(self, config): - super().__init__() - self.dense = MaskedLinear( - config.hidden_size, - config.intermediate_size, - pruning_method=config.pruning_method, - mask_init=config.mask_init, - mask_scale=config.mask_scale, - ) - if isinstance(config.hidden_act, str): - self.intermediate_act_fn = ACT2FN[config.hidden_act] - else: - self.intermediate_act_fn = config.hidden_act - - def forward(self, hidden_states, threshold): - hidden_states = self.dense(hidden_states, threshold=threshold) - hidden_states = 
self.intermediate_act_fn(hidden_states) - return hidden_states - - -class BertOutput(nn.Module): - def __init__(self, config): - super().__init__() - self.dense = MaskedLinear( - config.intermediate_size, - config.hidden_size, - pruning_method=config.pruning_method, - mask_init=config.mask_init, - mask_scale=config.mask_scale, - ) - self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) - self.dropout = nn.Dropout(config.hidden_dropout_prob) - - def forward(self, hidden_states, input_tensor, threshold): - hidden_states = self.dense(hidden_states, threshold=threshold) - hidden_states = self.dropout(hidden_states) - hidden_states = self.LayerNorm(hidden_states + input_tensor) - return hidden_states - - -class BertLayer(nn.Module): - def __init__(self, config): - super().__init__() - self.attention = BertAttention(config) - self.is_decoder = config.is_decoder - if self.is_decoder: - self.crossattention = BertAttention(config) - self.intermediate = BertIntermediate(config) - self.output = BertOutput(config) - - def forward( - self, - hidden_states, - attention_mask=None, - head_mask=None, - encoder_hidden_states=None, - encoder_attention_mask=None, - threshold=None, - ): - self_attention_outputs = self.attention(hidden_states, attention_mask, head_mask, threshold=threshold) - attention_output = self_attention_outputs[0] - outputs = self_attention_outputs[1:] # add self attentions if we output attention weights - - if self.is_decoder and encoder_hidden_states is not None: - cross_attention_outputs = self.crossattention( - attention_output, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask - ) - attention_output = cross_attention_outputs[0] - outputs = outputs + cross_attention_outputs[1:] # add cross attentions if we output attention weights - - intermediate_output = self.intermediate(attention_output, threshold=threshold) - layer_output = self.output(intermediate_output, attention_output, threshold=threshold) - outputs = (layer_output,) + outputs - return outputs - - -class BertEncoder(nn.Module): - def __init__(self, config): - super().__init__() - self.output_attentions = config.output_attentions - self.output_hidden_states = config.output_hidden_states - self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)]) - - def forward( - self, - hidden_states, - attention_mask=None, - head_mask=None, - encoder_hidden_states=None, - encoder_attention_mask=None, - threshold=None, - ): - all_hidden_states = () - all_attentions = () - for i, layer_module in enumerate(self.layer): - if self.output_hidden_states: - all_hidden_states = all_hidden_states + (hidden_states,) - - layer_outputs = layer_module( - hidden_states, - attention_mask, - head_mask[i], - encoder_hidden_states, - encoder_attention_mask, - threshold=threshold, - ) - hidden_states = layer_outputs[0] - - if self.output_attentions: - all_attentions = all_attentions + (layer_outputs[1],) - - # Add last layer - if self.output_hidden_states: - all_hidden_states = all_hidden_states + (hidden_states,) - - outputs = (hidden_states,) - if self.output_hidden_states: - outputs = outputs + (all_hidden_states,) - if self.output_attentions: - outputs = outputs + (all_attentions,) - return outputs # last-layer hidden state, (all hidden states), (all attentions) - - -class BertPooler(nn.Module): - def __init__(self, config): - super().__init__() - self.dense = nn.Linear(config.hidden_size, config.hidden_size) - self.activation = nn.Tanh() - - def forward(self, hidden_states): - # 
We "pool" the model by simply taking the hidden state corresponding - # to the first token. - first_token_tensor = hidden_states[:, 0] - pooled_output = self.dense(first_token_tensor) - pooled_output = self.activation(pooled_output) - return pooled_output - - -class MaskedBertPreTrainedModel(PreTrainedModel): - """An abstract class to handle weights initialization and - a simple interface for downloading and loading pretrained models. - """ - - config_class = MaskedBertConfig - load_tf_weights = load_tf_weights_in_bert - base_model_prefix = "bert" - - def _init_weights(self, module): - """Initialize the weights""" - if isinstance(module, (nn.Linear, nn.Embedding)): - # Slightly different from the TF version which uses truncated_normal for initialization - # cf https://github.com/pytorch/pytorch/pull/5617 - module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) - elif isinstance(module, nn.LayerNorm): - module.bias.data.zero_() - module.weight.data.fill_(1.0) - if isinstance(module, nn.Linear) and module.bias is not None: - module.bias.data.zero_() - - -MASKED_BERT_START_DOCSTRING = r""" - This model is a PyTorch `torch.nn.Module `_ sub-class. - Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general - usage and behavior. - - Parameters: - config (:class:`~emmental.MaskedBertConfig`): Model configuration class with all the parameters of the model. - Initializing with a config file does not load the weights associated with the model, only the configuration. - Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights. -""" - -MASKED_BERT_INPUTS_DOCSTRING = r""" - Args: - input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`): - Indices of input sequence tokens in the vocabulary. - - Indices can be obtained using :class:`transformers.BertTokenizer`. - See :func:`transformers.PreTrainedTokenizer.encode` and - :func:`transformers.PreTrainedTokenizer.__call__` for details. - - `What are input IDs? <../glossary.html#input-ids>`__ - attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): - Mask to avoid performing attention on padding token indices. - Mask values selected in ``[0, 1]``: - ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens. - - `What are attention masks? <../glossary.html#attention-mask>`__ - token_type_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): - Segment token indices to indicate first and second portions of the inputs. - Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1`` - corresponds to a `sentence B` token - - `What are token type IDs? <../glossary.html#token-type-ids>`_ - position_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): - Indices of positions of each input sequence tokens in the position embeddings. - Selected in the range ``[0, config.max_position_embeddings - 1]``. - - `What are position IDs? <../glossary.html#position-ids>`_ - head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`): - Mask to nullify selected heads of the self-attention modules. - Mask values selected in ``[0, 1]``: - :obj:`1` indicates the head is **not masked**, :obj:`0` indicates the head is **masked**. 
- inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`): - Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation. - This is useful if you want more control over how to convert `input_ids` indices into associated vectors - than the model's internal embedding lookup matrix. - encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`): - Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention - if the model is configured as a decoder. - encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): - Mask to avoid performing attention on the padding token indices of the encoder input. This mask - is used in the cross-attention if the model is configured as a decoder. - Mask values selected in ``[0, 1]``: - ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens. -""" - - -@add_start_docstrings( - "The bare Masked Bert Model transformer outputting raw hidden-states without any specific head on top.", - MASKED_BERT_START_DOCSTRING, -) -class MaskedBertModel(MaskedBertPreTrainedModel): - """ - The `MaskedBertModel` class replicates the :class:`~transformers.BertModel` class - and adds specific inputs to compute the adaptive mask on the fly. - Note that we freeze the embeddings modules from their pre-trained values. - """ - - def __init__(self, config): - super().__init__(config) - self.config = config - - self.embeddings = BertEmbeddings(config) - self.embeddings.requires_grad_(requires_grad=False) - self.encoder = BertEncoder(config) - self.pooler = BertPooler(config) - - self.init_weights() - - def get_input_embeddings(self): - return self.embeddings.word_embeddings - - def set_input_embeddings(self, value): - self.embeddings.word_embeddings = value - - def _prune_heads(self, heads_to_prune): - """Prunes heads of the model. - heads_to_prune: dict of {layer_num: list of heads to prune in this layer} - See base class PreTrainedModel - """ - for layer, heads in heads_to_prune.items(): - self.encoder.layer[layer].attention.prune_heads(heads) - - @add_start_docstrings_to_model_forward(MASKED_BERT_INPUTS_DOCSTRING) - def forward( - self, - input_ids=None, - attention_mask=None, - token_type_ids=None, - position_ids=None, - head_mask=None, - inputs_embeds=None, - encoder_hidden_states=None, - encoder_attention_mask=None, - threshold=None, - ): - r""" - threshold (:obj:`float`): - Threshold value (see :class:`~emmental.MaskedLinear`). - - Return: - :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~emmental.MaskedBertConfig`) and inputs: - last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`): - Sequence of hidden-states at the output of the last layer of the model. - pooler_output (:obj:`torch.FloatTensor`: of shape :obj:`(batch_size, hidden_size)`): - Last layer hidden-state of the first token of the sequence (classification token) - further processed by a Linear layer and a Tanh activation function. The Linear - layer weights are trained from the next sentence prediction (classification) - objective during pre-training. - - This output is usually *not* a good summary - of the semantic content of the input, you're often better with averaging or pooling - the sequence of hidden-states for the whole input sequence. 
- hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``): - Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) - of shape :obj:`(batch_size, sequence_length, hidden_size)`. - - Hidden-states of the model at the output of each layer plus the initial embedding outputs. - attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``): - Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape - :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. - - Attentions weights after the attention softmax, used to compute the weighted average in the self-attention - heads. - """ - - if input_ids is not None and inputs_embeds is not None: - raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") - elif input_ids is not None: - input_shape = input_ids.size() - elif inputs_embeds is not None: - input_shape = inputs_embeds.size()[:-1] - else: - raise ValueError("You have to specify either input_ids or inputs_embeds") - - device = input_ids.device if input_ids is not None else inputs_embeds.device - - if attention_mask is None: - attention_mask = torch.ones(input_shape, device=device) - if token_type_ids is None: - token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) - - # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] - # ourselves in which case we just need to make it broadcastable to all heads. - if attention_mask.dim() == 3: - extended_attention_mask = attention_mask[:, None, :, :] - elif attention_mask.dim() == 2: - # Provided a padding mask of dimensions [batch_size, seq_length] - # - if the model is a decoder, apply a causal mask in addition to the padding mask - # - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length] - if self.config.is_decoder: - batch_size, seq_length = input_shape - seq_ids = torch.arange(seq_length, device=device) - causal_mask = seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <= seq_ids[None, :, None] - causal_mask = causal_mask.to( - attention_mask.dtype - ) # causal and attention masks must have same type with pytorch version < 1.3 - extended_attention_mask = causal_mask[:, None, :, :] * attention_mask[:, None, None, :] - else: - extended_attention_mask = attention_mask[:, None, None, :] - else: - raise ValueError( - "Wrong shape for input_ids (shape {}) or attention_mask (shape {})".format( - input_shape, attention_mask.shape - ) - ) - - # Since attention_mask is 1.0 for positions we want to attend and 0.0 for - # masked positions, this operation will create a tensor which is 0.0 for - # positions we want to attend and -10000.0 for masked positions. - # Since we are adding it to the raw scores before the softmax, this is - # effectively the same as removing these entirely. 
- extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility - extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 - - # If a 2D ou 3D attention mask is provided for the cross-attention - # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] - if self.config.is_decoder and encoder_hidden_states is not None: - encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() - encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) - if encoder_attention_mask is None: - encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) - - if encoder_attention_mask.dim() == 3: - encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :] - elif encoder_attention_mask.dim() == 2: - encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :] - else: - raise ValueError( - "Wrong shape for encoder_hidden_shape (shape {}) or encoder_attention_mask (shape {})".format( - encoder_hidden_shape, encoder_attention_mask.shape - ) - ) - - encoder_extended_attention_mask = encoder_extended_attention_mask.to( - dtype=next(self.parameters()).dtype - ) # fp16 compatibility - encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0 - else: - encoder_extended_attention_mask = None - - # Prepare head mask if needed - # 1.0 in head_mask indicate we keep the head - # attention_probs has shape bsz x n_heads x N x N - # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] - # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] - if head_mask is not None: - if head_mask.dim() == 1: - head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1) - head_mask = head_mask.expand(self.config.num_hidden_layers, -1, -1, -1, -1) - elif head_mask.dim() == 2: - head_mask = ( - head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1) - ) # We can specify head_mask for each layer - head_mask = head_mask.to( - dtype=next(self.parameters()).dtype - ) # switch to float if need + fp16 compatibility - else: - head_mask = [None] * self.config.num_hidden_layers - - embedding_output = self.embeddings( - input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds - ) - encoder_outputs = self.encoder( - embedding_output, - attention_mask=extended_attention_mask, - head_mask=head_mask, - encoder_hidden_states=encoder_hidden_states, - encoder_attention_mask=encoder_extended_attention_mask, - threshold=threshold, - ) - sequence_output = encoder_outputs[0] - pooled_output = self.pooler(sequence_output) - - outputs = ( - sequence_output, - pooled_output, - ) + encoder_outputs[1:] # add hidden_states and attentions if they are here - return outputs # sequence_output, pooled_output, (hidden_states), (attentions) - - -@add_start_docstrings( - """Masked Bert Model transformer with a sequence classification/regression head on top (a linear layer on top of - the pooled output) e.g. for GLUE tasks. 
""", - MASKED_BERT_START_DOCSTRING, -) -class MaskedBertForSequenceClassification(MaskedBertPreTrainedModel): - def __init__(self, config): - super().__init__(config) - self.num_labels = config.num_labels - - self.bert = MaskedBertModel(config) - self.dropout = nn.Dropout(config.hidden_dropout_prob) - self.classifier = nn.Linear(config.hidden_size, self.config.num_labels) - - self.init_weights() - - @add_start_docstrings_to_model_forward(MASKED_BERT_INPUTS_DOCSTRING) - def forward( - self, - input_ids=None, - attention_mask=None, - token_type_ids=None, - position_ids=None, - head_mask=None, - inputs_embeds=None, - labels=None, - threshold=None, - ): - r""" - labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): - Labels for computing the sequence classification/regression loss. - Indices should be in :obj:`[0, ..., config.num_labels - 1]`. - If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss), - If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy). - threshold (:obj:`float`): - Threshold value (see :class:`~emmental.MaskedLinear`). - - Returns: - :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~emmental.MaskedBertConfig`) and inputs: - loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`label` is provided): - Classification (or regression if config.num_labels==1) loss. - logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, config.num_labels)`): - Classification (or regression if config.num_labels==1) scores (before SoftMax). - hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``): - Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) - of shape :obj:`(batch_size, sequence_length, hidden_size)`. - - Hidden-states of the model at the output of each layer plus the initial embedding outputs. - attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``): - Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape - :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. - - Attentions weights after the attention softmax, used to compute the weighted average in the self-attention - heads. - """ - - outputs = self.bert( - input_ids, - attention_mask=attention_mask, - token_type_ids=token_type_ids, - position_ids=position_ids, - head_mask=head_mask, - inputs_embeds=inputs_embeds, - threshold=threshold, - ) - - pooled_output = outputs[1] - - pooled_output = self.dropout(pooled_output) - logits = self.classifier(pooled_output) - - outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here - - if labels is not None: - if self.num_labels == 1: - # We are doing regression - loss_fct = MSELoss() - loss = loss_fct(logits.view(-1), labels.view(-1)) - else: - loss_fct = CrossEntropyLoss() - loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) - outputs = (loss,) + outputs - - return outputs # (loss), logits, (hidden_states), (attentions) - - -@add_start_docstrings( - """Masked Bert Model with a multiple choice classification head on top (a linear layer on top of - the pooled output and a softmax) e.g. for RocStories/SWAG tasks. 
""", - MASKED_BERT_START_DOCSTRING, -) -class MaskedBertForMultipleChoice(MaskedBertPreTrainedModel): - def __init__(self, config): - super().__init__(config) - - self.bert = MaskedBertModel(config) - self.dropout = nn.Dropout(config.hidden_dropout_prob) - self.classifier = nn.Linear(config.hidden_size, 1) - - self.init_weights() - - @add_start_docstrings_to_model_forward(MASKED_BERT_INPUTS_DOCSTRING) - def forward( - self, - input_ids=None, - attention_mask=None, - token_type_ids=None, - position_ids=None, - head_mask=None, - inputs_embeds=None, - labels=None, - threshold=None, - ): - r""" - labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): - Labels for computing the multiple choice classification loss. - Indices should be in ``[0, ..., num_choices]`` where `num_choices` is the size of the second dimension - of the input tensors. (see `input_ids` above) - threshold (:obj:`float`): - Threshold value (see :class:`~emmental.MaskedLinear`). - - Returns: - :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~emmental.MaskedBertConfig`) and inputs: - loss (:obj:`torch.FloatTensor` of shape `(1,)`, `optional`, returned when :obj:`labels` is provided): - Classification loss. - classification_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_choices)`): - `num_choices` is the second dimension of the input tensors. (see `input_ids` above). - - Classification scores (before SoftMax). - hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``): - Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) - of shape :obj:`(batch_size, sequence_length, hidden_size)`. - - Hidden-states of the model at the output of each layer plus the initial embedding outputs. - attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``): - Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape - :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. - - Attentions weights after the attention softmax, used to compute the weighted average in the self-attention - heads. - - """ - num_choices = input_ids.shape[1] - - input_ids = input_ids.view(-1, input_ids.size(-1)) - attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None - token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None - position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None - - outputs = self.bert( - input_ids, - attention_mask=attention_mask, - token_type_ids=token_type_ids, - position_ids=position_ids, - head_mask=head_mask, - inputs_embeds=inputs_embeds, - threshold=threshold, - ) - - pooled_output = outputs[1] - - pooled_output = self.dropout(pooled_output) - logits = self.classifier(pooled_output) - reshaped_logits = logits.view(-1, num_choices) - - outputs = (reshaped_logits,) + outputs[2:] # add hidden states and attention if they are here - - if labels is not None: - loss_fct = CrossEntropyLoss() - loss = loss_fct(reshaped_logits, labels) - outputs = (loss,) + outputs - - return outputs # (loss), reshaped_logits, (hidden_states), (attentions) - - -@add_start_docstrings( - """Masked Bert Model with a token classification head on top (a linear layer on top of - the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. 
""", - MASKED_BERT_START_DOCSTRING, -) -class MaskedBertForTokenClassification(MaskedBertPreTrainedModel): - def __init__(self, config): - super().__init__(config) - self.num_labels = config.num_labels - - self.bert = MaskedBertModel(config) - self.dropout = nn.Dropout(config.hidden_dropout_prob) - self.classifier = nn.Linear(config.hidden_size, config.num_labels) - - self.init_weights() - - @add_start_docstrings_to_model_forward(MASKED_BERT_INPUTS_DOCSTRING) - def forward( - self, - input_ids=None, - attention_mask=None, - token_type_ids=None, - position_ids=None, - head_mask=None, - inputs_embeds=None, - labels=None, - threshold=None, - ): - r""" - labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): - Labels for computing the token classification loss. - Indices should be in ``[0, ..., config.num_labels - 1]``. - threshold (:obj:`float`): - Threshold value (see :class:`~emmental.MaskedLinear`). - - Returns: - :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~emmental.MaskedBertConfig`) and inputs: - loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when ``labels`` is provided) : - Classification loss. - scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.num_labels)`) - Classification scores (before SoftMax). - hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``): - Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) - of shape :obj:`(batch_size, sequence_length, hidden_size)`. - - Hidden-states of the model at the output of each layer plus the initial embedding outputs. - attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``): - Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape - :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. - - Attentions weights after the attention softmax, used to compute the weighted average in the self-attention - heads. - """ - - outputs = self.bert( - input_ids, - attention_mask=attention_mask, - token_type_ids=token_type_ids, - position_ids=position_ids, - head_mask=head_mask, - inputs_embeds=inputs_embeds, - threshold=threshold, - ) - - sequence_output = outputs[0] - - sequence_output = self.dropout(sequence_output) - logits = self.classifier(sequence_output) - - outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here - if labels is not None: - loss_fct = CrossEntropyLoss() - # Only keep active parts of the loss - if attention_mask is not None: - active_loss = attention_mask.view(-1) == 1 - active_logits = logits.view(-1, self.num_labels) - active_labels = torch.where( - active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels) - ) - loss = loss_fct(active_logits, active_labels) - else: - loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) - outputs = (loss,) + outputs - - return outputs # (loss), scores, (hidden_states), (attentions) - - -@add_start_docstrings( - """Masked Bert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear - layers on top of the hidden-states output to compute `span start logits` and `span end logits`). 
""", - MASKED_BERT_START_DOCSTRING, -) -class MaskedBertForQuestionAnswering(MaskedBertPreTrainedModel): - def __init__(self, config): - super().__init__(config) - self.num_labels = config.num_labels - - self.bert = MaskedBertModel(config) - self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) - - self.init_weights() - - @add_start_docstrings_to_model_forward(MASKED_BERT_INPUTS_DOCSTRING) - def forward( - self, - input_ids=None, - attention_mask=None, - token_type_ids=None, - position_ids=None, - head_mask=None, - inputs_embeds=None, - start_positions=None, - end_positions=None, - threshold=None, - ): - r""" - start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): - Labels for position (index) of the start of the labelled span for computing the token classification loss. - Positions are clamped to the length of the sequence (`sequence_length`). - Position outside of the sequence are not taken into account for computing the loss. - end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): - Labels for position (index) of the end of the labelled span for computing the token classification loss. - Positions are clamped to the length of the sequence (`sequence_length`). - Position outside of the sequence are not taken into account for computing the loss. - threshold (:obj:`float`): - Threshold value (see :class:`~emmental.MaskedLinear`). - - Returns: - :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~emmental.MaskedBertConfig`) and inputs: - loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided): - Total span extraction loss is the sum of a Cross-Entropy for the start and end positions. - start_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length,)`): - Span-start scores (before SoftMax). - end_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length,)`): - Span-end scores (before SoftMax). - hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``): - Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) - of shape :obj:`(batch_size, sequence_length, hidden_size)`. - - Hidden-states of the model at the output of each layer plus the initial embedding outputs. - attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``): - Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape - :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. - - Attentions weights after the attention softmax, used to compute the weighted average in the self-attention - heads. 
- """ - - outputs = self.bert( - input_ids, - attention_mask=attention_mask, - token_type_ids=token_type_ids, - position_ids=position_ids, - head_mask=head_mask, - inputs_embeds=inputs_embeds, - threshold=threshold, - ) - - sequence_output = outputs[0] - - logits = self.qa_outputs(sequence_output) - start_logits, end_logits = logits.split(1, dim=-1) - start_logits = start_logits.squeeze(-1) - end_logits = end_logits.squeeze(-1) - - outputs = ( - start_logits, - end_logits, - ) + outputs[2:] - if start_positions is not None and end_positions is not None: - # If we are on multi-GPU, split add a dimension - if len(start_positions.size()) > 1: - start_positions = start_positions.squeeze(-1) - if len(end_positions.size()) > 1: - end_positions = end_positions.squeeze(-1) - # sometimes the start/end positions are outside our model inputs, we ignore these terms - ignored_index = start_logits.size(1) - start_positions.clamp_(0, ignored_index) - end_positions.clamp_(0, ignored_index) - - loss_fct = CrossEntropyLoss(ignore_index=ignored_index) - start_loss = loss_fct(start_logits, start_positions) - end_loss = loss_fct(end_logits, end_positions) - total_loss = (start_loss + end_loss) / 2 - outputs = (total_loss,) + outputs - - return outputs # (loss), start_logits, end_logits, (hidden_states), (attentions) diff --git a/examples/research_projects/movement-pruning/emmental/modules/__init__.py b/examples/research_projects/movement-pruning/emmental/modules/__init__.py deleted file mode 100644 index 761a6343d6b..00000000000 --- a/examples/research_projects/movement-pruning/emmental/modules/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from .binarizer import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer -from .masked_nn import MaskedLinear diff --git a/examples/research_projects/movement-pruning/emmental/modules/binarizer.py b/examples/research_projects/movement-pruning/emmental/modules/binarizer.py deleted file mode 100644 index c96975e3b37..00000000000 --- a/examples/research_projects/movement-pruning/emmental/modules/binarizer.py +++ /dev/null @@ -1,144 +0,0 @@ -# coding=utf-8 -# Copyright 2020-present, AllenAI Authors, University of Illinois Urbana-Champaign, -# Intel Nervana Systems and the HuggingFace Inc. team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" -Binarizers take a (real value) matrix as input and produce a binary (values in {0,1}) mask of the same shape. -""" - -import torch -from torch import autograd - - -class ThresholdBinarizer(autograd.Function): - """ - Thresholdd binarizer. - Computes a binary mask M from a real value matrix S such that `M_{i,j} = 1` if and only if `S_{i,j} > \tau` - where `\tau` is a real value threshold. 
- - Implementation is inspired from: - https://github.com/arunmallya/piggyback - Piggyback: Adapting a Single Network to Multiple Tasks by Learning to Mask Weights - Arun Mallya, Dillon Davis, Svetlana Lazebnik - """ - - @staticmethod - def forward(ctx, inputs: torch.tensor, threshold: float, sigmoid: bool): - """ - Args: - inputs (`torch.FloatTensor`) - The input matrix from which the binarizer computes the binary mask. - threshold (`float`) - The threshold value (in R). - sigmoid (`bool`) - If set to ``True``, we apply the sigmoid function to the `inputs` matrix before comparing to `threshold`. - In this case, `threshold` should be a value between 0 and 1. - Returns: - mask (`torch.FloatTensor`) - Binary matrix of the same size as `inputs` acting as a mask (1 - the associated weight is - retained, 0 - the associated weight is pruned). - """ - nb_elems = inputs.numel() - nb_min = int(0.005 * nb_elems) + 1 - if sigmoid: - mask = (torch.sigmoid(inputs) > threshold).type(inputs.type()) - else: - mask = (inputs > threshold).type(inputs.type()) - if mask.sum() < nb_min: - # We limit the pruning so that at least 0.5% (half a percent) of the weights are remaining - k_threshold = inputs.flatten().kthvalue(max(nb_elems - nb_min, 1)).values - mask = (inputs > k_threshold).type(inputs.type()) - return mask - - @staticmethod - def backward(ctx, gradOutput): - return gradOutput, None, None - - -class TopKBinarizer(autograd.Function): - """ - Top-k Binarizer. - Computes a binary mask M from a real value matrix S such that `M_{i,j} = 1` if and only if `S_{i,j}` - is among the k% highest values of S. - - Implementation is inspired from: - https://github.com/allenai/hidden-networks - What's hidden in a randomly weighted neural network? - Vivek Ramanujan*, Mitchell Wortsman*, Aniruddha Kembhavi, Ali Farhadi, Mohammad Rastegari - """ - - @staticmethod - def forward(ctx, inputs: torch.tensor, threshold: float): - """ - Args: - inputs (`torch.FloatTensor`) - The input matrix from which the binarizer computes the binary mask. - threshold (`float`) - The percentage of weights to keep (the rest is pruned). - `threshold` is a float between 0 and 1. - Returns: - mask (`torch.FloatTensor`) - Binary matrix of the same size as `inputs` acting as a mask (1 - the associated weight is - retained, 0 - the associated weight is pruned). - """ - # Get the subnetwork by sorting the inputs and using the top threshold % - mask = inputs.clone() - _, idx = inputs.flatten().sort(descending=True) - j = int(threshold * inputs.numel()) - - # flat_out and mask access the same memory. - flat_out = mask.flatten() - flat_out[idx[j:]] = 0 - flat_out[idx[:j]] = 1 - return mask - - @staticmethod - def backward(ctx, gradOutput): - return gradOutput, None - - -class MagnitudeBinarizer: - """ - Magnitude Binarizer. - Computes a binary mask M from a real value matrix S such that `M_{i,j} = 1` if and only if `S_{i,j}` - is among the k% highest values of |S| (absolute value). - - Implementation is inspired from https://github.com/NervanaSystems/distiller/blob/2291fdcc2ea642a98d4e20629acb5a9e2e04b4e6/distiller/pruning/automated_gradual_pruner.py#L24 - """ - - @staticmethod - def apply(inputs: torch.tensor, threshold: float): - """ - Args: - inputs (`torch.FloatTensor`) - The input matrix from which the binarizer computes the binary mask. - This input marix is typically the weight matrix. - threshold (`float`) - The percentage of weights to keep (the rest is pruned). - `threshold` is a float between 0 and 1. 
- Returns: - mask (`torch.FloatTensor`) - Binary matrix of the same size as `inputs` acting as a mask (1 - the associated weight is - retained, 0 - the associated weight is pruned). - """ - # Get the subnetwork by sorting the inputs and using the top threshold % - mask = inputs.clone() - _, idx = inputs.abs().flatten().sort(descending=True) - j = int(threshold * inputs.numel()) - - # flat_out and mask access the same memory. - flat_out = mask.flatten() - flat_out[idx[j:]] = 0 - flat_out[idx[:j]] = 1 - return mask diff --git a/examples/research_projects/movement-pruning/emmental/modules/masked_nn.py b/examples/research_projects/movement-pruning/emmental/modules/masked_nn.py deleted file mode 100644 index e3c94836851..00000000000 --- a/examples/research_projects/movement-pruning/emmental/modules/masked_nn.py +++ /dev/null @@ -1,106 +0,0 @@ -# coding=utf-8 -# Copyright 2020-present, the HuggingFace Inc. team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" -Masked Linear module: A fully connected layer that computes an adaptive binary mask on the fly. -The mask (binary or not) is computed at each forward pass and multiplied against -the weight matrix to prune a portion of the weights. -The pruned weight matrix is then multiplied against the inputs (and if necessary, the bias is added). -""" - -import math - -import torch -from torch import nn -from torch.nn import init - -from .binarizer import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer - - -class MaskedLinear(nn.Linear): - """ - Fully Connected layer with on the fly adaptive mask. - If needed, a score matrix is created to store the importance of each associated weight. - """ - - def __init__( - self, - in_features: int, - out_features: int, - bias: bool = True, - mask_init: str = "constant", - mask_scale: float = 0.0, - pruning_method: str = "topK", - ): - """ - Args: - in_features (`int`) - Size of each input sample - out_features (`int`) - Size of each output sample - bias (`bool`) - If set to ``False``, the layer will not learn an additive bias. - Default: ``True`` - mask_init (`str`) - The initialization method for the score matrix if a score matrix is needed. - Choices: ["constant", "uniform", "kaiming"] - Default: ``constant`` - mask_scale (`float`) - The initialization parameter for the chosen initialization method `mask_init`. - Default: ``0.`` - pruning_method (`str`) - Method to compute the mask. 
- Choices: ["topK", "threshold", "sigmoied_threshold", "magnitude", "l0"] - Default: ``topK`` - """ - super(MaskedLinear, self).__init__(in_features=in_features, out_features=out_features, bias=bias) - assert pruning_method in ["topK", "threshold", "sigmoied_threshold", "magnitude", "l0"] - self.pruning_method = pruning_method - - if self.pruning_method in ["topK", "threshold", "sigmoied_threshold", "l0"]: - self.mask_scale = mask_scale - self.mask_init = mask_init - self.mask_scores = nn.Parameter(torch.empty(self.weight.size())) - self.init_mask() - - def init_mask(self): - if self.mask_init == "constant": - init.constant_(self.mask_scores, val=self.mask_scale) - elif self.mask_init == "uniform": - init.uniform_(self.mask_scores, a=-self.mask_scale, b=self.mask_scale) - elif self.mask_init == "kaiming": - init.kaiming_uniform_(self.mask_scores, a=math.sqrt(5)) - - def forward(self, input: torch.tensor, threshold: float): - # Get the mask - if self.pruning_method == "topK": - mask = TopKBinarizer.apply(self.mask_scores, threshold) - elif self.pruning_method in ["threshold", "sigmoied_threshold"]: - sig = "sigmoied" in self.pruning_method - mask = ThresholdBinarizer.apply(self.mask_scores, threshold, sig) - elif self.pruning_method == "magnitude": - mask = MagnitudeBinarizer.apply(self.weight, threshold) - elif self.pruning_method == "l0": - l, r, b = -0.1, 1.1, 2 / 3 - if self.training: - u = torch.zeros_like(self.mask_scores).uniform_().clamp(0.0001, 0.9999) - s = torch.sigmoid((u.log() - (1 - u).log() + self.mask_scores) / b) - else: - s = torch.sigmoid(self.mask_scores) - s_bar = s * (r - l) + l - mask = s_bar.clamp(min=0.0, max=1.0) - # Mask weights with computed mask - weight_thresholded = mask * self.weight - # Compute output (linear layer) with masked weights - return nn.functional.linear(input, weight_thresholded, self.bias) diff --git a/examples/research_projects/movement-pruning/masked_run_glue.py b/examples/research_projects/movement-pruning/masked_run_glue.py deleted file mode 100644 index 4ddb4248357..00000000000 --- a/examples/research_projects/movement-pruning/masked_run_glue.py +++ /dev/null @@ -1,962 +0,0 @@ -# coding=utf-8 -# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. -# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-"""Fine-pruning Masked BERT on sequence classification on GLUE.""" - -import argparse -import glob -import json -import logging -import os -import random - -import numpy as np -import torch -from emmental import MaskedBertConfig, MaskedBertForSequenceClassification -from torch import nn -from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset -from torch.utils.data.distributed import DistributedSampler -from tqdm import tqdm, trange - -from transformers import ( - WEIGHTS_NAME, - AdamW, - BertConfig, - BertForSequenceClassification, - BertTokenizer, - get_linear_schedule_with_warmup, -) -from transformers import glue_compute_metrics as compute_metrics -from transformers import glue_convert_examples_to_features as convert_examples_to_features -from transformers import glue_output_modes as output_modes -from transformers import glue_processors as processors - - -try: - from torch.utils.tensorboard import SummaryWriter -except ImportError: - from tensorboardX import SummaryWriter - - -logger = logging.getLogger(__name__) - -MODEL_CLASSES = { - "bert": (BertConfig, BertForSequenceClassification, BertTokenizer), - "masked_bert": (MaskedBertConfig, MaskedBertForSequenceClassification, BertTokenizer), -} - - -def set_seed(args): - random.seed(args.seed) - np.random.seed(args.seed) - torch.manual_seed(args.seed) - if args.n_gpu > 0: - torch.cuda.manual_seed_all(args.seed) - - -def schedule_threshold( - step: int, - total_step: int, - warmup_steps: int, - initial_threshold: float, - final_threshold: float, - initial_warmup: int, - final_warmup: int, - final_lambda: float, -): - if step <= initial_warmup * warmup_steps: - threshold = initial_threshold - elif step > (total_step - final_warmup * warmup_steps): - threshold = final_threshold - else: - spars_warmup_steps = initial_warmup * warmup_steps - spars_schedu_steps = (final_warmup + initial_warmup) * warmup_steps - mul_coeff = 1 - (step - spars_warmup_steps) / (total_step - spars_schedu_steps) - threshold = final_threshold + (initial_threshold - final_threshold) * (mul_coeff**3) - regu_lambda = final_lambda * threshold / final_threshold - return threshold, regu_lambda - - -def regularization(model: nn.Module, mode: str): - regu, counter = 0, 0 - for name, param in model.named_parameters(): - if "mask_scores" in name: - if mode == "l1": - regu += torch.norm(torch.sigmoid(param), p=1) / param.numel() - elif mode == "l0": - regu += torch.sigmoid(param - 2 / 3 * np.log(0.1 / 1.1)).sum() / param.numel() - else: - raise ValueError("Don't know this mode.") - counter += 1 - return regu / counter - - -def train(args, train_dataset, model, tokenizer, teacher=None): - """Train the model""" - if args.local_rank in [-1, 0]: - tb_writer = SummaryWriter(log_dir=args.output_dir) - - args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu) - train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset) - train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size) - - if args.max_steps > 0: - t_total = args.max_steps - args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1 - else: - t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs - - # Prepare optimizer and schedule (linear warmup and decay) - no_decay = ["bias", "LayerNorm.weight"] - optimizer_grouped_parameters = [ - { - "params": [p for n, p in model.named_parameters() if 
"mask_score" in n and p.requires_grad], - "lr": args.mask_scores_learning_rate, - }, - { - "params": [ - p - for n, p in model.named_parameters() - if "mask_score" not in n and p.requires_grad and not any(nd in n for nd in no_decay) - ], - "lr": args.learning_rate, - "weight_decay": args.weight_decay, - }, - { - "params": [ - p - for n, p in model.named_parameters() - if "mask_score" not in n and p.requires_grad and any(nd in n for nd in no_decay) - ], - "lr": args.learning_rate, - "weight_decay": 0.0, - }, - ] - - optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) - scheduler = get_linear_schedule_with_warmup( - optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total - ) - - # Check if saved optimizer or scheduler states exist - if os.path.isfile(os.path.join(args.model_name_or_path, "optimizer.pt")) and os.path.isfile( - os.path.join(args.model_name_or_path, "scheduler.pt") - ): - # Load in optimizer and scheduler states - optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "optimizer.pt"))) - scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "scheduler.pt"))) - - if args.fp16: - try: - from apex import amp - except ImportError: - raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") - model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level) - - # multi-gpu training (should be after apex fp16 initialization) - if args.n_gpu > 1: - model = nn.DataParallel(model) - - # Distributed training (should be after apex fp16 initialization) - if args.local_rank != -1: - model = nn.parallel.DistributedDataParallel( - model, - device_ids=[args.local_rank], - output_device=args.local_rank, - find_unused_parameters=True, - ) - - # Train! - logger.info("***** Running training *****") - logger.info(" Num examples = %d", len(train_dataset)) - logger.info(" Num Epochs = %d", args.num_train_epochs) - logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size) - logger.info( - " Total train batch size (w. 
parallel, distributed & accumulation) = %d", - args.train_batch_size - * args.gradient_accumulation_steps - * (torch.distributed.get_world_size() if args.local_rank != -1 else 1), - ) - logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps) - logger.info(" Total optimization steps = %d", t_total) - # Distillation - if teacher is not None: - logger.info(" Training with distillation") - - global_step = 0 - # Global TopK - if args.global_topk: - threshold_mem = None - epochs_trained = 0 - steps_trained_in_current_epoch = 0 - # Check if continuing training from a checkpoint - if os.path.exists(args.model_name_or_path): - # set global_step to global_step of last saved checkpoint from model path - try: - global_step = int(args.model_name_or_path.split("-")[-1].split("/")[0]) - except ValueError: - global_step = 0 - epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps) - steps_trained_in_current_epoch = global_step % (len(train_dataloader) // args.gradient_accumulation_steps) - - logger.info(" Continuing training from checkpoint, will skip to saved global_step") - logger.info(" Continuing training from epoch %d", epochs_trained) - logger.info(" Continuing training from global step %d", global_step) - logger.info(" Will skip the first %d steps in the first epoch", steps_trained_in_current_epoch) - - tr_loss, logging_loss = 0.0, 0.0 - model.zero_grad() - train_iterator = trange( - epochs_trained, - int(args.num_train_epochs), - desc="Epoch", - disable=args.local_rank not in [-1, 0], - ) - set_seed(args) # Added here for reproducibility - for _ in train_iterator: - epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0]) - for step, batch in enumerate(epoch_iterator): - # Skip past any already trained steps if resuming training - if steps_trained_in_current_epoch > 0: - steps_trained_in_current_epoch -= 1 - continue - - model.train() - batch = tuple(t.to(args.device) for t in batch) - threshold, regu_lambda = schedule_threshold( - step=global_step, - total_step=t_total, - warmup_steps=args.warmup_steps, - final_threshold=args.final_threshold, - initial_threshold=args.initial_threshold, - final_warmup=args.final_warmup, - initial_warmup=args.initial_warmup, - final_lambda=args.final_lambda, - ) - # Global TopK - if args.global_topk: - if threshold == 1.0: - threshold = -1e2 # Or an indefinitely low quantity - else: - if (threshold_mem is None) or (global_step % args.global_topk_frequency_compute == 0): - # Sort all the values to get the global topK - concat = torch.cat( - [param.view(-1) for name, param in model.named_parameters() if "mask_scores" in name] - ) - n = concat.numel() - kth = max(n - (int(n * threshold) + 1), 1) - threshold_mem = concat.kthvalue(kth).values.item() - threshold = threshold_mem - else: - threshold = threshold_mem - inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]} - if args.model_type != "distilbert": - inputs["token_type_ids"] = ( - batch[2] if args.model_type in ["bert", "masked_bert", "xlnet", "albert"] else None - ) # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids - - if "masked" in args.model_type: - inputs["threshold"] = threshold - - outputs = model(**inputs) - loss, logits_stu = outputs # model outputs are always tuple in transformers (see doc) - - # Distillation loss - if teacher is not None: - if "token_type_ids" not in inputs: - inputs["token_type_ids"] = None if args.teacher_type == "xlm" else batch[2] - with 
torch.no_grad(): - (logits_tea,) = teacher( - input_ids=inputs["input_ids"], - token_type_ids=inputs["token_type_ids"], - attention_mask=inputs["attention_mask"], - ) - - loss_logits = nn.functional.kl_div( - input=nn.functional.log_softmax(logits_stu / args.temperature, dim=-1), - target=nn.functional.softmax(logits_tea / args.temperature, dim=-1), - reduction="batchmean", - ) * (args.temperature**2) - - loss = args.alpha_distil * loss_logits + args.alpha_ce * loss - - # Regularization - if args.regularization is not None: - regu_ = regularization(model=model, mode=args.regularization) - loss = loss + regu_lambda * regu_ - - if args.n_gpu > 1: - loss = loss.mean() # mean() to average on multi-gpu parallel training - if args.gradient_accumulation_steps > 1: - loss = loss / args.gradient_accumulation_steps - - if args.fp16: - with amp.scale_loss(loss, optimizer) as scaled_loss: - scaled_loss.backward() - else: - loss.backward() - - tr_loss += loss.item() - if (step + 1) % args.gradient_accumulation_steps == 0 or ( - # last step in epoch but step is always smaller than gradient_accumulation_steps - len(epoch_iterator) <= args.gradient_accumulation_steps and (step + 1) == len(epoch_iterator) - ): - if args.fp16: - nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm) - else: - nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm) - - if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0: - tb_writer.add_scalar("threshold", threshold, global_step) - for name, param in model.named_parameters(): - if not param.requires_grad: - continue - tb_writer.add_scalar("parameter_mean/" + name, param.data.mean(), global_step) - tb_writer.add_scalar("parameter_std/" + name, param.data.std(), global_step) - tb_writer.add_scalar("parameter_min/" + name, param.data.min(), global_step) - tb_writer.add_scalar("parameter_max/" + name, param.data.max(), global_step) - tb_writer.add_scalar("grad_mean/" + name, param.grad.data.mean(), global_step) - tb_writer.add_scalar("grad_std/" + name, param.grad.data.std(), global_step) - if args.regularization is not None and "mask_scores" in name: - if args.regularization == "l1": - perc = (torch.sigmoid(param) > threshold).sum().item() / param.numel() - elif args.regularization == "l0": - perc = (torch.sigmoid(param - 2 / 3 * np.log(0.1 / 1.1))).sum().item() / param.numel() - tb_writer.add_scalar("retained_weights_perc/" + name, perc, global_step) - - optimizer.step() - scheduler.step() # Update learning rate schedule - model.zero_grad() - global_step += 1 - - if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0: - logs = {} - if ( - args.local_rank == -1 and args.evaluate_during_training - ): # Only evaluate when single GPU otherwise metrics may not average well - results = evaluate(args, model, tokenizer) - for key, value in results.items(): - eval_key = "eval_{}".format(key) - logs[eval_key] = value - - loss_scalar = (tr_loss - logging_loss) / args.logging_steps - learning_rate_scalar = scheduler.get_lr() - logs["learning_rate"] = learning_rate_scalar[0] - if len(learning_rate_scalar) > 1: - for idx, lr in enumerate(learning_rate_scalar[1:]): - logs[f"learning_rate/{idx+1}"] = lr - logs["loss"] = loss_scalar - if teacher is not None: - logs["loss/distil"] = loss_logits.item() - if args.regularization is not None: - logs["loss/regularization"] = regu_.item() - if (teacher is not None) or (args.regularization is not None): - if (teacher is not None) and 
(args.regularization is not None): - logs["loss/instant_ce"] = ( - loss.item() - - regu_lambda * logs["loss/regularization"] - - args.alpha_distil * logs["loss/distil"] - ) / args.alpha_ce - elif teacher is not None: - logs["loss/instant_ce"] = ( - loss.item() - args.alpha_distil * logs["loss/distil"] - ) / args.alpha_ce - else: - logs["loss/instant_ce"] = loss.item() - regu_lambda * logs["loss/regularization"] - logging_loss = tr_loss - - for key, value in logs.items(): - tb_writer.add_scalar(key, value, global_step) - print(json.dumps({**logs, **{"step": global_step}})) - - if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0: - # Save model checkpoint - output_dir = os.path.join(args.output_dir, "checkpoint-{}".format(global_step)) - if not os.path.exists(output_dir): - os.makedirs(output_dir) - model_to_save = ( - model.module if hasattr(model, "module") else model - ) # Take care of distributed/parallel training - model_to_save.save_pretrained(output_dir) - tokenizer.save_pretrained(output_dir) - - torch.save(args, os.path.join(output_dir, "training_args.bin")) - logger.info("Saving model checkpoint to %s", output_dir) - - torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt")) - torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt")) - logger.info("Saving optimizer and scheduler states to %s", output_dir) - - if args.max_steps > 0 and global_step > args.max_steps: - epoch_iterator.close() - break - if args.max_steps > 0 and global_step > args.max_steps: - train_iterator.close() - break - - if args.local_rank in [-1, 0]: - tb_writer.close() - - return global_step, tr_loss / global_step - - -def evaluate(args, model, tokenizer, prefix=""): - # Loop to handle MNLI double evaluation (matched, mis-matched) - eval_task_names = ("mnli", "mnli-mm") if args.task_name == "mnli" else (args.task_name,) - eval_outputs_dirs = (args.output_dir, args.output_dir + "/MM") if args.task_name == "mnli" else (args.output_dir,) - - results = {} - for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs): - eval_dataset = load_and_cache_examples(args, eval_task, tokenizer, evaluate=True) - - if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]: - os.makedirs(eval_output_dir) - - args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu) - # Note that DistributedSampler samples randomly - eval_sampler = SequentialSampler(eval_dataset) - eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size) - - # multi-gpu eval - if args.n_gpu > 1 and not isinstance(model, nn.DataParallel): - model = nn.DataParallel(model) - - # Eval! 
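Two details of the training loop above are easy to miss in a large diff: the cubic schedule that anneals the fraction of retained weights from `initial_threshold` down to `final_threshold`, and the temperature-scaled KL distillation term that is blended with the task loss when a teacher is provided. A small illustrative sketch of both, with made-up hyper-parameter values rather than the script's defaults (the simplified schedule below also counts warmup and cool-down in raw steps, unlike `schedule_threshold` above, which scales them by `warmup_steps`):

```python
import torch
from torch import nn


def cubic_threshold(step, total_steps, initial=1.0, final=0.15, warmup=0, cooldown=0):
    """Cubically anneal the fraction of weights kept (simplified schedule)."""
    if step <= warmup:
        return initial
    if step > total_steps - cooldown:
        return final
    progress = 1 - (step - warmup) / (total_steps - warmup - cooldown)
    return final + (initial - final) * progress ** 3


# The threshold drops quickly at first and flattens out near `final`.
print([round(cubic_threshold(s, 1000), 3) for s in range(0, 1001, 250)])

# Temperature-scaled distillation loss between student and teacher logits.
temperature, alpha_distil, alpha_ce = 2.0, 0.5, 0.5
logits_stu = torch.randn(4, 3)
logits_tea = torch.randn(4, 3)
ce_loss = torch.tensor(1.2)  # stand-in for the student's own task loss

kd_loss = nn.functional.kl_div(
    nn.functional.log_softmax(logits_stu / temperature, dim=-1),
    nn.functional.softmax(logits_tea / temperature, dim=-1),
    reduction="batchmean",
) * (temperature ** 2)
loss = alpha_distil * kd_loss + alpha_ce * ce_loss
print(loss)
```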
- logger.info("***** Running evaluation {} *****".format(prefix)) - logger.info(" Num examples = %d", len(eval_dataset)) - logger.info(" Batch size = %d", args.eval_batch_size) - eval_loss = 0.0 - nb_eval_steps = 0 - preds = None - out_label_ids = None - - # Global TopK - if args.global_topk: - threshold_mem = None - - for batch in tqdm(eval_dataloader, desc="Evaluating"): - model.eval() - batch = tuple(t.to(args.device) for t in batch) - - with torch.no_grad(): - inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]} - if args.model_type != "distilbert": - inputs["token_type_ids"] = ( - batch[2] if args.model_type in ["bert", "masked_bert", "xlnet", "albert"] else None - ) # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids - if "masked" in args.model_type: - inputs["threshold"] = args.final_threshold - if args.global_topk: - if threshold_mem is None: - concat = torch.cat( - [param.view(-1) for name, param in model.named_parameters() if "mask_scores" in name] - ) - n = concat.numel() - kth = max(n - (int(n * args.final_threshold) + 1), 1) - threshold_mem = concat.kthvalue(kth).values.item() - inputs["threshold"] = threshold_mem - outputs = model(**inputs) - tmp_eval_loss, logits = outputs[:2] - - eval_loss += tmp_eval_loss.mean().item() - nb_eval_steps += 1 - if preds is None: - preds = logits.detach().cpu().numpy() - out_label_ids = inputs["labels"].detach().cpu().numpy() - else: - preds = np.append(preds, logits.detach().cpu().numpy(), axis=0) - out_label_ids = np.append(out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0) - - eval_loss = eval_loss / nb_eval_steps - if args.output_mode == "classification": - from scipy.special import softmax - - probs = softmax(preds, axis=-1) - entropy = np.exp((-probs * np.log(probs)).sum(axis=-1).mean()) - preds = np.argmax(preds, axis=1) - elif args.output_mode == "regression": - preds = np.squeeze(preds) - result = compute_metrics(eval_task, preds, out_label_ids) - results.update(result) - if entropy is not None: - result["eval_avg_entropy"] = entropy - - output_eval_file = os.path.join(eval_output_dir, prefix, "eval_results.txt") - with open(output_eval_file, "w") as writer: - logger.info("***** Eval results {} *****".format(prefix)) - for key in sorted(result.keys()): - logger.info(" %s = %s", key, str(result[key])) - writer.write("%s = %s\n" % (key, str(result[key]))) - - return results - - -def load_and_cache_examples(args, task, tokenizer, evaluate=False): - if args.local_rank not in [-1, 0] and not evaluate: - torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache - - processor = processors[task]() - output_mode = output_modes[task] - # Load data features from cache or dataset file - cached_features_file = os.path.join( - args.data_dir, - "cached_{}_{}_{}_{}".format( - "dev" if evaluate else "train", - list(filter(None, args.model_name_or_path.split("/"))).pop(), - str(args.max_seq_length), - str(task), - ), - ) - if os.path.exists(cached_features_file) and not args.overwrite_cache: - logger.info("Loading features from cached file %s", cached_features_file) - features = torch.load(cached_features_file) - else: - logger.info("Creating features from dataset file at %s", args.data_dir) - label_list = processor.get_labels() - if task in ["mnli", "mnli-mm"] and args.model_type in ["roberta", "xlmroberta"]: - # HACK(label indices are swapped in RoBERTa pretrained model) - label_list[1], label_list[2] = 
label_list[2], label_list[1] - examples = ( - processor.get_dev_examples(args.data_dir) if evaluate else processor.get_train_examples(args.data_dir) - ) - features = convert_examples_to_features( - examples, - tokenizer, - max_length=args.max_seq_length, - label_list=label_list, - output_mode=output_mode, - ) - if args.local_rank in [-1, 0]: - logger.info("Saving features into cached file %s", cached_features_file) - torch.save(features, cached_features_file) - - if args.local_rank == 0 and not evaluate: - torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache - - # Convert to Tensors and build dataset - all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long) - all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long) - all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long) - if output_mode == "classification": - all_labels = torch.tensor([f.label for f in features], dtype=torch.long) - elif output_mode == "regression": - all_labels = torch.tensor([f.label for f in features], dtype=torch.float) - - dataset = TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels) - return dataset - - -def main(): - parser = argparse.ArgumentParser() - - # Required parameters - parser.add_argument( - "--data_dir", - default=None, - type=str, - required=True, - help="The input data dir. Should contain the .tsv files (or other data files) for the task.", - ) - parser.add_argument( - "--model_type", - default=None, - type=str, - required=True, - help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()), - ) - parser.add_argument( - "--model_name_or_path", - default=None, - type=str, - required=True, - help="Path to pretrained model or model identifier from huggingface.co/models", - ) - parser.add_argument( - "--task_name", - default=None, - type=str, - required=True, - help="The name of the task to train selected in the list: " + ", ".join(processors.keys()), - ) - parser.add_argument( - "--output_dir", - default=None, - type=str, - required=True, - help="The output directory where the model predictions and checkpoints will be written.", - ) - # Other parameters - parser.add_argument( - "--config_name", - default="", - type=str, - help="Pretrained config name or path if not the same as model_name", - ) - parser.add_argument( - "--tokenizer_name", - default="", - type=str, - help="Pretrained tokenizer name or path if not the same as model_name", - ) - parser.add_argument( - "--cache_dir", - default="", - type=str, - help="Where do you want to store the pre-trained models downloaded from huggingface.co", - ) - parser.add_argument( - "--max_seq_length", - default=128, - type=int, - help=( - "The maximum total input sequence length after tokenization. Sequences longer " - "than this will be truncated, sequences shorter will be padded." 
- ), - ) - parser.add_argument("--do_train", action="store_true", help="Whether to run training.") - parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.") - parser.add_argument( - "--evaluate_during_training", - action="store_true", - help="Run evaluation during training at each logging step.", - ) - parser.add_argument( - "--do_lower_case", - action="store_true", - help="Set this flag if you are using an uncased model.", - ) - - parser.add_argument( - "--per_gpu_train_batch_size", - default=8, - type=int, - help="Batch size per GPU/CPU for training.", - ) - parser.add_argument( - "--per_gpu_eval_batch_size", - default=8, - type=int, - help="Batch size per GPU/CPU for evaluation.", - ) - parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.") - - # Pruning parameters - parser.add_argument( - "--mask_scores_learning_rate", - default=1e-2, - type=float, - help="The Adam initial learning rate of the mask scores.", - ) - parser.add_argument( - "--initial_threshold", default=1.0, type=float, help="Initial value of the threshold (for scheduling)." - ) - parser.add_argument( - "--final_threshold", default=0.7, type=float, help="Final value of the threshold (for scheduling)." - ) - parser.add_argument( - "--initial_warmup", - default=1, - type=int, - help=( - "Run `initial_warmup` * `warmup_steps` steps of threshold warmup during which threshold stays " - "at its `initial_threshold` value (sparsity schedule)." - ), - ) - parser.add_argument( - "--final_warmup", - default=2, - type=int, - help=( - "Run `final_warmup` * `warmup_steps` steps of threshold cool-down during which threshold stays " - "at its final_threshold value (sparsity schedule)." - ), - ) - - parser.add_argument( - "--pruning_method", - default="topK", - type=str, - help=( - "Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning," - " sigmoied_threshold = Soft movement pruning)." - ), - ) - parser.add_argument( - "--mask_init", - default="constant", - type=str, - help="Initialization method for the mask scores. Choices: constant, uniform, kaiming.", - ) - parser.add_argument( - "--mask_scale", default=0.0, type=float, help="Initialization parameter for the chosen initialization method." - ) - - parser.add_argument("--regularization", default=None, help="Add L0 or L1 regularization to the mask scores.") - parser.add_argument( - "--final_lambda", - default=0.0, - type=float, - help="Regularization intensity (used in conjunction with `regularization`.", - ) - - parser.add_argument("--global_topk", action="store_true", help="Global TopK on the Scores.") - parser.add_argument( - "--global_topk_frequency_compute", - default=25, - type=int, - help="Frequency at which we compute the TopK global threshold.", - ) - - # Distillation parameters (optional) - parser.add_argument( - "--teacher_type", - default=None, - type=str, - help=( - "Teacher type. Teacher tokenizer and student (model) tokenizer must output the same tokenization. Only for" - " distillation." - ), - ) - parser.add_argument( - "--teacher_name_or_path", - default=None, - type=str, - help="Path to the already fine-tuned teacher model. Only for distillation.", - ) - parser.add_argument( - "--alpha_ce", default=0.5, type=float, help="Cross entropy loss linear weight. Only for distillation." - ) - parser.add_argument( - "--alpha_distil", default=0.5, type=float, help="Distillation loss linear weight. Only for distillation." 
- ) - parser.add_argument( - "--temperature", default=2.0, type=float, help="Distillation temperature. Only for distillation." - ) - - parser.add_argument( - "--gradient_accumulation_steps", - type=int, - default=1, - help="Number of updates steps to accumulate before performing a backward/update pass.", - ) - parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.") - parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.") - parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") - parser.add_argument( - "--num_train_epochs", - default=3.0, - type=float, - help="Total number of training epochs to perform.", - ) - parser.add_argument( - "--max_steps", - default=-1, - type=int, - help="If > 0: set total number of training steps to perform. Override num_train_epochs.", - ) - parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.") - - parser.add_argument("--logging_steps", type=int, default=50, help="Log every X updates steps.") - parser.add_argument("--save_steps", type=int, default=50, help="Save checkpoint every X updates steps.") - parser.add_argument( - "--eval_all_checkpoints", - action="store_true", - help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number", - ) - parser.add_argument("--no_cuda", action="store_true", help="Avoid using CUDA when available") - parser.add_argument( - "--overwrite_output_dir", - action="store_true", - help="Overwrite the content of the output directory", - ) - parser.add_argument( - "--overwrite_cache", - action="store_true", - help="Overwrite the cached training and evaluation sets", - ) - parser.add_argument("--seed", type=int, default=42, help="random seed for initialization") - - parser.add_argument( - "--fp16", - action="store_true", - help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit", - ) - parser.add_argument( - "--fp16_opt_level", - type=str, - default="O1", - help=( - "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. " - "See details at https://nvidia.github.io/apex/amp.html" - ), - ) - parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") - - args = parser.parse_args() - - # Regularization - if args.regularization == "null": - args.regularization = None - - if ( - os.path.exists(args.output_dir) - and os.listdir(args.output_dir) - and args.do_train - and not args.overwrite_output_dir - ): - raise ValueError( - f"Output directory ({args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to" - " overcome." 
- ) - - # Setup CUDA, GPU & distributed training - if args.local_rank == -1 or args.no_cuda: - device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") - args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count() - else: # Initializes the distributed backend which will take care of synchronizing nodes/GPUs - torch.cuda.set_device(args.local_rank) - device = torch.device("cuda", args.local_rank) - torch.distributed.init_process_group(backend="nccl") - args.n_gpu = 1 - args.device = device - - # Setup logging - logging.basicConfig( - format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", - datefmt="%m/%d/%Y %H:%M:%S", - level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN, - ) - logger.warning( - "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", - args.local_rank, - device, - args.n_gpu, - bool(args.local_rank != -1), - args.fp16, - ) - - # Set seed - set_seed(args) - - # Prepare GLUE task - args.task_name = args.task_name.lower() - if args.task_name not in processors: - raise ValueError("Task not found: %s" % (args.task_name)) - processor = processors[args.task_name]() - args.output_mode = output_modes[args.task_name] - label_list = processor.get_labels() - num_labels = len(label_list) - - # Load pretrained model and tokenizer - if args.local_rank not in [-1, 0]: - torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab - - args.model_type = args.model_type.lower() - config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type] - config = config_class.from_pretrained( - args.config_name if args.config_name else args.model_name_or_path, - num_labels=num_labels, - finetuning_task=args.task_name, - cache_dir=args.cache_dir if args.cache_dir else None, - pruning_method=args.pruning_method, - mask_init=args.mask_init, - mask_scale=args.mask_scale, - ) - tokenizer = tokenizer_class.from_pretrained( - args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, - cache_dir=args.cache_dir if args.cache_dir else None, - do_lower_case=args.do_lower_case, - ) - model = model_class.from_pretrained( - args.model_name_or_path, - from_tf=bool(".ckpt" in args.model_name_or_path), - config=config, - cache_dir=args.cache_dir if args.cache_dir else None, - ) - - if args.teacher_type is not None: - assert args.teacher_name_or_path is not None - assert args.alpha_distil > 0.0 - assert args.alpha_distil + args.alpha_ce > 0.0 - teacher_config_class, teacher_model_class, _ = MODEL_CLASSES[args.teacher_type] - teacher_config = teacher_config_class.from_pretrained(args.teacher_name_or_path) - teacher = teacher_model_class.from_pretrained( - args.teacher_name_or_path, - from_tf=False, - config=teacher_config, - cache_dir=args.cache_dir if args.cache_dir else None, - ) - teacher.to(args.device) - else: - teacher = None - - if args.local_rank == 0: - torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab - - model.to(args.device) - - logger.info("Training/evaluation parameters %s", args) - - # Training - if args.do_train: - train_dataset = load_and_cache_examples(args, args.task_name, tokenizer, evaluate=False) - global_step, tr_loss = train(args, train_dataset, model, tokenizer, teacher=teacher) - logger.info(" global_step = %s, average loss = %s", global_step, tr_loss) - - # Saving best-practices: if you use defaults names for the model, you can reload it using 
from_pretrained() - if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0): - logger.info("Saving model checkpoint to %s", args.output_dir) - # Save a trained model, configuration and tokenizer using `save_pretrained()`. - # They can then be reloaded using `from_pretrained()` - model_to_save = ( - model.module if hasattr(model, "module") else model - ) # Take care of distributed/parallel training - model_to_save.save_pretrained(args.output_dir) - tokenizer.save_pretrained(args.output_dir) - - # Good practice: save your training arguments together with the trained model - torch.save(args, os.path.join(args.output_dir, "training_args.bin")) - - # Load a trained model and vocabulary that you have fine-tuned - model = model_class.from_pretrained(args.output_dir) - tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case) - model.to(args.device) - - # Evaluation - results = {} - if args.do_eval and args.local_rank in [-1, 0]: - tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case) - checkpoints = [args.output_dir] - if args.eval_all_checkpoints: - checkpoints = [ - os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + "/**/" + WEIGHTS_NAME, recursive=True)) - ] - - logger.info("Evaluate the following checkpoints: %s", checkpoints) - for checkpoint in checkpoints: - global_step = checkpoint.split("-")[-1] if len(checkpoints) > 1 else "" - prefix = checkpoint.split("/")[-1] if checkpoint.find("checkpoint") != -1 else "" - - model = model_class.from_pretrained(checkpoint) - model.to(args.device) - result = evaluate(args, model, tokenizer, prefix=prefix) - result = {k + "_{}".format(global_step): v for k, v in result.items()} - results.update(result) - - return results - - -if __name__ == "__main__": - main() diff --git a/examples/research_projects/movement-pruning/masked_run_squad.py b/examples/research_projects/movement-pruning/masked_run_squad.py deleted file mode 100644 index 7b1c2b32209..00000000000 --- a/examples/research_projects/movement-pruning/masked_run_squad.py +++ /dev/null @@ -1,1147 +0,0 @@ -# coding=utf-8 -# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. -# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-"""Fine-pruning Masked BERT for question-answering on SQuAD.""" - -import argparse -import glob -import logging -import os -import random -import timeit - -import numpy as np -import torch -from emmental import MaskedBertConfig, MaskedBertForQuestionAnswering -from torch import nn -from torch.utils.data import DataLoader, RandomSampler, SequentialSampler -from torch.utils.data.distributed import DistributedSampler -from tqdm import tqdm, trange - -from transformers import ( - WEIGHTS_NAME, - AdamW, - BertConfig, - BertForQuestionAnswering, - BertTokenizer, - get_linear_schedule_with_warmup, - squad_convert_examples_to_features, -) -from transformers.data.metrics.squad_metrics import ( - compute_predictions_log_probs, - compute_predictions_logits, - squad_evaluate, -) -from transformers.data.processors.squad import SquadResult, SquadV1Processor, SquadV2Processor - - -try: - from torch.utils.tensorboard import SummaryWriter -except ImportError: - from tensorboardX import SummaryWriter - - -logger = logging.getLogger(__name__) - -MODEL_CLASSES = { - "bert": (BertConfig, BertForQuestionAnswering, BertTokenizer), - "masked_bert": (MaskedBertConfig, MaskedBertForQuestionAnswering, BertTokenizer), -} - - -def set_seed(args): - random.seed(args.seed) - np.random.seed(args.seed) - torch.manual_seed(args.seed) - if args.n_gpu > 0: - torch.cuda.manual_seed_all(args.seed) - - -def schedule_threshold( - step: int, - total_step: int, - warmup_steps: int, - initial_threshold: float, - final_threshold: float, - initial_warmup: int, - final_warmup: int, - final_lambda: float, -): - if step <= initial_warmup * warmup_steps: - threshold = initial_threshold - elif step > (total_step - final_warmup * warmup_steps): - threshold = final_threshold - else: - spars_warmup_steps = initial_warmup * warmup_steps - spars_schedu_steps = (final_warmup + initial_warmup) * warmup_steps - mul_coeff = 1 - (step - spars_warmup_steps) / (total_step - spars_schedu_steps) - threshold = final_threshold + (initial_threshold - final_threshold) * (mul_coeff**3) - regu_lambda = final_lambda * threshold / final_threshold - return threshold, regu_lambda - - -def regularization(model: nn.Module, mode: str): - regu, counter = 0, 0 - for name, param in model.named_parameters(): - if "mask_scores" in name: - if mode == "l1": - regu += torch.norm(torch.sigmoid(param), p=1) / param.numel() - elif mode == "l0": - regu += torch.sigmoid(param - 2 / 3 * np.log(0.1 / 1.1)).sum() / param.numel() - else: - raise ValueError("Don't know this mode.") - counter += 1 - return regu / counter - - -def to_list(tensor): - return tensor.detach().cpu().tolist() - - -def train(args, train_dataset, model, tokenizer, teacher=None): - """Train the model""" - if args.local_rank in [-1, 0]: - tb_writer = SummaryWriter(log_dir=args.output_dir) - - args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu) - train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset) - train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size) - - if args.max_steps > 0: - t_total = args.max_steps - args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1 - else: - t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs - - # Prepare optimizer and schedule (linear warmup and decay) - no_decay = ["bias", "LayerNorm.weight"] - optimizer_grouped_parameters = [ - { - "params": [p for n, p in 
model.named_parameters() if "mask_score" in n and p.requires_grad], - "lr": args.mask_scores_learning_rate, - }, - { - "params": [ - p - for n, p in model.named_parameters() - if "mask_score" not in n and p.requires_grad and not any(nd in n for nd in no_decay) - ], - "lr": args.learning_rate, - "weight_decay": args.weight_decay, - }, - { - "params": [ - p - for n, p in model.named_parameters() - if "mask_score" not in n and p.requires_grad and any(nd in n for nd in no_decay) - ], - "lr": args.learning_rate, - "weight_decay": 0.0, - }, - ] - - optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) - scheduler = get_linear_schedule_with_warmup( - optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total - ) - - # Check if saved optimizer or scheduler states exist - if os.path.isfile(os.path.join(args.model_name_or_path, "optimizer.pt")) and os.path.isfile( - os.path.join(args.model_name_or_path, "scheduler.pt") - ): - # Load in optimizer and scheduler states - optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "optimizer.pt"))) - scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "scheduler.pt"))) - - if args.fp16: - try: - from apex import amp - except ImportError: - raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") - model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level) - - # multi-gpu training (should be after apex fp16 initialization) - if args.n_gpu > 1: - model = nn.DataParallel(model) - - # Distributed training (should be after apex fp16 initialization) - if args.local_rank != -1: - model = nn.parallel.DistributedDataParallel( - model, - device_ids=[args.local_rank], - output_device=args.local_rank, - find_unused_parameters=True, - ) - - # Train! - logger.info("***** Running training *****") - logger.info(" Num examples = %d", len(train_dataset)) - logger.info(" Num Epochs = %d", args.num_train_epochs) - logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size) - logger.info( - " Total train batch size (w. 
parallel, distributed & accumulation) = %d", - args.train_batch_size - * args.gradient_accumulation_steps - * (torch.distributed.get_world_size() if args.local_rank != -1 else 1), - ) - logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps) - logger.info(" Total optimization steps = %d", t_total) - # Distillation - if teacher is not None: - logger.info(" Training with distillation") - - global_step = 1 - # Global TopK - if args.global_topk: - threshold_mem = None - epochs_trained = 0 - steps_trained_in_current_epoch = 0 - # Check if continuing training from a checkpoint - if os.path.exists(args.model_name_or_path): - # set global_step to global_step of last saved checkpoint from model path - try: - checkpoint_suffix = args.model_name_or_path.split("-")[-1].split("/")[0] - global_step = int(checkpoint_suffix) - epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps) - steps_trained_in_current_epoch = global_step % (len(train_dataloader) // args.gradient_accumulation_steps) - - logger.info(" Continuing training from checkpoint, will skip to saved global_step") - logger.info(" Continuing training from epoch %d", epochs_trained) - logger.info(" Continuing training from global step %d", global_step) - logger.info(" Will skip the first %d steps in the first epoch", steps_trained_in_current_epoch) - except ValueError: - logger.info(" Starting fine-tuning.") - - tr_loss, logging_loss = 0.0, 0.0 - model.zero_grad() - train_iterator = trange( - epochs_trained, int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0] - ) - # Added here for reproducibility - set_seed(args) - - for _ in train_iterator: - epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0]) - for step, batch in enumerate(epoch_iterator): - # Skip past any already trained steps if resuming training - if steps_trained_in_current_epoch > 0: - steps_trained_in_current_epoch -= 1 - continue - - model.train() - batch = tuple(t.to(args.device) for t in batch) - threshold, regu_lambda = schedule_threshold( - step=global_step, - total_step=t_total, - warmup_steps=args.warmup_steps, - final_threshold=args.final_threshold, - initial_threshold=args.initial_threshold, - final_warmup=args.final_warmup, - initial_warmup=args.initial_warmup, - final_lambda=args.final_lambda, - ) - # Global TopK - if args.global_topk: - if threshold == 1.0: - threshold = -1e2 # Or an indefinitely low quantity - else: - if (threshold_mem is None) or (global_step % args.global_topk_frequency_compute == 0): - # Sort all the values to get the global topK - concat = torch.cat( - [param.view(-1) for name, param in model.named_parameters() if "mask_scores" in name] - ) - n = concat.numel() - kth = max(n - (int(n * threshold) + 1), 1) - threshold_mem = concat.kthvalue(kth).values.item() - threshold = threshold_mem - else: - threshold = threshold_mem - inputs = { - "input_ids": batch[0], - "attention_mask": batch[1], - "token_type_ids": batch[2], - "start_positions": batch[3], - "end_positions": batch[4], - } - - if args.model_type in ["xlm", "roberta", "distilbert", "camembert"]: - del inputs["token_type_ids"] - - if args.model_type in ["xlnet", "xlm"]: - inputs.update({"cls_index": batch[5], "p_mask": batch[6]}) - if args.version_2_with_negative: - inputs.update({"is_impossible": batch[7]}) - if hasattr(model, "config") and hasattr(model.config, "lang2id"): - inputs.update( - {"langs": (torch.ones(batch[0].shape, dtype=torch.int64) * 
args.lang_id).to(args.device)} - ) - - if "masked" in args.model_type: - inputs["threshold"] = threshold - - outputs = model(**inputs) - # model outputs are always tuple in transformers (see doc) - loss, start_logits_stu, end_logits_stu = outputs - - # Distillation loss - if teacher is not None: - with torch.no_grad(): - start_logits_tea, end_logits_tea = teacher( - input_ids=inputs["input_ids"], - token_type_ids=inputs["token_type_ids"], - attention_mask=inputs["attention_mask"], - ) - - loss_start = nn.functional.kl_div( - input=nn.functional.log_softmax(start_logits_stu / args.temperature, dim=-1), - target=nn.functional.softmax(start_logits_tea / args.temperature, dim=-1), - reduction="batchmean", - ) * (args.temperature**2) - loss_end = nn.functional.kl_div( - input=nn.functional.log_softmax(end_logits_stu / args.temperature, dim=-1), - target=nn.functional.softmax(end_logits_tea / args.temperature, dim=-1), - reduction="batchmean", - ) * (args.temperature**2) - loss_logits = (loss_start + loss_end) / 2.0 - - loss = args.alpha_distil * loss_logits + args.alpha_ce * loss - - # Regularization - if args.regularization is not None: - regu_ = regularization(model=model, mode=args.regularization) - loss = loss + regu_lambda * regu_ - - if args.n_gpu > 1: - loss = loss.mean() # mean() to average on multi-gpu parallel training - if args.gradient_accumulation_steps > 1: - loss = loss / args.gradient_accumulation_steps - - if args.fp16: - with amp.scale_loss(loss, optimizer) as scaled_loss: - scaled_loss.backward() - else: - loss.backward() - - tr_loss += loss.item() - if (step + 1) % args.gradient_accumulation_steps == 0: - if args.fp16: - nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm) - else: - nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm) - - if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0: - tb_writer.add_scalar("threshold", threshold, global_step) - for name, param in model.named_parameters(): - if not param.requires_grad: - continue - tb_writer.add_scalar("parameter_mean/" + name, param.data.mean(), global_step) - tb_writer.add_scalar("parameter_std/" + name, param.data.std(), global_step) - tb_writer.add_scalar("parameter_min/" + name, param.data.min(), global_step) - tb_writer.add_scalar("parameter_max/" + name, param.data.max(), global_step) - if "pooler" in name: - continue - tb_writer.add_scalar("grad_mean/" + name, param.grad.data.mean(), global_step) - tb_writer.add_scalar("grad_std/" + name, param.grad.data.std(), global_step) - if args.regularization is not None and "mask_scores" in name: - if args.regularization == "l1": - perc = (torch.sigmoid(param) > threshold).sum().item() / param.numel() - elif args.regularization == "l0": - perc = (torch.sigmoid(param - 2 / 3 * np.log(0.1 / 1.1))).sum().item() / param.numel() - tb_writer.add_scalar("retained_weights_perc/" + name, perc, global_step) - - optimizer.step() - scheduler.step() # Update learning rate schedule - model.zero_grad() - global_step += 1 - - # Log metrics - if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0: - # Only evaluate when single GPU otherwise metrics may not average well - if args.local_rank == -1 and args.evaluate_during_training: - results = evaluate(args, model, tokenizer) - for key, value in results.items(): - tb_writer.add_scalar("eval_{}".format(key), value, global_step) - learning_rate_scalar = scheduler.get_lr() - tb_writer.add_scalar("lr", 
learning_rate_scalar[0], global_step) - if len(learning_rate_scalar) > 1: - for idx, lr in enumerate(learning_rate_scalar[1:]): - tb_writer.add_scalar(f"lr/{idx+1}", lr, global_step) - tb_writer.add_scalar("loss", (tr_loss - logging_loss) / args.logging_steps, global_step) - if teacher is not None: - tb_writer.add_scalar("loss/distil", loss_logits.item(), global_step) - if args.regularization is not None: - tb_writer.add_scalar("loss/regularization", regu_.item(), global_step) - if (teacher is not None) or (args.regularization is not None): - if (teacher is not None) and (args.regularization is not None): - tb_writer.add_scalar( - "loss/instant_ce", - (loss.item() - regu_lambda * regu_.item() - args.alpha_distil * loss_logits.item()) - / args.alpha_ce, - global_step, - ) - elif teacher is not None: - tb_writer.add_scalar( - "loss/instant_ce", - (loss.item() - args.alpha_distil * loss_logits.item()) / args.alpha_ce, - global_step, - ) - else: - tb_writer.add_scalar( - "loss/instant_ce", loss.item() - regu_lambda * regu_.item(), global_step - ) - logging_loss = tr_loss - - # Save model checkpoint - if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0: - output_dir = os.path.join(args.output_dir, "checkpoint-{}".format(global_step)) - if not os.path.exists(output_dir): - os.makedirs(output_dir) - # Take care of distributed/parallel training - model_to_save = model.module if hasattr(model, "module") else model - model_to_save.save_pretrained(output_dir) - tokenizer.save_pretrained(output_dir) - - torch.save(args, os.path.join(output_dir, "training_args.bin")) - logger.info("Saving model checkpoint to %s", output_dir) - - torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt")) - torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt")) - logger.info("Saving optimizer and scheduler states to %s", output_dir) - - if args.max_steps > 0 and global_step > args.max_steps: - epoch_iterator.close() - break - if args.max_steps > 0 and global_step > args.max_steps: - train_iterator.close() - break - - if args.local_rank in [-1, 0]: - tb_writer.close() - - return global_step, tr_loss / global_step - - -def evaluate(args, model, tokenizer, prefix=""): - dataset, examples, features = load_and_cache_examples(args, tokenizer, evaluate=True, output_examples=True) - - if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]: - os.makedirs(args.output_dir) - - args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu) - # Note that DistributedSampler samples randomly - eval_sampler = SequentialSampler(dataset) - eval_dataloader = DataLoader(dataset, sampler=eval_sampler, batch_size=args.eval_batch_size) - - # multi-gpu eval - if args.n_gpu > 1 and not isinstance(model, nn.DataParallel): - model = nn.DataParallel(model) - - # Eval! 
- logger.info("***** Running evaluation {} *****".format(prefix)) - logger.info(" Num examples = %d", len(dataset)) - logger.info(" Batch size = %d", args.eval_batch_size) - - all_results = [] - start_time = timeit.default_timer() - # Global TopK - if args.global_topk: - threshold_mem = None - - for batch in tqdm(eval_dataloader, desc="Evaluating"): - model.eval() - batch = tuple(t.to(args.device) for t in batch) - - with torch.no_grad(): - inputs = { - "input_ids": batch[0], - "attention_mask": batch[1], - "token_type_ids": batch[2], - } - - if args.model_type in ["xlm", "roberta", "distilbert", "camembert"]: - del inputs["token_type_ids"] - - example_indices = batch[3] - - # XLNet and XLM use more arguments for their predictions - if args.model_type in ["xlnet", "xlm"]: - inputs.update({"cls_index": batch[4], "p_mask": batch[5]}) - # for lang_id-sensitive xlm models - if hasattr(model, "config") and hasattr(model.config, "lang2id"): - inputs.update( - {"langs": (torch.ones(batch[0].shape, dtype=torch.int64) * args.lang_id).to(args.device)} - ) - if "masked" in args.model_type: - inputs["threshold"] = args.final_threshold - if args.global_topk: - if threshold_mem is None: - concat = torch.cat( - [param.view(-1) for name, param in model.named_parameters() if "mask_scores" in name] - ) - n = concat.numel() - kth = max(n - (int(n * args.final_threshold) + 1), 1) - threshold_mem = concat.kthvalue(kth).values.item() - inputs["threshold"] = threshold_mem - outputs = model(**inputs) - - for i, example_index in enumerate(example_indices): - eval_feature = features[example_index.item()] - unique_id = int(eval_feature.unique_id) - - output = [to_list(output[i]) for output in outputs] - - # Some models (XLNet, XLM) use 5 arguments for their predictions, while the other "simpler" - # models only use two. 
- if len(output) >= 5: - start_logits = output[0] - start_top_index = output[1] - end_logits = output[2] - end_top_index = output[3] - cls_logits = output[4] - - result = SquadResult( - unique_id, - start_logits, - end_logits, - start_top_index=start_top_index, - end_top_index=end_top_index, - cls_logits=cls_logits, - ) - - else: - start_logits, end_logits = output - result = SquadResult(unique_id, start_logits, end_logits) - - all_results.append(result) - - evalTime = timeit.default_timer() - start_time - logger.info(" Evaluation done in total %f secs (%f sec per example)", evalTime, evalTime / len(dataset)) - - # Compute predictions - output_prediction_file = os.path.join(args.output_dir, "predictions_{}.json".format(prefix)) - output_nbest_file = os.path.join(args.output_dir, "nbest_predictions_{}.json".format(prefix)) - - if args.version_2_with_negative: - output_null_log_odds_file = os.path.join(args.output_dir, "null_odds_{}.json".format(prefix)) - else: - output_null_log_odds_file = None - - # XLNet and XLM use a more complex post-processing procedure - if args.model_type in ["xlnet", "xlm"]: - start_n_top = model.config.start_n_top if hasattr(model, "config") else model.module.config.start_n_top - end_n_top = model.config.end_n_top if hasattr(model, "config") else model.module.config.end_n_top - - predictions = compute_predictions_log_probs( - examples, - features, - all_results, - args.n_best_size, - args.max_answer_length, - output_prediction_file, - output_nbest_file, - output_null_log_odds_file, - start_n_top, - end_n_top, - args.version_2_with_negative, - tokenizer, - args.verbose_logging, - ) - else: - predictions = compute_predictions_logits( - examples, - features, - all_results, - args.n_best_size, - args.max_answer_length, - args.do_lower_case, - output_prediction_file, - output_nbest_file, - output_null_log_odds_file, - args.verbose_logging, - args.version_2_with_negative, - args.null_score_diff_threshold, - tokenizer, - ) - - # Compute the F1 and exact scores. - results = squad_evaluate(examples, predictions) - return results - - -def load_and_cache_examples(args, tokenizer, evaluate=False, output_examples=False): - if args.local_rank not in [-1, 0] and not evaluate: - # Make sure only the first process in distributed training process the dataset, and the others will use the cache - torch.distributed.barrier() - - # Load data features from cache or dataset file - input_dir = args.data_dir if args.data_dir else "." 
- cached_features_file = os.path.join( - input_dir, - "cached_{}_{}_{}_{}".format( - "dev" if evaluate else "train", - args.tokenizer_name - if args.tokenizer_name - else list(filter(None, args.model_name_or_path.split("/"))).pop(), - str(args.max_seq_length), - list(filter(None, args.predict_file.split("/"))).pop() - if evaluate - else list(filter(None, args.train_file.split("/"))).pop(), - ), - ) - - # Init features and dataset from cache if it exists - if os.path.exists(cached_features_file) and not args.overwrite_cache: - logger.info("Loading features from cached file %s", cached_features_file) - features_and_dataset = torch.load(cached_features_file) - features, dataset, examples = ( - features_and_dataset["features"], - features_and_dataset["dataset"], - features_and_dataset["examples"], - ) - else: - logger.info("Creating features from dataset file at %s", input_dir) - - if not args.data_dir and ((evaluate and not args.predict_file) or (not evaluate and not args.train_file)): - try: - import tensorflow_datasets as tfds - except ImportError: - raise ImportError("If not data_dir is specified, tensorflow_datasets needs to be installed.") - - if args.version_2_with_negative: - logger.warning("tensorflow_datasets does not handle version 2 of SQuAD.") - - tfds_examples = tfds.load("squad") - examples = SquadV1Processor().get_examples_from_dataset(tfds_examples, evaluate=evaluate) - else: - processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor() - if evaluate: - examples = processor.get_dev_examples(args.data_dir, filename=args.predict_file) - else: - examples = processor.get_train_examples(args.data_dir, filename=args.train_file) - - features, dataset = squad_convert_examples_to_features( - examples=examples, - tokenizer=tokenizer, - max_seq_length=args.max_seq_length, - doc_stride=args.doc_stride, - max_query_length=args.max_query_length, - is_training=not evaluate, - return_dataset="pt", - threads=args.threads, - ) - - if args.local_rank in [-1, 0]: - logger.info("Saving features into cached file %s", cached_features_file) - torch.save({"features": features, "dataset": dataset, "examples": examples}, cached_features_file) - - if args.local_rank == 0 and not evaluate: - # Make sure only the first process in distributed training process the dataset, and the others will use the cache - torch.distributed.barrier() - - if output_examples: - return dataset, examples, features - return dataset - - -def main(): - parser = argparse.ArgumentParser() - - # Required parameters - parser.add_argument( - "--model_type", - default=None, - type=str, - required=True, - help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()), - ) - parser.add_argument( - "--model_name_or_path", - default=None, - type=str, - required=True, - help="Path to pretrained model or model identifier from huggingface.co/models", - ) - parser.add_argument( - "--output_dir", - default=None, - type=str, - required=True, - help="The output directory where the model checkpoints and predictions will be written.", - ) - - # Other parameters - parser.add_argument( - "--data_dir", - default=None, - type=str, - help="The input data dir. Should contain the .json files for the task." - + "If no data dir or train/predict files are specified, will run with tensorflow_datasets.", - ) - parser.add_argument( - "--train_file", - default=None, - type=str, - help="The input training file. 
If a data dir is specified, will look for the file there" - + "If no data dir or train/predict files are specified, will run with tensorflow_datasets.", - ) - parser.add_argument( - "--predict_file", - default=None, - type=str, - help="The input evaluation file. If a data dir is specified, will look for the file there" - + "If no data dir or train/predict files are specified, will run with tensorflow_datasets.", - ) - parser.add_argument( - "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name" - ) - parser.add_argument( - "--tokenizer_name", - default="", - type=str, - help="Pretrained tokenizer name or path if not the same as model_name", - ) - parser.add_argument( - "--cache_dir", - default="", - type=str, - help="Where do you want to store the pre-trained models downloaded from huggingface.co", - ) - - parser.add_argument( - "--version_2_with_negative", - action="store_true", - help="If true, the SQuAD examples contain some that do not have an answer.", - ) - parser.add_argument( - "--null_score_diff_threshold", - type=float, - default=0.0, - help="If null_score - best_non_null is greater than the threshold predict null.", - ) - - parser.add_argument( - "--max_seq_length", - default=384, - type=int, - help=( - "The maximum total input sequence length after WordPiece tokenization. Sequences " - "longer than this will be truncated, and sequences shorter than this will be padded." - ), - ) - parser.add_argument( - "--doc_stride", - default=128, - type=int, - help="When splitting up a long document into chunks, how much stride to take between chunks.", - ) - parser.add_argument( - "--max_query_length", - default=64, - type=int, - help=( - "The maximum number of tokens for the question. Questions longer than this will " - "be truncated to this length." - ), - ) - parser.add_argument("--do_train", action="store_true", help="Whether to run training.") - parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.") - parser.add_argument( - "--evaluate_during_training", action="store_true", help="Run evaluation during training at each logging step." - ) - parser.add_argument( - "--do_lower_case", action="store_true", help="Set this flag if you are using an uncased model." - ) - - parser.add_argument("--per_gpu_train_batch_size", default=8, type=int, help="Batch size per GPU/CPU for training.") - parser.add_argument( - "--per_gpu_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation." - ) - parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.") - - # Pruning parameters - parser.add_argument( - "--mask_scores_learning_rate", - default=1e-2, - type=float, - help="The Adam initial learning rate of the mask scores.", - ) - parser.add_argument( - "--initial_threshold", default=1.0, type=float, help="Initial value of the threshold (for scheduling)." - ) - parser.add_argument( - "--final_threshold", default=0.7, type=float, help="Final value of the threshold (for scheduling)." - ) - parser.add_argument( - "--initial_warmup", - default=1, - type=int, - help=( - "Run `initial_warmup` * `warmup_steps` steps of threshold warmup during which threshold stays " - "at its `initial_threshold` value (sparsity schedule)." 
- ), - ) - parser.add_argument( - "--final_warmup", - default=2, - type=int, - help=( - "Run `final_warmup` * `warmup_steps` steps of threshold cool-down during which threshold stays " - "at its final_threshold value (sparsity schedule)." - ), - ) - - parser.add_argument( - "--pruning_method", - default="topK", - type=str, - help=( - "Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning," - " sigmoied_threshold = Soft movement pruning)." - ), - ) - parser.add_argument( - "--mask_init", - default="constant", - type=str, - help="Initialization method for the mask scores. Choices: constant, uniform, kaiming.", - ) - parser.add_argument( - "--mask_scale", default=0.0, type=float, help="Initialization parameter for the chosen initialization method." - ) - - parser.add_argument("--regularization", default=None, help="Add L0 or L1 regularization to the mask scores.") - parser.add_argument( - "--final_lambda", - default=0.0, - type=float, - help="Regularization intensity (used in conjunction with `regularization`.", - ) - - parser.add_argument("--global_topk", action="store_true", help="Global TopK on the Scores.") - parser.add_argument( - "--global_topk_frequency_compute", - default=25, - type=int, - help="Frequency at which we compute the TopK global threshold.", - ) - - # Distillation parameters (optional) - parser.add_argument( - "--teacher_type", - default=None, - type=str, - help=( - "Teacher type. Teacher tokenizer and student (model) tokenizer must output the same tokenization. Only for" - " distillation." - ), - ) - parser.add_argument( - "--teacher_name_or_path", - default=None, - type=str, - help="Path to the already SQuAD fine-tuned teacher model. Only for distillation.", - ) - parser.add_argument( - "--alpha_ce", default=0.5, type=float, help="Cross entropy loss linear weight. Only for distillation." - ) - parser.add_argument( - "--alpha_distil", default=0.5, type=float, help="Distillation loss linear weight. Only for distillation." - ) - parser.add_argument( - "--temperature", default=2.0, type=float, help="Distillation temperature. Only for distillation." - ) - - parser.add_argument( - "--gradient_accumulation_steps", - type=int, - default=1, - help="Number of updates steps to accumulate before performing a backward/update pass.", - ) - parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.") - parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.") - parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") - parser.add_argument( - "--num_train_epochs", - default=3.0, - type=float, - help="Total number of training epochs to perform.", - ) - parser.add_argument( - "--max_steps", - default=-1, - type=int, - help="If > 0: set total number of training steps to perform. Override num_train_epochs.", - ) - parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.") - parser.add_argument( - "--n_best_size", - default=20, - type=int, - help="The total number of n-best predictions to generate in the nbest_predictions.json output file.", - ) - parser.add_argument( - "--max_answer_length", - default=30, - type=int, - help=( - "The maximum length of an answer that can be generated. This is needed because the start " - "and end predictions are not conditioned on one another." 
- ), - ) - parser.add_argument( - "--verbose_logging", - action="store_true", - help=( - "If true, all of the warnings related to data processing will be printed. " - "A number of warnings are expected for a normal SQuAD evaluation." - ), - ) - parser.add_argument( - "--lang_id", - default=0, - type=int, - help=( - "language id of input for language-specific xlm models (see" - " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)" - ), - ) - - parser.add_argument("--logging_steps", type=int, default=500, help="Log every X updates steps.") - parser.add_argument("--save_steps", type=int, default=500, help="Save checkpoint every X updates steps.") - parser.add_argument( - "--eval_all_checkpoints", - action="store_true", - help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number", - ) - parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available") - parser.add_argument( - "--overwrite_output_dir", action="store_true", help="Overwrite the content of the output directory" - ) - parser.add_argument( - "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets" - ) - parser.add_argument("--seed", type=int, default=42, help="random seed for initialization") - - parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus") - parser.add_argument( - "--fp16", - action="store_true", - help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit", - ) - parser.add_argument( - "--fp16_opt_level", - type=str, - default="O1", - help=( - "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. " - "See details at https://nvidia.github.io/apex/amp.html" - ), - ) - parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.") - parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.") - - parser.add_argument("--threads", type=int, default=1, help="multiple threads for converting example to features") - args = parser.parse_args() - - # Regularization - if args.regularization == "null": - args.regularization = None - - if args.doc_stride >= args.max_seq_length - args.max_query_length: - logger.warning( - "WARNING - You've set a doc stride which may be superior to the document length in some " - "examples. This could result in errors when building features from the examples. Please reduce the doc " - "stride or increase the maximum length to ensure the features are correctly built." - ) - - if ( - os.path.exists(args.output_dir) - and os.listdir(args.output_dir) - and args.do_train - and not args.overwrite_output_dir - ): - raise ValueError( - "Output directory ({}) already exists and is not empty. 
Use --overwrite_output_dir to overcome.".format( - args.output_dir - ) - ) - - # Setup distant debugging if needed - if args.server_ip and args.server_port: - # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script - import ptvsd - - print("Waiting for debugger attach") - ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True) - ptvsd.wait_for_attach() - - # Setup CUDA, GPU & distributed training - if args.local_rank == -1 or args.no_cuda: - device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") - args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count() - else: # Initializes the distributed backend which will take care of synchronizing nodes/GPUs - torch.cuda.set_device(args.local_rank) - device = torch.device("cuda", args.local_rank) - torch.distributed.init_process_group(backend="nccl") - args.n_gpu = 1 - args.device = device - - # Setup logging - logging.basicConfig( - format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", - datefmt="%m/%d/%Y %H:%M:%S", - level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN, - ) - logger.warning( - "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", - args.local_rank, - device, - args.n_gpu, - bool(args.local_rank != -1), - args.fp16, - ) - - # Set seed - set_seed(args) - - # Load pretrained model and tokenizer - if args.local_rank not in [-1, 0]: - # Make sure only the first process in distributed training will download model & vocab - torch.distributed.barrier() - - args.model_type = args.model_type.lower() - config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type] - config = config_class.from_pretrained( - args.config_name if args.config_name else args.model_name_or_path, - cache_dir=args.cache_dir if args.cache_dir else None, - pruning_method=args.pruning_method, - mask_init=args.mask_init, - mask_scale=args.mask_scale, - ) - tokenizer = tokenizer_class.from_pretrained( - args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, - do_lower_case=args.do_lower_case, - cache_dir=args.cache_dir if args.cache_dir else None, - ) - model = model_class.from_pretrained( - args.model_name_or_path, - from_tf=bool(".ckpt" in args.model_name_or_path), - config=config, - cache_dir=args.cache_dir if args.cache_dir else None, - ) - - if args.teacher_type is not None: - assert args.teacher_name_or_path is not None - assert args.alpha_distil > 0.0 - assert args.alpha_distil + args.alpha_ce > 0.0 - teacher_config_class, teacher_model_class, _ = MODEL_CLASSES[args.teacher_type] - teacher_config = teacher_config_class.from_pretrained(args.teacher_name_or_path) - teacher = teacher_model_class.from_pretrained( - args.teacher_name_or_path, - from_tf=False, - config=teacher_config, - cache_dir=args.cache_dir if args.cache_dir else None, - ) - teacher.to(args.device) - else: - teacher = None - - if args.local_rank == 0: - # Make sure only the first process in distributed training will download model & vocab - torch.distributed.barrier() - - model.to(args.device) - - logger.info("Training/evaluation parameters %s", args) - - # Before we do anything with models, we want to ensure that we get fp16 execution of torch.einsum if args.fp16 is set. - # Otherwise it'll default to "promote" mode, and we'll get fp32 operations. Note that running `--fp16_opt_level="O2"` will - # remove the need for this code, but it is still valid. 
- if args.fp16: - try: - import apex - - apex.amp.register_half_function(torch, "einsum") - except ImportError: - raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") - - # Training - if args.do_train: - train_dataset = load_and_cache_examples(args, tokenizer, evaluate=False, output_examples=False) - global_step, tr_loss = train(args, train_dataset, model, tokenizer, teacher=teacher) - logger.info(" global_step = %s, average loss = %s", global_step, tr_loss) - - # Save the trained model and the tokenizer - if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0): - logger.info("Saving model checkpoint to %s", args.output_dir) - # Save a trained model, configuration and tokenizer using `save_pretrained()`. - # They can then be reloaded using `from_pretrained()` - # Take care of distributed/parallel training - model_to_save = model.module if hasattr(model, "module") else model - model_to_save.save_pretrained(args.output_dir) - tokenizer.save_pretrained(args.output_dir) - - # Good practice: save your training arguments together with the trained model - torch.save(args, os.path.join(args.output_dir, "training_args.bin")) - - # Load a trained model and vocabulary that you have fine-tuned - model = model_class.from_pretrained(args.output_dir) # , force_download=True) - tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case) - model.to(args.device) - - # Evaluation - we can ask to evaluate all the checkpoints (sub-directories) in a directory - results = {} - if args.do_eval and args.local_rank in [-1, 0]: - if args.do_train: - logger.info("Loading checkpoints saved during training for evaluation") - checkpoints = [args.output_dir] - if args.eval_all_checkpoints: - checkpoints = [ - os.path.dirname(c) - for c in sorted(glob.glob(args.output_dir + "/**/" + WEIGHTS_NAME, recursive=True)) - ] - - else: - logger.info("Loading checkpoint %s for evaluation", args.model_name_or_path) - checkpoints = [args.model_name_or_path] - - logger.info("Evaluate the following checkpoints: %s", checkpoints) - - for checkpoint in checkpoints: - # Reload the model - global_step = checkpoint.split("-")[-1] if len(checkpoints) > 1 else "" - model = model_class.from_pretrained(checkpoint) # , force_download=True) - model.to(args.device) - - # Evaluate - result = evaluate(args, model, tokenizer, prefix=global_step) - - result = {k + ("_{}".format(global_step) if global_step else ""): v for k, v in result.items()} - results.update(result) - - logger.info("Results: {}".format(results)) - predict_file = list(filter(None, args.predict_file.split("/"))).pop() - if not os.path.exists(os.path.join(args.output_dir, predict_file)): - os.makedirs(os.path.join(args.output_dir, predict_file)) - output_eval_file = os.path.join(args.output_dir, predict_file, "eval_results.txt") - with open(output_eval_file, "w") as writer: - for key in sorted(results.keys()): - writer.write("%s = %s\n" % (key, str(results[key]))) - - return results - - -if __name__ == "__main__": - main() diff --git a/examples/research_projects/movement-pruning/requirements.txt b/examples/research_projects/movement-pruning/requirements.txt deleted file mode 100644 index b678a785bc3..00000000000 --- a/examples/research_projects/movement-pruning/requirements.txt +++ /dev/null @@ -1,6 +0,0 @@ -torch>=1.4.0 --e git+https://github.com/huggingface/transformers.git@352d5472b0c1dec0f420d606d16747d851b4bda8#egg=transformers -knockknock>=0.1.8.1 -h5py>=2.10.0 
-numpy>=1.18.2 -scipy>=1.4.1 diff --git a/examples/research_projects/onnx/summarization/README.md b/examples/research_projects/onnx/summarization/README.md deleted file mode 100644 index c43b0450ea2..00000000000 --- a/examples/research_projects/onnx/summarization/README.md +++ /dev/null @@ -1,43 +0,0 @@ - - -# Bart + Beam Search to ONNX - -Author: [@fatcat-z](https://github.com/fatcat-z) - -This folder contains an example of exporting Bart + Beam Search generation (`BartForConditionalGeneration`) to ONNX. - -Beam Search contains a for-loop workflow, so we need to make it TorchScript-compatible for exporting to ONNX. This example shows how to make a Bart model TorchScript-compatible by wrapping it in a new model. In addition, some changes were made to the `beam_search()` function to make it TorchScript-compatible. - - -## How to run the example - -To make sure you can successfully run the latest versions of the example scripts, you have to **install the library from source** and install some example-specific requirements. To do this, execute the following steps in a new virtual environment: - -```bash -git clone https://github.com/huggingface/transformers -cd transformers -pip install '.[onnxruntime]' -``` -Then cd into this example folder and run -```bash -pip install -r requirements.txt -``` - -Now you can run the example command below to get the example ONNX file: - -```bash -python run_onnx_exporter.py --model_name_or_path facebook/bart-base -``` diff --git a/examples/research_projects/onnx/summarization/bart_onnx/generation_onnx.py b/examples/research_projects/onnx/summarization/bart_onnx/generation_onnx.py deleted file mode 100644 index 5c1b0da7000..00000000000 --- a/examples/research_projects/onnx/summarization/bart_onnx/generation_onnx.py +++ /dev/null @@ -1,755 +0,0 @@ -import copy -import itertools -from typing import List, Optional, Tuple - -import torch -import torch.nn.functional as F - -from transformers import BartConfig -from transformers.generation import GenerationMixin - - -def _convert_past_list_to_tuple(past_key_values): - """ - In the Bart model, the type of past_key_values is tuple(tuple(torch.FloatTensor)) which is not - TorchScript-compatible. To support this, we have to convert it during the export process. - This function will convert past values from a list to tuple(tuple(torch.FloatTensor)) for - the inner decoder. - - According to the definition of past_key_values, each inner tuple(torch.FloatTensor) has 4 tensors, - so we convert every 4 elements in the list into a tuple(torch.FloatTensor).
- """ - count_of_each_inner_tuple = 4 - results = () - temp_result = () - count_n = len(past_key_values) // count_of_each_inner_tuple - for idx in range(count_n): - real_idx = idx * count_of_each_inner_tuple - temp_result = tuple(past_key_values[real_idx : real_idx + count_of_each_inner_tuple]) - results += ((temp_result),) - - return results - - -class EncoderForONNX(torch.nn.Module): - def __init__(self, encoder): - super().__init__() - self.encoder = encoder - - def forward(self, input_ids, attention_mask): - return self.encoder( - input_ids=input_ids, - attention_mask=attention_mask, - return_dict=False, - ) - - -class DecoderForONNX(torch.nn.Module): - def __init__(self, decoder): - super().__init__() - self.decoder = decoder - - def forward(self, input_ids, encoder_state, attention_mask, past=None): - all_results = None - if past is not None: - all_results = _convert_past_list_to_tuple(past) - input_ids = input_ids[:, -1:] - - last_hidden_state, past_key_values = self.decoder( - input_ids=input_ids, - encoder_hidden_states=encoder_state, - encoder_attention_mask=attention_mask, - past_key_values=all_results, - return_dict=False, - ) - - past_values = [] - for past in past_key_values: - past_values = past_values + list(past) - return last_hidden_state, past_values - - -def _create_traced_encoder(encoder, input_ids, attention_mask): - encoder_c = copy.deepcopy(encoder) - encoder_for_onnx = EncoderForONNX(encoder_c) - - return torch.jit.trace(encoder_for_onnx, (input_ids, attention_mask)) - - -def _create_traced_decoder(decoder, input_ids, encoder_state, attention_mask, past=None): - decoder_c = copy.deepcopy(decoder) - decoder_for_onnx = DecoderForONNX(decoder_c) - past_values = list(itertools.chain.from_iterable(past or ())) - - # Do this twice so we got 2 different decoders for further work. - if past_values: - return torch.jit.trace(decoder_for_onnx, (input_ids, encoder_state, attention_mask, past_values)) - else: - return torch.jit.trace(decoder_for_onnx, (input_ids, encoder_state, attention_mask)) - - -class BartConfigTS(BartConfig, torch.nn.Module): - """ - BartConfigTS is a TorchScript-compatible transformers.models.bart.configuration_bart.BartConfig. - TorchScript only supports sub-classes of torch.nn.Module. - """ - - def __init__(self, config): - BartConfig.__init__(self, config) - torch.nn.Module.__init__(self) - - -class MinLengthLogitsProcessorTS(torch.nn.Module): - r""" - :class:`transformers.LogitsProcessor` enforcing a min-length by setting EOS probability to 0. - - Args: - min_length (:obj:`int`): - The minimum length below which the score of :obj:`eos_token_id` is set to :obj:`-float("Inf")`. - eos_token_id (:obj:`int`): - The id of the `end-of-sequence` token. 
- """ - - def __init__(self, min_length: int, eos_token_id: int): - super().__init__() - - if not isinstance(min_length, int) or min_length < 0: - raise ValueError(f"`min_length` has to be a positive integer, but is {min_length}") - - if not isinstance(eos_token_id, int) or eos_token_id < 0: - raise ValueError(f"`eos_token_id` has to be a positive integer, but is {eos_token_id}") - - self.min_length = min_length - self.eos_token_id = eos_token_id - - def forward(self, input_ids, scores) -> torch.Tensor: - cur_len = input_ids.shape[-1] - if cur_len < self.min_length: - scores[:, self.eos_token_id] = -float("inf") - return scores - - -class BARTGenerator(torch.nn.Module, GenerationMixin): - def __init__(self, model): - super().__init__() - self.config = BartConfigTS(model.config) - self.config.force_bos_token_to_be_generated = False - self._trace_modules(model) - self.logits_processor = MinLengthLogitsProcessorTS(self.config.min_length, self.config.eos_token_id) - self.final_logits_weight = model.model.shared.weight - self.final_logits_bias = model.final_logits_bias - self.decoder_layers = model.config.decoder_layers - - def _trace_modules(self, model): - input_ids = torch.tensor( - [ - [ - 19, - 669, - 18, - 420, - 8, - 664, - 57, - 42, - 8, - 664, - 21, - 3028, - 195, - 4445, - 331, - 1293, - 34, - 21, - 10, - 6174, - 1100, - 6, - 69, - 104, - 42, - 32, - 2621, - 1638, - 144, - 4, - 6174, - 558, - 108, - 4419, - 1091, - 28, - 4, - 1668, - 9, - 1509, - 1621, - 279, - 35, - 867, - 2734, - 85, - 11, - 2216, - 2734, - 85, - 203, - 2244, - 7, - 6, - 15, - 8102, - 7, - 57, - 8629, - 5, - model.config.eos_token_id, - ] - ], - device=model.device, - dtype=torch.long, - ) - attention_mask = torch.tensor( - [[True] * input_ids.shape[-1]], - device=model.device, - dtype=torch.bool, - ) - self.encoder = _create_traced_encoder(model.get_encoder(), input_ids, attention_mask) - encoder_outputs = model.get_encoder()(input_ids, attention_mask=attention_mask, return_dict=True) - decoder = model.model.decoder - decoder_outputs = decoder(input_ids, attention_mask, encoder_outputs["last_hidden_state"], None, None, None) - self.decoder_no_past = _create_traced_decoder( - model.model.decoder, input_ids, encoder_outputs["last_hidden_state"], attention_mask - ) - self.decoder_with_past = _create_traced_decoder( - model.model.decoder, input_ids, encoder_outputs["last_hidden_state"], attention_mask, decoder_outputs[1] - ) - - def _encoder_forward(self, input_ids, attention_mask): - return self.encoder(input_ids, attention_mask)[0] - - @staticmethod - def _init_sequence_length_for_generation( - input_ids: torch.LongTensor, max_length: int - ) -> Tuple[torch.Tensor, torch.Tensor, int]: - unfinished_sequences = torch.zeros(input_ids.shape[0], dtype=torch.long, device=input_ids.device) + 1 - sequence_lengths = torch.zeros(input_ids.shape[0], dtype=torch.long, device=input_ids.device) + max_length - - cur_len = input_ids.shape[-1] - return sequence_lengths, unfinished_sequences, cur_len - - def _decoder_forward(self, input_ids, encoder_output, attention_mask, past: List[torch.Tensor]): - # Update here to use different decoder for different values of past. 
- if past is None or len(past) == 0: - decoder_output, past = self.decoder_no_past( - input_ids=input_ids, encoder_state=encoder_output, attention_mask=attention_mask - ) - else: - decoder_output, past = self.decoder_with_past( - input_ids=input_ids, encoder_state=encoder_output, attention_mask=attention_mask, past=past - ) - - lm_logits = F.linear(decoder_output, self.final_logits_weight, bias=self.final_logits_bias) - - return lm_logits, past - - def greedy_search( - self, input_ids, encoder_output, attention_mask, max_length, pad_token_id: int, eos_token_id: int - ): - # init sequence length tensors - sequence_lengths, unfinished_sequences, cur_len = self._init_sequence_length_for_generation( - input_ids, max_length - ) - - past: List[torch.Tensor] = [] - while cur_len < max_length: - logits, past = self._decoder_forward(input_ids, encoder_output, attention_mask, past) - next_token_logits = logits[:, -1, :] - - # pre-process distribution - scores = self.logits_processor(input_ids, next_token_logits) - - # argmax - next_tokens = torch.argmax(scores, dim=-1) - - # add code that transforms next_tokens to tokens_to_add - if eos_token_id is not None: - assert pad_token_id is not None, "If eos_token_id is defined, make sure that pad_token_id is defined." - next_tokens = next_tokens * unfinished_sequences + (pad_token_id) * (1 - unfinished_sequences) - - # add token and increase length by one - input_ids = torch.cat([input_ids, next_tokens[:, None]], dim=-1) - - # update sequence length - if eos_token_id is not None: - sequence_lengths, unfinished_sequences = self._update_seq_length_for_generation( - sequence_lengths, unfinished_sequences, cur_len, next_tokens == eos_token_id - ) - - # stop when there is a </s>
in each sentence, or if we exceed the maximul length - if unfinished_sequences.max() == 0: - break - - # increase cur_len - cur_len = cur_len + 1 - - return input_ids - - def _prepare_decoder_input_ids_for_generation( - self, - input_ids: torch.LongTensor, - decoder_start_token_id, - bos_token_id: Optional[int] = None, - ) -> torch.LongTensor: - decoder_input_ids = ( - torch.ones((input_ids.shape[0], 1), dtype=input_ids.dtype, device=input_ids.device) - * decoder_start_token_id - ) - return decoder_input_ids - - def forward(self, input_ids, attention_mask, max_length, decoder_start_token_id): - pad_token_id = self.config.pad_token_id - bos_token_id = self.config.bos_token_id - eos_token_id = self.config.eos_token_id - - # special case if pad_token_id is not defined - if pad_token_id is None and eos_token_id is not None: - # Setting `pad_token_id` to `eos_token_id`:{eos_token_id} for open-end generation. - pad_token_id = eos_token_id - - encoder_output = self._encoder_forward(input_ids, attention_mask) - - input_ids = self._prepare_decoder_input_ids_for_generation( - input_ids, - decoder_start_token_id=decoder_start_token_id, - bos_token_id=bos_token_id, - ) - - return self.greedy_search( - input_ids, - encoder_output, - attention_mask, - max_length=max_length, - pad_token_id=pad_token_id, - eos_token_id=eos_token_id, - ) - - -# TorchScript compatible BeamSearchScorer -class BeamSearchScorerTS(torch.nn.Module): - def __init__(self): - super().__init__() - self.max_length: int = 200 - self.num_beams: int = 3 - self.batch_size: int = 1 - self.length_penalty: float = 1.0 - self.do_early_stopping: bool = True - self.num_beam_hyps_to_keep: int = 1 - self.num_beam_groups: int = 1 - self.group_size: int = self.num_beams // self.num_beam_groups - self._done = torch.zeros(self.batch_size, dtype=torch.bool) - self._beam_hyps_count = torch.zeros(self.batch_size, dtype=torch.long) - self._beam_hyps_worst_scores = torch.zeros(self.batch_size) + 1e9 - self._beam_hyps_max_length: int = self.max_length - 1 - self._beam_hyps: List[torch.Tensor] = [torch.zeros(2)] # placeholder for TorchScript compatibility - self._beam_scores: List[torch.Tensor] = [torch.zeros(2)] # placeholder for TorchScript compatibility - - def is_done(self) -> torch.Tensor: - return self._done.all() - - def init( - self, - batch_size: int, - max_length: int, - num_beams: int, - device: torch.device, - length_penalty: float = 1.0, - do_early_stopping: bool = False, - num_beam_hyps_to_keep: int = 1, - num_beam_groups: int = 1, - ): - self.max_length = max_length - self.num_beams = num_beams - self.batch_size = batch_size - self.length_penalty = length_penalty - self.do_early_stopping = do_early_stopping - self.num_beam_hyps_to_keep = num_beam_hyps_to_keep - self.num_beam_groups = num_beam_groups - self.group_size = self.num_beams // self.num_beam_groups - - # NOTE: TorchScript does not support List of Modules - # Rewritten BeamHypotheses with tensors and list of tensors. - self._done = torch.zeros(batch_size, dtype=torch.bool, device=device) - self._beam_hyps_count = torch.zeros(batch_size, dtype=torch.long, device=device) - self._beam_hyps_worst_scores = torch.zeros(batch_size, device=device) + 1e9 - self._beam_hyps = [] - self._beam_scores = [] - - self._beam_hyps_max_length = max_length - 1 # ignoring bos_token - - if not isinstance(num_beams, int) or num_beams <= 1: - raise ValueError( - f"`num_beams` has to be an integer strictly greater than 1, but is {num_beams}. 
For `num_beams` == 1," - " one should make use of `greedy_search` instead." - ) - - if not isinstance(num_beam_groups, int) or (num_beam_groups > num_beams) or (num_beams % num_beam_groups != 0): - raise ValueError( - "`num_beam_groups` has to be an integer smaller or equal than `num_beams` and `num_beams` has to be" - f" divisible by `num_beam_groups`, but is {num_beam_groups} with `num_beams` being {num_beams}." - ) - - def hypo_len(self, hypo_idx: int): - """ - Number of hypotheses in the list. - """ - return self._beam_hyps_count[hypo_idx] - - def hypo_add(self, hyp: torch.Tensor, sum_logprobs: float, hypo_idx: int): - """ - Add a new hypothesis to the list. - """ - score = sum_logprobs / (hyp.shape[-1] ** self.length_penalty) - hyps_count = self.hypo_len(hypo_idx) - if hyps_count < self.num_beams or score > self._beam_hyps_worst_scores[hypo_idx]: - # NOTE: work around difference of torch.sum(empty_tensor) == 0, while error in onnx. - # Bug: https://msdata.visualstudio.com/Vienna/_workitems/edit/1486599 - beam_idx = ( - torch.sum(self._beam_hyps_count[:hypo_idx]) if hypo_idx != 0 else torch.tensor(0, dtype=torch.long) - ) - self._beam_scores.insert(beam_idx, torch.tensor([score])) - self._beam_hyps.insert(beam_idx, hyp) - if hyps_count + 1 > self.num_beams: - sorted_next_scores, sorted_indices = torch.topk( - torch.cat(self._beam_scores)[beam_idx : beam_idx + hyps_count + 1], hyps_count + 1, largest=False - ) - del self._beam_hyps[int((sorted_indices[0] + beam_idx))] - del self._beam_scores[int((sorted_indices[0] + beam_idx))] - self._beam_hyps_worst_scores[hypo_idx] = sorted_next_scores[1] - else: - self._beam_hyps_worst_scores[hypo_idx] = min(score, self._beam_hyps_worst_scores[hypo_idx]) - self._beam_hyps_count[hypo_idx] = hyps_count + 1 - - def hypo_is_done(self, hypo_idx: int, best_sum_logprobs: float, cur_len: int) -> bool: - """ - If there are enough hypotheses and that none of the hypotheses being generated can become better than the worst - one in the heap, then we are done with this sentence. 
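For reference, `hypo_add` above ranks finished hypotheses by a length-penalized log-probability and keeps only the `num_beams` best per sentence, tracking the worst retained score. A minimal plain-Python sketch of that bookkeeping (the `TinyBeamHypotheses` name and the heap-based storage are illustrative, not part of the TorchScript scorer):

```python
import heapq


def length_penalized_score(sum_logprobs: float, length: int, length_penalty: float = 1.0) -> float:
    # Same formula as hypo_add: total log-prob divided by length ** length_penalty.
    return sum_logprobs / (length ** length_penalty)


class TinyBeamHypotheses:
    """Keeps the num_beams best (score, tokens) pairs, like the per-sentence state above."""

    def __init__(self, num_beams: int, length_penalty: float = 1.0):
        self.num_beams = num_beams
        self.length_penalty = length_penalty
        self.heap = []  # min-heap, so heap[0] is always the worst retained hypothesis

    def add(self, tokens: list, sum_logprobs: float):
        score = length_penalized_score(sum_logprobs, len(tokens), self.length_penalty)
        if len(self.heap) < self.num_beams:
            heapq.heappush(self.heap, (score, tokens))
        elif score > self.heap[0][0]:
            heapq.heapreplace(self.heap, (score, tokens))  # drop the current worst hypothesis

    @property
    def worst_score(self) -> float:
        return self.heap[0][0] if self.heap else float("inf")


hyps = TinyBeamHypotheses(num_beams=2)
hyps.add([0, 5, 7, 2], sum_logprobs=-3.0)
hyps.add([0, 5, 2], sum_logprobs=-2.5)
hyps.add([0, 9, 9, 9, 2], sum_logprobs=-10.0)  # length-penalized score is worse than both retained, so ignored
print(sorted(hyps.heap, reverse=True))         # best hypotheses first
```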
- """ - if self.hypo_len(hypo_idx) < self.num_beams: - return False - elif self.do_early_stopping: - return True - else: - cur_score = best_sum_logprobs / cur_len**self.length_penalty - ret = self._beam_hyps_worst_scores[hypo_idx].item() >= cur_score - return ret - - def process( - self, - input_ids: torch.Tensor, - next_scores: torch.Tensor, - next_tokens: torch.Tensor, - next_indices: torch.Tensor, - pad_token_id: Optional[int] = None, - eos_token_id: Optional[int] = None, - ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - cur_len = input_ids.shape[-1] - batch_size = len(self._beam_hyps_count) - assert batch_size == (input_ids.shape[0] // self.group_size) - - device = input_ids.device - next_beam_scores = torch.zeros((batch_size, self.group_size), dtype=next_scores.dtype, device=device) - next_beam_tokens = torch.zeros((batch_size, self.group_size), dtype=next_tokens.dtype, device=device) - next_beam_indices = torch.zeros((batch_size, self.group_size), dtype=next_indices.dtype, device=device) - - for batch_idx in range(batch_size): - if self._done[batch_idx]: - assert ( - self.hypo_len(batch_idx) >= self.num_beams - ), "Batch can only be done if at least {} beams have been generated".format(self.num_beams) - assert ( - eos_token_id is not None and pad_token_id is not None - ), "generated beams >= num_beams -> eos_token_id and pad_token have to be defined" - # pad the batch - next_beam_scores[batch_idx, :] = 0 - next_beam_tokens[batch_idx, :] = pad_token_id - next_beam_indices[batch_idx, :] = 0 - continue - - # next tokens for this sentence - beam_idx = 0 - for beam_token_rank, (next_token, next_score, next_index) in enumerate( - zip(next_tokens[batch_idx], next_scores[batch_idx], next_indices[batch_idx]) - ): - batch_beam_idx = batch_idx * self.group_size + next_index - # add to generated hypotheses if end of sentence - if (eos_token_id is not None) and (next_token == eos_token_id): - # if beam_token does not belong to top num_beams tokens, it should not be added - is_beam_token_worse_than_top_num_beams = beam_token_rank >= self.group_size - if is_beam_token_worse_than_top_num_beams: - continue - self.hypo_add( - input_ids[batch_beam_idx].clone(), - next_score.item(), - batch_idx, - ) - else: - # add next predicted token since it is not eos_token - next_beam_scores[batch_idx, beam_idx] = next_score - next_beam_tokens[batch_idx, beam_idx] = next_token - next_beam_indices[batch_idx, beam_idx] = batch_beam_idx - beam_idx += 1 - - # once the beam for next step is full, don't add more tokens to it. - if beam_idx == self.group_size: - break - - if beam_idx < self.group_size: - raise ValueError( - f"At most {self.group_size} tokens in {next_tokens[batch_idx]} can be equal to `eos_token_id:" - f" {eos_token_id}`. Make sure {next_tokens[batch_idx]} are corrected." 
- ) - - # Check if we are done so that we can save a pad step if all(done) - self._done[batch_idx] = self._done[batch_idx] or self.hypo_is_done( - batch_idx, - next_scores[batch_idx].max().item(), - cur_len, - ) - - return next_beam_scores.view(-1), next_beam_tokens.view(-1), next_beam_indices.view(-1) - - def finalize( - self, - input_ids: torch.Tensor, - final_beam_scores: torch.Tensor, - final_beam_tokens: torch.Tensor, - final_beam_indices: torch.Tensor, - pad_token_id: int, - eos_token_id: int, - ) -> Tuple[torch.Tensor, torch.Tensor]: - batch_size = len(self._beam_hyps_count) - - # finalize all open beam hypotheses and add to generated hypotheses - for batch_idx in range(batch_size): - if self._done[batch_idx]: - continue - - # all open beam hypotheses are added to the beam hypothesis - # beam hypothesis class automatically keeps the best beams - for beam_id in range(self.num_beams): - batch_beam_idx = batch_idx * self.num_beams + beam_id - final_score = final_beam_scores[batch_beam_idx].item() - final_tokens = input_ids[batch_beam_idx] - self.hypo_add(final_tokens, final_score, batch_idx) - - # select the best hypotheses - # NOTE: torch.Tensor.new_zeros() is not scriptable - sent_lengths = torch.zeros(batch_size * self.num_beam_hyps_to_keep, dtype=torch.long) - best = [] - best_scores = torch.zeros( - batch_size * self.num_beam_hyps_to_keep, device=input_ids.device, dtype=torch.float32 - ) - # retrieve best hypotheses - for i in range(batch_size): - # NOTE: lambda is not scriptable - batch_hypo_start = torch.sum(self._beam_hyps_count[:i]) if i > 0 else torch.tensor(0, dtype=torch.long) - batch_hypo_end = torch.sum(self._beam_hyps_count[: i + 1]) - beam_scores = torch.cat(self._beam_scores)[batch_hypo_start:batch_hypo_end] - sorted_next_scores, sorted_indices = torch.topk(beam_scores, len(beam_scores), largest=True) - for j in range(self.num_beam_hyps_to_keep): - best_score = beam_scores[sorted_indices[j]] - best_hyp = self._beam_hyps[batch_hypo_start + sorted_indices[j]] - sent_lengths[self.num_beam_hyps_to_keep * i + j] = len(best_hyp) - # append to lists - best.append(best_hyp) - best_scores[i * self.num_beam_hyps_to_keep + j] = best_score - - # prepare for adding eos - sent_max_len = min(sent_lengths.max() + 1, self.max_length) - decoded = torch.zeros(batch_size * self.num_beam_hyps_to_keep, sent_max_len, dtype=torch.long) - # shorter batches are padded if needed - if sent_lengths.min() != sent_lengths.max(): - assert pad_token_id is not None, "`pad_token_id` has to be defined" - decoded.fill_(pad_token_id) - - # fill with hypotheses and eos_token_id if the latter fits in - for i, hypo in enumerate(best): - decoded[i, : sent_lengths[i]] = hypo - if sent_lengths[i] < self.max_length: - decoded[i, sent_lengths[i]] = eos_token_id - - return decoded, best_scores - - -class BARTBeamSearchGenerator(BARTGenerator): - def __init__(self, model): - super().__init__(model) - self.beam_scorer = BeamSearchScorerTS() - self.device = model.device - - @staticmethod - def _expand_inputs_for_generation( - input_ids: torch.Tensor, - attention_mask: torch.Tensor, - last_hidden_state: torch.Tensor, - expand_size: int = 1, - ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - expanded_return_idx = ( - torch.arange(input_ids.shape[0]).view(-1, 1).repeat(1, expand_size).view(-1).to(input_ids.device) - ) - input_ids = input_ids.index_select(0, expanded_return_idx) - - attention_mask = attention_mask.index_select(0, expanded_return_idx) - - last_hidden_state = last_hidden_state.index_select(0, 
expanded_return_idx.to(last_hidden_state.device)) - return input_ids, attention_mask, last_hidden_state - - def adjust_logits_during_generation(self, logits, cur_len: int, max_length: int): - if cur_len == 1 and self.config.force_bos_token_to_be_generated: - logits = self._force_token_id_to_be_generated(logits, self.config.bos_token_id) - elif cur_len == max_length - 1 and self.config.eos_token_id is not None: - logits = self._force_token_id_to_be_generated(logits, self.config.eos_token_id) - return logits - - @staticmethod - def _force_token_id_to_be_generated(scores, token_id: int): - """force one of token_ids to be generated by setting prob of all other tokens to 0 (logprob=-float("inf"))""" - mask = torch.full_like(scores, 1, dtype=torch.bool) - mask[:, token_id] = False - return scores.masked_fill(mask, -float("inf")) - - def _reorder_cache(self, past: List[torch.Tensor], beam_idx): - # if decoder past is not included in output - # speedy decoding is disabled and no need to reorder - reordered_decoder_past = [] - for state in past: - reordered_decoder_past.append(state.index_select(0, beam_idx)) - return reordered_decoder_past - - def beam_search( - self, input_ids, encoder_output, attention_mask, num_beams, max_length, pad_token_id: int, eos_token_id: int - ): - batch_size = self.beam_scorer.batch_size - - num_beams = self.beam_scorer.num_beams - batch_beam_size, cur_len = input_ids.shape - - assert ( - num_beams * batch_size == batch_beam_size - ), f"Batch dimension of `input_ids` should be {num_beams * batch_size}, but is {batch_beam_size}." - - beam_scores = torch.zeros((batch_size, num_beams), dtype=torch.float, device=input_ids.device) - beam_scores[:, 1:] = -1e9 - beam_scores = beam_scores.view((batch_size * num_beams,)) - next_tokens = torch.zeros((batch_size, num_beams), dtype=torch.long, device=input_ids.device) - next_indices = torch.zeros((batch_size, num_beams), dtype=torch.long, device=input_ids.device) - - past: List[torch.Tensor] = [] - while cur_len < max_length: - logits, past = self._decoder_forward(input_ids, encoder_output, attention_mask, past) - next_token_logits = logits[:, -1, :] - - # adjust tokens for Bart, *e.g.* - next_token_logits = self.adjust_logits_during_generation( - next_token_logits, cur_len=cur_len, max_length=max_length - ) - - next_token_scores = F.log_softmax(next_token_logits, dim=-1) # (batch_size * num_beams, vocab_size) - - # pre-process distribution - next_token_scores = self.logits_processor(input_ids, next_token_scores) - next_token_scores = next_token_scores + beam_scores[:, None].expand_as(next_token_scores) - - # reshape for beam search - vocab_size = next_token_scores.shape[-1] - next_token_scores = next_token_scores.view(batch_size, num_beams * vocab_size) - - next_token_scores, next_tokens = torch.topk( - next_token_scores, 2 * num_beams, dim=1, largest=True, sorted=True - ) - - next_indices = next_tokens // vocab_size - next_tokens = next_tokens % vocab_size - - beam_scores, beam_next_tokens, beam_idx = self.beam_scorer.process( - input_ids, - next_token_scores, - next_tokens, - next_indices, - pad_token_id=pad_token_id, - eos_token_id=eos_token_id, - ) - - input_ids = torch.cat([input_ids[beam_idx, :], beam_next_tokens.unsqueeze(-1)], dim=-1) - - cur_len = cur_len + 1 - - if len(past) > 0: - past = self._reorder_cache(past, beam_idx) - - if self.beam_scorer.is_done(): - break - - sequences, sequence_scores = self.beam_scorer.finalize( - input_ids, - beam_scores, - next_tokens, - next_indices, - pad_token_id=pad_token_id, - 
eos_token_id=eos_token_id, - ) - - return sequences - - def forward(self, input_ids, attention_mask, num_beams, max_length, decoder_start_token_id): - pad_token_id = self.config.pad_token_id - bos_token_id = self.config.bos_token_id - eos_token_id = self.config.eos_token_id - - # special case if pad_token_id is not defined - if pad_token_id is None and eos_token_id is not None: - # logger.warning(f"Setting `pad_token_id` to `eos_token_id`:{eos_token_id} for open-end generation.") - pad_token_id = eos_token_id - - encoder_output = self._encoder_forward(input_ids, attention_mask) - - input_ids = self._prepare_decoder_input_ids_for_generation( - input_ids, - decoder_start_token_id=decoder_start_token_id, - bos_token_id=bos_token_id, - ) - - batch_size = input_ids.shape[0] - - length_penalty = self.config.length_penalty - num_return_sequences = self.config.num_return_sequences - early_stopping = True - - self.beam_scorer.init( - batch_size=batch_size, - max_length=max_length, - num_beams=num_beams, - device=self.device, - length_penalty=length_penalty, - do_early_stopping=early_stopping, - num_beam_hyps_to_keep=num_return_sequences, - ) - - input_ids, attention_mask, encoder_output = self._expand_inputs_for_generation( - input_ids, - attention_mask, - encoder_output, - expand_size=num_beams, - ) - - return self.beam_search( - input_ids=input_ids, - encoder_output=encoder_output, - attention_mask=attention_mask, - num_beams=num_beams, - max_length=max_length, - pad_token_id=pad_token_id, - eos_token_id=eos_token_id, - ) diff --git a/examples/research_projects/onnx/summarization/bart_onnx/reduce_onnx_size.py b/examples/research_projects/onnx/summarization/bart_onnx/reduce_onnx_size.py deleted file mode 100644 index 1df20e4504d..00000000000 --- a/examples/research_projects/onnx/summarization/bart_onnx/reduce_onnx_size.py +++ /dev/null @@ -1,121 +0,0 @@ -""" -Code to remove duplicate initializers to reduce ONNX model size. -""" - -import os - -import numpy -import onnx - - -def _is_equal_tensor_proto(a, b): - name_a = a.name - name_b = b.name - - a.name = "" - b.name = "" - - res = a == b - - a.name = name_a - b.name = name_b - - return res - - -def _node_replace_input_with(node_proto, name, new_name): - for i, input_name in enumerate(node_proto.input): - if input_name == name: - node_proto.input.insert(i, new_name) - node_proto.input.pop(i + 1) - - if node_proto.op_type == "If": - _graph_replace_input_with(node_proto.attribute[0].g, name, new_name) - _graph_replace_input_with(node_proto.attribute[1].g, name, new_name) - if node_proto.op_type == "Loop": - _graph_replace_input_with(node_proto.attribute[0].g, name, new_name) - - -def _graph_replace_input_with(graph_proto, name, new_name): - for n in graph_proto.node: - _node_replace_input_with(n, name, new_name) - - -def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace): - inits_with_data = list(model.graph.initializer) - inits = list(model_without_ext.graph.initializer) - for i, ref_i in ind_to_replace: - assert inits_with_data[i].name == inits[i].name - assert inits_with_data[ref_i].name == inits[ref_i].name - assert i > ref_i - - name_i = inits[i].name - name_ref = inits[ref_i].name - - model_without_ext.graph.initializer.remove(inits[i]) - - # for n in model.graph.node: - _graph_replace_input_with(model_without_ext.graph, name_i, name_ref) - - -def remove_dup_initializers(onnx_file_path): - """ - Removes duplicate initializers from the model to reduce its size. 
- Writes a new file in the same directory as onnx_file_path and returns the path to that file. - """ - - model_file_folder = os.path.dirname(onnx_file_path) - model_file_name = os.path.basename(onnx_file_path) - - model = onnx.load(os.path.join(model_file_folder, model_file_name)) - - inits = list(model.graph.initializer) - - dup_set = set() - dup_map = {} - ind_to_replace = [] - - total_reduced_size = 0 - - for i in range(len(inits)): - if i in dup_set: - continue - - for j in range(i + 1, len(inits)): - if j in dup_set: - continue - if _is_equal_tensor_proto(inits[i], inits[j]): - dup_set.add(i) - dup_set.add(j) - - dtype = inits[j].data_type - mem_size = numpy.prod(inits[j].dims) - if dtype == 1: - mem_size *= 4 - elif dtype == 6: - mem_size *= 4 - elif dtype == 7 or dtype == 11: - mem_size *= 8 - else: - print("unexpected data type: ", dtype) - total_reduced_size += mem_size - - name_i = inits[i].name - name_j = inits[j].name - - if name_i in dup_map: - dup_map[name_i].append(name_j) - else: - dup_map[name_i] = [name_j] - ind_to_replace.append((j, i)) - - print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB") - - ind_to_replace = sorted(ind_to_replace) - _remove_dup_initializers_from_model(model, model, ind_to_replace) - - optimized_model_file_name = "optimized_" + model_file_name - new_model = os.path.join(model_file_folder, optimized_model_file_name) - onnx.save(model, new_model) - - return new_model diff --git a/examples/research_projects/onnx/summarization/requirements.txt b/examples/research_projects/onnx/summarization/requirements.txt deleted file mode 100644 index 21535650612..00000000000 --- a/examples/research_projects/onnx/summarization/requirements.txt +++ /dev/null @@ -1 +0,0 @@ -torch >= 1.10 \ No newline at end of file diff --git a/examples/research_projects/onnx/summarization/run_onnx_exporter.py b/examples/research_projects/onnx/summarization/run_onnx_exporter.py deleted file mode 100644 index fa826732701..00000000000 --- a/examples/research_projects/onnx/summarization/run_onnx_exporter.py +++ /dev/null @@ -1,206 +0,0 @@ -#!/usr/bin/env python -# coding=utf-8 -# Copyright The HuggingFace Team and The HuggingFace Inc. team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
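The `remove_dup_initializers` helper above compares TensorProtos field by field and rewires node inputs, including inside `If`/`Loop` subgraphs. A simpler sketch of the same idea that compares initializer values through `onnx.numpy_helper` (subgraphs and `graph.input` entries are deliberately ignored, so this is an assumption-laden sketch, not a drop-in replacement):

```python
import onnx
from onnx import numpy_helper


def dedup_initializers(in_path: str, out_path: str) -> None:
    """Drop duplicate initializers and point all references at the first occurrence."""
    model = onnx.load(in_path)
    graph = model.graph

    seen = {}    # (dtype, shape, raw bytes) -> name of the first initializer with that content
    rename = {}  # duplicate name -> kept name

    for init in list(graph.initializer):
        arr = numpy_helper.to_array(init)
        key = (str(arr.dtype), arr.shape, arr.tobytes())
        if key in seen:
            rename[init.name] = seen[key]
            graph.initializer.remove(init)  # remove the duplicate tensor itself
        else:
            seen[key] = init.name

    # Redirect every node input that referenced a removed initializer to the kept one.
    for node in graph.node:
        for i, name in enumerate(node.input):
            if name in rename:
                node.input[i] = rename[name]

    onnx.save(model, out_path)
```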
-""" """ - -import argparse -import logging -import os -import sys - -import numpy as np -import onnxruntime -import torch -from bart_onnx.generation_onnx import BARTBeamSearchGenerator -from bart_onnx.reduce_onnx_size import remove_dup_initializers - -import transformers -from transformers import BartForConditionalGeneration, BartTokenizer - - -logging.basicConfig( - format="%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s", - datefmt="%Y-%m-%d %H:%M:%S", - level=os.environ.get("LOGLEVEL", "INFO").upper(), - stream=sys.stdout, -) - -logger = logging.getLogger(__name__) - -model_dict = {"facebook/bart-base": BartForConditionalGeneration} -tokenizer_dict = {"facebook/bart-base": BartTokenizer} - - -def parse_args(): - parser = argparse.ArgumentParser(description="Export Bart model + Beam Search to ONNX graph.") - parser.add_argument( - "--validation_file", type=str, default=None, help="A csv or a json file containing the validation data." - ) - parser.add_argument( - "--max_length", - type=int, - default=5, - help="The maximum total input sequence length after tokenization.", - ) - parser.add_argument( - "--num_beams", - type=int, - default=None, - help=( - "Number of beams to use for evaluation. This argument will be " - "passed to ``model.generate``, which is used during ``evaluate`` and ``predict``." - ), - ) - parser.add_argument( - "--model_name_or_path", - type=str, - help="Path to pretrained model or model identifier from huggingface.co/models.", - required=True, - ) - parser.add_argument( - "--config_name", - type=str, - default=None, - help="Pretrained config name or path if not the same as model_name", - ) - parser.add_argument( - "--device", - type=str, - default="cpu", - help="Device where the model will be run", - ) - parser.add_argument("--output_file_path", type=str, default=None, help="Where to store the final ONNX file.") - - args = parser.parse_args() - - return args - - -def load_model_tokenizer(model_name, device="cpu"): - huggingface_model = model_dict[model_name].from_pretrained(model_name).to(device) - tokenizer = tokenizer_dict[model_name].from_pretrained(model_name) - - if model_name in ["facebook/bart-base"]: - huggingface_model.config.no_repeat_ngram_size = 0 - huggingface_model.config.forced_bos_token_id = None - huggingface_model.config.min_length = 0 - - return huggingface_model, tokenizer - - -def export_and_validate_model(model, tokenizer, onnx_file_path, num_beams, max_length): - model.eval() - - ort_sess = None - bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model)) - - with torch.no_grad(): - ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs." 
- inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors="pt").to(model.device) - - summary_ids = model.generate( - inputs["input_ids"], - attention_mask=inputs["attention_mask"], - num_beams=num_beams, - max_length=max_length, - early_stopping=True, - decoder_start_token_id=model.config.decoder_start_token_id, - ) - - torch.onnx.export( - bart_script_model, - ( - inputs["input_ids"], - inputs["attention_mask"], - num_beams, - max_length, - model.config.decoder_start_token_id, - ), - onnx_file_path, - opset_version=14, - input_names=["input_ids", "attention_mask", "num_beams", "max_length", "decoder_start_token_id"], - output_names=["output_ids"], - dynamic_axes={ - "input_ids": {0: "batch", 1: "seq"}, - "output_ids": {0: "batch", 1: "seq_out"}, - }, - example_outputs=summary_ids, - ) - - logger.info("Model exported to {}".format(onnx_file_path)) - - new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path)) - - logger.info("Deduplicated and optimized model written to {}".format(new_onnx_file_path)) - - ort_sess = onnxruntime.InferenceSession(new_onnx_file_path) - ort_out = ort_sess.run( - None, - { - "input_ids": inputs["input_ids"].cpu().numpy(), - "attention_mask": inputs["attention_mask"].cpu().numpy(), - "num_beams": np.array(num_beams), - "max_length": np.array(max_length), - "decoder_start_token_id": np.array(model.config.decoder_start_token_id), - }, - ) - - np.testing.assert_allclose(summary_ids.cpu().numpy(), ort_out[0], rtol=1e-3, atol=1e-3) - - logger.info("Model outputs from torch and ONNX Runtime are similar.") - logger.info("Success.") - - -def main(): - args = parse_args() - max_length = 5 - num_beams = 4 - - # Make one log on every process with the configuration for debugging. - logging.basicConfig( - format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", - datefmt="%m/%d/%Y %H:%M:%S", - level=logging.INFO, - ) - - logger.setLevel(logging.INFO) - transformers.utils.logging.set_verbosity_error() - - device = torch.device(args.device) - - model, tokenizer = load_model_tokenizer(args.model_name_or_path, device) - - if model.config.decoder_start_token_id is None: - raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined") - - model.to(device) - - if args.max_length: - max_length = args.max_length - - if args.num_beams: - num_beams = args.num_beams - - if args.output_file_path: - output_name = args.output_file_path - else: - output_name = "BART.onnx" - - logger.info("Exporting model to ONNX") - export_and_validate_model(model, tokenizer, output_name, num_beams, max_length) - - -if __name__ == "__main__": - main() diff --git a/examples/research_projects/performer/README.md b/examples/research_projects/performer/README.md deleted file mode 100644 index fa847268b0c..00000000000 --- a/examples/research_projects/performer/README.md +++ /dev/null @@ -1,25 +0,0 @@ -# Performer fine-tuning - -Example authors: @TevenLeScao, @Patrickvonplaten - -Paper authors: Krzysztof Choromanski, Valerii Likhosherstov, David Dohan, Xingyou Song, Andreea Gane, Tamas Sarlos, Peter Hawkins, Jared Davis, Afroz Mohiuddin, Lukasz Kaiser, David Belanger, Lucy Colwell, Adrian Weller - -## Requirements - -`datasets`, `flax` and `jax`. `wandb` integration is built-in if you want to use it. - -## Examples - -`sanity_script.sh` will launch performer fine-tuning from the google-bert/bert-base-cased checkpoint on the Simple Wikipedia dataset (a small, easy-language English Wikipedia) from `datasets`. 
-`full_script.sh` will launch performer fine-tuning from the google-bert/bert-large-cased checkpoint on the English Wikipedia dataset from `datasets`. - -Here are a few key arguments: -- Remove the `--performer` argument to use a standard Bert model. - -- Add `--reinitialize` to start from a blank model rather than a Bert checkpoint. - -- You may change the Bert size by passing a different [checkpoint](https://huggingface.co/transformers/pretrained_models.html) to the `--model_name_or_path` argument. - -- Passing your user name to the `--wandb_user_name` argument will trigger weights and biases logging. - -- You can choose a dataset with `--dataset_name` and `--dataset_config`. Our [viewer](https://huggingface.co/datasets/viewer/) will help you find what you need. \ No newline at end of file diff --git a/examples/research_projects/performer/full_script.sh b/examples/research_projects/performer/full_script.sh deleted file mode 100755 index 8634666f983..00000000000 --- a/examples/research_projects/performer/full_script.sh +++ /dev/null @@ -1 +0,0 @@ -TOKENIZERS_PARALLELISM=true python run_mlm_performer.py --output_dir experiments --dataset_name wikipedia --dataset_config_name 20200501.en --model_name_or_path bert-large-cased --tokenizer_name bert-large-cased --do_train --overwrite_output_dir --per_device_train_batch_size 4 --learning_rate 5e-4 --warmup_steps 100 --num_train_epochs 3 --performer \ No newline at end of file diff --git a/examples/research_projects/performer/modeling_flax_performer.py b/examples/research_projects/performer/modeling_flax_performer.py deleted file mode 100644 index 7c2fde6ddbb..00000000000 --- a/examples/research_projects/performer/modeling_flax_performer.py +++ /dev/null @@ -1,551 +0,0 @@ -# coding=utf-8 -# Copyright 2018 The Google Flax Team Authors and The HuggingFace Inc. team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from typing import Callable, Dict, Tuple - -import flax.linen as nn -import jax -import jax.numpy as jnp -import numpy as np -from jax.random import PRNGKey -from modeling_flax_performer_utils import make_fast_softmax_attention - -from transformers.file_utils import add_start_docstrings -from transformers.modeling_flax_utils import ACT2FN -from transformers.models.bert.configuration_bert import BertConfig -from transformers.models.bert.modeling_flax_bert import FlaxBertOnlyMLMHead, FlaxBertPreTrainedModel -from transformers.utils import logging - - -logger = logging.get_logger(__name__) - -_CONFIG_FOR_DOC = "BertConfig" -_TOKENIZER_FOR_DOC = "BertTokenizer" - -BERT_START_DOCSTRING = r""" - - This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic - methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, - pruning heads etc.) - - This model is also a PyTorch `torch.nn.Module `__ - subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to - general usage and behavior. 
- - Parameters: - config (:class:`~transformers.BertConfig`): Model configuration class with all the parameters of the model. - Initializing with a config file does not load the weights associated with the model, only the - configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model - weights. -""" - -BERT_INPUTS_DOCSTRING = r""" - Args: - input_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`): - Indices of input sequence tokens in the vocabulary. - - Indices can be obtained using :class:`~transformers.BertTokenizer`. See - :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for - details. - - `What are input IDs? <../glossary.html#input-ids>`__ - attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`): - Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``: - - - 1 for tokens that are **not masked**, - - 0 for tokens that are **masked**. - - `What are attention masks? <../glossary.html#attention-mask>`__ - token_type_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`): - Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0, - 1]``: - - - 0 corresponds to a `sentence A` token, - - 1 corresponds to a `sentence B` token. - - `What are token type IDs? <../glossary.html#token-type-ids>`_ - position_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`): - Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0, - config.max_position_embeddings - 1]``. - - `What are position IDs? <../glossary.html#position-ids>`_ - head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`): - Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``: - - - 1 indicates the head is **not masked**, - - 0 indicates the head is **masked**. - - inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`({0}, hidden_size)`, `optional`): - Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation. - This is useful if you want more control over how to convert :obj:`input_ids` indices into associated - vectors than the model's internal embedding lookup matrix. - output_attentions (:obj:`bool`, `optional`): - Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned - tensors for more detail. - output_hidden_states (:obj:`bool`, `optional`): - Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for - more detail. - return_dict (:obj:`bool`, `optional`): - Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple. -""" - - -class FlaxPerformerLayerNorm(nn.Module): - """ - Layer normalization (https://arxiv.org/abs/1607.06450). Operates on the last axis of the input data. - """ - - epsilon: float = 1e-6 - dtype: jnp.dtype = jnp.float32 # the dtype of the computation - bias: bool = True # If True, bias (beta) is added. - scale: bool = True # If True, multiply by scale (gamma). When the next layer is linear - # (also e.g. nn.relu), this can be disabled since the scaling will be - # done by the next layer. 
- bias_init: jnp.ndarray = nn.initializers.zeros - scale_init: jnp.ndarray = nn.initializers.ones - - @nn.compact - def __call__(self, x): - """ - Applies layer normalization on the input. It normalizes the activations of the layer for each given example in - a batch independently, rather than across a batch like Batch Normalization. i.e. applies a transformation that - maintains the mean activation within each example close to 0 and the activation standard deviation close to 1 - - Args: - x: the inputs - - Returns: - Normalized inputs (the same shape as inputs). - """ - features = x.shape[-1] - mean = jnp.mean(x, axis=-1, keepdims=True) - mean2 = jnp.mean(jax.lax.square(x), axis=-1, keepdims=True) - var = mean2 - jax.lax.square(mean) - mul = jax.lax.rsqrt(var + self.epsilon) - if self.scale: - mul = mul * jnp.asarray(self.param("gamma", self.scale_init, (features,)), self.dtype) - y = (x - mean) * mul - if self.bias: - y = y + jnp.asarray(self.param("beta", self.bias_init, (features,)), self.dtype) - return y - - -class FlaxPerformerEmbedding(nn.Module): - """ - Specify a new class for doing the embedding stuff as Flax's one use 'embedding' for the parameter name and PyTorch - use 'weight' - """ - - vocab_size: int - hidden_size: int - emb_init: Callable[..., np.ndarray] = nn.initializers.normal(stddev=0.1) - - @nn.compact - def __call__(self, inputs): - embedding = self.param("weight", self.emb_init, (self.vocab_size, self.hidden_size)) - return jnp.take(embedding, inputs, axis=0) - - -class FlaxPerformerEmbeddings(nn.Module): - """Construct the embeddings from word, position and token_type embeddings.""" - - vocab_size: int - hidden_size: int - type_vocab_size: int - max_length: int - - @nn.compact - def __call__(self, input_ids, token_type_ids, position_ids, attention_mask): - # Embed - w_emb = FlaxPerformerEmbedding(self.vocab_size, self.hidden_size, name="word_embeddings")( - jnp.atleast_2d(input_ids.astype("i4")) - ) - p_emb = FlaxPerformerEmbedding(self.max_length, self.hidden_size, name="position_embeddings")( - jnp.atleast_2d(position_ids.astype("i4")) - ) - t_emb = FlaxPerformerEmbedding(self.type_vocab_size, self.hidden_size, name="token_type_embeddings")( - jnp.atleast_2d(token_type_ids.astype("i4")) - ) - - # Sum all embeddings - summed_emb = w_emb + jnp.broadcast_to(p_emb, w_emb.shape) + t_emb - - # Layer Norm - layer_norm = FlaxPerformerLayerNorm(name="layer_norm")(summed_emb) - - return layer_norm - - -class FlaxPerformerAttention(nn.Module): - num_heads: int - head_size: int - - @nn.compact - def __call__(self, hidden_state, attention_mask): - single_head_dim = self.head_size // self.num_heads - fast_softmax_attention = make_fast_softmax_attention(qkv_dim=single_head_dim) - self_att = nn.attention.SelfAttention( - num_heads=self.num_heads, qkv_features=self.head_size, name="self", attention_fn=fast_softmax_attention - )(hidden_state, attention_mask) - - layer_norm = FlaxPerformerLayerNorm(name="layer_norm")(self_att + hidden_state) - return layer_norm - - -class FlaxPerformerIntermediate(nn.Module): - output_size: int - hidden_act: str = "gelu" - - @nn.compact - def __call__(self, hidden_state): - # TODO: Add ACT2FN reference to change activation function - dense = nn.Dense(features=self.output_size, name="dense")(hidden_state) - return ACT2FN[self.hidden_act](dense) - - -class FlaxPerformerOutput(nn.Module): - @nn.compact - def __call__(self, intermediate_output, attention_output): - hidden_state = nn.Dense(attention_output.shape[-1], name="dense")(intermediate_output) - 
hidden_state = FlaxPerformerLayerNorm(name="layer_norm")(hidden_state + attention_output) - return hidden_state - - -class FlaxPerformerLayer(nn.Module): - num_heads: int - head_size: int - intermediate_size: int - hidden_act: str = "gelu" - - @nn.compact - def __call__(self, hidden_state, attention_mask): - attention = FlaxPerformerAttention(self.num_heads, self.head_size, name="attention")( - hidden_state, attention_mask - ) - intermediate = FlaxPerformerIntermediate( - self.intermediate_size, name="intermediate", hidden_act=self.hidden_act - )(attention) - output = FlaxPerformerOutput(name="output")(intermediate, attention) - - return output - - -class FlaxPerformerLayerCollection(nn.Module): - """ - Stores N BertLayer(s) - """ - - num_layers: int - num_heads: int - head_size: int - intermediate_size: int - hidden_act: str = "gelu" - - @nn.compact - def __call__(self, inputs, attention_mask): - assert self.num_layers > 0, f"num_layers should be >= 1, got ({self.num_layers})" - - # Initialize input / output - input_i = inputs - - # Forward over all encoders - for i in range(self.num_layers): - layer = FlaxPerformerLayer( - self.num_heads, self.head_size, self.intermediate_size, hidden_act=self.hidden_act, name=f"{i}" - ) - input_i = layer(input_i, attention_mask) - return input_i - - -class FlaxPerformerEncoder(nn.Module): - num_layers: int - num_heads: int - head_size: int - intermediate_size: int - hidden_act: str = "gelu" - - @nn.compact - def __call__(self, hidden_state, attention_mask): - layer = FlaxPerformerLayerCollection( - self.num_layers, - self.num_heads, - self.head_size, - self.intermediate_size, - name="layer", - hidden_act=self.hidden_act, - )(hidden_state, attention_mask) - return layer - - -class FlaxPerformerPooler(nn.Module): - @nn.compact - def __call__(self, hidden_state): - cls_token = hidden_state[:, 0] - out = nn.Dense(hidden_state.shape[-1], name="dense")(cls_token) - return jax.lax.tanh(out) - - -class FlaxPerformerModule(nn.Module): - vocab_size: int - hidden_size: int - type_vocab_size: int - max_length: int - num_encoder_layers: int - num_heads: int - head_size: int - intermediate_size: int - hidden_act: str = "gelu" - add_pooling_layer: bool = True - - @nn.compact - def __call__(self, input_ids, token_type_ids, position_ids, attention_mask): - # Embedding - embeddings = FlaxPerformerEmbeddings( - self.vocab_size, self.hidden_size, self.type_vocab_size, self.max_length, name="embeddings" - )(input_ids, token_type_ids, position_ids, attention_mask) - - # N stacked encoding layers - encoder = FlaxPerformerEncoder( - self.num_encoder_layers, - self.num_heads, - self.head_size, - self.intermediate_size, - hidden_act=self.hidden_act, - name="encoder", - )(embeddings, attention_mask) - - if not self.add_pooling_layer: - return encoder - - pooled = FlaxPerformerPooler(name="pooler")(encoder) - return encoder, pooled - - -@add_start_docstrings( - "The bare Bert Model transformer outputting raw hidden-states without any specific head on top.", - BERT_START_DOCSTRING, -) -class FlaxPerformerModel(FlaxBertPreTrainedModel): - """ - The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of - cross-attention is added between the self-attention layers, following the architecture described in `Attention is - all you need `__ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, - Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin. 
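The Performer modules above are composed with `flax.linen` `@nn.compact` submodules addressed by name. A small self-contained sketch of that composition style and of the `init`/`apply` calling convention (`TinyBlock` and `TinyEncoder` are illustrative stand-ins, not the Performer classes):

```python
import jax
import jax.numpy as jnp
import flax.linen as nn


class TinyBlock(nn.Module):
    """One dense + layer-norm block, mirroring the attention/output composition above."""
    features: int

    @nn.compact
    def __call__(self, x):
        h = nn.Dense(self.features, name="dense")(x)
        return nn.LayerNorm(name="layer_norm")(h + x)  # residual connection, then normalization


class TinyEncoder(nn.Module):
    """Stacks num_layers blocks addressed by name, like FlaxPerformerLayerCollection."""
    num_layers: int
    features: int

    @nn.compact
    def __call__(self, x):
        for i in range(self.num_layers):
            x = TinyBlock(self.features, name=f"{i}")(x)
        return x


model = TinyEncoder(num_layers=2, features=8)
x = jnp.ones((1, 5, 8))
params = model.init(jax.random.PRNGKey(0), x)  # build the parameter pytree
y = model.apply(params, x)                     # run the forward pass
print(jax.tree_util.tree_map(lambda p: p.shape, params))
print(y.shape)
```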
- """ - - model_class = FlaxPerformerModule - config_class = BertConfig - base_model_prefix = "bert" - - @staticmethod - def convert_from_pytorch(pt_state: Dict, config: BertConfig) -> Dict: - jax_state = dict(pt_state) - - # Need to change some parameters name to match Flax names so that we don't have to fork any layer - for key, tensor in pt_state.items(): - # Key parts - key_parts = set(key.split(".")) - - # Every dense layer has "kernel" parameters instead of "weight" - if "dense.weight" in key: - del jax_state[key] - key = key.replace("weight", "kernel") - jax_state[key] = tensor - - # SelfAttention needs also to replace "weight" by "kernel" - if {"query", "key", "value"} & key_parts: - # Flax SelfAttention decomposes the heads (num_head, size // num_heads) - if "bias" in key: - jax_state[key] = tensor.reshape((config.num_attention_heads, -1)) - elif "weight": - del jax_state[key] - key = key.replace("weight", "kernel") - tensor = tensor.reshape((config.num_attention_heads, -1, config.hidden_size)).transpose((2, 0, 1)) - jax_state[key] = tensor - - # SelfAttention output is not a separate layer, remove one nesting - if "attention.output.dense" in key: - del jax_state[key] - key = key.replace("attention.output.dense", "attention.self.out") - jax_state[key] = tensor - - # SelfAttention output is not a separate layer, remove nesting on layer norm - if "attention.output.LayerNorm" in key: - del jax_state[key] - key = key.replace("attention.output.LayerNorm", "attention.LayerNorm") - jax_state[key] = tensor - - # There are some transposed parameters w.r.t their PyTorch counterpart - if "intermediate.dense.kernel" in key or "output.dense.kernel" in key: - jax_state[key] = tensor.T - - # Self Attention output projection needs to be transposed - if "out.kernel" in key: - jax_state[key] = tensor.reshape((config.hidden_size, config.num_attention_heads, -1)).transpose( - 1, 2, 0 - ) - - # Pooler needs to transpose its kernel - if "pooler.dense.kernel" in key: - jax_state[key] = tensor.T - - # Handle LayerNorm conversion - if "LayerNorm" in key: - del jax_state[key] - - # Replace LayerNorm by layer_norm - new_key = key.replace("LayerNorm", "layer_norm") - - if "weight" in key: - new_key = new_key.replace("weight", "gamma") - elif "bias" in key: - new_key = new_key.replace("bias", "beta") - - jax_state[new_key] = tensor - - return jax_state - - def __init__( - self, config: BertConfig, input_shape: Tuple = (1, 1), seed: int = 0, dtype: jnp.dtype = jnp.float32, **kwargs - ): - module = FlaxPerformerModule( - vocab_size=config.vocab_size, - hidden_size=config.hidden_size, - type_vocab_size=config.type_vocab_size, - max_length=config.max_position_embeddings, - num_encoder_layers=config.num_hidden_layers, - num_heads=config.num_attention_heads, - head_size=config.hidden_size, - intermediate_size=config.intermediate_size, - dropout_rate=config.hidden_dropout_prob, - hidden_act=config.hidden_act, - ) - - super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype) - - @property - def module(self) -> nn.Module: - return self._module - - def __call__( - self, input_ids, token_type_ids=None, position_ids=None, dropout_rng: PRNGKey = None, attention_mask=None - ): - input_ids, attention_mask, token_type_ids, position_ids = self._check_inputs( - input_ids, attention_mask, token_type_ids, position_ids - ) - - # Handle any PRNG if needed - rngs = {} - if dropout_rng is not None: - rngs["dropout"] = dropout_rng - - return self.module.apply( - {"params": self.params}, - jnp.array(input_ids, 
dtype="i4"), - jnp.array(token_type_ids, dtype="i4"), - jnp.array(position_ids, dtype="i4"), - jnp.array(attention_mask, dtype="i4"), - rng=rngs, - ) - - -class FlaxPerformerForMaskedLM(FlaxBertPreTrainedModel): - def __init__( - self, config: BertConfig, input_shape: Tuple = (1, 1), seed: int = 0, dtype: jnp.dtype = jnp.float32, **kwargs - ): - module = FlaxPerformerForMaskedLMModule( - vocab_size=config.vocab_size, - type_vocab_size=config.type_vocab_size, - hidden_size=config.hidden_size, - intermediate_size=config.intermediate_size, - head_size=config.hidden_size, - num_heads=config.num_attention_heads, - num_encoder_layers=config.num_hidden_layers, - max_length=config.max_position_embeddings, - hidden_act=config.hidden_act, - **kwargs, - ) - - super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype) - - def __call__( - self, - input_ids, - attention_mask=None, - token_type_ids=None, - position_ids=None, - params: dict = None, - train: bool = False, - dropout_rng: PRNGKey = None, - ): - input_ids, attention_mask, token_type_ids, position_ids = self._check_inputs( - input_ids, attention_mask, token_type_ids, position_ids - ) - - # Handle any PRNG if needed - rngs = {} - if dropout_rng is not None: - rngs["dropout"] = dropout_rng - - return self.module.apply( - {"params": params or self.params}, - jnp.array(input_ids, dtype="i4"), - jnp.array(attention_mask, dtype="i4"), - jnp.array(token_type_ids, dtype="i4"), - jnp.array(position_ids, dtype="i4"), - not train, - rngs=rngs, - ) - - -class FlaxPerformerForMaskedLMModule(nn.Module): - vocab_size: int - hidden_size: int - intermediate_size: int - head_size: int - num_heads: int - num_encoder_layers: int - type_vocab_size: int - max_length: int - hidden_act: str - dropout_rate: float = 0.0 - dtype: jnp.dtype = jnp.float32 - - @nn.compact - def __call__( - self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, deterministic: bool = True - ): - # Model - encoder = FlaxPerformerModule( - vocab_size=self.vocab_size, - hidden_size=self.hidden_size, - type_vocab_size=self.type_vocab_size, - max_length=self.max_length, - num_encoder_layers=self.num_encoder_layers, - num_heads=self.num_heads, - head_size=self.hidden_size, - intermediate_size=self.intermediate_size, - hidden_act=self.hidden_act, - add_pooling_layer=False, - name="bert", - )(input_ids, attention_mask, token_type_ids, position_ids) - - # Compute the prediction scores - encoder = nn.Dropout(rate=self.dropout_rate)(encoder, deterministic=deterministic) - logits = FlaxBertOnlyMLMHead( - vocab_size=self.vocab_size, hidden_act=self.hidden_act, name="cls", dtype=self.dtype - )(encoder) - - return (logits,) diff --git a/examples/research_projects/performer/modeling_flax_performer_utils.py b/examples/research_projects/performer/modeling_flax_performer_utils.py deleted file mode 100644 index c5242509381..00000000000 --- a/examples/research_projects/performer/modeling_flax_performer_utils.py +++ /dev/null @@ -1,658 +0,0 @@ -# coding=utf-8 -# Copyright 2020 The Google Research Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -""" -IMPORTANT: - -This code was copied from -https://github.com/google-research/google-research/blob/master/performer/fast_self_attention/fast_self_attention.py on -6/11/2020. This is very new code, so it might be prone to change soon -> make sure to check the original code and -update accordingly - -Core Fast Attention Module for Flax. Implementation of the approximate fast softmax and generalized attention mechanism -leveraging structured random feature maps [RFM] techniques and low rank decomposition of the attention matrix. -""" -# pylint: disable=invalid-name, missing-function-docstring, line-too-long - -import abc -import functools -from collections.abc import Iterable # pylint: disable=g-importing-member - -import jax -import jax.numpy as jnp -import numpy as onp -from absl import logging -from jax import lax, random - - -def nonnegative_softmax_kernel_feature_creator( - data, projection_matrix, attention_dims_t, batch_dims_t, precision, is_query, normalize_data=True, eps=0.0001 -): - """ - Constructs nonnegative kernel features for fast softmax attention - - Args: - data: input for which features are computes - projection_matrix: random matrix used to compute features - attention_dims_t: tuple of attention dimensions - batch_dims_t: tuple of batch dimensions - precision: precision parameter - is_query: predicate indicating whether input data corresponds to queries or - keys - normalize_data: predicate indicating whether data should be normalized, - eps: numerical stabilizer - - Returns: - Random features for fast softmax attention. - """ - del attention_dims_t - if normalize_data: - # We have e^{qk^T/sqrt{d}} = e^{q_norm k_norm^T}, where - # w_norm = w * data_normalizer for w in {q,k}. - data_normalizer = 1.0 / (jnp.sqrt(jnp.sqrt(data.shape[-1]))) - else: - data_normalizer = 1.0 - ratio = 1.0 / jnp.sqrt(projection_matrix.shape[0]) - data_mod_shape = data.shape[0 : len(batch_dims_t)] + projection_matrix.shape - data_thick_random_matrix = jnp.zeros(data_mod_shape) + projection_matrix - - data_dash = lax.dot_general( - data_normalizer * data, - data_thick_random_matrix, - (((data.ndim - 1,), (data_thick_random_matrix.ndim - 1,)), (batch_dims_t, batch_dims_t)), - precision=precision, - ) - - diag_data = jnp.square(data) - diag_data = jnp.sum(diag_data, axis=data.ndim - 1) - diag_data = (diag_data / 2.0) * data_normalizer * data_normalizer - diag_data = jnp.expand_dims(diag_data, axis=data.ndim - 1) - - if is_query: - last_dims_t = (len(data_dash.shape) - 1,) - data_dash = ratio * ( - jnp.exp(data_dash - diag_data - jnp.max(data_dash, axis=last_dims_t, keepdims=True)) + eps - ) - else: - data_dash = ratio * (jnp.exp(data_dash - diag_data - jnp.max(data_dash)) + eps) - - return data_dash - - -def sincos_softmax_kernel_feature_creator( - data, projection_matrix, attention_dims_t, batch_dims_t, precision, normalize_data=True -): - """ - Constructs kernel sin-cos features for fast softmax attention - - Args: - data: input for which features are computes - projection_matrix: random matrix used to compute features - attention_dims_t: tuple of attention dimensions - batch_dims_t: tuple of batch dimensions - precision: precision parameter - normalize_data: predicate indicating whether data should be normalized - - Returns: - Random features for fast softmax attention. 
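`nonnegative_softmax_kernel_feature_creator` above builds the positive random features of FAVOR+, whose dot product is an unbiased estimate of the softmax kernel. A NumPy check of that identity, exp(q.k / sqrt(d)) ~ phi(q).phi(k), using the same d**-0.25 normalization (the dimension and feature count below are arbitrary):

```python
import numpy as np

rng = np.random.default_rng(0)
d, m = 16, 100_000                   # head dimension and number of random features
q = rng.normal(size=d)
k = rng.normal(size=d)

# Scale by d**-0.25 so that phi(q).phi(k) targets exp(q.k / sqrt(d)).
c = d ** -0.25
qn, kn = q * c, k * c

w = rng.normal(size=(m, d))          # rows w_i ~ N(0, I_d)
phi_q = np.exp(w @ qn - qn @ qn / 2.0) / np.sqrt(m)
phi_k = np.exp(w @ kn - kn @ kn / 2.0) / np.sqrt(m)

approx = phi_q @ phi_k
exact = np.exp(q @ k / np.sqrt(d))
print(f"exact={exact:.4f}  approx={approx:.4f}  rel.err={abs(approx - exact) / exact:.3%}")
```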
- """ - if normalize_data: - # We have: exp(qk^T/sqrt{d}) = exp(|q|^2/2sqrt{d}) * exp(|k|^2/2sqrt{d}) * - # exp(-(|q*c-k*c|^2)/2), where c = 1.0 / sqrt{sqrt{d}}. - data_normalizer = 1.0 / (jnp.sqrt(jnp.sqrt(data.shape[-1]))) - else: - data_normalizer = 1.0 - ratio = 1.0 / jnp.sqrt(projection_matrix.shape[0]) - data_mod_shape = data.shape[0 : len(batch_dims_t)] + projection_matrix.shape - data_thick_random_matrix = jnp.zeros(data_mod_shape) + projection_matrix - - data_dash = lax.dot_general( - data_normalizer * data, - data_thick_random_matrix, - (((data.ndim - 1,), (data_thick_random_matrix.ndim - 1,)), (batch_dims_t, batch_dims_t)), - precision=precision, - ) - data_dash_cos = ratio * jnp.cos(data_dash) - data_dash_sin = ratio * jnp.sin(data_dash) - data_dash = jnp.concatenate((data_dash_cos, data_dash_sin), axis=-1) - - # Constructing D_data and data^{'} - diag_data = jnp.square(data) - diag_data = jnp.sum(diag_data, axis=data.ndim - 1) - diag_data = (diag_data / 2.0) * data_normalizer * data_normalizer - diag_data = jnp.expand_dims(diag_data, axis=data.ndim - 1) - # Additional renormalization for numerical stability - data_renormalizer = jnp.max(diag_data, attention_dims_t, keepdims=True) - diag_data -= data_renormalizer - diag_data = jnp.exp(diag_data) - data_prime = data_dash * diag_data - return data_prime - - -def generalized_kernel_feature_creator( - data, projection_matrix, batch_dims_t, precision, kernel_fn, kernel_epsilon, normalize_data -): - """ - Constructs kernel features for fast generalized attention - - Args: - data: input for which features are computes - projection_matrix: matrix used to compute features - batch_dims_t: tuple of batch dimensions - precision: precision parameter - kernel_fn: kernel function used - kernel_epsilon: additive positive term added to every feature for numerical - stability - normalize_data: predicate indicating whether data should be normalized - - Returns: - Random features for fast generalized attention. 
- """ - if normalize_data: - data_normalizer = 1.0 / (jnp.sqrt(jnp.sqrt(data.shape[-1]))) - else: - data_normalizer = 1.0 - if projection_matrix is None: - return kernel_fn(data_normalizer * data) + kernel_epsilon - else: - data_mod_shape = data.shape[0 : len(batch_dims_t)] + projection_matrix.shape - data_thick_random_matrix = jnp.zeros(data_mod_shape) + projection_matrix - data_dash = lax.dot_general( - data_normalizer * data, - data_thick_random_matrix, - (((data.ndim - 1,), (data_thick_random_matrix.ndim - 1,)), (batch_dims_t, batch_dims_t)), - precision=precision, - ) - data_prime = kernel_fn(data_dash) + kernel_epsilon - return data_prime - - -def make_fast_softmax_attention( - qkv_dim, - renormalize_attention=True, - numerical_stabilizer=0.000001, - nb_features=256, - ortho_features=True, - ortho_scaling=0.0, - redraw_features=True, - unidirectional=False, - nonnegative_features=True, - lax_scan_unroll=1, -): - """Construct a fast softmax attention method.""" - logging.info( - "Fast softmax attention: %s features and orthogonal=%s, renormalize=%s", - nb_features, - ortho_features, - renormalize_attention, - ) - if ortho_features: - matrix_creator = functools.partial(GaussianOrthogonalRandomMatrix, nb_features, qkv_dim, scaling=ortho_scaling) - else: - matrix_creator = functools.partial(GaussianUnstructuredRandomMatrix, nb_features, qkv_dim) - if nonnegative_features: - - def kernel_feature_creator( - data, projection_matrix, attention_dims_t, batch_dims_t, precision, is_query, normalize_data=True - ): - return nonnegative_softmax_kernel_feature_creator( - data, - projection_matrix, - attention_dims_t, - batch_dims_t, - precision, - is_query, - normalize_data, - numerical_stabilizer, - ) - - else: - - def kernel_feature_creator( - data, projection_matrix, attention_dims_t, batch_dims_t, precision, is_query, normalize_data=True - ): - del is_query - return sincos_softmax_kernel_feature_creator( - data, projection_matrix, attention_dims_t, batch_dims_t, precision, normalize_data - ) - - attention_fn = FastAttentionviaLowRankDecomposition( - matrix_creator, - kernel_feature_creator, - renormalize_attention=renormalize_attention, - numerical_stabilizer=numerical_stabilizer, - redraw_features=redraw_features, - unidirectional=unidirectional, - lax_scan_unroll=lax_scan_unroll, - ).dot_product_attention - return attention_fn - - -def make_fast_generalized_attention( - qkv_dim, - renormalize_attention=True, - numerical_stabilizer=0.0, - nb_features=256, - features_type="deterministic", - kernel_fn=jax.nn.relu, - kernel_epsilon=0.001, - redraw_features=False, - unidirectional=False, - lax_scan_unroll=1, -): - """Construct a fast generalized attention method.""" - logging.info("Fast generalized attention.: %s features and renormalize=%s", nb_features, renormalize_attention) - if features_type == "ortho": - matrix_creator = functools.partial(GaussianOrthogonalRandomMatrix, nb_features, qkv_dim, scaling=False) - elif features_type == "iid": - matrix_creator = functools.partial(GaussianUnstructuredRandomMatrix, nb_features, qkv_dim) - elif features_type == "deterministic": - matrix_creator = None - else: - raise ValueError("Unknown feature value type") - - def kernel_feature_creator( - data, projection_matrix, attention_dims_t, batch_dims_t, precision, is_query, normalize_data=False - ): - del attention_dims_t - del is_query - return generalized_kernel_feature_creator( - data, projection_matrix, batch_dims_t, precision, kernel_fn, kernel_epsilon, normalize_data - ) - - attention_fn = 
FastAttentionviaLowRankDecomposition( - matrix_creator, - kernel_feature_creator, - renormalize_attention=renormalize_attention, - numerical_stabilizer=numerical_stabilizer, - redraw_features=redraw_features, - unidirectional=unidirectional, - lax_scan_unroll=lax_scan_unroll, - ).dot_product_attention - return attention_fn - - -class RandomMatrix: - r""" - Abstract class providing a method for constructing 2D random arrays. Class is responsible for constructing 2D - random arrays. - """ - - __metaclass__ = abc.ABCMeta - - @abc.abstractmethod - def get_2d_array(self): - raise NotImplementedError("Abstract method") - - -class GaussianUnstructuredRandomMatrix(RandomMatrix): - def __init__(self, nb_rows, nb_columns, key): - self.nb_rows = nb_rows - self.nb_columns = nb_columns - self.key = key - - def get_2d_array(self): - return random.normal(self.key, (self.nb_rows, self.nb_columns)) - - -class GaussianOrthogonalRandomMatrix(RandomMatrix): - r""" - Class providing a method to create Gaussian orthogonal matrix. Class is responsible for constructing 2D Gaussian - orthogonal arrays. - """ - - def __init__(self, nb_rows, nb_columns, key, scaling=0): - self.nb_rows = nb_rows - self.nb_columns = nb_columns - self.key = key - self.scaling = scaling - - def get_2d_array(self): - nb_full_blocks = int(self.nb_rows / self.nb_columns) - block_list = [] - rng = self.key - for _ in range(nb_full_blocks): - rng, rng_input = jax.random.split(rng) - unstructured_block = random.normal(rng_input, (self.nb_columns, self.nb_columns)) - q, _ = jnp.linalg.qr(unstructured_block) - q = jnp.transpose(q) - block_list.append(q) - remaining_rows = self.nb_rows - nb_full_blocks * self.nb_columns - if remaining_rows > 0: - rng, rng_input = jax.random.split(rng) - unstructured_block = random.normal(rng_input, (self.nb_columns, self.nb_columns)) - q, _ = jnp.linalg.qr(unstructured_block) - q = jnp.transpose(q) - block_list.append(q[0:remaining_rows]) - final_matrix = jnp.vstack(block_list) - - if self.scaling == 0: - multiplier = jnp.linalg.norm(random.normal(self.key, (self.nb_rows, self.nb_columns)), axis=1) - elif self.scaling == 1: - multiplier = jnp.sqrt(float(self.nb_columns)) * jnp.ones((self.nb_rows)) - else: - raise ValueError("Scaling must be one of {0, 1}. Was %s" % self._scaling) - - return jnp.matmul(jnp.diag(multiplier), final_matrix) - - -class FastAttention: - r""" - Abstract class providing a method for fast attention. Class is responsible for providing a method - for fast approximate attention. - """ - - __metaclass__ = abc.ABCMeta - - @abc.abstractmethod - def dot_product_attention( - self, - query, - key, - value, - dtype=jnp.float32, - bias=None, - axis=None, - broadcast_dropout=True, - dropout_rng=None, - dropout_rate=0.0, - deterministic=False, - precision=None, - ): - """ - Computes dot-product attention given query, key, and value. This is the core function for applying fast - approximate dot-product attention. It calculates the attention weights given query and key and combines the - values using the attention weights. This function supports multi-dimensional inputs - - Args: - query: queries for calculating attention with shape of [batch_size, dim1, - dim2, ..., dimN, num_heads, mem_channels]. - key: keys for calculating attention with shape of [batch_size, dim1, dim2, - ..., dimN, num_heads, mem_channels]. - value: values to be used in attention with shape of [batch_size, dim1, - dim2,..., dimN, num_heads, value_channels]. 
- dtype: the dtype of the computation (default: float32) - bias: bias for the attention weights. This can be used for incorporating - autoregressive mask, padding mask, proximity bias. - axis: axises over which the attention is applied. - broadcast_dropout: bool: use a broadcasted dropout along batch dims. - dropout_rng: JAX PRNGKey: to be used for dropout. - dropout_rate: dropout rate. - deterministic: bool, deterministic or not (to apply dropout). - precision: numerical precision of the computation see `jax.lax.Precision` - for details - - Returns: - Output of shape [bs, dim1, dim2, ..., dimN,, num_heads, value_channels]. - """ - raise NotImplementedError("Abstract method") - - -def _numerator(z_slice_shape, precision, unroll=1): - def fwd(qs, ks, vs): - def body(p, qkv): - (q, k, v) = qkv - p += jnp.einsum("...m,...d->...md", k, v, precision=precision) - X_slice = jnp.einsum("...m,...md->...d", q, p, precision=precision) - return p, X_slice - - init_value = jnp.zeros(z_slice_shape) - p, W = lax.scan(body, init_value, (qs, ks, vs), unroll=unroll) - return W, (p, qs, ks, vs) - - def bwd(pqkv, W_ct): - def body(carry, qkv_xct): - p, p_ct = carry - q, k, v, x_ct = qkv_xct - q_ct = jnp.einsum("...d,...md->...m", x_ct, p, precision=precision) - p_ct += jnp.einsum("...d,...m->...md", x_ct, q, precision=precision) - k_ct = jnp.einsum("...md,...d->...m", p_ct, v, precision=precision) - v_ct = jnp.einsum("...md,...m->...d", p_ct, k, precision=precision) - p -= jnp.einsum("...m,...d->...md", k, v, precision=precision) - return (p, p_ct), (q_ct, k_ct, v_ct) - - p, qs, ks, vs = pqkv - _, (qs_ct, ks_ct, vs_ct) = lax.scan( - body, (p, jnp.zeros_like(p)), (qs, ks, vs, W_ct), reverse=True, unroll=unroll - ) - return qs_ct, ks_ct, vs_ct - - @jax.custom_vjp - def _numerator_impl(qs, ks, vs): - W, _ = fwd(qs, ks, vs) - return W - - _numerator_impl.defvjp(fwd, bwd) - - return _numerator_impl - - -def _denominator(t_slice_shape, precision, unroll=1): - def fwd(qs, ks): - def body(p, qk): - q, k = qk - p += k - x = jnp.einsum("...m,...m->...", q, p, precision=precision) - return p, x - - p = jnp.zeros(t_slice_shape) - p, R = lax.scan(body, p, (qs, ks), unroll=unroll) - return R, (qs, ks, p) - - def bwd(qkp, R_ct): - def body(carry, qkx): - p, p_ct = carry - q, k, x_ct = qkx - q_ct = jnp.einsum("...,...m->...m", x_ct, p, precision=precision) - p_ct += jnp.einsum("...,...m->...m", x_ct, q, precision=precision) - k_ct = p_ct - p -= k - return (p, p_ct), (q_ct, k_ct) - - qs, ks, p = qkp - _, (qs_ct, ks_ct) = lax.scan(body, (p, jnp.zeros_like(p)), (qs, ks, R_ct), reverse=True, unroll=unroll) - return (qs_ct, ks_ct) - - @jax.custom_vjp - def _denominator_impl(qs, ks): - R, _ = fwd(qs, ks) - return R - - _denominator_impl.defvjp(fwd, bwd) - - return _denominator_impl - - -class FastAttentionviaLowRankDecomposition(FastAttention): - r""" - Class providing a method for fast attention via low rank decomposition. Class is responsible for providing a method - for fast dot-product attention with the use of low rank decomposition (e.g. with random - feature maps). - """ - - def __init__( - self, - matrix_creator, - kernel_feature_creator, - renormalize_attention, - numerical_stabilizer, - redraw_features, - unidirectional, - lax_scan_unroll=1, - ): # For optimal GPU performance, set to 16. 
- rng = random.PRNGKey(0) - self.matrix_creator = matrix_creator - self.projection_matrix = self.draw_weights(rng) - self.kernel_feature_creator = kernel_feature_creator - self.renormalize_attention = renormalize_attention - self.numerical_stabilizer = numerical_stabilizer - self.redraw_features = redraw_features - self.unidirectional = unidirectional - self.lax_scan_unroll = lax_scan_unroll - - def draw_weights(self, key): - if self.matrix_creator is None: - return None - matrixrng, _ = random.split(key) - projection_matrix = self.matrix_creator(key=matrixrng).get_2d_array() - return projection_matrix - - def dot_product_attention( - self, - query, - key, - value, - dtype=jnp.float32, - bias=None, - axis=None, - broadcast_dropout=True, - dropout_rng=None, - dropout_rate=0.0, - deterministic=False, - precision=None, - ): - assert key.shape[:-1] == value.shape[:-1] - assert query.shape[0:1] == key.shape[0:1] and query.shape[-1] == key.shape[-1] - if axis is None: - axis = tuple(range(1, key.ndim - 2)) - if not isinstance(axis, Iterable): - axis = (axis,) - assert key.ndim == query.ndim - assert key.ndim == value.ndim - for ax in axis: - if not (query.ndim >= 3 and 1 <= ax < query.ndim - 2): - raise ValueError("Attention axis must be between the batch axis and the last-two axes.") - n = key.ndim - - # Constructing projection tensor. - if self.redraw_features: - # TODO(kchoro): Get rid of the constant below. - query_seed = lax.convert_element_type(jnp.ceil(jnp.sum(query) * 10000000.0), jnp.int32) - rng = random.PRNGKey(query_seed) - self.projection_matrix = self.draw_weights(rng) - - # batch_dims is , num_heads> - batch_dims = tuple(onp.delete(range(n), axis + (n - 1,))) - # q & k -> (bs, , num_heads, , channels) - qk_perm = batch_dims + axis + (n - 1,) - k_extra_perm = axis + batch_dims + (n - 1,) - key_extra = key.transpose(k_extra_perm) - key = key.transpose(qk_perm) - query = query.transpose(qk_perm) - # v -> (bs, , num_heads, , channels) - v_perm = batch_dims + axis + (n - 1,) - value = value.transpose(v_perm) - batch_dims_t = tuple(range(len(batch_dims))) - attention_dims_t = tuple(range(len(batch_dims), len(batch_dims) + len(axis))) - - # Constructing tensors Q^{'} and K^{'}. - query_prime = self.kernel_feature_creator( - query, self.projection_matrix, attention_dims_t, batch_dims_t, precision, True - ) - key_prime = self.kernel_feature_creator( - key, self.projection_matrix, attention_dims_t, batch_dims_t, precision, False - ) - - if self.unidirectional: - index = attention_dims_t[0] - z_slice_shape = key_prime.shape[0 : len(batch_dims_t)] + (key_prime.shape[-1],) + (value.shape[-1],) - - numerator_fn = _numerator(z_slice_shape, precision, self.lax_scan_unroll) - W = numerator_fn( - jnp.moveaxis(query_prime, index, 0), jnp.moveaxis(key_prime, index, 0), jnp.moveaxis(value, index, 0) - ) - - # Constructing W = (Q^{'}(K^{'})^{T})_{masked}V - W = jnp.moveaxis(W, 0, index) - - if not self.renormalize_attention: - # Unidirectional, not-normalized attention. - perm_inv = _invert_perm(qk_perm) - result = W.transpose(perm_inv) - return result - else: - # Unidirectional, normalized attention. 
- thick_all_ones = jnp.zeros(key.shape[0:-1]) + jnp.ones(key_extra.shape[0 : len(axis)]) - - index = attention_dims_t[0] - t_slice_shape = key_prime.shape[0 : len(batch_dims_t)] + (key_prime.shape[-1],) - denominator_fn = _denominator(t_slice_shape, precision, self.lax_scan_unroll) - R = denominator_fn(jnp.moveaxis(query_prime, index, 0), jnp.moveaxis(key_prime, index, 0)) - - R = jnp.moveaxis(R, 0, index) - else: - contract_query = tuple(range(len(batch_dims) + len(axis), len(batch_dims) + len(axis) + 1)) - contract_z = tuple(range(len(batch_dims), len(batch_dims) + 1)) - # Constructing Z = (K^{'})^{T}V - # Z (bs, , num_heads, channels_m, channels_v) - Z = lax.dot_general( - key_prime, - value, - ((attention_dims_t, attention_dims_t), (batch_dims_t, batch_dims_t)), - precision=precision, - ) - # Constructing W = Q^{'}Z = Q^{'}(K^{'})^{T}V - # q (bs, , num_heads, , channels_m) - # Z (bs, , num_heads, channels_m, channels_v) - # W (bs, , num_heads, , channels_v) - W = lax.dot_general( - query_prime, Z, ((contract_query, contract_z), (batch_dims_t, batch_dims_t)), precision=precision - ) - if not self.renormalize_attention: - # Bidirectional, not-normalized attention. - perm_inv = _invert_perm(qk_perm) - result = W.transpose(perm_inv) - return result - else: - # Bidirectional, normalized attention. - thick_all_ones = jnp.zeros(key.shape[0:-1]) + jnp.ones(key_extra.shape[0 : len(axis)]) - contract_key = tuple(range(len(batch_dims), len(batch_dims) + len(axis))) - contract_thick_all_ones = tuple(range(thick_all_ones.ndim - len(axis), thick_all_ones.ndim)) - # Construct T = (K^{'})^{T} 1_L - # k (bs, , num_heads, , channels) - T = lax.dot_general( - key_prime, - thick_all_ones, - ((contract_key, contract_thick_all_ones), (batch_dims_t, batch_dims_t)), - precision=precision, - ) - - # Construct partition function: R = Q^{'} T = Q^{'}(K^{'})^{T} 1_L - # q_p (bs, , num_heads, , channs_m) - # T (bs, , num_heads, channels_m) - R = lax.dot_general( - query_prime, - T, - (((query_prime.ndim - 1,), (T.ndim - 1,)), (batch_dims_t, range(0, len(T.shape) - 1))), - precision=precision, - ) - - R = R + 2 * self.numerical_stabilizer * (jnp.abs(R) <= self.numerical_stabilizer) - R = jnp.reciprocal(R) - R = jnp.expand_dims(R, len(R.shape)) - # W (bs, , num_heads, , channels_v) - # R (bs, , num_heads, , extra_channel) - result = W * R - # back to (bs, dim1, dim2, ..., dimN, num_heads, channels) - perm_inv = _invert_perm(qk_perm) - result = result.transpose(perm_inv) - return result - - -def _invert_perm(perm): - perm_inv = [0] * len(perm) - for i, j in enumerate(perm): - perm_inv[j] = i - return tuple(perm_inv) diff --git a/examples/research_projects/performer/run_mlm_performer.py b/examples/research_projects/performer/run_mlm_performer.py deleted file mode 100644 index 0332fe1575f..00000000000 --- a/examples/research_projects/performer/run_mlm_performer.py +++ /dev/null @@ -1,693 +0,0 @@ -# coding=utf-8 -# Copyright 2020 The HuggingFace Team All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
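The deleted `fast_attention.py` module above implements FAVOR+ attention: queries and keys are passed through a non-negative random-feature map, after which full softmax attention collapses to the low-rank products Z = (K')^T V and W = Q' Z, renormalized by R = Q' (K')^T 1_L, while the unidirectional branch computes the same quantities with the prefix-sum scans in `_numerator` and `_denominator`. Below is a minimal, self-contained JAX sketch of that decomposition rather than the project's exact API: it uses a plain Gaussian projection instead of the orthogonal QR blocks drawn by `GaussianOrthogonalRandomMatrix`, cumulative sums instead of the custom-VJP `lax.scan`, and made-up shapes.

```python
# Illustrative sketch of FAVOR+ (Performer) attention; not part of the removed file.
import jax
import jax.numpy as jnp


def softmax_kernel_features(x, projection, is_query, eps=1e-6):
    # Non-negative softmax kernel features phi(x), mirroring the feature creator above.
    data_normalizer = 1.0 / jnp.sqrt(jnp.sqrt(x.shape[-1]))
    ratio = 1.0 / jnp.sqrt(projection.shape[0])
    dash = jnp.einsum("...ld,md->...lm", data_normalizer * x, projection)
    diag = jnp.sum(jnp.square(data_normalizer * x), axis=-1, keepdims=True) / 2.0
    # Queries are stabilized per position, keys over the whole tensor.
    stabilizer = jnp.max(dash, axis=-1, keepdims=True) if is_query else jnp.max(dash)
    return ratio * jnp.exp(dash - diag - stabilizer) + eps


def favor_bidirectional_attention(q, k, v, projection):
    # softmax(Q K^T) V  ~=  R^-1 * (Q' ((K')^T V)), as in the bidirectional branch above.
    q_prime = softmax_kernel_features(q, projection, is_query=True)   # [..., L, m]
    k_prime = softmax_kernel_features(k, projection, is_query=False)  # [..., L, m]
    z = jnp.einsum("...lm,...ld->...md", k_prime, v)                  # Z = (K')^T V
    w = jnp.einsum("...lm,...md->...ld", q_prime, z)                  # W = Q' Z
    r = jnp.einsum("...lm,...m->...l", q_prime, jnp.sum(k_prime, axis=-2))  # R = Q' (K')^T 1_L
    return w / r[..., None]


def favor_causal_attention(q, k, v, projection):
    # Causal variant: prefix sums stand in for the custom-VJP lax.scan used above.
    q_prime = softmax_kernel_features(q, projection, is_query=True)
    k_prime = softmax_kernel_features(k, projection, is_query=False)
    z = jnp.cumsum(jnp.einsum("...lm,...ld->...lmd", k_prime, v), axis=-3)  # running (K')^T V
    w = jnp.einsum("...lm,...lmd->...ld", q_prime, z)
    r = jnp.einsum("...lm,...lm->...l", q_prime, jnp.cumsum(k_prime, axis=-2))
    return w / r[..., None]


# Tiny smoke test: batch 2, length 16, head dim 32, 64 random features.
key_q, key_k, key_v, key_p = jax.random.split(jax.random.PRNGKey(0), 4)
q = jax.random.normal(key_q, (2, 16, 32))
k = jax.random.normal(key_k, (2, 16, 32))
v = jax.random.normal(key_v, (2, 16, 32))
projection = jax.random.normal(key_p, (64, 32))
print(favor_bidirectional_attention(q, k, v, projection).shape)  # (2, 16, 32)
print(favor_causal_attention(q, k, v, projection).shape)         # (2, 16, 32)
```

The causal sketch materializes the running (K')^T V tensor for readability; the removed implementation streams it through `lax.scan` with a hand-written VJP precisely to avoid that memory cost.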
-""" -Fine-tuning the library models for masked language modeling (BERT, ALBERT, RoBERTa...) with whole word masking on a -text file or a dataset. - -Here is the full list of checkpoints on the hub that can be fine-tuned by this script: -https://huggingface.co/models?filter=fill-mask -""" - -import logging -import os -import sys -from dataclasses import dataclass, field - -# You can also adapt this script on your own masked language modeling task. Pointers for this are left as comments. -from pathlib import Path -from typing import Dict, List, Optional, Tuple - -import jax -import jax.numpy as jnp -import numpy as np -from datasets import load_dataset -from flax import jax_utils -from flax.optim import Adam -from flax.training import common_utils -from flax.training.common_utils import get_metrics -from jax.nn import log_softmax -from modeling_flax_performer import FlaxPerformerForMaskedLM -from tqdm import tqdm - -from transformers import ( - MODEL_FOR_MASKED_LM_MAPPING, - AutoTokenizer, - BertConfig, - FlaxBertForMaskedLM, - HfArgumentParser, - PreTrainedTokenizerBase, - TensorType, - TrainingArguments, - is_tensorboard_available, - set_seed, -) - - -# Cache the result -has_tensorboard = is_tensorboard_available() -if has_tensorboard: - try: - from flax.metrics.tensorboard import SummaryWriter - except ImportError as ie: - has_tensorboard = False - print(f"Unable to display metrics through TensorBoard because some package are not installed: {ie}") - -else: - print( - "Unable to display metrics through TensorBoard because the package is not installed: " - "Please run pip install tensorboard to enable." - ) - -MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys()) -MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) - - -@dataclass -class WandbArguments: - """ - Arguments for logging - """ - - wandb_user_name: Optional[str] = field( - default=None, - metadata={"help": "The WandB user name for potential logging. If left None, no logging"}, - ) - wandb_project_name: Optional[str] = field( - default="performer-experiments", - metadata={"help": "The WandB project name for potential logging"}, - ) - - -@dataclass -class ModelArguments: - """ - Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch. - """ - - model_name_or_path: Optional[str] = field( - default=None, - metadata={ - "help": ( - "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch." - ) - }, - ) - performer: bool = field( - default=False, - metadata={"help": "Whether to use FAVOR+ attention"}, - ) - reinitialize: bool = field( - default=False, - metadata={"help": "Whether to use a blank model without pretraining"}, - ) - tokenizer_name: Optional[str] = field( - default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} - ) - use_fast_tokenizer: bool = field( - default=True, - metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, - ) - cache_dir: Optional[str] = field( - default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"} - ) - - -@dataclass -class DataTrainingArguments: - """ - Arguments pertaining to what data we are going to input our model for training and eval. 
- """ - - dataset_name: Optional[str] = field( - default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."} - ) - dataset_config_name: Optional[str] = field( - default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} - ) - train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."}) - validation_file: Optional[str] = field( - default=None, - metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."}, - ) - train_ref_file: Optional[str] = field( - default=None, - metadata={"help": "An optional input train ref data file for whole word masking in Chinese."}, - ) - validation_ref_file: Optional[str] = field( - default=None, - metadata={"help": "An optional input validation ref data file for whole word masking in Chinese."}, - ) - overwrite_cache: bool = field( - default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} - ) - validation_split_percentage: Optional[int] = field( - default=5, - metadata={ - "help": "The percentage of the train set used as validation set in case there's no validation split" - }, - ) - max_seq_length: Optional[int] = field( - default=None, - metadata={ - "help": ( - "The maximum total input sequence length after tokenization. Sequences longer " - "than this will be truncated. Default to the max input length of the model." - ) - }, - ) - preprocessing_num_workers: Optional[int] = field( - default=None, - metadata={"help": "The number of processes to use for the preprocessing."}, - ) - mlm_probability: float = field( - default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"} - ) - pad_to_max_length: bool = field( - default=False, - metadata={ - "help": ( - "Whether to pad all samples to `max_seq_length`. " - "If False, will pad the samples dynamically when batching to the maximum length in the batch." - ) - }, - ) - - def __post_init__(self): - if self.dataset_name is None and self.train_file is None and self.validation_file is None: - raise ValueError("Need either a dataset name or a training/validation file.") - else: - if self.train_file is not None: - extension = self.train_file.split(".")[-1] - assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file." - if self.validation_file is not None: - extension = self.validation_file.split(".")[-1] - assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file." - - -# Adapted from transformers/data/data_collator.py -# Letting here for now, let's discuss where it should live -@dataclass -class FlaxDataCollatorForLanguageModeling: - """ - Data collator used for language modeling. Inputs are dynamically padded to the maximum length of a batch if they - are not all of the same length. - - Args: - tokenizer (:class:`~transformers.PreTrainedTokenizer` or :class:`~transformers.PreTrainedTokenizerFast`): - The tokenizer used for encoding the data. - mlm (:obj:`bool`, `optional`, defaults to :obj:`True`): - Whether or not to use masked language modeling. If set to :obj:`False`, the labels are the same as the - inputs with the padding tokens ignored (by setting them to -100). Otherwise, the labels are -100 for - non-masked tokens and the value to predict for the masked token. 
- mlm_probability (:obj:`float`, `optional`, defaults to 0.15): - The probability with which to (randomly) mask tokens in the input, when :obj:`mlm` is set to :obj:`True`. - - .. note:: - - For best performance, this data collator should be used with a dataset having items that are dictionaries or - BatchEncoding, with the :obj:`"special_tokens_mask"` key, as returned by a - :class:`~transformers.PreTrainedTokenizer` or a :class:`~transformers.PreTrainedTokenizerFast` with the - argument :obj:`return_special_tokens_mask=True`. - """ - - tokenizer: PreTrainedTokenizerBase - mlm: bool = True - mlm_probability: float = 0.15 - - def __post_init__(self): - if self.mlm and self.tokenizer.mask_token is None: - raise ValueError( - "This tokenizer does not have a mask token which is necessary for masked language modeling. " - "You should pass `mlm=False` to train on causal language modeling instead." - ) - - def __call__(self, examples: List[Dict[str, np.ndarray]], pad_to_multiple_of: int) -> Dict[str, np.ndarray]: - # Handle dict or lists with proper padding and conversion to tensor. - batch = self.tokenizer.pad(examples, pad_to_multiple_of=pad_to_multiple_of, return_tensors=TensorType.NUMPY) - - # If special token mask has been preprocessed, pop it from the dict. - special_tokens_mask = batch.pop("special_tokens_mask", None) - if self.mlm: - batch["input_ids"], batch["labels"] = self.mask_tokens( - batch["input_ids"], special_tokens_mask=special_tokens_mask - ) - else: - labels = batch["input_ids"].copy() - if self.tokenizer.pad_token_id is not None: - labels[labels == self.tokenizer.pad_token_id] = -100 - batch["labels"] = labels - return batch - - def mask_tokens( - self, inputs: np.ndarray, special_tokens_mask: Optional[np.ndarray] - ) -> Tuple[jnp.ndarray, jnp.ndarray]: - """ - Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original. - """ - labels = inputs.copy() - # We sample a few tokens in each sequence for MLM training (with probability `self.mlm_probability`) - probability_matrix = np.full(labels.shape, self.mlm_probability) - special_tokens_mask = special_tokens_mask.astype("bool") - - probability_matrix[special_tokens_mask] = 0.0 - masked_indices = np.random.binomial(1, probability_matrix).astype("bool") - labels[~masked_indices] = -100 # We only compute loss on masked tokens - - # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK]) - indices_replaced = np.random.binomial(1, np.full(labels.shape, 0.8)).astype("bool") & masked_indices - inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token) - - # 10% of the time, we replace masked input tokens with random word - indices_random = np.random.binomial(1, np.full(labels.shape, 0.5)).astype("bool") - indices_random &= masked_indices & ~indices_replaced - - random_words = np.random.randint(self.tokenizer.vocab_size, size=labels.shape, dtype="i4") - inputs[indices_random] = random_words[indices_random] - - # The rest of the time (10% of the time) we keep the masked input tokens unchanged - return inputs, labels - - -def create_learning_rate_scheduler( - factors="constant * linear_warmup * rsqrt_decay", - base_learning_rate=0.5, - warmup_steps=1000, - decay_factor=0.5, - steps_per_decay=20000, - steps_per_cycle=100000, -): - """Creates learning rate schedule. 
- Interprets factors in the factors string which can consist of: - * constant: interpreted as the constant value, - * linear_warmup: interpreted as linear warmup until warmup_steps, - * rsqrt_decay: divide by square root of max(step, warmup_steps) - * rsqrt_normalized_decay: divide by square root of max(step/warmup_steps, 1) - * decay_every: Every k steps decay the learning rate by decay_factor. - * cosine_decay: Cyclic cosine decay, uses steps_per_cycle parameter. - Args: - factors: string, factors separated by "*" that defines the schedule. - base_learning_rate: float, the starting constant for the lr schedule. - warmup_steps: int, how many steps to warm up for in the warmup schedule. - decay_factor: float, the amount to decay the learning rate by. - steps_per_decay: int, how often to decay the learning rate. - steps_per_cycle: int, steps per cycle when using cosine decay. - Returns: - a function learning_rate(step): float -> {"learning_rate": float}, the - step-dependent lr. - """ - factors = [n.strip() for n in factors.split("*")] - - def step_fn(step): - """Step to learning rate function.""" - ret = 1.0 - for name in factors: - if name == "constant": - ret *= base_learning_rate - elif name == "linear_warmup": - ret *= jnp.minimum(1.0, step / warmup_steps) - elif name == "rsqrt_decay": - ret /= jnp.sqrt(jnp.maximum(step, warmup_steps)) - elif name == "rsqrt_normalized_decay": - ret *= jnp.sqrt(warmup_steps) - ret /= jnp.sqrt(jnp.maximum(step, warmup_steps)) - elif name == "decay_every": - ret *= decay_factor ** (step // steps_per_decay) - elif name == "cosine_decay": - progress = jnp.maximum(0.0, (step - warmup_steps) / float(steps_per_cycle)) - ret *= jnp.maximum(0.0, 0.5 * (1.0 + jnp.cos(jnp.pi * (progress % 1.0)))) - else: - raise ValueError("Unknown factor %s." % name) - return jnp.asarray(ret, dtype=jnp.float32) - - return step_fn - - -def compute_metrics(logits, labels, weights, label_smoothing=0.0): - """Compute summary metrics.""" - loss, normalizer = cross_entropy(logits, labels, weights, label_smoothing) - acc, _ = accuracy(logits, labels, weights) - metrics = {"loss": loss, "accuracy": acc, "normalizer": normalizer} - metrics = jax.lax.psum(metrics, axis_name="batch") - return metrics - - -def accuracy(logits, targets, weights=None): - """Compute weighted accuracy for log probs and targets. - Args: - logits: [batch, length, num_classes] float array. - targets: categorical targets [batch, length] int array. - weights: None or array of shape [batch, length] - Returns: - Tuple of scalar loss and batch normalizing factor. - """ - if logits.ndim != targets.ndim + 1: - raise ValueError( - "Incorrect shapes. Got shape %s logits and %s targets" % (str(logits.shape), str(targets.shape)) - ) - - loss = jnp.equal(jnp.argmax(logits, axis=-1), targets) - loss *= weights - - return loss.sum(), weights.sum() - - -def cross_entropy(logits, targets, weights=None, label_smoothing=0.0): - """Compute cross entropy and entropy for log probs and targets. - Args: - logits: [batch, length, num_classes] float array. - targets: categorical targets [batch, length] int array. - weights: None or array of shape [batch, length] - label_smoothing: label smoothing constant, used to determine the on and off values. - Returns: - Tuple of scalar loss and batch normalizing factor. - """ - if logits.ndim != targets.ndim + 1: - raise ValueError( - "Incorrect shapes. 
Got shape %s logits and %s targets" % (str(logits.shape), str(targets.shape)) - ) - - vocab_size = logits.shape[-1] - confidence = 1.0 - label_smoothing - low_confidence = (1.0 - confidence) / (vocab_size - 1) - normalizing_constant = -( - confidence * jnp.log(confidence) + (vocab_size - 1) * low_confidence * jnp.log(low_confidence + 1e-20) - ) - soft_targets = common_utils.onehot(targets, vocab_size, on_value=confidence, off_value=low_confidence) - - loss = -jnp.sum(soft_targets * log_softmax(logits), axis=-1) - loss = loss - normalizing_constant - - if weights is not None: - loss = loss * weights - normalizing_factor = weights.sum() - else: - normalizing_factor = np.prod(targets.shape) - - return loss.sum(), normalizing_factor - - -def training_step(optimizer, batch, dropout_rng): - dropout_rng, new_dropout_rng = jax.random.split(dropout_rng) - - def loss_fn(params): - targets = batch.pop("labels") - - # Hide away tokens which doesn't participate in the optimization - token_mask = jnp.where(targets > 0, 1.0, 0.0) - - logits = model(**batch, params=params, dropout_rng=dropout_rng, train=True)[0] - loss, weight_sum = cross_entropy(logits, targets, token_mask) - return loss / weight_sum - - step = optimizer.state.step - lr = lr_scheduler_fn(step) - grad_fn = jax.value_and_grad(loss_fn) - loss, grad = grad_fn(optimizer.target) - grad = jax.lax.pmean(grad, "batch") - optimizer = optimizer.apply_gradient(grad, learning_rate=lr) - - return loss, optimizer, new_dropout_rng - - -def eval_step(params, batch): - """ - Calculate evaluation metrics on a batch. - """ - targets = batch.pop("labels") - - # Hide away tokens which doesn't participate in the optimization - token_mask = jnp.where(targets > 0, 1.0, 0.0) - logits = model(**batch, params=params, train=False)[0] - - return compute_metrics(logits, targets, token_mask) - - -def generate_batch_splits(samples_idx: np.ndarray, batch_size: int) -> np.ndarray: - nb_samples = len(samples_idx) - samples_to_remove = nb_samples % batch_size - - if samples_to_remove != 0: - samples_idx = samples_idx[:-samples_to_remove] - sections_split = nb_samples // batch_size - batch_idx = np.split(samples_idx, sections_split) - return batch_idx - - -if __name__ == "__main__": - # See all possible arguments in src/transformers/training_args.py - # or by passing the --help flag to this script. - # We now keep distinct sets of args, for a cleaner separation of concerns. - - parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments, WandbArguments)) - if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): - # If we pass only one argument to the script and it's the path to a json file, - # let's parse it to get our arguments. - model_args, data_args, training_args, wandb_args = parser.parse_json_file( - json_file=os.path.abspath(sys.argv[1]) - ) - else: - model_args, data_args, training_args, wandb_args = parser.parse_args_into_dataclasses() - - if ( - os.path.exists(training_args.output_dir) - and os.listdir(training_args.output_dir) - and training_args.do_train - and not training_args.overwrite_output_dir - ): - raise ValueError( - f"Output directory ({training_args.output_dir}) already exists and is not empty. " - "Use --overwrite_output_dir to overcome." 
- ) - - # Setup logging - logging.basicConfig( - format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", - level="NOTSET", - datefmt="[%X]", - ) - - # Log on each process the small summary: - logger = logging.getLogger(__name__) - logger.warning( - f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" - + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}" - ) - - # Set the verbosity to info of the Transformers logger (on main process only): - logger.info("Training/evaluation parameters %s", training_args) - - # Set seed before initializing model. - set_seed(training_args.seed) - - # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) - # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ - # (the dataset will be downloaded automatically from the datasets Hub). - # - # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called - # 'text' is found. You can easily tweak this behavior (see below). - # - # In distributed training, the load_dataset function guarantees that only one local process can concurrently - # download the dataset. - if data_args.dataset_name is not None: - # Downloading and loading a dataset from the hub. - datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name) - if "validation" not in datasets.keys(): - datasets["validation"] = load_dataset( - data_args.dataset_name, - data_args.dataset_config_name, - split=f"train[:{data_args.validation_split_percentage}%]", - ) - datasets["train"] = load_dataset( - data_args.dataset_name, - data_args.dataset_config_name, - split=f"train[{data_args.validation_split_percentage}%:]", - ) - else: - data_files = {} - if data_args.train_file is not None: - data_files["train"] = data_args.train_file - extension = data_args.train_file.split(".")[-1] - if data_args.validation_file is not None: - data_files["validation"] = data_args.validation_file - extension = data_args.validation_file.split(".")[-1] - if extension == "txt": - extension = "text" - datasets = load_dataset(extension, data_files=data_files) - # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at - # https://huggingface.co/docs/datasets/loading_datasets. - - # Load pretrained model and tokenizer - - # Distributed training: - # The .from_pretrained methods guarantee that only one local process can concurrently - # download model & vocab. 
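`FlaxDataCollatorForLanguageModeling.mask_tokens`, defined earlier in this file, applies the standard BERT corruption scheme: 15% of non-special positions are selected, 80% of those become the mask token, 10% become a random token, 10% keep their original id, and labels are set to -100 everywhere else. A minimal NumPy illustration of that logic, using toy token ids that are assumptions for the example rather than output of the tokenizer configured in this script:

```python
# Toy illustration of the 80/10/10 masking performed by mask_tokens above.
import numpy as np

rng = np.random.default_rng(0)
inputs = np.array([[101, 2023, 2003, 1037, 7279, 102]])   # hypothetical token ids
special = np.array([[1, 0, 0, 0, 0, 1]], dtype=bool)      # e.g. [CLS]/[SEP] are never masked
mask_token_id, vocab_size, mlm_probability = 103, 30522, 0.15

labels = inputs.copy()
prob = np.full(labels.shape, mlm_probability)
prob[special] = 0.0
masked = rng.binomial(1, prob).astype(bool)
labels[~masked] = -100                                     # loss is computed on masked positions only

replaced = rng.binomial(1, np.full(labels.shape, 0.8)).astype(bool) & masked
inputs[replaced] = mask_token_id                           # 80% of masked positions become [MASK]
random_w = rng.binomial(1, np.full(labels.shape, 0.5)).astype(bool) & masked & ~replaced
inputs[random_w] = rng.integers(vocab_size, size=labels.shape)[random_w]  # 10% become a random id
# the remaining masked positions keep their original token
print(inputs, labels)
```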
- - rng = jax.random.PRNGKey(training_args.seed) - dropout_rngs = jax.random.split(rng, jax.local_device_count()) - - config = BertConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir) - lm_class = FlaxPerformerForMaskedLM if model_args.performer else FlaxBertForMaskedLM - if model_args.reinitialize: - model = lm_class(config=BertConfig.from_pretrained(model_args.model_name_or_path)) - else: - model = lm_class.from_pretrained( - model_args.model_name_or_path, - dtype=jnp.float32, - input_shape=(training_args.train_batch_size, config.max_position_embeddings), - seed=training_args.seed, - dropout_rate=0.1, - ) - - if model_args.tokenizer_name: - tokenizer = AutoTokenizer.from_pretrained( - model_args.tokenizer_name, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer - ) - elif model_args.model_name_or_path: - tokenizer = AutoTokenizer.from_pretrained( - model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer - ) - else: - raise ValueError( - "You are instantiating a new tokenizer from scratch. This is not supported by this script. " - "You can do it from another script, save it, and load it from here, using --tokenizer_name." - ) - - # Preprocessing the datasets. - # First we tokenize all the texts. - if training_args.do_train: - column_names = datasets["train"].column_names - else: - column_names = datasets["validation"].column_names - text_column_name = "text" if "text" in column_names else column_names[0] - - padding = "max_length" if data_args.pad_to_max_length else False - - def tokenize_function(examples): - # Remove empty lines - examples = [line for line in examples if len(line) > 0 and not line.isspace()] - return tokenizer( - examples, - return_special_tokens_mask=True, - padding=padding, - truncation=True, - max_length=data_args.max_seq_length, - ) - - tokenized_datasets = datasets.map( - tokenize_function, - input_columns=[text_column_name], - batched=True, - num_proc=data_args.preprocessing_num_workers, - remove_columns=column_names, - load_from_cache_file=not data_args.overwrite_cache, - ) - - # Enable tensorboard only on the master node - if has_tensorboard and jax.host_id() == 0: - summary_writer = SummaryWriter(log_dir=Path(training_args.output_dir).joinpath("logs").as_posix()) - - # Data collator - # This one will take care of randomly masking the tokens. 
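`create_learning_rate_scheduler`, defined earlier and instantiated just below with its default factor string `"constant * linear_warmup * rsqrt_decay"`, reduces to lr(step) = base * min(1, step / warmup) / sqrt(max(step, warmup)). A small worked example, assuming the learning rate of 5e-4 and 100 warmup steps used by `sanity_script.sh` further down; the printed values are only illustrative:

```python
# Worked example of the "constant * linear_warmup * rsqrt_decay" schedule (illustrative values).
import jax.numpy as jnp


def lr(step, base=5e-4, warmup=100):
    return base * jnp.minimum(1.0, step / warmup) / jnp.sqrt(jnp.maximum(step, warmup))


print(lr(10))     # 5e-06: still warming up, decay pinned at 1/sqrt(warmup)
print(lr(100))    # 5e-05: warmup finished, peak value base/sqrt(warmup)
print(lr(10000))  # 5e-06: pure 1/sqrt(step) decay
```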
- data_collator = FlaxDataCollatorForLanguageModeling(tokenizer=tokenizer, mlm_probability=data_args.mlm_probability) - - # Setup optimizer - optimizer = Adam( - learning_rate=training_args.learning_rate, - weight_decay=training_args.weight_decay, - beta1=training_args.adam_beta1, - beta2=training_args.adam_beta2, - ).create(model.params) - - # Create learning rate scheduler - lr_scheduler_fn = create_learning_rate_scheduler( - base_learning_rate=training_args.learning_rate, warmup_steps=max(training_args.warmup_steps, 1) - ) - - # Create parallel version of the training and evaluation steps - p_training_step = jax.pmap(training_step, "batch", donate_argnums=(0,)) - p_eval_step = jax.pmap(eval_step, "batch", donate_argnums=(0,)) - - # Replicate the optimizer on each device - optimizer = jax_utils.replicate(optimizer) - - # Store some constant - nb_epochs = int(training_args.num_train_epochs) - batch_size = int(training_args.train_batch_size) - eval_batch_size = int(training_args.eval_batch_size) - - if wandb_args.wandb_user_name is not None: - import wandb - - wandb.init(project=wandb_args.wandb_project_name, entity=wandb_args.wandb_user_name) - - epochs = tqdm(range(nb_epochs), desc=f"Epoch ... (1/{nb_epochs})", position=0) - for epoch in epochs: - # ======================== Training ================================ - # Create sampling rng - rng, training_rng, eval_rng = jax.random.split(rng, 3) - - # Generate an epoch by shuffling sampling indices from the train dataset - nb_training_samples = len(tokenized_datasets["train"]) - # Avoid using jax.numpy here in case of TPU training - training_samples_idx = np.random.permutation(np.arange(nb_training_samples)) - training_batch_idx = generate_batch_splits(training_samples_idx, batch_size) - - # Gather the indexes for creating the batch and do a training step - for batch_idx in tqdm(training_batch_idx, desc="Training...", position=1): - samples = [tokenized_datasets["train"][int(idx)] for idx in batch_idx] - model_inputs = data_collator(samples, pad_to_multiple_of=16) - - # Model forward - model_inputs = common_utils.shard(model_inputs.data) - loss, optimizer, dropout_rngs = p_training_step(optimizer, model_inputs, dropout_rngs) - - if wandb_args.wandb_user_name is not None: - wandb.log({"Training loss": np.array(loss).mean()}) - - epochs.write(f"Loss: {loss}") - - # ======================== Evaluating ============================== - nb_eval_samples = len(tokenized_datasets["validation"]) - # Avoid using jax.numpy here in case of TPU training - eval_samples_idx = np.arange(nb_eval_samples) - eval_batch_idx = generate_batch_splits(eval_samples_idx, eval_batch_size) - - eval_metrics = [] - for i, batch_idx in enumerate(tqdm(eval_batch_idx, desc="Evaluating ...", position=2)): - samples = [tokenized_datasets["validation"][int(idx)] for idx in batch_idx] - model_inputs = data_collator(samples, pad_to_multiple_of=16) - - # Model forward - model_inputs = common_utils.shard(model_inputs.data) - metrics = p_eval_step(optimizer.target, model_inputs) - eval_metrics.append(metrics) - - eval_metrics_np = get_metrics(eval_metrics) - eval_metrics_np = jax.tree_util.tree_map(jnp.sum, eval_metrics_np) - eval_normalizer = eval_metrics_np.pop("normalizer") - eval_summary = jax.tree_util.tree_map(lambda x: x / eval_normalizer, eval_metrics_np) - - # Update progress bar - epochs.desc = ( - f"Epoch... 
({epoch + 1}/{nb_epochs} | Loss: {eval_summary['loss']}, Acc: {eval_summary['accuracy']})" - ) - - if wandb_args.wandb_user_name is not None: - wandb.log({"Eval loss": np.array(eval_summary["loss"]).mean()}) - - # Save metrics - if has_tensorboard and jax.host_id() == 0: - for name, value in eval_summary.items(): - summary_writer.scalar(name, value, epoch) diff --git a/examples/research_projects/performer/sanity_script.sh b/examples/research_projects/performer/sanity_script.sh deleted file mode 100755 index b96cd7e643e..00000000000 --- a/examples/research_projects/performer/sanity_script.sh +++ /dev/null @@ -1 +0,0 @@ -TOKENIZERS_PARALLELISM=true python run_mlm_performer.py --output_dir experiments --dataset_name wikipedia --dataset_config_name 20200501.simple --model_name_or_path bert-base-cased --tokenizer_name bert-base-cased --do_train --overwrite_output_dir --per_device_train_batch_size 4 --learning_rate 5e-4 --warmup_steps 100 --num_train_epochs 3 --performer \ No newline at end of file diff --git a/examples/research_projects/pplm/README.md b/examples/research_projects/pplm/README.md deleted file mode 100644 index f37ea8e96f2..00000000000 --- a/examples/research_projects/pplm/README.md +++ /dev/null @@ -1,56 +0,0 @@ -# Plug and Play Language Models: a Simple Approach to Controlled Text Generation - -Authors: [Sumanth Dathathri](https://dathath.github.io/), [Andrea Madotto](https://andreamad8.github.io/), Janice Lan, Jane Hung, Eric Frank, [Piero Molino](https://w4nderlu.st/), [Jason Yosinski](http://yosinski.com/), and [Rosanne Liu](http://www.rosanneliu.com/) - -This folder contains the original code used to run the Plug and Play Language Model (PPLM). - -Paper link: https://arxiv.org/abs/1912.02164 - -Blog link: https://eng.uber.com/pplm - -Please check out the repo under uber-research for more information: https://github.com/uber-research/PPLM - -# Note - -⚠️ This project should be run with pytorch-lightning==1.0.4 which has a potential security vulnerability - -## Setup - -```bash -git clone https://github.com/huggingface/transformers && cd transformers -pip install . -pip install nltk torchtext # additional requirements. -cd examples/research_projects/pplm -``` - -## PPLM-BoW - -### Example command for bag-of-words control - -```bash -python run_pplm.py -B military --cond_text "The potato" --length 50 --gamma 1.5 --num_iterations 3 --num_samples 10 --stepsize 0.03 --window_length 5 --kl_scale 0.01 --gm_scale 0.99 --colorama --sample -``` - -### Tuning hyperparameters for bag-of-words control - -1. Increase `--stepsize` to intensify topic control, and decrease its value to soften the control. `--stepsize 0` recovers the original uncontrolled GPT-2 model. - -2. If the language being generated is repetitive (For e.g. "science science experiment experiment"), there are several options to consider:
- a) Reduce the `--stepsize`
- b) Increase `--kl_scale` (the KL-loss coefficient) or decrease `--gm_scale` (the gm-scaling term)
- c) Add `--grad-length xx`, where xx is an integer <= length (e.g. `--grad-length 30`).
-
-
-## PPLM-Discrim
-
-### Example command for discriminator based sentiment control
-
-```bash
-python run_pplm.py -D sentiment --class_label 2 --cond_text "My dog died" --length 50 --gamma 1.0 --num_iterations 10 --num_samples 10 --stepsize 0.04 --kl_scale 0.01 --gm_scale 0.95 --sample
-```
-
-### Tuning hyperparameters for discriminator control
-
-1. Increase `--stepsize` to intensify topic control, and decrease its value to soften the control. `--stepsize 0` recovers the original uncontrolled GPT-2 model.
-
-2. Use `--class_label 3` for negative, and `--class_label 2` for positive
diff --git a/examples/research_projects/pplm/imgs/headfigure.png b/examples/research_projects/pplm/imgs/headfigure.png
deleted file mode 100644
index f4c11ad54d10b300e2051ef6ba2d209447bc92e4..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 668261
zc}rzKnv@Sb8y~tQ^kud~k8;T(U{qqA$E)?Iyx`)42}l>x^p@|*0B5iW$MbIThBS!@ zrGp~lPs<>MzR5fJQZkb@ahKAz>(0sd!YLSjCw;g5K$lzU@k+sqE5iw1lPy|1sMzC& zHZ~=1aFqOdD=)9$p7 zvOcwHO>nOn!mBg~YY|VK_76&rxg|wK8)t3D!d{3FlQcOWG$CQZelrf~ma?MFE0J>( zTSonRQ!PB>1 z!M~li@A=GB<>`K6*u$pfAeTn-OXBU%=fQ=YYFu#7Ha2sHZ-lOZ+&tZ05`4NpPQ_CH;c0;tJ-$6@b{N!ugU-XmK>`LTVqFNsO&}16+w+n`9$kRB{$FT7>_*h)!-Ec zbcJEg#D%{r3&N9aV08d;wqL*7iY{g{v8XwzE(se~&|T~J{TC&LdXzSTm7^P91W$i( z;88!6@?fHFkEbJ@SaeIr6sdJ7;bB48*+ITGm)>I{DW4q|67HaKl8vnb*Do%7cZ#HD z={)^0(&m^McJ^}$l$iFaASTLUtpe$gL$EmtausVvJCHQe^?pL7h5G&!S5Y^vl}nc* z(s4K`cqT|mJsL`}AG){Oh8eF4u>Ya*?Txx4UZmD%pi2hih{hlfYWfU|h{*=!PP@F| z-ImQEJ=l+mE2DDAkTGH+>k)Bi)GVC&S2AvQ5%uVIvgHR1pP%YQ7|1U2rCWG?``Gq5 zY4AC7roDo;eWs~R1eHR0$F18A6dr<(X3`Mmw(dX`I+ zLBgmpvsJ*8nWAz7DnO|7o32!c5nTK(0&w7Kvghvidn$Ms84CEpy|^zw-qM9Wt@6w$ z6>cS97fRygmYd7*oWtLFGWVI_c1J5O=<-AZ{QIllGyNYq=6|TF-Zem)Z3$STtKC|= zVjx0AJ0Yl+^uqPr!9z%dMq!fe_aEbuAdp5S)C`%eLp)_OV1y%t9>_TUR$8k!BQML& z7I@bM>w$e2WC&P^xKKN?XLg)ur=`fj0cOWOR-~ zPig;d(00v0t?=f2o`e2_U)&nUqV8lz`)c1HEy~q_!V$4Hk+$t2n)e+3-Ov5jjjm>> zu7I+kja%}!Mi`Y>HqPI;MyaoG-R{`<4-$ARwklwJ15(L|PAKXSd_!lxl@N(a*{E^) zL7J3=se8fAjy9j5DMtDfh-%7d%B=7(TWoLe2vGOp}?qh0l4UnXgM<6QF9+ zo$-`lDufPL9SiCOYMFVD$+T@j=`(~PCTk>E z2mbKQ5h5Sq1DRlyG2lmXzLJz;6#|5)W%p0p?Qhrb7kH2lI3w*He|7p(X5bU+mzAc< zz2UCS!d1<-LH$yB51uH`uV9(L)*vm>_xYfOP8z6hs~y@zyD@@inbO2iK*(u|4xaZn6 zo8TQBc=%Zp^)ynGGw`>*7B5{fPvz|7DykY4I>_l>{gV9?)VY>KJ&00<-{HNE%bnW{;dT(<|u&D zmgqa0I57V!D^7Qg*lkQsXXK2B4+WgNk5pI-&F`CQ!^6K${#ezlH`G&>A$-?_Xdx`T zbxy!DXYs^hrt;4Y>rb}I<%oUlm=KiW)ak(lva#rMNBhx?`P9W*IiE&vg!EjND}n&H z3lgmRv-57sN2Pi?+M*3cK>!-3Tm2k1DF*dT^4{Kok1)d<-{nA*uP?e~BGlc?y=C38 zybIHb&sqk#HPNsF1AcMx^rjSrV*XKu6C7M{p^p7{kB8{$t~1wz_!AvUVyn1?C zfvu}C!jl~Si=R(%0KpXvK4K(9a~}Zc^4^*FBfYELRPH`R3{;szkQBzVWGL>7wS4>2 z6tiG`3XV{(>*tuqeEB3#tjOf{fNQGEa_>G4pv}$}2w`13%=s)n6iuoGyCxGbY@H## zi@D?bJ?D6tnfIT6Y$xCS;3`tmVG*+5x8vc|0o#zupKE~aKiNi*Lb76WtF!sXuyFRm zUQ-vrEyhl;(TClJ`A~oyKP$mzOr3IDb@YaPYLK}g{aU(dT3a9g#q2kY zC~XFXZ%RDu=ZMcv141^jR6eS=t8KScc#M>r34TL-=?{e~KKvfEscoGFife5B*H`Q_ zu=j6ux%geO_F}w8z<)h`%Dy0%kaWz9Ujn%-`MRag=fnCE<;tJp#M?T-t!Y@OT$)%3N-e26yI1*} zoSQ1*IpJ6kIkN;kTV126g*ES|=ip#?iouC}w` zHG-qD2s4*TY}p1niZe)n@a-1l0qNdMfiym=6xJ(oZGk@_9iW58I|kjPN-m2GIjCQW zyyVxQU-lY(A4SmnvaXdw=S8RZa_ILs=|aPN;=$3|D9$Vxt_=wXeafA`5s8bgYrdu59G_e55P#(Z7h?Pl%Yb%; zc|ALh(ev_aZ%vDlH8y%;-a!tk$fRQnIuv5|79C!DqI%&@Bd{a>pVBufOiBwKCy8RtJu` z6raMnd_5LBub5c+Lh%cx)pq&gZUY(I3DNShTWQT27D1cvF|?*@)3N(Z>u%F79Mt2l z8%MlVg#Aa4b6i&0t|A81>f-wS|B{dLNo%%_ae+ET2jk_d?ArPzSnvUh$-TI|$4Gh9 zHaN90v^lCBjzi^m>nd!$FU&$}+jiK6#p8QsBsjv>lvvS<_oELq7KQ!N)G!|v7o5Yw z023gCJHWOEa3C6*JLAiGJsEMVxqMY$4ey-jhR{VH=vid1S_$lqtj#kbKiWc~L*+W1 zUWJAX#I3Jft#9S~QUy9TS&nTl?^+=0rr_eViaziKcYCg(OzonH0`P%oq* z%akF$w=#nKkS!zos&TXyRtc*=ff800kE@9%)fz|Yp4OJ7ux>Y_2!T-1@`lkFq)FGw zbVE8TDkfP^rKJX>WNgRiY?t4>b4e$zS}33qBQ-|!l|8Bd}u zFzdzWvfAiP%u_G8#5#BXaA8#L_h1(@Kgj>=!tE9u?B4qJvx3 zGTzbzJ$0=clZ_Y35JgXSiop?3RiCvKLtR!)|6s8-sr;W05ei0hFGpqdn3@+566RFb zWUk1sgm+G8?p(T@pJU0?Jf~v1gMb!y_k(n~qroJd;U?EqG2YtDx!Z}~jTia^$9gT? zs-QS(OS7cfXwaK{Pw|uu6EL92HvNLZa>@@|a@0v9?H00SnE%X3ExoP*8g%VdW#9#Z zIaO$S7FRPYW34n)x$J_PBU=52(@uVqRg<;&n*967wA!hL;=}{*rQk{-mbw}w3@=j_ zj>&UtoOdUhQ+0#Cl+De&Maq3fU}e{iK*QEKm(>DlA-kuw8C8Y9&cYp!_UgzMw*2GX zTWnCj{<(SQuxqFokSAO9sRU=9)djsN(^s~cTJC*p#RZ^Z!%4dL>sDWcrQE{ARz#a? 
zn@1?x+$p=0X57Q*~&PRK|#1?eZ|_8@oap z8;-_H=O(yt%$1$A2e>XGzRTt4ElsD#DJtyDX$Rxjq5`FBlkWVHTKz@Eq{+X6=H+OW z2dagq)6kgil0(VZQ7Ic$F#|F=D0Q|P-{RK^s-U|Oo%GQ&NxbQz3+jYadz?xqdmL!*7W+^!X?+89@S83HhkK_V>0obo z>UK!X*Tr<1s*7H`NtWLMWdy6cv>$kM&QI#Viq^%l%sHnsfZuW3 zwNCO?kSv=PY}v7P>cl#4RoMAncHJc=O;^xd$r5d|4mR(w()6nv3}G|9$m_lKP!vk$ zEhXi@9qXG|iuH}(wtyBq_71C}xI7QZnnWb+i%ZgJZ1f4ycVs&Ff4XwduU&|{JTq0i zHctm(KxHj0#eo$gp}K4InMmp{6HX)d5&p)gR(4h)E7i0ECTGA;_Z~M6Z+{> zdXSDl%jk+|!VO}QL^Vx@yQA9nXocdI%6Rk+l+F#F81ju*aQthBR!)BTO0!k6G)B1G zX2pm~6aG}L_5^3nV;5r2n}}~6-<~BjLbFs7(9IW4<|bgq!Oe%wdZnBd_ZQrnEV1pJ z2-H(4jVrIW6MVIp?&g$T2$}@Mh{VEuGT)8d=&8HsH@4oE7%5Yxjk#Kym9R{9(%kiM z{0p-BnpZ3N2+GBKuD8R(K0RSOn~*j*&XH-%tLSa7n`@|T?^qp)%*OFT_uRmJzLWU8 zj6xyXG*C0ND#4|n)V`Ok%sIYVS^VMcyC2$2)2atmbw&($60y7fU2U16ZTT)vIk(o@ zY%GFHT%s{N5?K$NoI?@cE4aReE=t|-U-Y!B(?~-$PjLB1dwW}C+lIMMs9H5~CABDF zYmU#@Gh2U2V1}!T13z?!Pl&8`!C{YcYM_&`4XPqqbB_mBpBR?Q_v&R_!3c{x!ObJs zysY-^^p%I(uIRdaf~z;n==e!1hW8CGEKdBgwP{l>we<0)D3z45hkrDjQ{QS-9`tE( z<51t|fApQ^&*eG%;MQyaI*ppR}C=B|2Yf2ZRng0o`&De+EwW>{Q?G{Yxio-1+@GHe^CA26x)3@@pKpcHDyPT6n4Cvm^=h8` ztQRO$GUsIN0uJ=ksF&F`n$G}@J|FVtE#YQ!Gpby}hDCr{E@8?eWQ~37g0)6V-c)5J zH7piCCKP+nZ5bOr959PM$93G@?baLTNykKoo^FZCdaAa+bd%GI?VkBHQJ2O10SONB z4Nq&S7`xVK;!`=!88%1jrwvSDB(77AOQft;u*n1g<3K9&Jd&HKgYrBNT#J_*KaXuJ z-aL}qMe@kE=SPJFHOOsEl*M66*`|3HT$SP!Vy-U8;lhnW5Q&(AH6hRCS?>wuY^33; zTHF)u5T^uuDhbl}$U4A&!L6y#az6yePx_qkg$g&%Z{iNe$S%ZLDb4 zToy@N$crs~C9KELd}o@ksgn1FqkTav?R`>@Z^E2z>X_*$+e7_*7s}ISKzT^}XNSmq zyhz^khb=+acd^uGvsg##)q%-e|K%1|0R}aA$M7x4&ml%rvRguTvZFVgiAs{JQ4ZBV zKq9_f>ta;rQ?f>QgJw4hHEg)4qzfX@4U18_r_zH6)I974+NqTdoakRvj(5YvivK_^|7P zj(4DZ2~&fwSTl9eDT7By|rClR2jto=!^ z<=$#+Xmec>8P&P>hxnIgWo++M`i|V*dBcM3JTLzQr5<4=^9e2Q z-y7(~$S2vU;Wt4oX&iK^_XRTpt!a}I+H~TXWB-^A7vgPt=OJ9MHcQvYmdo60h|eXL zclG2dB4W@65_1(7w4)aEqA{?a)k#`vb>K9~A(*jH>~2?8%SM)3n{*{A);lCe%A<9& zIO$M|0(JiKSsVj}E#nIX)azpvMqJOeTxO4sqnVA9%XqSt`_DDL+^4h{P&Qv0tq9}e=%YJ{!#6iJT)^X& zx_=v|M`n8S&9v?MO^hMSgmatLmhCP0wn1VXI6SHj>zLtLBeGpLcdfZt5?_D`ctc{v zF^@4j{M#89e+j3Zd2eItH=`3CxV4iku_)g&FaYixXXwDg>I!BKM{vn#BYbeYq9e#CDhdMB1!w|u;A#k zoS?1JF|K_t$nRnMvDOd8gFeS*LByISi}L}m*+k=^Lqxo-Zq*+gaPjxTwuTLUBkiqdjCVul@Ukb z*t0MaGIosLR=Um~ykyI+*`+;)uw-ZKZHg?Fw{ zx2K3#{W|zfX_{|bJV~d{+r0mr7(*O;#6eBM4&SS<391AZ1w|7cdGS~7VpLS94c1~Y z$?vI=PVbgv5EgZr9P$Zx>SB+bWY68`QAARWT-7xOvKtFEMz*1=ESOt1Z5@^J{@)U^ zsn5qg86%MbjEI#%HZH`%Y(CE;09D{*?ehNp(lvS;T1p{Pfmcpv2UOC=IAK5<@v{tj zH%3x*TZ*H~Vnr%)@fM5O-y#PAXOX}0LbQcyCiFIUtpF}L^UnB_7MBpp)xL(qSM2VC zz9pv2h`9i=TaC^+=XhoKy}n z$M9n;*OhnWc)hXf>DJ3WiG<&3z`bbl!(%~!JZ5EK~CMVM29Z=f79A#X6S9{XLFI} zdinZ5FXtLh&pP1L-c&WO#w@yywH_zXPt9CxbdkA=GxQV{1*tM%*7n6^*{AMpw_`$8 zb`1;lJjJi#@8;MkXe+rgMih}4Yi};hX~-80ubnH3utQ3o8$E%!BQa4Gt4i6zo|gwx z1K#uR>Ui;S{vH!5P(|<@?UgC=B?bX$Saw3#vnm0fRhX!v5Nx5@R)g$2lX=KL)f`59 zFg_VH$iMr<4w8U2yMbT`u~z7}W#L?z<bY+iam-=0Y)z<3^VhKNIVKvsM;jd9#d8>Im*dB{$j7lz!{j>3|)$Oyyw?42K z;)VZ@PM`k~)!0M)kG+WmkHwa+LYBl`;$g&8E5_gbbg@chAjk;PtA~d9`&FHMtkK_@ znG1vR*NhKo{^%U8&9by|f|PM#*^ocS{n?;o_a@ejAt$nBT&=Zk#^lDzC^zDV;jA<_ z-V-Plm2?@|q6U-Fq>47ySLTLmVSNI$K(FxR=50l!y@|1!MBkCioC%~?u53H+n%_F9 zkuXw&_F>D&+Al6V)p5cIc;vjk&igBio)eNDW7q(dPD@8O^<7hZ-->$Ch*YJvAbC1JAHhGVd1u z>@Oiz_-y{n9m%s->bVC2sVz^PFtFleLNAyO8 zG#V~eg{vp?3UUKKB}~fV2Kun&WC#tWcI0~sZtuErV-u>Rqs8JHJ*3})TiuvpkRg>0 z6aI@CMrh@OK2-~@eW09k^r?|rPG4i$XhmJ<^;gsCf^c^kba_-R8?^h>^`PN|F)R#_ zj7K?Pk}7e8_71vAgr~)!VHx+ft70YAEUbL=EMBOf)oBWP+oO<1(!+?CjB%lYlM+3r zfsabJ3UV%_83$)FV3zuzDKu>xE#I{Z#H4tk4{{5}Ic?dgr(;_m)WP)3zZjQF|BaS<=g=u z5#cJC>2nT!vTJSk7uD0T(Z`);C?d@}>lWW~Z;S~Q%n?;dQ4~u^pj!=gch7`AdRD2D z&Q{HNf2h%1S>REY!?z=7|B)+~ZK$213Ef}Ps{wiMjp+W8ExT|z#;p(c9ac324{Zs$ 
z@A|IlIOku6rvJ;zkfBDAbmTtIOx|wT79B+84a`1Nycmnqr=|`KLT7u^Ctq(z2Cr(? zw!()Wrn}X-(?3qVT*-2m4f1aKGNbbm0Oi9)SvynqR2ofbmM3jMKybuQNmke1mnE55 zCbun*7y=AVA`~L;pi`(LW5WlHx#o!)1sbUzYjvgSbgo52?CjKcRn_v2$+}g`LC=r5 zD*D2dJUp6)EfyaGF0{@Knz`|eD!RUjxte#cdYC88t!_AF`cB|JVZ;hs>&nhhKdmQ7 z_|Ao8pq|pRaT1Zk#tI2PZS-JJHNJKHHvbG&%pn~vH*Vg-T_{xUwMG(QMav#DN4QtR zj>_wZBj0+B<(%`vN-YLWgXj%kFokxd&YAPb;kR9rd>Gc z%IU46Zj;=V0fSF=c-bZ3ka|XCuaO_L)YI7X6A1h5Gv}y3J5r7`PAr@Sy^AuiJu9VN z5nbxiK(3vtJT2A8A2RFhxw$3)J-^bw8Y`Z+qrO;;yLO7)@=)5I@6=ha9yqGn4Xmse zzqI1^01ssH|0ff*K^t)k3gH)^45B&xN*6W=@R7ctvrs{uinV0VkPNTj>^gijl)l z?aG_gicS@^KmCmZ^BHCV&1{e=BMW7HCzXGOEE7r&z}6+g4h!c_gu<1fYJtwVzB5BG ztFpKQD+@@ONdDXd+xu9dV;Q{Z(y+WoZpe?JQkwZHnGr4F$`o5725M^^N2naGSa7K| z&F|~L0&YnG9g2??%4j6)+&EI$oi6KcVZ>`LUB$hG6RO#U3XG*{!b~Dw%sJBm0=pjA zjT|6oC@b=2*~<9#Z~K-7Y!80J&-qlxp$$Vk&?26TMsqXx&JyPaL#mSEt(&#ZAgS z#~Xx1{nB(rP9Yi*)OEX|80%lfC_p7>te#y${X2_qrod}yiXxuD+FiP$%CpqsMAd=l zVUoAY2lTK$Ix{ppW^N7Btt#b)`VKdS&r|d!er@6EMc3c?E)BOCGY)3B-v-*7rsbJ4 z^UG6Nm&Gb1ql%*&k4Vmz1o_q1+qFFOt0aC5Tc?n^Z|9Z@Ep?gfI$am<3;#MEtxX^% z5|0zdAH-tTF02V9dteWMDJ8jdcr=?(q3a+c{oJqi*(}O6)X71H@_Cu$nxAnAb;#@P z;Z{Bhld_xd+V~T>Hh8=3J{{brOg3Tt(mEd>L%o<)#fPo2V3mG!lamLirJ}7_ajJdP z*Evn1-7#PIchLf zsL-#<%rZ)q@(Z2U_}O$dES021`qpE^kp0S*sr%CSxx1-Q$N3HAC(fQ9(^g`v0sbNXF~DgNJAS^=MqH38c;~Z}7$scxnf|=h3zCV6B?A)UGUx=s z&u#mM8$_+$tg}Mi;jw8s{GeYt)~p$Ofyc{1feQhTx~MDv^`()+y@^WQiB`Y|1kX*Z zDL@jg4p?tS%r_H5(B?>m=WxcEEI7aV_SV%!h`Ose+`HYcS)kZKQ` z?NINI(i(~V$y;YC^UY$sf%lCR0UQ+}vIc{YX@bi4BDxy_GE zhqn#@?b&#H;G2QE?sY&{`<1;Cofucp=btrL(V?75k^JevoKhNR zGqlr$R5^GseBQ-hmj&OalewLrq$V@d;vT&{#&C?tq;egTK|F+$HMhE@7srNAD8PM0 zu-@C6{cQ&H&h`yu3+V=7ybGpP-zavyr*!UFZeGC8GM^XaI$gPt9dYjo8!BP2lby9k z8-H9dA~?p3N_gz(l(s}&#s1q~SLhvvhis@biy^8ER581zrM(&ZZ{~i7qKhg&k`nFP z?qq)a4t&pQvMi7)7Y0?ChtZ=WEs`;A*nHy8{FY*u>3eCt7SRNgP0>A)zrl}xempHf z89O^(wt=T!y!R?y__qpe zUe&|8gk;~#(GQWpi&cGgq)^4%+~gC7eO4zu;iFQu_OWHL=MNwtc_8T~`d;5^AzP+) z=d^#BYBd9&;OPl+znjq<#^HB2Ct$Y5r!$cuoaa*gFWqzW)Zm6-U*u89EeF`O;&!_w#v%=WS2s$ zhj+7b11M8F79j_j6O2){m37}I6*eer%y%q3`h>@ zoODmYgy#Tr7yfNp@41TzucYF=;!5i95I{W`-0)_Yy#d)utJTo^(lmntRx54vx@#Ev z{8$?HBP@YnK<`I+OY_t82e|aNAlji|V3)YEa9T8KJ=`7uh;62*oxJzF48Mg9-i(rt zG7Td+6h?m_SRX3tv-gB>&TaCr+>Crh>ggCi4kpF#9^?~4vcBJpRu~bb`teOt5#j9d zW$JFhfS9^tY^+B=*kXpmtF;D?XyQ9espwKxI{ zD*w{6IKS0a#LsTsT&#BCyoX1*puxV37vwsjUy=3Mdk_^L*ozToSOxy&|CoiDJ zyX(mAAp;8d()Cq33u&)CRe2uuDrXf zwZ3Wx*oou4N6Yq5<*V~c)=sOQ9EB4pwG1S-+au}A3I1<)4#r3K2Kmej7!u>^@(u@g znF0dFylek<^gQ}hiSv=5GW}6D^0e673v=-oZ)}NZF#;Aai`K8`DnG&d5*rw~M>XS; zyi)7H9hZ_TUAu8v(%!GU23YSZj=UPXBz|+R{&R$uaS8*h4d*WNF11t)&SYKs5=UFT zCIU*{puEiJ-rJfh@VNR8$+!3X)&@tvnDX#W^|N~{nks+78`yZ}a>q>8dZCn<}fwIauB*wD!-_+Zr*`?Q=c`nXKj&*yn~^g9sl(M{IH<=nx2w` z(G)QBe|>b2ej;Oug}*g+|I>-{aGss6^e%UhQ*`%|0rz86{k4w7;8eH9Iex{t0$#S1 zS{WziNn=(tH0b$TAIKNkV64L;*SB3JuF%8Jhk$@mRGfHN8MlIZGNC5SRNIJa#n4t$ z3yh|!;Rxf6JONFIEBd)n0}2b-S1@byOv_0v1WQbiNlet)s+G;Gq$z@+mWZ|@Z-2pw zfZLn}6nH44PoC2T)*^BKQLm*=tE6cK6JTX7=4h;9>{i_V&QJ*x{m@Mzs4+B{s#dMj z()i^U_Cw%!HwRZ?bQzYsj0gV1)0M|Gzq1z`CPfHI-dP|1pe}-X)f>NF0env_eKKgC zOWVNaI(ZaGsSkXdoR&u~jC$#UTf&FChy=_FX}LWmxLXHERCKu<)hYLc)92)kxobb4 z?L|RGt47+8gk4txcPgB0Spf?VM-5TCa@jBOplxWWa)se5Nmysz4ACZxrj1UuU2#Xe z2cWC}ww8^#SV{d^VO)7WYA&Htw*RNxE2>1*bc^V2LayI@Y37k9m`>2yUpxCip4hTJ zTGYy-)heCiqjV_2F)UE~U?3=kQ{WDMyRM|?+03qbPMB7N;J%GUh{{tzhfkFY%K#0f zO6vF%DVcCNl#;8ZYsuuf+#W03 z1?8$>hnF0O&0S6N0^w z1t|TSGv*#+vdY7Dojz_c48r|{Zg)Qi=+$32JPm=%F$Jf3p}0X6gtz3%x@5%VHc9w; z-`;k0@DlpG`x!`<_eq%>aVpX4ZqF%_YJgoD6f>o{BG52ht};_yw+JNFTxc>@CGxr$ za8p-dc^`YvZnu4YYM(HYYB1AGtu#hZ<(jL0H8wo6oH$9hd+l<~4Yw4Eyr`k>h2^H! 
z>K;Lgw|2zNpBveRrz|(r=zt}s+Q3ohLM@+HvF0%wK4H-P zWsy?=@;RYlRC0u|di~m%IZZd0^qZ>s_DLD#@sbxQBt!R`xvYb`*OviwHgu|IQv2eq zF6c0{>$Iee6YSvRe2vd*<{D28kQes1&dT&i?q8tf$+8Y2ELT zs#QoVOsUdgxU01No@R$2ZkcVHHTVd!9jb$`ZRh{L2&I2r2FK#=#eMDP4Q`zBhXr2* zP6ZYwip}?As_tC-rFAQ-(xkYgDmtn{fXby493|&aVhjnEG)7GX^h?zq!CgZY zw?FjUKR2K~mv^f+aVK>ogHde>?mJxdG;tWrF-hH%ZFH&bXk^V{SNoonByy&Fyz*j9 zqz{J-3p?;lbZk|L;VME?il-!GwvrD6nb_LEy-gE4GFz=-dVVRPa9h30?cViOTsum6 zlO1{7|FTX)V=Gx#q4cLXe*!>R8}SFLBw{Lw7OMQ@mWI3<<*R{C1~p%XW_ohQeR1qj z0K`;Hk=Npns$w4YB>^ol_TYN<0iscQz$5NP(^D6>x>+kQMRW8RZ!%n9j&QPwQ1iu0 z85XnizpqW1h5_3v%jQ;kWD$?N;&@?86g;E4s5uE9z4cc3 zb&bDcZc}w}CG_x1KKyX7N8MxW=S*2G{}>;6J44E~h{zVnU9#mT=gzbuK6cEk4GUMh zp3UZ%6F)?UC7Y&M1#J$w!WtX3RSMbus!#d)o^jiZW@QQFN=G#AgNN2fwxrD_6T!fv z`TU>;#ED2G8wt^rkc>B(W0E*^50`m0B1u-RkU@VyKc0Uj4%?5_I25!NKdSzIFjW zsJAns)ml4@gN?|Y;Nc$wY`8gvGUG;Z@S~Uu0i^!S@m0rQ9E+)6iMk-GPE_aEP$)nF zeCMq?48kV?MW*sRyMUQruR^M6^HDjm1=@<9e69(|xct01?61Mk(tLWSn zd7H))tehv36X^@YsNaqkQ7TP!L6ggg=y^ugkG$m`?$QMYT>NfunTNk1dhQToZyzq*o~sgFkgo{D~+SxJM}scp@&hGisHxn^|??(DiIUDUI5Z24%aiZq-=@Rsrx=iVxO3ROUK*61Gqs)F zTs+L3IyyK5W09k}D&w^6r201rB-q)}M_CKRI*w7J{<9nE^(24i7_j+^r20|>@V-{4 zh5M#ai&bQ^mNSw;w#b$Tufl+B#MiGO)7g`s@>#Fn0vNi;TE=6#!py+TN$U*U{T}7` z-A|*Wfz||IHg2tp4`~I1j-qaWR3C$d9F<#h2nG0)9P^mKiD3Tqw@Lby0rqN~P>rrx zE-kmq-mv#|RSeB-;|geLDFr(Ac05S2;-O%$q@wQe)F^$#B+Rq~5C44Qy4lmW3U!D) zehSE%LaJ~Qc@0t$8c{^RP8}99Gy?%bW9n0QWlBOac{V%R)iX3EuH3V_b3z4A%hb+3 z@XX$C$+;v~qw`9_=I1|2&8CO@!ML$qO4g)*MFy7SWX$2~^9v)9>QG1~m}GMQf+&6* zW#G84R;82d{Lbx{o}p)y_A9w;IM$jk2It4E)T($t)0Z5< zIEqe()(aPsg}<1Mqhg6xxQE)}0;c0MqH zNdQc(&LIGFE31o%lMIWD5zT~@wM0lKYr&h5>$7vZlzjL6K4QZ@Pmo6X`j8|hNs!`V zA<5t@*y!RE{RD=l{M3K49{Jvmuyx)u-bIl#YbJ<7xUl=RK8(Ui)bEz49F;nLTJs3Q zFF4X;1xs@N)Iz!K zs?vE|@QePH9K6kT=bUK-b$iQp?MQ9Djxm{spgj8TQBf{mVeN>I`|6(61~jU84Dk%} zV2EbATs-;b9I3m#JQLUbNk4_Upc8zNwycYf^k5oBiBD=e9wF z!Nx~@C#Cqc+fB&6KoaqBav$-)&O5LBsMFrC9ID-x90~%kOoNDyV-Hq^YM$QNiZ<~ZB8L`q?7&HT}7ah zwaWGdfr!(_b*wdhQ2KQQQ*C6CE#Jf8A3rnd3BF>!VP4%uo82SkQMS=_IH^4a+VUjJ zQ+uktw8-&Gss^xjq`RuS2m49Cz@MQeA|OuuIXnAwz*I#dDnxh)q75>x1R zVujAnee$OeFTE!tKxdS>!ldz`s-W+!0goKOs~$ueuSK+YR>h14kj9p*=>$_6O+{Q= zU7J>8@HRyh@b1CI@6XT|{L9yT_0h<0EbfR}0)Yd4E#Gq5P2>ldTvZFB`zywZb+)&-9)Wk80Rj#ZrQ zO_k6)#W2kykQ-09>qm<349Ns~tp&M!Yej$Z#|FY>b6;#zmxipxN9#B&YTBeq-- zs=dAn!g-;Y#}+Z%34o~YbeW`1i|0BYB?t$?%Q*}<(sWB#!Mdk@tE`VqX>!D0r8&u&3r1Yoqs;x>|g*n_$CRCjp2?y#sN82X$g==o0^aleFg9`4nzCa@zg;U0^ldxS;I5& zAx6Fo(y$*xx16$Lx6{FF*HbA0Wkt34VS@ss&qYe`stK1VFiUS@rxT>-1;*T+Ze>)E zEraNrDrNkw0mu|3pqUX=r1Wb{=17IRT~F3Y-CpU2J@<1+Z4mZD@^5 z=vH`DF$pLOV8IANk^F#WTEIGynL7Oioysw>`g;Hq?ujPgox16oQmSrB9{8#1y=VfN z#l?io!v{?&Pp;(c`%5TLZOly)kw4O!9zF;U*_@}x*?Q&!nr~_PCmYlLBRlsFyBPzn zXVNZLe;Kw{t_Dors24MKcu4^#>m1CI|6h!K1yq%7(={L+k`mG-4FVz{DG1U?Nq2Xb zbVzrHgoJ>kba#VvcSuQh!+)LUdB3;gUyJ2JL^$V)nb~{x>>H>;_7{J`v3G9`gSpMX zo8!FB5s>@@TB-g0{P?UVL-!W$s+9er?`+67SGX?taHTbntb z16Q8L`*MiqP-=Btb*;8Em4kzgVXHh^bfk+mG6OKv2A|TxP8IX|6k8&_N5P&jh(sMg zB%^xHLtuJo%{cpJ1u~;nU4Zx|qIC%r#x%FCi~aICj}Az`=-{4CX|_x`rXPp5X}`po!xT?&0@=PXu699j$6f43sqt zwPAKk8mvMPWbAS)tv9}5{oI1w% z;6}%{T?H7~O4--d)47kvwT}O?G{T}OgcXg0$#>igRl|2AXK{eXgn;vF&XHTtUM(Up z!1w8d=r$Bk1-u~DKj6pgGcVwiA(^Td-dt22bAiKx4(_`s*$#bSQH-VMA7pMMn>Xi8 zJPMSG#gDGBLck~Non*6>L-l;9l|IOxbBwOr1Ju!wjDRLvofeST%s!l}UxS|6YaU71%gFjK0F%)9 zD%w$($aWarGr>{zoO~N_8Yph4A4O^07Gdds{rr*)G7NlC@s=J;48}ujL6nOlS}kB~ z(ecq)en}kYwk|k0{?^NgwD;g;e0B-VUKLkzYg_6H`WvX{P)13A?B%$1Bv31-$4~^- zRCG3}-}*g=bcV7tL0)}hOgwOln69ZG&QU%93>PZO0jru&B7OWwlp6>S4}$rBL@mh- zz%^vkszH3$751$2!;d;P3qi}0pZN@MuPz8*&CGfhEWX*5LtY^kdAR^?NdiC6(C0yl zj4q5nI?m&eX|prndcYPR5Vo&TBi9&-8k);yk$D0N&3IxNjs!fP{@jy6V}ABqxjV$W 
zPHb5Nf!(|l;DRH&e7DJ2QSmNH19m+@0v7w%O&I_XIlZscYTg~08V56i{^8(|spKK$ z$T1{jUaSYR$d)QSbd+S0W6hfD6W)0)os5bJcQ_A&gUoMWVGu8I-A56LeUf`YFpx}CeHQo zXZuo}r!}2_zFtB;i#X)3%iJsIZ=V0X_afC|>=XC=vK*lLy!e1;iLrUZd3RL-^(xE1 zmF|Fp0de3Sp~gtIlb8(;K+E4(n|Un6SDG3gx(GC_iTlCiP66edgw@fv1p^IXbt-?p zo<|*b&_3Pt?NUS2@Hn5G-RmkIrnzYV;l!B6brnT>hJC;i)^pQWmr|vIFI1uW7>PbR z9L$)rie#D_|KY^(j=)B;4VKWN?5(kV*Cpom)pmBIlLv5R3C-K?cuDQsZg2TMHMhtv zZGf=rjc95v>Oj*)-B5Gf5~B++KhDDwxN^wc^LvjSmTy(>Z2h_y5bf-Z=h7wtZc25LH{U}M zF6KznLsy!6I|(nLauG6a)LQXrm^Fn(Z5JbVO?Mi$rCL10Y_Y=~3gY{s>_r)5GDGSkxhqfe#2PjQ1Xbf;!D3nWHSDy>s^ttIlixuQ zxO(e!(+ij(t)DWyzWrJPQle$%rLA>Zp^sq9BN3v+*`vaS6J%^WYVVTD!iJkM!khU> zknAP-))0ZnMiAmaG5_trQ}ww}UB+M?Wz0X%)ojnalZRY1g#gRmU4bd;f*sPnvOlBi6?TM0v0K3>sXj!AY9v|-F* z*gPo1D`*HyEJ7w!^6ulHt_1AynO(As53j9Ns70q=9Pp{ga=DZFD7qux>THng1wuc( zYPjlYC+iL5M15Mm)_Y@FGt@lks$DO0skZfIf2eP~5wxXb>sp6_#D&LWTbT5@+2p!b zDNgGxK&z(t)jt=QUeX*smbz~+q!OGoOuxy6%qQ)X4=@jSC^nRw-IE`nQ4l^#(dU}? zmI34gPYgy_MM9E2?`cbBP#?(-p4bDeQ{zd$1H}G81In5=GwxRAl}yNX2e4{O(fq{& zMANe*)dMmU*=z?$0#|M5MtMPxp|r%pvjmF&v3+A-3G7+8Kb=0Uw#&^N0#-GYkSQoX zjye633n|6@ShUprO91U9j59che`)RLdwPnSK*NF&)Ww(4m_@E4+%m|b3Cn9P4mAh+~=GkfM(odIn_Qe65jC5iqcl=At zx)?WnI?lxf6+GyWT%EZYSf`Ym@ zJYOUt3NgaPy{}&2yIf#LOEKjU{YJaE)wK;>2X(n)p;{mC#d*oKYp)KwSazlvSRJq| zZFSwT-`}uCw@E$l4t3cs?KAce*-KqO_iMCTZ#Bl=^WQW~Yq=TX*#n1|ZxFLQhyu!1 zIs|HE;qrSDXA#bSD!aV}B{OgF5~gS#hN@t|0nvS8CYc#q)!0jwg0dY%`epY)jKqhu zZZPJU((`WH1?VplJ{A@}plDwSvG=k~=jHM3o6@nxZhD^WnVGzM7QIRjqx~r!Q8dJR z9pHw_X@d&>P{x>4i*oVTD24_EA%l&M60p~x15rLJehCIlaNbhf#{ryA^E0fhZq=oO z7p7bve>zx#nT&U{ph2K&js;G!yZ)T;CGlI?=uyY4!3@Y8p80WC%a40ILmxQYUJ}VV zQ*{t{HIA<#U0#O?x%X8siqAPCs+1eoo)t{=T#@s#ZT<9@2ejKdYkBb$-;&zy^Q}0S z88SR7jO0PdxAw)??~iNZKHq>+&M8DE-E#oEu*HQ^h;>eSMS^dg+t90E(r#{Mm>+gq zDGPd}Ab#TrS8u@>f#q1j@d=N0)KmeD)zGaHl|awS4#b69AoZC-wHYWjcUyFrx7zn8 zJbhC#TZi>lLPC{{Z85Q=%JwNkxXcGL)&sfMfUv?zp0(egB)t}+*R-}DFD#_NK)3vL z;6_n)0x=Pr?tPVRmY3!vaXg||0!SimQ!qd(0s2dh2kUx^z)A3!4<=ixdWHfcW#4IwoNbbA1v2@)1T#*=w+?;R>daAL2L|SV#Cb<}~cjV;;4F}D` zZk|0nHFfO6?5BgDj1TVb1v2WKH+g}f9ryAU@79+sSI;f4m&ddV*jG~QzdPV7JV~*M zv|U!=Naf$)KGRPs(QN1kE#58LWs^ezUR>sW&NZ!4^%^UyTznp8npoGRE`yU_DWC28Ct;wwq z_qWn~g6-Hkw5XD#gsPc6?2^-XDXxnC#^OOfO|D6I!09p9a=vHE(Y*R2t)!cnsjT-JC$o#l^i%ImI#;q_w}f6L2DoHIJ>Kz4KC>hYZu+HH`>tb*vF~v7I?W-|y38(5zXw~8( zw#y<43OwG~PfOo$HWCYa6PvLYjvsvs6o;ZseAdAcO5NSyd}uY3Q2WjH>PUAwlkM>y z2x?4Z*Lj@wj*{?#;)dVsGPZe9HUJJOS332zXsRz3{P3KKWH{Z_)vBJPzBLa2o-H1o#-xQX>V*rRloQxx4_$U*rx* zjZX_ftZXm#-^J<<-hf#p)i> zMfzp(OI_deOtWgNPtMKO>22(|oEF+h?B7`r6@{WV5*w2 z%h9myh8|6ttsd!3_nwkn1a5O+uwx(H(4&&D8-+1y)=!MgCv>P!ICM5PqJ_1fe z&f1TL|K{R#;}9PDU?GM9J<(w@)<90VcTaZ_cq1 zv)(xV&PmR4$GAudHv7Q>OGk@-;loLs5RC+R=B5U%Ag6;qDQ5(U)y|h(DDJfmc%HLe9b>tIdE>+Hw1WSAr0UZzX!( z#12a3UDn$V;jgi`(6!=O-#+np(iqqg`pQ4O_TguzG`Q{KmV)^M@X*hB+X!3(0yB{} zT2DrkI(^=3V7ilU2QvYCc0H+s0TZh?>DO)hP)J@Qqp z^68Jb!qrdSc>0JLFQ>N0^CFv_RthkezK^zi^?Jtxt(%jq>+X9&CEuxQbRD*7qwz#k zYkAaDxN*ng=p{;N0Y0fnXqDBX+Dn2b`g)WDeSM?YcPnYw_k=g4yN)Y<;L@-pS8)G$O{H!6a39vxq#k7O^|f(BT3kQsjJ zXav3qXMOsSRL~=!jMO7`_P&Zzx=6~0^<_A$nD_tq_=5DIbyv!7Sz7x|o1Z>y0(rnT zHie+FpWp;Jj@_csc}+6@tTMTsynM_B1;ow_No%Iz`MXFAg|+oia!o`PvLkww(@q2y z)>Yd{)v!Rk0}R63HA`GW{~&mlH}|ZKSMT|!i`9*5d9-fUpLE0wVLYm+s^RHj#XVUg zD}=YU$G{{AMvdy;{F!>*6HVezPm|$ZzHVIbh=DGf!LO;UfI(e12cl>8=Lp0=PQ6C5 z0FzmVQ3JLIX2y=9Z z+(htCG0N80L*K?dEgd(>6SBm}60`_JdLkO|Kmfz;u9IK0jS@D;v~{nbpm0tFceXC- zLG=5@3!Qg`=Xh6C;g4tDI=PYmbO1#i){KeO_0a$v2p}c z(4+ddDxN+T;sYyx@C!d0xgs0yc%m=!PFrj`N{H?wkJn~^C{4ZixVP&WjYGQ56NVE0 zf|5Mrs5vs8LI?j=j{{K(>X^z*RHMkoSDZ?Px}z$swOQN!_%rS`B{Newq7HA6xJlB2 zi4j+e5d&1xK6Zr7Lg7jZ!1O(6J>N_aUKUJRY{1I8b|zV!{OW^n+-@P=mQ;c;|NY}x 
zoS+mO6KANUP^;7g(s0s8UY8Uq^XC?ux!uYvbtMSiGo!scgjz%$*jc6Uwo~7g3h(Bo zw^fSOs>VoA!u_R1kNt?@SHnhQE{=@g(Dg7ZgrgQ}ZS`Bw(IWGYH1g(_%QmE*MSi z3cFb%p_MOTmX^a%(}x-fw7zB7_0uh7#aq<;rY3~Z3{oe1I5nVU;EhxJ0gkAAXj!&g z<1Q`f90j7niY|8&{9$qJ#b5##`sR->hjc#BvIsFW&NOqg6jRIQL7XT1J5M<;u#HT;R^26gp+TvgCF+sio(~C;ltNyks<%sOThg!X=5y|2QrF2YzQ{lGLVj zqZ?>JZM!i~uv7%2ClQH-YQSK$xRr~YHt&8>K9k(e@a2Yndil|%+jr8?@*xOnY_Wi+xC z>0@5NcrVxE_98ti&cbhdJ{8iyQN#Cbyw=C??ht0sdd-vY&1$whS63c`!<8LT{D(sw z=htFNVg^HpPGT;b%p&cUrScCWKQd0eexb9ptnN5W<`sPr;Fz;~VoWt@$+agFu8VzW@E$LV}pXj9!>Gg9$PdB`;OJ`k$7IJqX zs-*B|TU)Wle*rCcbPvj16yB{H^CTi4>Md4Q1Z;dAu~Pk>2rF?R7e4DH-kKv5WPDJI ze0A2%W=nF-dljNC*FIwWbtwDbdMpwDxIiH*uYo;h-2N5CebXE+v)9Vg9kz6z)3iCXML8xRqu^JQxgJ_!Mc>$!iZp1d|%w zKiplP!ll=nMvFt~t5w`$eo-&V#O#|Y)fV6*po$XD2Pa%SER>a*Z5^|ZD2elbmRm=d znckf4MU)R>M=GHkgW-%HB5OrWDF&rX2=jGj1}m&HcZ6nw61$I9LN;2C^@c*)Qr9}r z1sFlC8wF6)h#V;gVX?OZ!bJpUf9Y z@T)Hp=pcr*9$2@6m3+oSL`^ISZv)6?v^yk?8J~q2>Ow4EBKuUHg$f@*4Lg9YS*G__ zPT@4a#5U{ZYtc|I&CfALLLVq22Sj_!MzcO!%vVbvv^;w52hY+*P!5_A5FeADy=^9+ z@x|yS$91;gyhVSDTavLqUt>;Q(*grr=Lg;p#XccZ^TGcPk`=$(ug}J%s~v&6pZp)6 zu1c!L7b=%(sj2XT@-Kt8PY4RR^cw(PlLM#?pJHEQpS0@lFEzOhe6P)o2;~(LydM!p z7YD&k!jk=FG)EkJ@tdxzt1)@A+`@-|ubMtIT4gOjW6OjxKxQP|@*Z-X%G<{HV>#7CRLAG* zb_?wKjiY`F)_DsTVTv} z*ezRw_JkGym zt*lBsS&>GWwH_P~mo(kx9bLr2{jrunDXK=cm4b(;5Qs{!_PoeTzt63)ccDzz54U42 zm#QBzni$sqNFHYhuly(n#r7Cr=}H%b$JM%W)e56es>XND?wpJtynT+ykXDi3bzQA+ zcDevRw-~`~pTm!^bwjf=EnY3Sk8ejphe774NfeG>?V@swAf zImb{fM-!OTD`}@~4?e&ns@Nd4_z+7*-W>feOlqj)G@W)|&c(D!TeLr`ek75)5|*YaO47c|Q* z6p|tr%hf$=F zC)OgR(2QQ6Da(w0KJQ@50MuB(Dk{~#p^ju>k&%%N0I0N*d*gh-2+*_xiZK*Y)c^eO z79%2$V}T6KWizT(=DR-&$wi{~2@6B*I0Kzz%VaQ1y80SEMz9jg1t?-RKnFW$YL9*S zA6`GkGgupzI=4%!nnpU}chFP-vhz;)F#!8#tMv)?;3>Tt-kr6Wg zD=b=BSUWqreg#_<+<(}wATAh?5E8%*TV~Qe-O@O|)o|MLse zM#OpctZ%I=H1~ZZiQTVW%o5OioUYN^n2dKkWU#1uP$Qk=|8#*R-c| zpu<6`%?&U^dx7DXO3&s4?$<#3XOu;Z!7tnjH)lKRxI<7_K~6Az0RC&iZf5+$t?mbC zM=I=#O*_&FFe^~&@5y}Y_v>VUr3NQ4`tRSu0us*t65VGWXR+gSO?mgTd@;{-F{z@N z2?<0M54P&#V65r<;F%bPy2C+?e_kiZiSWs%5QMf?Ok+MwE>;Xiw`f%OUduWK5Vy(| zNLLb*-<8m6Sfl>or9pmurXIElD50sLt@C8k4Awe>pAq6p%Kj}7dScM>JAANP${?@H z7)o68x^R^xXO_Vl+Mx|@x)EE0YwE(qJMLDtM8VeGpJ4t-KAFe4#O4{Qbx!Ty1<40$otk-S<0vWx4WPX@%x? 
zIm#q@;~-}7I<*xuW!)Y?T1h~6Zsf$uj6J4uS~G+7Vo22JSO0o(xU6lLUSlzvHT+{; z0}lsBOhQ7U&$UFUFoC~?Yx?Sy`IHohD77a6{u|_fL^WCj*esz|!s*6b#rkWR44a;b zk}U;yFN&Ut{2lw|JMO2K)zt63$Rl&l>J~!|M zsA^aINzyCd>O9Q`Cp!?KC8eaK;HbJO+$ePvD%|#$C~W_4EB`qc2zI^jelG?}+jvmk zN%+B|#_&k&gC+0xgQOBvxxkm2bIAN zyY64U5s?Ghh5ZirlNyX>iOhBA=70W@-X9wmN3~e*$bjjpC(q8r1$(hSPXpQ~lUvtd zH{$Q^?pCPw-qRn2AfYE?oUV=6b0~8hJ)v|a_puY#IsZM$X0sx=KSO~J_fCM#@wZ}$ z86IlV1{E5z%RpbS0qfrfvH$crMMi5h;f>^K_xU%1Dy0JYV`Jy2+85GY32Lf)*RJx7 z){!_?HzLTK)@+NzuEHmogw*Fgt3z+^Wd3piFr~?Oc+^9k@}yJ0@$HM68O>LkW;dN) z94x92ac>a?hlYku6p0!%hlGUusu=CVsR9B(FMnn4IMQE$n~GgBYSP+!>85$-{iBo}HcV3XYHEgNvOTZ(sXixH%;l zNV{s8>mjt4eoM5S|X+#WI2*}&4h-;*{fn7mPe<%#@;=gk7~qB?wQZBGn08k(Bd>s#jk~|VJX`UN z!0MD{Bt3(sYv+CLRWG>@-pYxS=bdi5$DePp5nIrU)$751XXZyXWP+8=da2QlZV4_J zK>j`g<1geufK<4*+_KE`#ST{=L6TiQ_eutOySoY;cE$FP!l_DJC4;TUKd#MJ zCninwzo29Pb00D(FllLNqmtQ;`)7XWgVkVqR zpgV3^$5Lnxy}rJ_(MOXJ2P77XdiwgBoTr>fQG)*1t({MTj>pHx2YB-!nP4!TPu=3R zdh*t10z^`1WS5Sfl=l?>ABo_|$GfTQ1Dhy6Rq!HnTqjAB`waU@Ru&@yPbJny^%Rr+ zNNzTDhYatS(hh1;|$o>)Q8VL$_?0zxarBqZRn zx{`npd>}yiLU>TD7X&6kyH4K!h4tcyeX{%z;%E;U==f7!*fPmPzoRb7juw1{Njo$c zMd^1VCVx1PGiG@S|0}#VJ(c10Mszh}k;$?ax4XD;hw${>$@|~}`AM79NkO-_%|y-R zi8zN31D1H|X5T07EOP1H%IB-silqAPZ4!PBrcrkx_%=*9OQn5^;k_~vZ$5dB?7G41Le21X`I8kT9i1axX=Z<)jXZsSBz zt)Brgit3c=>M_VIjTl$p+jBr7wmVvR%fi~x!2RNdDCkZxSk6@<9s0OdvQc92NT?0J(%>jq@GF+lM)92}a*b6&yQO;0_MxU;VH^c3T>*dYy7jMi%a(yQ29~&mN9L)X z8fq#Di+k)l>7l&)@^7*Um4OFQcasb`CWj=!d?n4DiY03b&z~`|`MU-pT*NF4<4HWE zJd0^?X`Y!FyFkiImmB?%lz`X1>Gy;==8LO+uFg1obURG2<=88-==`0w+BOy#6XP{m zZcVmyPpK*ob*dlc=s&LOcx?~7_+ygPsye2O-r&`44_n4GtO##q>MzGlQXjZ&Zb@3KV@tYTR~6Q$v%A&jpc8)QJDjViShz%a*;z7|>8rF%SK6HM zdzW70Uc`HL^We{1=7})_cso?k64&y)Hsp4{DkG(fJJe)jV}qy#?6yb9cb1wOI=i|= z41F#Smsv3KmVhj#+j)Mhr)=G#~Bo+7Hg0`KM;?Q)l&}x>tJ7Ai z53n<#p&Mc?>GGB4dGiOsi5lceqD`3{D4wb`1hRY-B0Mz>T_}cvCoAeyhTDGqgw_rr z2||4VrjWV@FfFw7^jM6S_hXVQ;<~!JmAI5e*8$31b@p5RASMz`t;2sRBVae`1>;l< zfU7`zRe=1%3zVo;CJo@Pa>`4Mjl4`F0Y|(vH1CgiK1$qTOZb!e_4Cn|F;M89a0+L=BgR^$9W9s_r4SUHq$$AIqY`h8#W zc|vIeJkfG`tYuPwfRI6lzarVYKJi5KmkSmO>7L|AKjmZJj#-i5I~_B##h)I$YO7-y z8hklO`Q*=1G%FN}cmLw|q$Ef*!p9}(c?SRi;s#Z04hBbvJ7O3u1FF^zC{sT)?sm8a z)U1IgM1Ft-<1S23ujtYj{X&_1pp-Wq$@HP3p^>aG`uS^+eK-!l7V#RZ#Tdu52N!4l9S@>)Zgr$$p&t{{sV;hAig*PgKlQW5cxZ1TV2>f zk)e@MKZN%IoZs|PD5wl5*(w5`632iiH_JDJr|Sd_YUdauw=Wu}yNZ4+)nckJ9-eBS z78_tMSq0Aq7Ob|uv^0cwVZ8@KR+KGwC!rQgjjC>2;g#jA0#qwfP}sjl4?F=!HPnWA zdk?y3ak?D=A{GxxKUOd^5tded7)boLTY#Ymqssp6ifYM`h}Z3cryu(ewr4o=epvIO z&P@#o3Crwy^0y$`wvvhskE)3n zm+6m90aV%^eu}9a+Ga!AFXA&?WMXc$DUmGaP0@QNGR_zN=(Moxb*9L6MLci*Jt6>)0EzC&D8t2yRD+ae~1WtwaL$t~s;3GkV^ zO4$=+PgUb21B<2`K(*f`dM^sLGQhz-isr8@BfrL6<;NlpFvu+`9kYT^{o(e+^w2g|zCfDKH2Z5i{iSnWCy>N4axj2zR7 zw5;s(vnqf@6MP;)HUYRSqqgf15IO4jPyP!3-D$jYJ=Ll$=s=lJRcS9L7p=Rj0W^Y= zx9nrAFm?UvND0e3VJU9T01&V~5yucPJlH+g>)Sms&0O0L&oW6|;C zy`0YRs_;Rb(&gH#=qwMn9G6EeP8`<$!h9z3QE;H5g~=W&*{L4BM97yJhO(e@S!Tn=ur$b@ zwA^W&D7))dUN66-rk82<49cNBE@9&4TJm0(6&Nq{#v|yxb&vj4$t@XE+IX>icSQIn zV(7&3rVON0h)Z-<8$lAxf!iPW!fl@eNWQnS%ZaeI5r?(wPwAMMFAO=ms|2Ca6B?ZM z>0@bRUpv?skOyaq`gq$fH+w+m)!A+Quyt)kV)co_Q~`YB3U9G$g{YU!fi9AnQf|f2 z8%bG;TCnxGuBV&i1H!7*sMg0oXm6<~!<)=uo&cfR3_nNopkezYy(FgD4txtIlI^1^ z@Yg1qPdN<~`WIl16`Ic7KU=%XM+zwb0d+o5X*%BSrbF^iOf{JSY`Kw4Y?daE^mwHC zl+brxj*s*LY!YiJDJ>dw3+3W={9g=HB3KAdri)&Nk<4dCLo(axBg>e*G&@m`6Q5D# zJ@ZY^y^FbLTWnl&-Y%NP@Y#Pi7*3*?Jsgf&5ILnf+-0G9 z%;S)+G_ILJL0KG=al%9q9wAh3C{b_BgzI!kZ^g`x&~zMYi1{=v{1*F=;?Pz)MU7c# z)0{<{`4LmozDKWS|-iXs6>9wb5DGB{YM`^py;I6-~{alWNnI&Y;N zktQG@ume7#y=A%`Fw*oZ_g-Kw$a`E6#)jGUY;6RD96P_?{^F2GB(|Scdcoh~w{1 z_C1C0*U)|ZPwSC7^CB+3+MJfgup8Ga)?XM!_5sSKc;cd6ds@==Mh-wzrIvi=xjN0N 
zyq4|PRiF>Ymz?F&(ty#dNMR%pcWLk95TUehpb@G-)of@Utn{d#n$9RP?>X&8{kiv+ z8EZJ5U-hKrc8t*(EAK?X@lG8}p*X;l$cwXcf@kQa#LTc@&~_N)-lfWBbp4n=z_x#U z>tV|y38tR#v`|k^&j5&bMQL9qzAH%{&2Q@~uWP%c`bVKZ#D zrqnKQl7yVG>vwXr!RsnD!PpnlXRcPDxLoD#c8yfhF9n==+!y07yG2DsV)POPjbVhG zGLX(L#Pz@6$mU_D-qC#IY2amo1{$*o*Atoi%uGSx6$$4G>@wh{tDkkaJqBab^n;^9 zhR$~u1u$J}zZE9&7U(aIZ3J&ied=8Rt*u|6)=B`w3$8(3(xWnDX0VM(KE9#9fn5W3 z{<`U33m-$-6_9KvDr{JT@;*mGk2A=s95sRzxc1l1F|9yMG-y4eeO~}cG zi9z*5zr5u_4%RXAnqaTG;~9fpzL%_=759Qu1*vrG7wXg8v5hB-)v9PCOEfiBufdJK zqUt!(fy1@9nte&9K50gdnMOcT6!v7bui_|jIaei3lF^#ftKSq${m@0SuAHzbKigZm zf?>u)qee(Gk14Iz4j*aBHWZy0LtWzgjA8>ExhR>D0!(=4QlfaoiRW%vQD3RJUZfUD zX{0aOF41V@z4Em1RHv?hoXzN zH);hA$es^!kK`D#mlt}?xNK#>eJX$`)~bFqFDhqU9v+iW+fDO40D)md zs+I8fIy}SU!?!1+4hJ!CF??PnI93|kZ<3Sn#Sg}T4_=eH-tMOhJ5zyAIcLpRhR*2U zO~|_>1XU{6Wjpi|FY66ewqL>u*%U6+i5~JT6FeN>(Z>DnEVGi!c zwiXH;(hEhonKZPzx0k>j&w_jarG63ObGwg$`eSZ_+oCiQI@MEFE>{lNd65#^VV*`HvMp4D4NBE@o7Sv z0xeJ(L1ca15yXTxMi8D>#m9M!iX(cV$F0g6okR!5qy<#vM7WV_5K>lf8c}woHht6z zVVwWrO5fvYOsufWBkmw7J5vVI)df}O)~~uJ{}3+zK6F4n_f~?Lt;h?tvoyr#JHAVg zaXGjSn8!%68(5@x(awS>82WUNb8&h;z&CssHGAg!^btK7$HHi!Ag6Wm<1(IPOQQ0? zwZ&JW#>$GENRk8QC6TZ3JuidG(J%IylgF2if}pkB7Ey)zk*$oDoPDU9MXrn04L#=c zBMVvLt!kye4?jfaESyK-94^z$VrwT<65b4aJuo&OHGPmZXEbInaU=w+c!T0y<0Er| z(38m3B(iAkoo93^73GR|Y>#qLEUwy&(Py{TvBG$cF(BL^pF9sTl=V^waeg#Rf^`9m zyPio`16R3P`Si~glz7JSuP_Y{gMgfxeJxU@NGqR9v z@OPL8l2B@h`=nUps@NR_;WoKlzVQsWdlaWhqk_~D7Xc0STQaUdgPsdiv~QmsraYsN zS>&v-Ue+ST&Kd`ly*OYm+U+c|dB5I)|C}&5D3$$_91UfYJI|XlC$B%R6O0m)RF%aZ z_sj58=TOKdvx!)NvH!A*C(WCqv=5rji_}0yz&VFAQKoL)qy*;1in|)buA<*hyz3_; zoC^SE1h0Av*y_X}It;2bw9U`|nGMWr$12;}62EwVn4HiuMmEf1wf_87+%JZb*NXyP zlg{7f8!c(C9Q}0T{KtpcP|-hTY9h@8YL``td7|evuL(X(B>!nrP9%6JH9F~u=c|n- zcJmVk`N~V{j@qMkq{zGyBS|R-wC^@8KYu$w+8W-0d+) z_^ZF)>u0cA%98!H9cBK@Mteh5Ie23KGd#tIyg`bztLptUp$L!9h*P?7Ek&_stCw`P zdv!6VQ}5o}yn6^VUb3L>7jO{A>vK7#qnq1m2>J$CXHbTQ*j~rEwDiw#QdK)?uJ6A^ zWg2q0RKcIm|8Y_N<>cXHWg4j2Ap4=PE zQJHc*HUAWrlS9?ye+Ptym93|~D9kUz3H#A70{8%g|H|txL%2lQp$m~h6sgz9tdQf7 zcn!#tg`cz+ngCYI01W5Bl=i52;J_@~5O)4P!ZqVmeM_!*&F#?J?vdB!N6PCmq9-Ma zLJd8!0fypW&oPoFyv-@U#B#&XK{c8OW=s2R9reZ1Bw{@}14#FKq5xa$0`2l1NO;gX z2vqnHK}VH;h*I+H-?SS4kSIW|z}SGbYtf*;ILi*m4N@R^i#=D#v9L*$Wb>`gIsfk5 zJT3d?tm&vGJm7eJ-v48(dhNb5CN{LTdr((L0Nhhsogt7S09C#m5feqm%3)4$pg%ms zhFU`nesk??j!dF$Q+eTrJ4)Q;%$LuklHKO|RQGk5c4s^9sm(8VVjc%%_0VJRs5?UB zM16pmi(*pXK?93rUpdAPKp1NKvvrAHHy;;lhwh9|qJ5qbEZ577v3ux7ebK215_8f| zsH_W4J{pWAa9#X3S^k*&z4NsNqitSww7-ld)mM8?@$ETb59X+=i!BMzc-+~qj#Fgl zOYV_dyVZz)?6_x+Dk@lK%5X+ZXBVZ5l$BVpHt6%S#-!8|wS9ud@L-&wj%E8(edi_O z#?Qv1O(v8By9%phoj*%4Z&_G~BBKrGDw&vQ>HV_%skG@teza;F8(Lti{mxjfTJL-? 
zV#8P^DxnTX`$OWLcfx>jrUdJivZs5c;MI;9#`ts9gcF_J(%J7yi9nG@t%YQ_p7J^o ze`8g{&G_PNlDhjV>5z-PgsimrTmW?Bujb*;y<)dccT;q!XNEja9hDwrserg6Cf~o^ z=ws1bc33pJcw0RwYIY_nUCn}FEfbE9$ak5Bl+(`89R7RiWx*o###@%3Ocz)1B@K`k zDx}OHz__|Q9qzE3&}#bhd4tP`1Xf7PMdEXh2qF_Y5K)-KBwPe4PXXU0nQE)W=UBrL zV5&((h1a~D-O$diJeMueFb9`O%>@0MN;%N!98NEE#AkrwQ8JQ*54NaXx2>oH=O|!3 z%#|OW_g9I^k9PYUsOolXYj^iXsbsd?xanD_7}Vb-v;uSelX~n;$9I4}ywEL+({bnfT4Wp&cj7t4qn!7$_n6wL3!pNR?OGhpk zG1QtP^ERpGI$yAk=HilY2Fe=LMk54rR9vsZ#y|-ZSgdr}V>f`{Pl4#C|6fk1Hg#x;1*;O_2n zPiDTETQ${xq3AC9oU`A()_Rt7kZkJ3fI?!KaPDWRhDfy=B3D61ojJhkSY`m|WqAzp z9c)R>IK;s6rhBdM#hZ33WsmkOy8n;58r4~L{AyD61Nr;n*I*jg{a{mtN^|=)hKQ7} z@>5K-@(sdVXcli51AZEW#=3x6P%gJlv2u=>iKImjEU9=dx*=b00;x==G#jt>t)Tz?oTx1&V3+vE7z@-s&ySk}^~k zR?2yp{Ytai^z^i(*QCu%i9BH3H3wpKlp=9rk?~|gnt)8fA zjPFvz)rml;jifRO@+{XkvDbSk3UEn^81$jdyZ^H_`#)Bkd}gF==KG=R^WhFw96g_) z1GhaiOdUGcF4Zrhxm%?AmmT7sfr=lY*{SQccK)_$04j5s8I>|EVV#u~kqiqGlW29< zcH0yI3%Nn9c(jpi`%9o2E}3R&U-F_$36n~SKUk;{dpHWaHA7dT6PIbDhz|xKB7@1m zwWF!jVL3W^Z{j%K>ZqN>Gm*I=`0`S{S;kXJEie1d>K&zXHcfU}UvJuN()TO2dgjb% z+ev`=ASPXFr8qS__|C+1vt{q7elsO)D*ceB*y&DrXV|3(_hgOUIX3fsJ2rY&hR9hu zfyVp^M~VW92jz#~Ubcmoua9L5TwMA^(Mpk+Ola-O>JF_lR+Nsn2xlJ#-b4xpcx`@8 z6EP3_RteHIy9p(F2Hdhf_Am?^2zSw_2X3m`zNYEY=wR<(!b#2x(@M_wY|T|D#=De-5@4pL zAJqI`FKkF2NSNpqn6pd-&VfgF&zEdt@ONO*quU1*9y{8w{URbd-P$(>W7sI=2!0|j z2eLR4O2mety(Uv0xkOgp7MaSSlN}^V8yc56>ulwuNj9)%Z91G4Uwr zQ7sJCU;~VAmNeSAx>U@Gl`X2A5<0=YV3=XHBOi+E=d~uPInOu7mNO!o7g=Rx6zEP{Cbv&uV_3GVC|_B2x*0Ww ziczfC814b#aq@i`M#3LnI2@is4VpV|2s6_RRiHIxz!9fbr2aVb!-(xTFHzWF-j`Mr zerS}Y=g+Mt`(}6VcS*Kr&dQ6`o9&~uE-Gy#;Ylcv8?z#gD3V3KjEWDd1r7PoZogBf zQi$I)2)-79!?Qadw@nw}ljEfCq5|r+XTdBTQSPzmmATZrObG^$I2a>=(Mt-C3M#e} z9kOZ$z3uRBDOnJIzi>qC z%nXXM9H^jQsk9$`RrSCY{c+$@Tk^8Z0AZJ1GXD6?ccc2@K(kcC!Ek=h7!dn6= z?V;$G8(Psw_bp{1sdr)mvY-7r)qlo-=_3tV!)4l!bkU!~?$(oq$iESzbS92WbW+vO zH*rVpf=f>IC>C%-=zU@m37dyjkrKNO|qFwP8;*kwdKh z0{FvN-O|A(`5et@w>X%MFHH`)dA$^(nt}glh1u?CBX@wT#>8;Io-jm>Sp)X)zuyQ~ z|M0bRDAo=*yVcq)ayBWcKIVO$mUr0bgk87z3!Kha0WrJ=$k!VFiva){f;vu?*>_-k zQ}6Qg6}afb`L`wd0+5kDn3@){4L{)$#G+b$@B!T5wO@8VECTttSHMCl4@_XQKzyIQ z_XtS!r75tsNPFELGo~BGz2ngw8$U;4|Z?*|<4RMX0V=bn1G zCd)tXgR%H*ksOA=Rt(p*0HTZ^WgJ8u7AS0QsL-yF%)9Ch$2hu*BN%3P3T67*EaM2c zA-)3VK7VLujW{zx7KOligEW-D{C_s%BtRcl0S3%>*7?p?$=|+F&}TaZz2<{g03&3o zmEk&^z~P_c#X8r_be^zQ^jjbjGnoe@=nlO?Im3I-3z4}@zhN}j58i}*F-gU*6xfDy zCC%nwIQ`q!G+U-YkE?Sbq=Xu#A8MVhGgECM)x@pV2sqC6U0Q7CSswtw`9>YSIalpB z-~~3E_Z-_k0(ds>kDd?b$`uZufk5d^i(C$>MULDHXXbF^5$Wh}ev-rIL|TQfIdVdQ zk4S#$ClQTMH=V$g+WH%qT!(+RW%|kG41VE_6O2DX^pw;)EWNt-2hh3zQ46-SzDP}0 zbRRf~$a-D~Pzwlu+?%Fv0HKY*L<4qKj#@yPC5nq?cR>1Nz&jGIL&)z7Xte*bHHWrD zd)U6BgeEJ0>TNV$$nC8F`LOu&+%&jgZ2E?lbYAE;ktnd*o!&#)LmM5=?~nkBllJ~u ziX?LSJ&yp->-SO%j~AZ#8kT8^CyQB!g0tS^aX1wGeLt`-EPsKobX$NArPx08qyyE& zl=;359GZj@rpvh0n=bLDBMU}5d~an)Vgk2XSg1pou6!}4?~e6thfPJbU5eAEce zFB&i!)Ztg+{r>IHPURushl-D7iIr*1x$LY`dI3+txGsG+oz*!^w z;I)$EQmg7)(TA_P_G3qtS*ZTKPru3|FtAQOk8PDND&}_WF+XdopLsU-k(9f%wv39p zyb@iXg)YBgA}aZEAm-12JM{huYH5Fk2SQbaP9V$018rBdC6^q|je}%`=G2wJ@7Q9r zO#fzmDy~TzafM)%Z=cf|)FUjZlZ^S5UVPn@N&7%c>k}U>?%01mmi4`TdlRN=M2Ag` zzEeNTvOlg&L*-|jKhyC|&o41Gd&B8|-@mm)<(F3G{Ua`d|2;zIA0w?VS_4tSVWD2t zUBt8e^9p|5+?M9rZXtBxJz{R_Z+?IFTSd~lF5(`5zKO}R)8G_`e`1`=l5z1KBUyV^ zWQe=S3wGIjTXAyU*gKxxGga32A`=LFxOH3sl4tjda5 z=8~WRbux0zsAs*v{hIiacjANRzgRrS44&@d+K24&%GMG3_}PmS5&AwiQv z0vhzWDOPX|(09pOYKnu%|8GN&L$AzCYt1_(Sw?{c#|lZ?}_T#yOAY{!mX#Y6ag4pW(*;RBuXnK|4l!|P``qQR&e?d zrD0I%20-abm^bn_i1yUqYJClJLX~uwwLtWWMYS;v8v-H=KOG}tHu=LPxX*zk&7qEO zvkpp_#JM04;LlNsIIkccL2$;X#HhQ}QDV}*px}p$I$22xT>`=V6j+lnDYmvIHR`_VnT=IcKqyatt&fUF( 
z96kkePk^M4L$>FAgggbS^8!$?U9$Z4_B#KFVu9!f_PgIm6kScwq2vh>i3(p#$`KgN_QEC2)CI4c_m+00Xh8Uqkbb zD=NGOJ~gGM0iz~gER1&@n8W8hgVJZTY?m8<<(6)?lRJE-SG=Fsz!84rQOb0gx=beaxvXcx=;*G}el=bnBJXC9)Mg9?nA|pfOt( z*aLe(1OA?Gq&sTUL+_ZXB2ylX%<6}^QK2FJzcp}(=LpBji*#X4-Ry|GtD{a4&Gv5-5z(gV8S@!F71h;A!@3~P89iwx_wq2Z> zCi}0%&koK{cAvj|DP`HK3r!T=^lR!BpVfMfzuCjCMEaP=sfYbrc#vn|mVkBk$k9cbDUkxh;2*@VXQHed{R!F?MUk@vu(6~Gkh5~Xa^z}S!;SfRy@MLqx)9V zWL#d-y7VX{a=j?MMHN-nZ37D#&&gO*I{X{9jmc9BwxP7_#hH}w13u|5wJ(C&&pLbW zQ}zs#(lZ;Lm)6#DqEhmoozAZM7GXR#X?=@F7~B&SP776N%x0+_!lP{e%KlV&oBZDt z{suh=DltJLS9KfqckX&VZ6?|Tgp#J0t5cPj6r&s9-#qd7$9XT?Hw`Bx$IcVRttkyhO}OQq`BA)TSj2Lbk-p z1(S`PGlV^B^U)ZYdWF8WD_|5uHYl?t9yguf?LSwyh$BQ;z13gJb@Qc|JTP_XMXtx9 zv2C8KgALOvhz%P%F9t}K+Q6(!#)PnB>1A+%g;iXiC>SyYTuN>a!w4K+kz<0?(C4`+ zmb%5sb9a33&Zt--kiNlN26jA@u& zXMcR=mtZS?53italZP?L)>j)6#JS*DOV{pTMnkXOUmc&Y;8V8)_A$XMvD5V*34grb ze?K)P5pxplK_H*+g9+KBI|Vp|%(Ry-g3{8y{o{z2DDmZ;sG0wk?Z8d6quzG@_hkzw z6TfhzggFR`L74oJX;tR2^y)pz=1=2!&sq6d&smjn8=*tk_E5-ZdbtE5ObrceVCSr7 zJ!C!fl~OgB0V8+WY3mgDi-Y|EB&(EDeyd++l$P|Y^B*EpU#4!2>6rQ+J0slHM1?;F zdElLdWHcFH`ay(cnlZV?6sd$OsMjlpV8sy*rs&3{$fqx-h9ny~ds+r!@B zmo=^JHK4lfQ1ZWI47LxT$cFgr2wd;uV1yL=y5dg??1^AruuczqDMTW7Xm6Z$q4u{e zuylr=mHYHJ*)Vaq#&ZdOUL{aK?WIgbPocft-%0Wyco_28q6L-*fVt(P-#${=%i5g! z%bZQ3kByjb%=c3Q`6%rCKj{VSXlvztadb(~bz&Y`QFXCe3dnfjPT#W%D>wL!I@Aa0 z-%oc#)LmZF61~>U)M!`XG_pqgi-CMoY`myOZg9%N-|cLYi>6Oil#Cd$-+tm)g~=qO zM*R;9z@R<)d7I_=d$NZX%M(S)M{;9x%a;TtC5EE^_5n%5L-AuGrolae>GdD3zmoF% z25@2V(<2p|%*hHPAg4mq#G5OGv#qLIB?K^h0LHB3;XcenKmiA03f$2{BelOr7~-H( zeJv%3C8g_317?^bJE|K?ONVe(=r7O;LMg&6-R2R01bpL(db~@GNxmA^U&;r#cPsDV zv56IEVr*MbSqebP!P7n1ZLfrJ7|!^HoA9481G5X;l!0lCb4}DL7wwTn^5A=tosfmGjSlo2 zdPD7}cbTrFfzeih&CC}$A-J=^K;lE;!^EQBB%bZ4*Mtf6Vvo`hX3!U+(U$!nM|;sF>KXMx0(dJ&F% zj;lam|5wvj=in$3-s~r1Vmk5-2npS6_ox>;Godflqq!V8R!k#5;m()ufsurO5p}Ld zOD?_YjZ(SK1K-z}!dGOv8E}Qs67n@`M}fyE;X~6tWC}sW2u%4uGSz-n#(G(+(5bt= z$Fb*Z|N8XVnqV>)b}Nwnk-pw}Kfb0gFrmq`{2>@YTXFVEivkIYGu0t#7NH2pRm>Si z2frADed%E#_h->oU$52DLvDeAMpr}zyj8%`&F=zi)LVaD5eoE?)KH;no+6fPv_cML zJ^ln{yjK^v=i#{hZmCT9=ihc}Ok22Ysi#n+jTW#U zHTC_B@z9L3w0sW)eaMzegquniqa76Hh1^1#nJZ0j68dPSOqp(Km)@J>19nik0yD+A z&+7ISx`B#F*3qx4uFReuKR!+H#xj>^lKs;NYl=1TBT{JTZWo;Gd7JjnyJtkp*G-Rd3Gtsb$Bwk^nX>`5gFW~yz10_cTeNm zHfh!_&X=&HZGuVVQiQdH&=S*1w( z?}xGRVSWr3&nPr|FBuNr_;#;zET~ZD@1q~lCmIT+m!Q=s@M%B#W+%|zbn54<)up&^ zZ=1O;l^k*(HbnQAwi2v0b|{qY<$12LCLL#JC{aT+zT)jT3;mJ)9z$mT0WpK~UNf@H zP-9w7#vqA#tIgZn@DKeez}@9B%{L+mM;j9V2OB>rl?6#&uPV{NI)@RO&Q! 
z1yGeOLrbP~M)yKEgI&r(LX!<}VSf=71Y3pNJ#y%Lo z2{TY0A7ZB-zSaF7sn|FJM0dOSphZzmuP35S5vNfBrq;ip*z9*J)72wPqz3n&anBz=RG1E8_@R&&3r`&Y ziz)_&IxP0}?sa&uXZ6=WHn;12H3&0rc`g_bqsrk1SBXs#LnxTT72y>LKrVQ_LSybDkcB zjyy_yZ;P2Ge!X7xZOU0cB9=lkfSr?tMJesC(M2V-5^B3s?`}B*9*j1#goCRXGa7oB z@w)shC=k(bU8s9}H0)7&5g%^G}1N_av=bXW@ z5};aGBAOaTN0}777xZb5vN|{)`UJ607kIdmWltG)Gv!wCRW=KRM__`-D$?f!yKH9Y z4E88ZgTmJ@t&PJU13H-q!;rAfyHOo0I|)v&W6a2v}C0UQO|N;<#;W^a0CRJDWd(hv=!z;prn$l;ng8>+lmAMqAnV zskGr9I7#Nv9KyyZ10nd^P+RaGN!N4B&J+)Kjcfyr>-AEezj!$Hx#BK3={|XG#&frR zJt&lEKmU1LzCp=FccgQ$T-}E`P-Gm%k)L5g(%#ckC$vPc)CD_vw)uGOmKP+#e%if_ zd>mX;NFR(yvZ|%^OLr&93MSiK5fLJihbPt1*Guk*;r9*0!!rDR0}$QTOk|A=etZRr z7PK=_1Kg1BXofH@cRs&IF_RwLrky{rxFNX8f9u%Gt67Xg#Alagg;`Z zbWa2E*r_D3Hm{PzF0U5zj=M(O3ZA=J^bN<-@mzb~6oGBpmN%Y%PWqr$kV1GcrTlr7mm@Sp~)8eMT%0H32zemq{&-(s&T$qCm z1l$?@m6|9)cjxJ!=V&$Ufk-g%3GtumR{vPx1oCursi?%oQdl#t^uGWdW2Yo^hwlBau*Y(9GJ+5UJVVuG9DUj*; z847XAH@(r9WP(Q^FW}w>h}O4*>mIO6L`Cj`uiwLcgEsPq*34YGCb1*5q0WG{zd{Fd zzf5>wLAim?MZa$T2Jz{U0#^}%U-LJL#~x$cQCc&3jrNZN6evB29?L=NeM3%DD?s&| zSEN_biKR(8*}8mt^cM-5@Hg!bD;oz$Z2UFCEbU3-*#a}F+^`$ihPq|}gb}5)%xAeh z9QgswkoU=fi+q@Eo55+T3`tU;2!aAkTX83-iI0|SlN&AQ`V}6VJrsR2mu;9B#zMp< z*)E8=7nk^9S5eQ;0Lk(a$Rd1>ud>_;`ym&>3*$T=cNMxNK=PYRdeuWB#tX@L7>J+1 z?GfA%8tTTP1`hy&{6TR`g1=R;IMhLba)ASIGHBnC_{Q#eX|AzB4I7D>(_p{+M?fHc z&Dq`?yRHzYSHWW&E)O1!c>kJxGT7ZL0z{F|jqSem*mn~}>-B3EE}jT&S~eS}Xr}=} zXlPoqEg;O(8~yGQ<80u(^W;DloywxG_o2xYlQdsBE7G0J6bh@F~60=KcFq1kirK{cWXNK+R@2elE3Gr7L(-?V3;46)fp9>#0wI zc@wfkk?w-W-aC^u6#`oMPHYg4*`&)et3!Rk`kS0?B>zMrcijMt5j!a+#T_NuJH|fK z0*FbeaLT19{HcrMbR!WbrA5|Vs%aF@QvSe>zIOvcWY4(tkxR;8z?{V6oQrc-C`O(% z%+2BX{vr;rb8MiIK(lIH0e6$ku(>tN32*GP!rY>q0Dh7eQP}KtxcEi}0^yq?3?5Eq zNRS(7OH2hmp^^TAwbCSJ=A8V5V*osaFrZ-_JZYJ44;g`yozLA`Xtt3ekMVussL$HZ zGC;dQwvc`c@Pp>!=172hkQ76NDqI`~3&#~~*W2UeAF$dl$yUjR3<=WI&SKcy=%U zRWbxA(33wciE9&w(WE(5HQeCotK93l4f*`~Kb*(8-B~FN=30?h7$1hmGq){@)9cyv1U)y*V-~5)_-~tGT)})jR)1pvJmW(~qF2D_x1V8y(Xk?S2{Ks4KtBB|2 zK+GwSXH>f5AS~Kgf})NIW{1M7>&$sFw(E^TOfXGP-zmfBUw~VN{I0<0PbctY7_hbs z^CO2o%oXv@lwm}= zEd<8BxZ>wae77{OK_e!Hdm>A{x**kvuW9!JQ1J}+A)@BKje zM6}4s`vU_!vxw9V9f%_doTN5l_=YRq<~$+oapPwB;hrze#0wTm-}pO?=JHC^ z{!n(h<2_R6RQwX~p1bUgVrp;6!rSvH5{c9q+;?U1X|<=z?fq>`SuwiNL(Pwd?&go{ zZ~hB&+tOSG{wtA6v2G@W|8d@7b=Pr4)cuR;bUaqg4bOL461@0(atgZ-e76=x2<7^2 zOhT$CuD5wMGFkZ$h)FoHf8l@^x2T_Y(l&5+=k6&qu^zCu=;44S_oaZnoeeECU_NUL z+JD6K_H`t$zB%3zcI=v>vv||rPI&ZesiFK&@k3rzgNUd( zX1$&!y}ZZU%kjifw8ba{Q#HrzV>-5}asBSC8d%(SqzgN;wb&%N5_@`SPJubzrd%S` z0O^O~j0QoGfq^i1#%Zh zon%)ok_IqJrxi;vou#?aUb8DA?5TDr)+fhdf6FW>kdAYs#coVSr(y&YN}i>X1J~1) zMRnZ|xHg$#E-xPoG25`$GD{*KodFhOW=XN(bgR7vl}XznyW-48K4Uk1jNcDVua(lJ zB~Psms%(EwK6n^srg{ZBY886S|6TE;?&8<__Y>&K7A8Nhc*c{c{hL$gYA@YptyD^+ z4cus{*p*Zw7lcm?Ka*5?4qh0%ZLK^`E0<-7djpz3@Nr>k)dzW>3WO1}N;b0PX!p-` z_fiNU+$=?cRecX`v>{xhnYueH^L{4{^O+@OA$|Y#K31JY{&>=!H``8***swK?Rp)5i)QZ}*~cMN)kx2CdN?OHBN6JD&`^Wp8zUN9HG}u6SYJDaH)DmV zLb@6Zn5Jy3w!{y;jbRZC%>`OUF@?ChV^L0`18-Tj5>6zpMgF;xygneC6?dNY%$E`{ znSkURwBa97YJ!zCA)|S9y^)k-IXyN)l< zi#Tx2+N^&EaxN3SO%fP0ZyrSG=v4pBU4Ud`3h6^)5uBIF2+ZdY^vp4MY^Fo9$l)0< zYGANX39#x@NH;V2olPSG%UdoAe2$G9el;i47ef_8SrIRI$v#b{{uwWPTo&1pSk4&p z-+^;jb<;O{TOM&Ug^w44&mC0qo>&op&pUMt?)R2@%U#XP`h5%KD`nC4t84j}I@{8YuaJ|PrS$j{g4^AJtfy}6qWR{wL7^nBY+v#w(Ur?I zm(fYpgGT?Y%9u|qy^x-u$=}fvKUIx9lTP_q=!Ck}AzhuF3Vu4a#F`=>iBu1txG^FQWvo(PJL(352qqun5SxQDm^uj*s)Vb{gI5#O}cZ+|8o1)F7SEhNp z4XvgLYq@>b7UNx3xh&*r^FCL04G_U#d)N$Z=+O}vFS>Lzf{9sq@8x@WxG;w1ZM`sf ztwD&T7?HsBpsU#z|Fu18(J&|jJe~`sSMdGNCV)&xt8gBqC?7P1K2~Ulqxi-EFt3G; z$IJqQ|r!0a8qxqNZ`Ndr^B2DVt{5JmqZrAf5D=5+DRoIp(z?0r+)209|pB9mGQC@ 
zU%*xj)hcVS8?%n*2&JJ59kUy%XOb)-DCz}_r_(>qDD$5{=#ijByS*OE9X21T7}hd$1+!Y2#Eg1VLfiPmUxbrOP;f zQ0c2hag!9H-<*7v-W5D&Ef^0zn5yjlDxmxY40P15`~&^qb6U8-r1SUmK@$@GS`r?c z2rvQO0&n~)*+pOt27Qin5HCiUJ^S)ypF`H*0euXH-619x+LFMd4f|ffiyo48Z2~xLKH5pk}T1r!P0DiXV!ZVgG$BHj&+X?XCd=Z)zs`G94D;nc*Y0ep13fkal%u%6lz zOt7(V7Mf%p?A2dk2{D*36L$HaiwVVZ(wG>+E6<>rBI8_Kqy;<^>VxiH1kRx6Ckx5)&Ql2p68JW4HhK_LNo1@TiH(^ot zHfqN??3F3|))H0>y)pC7a4g?yX-!9?%}m%`a}ddb3dAY4aV)E=R>~$&3vqoV&LCW$|a!LP4FV2a%7Th1QDQrfZH6&Jcx!yKmy$u~HExD~}PG zC)mY><{#Dg+bf*eNb?@6xbwNY|6nw|C_qm1iYk$XO7?p*kZXl6X=*6_D+jKCg_T_! z#YymXvB4POH<__g9lo`Vh!f3^zhSGF<46^5x^$&(#PsBO&b(t1?vGYZR~vD_?7XL34&2vzXZw0Jj`8~W9$SCt3UH0^IphUdiDC%OpS0I1jZVJo{mA^ zd!0kyVBW*O^T-vS2mZ}lz!d?1&b(CecbGZgohCFUo;a2p-0XRKtPyHV%=ic=@jjyE zS5Ii19XG{Q;@&s_xyj{f_jOQ2v3UV%|Dxw{BaAof7C~dk4X(ughhj$04f?llIpZ#B zSqST(l6=oIY(9w`Y7i?B^sE1E@kYDqN@Ncf50+1&b6#F~oxL?$w7CM|QyI2VeZFER zFdKY^U1}eyM_sj=GJ&mohSKjzzil>TkLy#<+OwQ4rUq4kP9o41&I?23-KHRfpps)y z4F~J_*AYTc=(@V0;kCm)iuCR@$Kq{oNTR5(Sk)9fR5BOx06NG@##p21?OX(8$luzJd_%Y=V_`2%}yG(IJigYd(4(yzkOb5dVo^mXH@9Qu8f zU$6mR0(?D#dIk9_Hl@dQ`;<2m4z#$#Oj%I_YDpkI;$PX(5cmk6qHXHi-6BDSz4Vp| zz>pzrEjX&&#vQ$Uz%_Q@8RQxZKzP&dhNfe6vM2E+Ji!(*-t8VKS<~XKf5e9;#56x# z1PdKIeuZ^EUEk%n$OpvIjJTe9k`aYAX#{?*-Hxv?ZxrcMd4HckR!TF>cEc*=E%7<^ zS?iTKEBrli!}JYB*U`WB$RoqKkr2wxyRNI0@J@UAio0HjJ1fq`4kC))tq64>RXaw9 zJfhu0qV6vLqE$i+rfg?uyDEzkQ%2DQ7TmLVyxN%pJiZ0iUV*&>Na#tE(8VP0ng5XE z`aV*g5PzYBd*cwR&R8iCne%&*`Z@KKt<9$1Fl6+)jUbP)q+&OnTubg3H1;S=D$8J4 z`>i~0HrJQY*cp^fqWb@@=}#~qn_>J_ucl+}cTA!c8jotCAR$j})Qp{EgL%Jw2}{q8 zK39OcVcUmgxVawMzsmg{$2{8lWJN&`x%)`Qd$s2gF4Hx$9I*VxIRK2+9}v3-1gL&M zP_uh*JvsnbveXp`qnO5i=F!mSBY&*z`hI}gTfIQmra)JkAc1!phzf921sxHQC8Zb_ zsr5|}lAfCj-asI?H?FOW3X(y?8qLx11gc^zO@tb_M59;9Nduga?`QUKjEPqR z5$`V^U-xGsZyWaOMRqkE0_!{+JuA|IzA5_E1;8k|=Hwc|4}$*6R9Xd)LY;y~OY4Ac z7@YL^9AtEl0H`u7#gEhvCb=8TF> zq=Te_J)!~!gsYze7arpg-X?uTB-FY|CfEz%WulW za=&{>y*6mU%+C&Y-x>f2%UbE(ff}Paslvk|xH{s>cyx}rsdJyzGX z2wut=9WPu%Z7(RKGL1oat|uYwb9LXchPT9riun6|i!P?rm*2!5eK(V<&OEmH@1J)5 z8YushnITM4R;xw)*NseRr1c|E*2EP}A3E1k0_Nanlpb1M{sm*RKotjuGv5ZPqZ zapBJKgMuvz_0Fr=e&PA}VlrPQ*>-;$8+PMfg7XW@$E#t2;h0z*T>X~OInZiL`$bWi z)1T|h2%IeLqD+-8MWlwK)HWhENmRSMJK!YX^57fm!phL4N$>&Yzh5=wP`R|iRV}Vk zjXON!)}a74SKtQV#(RD7gA(c`f)gN)k6?XX8Grk!Khe!{52vpWV2nsyv%)T5x{t!d z$GDRY3+&N`{Iu-p$1d)-lT<##s$|Pm=7^J0QFXhJl~GbS*->TZK+1pVvGH@OLDyn@ zikMbAotg`PK0mxh3&^qNQr(O7W`r&=o>}++y4Jh#B;<~D z_UPCPq{F07m-ve;uCuV>2Y9==HDv?xIa=0Jc(UO~+WlU<9LS;xS$a)Qo)H$}=?#S@ zqFGztBT{V_6S%=heSdFksb$_{fD3<0<&=PzjsKtTX2O2ra;@mMntcG3qqxRr#)Tt-)5~(^~F#x{}9Wh_lY<~qbVG={Bn&1ZP z<@w&~6x9`ZvyVm*hMN_S3%&amY86llboyCuM86S=|KqHFA=izaFeX6IsKQ!7$d5yu z>`{u?LZq7x#@Ka4_9ElrAC~Dy3a8_>Tfj`f7oEO%ZNaCp;X&R- zFjNwbe59>%4TI6*A3o5OHiLCs0sa;9PWrau77uI#v2D|6PMYY@Hr3X}@aw)h%NVstaF0j7N(P0Q*R0(|_>sUOCY zqdC%=K#M?_h|>F~j?bf&wi7|Dislwada)<88+;6w1MEw&-g#Bl^;pk8JW(Cc86d?5 zR3Fskr%te=N2j;{T1qRm&%B|wBpUhsn~s!D+9;p}Xj&L!V-G+7qWYUzX^V?_>1w)X zVg0t-jvuXSpu_3;uxmPCy4w#e0+jjWd+&nx1+RQCBYgHCJh5aDf82YOe`|!`!BZ|XQ+L2)+?H2=BFb)Q8 z_1+h=4I%i%#0QLY9&RD3&w7z)J5as74-0MgSLYU?Zv`R`$=(wu$u6S+6z%+>{?}Pq zeZxyl{jcTWdKYn{5h9Kdw|H_H@7zr3(5g;F8S=5B&8p7t)2?C(I$26WM?N1)Keb8f zUN~vA`ylOznfu*~1~%mAPD~VBOU%D>`$JedYy6vaX?)2M3nnw1wq-~DqwQmP>iVJ2 z0EI!Li7&r)=o@2Bn+7%v6qDM2PTdi{XxhK}n;D_bs_U~nOnFd;gr70R78yQ?2vu<$ z_g*0mWeK0uO%GsZ64>9*{%NfBks-7kNPg3V$_7 zH3Ms^L~Q>spXPr5q&?+wOg)4z^rz6`N7?3;5sdsRJr+4*K*lePAIgD*B*}GB6XaT!!!z=t&F9urws%R zL?cM^y88^9Du-`{zS6a(mBEaxR>%)nmEMgW`YRv2+k zUFhCq>Vn_kT%9{?j4Vac1(>6$FH!Olc@LSMk_q;2Iuxh><=N0G9GLUxx?{e;tOxN% z2ugY*ED%?CSQ=}Y2^MZS-xliVmwO_7*OB&YRDjJ%%}spc*qsbYy4d}LeJgfYsgmo?Hj<@P0ONr 
z6vNWv;{y$1fm&pu@omJv7sZRVKKn=GCuEI*dN!L}fx3D@sh7aCAe{os zUZ4jdgy4W--c1;BuK-EFU7L0UJy!7rfRY(EbPC}7-U)YydJ;K0V{K7auzBFs%lU}q zpUt7{f}MIPD-Icq6%oFz@~c2;^a4P){l;YnmkRvrws?`-^ev}C0fux|4`cryNXEhg zfs#e(zE*)9*Fd_44}NN4U_$7u9nqa0-P#2tc!eG=7&%yOM4H({NC1}V-D1FZ;OO+i zyB-9bPFB*~nlCl`ZMX<}|2nE!x4fmP;1RH-nju+^O?%lc=o{g#;ph6~Z!!q!+2-E? zJ#R^ZjveX{j_%xvzwup`;B1s>w+F2nI_Sbm(e-wcvC;SE{1 zMoRlaA7o#p;a7Ragsvm$n`~4Tall*_u5(X!Ut;e>`)*NV@^1>`*M4-6cPbr7ITmgg zWeF)$h4_)YaKUuKUwyeFBeq)mk(@3axQ#2BN6fZ<)PX{vx*EcPyVO#ok71LBS`xF- zqz1C!Ob8NSQ=cM9Si953#OF0z7^f?kqyD#c1YUvK(;&x_IosEtg6A0Fm3obK!v^Hn zg>C)O1USq_50pQ1b8PWwEGb|vKOCAX65TNCJ%i3Qd3mK8<2D&PM^;JL58s9RzJcz- zd=1%7puN!+<}ZrGruQBI!4^M}`0z!bWf#kdA8F$1XwZ99L9Rtc3uf17dTLyuxa3cb znVc4h+lC^H2UHm4)-HZGjwML)tG3w7wFZAC@*6;pG1^!?Q38&tEfOY6cn>fPJt|O1 z>?+lf7?UuHB)!a)2?db^vSB33rx4*^1YDi%J`oEdzjzZpoe%y>k9m2CwcB!ms;sXv z3F23kV82!qQ+RY+_A!;ml7AVZgsnzg;Ji^+RTT%!`(#k0pE9Vt5KeaG`-p9t?19x+ z;3yMH8b{Z~^zKyC#|GYb4g}*8GND_x#3H|Q)=I;U%=ShBGXt$%Ux1n-Ov?s+uFG*A zh6CrqppAc##OT){#(A|*Qwb=I;YT@Ex4B1CeOMI6KXzN4*I~tUwoq&Lv~oOsjXL76 z=zgZ%z7K0*@JYV{wHx`~q_F1|J`Dcrv^cqDKEyCKD55Z$!|O&d0Ii_%suu+)5gmUA z#9xF56W;fE;9Yru^AWpR_N=S929zezTT{pe5Zwd?YZyoDl6`#IFN(i59IXP*;9psj zKDQu{5bXbO5=Td%HIp#4N5%6^3+e|H&{No5^I02VSk)u&Cuqrpt>|8o@)&IFJv#&Zshn<^?{Ee1AltU|pZ#-o!~ z%bA7AJ7azd(fTxj^{!?oz$+DPJ#^Mkw()U;I@^zn;sY!_%2PAJ|^>tf!bxE$<&rcL^`=C_kv-?KPr(yZ>4wsZ8%Wh1f>?LyAPu|D6Ae%TU#Xrws8EChn`KZ#EW5#mxzA2G zJL3+zRJy)xAj7r;{)=$lE{JLxH$^Ib)Bi|(xu9FOtDiAG_jrTA^H4v&z_6RWE3F%Hjx?B3a`JZ*pDkE?UZYEO=RE?DRT1mVP%|SLThp~(vq@kEcv`veT8q+-f#U`{) zMJ4`6C}qLzh(VX#MmK%A*cN`v-6|#ppTG zH4l0n$Ou`x|A0bcdcK$b=_FnoUqh>CMaw|x<-`8(LC<P$y^5c~|9g z@xE=X(JW8mb@DqQ5{mEY^PeaUy)`JiF{S|Gl({#;=^I8=NFH4>IOm$HeR1?b5lDe=)XPg68Ee-7nN)vPiy)+Nk&rS`Y##% zGma&Cmkh+CFb=LlBu$%>l}QSnjr6nJrNM|C8vK(*glF`1;aa~#x^Bfcvh0T#A$E<9 z@XGOXe2Ii1TC&#jx85Gz9$9jPS5NVN{o0CXUF#!i=~6T`EZ=Yg@zYW8o1{-(ZujiG z0(dp{-J~A`i@Lr85~^Te4_Pz-=ELwF&Mr%C1xmRN7ET+bt^?ik ziskZ`%oKxABwh@VHh3^P)5nUl7NE zoBf?3R_%*o63^6r7NNQ~YEdirC;IHzF&qvVP*C z9vQBe3iS;NXFALmm^y+mu24+s3$-!>M{`3G#Sy~mpcnLa**{StdB#{;2V%E}4&v4C zq`U04e3fK+ly_WdSnE`hu8%4;1Ugh7z1t~qgPz0^ZWzaDv8;Y`OPJV~`Qdy?UXS7I zMvmjmmp-&a_i}eQ^s2v@LEi})IoNo7`)Q}1O>bz=sFb1D?3(N7x|>T*LC| zjVm=;tz+Tmodq?iCvbfzq9(4RD^x@Cc%|S2M`c%!!E?foZ=<+X9bTLN zHR{0`qA{mjo5q(^bz81{sV1TK->Vk_{Tf0Le)b_-@9CWuR;hpLs~2;B>4k(QRk-`O ze;_z?I!-^w#(p)pS~mExf?l@C(tx;`y3&N0rX_9$2~FR8>Squ9hbAn{{MW>sjaJYu zx!=xdB~{#$7s%U@SN(+yFDR|B2W=2L{MtUllkoHLijDQ@q(@CC!E@F&V9c7!nmUQ; zLVk>@fBT}v@8$vZ5vT*3gXT_{PCSieCuD4&b;qQItXt z$q@H2qsyD~#cEo?$iw*RKaPovQmU4aDgqV4YG=D1%jKIitAg(oJha z(4DwbVf;VKc`js$ICP1Z;)ayVUd#btu!p)lBTp~Au7F)IBAVIqV1+2HunF>qn7=$)jGyW>Uu+Z|2DM?>h+I3fNQ-*L z&WcJ$F-geY3y3aWnMgb}W-9jq6SZ%8221Y^s>_A$8VVJ+vpOmjNkX1DAth;Rx(T6l zuTz<{y!F44asL#1(H_WAD@Pi`QS67QKM4|-rccw)pYEE}BnRHfUltIA1$-ddn#jH1 zyb-p@?--@s#NkP$#>Gf};c-c|N>e%+wb4M$a{KW85#dtc}am)%s)0 zC$M%5lz*0$EkZ-8&PCtnb(8pQDDOIj$(IE75yRTY)6?3GK8l@h?&gWh|7p&+WsPYy zyw41!)*?qzNWtOf{T?>74W*b@?!v|w{xx3^LVIpldvnh=lgxDX?9Ne;o@HpHd|B_E z`lKjwxaLsG#ze}%n7#+l$t%uXmG>l@RpU2+7Gu37^6f4&-RvXG(s0b58Ub{ji^caLezVGt{+ zz3h19mC$Nd5Z-%QeF9B4i9YlL^r^?oTL(1QKk`O0iq9;c@&$oj?hku)>oy&Vu3KZ} z{aE^)q|3kkrBBiYsV$h$hr$7$}mrc@w=T4-TatG>=u$ zjlBJS58$@_PU*p35V-U1_tfi-{cvW)5F~S|AK;*ltAA5Oe&I#6W<;W(3!0<{w#i#p zN!MFnV8EZ%$im9923`1PlO^X5T&?nAd^+eoFyfsdw%TdAE0tw?;!GTD0q_z1(XF$vkyph$-$2&|Yzd8*ULvQ!MPGO?!0&r+u0AvJ)dOpM))Me8C7Nf{jz{303pb9 zzaztKjNr%mE|pt;g~Ffea~+;AQjSf`bv>3Wp2+RAzYw$VHO}#ioH(}FUnzc7OBwkl z14Wr87|V7|18eT{H!~8SpkdTBFQ(e{1LC745V`B=xzzSgsqpN%dS*;FX?K<`*8EvI zCO#edF^&CD3Evgf*dq63Jw$#?nEJ_jMZFd#LRrswX--2dekBacH==q3^GM>^M?o!? 
z1>1@HFS=X)EO^G=HqJM+3TUG+%YJCSms_b$?cYb%ti2^~rXi*V|ET$>^WRwDnj>iY zP>$G^nn*x=;SWUCvs04r`mm@!qV1RP}|4xeJ=^1HSlarFzEBX!F2zUNVr< z7QCCls3d{Ci_qVm$12baOY7a*wtHfg zPyEhELW-m6yI6(4HSO4T_jIT@dL=~DUkN(A?IyhwPux$(^|S!cu6ndj>ZU`vQ}hwL zU@>UIxte#IwZAXQslU6u+Uy`wtBNeBn1$?%VbiJ8Sn^{r6b>d3$K(c5dm+j0ixKQ` zitUb7xkIV}=DU9Ni_u01si~8GobQ73DPx_USxoj8L+U$xye%z|#!wY+LNVI|& z4XWfl&RW(46GK@k^l@1@^xs1~;UPa+Yssj{IMEh2mBe;hP-}bR&eE%4r6?r&u}}C2vP@bVi@=rHcwaG8_O~Z2kVgaE`~9Y4 zi_%vLcf`Szf&>!CS&yTGNHObC}GQ32XS=<0$G z?zop9o~Ou^y0EQi;!VAS|FxY1UTqN%5t)-{Yz1wya-ERLb4IG~^AcLLUuR1Fr5UAs z)F6+NZYtdT#dolLJppn}vn%mTrQy4qaFmTohN{RU{=(n~qsO|31Be15*45lP3w%LD z)-Gd(TWBUb>+H1<@ql;Krv+cFo;Z`o#Z@108?jAT{wHOt;fAqi)p4k}TI?0|v9tG( zw1io|p5*J@#bC&_^X(1eqS-PKl>c5oRvdZuM}evwZ%Hntd>uAnWZtbg?G>-#^meJJ z@;AMH@wryn7b#GLezYPnzHS8dIGs_Sw5G_vo@cC_(+PT0fgzjs)CHF9n)-1;O0*TM zD{ZYs6{0BQFlaXX_~-$XYZg`VsE46CS|>AlT%Zk)Mm!n7wPiqK#|i2Jn$jL{7jA&J ze(lSNLtyfkaH!=TFOHfWSE{;!$_Bb&(T#%&=c1HTw=|3@&qgUgXLY6w;WpNRR1^qu z6)pVqg}7{UZ#n}!ge`_-W4cYDS&Jkt;2guMI*Vm+$kzkLmU1gp{x*UFtPf)v3c~;b zwocwS`(1^y9^fc&8TD%2_~}MdA^Xp1JeQ)V6X!`8%!Uzf7zXqoac`6uR=*VM^chiP zuKP_-lC?gVspdfo_E3fuhQcK~g2+PuzXnyGISU6nzA73x*OjoFNJC79@hx#O1nnt2 zq5B`Zhrk|rQ`5;E^ZPPjm<<7d&}Mc7=Q8J`nJDt5*YI)BJXOduKZDCWy9jEDREJc6 zrB5Qr9k4!C0i=oc)r#`}T`gKDSZ2?-gaW`+k|O6K(pSTs!ube2wUR+GRA9x6&2nw` z=8;Cdxgah{kL8_8PtPNgAPi#?uP@iq+o5Pn%XCJwRSb`zaG~kTcQ1!mwc~bfz_pmMFz3zh^RBg+KNT|gk^}`$DL1Ij0?E-JQ^#m!B5K_TC z#8QwC)e!sL(?ge%h4@sk*8(Y z6?l2**f-mZ^C_#IZ;pl{r}pH{_o(-kfonS-Agw8!NU_z!h@AD%p85YKVsoPAiewjp zM$sYKyXIB6MzCl{dea^l_a=@1&SzB~&pKp>)M zx_Zym8^jfUK~T_-X4a@zmL5Qhj0VDk=zV$42{0)`?!=3xJm~pqLP&R&Q=d#xGEee%|8(XK$v}g$Y!B|MItaIO*N1xEqE6 zSGD`_U{Z>*i2uM!Ci4u*gZ;^%uUM2TkBX{y4X@8${TdNKC) z2Te{JU-tB87kSfuo%@mVbHr-c}5o~2Dx+Jq!o%gRC6T3tGerz<= zvh23?_0I1Ye{UIAp&KYWw=OSHLolR*|! z*Me=A-y#Z?C`>NJCe{)`C#p5tN%e`vwOeGgPE5r{wco)j&(tV>0pP|@w;Dv0k z8?gz<`tx%ZqQ&-p9y!w#HHm+p{Gt8EZ^PtKUVo>=u%eSG<)hQifB82c>Hh*%oHUVv zX1ZE|b%mX{^fs^ueD5C8 z!rP)=R9FJK{l=ga!!(xM?(DlGkM%jYaK?AbRexaBnGr(F^NjtQC*I|Gbi8!meRKsi zZvsV#xIGd3n%pmDsbR>M{$1p6-iChgpmShrs`JEK;+d(Ye%Ho6w9zfGLB7+|7~Xgb zb+dGv5yhjEr0q3#m;CF6fafy1^*)eNc+)hQs}m*d6Po^kuj+@U6{F~9jn`C(B{#pw zs@*c(to=*G$o%9`q~EuF-GelGx0t@P0zwP(uVU3@5tsR3`_yVS0~98EH?N3cQc}IJ z_xo{N5Bh&YBA$YGEu?KUbU_vk1u2Xt@z+=cUa{|;peg2FN;dVE6HxOY^CXpLu%Qyu z-rLJcDkS!KaB6k_+P8NU`aj#Q2Di%1yR;GntOL7nl3mxEP^}j==l^_c6S5XVKcF9o z+*Z*1gHism@}McUEi4(dba_*wGtE@csqoXj3^DhYee`F*elUF+DXyDT-(^VU z#7&3unUQzf!1@Y1skDB~{%7M>ix4h+iEw*!N~o14oX976HsRR;fT5QXguTgr?AJ&% z-0RJMk~1<@3PoF*FO7Ivw+zMmOlS}(lc>ppA8x@%*b>X2VAHP+hWbRFD$2K$vU zI~ZZw4+u&!)WfmyP+seRlk6b|4w*D031w{M@87_U;5Y+>ME+BUz#oF9bvgnT9S`om zr`ueR%>MSE9p62{V0`UjOVb`vn&jakwN3z-G&{r5~*%K^d@>RlBM#NmL;~V57MS2vJw)Y_# zvbztD<*3No|LKi~bRTVf>(3V4f9+?NNK+JG?6sLYgYO4meWe38?OCRUovG86Z$`mI zzJwaV{hiSUmUxrF2>gnM?1^Zof?K1%B)W!LgD}#fzs@(f=K;cq6jQf)*^_Pjlxzni zI%Mc4bs-N}41Z*h!ZH{J|!72|Mi zQO~@?TyN&Rvqlm(x!j2<1x}l>#r3+Ye-9pzl*Fm;86?uc@>Tbn94f+J$blZRzaWRr zax)h(CreI<9+^syN&Gj0My^xhtE!2EXe;&E+BE%lQ+T$!sUN4+;xRWPtt-0_uJ9Tk za+wC2%&T6A2U|XTmJp6hsX@n)Arpc|=p6J+Xmf~(a8DQS89IHxzcP1oMNPVo9d;p0 zZ)OCf@oRd9cK?)|RhgLOLb{K(OH^@{_$xOk_v_m#)&$oTmZr-3TQwND9?rOQj$G+6 z{uUujYM;iFdR(|r%4&{3^&y#3Ej2WZ6sJBUQc;iV>zv7@(XJ@i>Lj?{J(=+x6(F91 zM=bcKjg(TThDVx+d`_2{K9>SVQ@Q~j7J8cFuPvPfpSEy9=Ts5Y zWz@rvWjArr6RpPgXuHiT=vcddE8WE+RgYai}>Q=mPL>rX_~U3&WX~!p@~%Hi)G=`t_zM`-Vs1!i>s> zYGS+mQT0@7FPtxzLH_Fdca=0O81yrb|E~GqNL37>Vbr{O zM~6={!ep;3q2_cp_&Tk=07!HK`V${S@B;UkVa01j&ool6N8;+>Q6bYv3;pFpnD$tT zY1J@=0{KpVI9%U{3e=>poPay=sE(e=DjCR~>ww4R^$|5rydV2cay7O-7eQ`#xo_tu z;$VC}NOw9XLHJfbCtHp|bKz0teaHICL#JM|*U$cxwc|y+$N^_XLWkfpPtEbQ>1>fg 
zGl(fd)v7K4>J32HsPY={mV0mp?$SFFsb+i+dWsmZU-Cwr=hs1zees!o%at>S*Qs^C zyw>|i%Dm7g3?pO3TggO0usq6B#I#Gj>?^Y9518DF%b8bZA1y`-7jQlN7uD71xZDvw z)6mw3K?aA4BP4xA|15nXI^bL;it_f*oo{je3S)c;DMw~y(3Ix z3tuDbg8WjTRp_w9=HU36eX&o`q|~G)QjROsyqp`6AIVeKOTfqdE}!lp{3(M9-^C1>1J04&*UAO-=!+1_<9MSZ za_4iuQ%0`B?+#WtW63V<1S4e9B9fzC=cACsbOmE(xFm3B$6Xa|%-0#a3q>oUs5`f^ z@#Vjsrx4Q6jo?nH<^3{2|8UK=bZ{ggQGAw^zQ(bL~~$fw);Z9ljAwl*9B<8K(ha@t~Cev!1MTMtDB0~tXJ-Q zyUnYH@0R9)sGN+apq_C~qS-ah@W?~&y>G$Be^gtGXRwMC11lJ{Vf{N@5nu8johOe7#HB#IU=K+HR*Byotzax3r#gvqvT#@BIix;@F8BD_>f}$G z<0Dr?a-L5Mw7|ptEEb3c-o3gy6ny4hX|+?DU4lceIh5fN1d(Wm?*#@qpRBM)W-Ui=*Tr6vO%2hlx!OS z;C`uCgN3K@xrgf`-+XVhgHI&;*=TdmTTq6cusb(iAN93~M7-VOqKIi_zXq2XJww-a zI|{Hz@6(6mr$v3uAEb2PhCKGb;&CqAx%~-jXB6$6(vzVyy+ZiWZXero{je6C8!5#_ z){-qZ(j1!)I|MmL_(lr%7JrIounFZt^(Y>MC0P;Subp!_j9~8l;p;)?-7VLpAAnrp zp(IB!qm?%ITN#XBaah`wGj97;oaEsXHBXGJLT8e#LIRUAalMIdxo&nzpIQ|qKVCC? zf{BY~(LjrG#zvxwdO+04JStJ{{P&eWhuPWS;#r(xnfV`!ZI=+2^`SFU>bDrM<2?#z zv|X~HB)4>6BVY7WNk%7=AH6hT@rlM@wg0Efb>?@-lgNEgMhy*eY^AJx+l?k6V6Ejq zrt40whqofh>!Q3nk#_ZSW92;A@2=aI#I3_y=V;UhYwc6OzXPKjr5LBrpY9AAdZ`7{ zha?kaZ6J#-1}n7R$M&v8u|gm468JAn8NFNgl*s=Kee=B2yFKRTU3@C7TEc7Yc$+ru zqkR><-a5|h&>V}b z?-dDhtx_UJ?b~Pb{xWhoG)G8TUe)#s%xM9(pM{kF^``vD325>j=r+w`T;1gpj$$yq z_P@wIer~U83zyci?K;F1QUO- zRZ`tW{ryz^6U9l%N|BXs8iT}`+vXMDaB&u&7x?VfeSc4w7v{PJW|K9h=|a8I_mxe? z_<~gBo>-Oh;^8Dc0ZDG`KVKc|cfgq{qZl~3S?<7w(dPL@%O}$63((m;3WR~30?#E& zdmt~&3IFg;lRBHS#8kEy*RV(AB~H1(SmaPqLlh70d6&KXmj(8-HO?zx-RBtWdG<3^ zf4+40dC*1^MAm~#K@RY6+JxfYQ%~ywg0aeLDD(P$lq&!x8z z86B1xI_lAbLL29=%hW%S@Xyw+2cf954~rXFo8f$BOP1&9CN7t{DzbQ~qO!^qo0Qb; zTD$!d*bN#?LvE4uP;~YF`KBJLBole_sWnWJXNMWD9#!EIQU>%3mVq|-z!G&`v72XL zUl?UDzW0jw*~gv@_olxvQG%$i!qa%@AGj(!kF6hau+I)ljVx5Y!H053!R$?&V}M9q z#Df{3k1M;FYua0iA0fzqGPB~e^*EF&`+V=8A^+ZE`i6S9^n9!=e)R!sz~BpGIjr~?<9xzLPA z$Sbh~3KR$zGO5mep~If-MfOPyZk6qn^o+=c_{_xVlNsk}5$%eP{!Ywwu0|+RDa=-P zSMi<0nP>f;v4IUPo~B@jvIt?47h60&a!C-ncs3*P3THLCF6FG>r{rbKFG#+%O0x2% zAaeTI_r7cI{VeLDrk&<1-I<2hk4Zgf&CMMdj^9fkNQ7WP<3F7JQ(*awC~xu8&v?`< z>3t{B(A#+9fHijM$niPgH+0#RjkS12eK*BXYs8t^Dd)xJ2hm*o`j1!r5thHs+1~AU z?|X;S)7u@ksjj1)eEV3);CZ%3UWQ zK`@)Yd75+ADp|kdr2$!ZTbs4*;1lM}3MpwK9ol)VJ_tN{f(R>XC`=w6r5Vt#UrArPj zs*iz4pHvROUg*$qGZ@UziZhvK3*Lc&^2NL3fBNW->6Y*ko#4ycXEI&M z(6eW;9ottfpqbw(;shXm-p^+Uqhx)<*!tLh-+HVKESv`%fagmfmqRH}j)ASfJDTE#Sxwd+-%k!=3LnueG5!_tyL=Y3A4>zc+1T38K)|uJ@lRvT>D~`RIUAfkPDAC)Of#cA*j#XY0SltK9>tQAB(^mhTlc)qW!>v3 z?-u1^z4#yBq@(LPS1@|&H*SyS#mv6#YY*>|@KCojt4kdV8G#GoCUfU7(M%zY=I%l! 
zkU#7Of|(UV4<{Ks{r;d6*k>`(;uxMR7)RArgE%g0a%IHJySW!YlK2te>Tcr8gI0T8 z$z>?$K+0t#doid?$k^_xw%wkIfpq!Z^EKh_7 zs;8~<$D`x?rQA0mbQMpaqZ}xqsN1xL9g=s7BoL2Qal`%(c8tH6YpRcycSIn-CjC+| zssq066#OfcRu1G6u@`m!;eS5BgA@*K74h7mpf(h{E6mK?lFdvsaqLofkrOxFePOh9 z8R6;HSSvGn+F^grRG_FeLS`TW1!A8nYy$;O(?6?{i$iC4-ToB)X>(#Z^gLU$tC{l4 zNVU}P9>yLP_z^Jz**iQ$^af^k$7mn_Tr2G7*h|cgMn_x@;i0=NI%_!2__>jydpIf@ ziSg&{Dl3REd{n<(!PkJoa!H#{pArxFCmrR9%Z5Z|{CtXilH|M_mCy-UiTplLkq`0O zzge=M@m&TxeVU@RO#s$ak+p5ZLVR3 zV6^9+x9#WhKl|RZ7}N#655>(k3HqJ4oY-e2SGfGY6&e)RqZ(q(E@K5LjPwlak|?6N zSHMT&QKSd(f!WWrkbc`>&+=;|;VgZN7+lxrf%lgEPkC!h?Stfe_> zHU`SaI>I-b9-1DU+<#zAQy!MOKGwp};VJnEJ@_;gFRN^OsBJ(`_f(qeUc_9_76*d% zw@m$4&t1Wl5fx$z&S;5lM-nI#f%yPTE3df(@|OT=Iy(t8DR)N2yq0jYod(hEYgMk2B>wR-5U|Gl9TABjUj~3Yu)|c z?8};IG^M;Xj+hZAy92KU*1_=K$vi;-X`FfFaRQ>tac^JhP-V7$W{cDJ^tVvihZG*7 zVi?Op7!dx5Ss=c)xiB##t7$v(ZrR;yt;jrz^}2Db*5Pe^d~`kTISMq4`pffu#}Aw> z`@nO0&d-5YheIto#%C$DmIT;~Y-;!zU1dwv7M)lQ^{X_DW>ii{=p5Ow!#vgkvk-f= zIShcEqrL~t&CMPCg;ofNHLC9o|CQlC?+#3OJFLl1Gngs0$mc|4kRk5*9_`~h$XjSf zXKnO`7&f+mut!(?8-(Y_b-P*IMUyE=zB1uyXHR_O?bNP2DRMQ_?~)ZdRq`e#vD|>( zCarN_GsTr*|?YvO$ zM2uiFd^fC7n6BP~TKoqqdxtqJKU3~ZNaqIB#0nJo{I2^2dZfQ3GKOOaWCx30pQ$&_ zr6xa)3b+fdchS#B{gN~@*eoMJ_Iaq}HBO`9PT~MzP^1Y-e+#4)z1E?Xm9X5uJs!2I z-e|jgy$1{a?t$(fLCc|P;=NO$9#Y80Y<>C*vEL#YZYIu981W#CxM?BH&3@uL^vhuL z)ie2Zim(TId&n8*&5x(ry~#dM8Lq3e0Rv_I(DpxA3qFiK2Dx{W8!o>kb!T$^VdVV` zd1GI2vQCR>J|m$f{<1+KD(ikU|7J~m6({|@I((6$s72Xf_NzEKcN!^|fxcf%#F#@< z6>W)|3^fuQ) zJya!fxN^J22&69$Hvbv5L6cIj0inYVqHo`kA@JBy*<=mQBAdXfO-~Hcp zuji5g_epp^CZvBy4pM=htoo#axjOrU#Wc0O3MyxS$ZHzg)cGUGH>@);1Uzy7TfFMB z#E&Bf0M{c53~j9OgwrAs><`D+!Z};6-^)-Ryav(T)D-&#J~4+Ou73Js)W}9=M32xw z&oNhf@zdx*dOEz5)zh2S09a+ah}&YO{-_u{4iveDz14&$Mp23Iw#}Eokcy*uYU^G? zyds~r25d8+JD8RGUdSZL=T7+k$gTPDQ$JkuIpkmzJkB8=X>+}36+l%Ls=wTL>`Fk zT=LD0XWbwS8TZc2Lhz%^x`$8{N?A4O5MLJ@Y%OQVirogUI9I(sY_g09;CZgDuGU4m znsHOTpK9{1<;LSP!G;xJ-0O*i6c5IhV$6V&D);Yn83MOl8*q=K`S!(aPcDJpTwXwl z?It@QVQA>;*}re!e^2=jhrXHTpJBJH$qY{pB}xm&hX*y)@-i0RF%te_9xGIPw~ia; zBqR!%GrZbNNtnsO&vn}^Fq!mre{8iNoQgu8BELU(C0#s|z;nX%@9(;TA^W~#I56O=!Oi`uVM-cC2-tLQO_dK06RW`@YV z#@S7bMpxpbmSGDv>*-iYLIEFEhshadEp)q3TRu5}EUqrP65xF@g&eRNz|z5{d?#VO_=cHn7EG=Uw) z`prQ+E$Q367FKI@ij%5S_I_H z+hYiBo&E999l*2wvY$yDUMpPw7a8A+LH&V1uiLZ>hdanN@ zaT|z(sVTUvRR}Hm@P(7G@BLiC)AN)ug+%-QjaBfNrY~!zah8?SWiC-rjLtr!**yYa z71wku<;Q>4b!C^v03Ia`qVCE@zow&c5sP=}LiOVhW|7cOv__Q12U!&eEB(xmI%bPt zmt!+8`tbT8V%DO0Roh8_`FzpeokF?ZpK8W-EDT?~a!YA|_qdzZ|MR$*;{+*#8d&|u zya1yuCkQW3!dXM+#0jAbyWe8Y$86>VN2@dV6KL<3`Mwk{ z?2s63->W3#etbf)Q(<3OZbcokKAXqaC}uXxE*d_sf5{NQqcf4(L#D^B{5wtl_mNhP zk)sw@V<;rh+IV&Mvo^h@jEd+ri|I)MyHn`;Qb~{FLx(4lBGx#~Uq&!Yo+}Q-sy<#-*sD&Jnh$$(25t$fhXr1odLg^m`n4`fHWpvP zcD&YhTT*fz*9}1V-;RftpU(*VtyI7LdwCQFeSgwjXq`%XsS{|^k0PHir>P4|dLI$i z+}KgjrrV5O)PiJ_iZR4PW_8Ac;4oIJxA1 z-hpdtu(gS&=fN_Q#h%Z9R8QrLnZ@2}p2|jAIb`*km za4NZOWtr;M06W6NADug=r|}RRLeGTBAJBymL4{>dQCpds9+2%niSjsOvpI&0*6~<2 z*^ECe9xxh@w>iEt#FgHrU)uzct0=FNjXz|BRr$@^nS=cvOR5(vpxK zA$M|%6o0UQmWQGx0>LwAqSYO2-~4pa4Af~e<)h$Hfh&CY+N)6Nu2U2yx5!#mROQhj zN;QZ2&F>H1Xk@gm2uvD3S0zp1PLdIIgZQ~^bI^~D_q+mLu@4glZ(`%y-E$OTzJr~l zQ+4n~F7=liu-18p)7$t0nS}+J1Kja$Z}tyHjUMHKqyn7A!7%**q4wL?H&Vc98nS{r zvkz2mM={niEVB>8ay2){;)0;8?=v$f#_gb*DS&er@R#TZh(b|F4VI;^WQ^WdsAD=k zxfB~lVV_|vl1|3NGlUus@TFC~YDX6ZY%R?FNE1L6e$;wmS`?NqB~R0Gb7s^%jq2ii z-ziD{im*V(jQx@`#3~u2<3)axN(152hewDCzT(HFOY73lgHzdtQJh8l-sTe_Za% zQarKt`Z#|6g14FGjg^$v{Peyih2`H5*mJmV2rf!*rYaNsHWQ7roTf52{OS0#ED1^k ztVG9skBNS?CI?$#!I9OODRSYFf5xNWk41`Onog7XyX1}ccL!5AKXRKqX;6$tF_@;% zdb-t(htHM8lsk=s^(cA#2!9nwPs)LFD;>cFji#GoWI6=lS+sK 
zZ;E-9WN)K>SkX!Ns`W$0Off0Q^jHp(%8ADaV2II~h-!+5I!?}7tmgE56;9QBg;)Q&;L{qs#An<;F@PBe-E1wdHr1@#rc&4`F3dq8oY_` zxA1efs{cLvv5Y#igb&2ew#JnXIx;HbU7Z_P|Jg(593+1fO0_^6_0>`ZY)LZQX5R4> zTkL2AXv+t;Mr`)a_N1d6%&2Qpd6nM=M#sv8Cm(RU;ld8_n^FW8elIk!3a_3$2XnLtST?71W6}QVM5}JRahxSt@hzj zu6`nBdl0F^?b=JEUK1f&SYv;Y*AmMqsNZsZq-2^v=)heq23D8$mJ4`&iL^wSValBW z&%t$j6U-EZ6-!^|mn8_w3pXRe&aZYV2T4*nx;p-rchgwfmS3xo`x@(9%oGj4l;2Fs z@(FG*tE7Ib_Jj}@$yQ(ZQvA>>MWELZt0X{`?S`|_p#8Rt5G#~_=m#unZT1KZ)a8q$ zO(u#=XbFb*mXTwV9QY4Kpie=iyCUk|r(D#0QaZVX7vP7!P-k{SE%Jd3y(+Q5h_pJt zA>5?S(kk;uNdPQi-Y8F!{raB9+6>;QQy9^i%Ub`R=yu^ESQC`M);f;z9kdbMH-I{k zncKAuru>YVenKJg@WGqf8(+{fG}F!8f2MG9@d~X1lSW}Y#&4ulaRcR?|z0wl@RgW9EX5< z=>e@zdmspCDP&|Bn>&vO&Cf@(2|KkBZS0O|spU58)hqRw7UGBc5kZVKe1yq{bCpPa zKZn`+L@1{Ta!|4Ln9dPqiZX`*R}kae|gQDDtWw-GD6pU>y@7C2MpU<~v}SvfXax$&8C3sLX+VOa1V{#Wd~_Z<*bDiOxC(q&ueTmJNKU<0A%< zG`Kf_oq~Z-^vH~KRP#+$RsS*u9p5g!8{Jw#(k2{j@hwhU0k!Hes-%z-zFimOMP7zW zfIZyfp{*Ktp{PvX@sy-?minVJZ5L50R0>`YS{X~oSjv%<8^VgxfY}j*Eh;CXZKdvh ze2+Cy+g`Lu_eqdE$ZNo_B@7K2n#Z`B7I1hKDdR~Bt5g!N2vANZJuR-LM zPx}njnab-=w6;E2NVdw4>D`QNjKVSR%pUBBQKDV7f=;p$`TAErB-rP`%Eu0khZq|d zg;BGo$2aPp9*%@_{)k)r`K*>2pbROV6FTx?TE{|rhp>}P%E>9!P-^5M%CeMgkl%lv z!1{{=#~(<+%|=H?_{*>%T;_3TY{@T%QkK63yvIvN_&+iq;RhECq%JKZ8P;<;X_Nkkl(<=te0hBtbo3k^BVy zRPkWD5W&5XqJ5UB=c3l|aeUfsaY3(0IeB$5Rf=VT$i={Nn%xu(<2EWhxc&xlD7Vv3 z3~A*uOv~s>kp0+&kaQV9K*doRf;^w(MrwHt^YVFr07SJdIi^m2AXPx1bc2XzQQH3k zU>Gr>Gq@D|SsX?+va&H{5XjT;F$jt6Nrd#<%w&9XPSLP{>*3HW>ZP&=GtdR{vG@mc z*hq>64(9$Z=4PGO6#D@^J7QbZn{8?yw@^1FIQlSYj_G@NnyxADy%v=7{$bHo9q;~U@{y0pZ``q$P+%TMc2+-oL@rO>k42|pk7#Oo6Y zVz5K}r01{H%5gQ>$F(9DT!tex|CggX5mw0>45Q24T!8b7}(jYCVf>xQQb=bSAUGwhlnHx8h zSx71%E=DRAL@r<3U&*Su_`^T)k;6$JS}gW`Z9}nOeph9dU!*+>g%7?g5`a4DCXB{(4i2>ptvMlSKQ0+SG5he|TE8!a#= z#V*s?(RUVn*Hs?%PrqxxUzR87jt-Ba(3(iTV)dmH;})~$-MyJ=6UDJ~X7|}N0R63D z75lyk0h2M2DJKok2QHGg)jzYLS-NoDgMy9ZH65^RjxV|d>igUrR zg9SfY81r?(gnZPDFkwaV!2(^YXEevEkwL?cx{!7a;>m9gKD=S)g7t(>wI&h3ATBG` zfQm(;k(+xzQz=lefU5Sy-MRnA*;|HHxwc)SFqy~{q`Q%p2I)q+I|Kxz87)~eD8D>B zHcuV0s$K|5c0Ajz6p#OEBLB*In6z5*E9N3@e=4tl7W{NOt4i3(PB6PB-sg>VCW{!m zR^8iLo5{M-q4@5w2O}sB4ld@_cH3Pveiy?c*!?Y6I)lDbF&_-^8Sj#x{5eRxK1k(= znp3BF=3}W$C|d4_`F`paFKEc?1ZR12Z(6ORgk6^pzv%F9Ipps22NmVkhMMr#`{r!| z3-LWH0`e2udczS|jNKT;SH)49Dgx5B3&h`p%P-1)e7oru=Xi!#ByC2ErLoO3M^B6$ zH2$XeTbd2R6@0ld=77=%L#GpFnVY}KyErYIc$1-*Fu&)rWAl}jkev*UsZgBp07uWF zFEW4!^@of*ltsQueaC(5WD7+}!vy1jVxkfvgz|@JLeI#%s{1*3%2x!Ag70`7^~-q3 z)A^;)M@}Y7)il;(CgrB(Eh9oIkf0OTBRzg)XxyUAOVS1s#kkRy#)Y@oh?Z$qKax6l zs`sSX>*w85M}9Yc@>~^QQ6c`t^VG3m@NEyNy|;-R)GpulXtE56VV+(kt+4X;yC^!3 z?Kop+ITx}eV|%8RZhIjSJH+zrAuj!NKN}Im%d+Mq@D&O6P6Sa{`*pt=b+3&s2f_?? 
z(v>9@nUE2WyC%o3wB*Qe&Y;-IxMtnDgU$uSfHA4CW6YW^iG9YR3purT0=O6ROW*GR z_3of)Y6%AlY35HrOI*)zYQlzBrr#vAr*?Sxblrp zt`q&zwBMQS0yvJezWd+BfKc zG%Do2p~IYG{RlaZuNRg1D_}`&BpNtY94FbDZtl*VX~}GkIfEO zPitO*>PhlFndcWxpT-&wpW4i?bzM~4(%I`0=R=;Dn2+)%aWb~By!*S^MCX1 zeobag6Or6zhI1?jTpEh$MA@ukP?_E!=LT0sf1o`&Z7TBo{%myBc0J>bt4s08HCcZO zM=}nEy@}CntZBc$r|>R?%Tu#WHtw^MjQ@y&YrTJ9f-ZNO(y${^qoDqXLA{i)Lokmr zh)2Hd#IiPQrN+>2eagnq`~pPi{qT=j-`{_ECOkbGKqr)(U7yDIbV1KdG~s!j=R3vI znVNLJ4D20jBn-PmTV_Ss3hqchol_s`9O3CZgw-%kc)JKmpb0INYT-;08yPZ1jhc7b~3Jj%!sd1 zDI77`F9ZOhZ!?-LopBft>7BG<@x$D;3!N{XffaK4BSWNVF1m5L0I^2a{?Xa)>{PXa z37aoqhjvdF2={5@(=@<4(RivyHzem#XN1l0a)4M>C4^tD*6WIdoXh;(e#jxtEbUja`H3cYGqt^X%l)QyiPS+Z!7&&a0EK+2JlsWE0v{a$k$cZtf@* zGMk%Ndo;oWMOayl5@MrM29w|m(R$^%8&Dw7Kd^d|GrXxQ6Zn~TFg z!MEa}oAhKdQpTZP4F(+@gm2JXx5mFa;ORHO=U^zB$BRgg)IA>P0R{ZsoB1zztw+R> z8hl1Sq`ol-`3iPxYhN!@6&Of}VLUY~JVQhxQd9N`)Q>d1>B_Yh}lT2l|T7$^_$kF_7c0uXD8HC$_vI zDSQiuNRNtoLq1^(-B{|Mwj3?yym))5x%rt?`KBoB??nOyBp?Z6ug0Jd;P$SVetIBI zqLFG0?Udq%;FsGA{B2^C`O3SOC_5YZtx4Kkd>u+%0ZTG z=2wPdR?)CuRb9HK8@`P(-5kpczN=*X`t|Erbsj?Vm^KM6b}xPaAqgqBHI^^f6qrG< zJ~>3HAsHSkGvGttL0H1#W$YNT(}i~iVPh+Gn_SwhAS~JPq1WHG_d>DQd9M)`TpPbX zOne%^2JjeJg!P8aw2GIQRNsMm)WZVK9n_o`3>>_iaR>T(s}C_Mo*W(`En&B}DlpkD z1>>rW@TPKRLNc{WbjsdSDD53n$u!g4JMZ={PH(P605NI>1`V?2aj&KPfqN^dm5A6tC=NPQO>}KFg#Z?v(D99$T>}ePtg(J=76-q`m^^u2=_XP@U1p3m1r_x(?rZI_t+Mg zSe^GZorC$<=-gMACyxrxVr47dL%Kg4u9%=B)VL!%;QF#5&vW&AwLfxmexzcBj4Q2t><*#Uk=PSkHFCkqZWKrq zh?23M`P)uq;4!f*iIxmS1s*@@Z**gs@LqOPupj%Og>OOXmz6efHM+nq_VlEH;y;N& zN*BrJbmwCRl~eQ!Z))`jIGAN-RSyXcjG<=Evb?;R8|d(A+U)~cs)y|z;VvFagL`RFe?QHss|5WY=p8k)L(_r% zud1Fey0f>%`eL?COWJd0RNb^j1Meh|NjpHsVRx8s)JAGB&0)V@P48SPdFNEDa^Tmt8Zk zTLhe|0uv6oFmiW2DN+wqgmA0*Z$Fw^2g7EC$_J6+oq>QF5aH0g2yG~~O+_Jk;DBo8 zqM{P@fC?>o@Z2-b_i{K>$JpuB7?Bx%KcSyLTpc6xQ7_sXIixuhDs658mE#cU1&$2U zC=eA*Ubd`?1xzNe0-2o}lMcu(73K_zAAfR0GvaiJBoDuHz*}CfbxtBp=L$t*C!iiM zi4m2#z%do@%ZtTR0C7^cPIOhkm&_4j_8TlNZ2+P>WtKyxe|wlOV3zUZzasvTSJ?V&5@D~RSg2}?#XzD>qR8##Hv6+C*{_*)ET9rc(95`a|gAUxRe4n{>4rwx0r@qr4Mx-N+2w+6}^@rLCg_sNaJ=~q6T>*z-x(`$%ZlV#D$U>Lo%%86%n>KSKD0ANSRa zbO-7pZVP}Vn#F>%PjC}?n3_DNgIEhXUB!j=JSIWv zgNF%;8UelrjLUXZ-KQ;w1uD7OQwDi&PlxFMZpf!6?%|;RfOr)Ny6_xkMHAy75N^}JC4X#QmYh`MiMwrG)*q504JF%N9$Ejz=IW%c2cBE&FBA_=KR~wcbw?J z2PjRU^?C0wKGKAx*fBIOV>01xu9L^(w;GetvA~2uqYR=etEuXW=em?|OhC+&}Cf7He@#x16z%Jn~gp6dtl(8Gd6^+SKas(~$BwmXZomQ)CrEu&&qK^OeQ_BP0JXJV?=s#Kj^EXt;wv#I*ft;&*ttWy^ zZ%!<3UC^fD-O0muWi%Q2`Z^1|?PZa`h(LW8kY^6V8B)C$ufC%I0Syz>we{pn&3{fP z{=xMG0t@T+M1EefvLXmoeU@aH?T;qEAdioLMAl_q@|l#9CA=THPyNxe+$sQ@5|~y( zJcm@_Y_7;u4U}MRi;h-Yg*>?$ul41@bp^dqe`{(QYN=in0Fp&-&&_NI&^!JzXtv&; zpU^cXCtP~5fv!Voq4@fB)efWf8_rCy9vm5{p!Wayvcwbwc0WM5=LKQU&i|3^MTO~jL3`WiroB7P*%e)iR^R!idnahM6Oi#v#sb2R~~xv47dIFdSlI@1VZ&+ldSo- z>%E8S6D?~AFyWxrXQ^j7Z9-Oj{qU=HzPZiVg=Q4@V z;lJeOiNPD}*3$n^$@uR-M@JBnUO(M$jy~LPkxViiofFWy+-|(9_8>RWz%w`-TKiT7 zmCk^m!l;=bYr&|d-l&Wn0j3b7b2O$)EhWq!*qqGS?+}lRh9eM>U=JeRJy-v3{(;A)(zFZS_5 z(B?Lu`CnaMi4kQ2uuY&u|K>v#>zjxl?L$7B#`+ftoxSC4nX#Ht1-w6<3QB!3HR$Xx zP)50Zuh^tkKK1xOUq|tWjy1XF9DeB4bUu=?LF_=^E;=q9)#A3}wK&~nQ0@u@$JGu_ z?Uo9mGhr%LiNbR^-0kvs^B7EzFZW*Y;aPcBlZn3Zw2mq7D{D!YXl?=mM4N!vf^GNy z_lNh_)&YQOe6{6E$a}RM_~~J@pCS6G>!rjJ^4CeUz%a|}bid0@qe4XzawDIm!QtTo zz=h5&5+k)GN9<{pn8bHh`1ikDhDsg#8@US!z9A0{)3OdTYgU!KmHI@^*>mdEfOgVv zKNQB97+PK=w|L~$xe%ybM%nJW=vyEsyl{TN^X9~BZ0Zy9{V$CW=X$&pW;IDtIZgi& zdG;G1Vc*+GO97vsJixf_9rNrc5zPA_EeUU(jU6MIB|dKj1bS|OC~eKuGS7Q&m$51= zfYd(cjD(Mew=}|cFoVSYE=Opm;;qwZm8l)f+|p8C(V9kEF?Px!DFIJD@P&m%UO`>e z*$dTWB}NOSCFR036SIHyu>U%oW*I7vX~yNU6VB{TUfYQjnTfMS`9GgBR3#)ZBI#{< zcAt#Y@$2J6xrB>*7YvP`j#@ERFN){Umlekcal#IWJHzp;T69Q(wAF 
z&64O%)d6x(3Xpxe7Xy%qrcefo)2$~zDDue!Uef|)14jzmh_YgyS{E-b0T5w6xX-)k z3IdkDO-B$ibj)25t$8{FP$XIjw`)-rPIqTNL_YDfLR}*B$)tA4XI$qj{wGVh5DP4Y$TxXQ$f=3Ce9ZN_49#xpilvD#Rl0 zQF5Ou&7$9NV8BrV1E^|hzS_`hFNmhweokj*&#epXXA?|)zvmfKb-G}j5i0plkP(&u z2eE#yJOg|jASXbVX(^93SnV-Ul@Wy(5(f=;} ze#4?A9SF`S3kV3XK6olu0=AkaX1xwpdV=kSq6Gzu)b#aJR2Jp|gCX6#KE2P*^q&3Y zFxyVtEKBwD=}(tDr6o6vqNy`tmhisnb^%D*PYj6CQtexU0*ika{J_nTvxP#uE(9Of ztR*uP9)2N}uW-#h8LhL=AA4A!HhTpPuh;!t!M!=}6PkV$-Ufq0EA!xEB~N1F6nUb> zXxU5a68q_+3$l}=H6{amf7|fK{~dVSc_47L?b-l60iX=>f>roq=g}_Ey^))>uSN%E zTv6iYAbL&R9UdOSY^tiNV8J}sL2$I;JgR3ax<9$ogVCtjZf$LCU90GYZ%hr2fHKyW zk=rMBV-?`4QZhAhAVs-#1qwkmup%llYNZ=ax!df%t1tK`Uei@`L<>NE!sBsXDAJLA9 z8I}VMt5`bl6JWJWoa%ly_nGBDwjC3*+0D)Epjfe81-(#N{#n;(xB8;p3(HDGjV%z;-~n8BWNOPa~$ai19dE z6!SCsbK__gW20N9pLa+1O7knP@7j-KZog~KDeoP|06tUp5D~a{c)rVvoxmvpcfa4q zDU6t;zmUo)Srdo^@APA1iRvow|NV;=A^zcU%vV`|ri?Qji&N4<7R}klkbbrlas9zX z3KS&b9U9q(NyFF|>#AAJ2XXOv))SrE8B{O$Zm)B=&}ILsH`-zRvh7Z$1ug>`G**s| zfF?Yq$Zb86*`V`o_Lu+pKHxC}AK~mfGI zt*kjhft^1+<@WkTawyF|87S>>d$)>J!}ISJo(tr{o)rndTl1IQPw|Z!ulAa34$V*W zddtTY6%|Ev2cbs8kKJ-P7#RB0Tdk9qu+w{pA%1gq;A=lOpp|5!hto?kS4RH(qi=nT z1!e)fe1o}ZFLUB8##>HtCj5^BxiV<<2?DRHhYEz3BX#L!5(l-s7i`9B{L+~ohCTf| zhVqMN=I8fHunpm$1qKB{yn(m(#=TldLXGsw*Yuk}0W8=ESbS3?Gy)cM$lW7bVY+Jc z@Lq(Lkn9^O_~hhdUCc?5ZUiON=(Ii6dn#O=h8|8L2WS7c;WaT;%UUhoS7gWo&1bv?Eb)Jzy4)*h(#RiQ zBtk;MpZ8q4FtXS0&icu1f4jTG4CC!LH#fO`PVEK3P#`DwknjD&=Gzrg)m;F}W)Kcj zc&1D0YiO(pS5q_iENLUeK{`N55h(MU>7Tp!FFqP%5sXGr3+Ba%ea~W;>@JTyihXX{ z#`rOSO%5_Hdsf=V0!B0^{S)Sd7p@pKI}%Fa*ni)26BJNc&qy%4&w%P6YQbk;WCj=? znK3wa;LQy!1flO2-ZwyiGV4_`ozE-Q90Y85d-L(r<}jht;0-MloZ9Sb=>ZF^SKme#2XEGpJWn~Wmw8|Q(1+kw|D|Gw@dqlxk9Czdj8GD{w3H=;#b ztKg@`!ZIqFne!yHz|eA%FuLlj);bya!LR>a`mfhA<{HLP`SS(oUd>OH<6csydr`!~ z%*^(5E=m^{p(dny0Q9(mq3LKWo#MR$7;y@0LLvX7avb!^oJ~CnWj}9C;rnf*`fPw z+W&LAND%2=iz?N7$lky^j)8FBSOoFYZ_y5W+J71!uDjPo8v!xbO(2+=3&c0y3%YHY z8%v=5Hi0;y1(5YoH`8l=N=WFbUk6g$$g? zW8C(?u2blLeN9IW1PLdf_CC&=%6+DlRZ+o#!Lv+l0rON|x&ev`7Cp{-kD$dRB}J{R zqB2ZAqadwO7=2GndD#4Y($xN6WQKqO7*w%NZMEO2)AfPyf;X4aS4;*AUE^m4GLI(j zc*%JK>LSQxly@GFqvSrfu#*%oDT>uiCU+fpC7(GG;W%Hk5rq<7Rh6rQJXMx;$jlHo zIZhk7{?DZkl!I1CN~h~VJ8;MYGemsG?gd8y<2d_eSE#gVT4p792J;+XQ>g&R`6T;Y z{2{zAIf!0=>m#e=sWp5LD2Sf=dSEfp!qU>aI4(gzw%v~FmrIFwE6nC1 zUkQs`_mc+q|C+*UjG#dY_*yr=X~xf2&&F%qYgJ5xbURCLFtg^CbDqTXic&bcabH(( ztx(I;L!2r~<82q;2$W((E|z6JIQZ0BHs@olXDWJMg3>`EnOkFf^V~KVK``%rMpeCt zVC!<@pyhWFqR~YnYV=I|Y(WK?P$i+)h3fAJDb5d3T0rmXdNT>=*!S%#D7T}yxMEYI zx}Zib+qb?L>;-WQs`tw#F;f((cWgbMY0ASxzU%n=t0|L}8P)dz5+QEEYkBi<(2Nwf z4)V^@VnghUUqEAhx=rfq(lY+rOBtvWBz{BqS>qSaCk?Fbc`Tb;r^3CP`l7uqkD^`W zr=Z;+#hDevB=x^|lD%RR^4?*JMx$3O^LDjI3I{V*xx}k!;JhBgU_=83EnzCMnJCXq z`1DanQ#n3e(k8@#5db*xlV4hBo(AFn(Yli17g$h>AGIS~jq%4N3*70DRbL23E4u7R z)=;#|@Js@gb$S)1m%=uM4 z01;h*#Qz4=pW_Qm@)StDGu7W#-wgRAkWJvSdM9UYuYh^M|~Wt6zQeE3N>fu{TJ z%!H|eN@B!)9wau1bg=wy{R0&$Bq4f1QTuF>m=0O}W<^@-^{9Z>o3k9eZ$nWjyc0C3 zw}$xYJ>sdqAH#WXv2DbdawiT?d$G|65TAsIcbzS2?D~Jsxr`RgT-p+gck8-I`+AiT zFW@ZJKXZoAzcH$qJo#>fgtTyFrYJX|pTW5LRA5A|zfC?seBl>vkvz*}q0g0DQ7!&m zYY-PSoUcXG%0?K4>+kyUIs_q3@U9i-wTsIpK=Ja!aTzp#gUW4`+(#P6?KToFQN$X$rM(dF9{v!{?G& z#>@|^?5ERp;)Q9=yDV=lpruqJHEXKH&3^=ojSpKBemCkaT=wjThoJ4NCw#-UUJR@{ zo%zglMphWd`?S%GaN*nCAyeK%OwE&Qr&2$-qwTe0h?f5hZ#tvP{!QL1vZN$ffW4;Y zN#ywXQFN!~5j(bwEqJ+KP4s9Tj|ii}vN6#9sZ7WM>A!!$Jd7_Q6?9$_7u?K^yH`Ok zINeS6x=?-Ks_$d44^lrj&n9KUim34M@yAC-mTa_>+P9yieM!J4By5^|se{19 zgzGt2C?$6Gwtc_>i~Ptu+5+NKRZlg0pQ!)W@Y8;WRp`5pUfg_TY2Z2De8Gk5IaLK3r~q*?*k{v&XTS$XpS`br`TL_E~Ma{G27xw*HjM zwTd12txUx+-##eTV=~wxSKGTmQU6KR!7*K$&P*0-Vp57|2L8uNOJrT5pKy1e^Y8CQ zB}+xLO8jIv;W`UmAq0TI^pRvH^C7)5fEG3aOk{BPQMP+=_|$?&U^FF#u_pRiVrew! 
zG{nB7CU(O&&Qed}u zW@V-N9gMl!^7{OM+jT?jIbImRpaRgK92YN7Hv-ExffvNmGiIw3oWTBmh53?*=M17S zF`%-sW+G|@UT;zux5I)|ty6QZ57Li~U#W*G!Xc1fzj(QW862=Os0Y5(?aT8qHrG%3 z-w0UjE{GYvWEMno{Dr4JTZ2ahfAIP>%~lU009Y#4<%W4h-`c+JsJ+5XoNk{Q9LPvd}Ghbju?8;(fe;(q$eiZW>GRx4p_ zOk=nNG#w&g;|ZN0jD78AANT<^B4!%xYCdfTsLO1=>mnZc;3W0{bYXII=!y0KAdyDi z9(}%(8ZQFAv)C`!5K3|4{`}FFu*^N5Lx%uUuE&r3JOCQXXUp#tO9Ka;5Ks;9#Z$kx z;S;5fK*FI61Qw3*jUM|de$B_Ue!IV($z-}c&Lm-ngaGO1IUXVawTkWn8esfxP}1rQ zOs5@2O9b(heX0rzXdj5>hmB87DK6&K&pI}BbH)O&CKf9&l(H=+_JH9dz1(4_mY7J` z%))DSi_>3&u&V2l2`$ZE_;<2ex&=StYWD-R+UzU2TTg30>9YpvmXx8}p&(_NAGVhy zBobMM-MxQkLm)$?jWr^5rvpSJTVp)ROygT6b(tD>MN#3N6HMk0liAicUTWQqhP|Xn zN=0|D&wRhlUQid!YQjD+m3a|wcL*nE^pcsYy`D>Qu}P@xy!4F z07{sN^fJD#p#-MbHOI~M(Xi6e36lBbcUTP$6n=0JtH_q+b+oognU&l=t3Z-b^8G)- zNlGH~aEO#zE~n$fuIP7jrPiB$vi_Y}c7?rfh3{tzG=F|>la;w1eIr@ z<$`^`&@Pz^-siu{@sTZT4))GdNDP=Ls+SwJ#21UxA`_$14i^_Qb*f05mlYK~?zTzE zzgy-P5lI)H>85P$Sm9$}2wS4#KE}+@eV$FT39#ogNfkC7QB7EDK1{bv2M9FL85f)>}oBU=XHyO*g>%ojgz^-6eYd+1N8aLSuBp{+wCd!itMy8)LF)pTwAH1iOOTe$lY~czf+|UxuAYB z*UivQ(=07Umw<7|Sy{j9mfw8ipud$i6#XOZg==#tgE%~cHFFS4|byG3w#?m4Ke#k;I!^P-rdF{~<@KXFXx;0vxs0RMgS zr_TzZQulsBHP?x2%iW=(97;H0y~#gh0Bv@(^W=%512BBB7=b?)0_Tj~OGx?DyT+e= zP$Q3XuLM#w>}EWAfQ4F`Zka-yOzY4L?y(8^AcW@47jF)Fj=>5eU3iZ@Jviy^XG{0X zsetA6Ig!ev8>{aR6JLLm;NQwEN;#yAEdj>WaURG&}7lei^k!Y|^)lhia2Qd~)gsl35 zu!M2=FD~2jEtebq2FFNlcHuos@;Cf{GMsVp_D8f+LXqM~14|LF)P}+tG`yqnrmP~Ki_naW!P3B1(|yZa z=UQn<0S+~n3UW_b1!h|XCW8$M|5LUs5d@lipG;yRX)~5cIGdLMRzdknXwM9O)qF@; zphfb!&~jaUf%ol)?c~RWBA27e3o1R5SvHppEq+WUVR-(ZKXlS3HI0IK=}Jr5&$_VX zLj&l)#gD78*9uzwelq6#oC?;G+;lu~x8X0uZ`aA}{WCu@H^Kg~f?|QVBU@g0YZZ?- z?WdQCTQA|IhHakM9k?yew-$ZlwnEm|z?7I<_CVmT_+3VTSl&`afQMJ3&w|^sGB+8% z@a%Ng472%^Q?ZWhLUe~#OG6{po}+u-sQh6wK+8V(q)#LGV=v_?-L4GG{MWHH479jq z=t8>bT4Dzr6Rdu!_{!Rtb9p`=wdfeHr%vK4CF?6Tjva(T(!(;K21UB~_qYfR=I?xng75B5XJI__h&IJ0ECU3n=OSd~5M_+7+S?{-*|psfg|1=_i>d@sJ} zMQqe^fIdIFVc3?{@2t8w`C+rpM}-AfFunAejbil(5Klb+R{!SW@Ld`}WwBq9{qHd4 zc~HQ6=O<4}cV-leZhn@ldG|SVnM&-GdGFj-RE)%>naQf{srQ9F*z61J!5O!;rZ= z``d;CcGAnx``f z&uy(v<-KK{DFKxhtTl%V^lcWcO zPepXwV%GIqLE#;Z{&%;cpE|ex_Xp1d(XWsvw|PIRpjddk;!-p(sj9i;zfhDi`6a)q zgq=6<`ilF7XRN);uUdbUU^3*_N6&^jF>3|wG6K6?=RfgbGtJe&2_aWK`eAyfoc6Bom(|CQ;lROFhabFI>rsYtVIKQ$s0Ji z_Pjp>zEb13#stZop0u)kaQ?(qvP z3>L==30#eC^|_yM=@=l$pNy30a-!&M?cWuOPuzH@onp&sQZWnt-=lQ^(9za7cUi4O z68FiUI+c|CDtl{-yt5DPXlNHLKVKu&h(F`!Io4M&*mP7@d3F5uuOwKo=G;mvai0Xo z)cy+1|69M43xi1b#EnNXw`En?XclgSHpzil92>qW$E@oq{(UGvJJssr`$E!o6|JO^ zqVi}2&62Wkm>oJ|2P+e1vu#y1xgiPkWxe-am)6w~X%NvQ(Z|~RF7xR6r?+Q5)#2Lb zr{Ps0eCj0-oC>8ePipN4Xkm1mkgp(v4dP@)qeBg8)+$s>(H4pmVUdkc(Mtey6Y0(}p!$^Y~ry@O-&lWmgRis!0L$aQ{vl z-^;L4sklt|YnC|iFG85jz};T0$Kfu&@oX`%=u;?9>3OWe-9AhIT#Hz3vin=Q=;n^t zGL093*`_x90BrdFUVPQ5ZQ}zj_wt}Vz4lY5to&NLLX-Zs6p=jMaKFMgGd<(ygP*Ec z>NGSgntq?WU~ z2>(;#>0`lD#=i$IbvM6mcC;FgofW)w*hLBCV;@cpDL;^9;9)VrY(8-616|Fa3k~SR z1#%x2!~lR|iq;Pm7g&wZ;vOxjwYAzG8@k4R-a15EBg|a zGFg^SFVU`OXP^Y;$5}N&ZFQHyx@JYNv^0f5J9do=bX{zLW9M=B$$HJ7->bmG;-}k> z>|a@jRF9l;St7)lsIc(PtpAivY0j6h_m-;cx*QcQSLelx9cWZqo3Jy9oM}~VynONF zjXUmm+Z{&2o5l##@}`g~Op#7Uca_hXunnWMKIP2f=k?NhL~}KPEpVC4PQ?3WfdsqV zRD?Y*E2~4%t#p6}3iuqa4SdG=*~USpK=zc6oxp@Rxw-Ky-wyuxp@f6CVwXP!HDU<{ z(7xmw=AOK(sDLB(s@BxC)&TEi>CWh(?_VrDVVzMmHQY^LPAVd(Kb;u0d$htB*@=s? 
zBd3)%_w20%n@)n)g&LBjb-kw#o5Z{``NeY7BJ18a%&P!x>SGk6@jiBPYe|!Y!rgbR z++YA(zGX0Jz(9YEgHJP%P!WSQg>;$th%m06;C*E}^oB8G|0`Py)i1UNZ9-L4L4YAmDv*D;pq7Q&@Ne{P z8RjX%9=UJB5*`*nnTGDTeO~{xb<~R$3<&I`<~JoNBL!67pq**-_xC4B*`MK-Dh?Ry%|PPuhTZ!Ay-!oXS|xz^+QV;3xns zl!1pQ89IgVbp3ahM?)E2YsB|s()&)>y9tr~5md-yPwAf77By;9kS+sgD&d7;KM`VZ zM@20WC3a&ZF*s3V)4*cEl^`}Zx9O)B?xPME=v@b3CM+8*ddk+t~@8}5vjENQ3S+`X0uh!ca&{i*bS^*;_y}beu3^n%`T29&mkx<5fc8wix zU-c1i(v36n+2y%cq2C0}N*=IF+?b-QU}a*;cBr3EtXuT+xw#A^ui75@!r7psu2X1| zeG-6#0Rx8+cyIYuW^8PYWVi~5UQ@GWU_V0e7Kpo)_(cL*@@O!J&>=XC&0Xu?h!k8M zk{9V73*qm;6bTdjYj^BrR{VhQt$P=HHB7_d4d#0LU(W!}oA>^GTk~K5(Q`vXB_bG* zif##P6FHmv$@%;N)HdEKQRH-5QgB2vbvTtzwas`m^t=QQ_mj29oMc09=p*818^c*$ z>>a*BRzqZ$;2xx2F&)_zX;RvUH7owc9M!t>&ia z=;*u`gRKbn>f=<*! zIP?r@&!^eUBrQNpn3SqvU2?I+`DP{KK!0R7bGYXw_zYS2QvmzkljDyKQE(RC3?!l) zqO-VpAHMNZz!n?@>tB*@kE<}iTT#K0wv^G=vUABu-s%nh@a_F_D6vm- zP9}Rt5}Nz+MX<*-BGQB0-!A=5Y0ukyp%BELXa27oEL|6qXBKX$5>zNusluLrJpLl2 zG3!H-6EF}`?k)o7eW==z7eqUCzUra#!(Ii6J=6jaQqc>d3p(Zfv9(fZUXYEq-tkVr z*xOGA2+X8Cl$6np*%+>$NFR^7OW_93RxvYZ&Nn&Dd?$G7a1l`ggYN!V@XfP%YtY$- zP8z;6m>hyFrvr5s)1G%Boe(}rszD8$6CT@wB0ipJ*`x8Q|HY{8*W>m{U1r!Q9*`gb zUM*(?b$0lZ?#`<{*i4s=1CKFovwm!WDa!PJ>vdKzxvF=oshQdZhB@t|cXqNoR?EEr zd~cK({@e@}uSVN;eTvc_HcyUp&q3FwTJV05SJXhD-Q8wm@2Og?gQ$ugb{5g6^dEkv zO1B1NfDn2ZR>t@XrPp>-hAxrxaw>@q96R{I=*mj8U#lczim3ZVmVCN^DiT~H(7-3pqEithWG1?Q*IAH8Ts{oSp?`;-lEmm?cnubQ2-Uw(NrY|Unm@ep z#{d29T}>$Knz2Ob)R5pdi*gK4r>nFOHj>)F<=~%X!x)L2qBB+ux8{xP58M)Z?)GAP z1`_Tr;|#FQpqe>g)%>J%NHLLR6v}Ckq7%x{34J{4CPrcz%#SUFq|8D!0)0Lf%NhjD zzPVbwn{*~8$!@gj2^f^anKI_|9H#)=A<8k8EEAk~{>QJZnaibP$Z7%lVjVR8a+bbJ zR3c^Uese|{tBf_7;ayDB09QX=E|sDMaX3^!K?u+&xg{nmwRtnN3rU?{GnuM~*CGYsS2wAnm^7<55UAsRmRSx_O*XmCw5FW92Tb{D32@E zy7s0RDFTVTX@^U>Sy(!rwD{>vC6)c=DvgAr2L4YnHn|0oURg`u6|zBQ&$4gacpjHh}mt+H4SH+(b^Vm4=x1t|p%^%MrfsvGTB z$yBkS#R^HQ0KC0tXIE=8JXP^jZ3J{fo=WE-^_G$S6u0d1YG%*HW?-ntQ)$BX?DO-s z3$oSLd&fLe&d_<&oP0tl>s`R^pX`2yX26SxJ7*lu<*pUE9$5RhL2R$>&JW$p>}hMU zO9WrqLbc4-;>CL*gRUmdJqhiLD(494gLw(8H`>JlY1nXR;MS+lQz41;zA4=BrDwEp(sy%q;o9kUd@T0o4V7cD720aWje(U4ppn3hr{GDE# z!~rcjd&4%JS;2!xQ*}hV%N|`FK85e&LUSltlDQt|qmLMP|1mGK9DO$GNLqWj-!@^p z;G@WVb%w8g|#azII@u8SN*+>+{EiDPblrmD45 zrZcE%RQ+U2dbX&YB9kx$TjVZm)8SgiD|>U9p#R6$!+K&^IBViv>(|Q=tJ*DORa@<7 zk<5fe4&_xsjCr(@z~E#&oThsQq)pF40}d(N)BMYzR3{Y}?NvqmWg z1aaAX_@{gCO|*bz;xr7#@HWE?)A51g7EPw}36*0T^b_BLr{viWCi+fl;q@M_7fNbl z=zMdTK000HsoL~FlxnPH12rfs2mcXV>49o9Fw#6s3vpUPV9$gHp$?b9Pp!0LPJs^I zq{vFUWQ;-Jl(A<9J}u$APy)X1N*7-Xu#;*e%m#iF`rmQtx%K29zte79{{?0uBUR*Z zJWpU_5kr+A`&;4$Ib8n?W>J6+4cBuzN_A|MKa0oDnwCM>c{`=-nTLa$$@}^?0dL{U zJ#lTm`l#TupRG_^!XtP#wA{GCXxU+^ICBuHE_p`skiDdcQSaf2|J|9tw?B$%9NcJK zYe`!rCctgj2Ohef*M5b+N9lQp+pT6$ynKLj!zeuWYjf2+q}^@-xbGG%irw1-{3)Yb zN^k@1^i+#M*V8ic4i{8k z&R$FFpHg@xY;vxb)6^-xo}#c7CTSl>MCM9Qxwgsf725d^PTc9ED@$I)t2CY%JvYS4 z75zB;snX4L8x-k-%Q{2I2U^KWYsyB+{IrlJ^>5A3(YW;Gm_(JJ(4`lkVm_M?nFY%z zi|&iT^CEC10U$m?E}|KL*nDHEi)uJq-hBApf%MrcDdXT*h`nDNdquJYBh7@5D&qQc z<>Txp@Xr_)LMm{(SnH*{ow<(dkJ%S^?nRYb_B)Pj5~jcR7GY$H|;q>rLU>KoCkWvFLYcwn(6zJ zBP;%1L@D(K)*8*>3iRoZ>j>GyE519554xzTPney}4o;@EF3*pP42hDb?hb`@_hWaXqGEe65&R>-t+Y zo;C9#Yw>Mdbm6kk1#bkv%xrpIzhEK-(M=8t-6CMmzTd9u-KTCL0HcS5_Y^4%dwKqv zDGj<+5B3y$j*O`1jCJS$)6Q1t4B7T>s9^jwkf8|PzZvhpVBSwIlK47`T;gk&jQU=9 zf!adDL#^}j?IO=1$j4J$rsh2zh5auJzQ6^9Ppf)KzQoeG5ekU8o-V4w&uuMM-!Qt$TDSC`j@zO3ggM|Od;RARR~ zP6LbRN+Eg>L6&wmijm=+-Dvc`D?;jLA3j%N3S-}&P#J)`ieAE`f`q}z0eAVW_S#IL%Nq>%-x}*OWd=Ic0*pO!g`3nR(Q2Thm5o zU>4$B!o(u>En}LO_%a<;?lYCcrSv-&+fTD^sGcGnB|o)l)GsutLkfo3)7>oI-AHN{Qx|hT5ySp%@aj987z#lKj54^SueFTa2$okRASS@tBFUQvt3PB4g7Z$cN6{ 
z;7lD5HFYgZD=uGSAFU`+rYVXcE8&xkR$NGWxc0c0qM{5@mHf71IB#WKIMiLvPN)tZ z@gU7f$3?~5g+L(TgdM0{?RBXZ=i)U3SwP6fVjftONAr*?o$ zt!3+@?V#zp8@&tUns4Q9?3ryQpb@ilzfr$J$gOEaI|uYWwqIMJvW+=F-Qt zhbT^M_A)b7O$e2Y)#?6zXM{;mo^`^|!{0m#{SzE&y#k{)X6X~AYDA>}prxi+7tDn; zZ4w^EcnwMfJlT|0JGVu~W`YA;@Y^1KspXk&x*BLkP1k`}OV-Q|wU-JaaTW7>=+-RO z*MhE(Pq0`=KFYUJlSFm#9%5;51E@9HI$C|Zi4hK6%Lo+D)@r^qqFUWh#8TaCKxDNK zI(1jCkZyQL++3Mt$Z@V*t&uESZE7%qb7v{#m?G*~F~^xqV&kChgw!dx;D}8Bv7Kpn#Z}fzoxLN>un6n+y71^ znFFb{ah`9jehd#ZKMPr|K)Y^#s+MH;-4EJdkhedM59hF30n!)iKKfE^hVG~8Bdkd} zfgNF9FpPQJY9a3q$@w0x?Y4JfE*ab#aZ}h5@CGuKp>O{_Ru?jqbseSMrv0$Af4bh8 zBPf;i?8w+LYa-X`m(f%ENQp-o7!KxhtO;2UGfv-%lHi{3iCh54>M46oxS&u0h6lYR2fCq zJMX93U4=Rsqz=ay!6VW)7w(K(0zgKUMT#qpQHDjQqO5wX9GYdq@7SNi=q{0|eeTz$ z4n!jpSwj|PTxTMdY|)x|EoUbUE&g)KZF7@3v;chW2u%NFQz9`=Tn^<>ijV`*1q6bKpgi%_y(dt>K}mP)Wwb&9zw|NY8@(7TAeY zD!4R(`aZAXNm7e99IkuT0KF=@9NaJNn?%tSk{I%>xYMjq%w^)fdPC_qOooN)erR2L zh984cI-S;C7{80r9nEr4;oa4oE$GX*zqZLA_4)3K;j(_5lcWng zjCJ)c%EcOsibvAyHi}66!P^fR_Vwotil);g|N<%&cz!8A1oMkFZbS;YMF?ZX)L)I za4VDo=AO~p(T}p6u*OrLpNtxSXZ_oYyTh-vor^IpR@vfRC$;?LGe?D8OU=fgaa zTH_C&oQ&~`isaC@zZ+08!IqJy@AebiZrrZVE=NalSJJ92+>#FH&qP zo)5Km0&FaIc&L*dcVoAtlM8QmlZ?LO-9JYwre-*2+rEHWw!lv3cX{4si(gt$v?v=i zTcysgcZX*yVz%zLK~@u;6sZjsf)s{6-5)>}fSUbRuK3ZsvOUX>eHrmVC)W0zpFih! zNg^A2pJ~tg8Mq6xz^KzT)*k-KQo??3Y_(>m-Ph+ZP@sM4o3o|9T1cbMcn`^1Tcq8MC40ECBy zeyt#NgA$XM(a^}{xnGXJS6oX7;+fh!&r6uWN1N^?%HWvB39HK-rNZHSa^402VdrOu z>&Ay$RU=K5lBI^h? zq9RNneYyLGxHm$-v&S8F4e+D9(O{(2;|*( zkjmRsrVokoH?MnLMMJBC;=)D3tIu{~=z#v@9z(|Su?(4QB`P#ux`x569=AGDYLEoj zANOt8ncZ3jS|c{VeQ>z*pjnFF^{@iwM<8)=Ylu2c&pQG06LO?(EnZT_zY2AxIcgBt zM7{p8@8Ev}dH(%56$*skA|&NeU8(~sF!2R6ofhe4w;4AdjgJYQYEHw2vb$b|4Yf5? zeuGkr(u>#aUV%LcMpNcJ^TuATUpznS#vp>Z2sVkohQ;A~nh3l;j_bvq9T_T~)Yo1g!1il&ibaUYAP7y{;Dc4#f z-(6=Qk6Y~cWGq^y4xh(@}*V`Aqt0{*~3%@7J>jN~VeGv}Xy4^JPC&?sQb#29dDnAM2w>0mzk@+lJ zXjKuC2fMO`(Ynaft)0V>0tHscNAK>9+^lZ`cs-<|YLI%8cRZZ#ZTSF1D_l`_w7XE= zem6Xhbofv7JeCJ?)9T59o>%1G`{%!RApaeig(Z(-b9)cjAF(%2t)xlWfF$LNyPGFS zdG&O{X27V&{A@fs6a-8&K_>P10IEbS!#U^;@pc7^Z4B;^tT-;W=CDxys4MwBODw)~ zWHI}h1kuXHE-PbMjU!%`%_m+wm=F=ch5)*h0GZ3;l8b;!b#(|Ww=(!CMj6^P+Yq7y zse<@|yLMwdXAfSBz=dCaqYNo^>V>dhwjHWz*ZL=}A5Sm8T^e3rUKE#vHEu`z6jZNE zG{CiWy;h5{4~q9yud_ccvwg6(s5f=+KXu!LI%L3WCUtiKYx8UOf0-@(&|Yz;VQ6?x z8?2dNIR%fol&K1%IbkbJcB&ZN*5%Z9Mfr#r!Ck|lh*S!wWe44b2D7JsZnR_tlnPnbP!)$`gTb6wx~1wR)@@&C@py1L z^&^WyKh!xhTbOnVAtzPo`teYB$COvk7Vm~K<;z@Wi=&bxv(4Ye%*0Y^OHUehqyBM( z55->*R5=sNwM+mWt{z!H_>{$Lj&C7|OMhZgHN?>E#@-|(E3eUJjoWMnQno}{{fu}! zrfbT#);bHB%L$_l)13Y;p3d26O;T4t1-$06y9>Z1Vp9c=|N7|2V|+x3mp)`3 zdl<+fBLhIn<_&g*wyA(yXL6T}Ka@=(5zL_w&`Xo-X)tg%Ue7EpRl{zF)dsA!#`5-B zdGNbk0Y~$Jj#ne{Nw5QM8#$4CPysEGe5YTl!SDvrb!h7uUY@L{&D5Bp5ud?0bfF%6 z+KjV>x}8BEFw7VA`T>V@>roaFtn0QkZ8N8N^-)LkTZ{CmUy zyjljy{CcmhG}9mh>;Hb&{>vxlC}$^&P~&~;8;e(5kYn6fVS<~Vy<9Nky?>ZuGt8Dj z+xh+Y75U3Fw4ewGobu5B0@>7cAx~BVMd@)mpi}Pu%KKa^I!#;89{+HSD0rpU6Q`OC zOqL%CQa`GDRC#sSQ(eD@vgthzs-^4gk>$?Hjztexs%7jtRdL+l!T9CVph}NwI9lDM zNBOZ+*i^@4`RedUunE^{n$*BD-uOr}<_c~UrK2G7|5q|l?e;m)b+>pJo? z7;Wl6+*DD!Ma$JRh;AGx(g=8kCk#}ewBxyO_fr#&v%YP-r{6V4<`s=yLHW`o{pk3? zXDhPYy1xPm9?%NvULV2y*01LnXG4Wn?TcY;Zf~RLhFqtS`NYUIL)U(b50eL6jPn=? 
z&zI+CQkV~8IQ**NPT+K~-8!x~F`l6~?*}<5eVX1KJ~5sj16=Jcx1(=UWYTtGuO7;` zb0PG5cSF4UQ9E}APcx5CfB7BlJ2oAuh5dTyC2<=yF;HJ(cumj+?Aw?8BKn&-I=UujlFym0k0X33ea*E8)Ezg zor`foz&Xc6BqIJ35Oz-%bvx<4Gvba9WTG7$2UXHBXDp)COD=c2{t~Wa=74@r6iW*~ zcnjv;@i>#wrMNMx2rRl04>*o9(H+L%3xVeu-AW)iG*6D!MX@w|pXy!>;c$fIFE&q@ zAJ3L;Aem#izwmf!2hYaL1ZZ%`8Qe}ekdc7w6iA@S_ci+^Phm0_fq{C4glpIbY*W55 z9E4^$9|If%OvvvB^5r|TZ7s0+{jL&~G};MT;Pv&~<@I7fdFt|bHPts$j-cxoR&HlRD&vZl193THM?Bt3w9z8F68-S<_S zDX;P6&bwc2t`paibljvn{bg3+f*vsaCEx2c0Bd5S3wU^Yc$hed0cMy^RTmNm=&3Ip z!h9K{QJ1S+SFd>e+Km;CT z@CP>c%W&j2*QWFR495lYC5M;GNCKnOP50}#n7NQ1QE)qS#qSoTMxd{e zT+th90XxDs2pT}>HCU2qIi-oi*sUh(#i*2p396ZeE;815c_mmEDq>iD;y!?BK0?CU z=o(2Hx(5Ifiu_az28Vn)P-@6nmjdR=ZNWjQIM*_&Y~%S(%uuTVB6;eYQqBF&U~;Mq z)hUB1(@{lpxlawqd|_jxD`4bI;ZS1e8L>DW=d2IwUKJS@xtufg&j6y{$X2ra2^m|j zuP1m2;SW?Ca^ASE{$rKwmzEn1%y^nfa;%lQ;dEvQbM9yLrl~;B=aq#jp=YCOGHHan zBWgDVe1csb%@Aic3%H<1mP(fz&}1-YI&Y16FHxmO3$oCUoz3mYI50eVLBsj&XBKql z7Z>Q$rhc5ujf}gT!WEVQ2*as3r#YI6y-vEeT$zvh*SRECz~C_e!6@}t&6bOYIU7`p zk9e>D51SF6FJB3b#&FL8$id5>YWni>b|C4?r$F%UfVXM%B6E%Vo2xHQMK^N|X3(jj z`{?y1$L^p!bg%@>AEztz2dLfn8wZtaEzUu5e&_?qNOMoim)C*lPs-=zBP6TZ_}Yv* zZ4#Vhc+LTIxJ-s>+Zf*EA$iRUU*!DiK~a+-BtHm4+Kz5Yl5clZ(PD6Nu@FkmvZ?lJ$K(tI|i_U@>N1lwh) zfh}0y3YpWmWR^b72JjjPNX5Ip{91-o<^ga|DsA)d!*Q^qQbIGkb5*cUD^lOJu6bLt zI)oh4hi$oQnoL!i%?to%EGZ2KSQw#Tk)Os0#Zo47hY68}N z2O3Ucu?t!Jd~r5JoxVX>?|f|F@^?*s=W~}~9~|WC@DD}#4QwnWWIInw)bo<0crc|k zJ8Dx+!M0pv1pAYh$L7?^`~b6nPkx$94SXlwnxnO5?1-U#dL`!!%!YevXh108p%@}}$Lf$)O zL0L&Z6j_r{nOVK)$5NGRJWCr`@qSJgpM12BlDCV-KN%)eq)ktXFc21#lCV3=8k$+u zfyyniKlZRyp+K7JqZisg4vch!yT$!O35Z|QXL zwUs>Wvv4bGs$(()xNRS`yaR`gDDPWSTi3^h#>qfdi)u#GSj9^Y6eqoJJJ4 zu8pj9)OTJVZgu^xr>^*MKl2c9tR;p3kwK4QKaJ13LX0O@2V%=(u$>@$-bzG3ga8*B zm25AW%|b=Ex>DgYlglV!nh&|cU2RA>NU&|(uGM6z*W7vioc=f^G0#uQsUqnIof05a zbz=itCNw&zvWNP9*@m=(k1)T^QFmX);e4cNAE&2w4`bCr{8yer$ehn@FJ{}_KEd@} zx|@uUZW)?X>~clh+~<{=xW|0uwAMWdE*5j+9%zDvZ*z^zY?d@FkZX{JsyY^3;0`YT zLMio9AZz9&lMs}?JSgFCi~|^2BVHfl`j^z>4w_S|dwgpPCMO*COVGC6TCuA+yBtSz z?3cV7h<*bMl9)WMJA_O%zt=OiPOQ}F=#4_1chRYDe%pLI)uQHfWQI$)y(5l)TNzJx zons{1V0|;*4JaUKbm*WBNx^IQIz4|OL2~l2J@;ZIg=!1*f8DVEZ7p25@~(CHMd95y zF=;qA0V&`)6SIB2w7j5fYC+^|3Nz&JO&f@+x{BspTVAs~hM1`4hS zQoS|Qg>4`&_wM~0iM?CEb8<(|s@A|X&T2)+N!NQ!-LAdrUVE2cdEK|K041waYf=oFyKcvxs-Z}w z{M$N7r1RNf!BJP?j1o+OVTv`8N@QN9%zfEjXG&Zi-yV?N>U_%K!05DGFS0t;nZ?*w zZO)A2KS1%Ytu~=DU{U!^vB(|pR_OAFJ%ao#opMExpa83p6%DY>6X#ZcxLlS3$wjvL z$h>F4kC_iHs7<*qWk3Qq{nqHnyL@RyOz?3<9LF%VkH_OvgiOsP{0}|J`3IQq!8+9>pB4S4<;`Ys zW>Q}{Ovf~TpvJ!jY3P&-UElHw%qEZ#oxF;4x|+CQ6)T0yY4nrn2JSxH3E?#tp+@lD zqqslb98lz@_tx0(tmwFFCk`&XP$lBBQfciAZo!0MZ-20KFwD&}38iGUxX=fbI$w=u zrH&xAg*4(Cxky&kb=(;}qsN%W)vV`1N4~wrzOx^vOq;+G{S!DWwULtNiM6R-s}G2= zp{lf6>TO-hs*iZi*UHp@N7kv)=#~9$yF^yR+UJ7Bv0=sW=PvkQuCXK9UGT~LcZ+~v1_Tfy z`7D2}do2fK&Xf7Kkugq?zYM%Qvw4a50ph;RSjAMJ42BbUX686NeOG_`uj$_VqtC(1 zlyY&v@;_3os4yS`bULN$WPP&|Ax9E_!VMo9b$Z^%iJfr4U{^>9m!b3J^sSJJ5!^GI ztJ?b#@kr?Att0P?UfNW;L4R+1AcmAN8gaMN_TnftKYG&){sAFAF$Bc;l&Yh4W(q8w zW=>6-PY|+Yqk9<|{~-{V-aZ|Lr+4mAT0B=~GEc;gB9U(iLWj&+bF6cG9W^9NEm zZMt59TX1u49*&|$$6BQ4b8T-pk6ZFYL&Xh;GItP_ax7FU2gd}9@Xb$B-jR$7x~0R( zZhv_CFq#dTPm0-d$1{w5zkioHaHFEKucdV}aW_Q?a8TMb^Q1Ir? 
zj~3KlzDV3?ASqyRvA#e+kebmgO;u*e2W?s;awlz#5>v(Li}OlmcrIbvudQ};xP!QM z&tb%w<}h{avmB1i8Qr`eYO=U*Dq8z!(hj*CnRQRKQWBxMd~CL9x){~(@d=YXB{pw> zP{dwFC&0{roBOE}*KEG;ZVOeqd*siaUakn{>&tPW0?{|4e>|%YR0j5*)awsFUkl~b z4#nR42{f*-iVPw0$e#qL%A4KG4kX_5D!ASRWNEyMi&04X#Aa(G{VPke+{bA8eV$gx zGLM|5elbDgi$Ts{6VC{8|{64+5cMlmXZo5X6k4>nT0pi%@QMD#vx86#$1#f zf@8X~?N+1GGEE4^;-rgbJ`7cMMqQ0$`Q`+o^~@I&a7LOelux(TcidjK^4NOV_E3F` z@AT(F(ikPkv$0qtVCC<1#(%+I{^MJ@7udw3RJD5i+%g48bNADQUP* zxriPp<@CIwrz;H`>W`8=jv-c8ClAm`Z!JaQ`qKk*rt5m0sLS`ITuIG!hC1E4=9+$D zL25QURGB9cShW|?)~_^81B+fcOc5$!`FU+Wdv^!E zU1(d51Oh(>My0t+zRyzV<1QVo?89)kb}43=py$!;RT^F|2cx z&no4<>3G!T%wjFw*Un5@f4;mud%Doq49rF{_WPx1A_)OA&2_M-MFNpTuE5~FV&(h& zW?8&^O-q~YX<5X^r;CM({t3rMEe=>*BJ-Jx9Fq+d zOHyuxvxu|JwVc9XeiTuhKWQLWZKc)^p7ps+cN>Y?ic?c0hH9P8-))&S>HT6Fi_PQO zLmn|}yAem6E+*bA0B95G?1RH%MJ?T~l^Ib48pS`2+fPeR!V64KMOckwwF1W0G^r-t zj{rJuNe1A^x0((4`pflHi7eFULV-)xgQSL=03ib*&(VUUys?Y;z32iU{VD27YM=sm z2%w;6eB*f;-r=FW$vEQe2z)yRb2Yluv4QdUYHXL?P>FV21gvj=h~;{v-Z*kU*FqVk zCpGS8+iNF4Zfdu!0IjTSD=t^>{qe7O+E%b?drdgczcf3~@ATa4rgQ-Js{c&+`=5Rv zD+IasfT+uTxDyn;@6q}OqJU|uU5!m+%-A;!mMNO?-Xd|FQ?m}|HCoEJ=olF55W!12 zaL0@p(~xtQnwaHwL^kho?M~L(I>XtDsm)(-eC zHC?HtOhL}ZcNGQA<lXH0W!M zjnX}1qCEnzI7G{~547$rBTk)}?&-wD#bH(Qy6MNA0es8Pyk3pEhvH0rIqG=K`T#Qc zQ-IHyQFt*m=8^|J?(UNA?!CC5Z-0A_vES!*42M7T7mIbS zIp;BtFlRMBxhnaz)rcQJmO*paxa%w2kGD{nUlt_FHF|!q@1w5X^cq{eFaG(8($}ua zsXw8g?nNW~_Frj&4@?0qDClSy8Rq)!JYE+=RSwS z+O7cm-QUaghoe@f1K7t9F@^o$Uz?HqcQr??v{EV5wAG2dDBgbt!ohe+^KjGWP3&2r zdu7GEGZ}t(|I2Cf@qycGO8auPZg)QKGuf;CIc}hM7fl{=!^Sl3uz#Aq^UPp-r+1|c z+lC(g^4ST@kn4Q4Zq&1yk?(9{@PqV-jaD*l+^B!jKR}nEwX+?DN2V!lA?=4UlIIkD)Z1d>&F=Di^Ndwqlb$m?V4;8ntpB90leyWTyl_7 zi~2u!hAVDaT&9y@73WtL`9OaPqWxNE?QZxo(iY2v*Rr@8S31Y*Fthou(#bhUR_1L{ zrc24ZfcebIa!bh6)c0qlQE$AJwXA=7K0=l&Grm(HaSs?w(414d{L+l-X9E znTzbtx=8N!QNmXMCg-ccXCpuTG3OYYsN93s}9ZydhM6!@CxQ)b>IA%EIPO!$17IU%+^ zA}KHMji$XLgDQJTGb3L^h6Ioo@1g{|UlPU&`w@HYVaQvUsuTybD`X=bUCb#-rCInc zXuZWoLh8(}lO_FH`yPkk&m}x~@94$Y`@hJUP?zXlGo4TFFxd@?~AwGkSBa+wEn3W zWJ0J?+8O|e2OQVy?fjyb2mTz9?yC*sbjhA6tB^sA!$b>Gxl6r6tyF%jGfe&JCn8LG zl)&l(5`DopE8!64vXui?ht^p?OCx4nkAswd*r*HLgv%fNPaM}AaDXQ3+yq(&? 
zI(jJ0U`zlExf1&=jhaFKcJ!;GHaA_1z?adUHs9piMc1rck2^$2@yd{=n!deB_Ck%z zC-;rtFez;%6Ll$ZbG8#~k(Xs%PO{w7yIK2ojFr!~lFz1Yr}&ca$9G&_qH_m2Brc;| zW)hCtr_SSO-5-Z`Ymt}f82clQ|FX%iW}q=81Inn%$nCC)sOBk9o9ECv?=M8 zI}U7T_}&;Y*EufPBbVxE{d(?r+(F@Xsrkc7NVaXpIKh0y!kQ77s`xd(MKc?an#>)Q-<5)ZX_U@Y=C38U_IG%G}vQDfx+d% z4%u;$C)vYA>&V^Z#;(|^<6>-v^FgQ=@1I|k6k5<-6O|$L$LD`#8h`o$3-GJv(_PuR zA3PWR@7CQR0?La9#@_@X$$AI%5skh&QKtgTneq(cdjOcOeyWu?kAe9!N4wI2QDM%acaztoW+50x7Jh)-&h7)FAVC zn27)jz7JEvb#_VxYxuI3d=JxP&#~79`=EGJ*#!HH3~>EahP{g$C4_b-g9)Y3W@Wz8 zg19!oTiUMKh;Qdt)(iI>earr*f5ho~ry6Hp92eAfKkzL_9O9)MVhzO2GYBLI0#QDn z`_aE8TV?6rCRyHCbrD)Hse5BonrsAL^5BR>UIaNBr+xq6%Flm>2OUc+AF>v&`pTryKLo?tnh&Al;HUIRibJ|>W=9u0`GH7}z%aJ#EPXf2#m@9*j zKRc37{Nm4g5KJ51?<|^pAa;itFAvT zZGO%IvP!HWw$N<1_Sdq+p$X<=oqnW3{dy)2Aw?nOzod47;`$SCxK&D$%x`OZ@mih@~IZ8Rpj+JQ z?zaoFRJZyhvDVq+UL@YqM3ByA{Syal@JG_UpIV010DO%P9}cbkTRZJhEdA zpeB0%l~K=vCKS*C1H(Dfop{alQfm%dbxowL+A{K>?AXZ8y%Dyt zsR~m`AJw8DV%L)X37toDyEbeTE3f&di?e8$@b?NiC|9D>X@RE6k*y|WRZPnxwOS7{ zpFG(TEiF9wMLx}u^CKVuvydy}c6pzP& zDt3^iHhSsIn`{%(E3VE|KcICl`s5)D;3xQ*nI6&6qLkOWli8V)2_gL5+w-;qAltxh z#l~FTo#Jg87ue3`vi#7+H}vy@wRj^4w&RNfb>W37)y87NoYmY;@4HdW4(>uZs@GA7dox5`KT_P7pJk0 z-@(yGjxaHE($3^6q8>w2)7zX1gU^0fB&l%CI7AT0;JBTE^JLq+YTU5)L zUe9H-HHp5erf>WuD9<2Fo|_UsjT`qyN|s$9HGhPHYSm*6g$8(Or*LG3Gsrz)(}e-b z@44+=#aP8qhM2uAKB$N`i1VJnqs+l2W;V!fEt#DGZhwx_Axc!vSH^0?eD-^XK&lgl1G> zqIj{WOV-hE{-1LehSj?z{`T`k&U8Cuc_*H2zZ_C z1!LOuInr#Gal6&jjz1SwOwH^{{ZD6u_DTfh`tW+<@-LxY!739MRbFUEu zeHTTQA8GKCW41aaDp1XgMLHnS4p*=CL%r z$R5(N?gU|r;={Jd37wP+PXN8%UUwWQUm4ARU?pg4RryP^C8-^H1A^(+EbFm*ch6@_ z?1w#48@uOb5O zV8?>vCVAs(6kc}boe$ivywASS52&R#4UJkz7Rzap)4dNgEuDfQS%*V zbm^oM!qq+0-qNd;oZUcz<%`=xsN){(!TzvZqm44kG&A5)mDq9LMq6agHy0;v{p#uY zj(r8~CiITDEBN!@;E`5@n#Kp{8TqcZdTaM~E)>PzZ-8$_*8DuYNNGW=SwdCd`8Nlv zLbU(_)ihLC(KsH9b-HBa&9cbP8=jfEhgS9q9WZ(aYsuTi{&A(6BZxV5xdG;D65z+x zPGbm(gjBe%Xhz zjEb>;Rk;kFh;i+>i>_p@ukq@GWV7oKuTe&utQ&s?EJSyhtf0FFASNLAuRS!Bj0LAk zoa;y=X121=F(e{KO0_4SeA#^TEz^{)>Ndj~usM@y-hPn|gkUuqJ-a<1Z-~)Y2B~1O zoge=jj{Ww8+56W|a3gZvaRhF3;7mDh%M9E%tPf2dcx2Y!CKz`o-h%K-53_}xt`=V? 
z0uHZbM;RFsCwHzC)Ftob+brR*!|16Pr+pCX3bi-?8lmx#y1~?hjleLV$~=5EnKe_L zJ;o_=!=8|w&xjHzfW6;K0#^Hb*DtuZe*yR3#(No$9dcaUJpwpNrc#e8wjYq_YHgQ9 zCUUj>_J3A|TyH2eUtB1+Ocs%8(C`UgR3IMRR|+$EW)Xt=OW?lF2Qke-m4`0k}*4z)4*cZJGY_2lDehC?cG6RwrzIzr=&8k0E&xZIdTcSw+_{m`H^SgX3 zWexx{Ut%JZ9bgja!i_DS4))=gRDJpE@iAO3*%TdbT1=jyk17`ah1lSb$IQ%1t67IH zs%{_;4&U8yKz~ zC^k6>?HOr5`83DKy0mk96}MlMm1L-~F2}3)N5NDM@~PjeYGbb&zWC2mKZ3Em!0Tf4 zR~gqvguvk5cK4pEZ;lgtg}yggBFB5XXEuw;cy*ksHlMoZGMG&u)v$_bY0rQL_st*q z`F9GzwZxFjn)<56uja}DvA-kS9%!YQJW&)dhxgt4m&)$vOm z3JHY*)zGOI;~EeM6hEznxMx6XMN*=Ioqbt$){z2|L|0hpav-sp-p$_0C3E680}Or% z2X%en42|!$O`JNgL<+PmyMoZ-ZFa@paf(We?sKS8VZ$$i{^r^SpT2(YisKNwBu`FQ zeO#ld@yT?E1Wsv#7wB}!Ap&H5GMXxygjaT*zHd&iiXfXEJ`f`1G?Nzhm zR{DVpk{@JZ)6MU<2lZpx$bF`KnTQ3g-U(FKuqP+b-TnQR zq8`;|Uz>;c=YR~Y2cz?O1mWj>6>pfs?@UCbZU|}6sv+XpxO&$%+<0uXOZ?%&=tmN| zHe)$Gy9pr{T&qmKKAJ{SWpD!doaNG zG86q&0%s}BL!!-jliV^&`n8afMVq)J4ZhFilbk$Gna61b?iKJOWO1*}aO)~Uqh802zEaCV^$ z>-6I+8sp#{ zIcx$By-^dDYvUJ2~COB{E9;m(M^aa6BKqV9Jj5!N?EUQQYb?d%8$5Mp*w^%*}Xj-{@vkIr+XnmF4E@&wz}?H?1=qWmR*EmM_PKKl3B>E5iJzF$17Q z>3a9Y!u}j@_hA3h1hmUG-wv_WG-{xgzF7;V%(#i=WK40BSn=@f2#FB>?RvmOIbnW0U20Xm zRMIJ=gR(2psbkQUE)vF{* zZwcY$>;#Rg)1jYGjKMp1h?KY`2I4<(d0KXPK1->}@&Q(|7nvxuRXtQOPSoxR=b_az zDyruyg@*EHczwq-U{@(|{CI8r3~1F5J?I;!gS=q|YPKolk5t~p{eMrU_sNGRsi1W- zF8N>1^P_NctzER<19c=H4#gtMz!%O_v#bSshO#bR;<{R^nbWR-8aRl5oNz5_kNXsf zhU>nj>qk|L@-7Jya=5Me8Nbam9Ek`yDLI~^cn4zfuGhx6|4a#5YSta|880DnW^V|i zViH-xBK@6k^+Jr1i9m{~3`S}y5k0-h;=Gq(-(HkYb;x(1GNdu`?3-hXU9F~5hn|Ma zMDn^qPuH_Q+Aco>+#jz#|7B66jjWLd$dW{KGGtPgXXM#7Ej>RVKeyix2H~+fY4r7R zmgol(11m`>ry8SU1gFP)1#6j4kR+>`xOBlLelMVqUjyDY(DMGOdpbD7AX*PqApR;3zuM z3|_1_IjZ}girKWmX2QrlMPhz$vdS<+XunqS-|n6A8{ZKRy?3lKKImue+k|MH{%~#D zoG*tAHEaT#b=z5u)kEZbHW0^^ZnR1h6`nK{d=2p91c(wr-JFfArMjMdnT=f& zu$GvAa@X;`B1N6iK0j{rW(Br?|80CceEujQ7h$P>IaX$Y1yx!vQk~$+HhjKz#P6rZa?RfWVeW zr_Iv&m_Ww0hPN>8MSXlCW47b%bD1C=}qT z8o_PEbyPDzG4LM63tKjmt*VuagcKxC*N*uw)Gw6e<*gKG5dv)YJZPf`B8R*k{W)X{ zS1+=8IOCd#;n-x#Y%lLieewVH0#IRAtIk;$GA|1Lkv8xLdo3DfcZ&OXfc>S}?7D!3 z#kP7<49xm^jUNIoP%o&`lGW-plP7Eq|MY)pKD))c`E{eWl$%d{r1Z`YZ=Jd0dh?cV zryGsE%G_4>_F^neI_WLL(vVW4tz1O(n_Vh-f>hbQc(oU=e~LyD9h0XjS-dWMD#KA4 zuvaVz8utT}|2}lSg@T&uS+lw;6DrN7BCMyL4aGB8XTx5gt14{D^C-RlvR|V~zb>7m z0O25=m)@Q!t_gQVqa@*d!161gJ^zRR=S=Nh&n!!wI5f?5`Rn`9s^-k}Pa-E`mpn1i zWYUa-0EPv-1{XZQ5)^E3$q3aPFU)wH&#LBmT~sR)y3r?1#NhG}8FZ*Z4rSR9(#*Np z?!e!{e%?Ae2iR-qj(RQ7lv!4nk^FDC`5rEbkCr^?WmVxSqljFxqpF4A3Ar4`op~;? 
z+mGwb5CGK$XDwUv9Pm*VyfTD1y~XWI5Wlaib~yAW>H`LggMqPVuJ>pr$i$EtGH!*% zXIXV{yWZfk+cyCsmFJ}K;R`+dCg|UY9xZD=Y+hro(+D@n9YL(uwvTUf(=<@|ZChWy zZwkjZciLlGez-*;VzPWAtA>P+%lywR#(jf&t$o2XuA=1sKXp6*SO4?P+spm1c+s@^ z!L!ZYe(*H3nQpO0oliUKyGYD8B%wA*%(XY>XHHAJcIhfiqe&%IKA&=#KE`>S{Z>Mi zxtW^4ev5oN7w6n-N%SM^O_9=lPS{~L^u_+{*OBxO>%xf=L?F;}3X}zXjUe4@_{EF; zN|lOkzwz<*bBMZ?QlDXCUjlpxQZYjV%gLtrhwGl3`~7Me(g@qtF2hSG!wo7E9eq%(IOcH~ zNvfyaGiZNMD4V@QJ&PdtdK@O|IIsKh@j+z#cbvv071hrEtRe$nuIuWRHM>%=gWd!+ zWjwuoNbu{G^I`tSXFy)tHC^gJx<0WsB8rly#e1CQ-J02%`LuaRH$!|ol~Q7+T5k8R zuL)o&AVE1+smLmlevunX&zF-X%^5bhwt4MFXbJg^#qwNVclDh)mCMq^cGRq;Rjf&~ zUI_X-)M$yho|x!!M%7OiBv8d{;;;}naEU!QxcFRTQmFT|t$Pref}l<}yCNyox5MqW zN7qpgu{CJO8;;n%wobLcnT_8$a4i4}&A_8~i3~hE>b!jkX&f0wh%1}+YK)t$r$cj} zn;PK;ey9SngM-*%XHw#P;2v%Ibv zPB&kXfl&&2%nO?*8~U1!^fZ6JOmGbT}`a(`Pwz6h+M|OgV)rvnJ z>7YXO;(IX7YvOW^(?c}>gx0AaFF5WXNlp}7yKTPf2+9=sb?AQCLn-}b<~>&o$7(&i zimSNJhJ>q>uN9*7_Q}ReyR9f1CG76b0#zlk;+7t~3N_#Zx^*f15 zgw>{E)&3?RLp+9Hby2US*<0!EhQ=S#ZSrdLG@xN;E+cRT-xb6zSzg@W6 zz`=3Yw+XgyYMM%WMKW&;PI~x*BCXLJ3j9a;xqT-i@#>`jL zptN}n9OIkI3}pc08}+9dPIC5lPtvXj)$-(4-)ecMyNMML#iREgDQ#b`^iY)&v<&NH zAobwnoKyYZ(<`EuVOg+fsH%qT*ZGBAy_q~mAU@K?mZvBw=}hq0;WaWTUcyfO_L~Mm zOC^X>1L`gtSRpMp6F62x)5?3OZHk5I#p+Gp!?{PuKrC%W_wMBq4D!r)o+`;J$ZXj9 zQM3B5Z_j<73m=}Ro}FfMG-r1VHGLh1{h^;mFR;*{0cN zy>rEB(g?LIP8X`aNBxqdmfK=3#H>bAxYVYHdw$J$f{=vPV-;@{>0Ves;3MxudR&V? zi8eG{Xj&pqwLkI4QKMOOB7@tX?MS$M6dR7g;J0W{JiNZU&P8T6PQ^Y2g54Y2nll$# zN^4+ef5s!vcbS|TQ}JfxYk$mBEfXY3N0VgtLbw-W&F{+QskHYCKcc<$06 zm|rya++3|rIeG3|WG10nb4t=079~pDqV_YPw$2c1IFV{AczZ|}iWL!`f{tLvFXEQz zitEJtPi*TnVKEKdt+h=xUe$N&uXM~N-^3PtS2Bc-quE?(M_k@3XT91j#*DMQ96b1v zFZrzYd5PSx$!Gh%VhJWhU` zyhOd^U4YDP5w0-_(dOrU{%SI_079rvYXMdR=zYW7r8?dio{yS0Q;#^>1;;@bu4h4u zAE6A(j?M3PXt-yqhZHN(gaUAxbOnF^c79*pPQmB_QFKBXio6xM*wSqt|3#Ij5wF{m*k6BufKe2zb!HlKDJ;qma@sE+VQR~fXxmb zh8gYKAI@O2J1C%CEY#Naohc6&q)mF$F5mec<4|I9bgcMnm|NEeE$~$m5P@pAXi-8g z0~Rrw9(F{AQrUo#ACB7rfj#_1_M4+l{2iC2LM3V6kxZCkpHIJgPtyI!f!;Bf(ze|d zW^<@Il!*<0$zwO@GAB>Xvrx&GUh_<)VKz+LgK6!c-kuRph(VA~uj!!D! 
zIthPf^Jdtzo9qc%A~^=K!lWzre(Zn0o^M}6g^R4oTbW+#1-}Wu>qw)X&1TA0=1;R0 zCbQvB{%ki@zJGPk&Y)frFu#51L@rHe6`2qou!WJQdRpP7Ty$Dvs%)TrR{vi1#$4w; zqSL*P&RgKUBMb*g(CSrg$>yYrAh6D~H>Fwhm{X@!sxEqN`v(`q6d6TsYEq?!XOqBb z?$2%mE6N@+`c#%4Fzg8yXP#}%Rs4c`#iJu+WMt5FrLvkUq6wZ}y$y*&TgqgJwksVR z7WI+K%Rt>S!rLK!db9a0sqaR_e0w5EY@7gqL(bY%4OoT6V;M$b=%CnJvg|C;b;*iv zRdqC?INB?Bh>UVPG2l*?v7whqbbXo)P&uL9dP51+|NQ&w33%2b&C_=#9c6{4$}Q$p zjmbr8k-jC5XAOET+db?Xo$8otA$a@so&jr{`@+(x=F%p033Rx=_6sH)mj_hw3a$=x zxW)Au-~lxh5w!L-QHts;69d4uCMUlhg4Q({XDIztjKF+^u0<=v z%>ZgdxJ*ic;75~=o|ulASX!Ey%Z<^~a^pFL4iGxm`BiMD^PvOQ8OvfwiRsffDFPI!^0M0b|6H^1F^KBYcVn55`*5x!UMu~oVD z&jCfLzzVu@$reh}r0J zxAVUUlv_=%T@(@fwl&=sZbBS9Mdmust@dVBX@k2kj_7zpW@_XeV0Xr;FfrU8-3_#N zz+ie~{tgb}am@H05plez(ECo|w}Mj&1!+LkkGlbO8M`f=j$jP`z}J3bW5;VKS0|Fp zgWL>ETKUx$g>6&XNy14|t>qS9HoNv_WFkd72a>jRO(o!z$iG}SwiXj%JjhAi7ANlrg;0bvY+Z9wtRc!>} zhsAzJUh7GF?H2pJJUuSkuA<=-O-ZJrype}fHjc|fIuovPO*|;eBA-i5+e^OTteljd z1bjmw5{9AhQvLm!bEVxxq`k#`5hJ5@t*!d9_d~JS6&VBXat(6AVZX%-?9ZyLq%g8r z8;!Q2qa>s{aywMKxjchlzY6)m%BoU5=od&NA$4qUd%Fa%yTEYp(_X@qm(LH*~z!{6Ntk=zO(l~8{~JZ`QS7v z-NHPy6^*?rOP%ku=XtIOzcV?|)t~sXW+B@3;Bx9^=M(5H@Dn}Vi~X}}{Qkd_M#Nm8 zb0+sQ>X+XLzXXmNEf2f&>+U>~OhhinHjcyhz1HLf&d8hbC3lZSoN%3g=;GGgRLs@c zTT1V{R!e`zzgJW0#gOK3?9%<__X{GgbOd`AU3AMYc%-3{kTS(!EHQ8fdkCuI(@6E*c zEH?OB=4*a&BzE09`xr5=5mJLB6YhSSJYDmd7fmE(-4F2i8H+pM~n#jzF-LCvBz{P`oD zR0_}%rU%-Nx(@W792y=@Kd2m&fvh#D*KgdpR)?e_}{ z>vq1iOW{&f$!m?LNFj2F_$6X^;V zUJ-VS8<5?6XhziTK{Y7ARGt>eVoQ{YJP@ul~8gh0mgKhT8A7On0^|vd`Hh zt6d&Zy<={=a&fmSyd83MO;h?ny4MsjaVI%36cr$C_!WEKpWk^t)>^NHD-rN(eFxEH z=X!!ZXhyatL5|4kuU0taBhx%((3V>eLi~-_^_pWYs z@|BpF$Z&FHtn~l`oW_LzkcW=%s@X5iCF@ZH~(supt$$x33#YiF$rmG%FAo~|^1m4<3I}E=AL&i3rZ*WeTmoBR$uvroE z{}C2aU!zq}ts@#mwklZ}kMb_Dys1*L5ju^}I~u(0Fq=nAZ9Z(OFENQ58#_M{uIGVU z7e%ruFJA!6+O;EHcJKh|FY&Rjk8U;kU>;oT=VR!-fEncHuSmFSJbBTXdOhu40&*Lf zzjh+)>mE!`81s;ck<1{N&HX`#){+)mvwY9g+h_aKQC3;dVLGAU=X51m2GqnmK?^0Z z4Xhj&d{4sHAd{DqaWy!c=RAqw^2z^fC%-?x{c){RSc5ZA^{znm7cQgMrv7)_n1iEm ze8N^52bqs^bZKx)?(A?d@bQB}6=#z-K!L-wsOI0zW+RskDB^m@t1RQi#CLvvK|Y}| z{W$b?%g0^&(@p$mjnUA{;xS=@!}twL2-ZGv6Hf)o+w19T3<^U0AJ)A87w{=Y@_@Kb zcLzJe5OFq#YMX1(jeCPxfkk~St&Ey@x(K^2<6N&(J?(f?k{dng_=zd0iSU+z+seeN zx@+-dEw^p@M9nksJ^eFpD*3(E-9$oXXDIZWpdv*{GQW>pJV#zc#%YB22`7|?bto@a zQf8Fjv0ETXO;wy@NpnBpF_>h-_0?FXf3RG_;)puB!ZTIMaNm}mf|e8TTsk5MTM%o{ z1nX}XT}9@djZcD|M8kW#kQp$o6}8cg5N=sbzO-lkuwlgWO?!qVcp>$p8TQH0#>B;k zK3D?j9EV;?DDdTqYhXN)jc9loLQ5`+jgRVELC#VE-vZ0Is+B2r^z+aOo{bQKwNB2X zQAr1;8HNfq3Gd6JmC@5O(yt9Ofg)kQo}kWU7VoXcKEac}bW8%!q-Z6F*RNIeQ@tzw z9A`hh7vFclmvdQKxkT+#)^0BQBiZy7>CNdM&!G8VYse0v?xy)^{^O~wYnBM4mN##U z$LmfRvYe(uCAI=U?7(st@l!U46Vs=GA9&px>rXv4Y4mITPTana5E}JeKKowF%*xk- z87-hj2ta>8b-FfmWja0(di5Eyi>$s-r)D;5(7D{w;#?BDxB&mMVzQ*QKQ6+gU zffg-Xnd$|eL6(y;d3;aofHw>X&LgfhWuQX=WElJQ`Z8^5g<&LF$gkCF78pA9=j@$+ zvuUcT&C=`}t80iX{@d z*?<4`^wqBhJcEN+k0}>V^zAK)NA?2w!$NCz~;NJ6XgMU7 zyKRAUOb%wG#-7)df(x_Lnh{`{Oce zeZt0T3QdH^fmZ$eoAIpl3d<#)=qBO&ehePbZ4|f87Z_D$Oc~C$#QeZRX1acgj)z#? 
z|JiiNT34JW!wpBQ(NoD1lYqvnMi8H|Vf*oyCsyyf*X}rCGQoRW%2PtM!3QGU%G@0TNFp-0Ik2_2__>Etce=gmPb(s|a{#A?lK3F3y| zjhh4tAu&}FeN#e|5I*K7UoS5PX!w^kemFMBNCYm9dwCtJ9TZ#VhTI=ol->158Vy4+ z7V2h>E|(TBUuNG#;2VPt-wyIaY%jIs9MCV#yE`^gD}EUf3=GF-R_F;$eEZ5fQ&clg zo)Ya9`Lz(RfZN)$W{9iX$4hy+A<_^Mt91|aXJz^lzd+j^YIj!A3Sp?;yS@I$I7_Uj zy`I>rX4gZ57I5QZQf>MKxq5E)0wbB!YQdgsZ(0|?IE#|l44OM1+XApxtR)*Znujno zLQs@tsfT5R?@$LdtH!>TVC9l2?8kX@JrS=-Y;TyBrf1F=568ipTru{b&_TJK=1m;^a z_@_@k57-rJDO$g0{!5F6kmj|2d#E#4vuG?bdxGWG35P+d-5T$zhuIsBDX5bvA<1f5 z`R;Laj#0zOno*G>hQhM5-neSWEQr-kzV4E&5WAS^4(klJ+ii4z!k z;p_*#jN1=&a8xon?v0zr*g=<;FcGJ@{O5gt;T=d+h8P)<+t8`yl<{0L{(IC9JSh9= zM05wS?ekm3;J5es7?b4IYu) zB#<9L!SWSd?Xf^@LoibW8*=eOllA~Tnex&=rY`y`u|eIKNRD~-dqtX@u=;m@voJ4 z{6eE}RyLN-=goaMu0O}dTm%`tjr-gZ>ahLcGkuCWc1s-9c)N!Wh3}%{{jx45;#*$t zSA14FvlU}~rOHJwKoAY0oLBoN7icOfJX#fVNS}!#hxen@wu$i?MB$rG!$ULt* zg2_b3YxzMCyQn|X;-%ikx3A>hW3Y>EV^;ep86z8H{%!dN(Xz!PV z(2>j^jGQTE@;+meqUQu%zKD`Dt1$c3_KN>IlSMMr>(?#DQ49nmfYVQ?tf31`C~^-6 zT8IZ?|3NZpV6R}m*SPn^|2r$IofL{Hw!(ZdUBh;p({a6Tc_U6DoCuWA7b7Bc%u0S4_iSveqN1W7~JJJeb|-m}SW zNC^g3IhBG8_4cVXle2iGd=U8{5=KNg&7F&Ryy#wyLhO+jk*mhKQ>;X^G9Dib|Hrii70T2o72*_Elm3(uDUlJuu(DV%UQyA}fe zh|HfJJoPi^aO!XrHk%%s%iJ=QIXfr!)AI{c1!nS};b9_fo#T~em7wiTCR!cTE-q3v zU3Zel_f{^SPwiWAWnTVVh>QEBvIL!(gSn~IQu>q@ET#cnTWbN@NMWtTZMMPhy6qFn zffS4q8eBV-X6s@wVEs~Zq%b(~nqxR1js~M|Z8b-Fr!v=evzcIGm-A070<4zj0(E=j zPSr=5ogbXciVIS0-O^+f7bwIQ*h9J79J{oKn`5I!bX{4E?zW}w^)6FZUw6?DyCw>L zVQI}sg9U$(TBXk+*$n(z7x!?wH>P|(`|CH0&SekaO7@5gkQtqb9%{$08}uvvXH0sk zhX3OqfnnIaTaS+5ISVBz?Uns)M$H8^CK>b0(<`RwDl@OO1VSsK>nXC)38i01?)Q>9 zb!AvAMz$)#Mv?AN@wMd*C+{8?MH#gw`}<^s&JYX?Ydc1_M%90oVOf140PWH z_oO|N+Gsg18n2-!R|@(EGrM;(`v4f96A>h;V3ixE5w$&B^24OMMyz&=r6R^q&K)oN zI>5>iW#z&EwlR8NE@i;?I*)tWy%~%~#~TXiis`CCjkon;l@Mm;h3V3^;3bb3VjPdh z1%FVWIjmdvuf>ibn16aAd5HUsO3QqPeRHzEMnz|Q*yzPm_+@hg^sCs$X@vlRz$3jI zTZNoffJPtZ+3(3bj{dDQPYqy}QNSF7IsV1Rt>W>Z-VVf0^&VJEI!t2x|>g z1N2DHss>Nxn*F=KkN^QjiTNWGCH&=ug||%LZ|z3}bCVyA8p)~}lo4xA#-tysu(dCg zwVTkW2QMyq@r?&0zwdaQMYoZy4=7mqn6d~$(%E&|5qMt&bT@n%N_lvBLJ@h|Gd|u( z*4uDbHRL1$Zo^w*kk3h2vrC_pyK;ZC?H4r_>xSB!-XYh1+YIBKkX5WC9RfSR@pyNI z-7lV@R%??8CEV9y*AXN816bZtwY&{?`ElpQu2g7)uu}O%dh#2deqF}MFfC!)LG?K| z0(%s>0P{>jxA^RUinkb83*W{2XrrwEhUy_YXySi$*y!q#EFaoY@s$dfhoh>i4{W2k z90$>eoF_1j9rp6NPB!!$pnOe$h}wQN)zu87KV_`GB2~zHwlc90^ubq8{v(l~{P)=zx_YSyM{>|~q&N*_$wPgdng5Z8 z(0uL>NU^a02mx~E=2$AFc~?ub(1eM-L$OWrh62yWhS!!28%K9q-F?VK?eeRoo<}

u+0s$%k9>kC4?UF=IVFAJ2M@VcSywh7%%SSgF%?6R;A97MX_ZRcm|GP}VDu@0g@y zIwdz+xP!&~NS!rd#CZ)A<@1Y=7wQac4|4}TdEajw42@e(wga{7N0ka+%GW1B_E=)+ zps1o$L4P!oiPE~3;;8AcXO$_8jGR0;%wy)fnPp~e2#0!P)ZX=Nt+y~U4pvM+8d_D= z;%rM-*n`gxUN#(+=!eexj$#1?X||Ex-QzA4Tvmw{qt8EqcV_)ac`nvVQG)|Q@{rK( zg9yT%Ab8eQO!FDJJURV$kh5=aQ<@2(z&t$FiGBJHH{3;A;DVlBGzPQLG^5#fAd1}l zX|qAgVwoDAn>N_h86Kfi9@=Lo%WsZ072%=?J|17k#bD`I!xDv3(i1v8-&nvxV)5!> zTK{^wS~kU0?bl$>m$7XYKre&wxb6shNhlbTQ>e9%zUqbyx)q|*CET8AN;BrCp#UAh zJT`2>p~UDf7{Mna*m(=~_i3&#;x~|?NY@9Fcu~6Qv?3vJY0Y#C!TzWKeGwFIH-+3u zq}A5=C5-kP6Z@VndrUh%_(3?wtXD4vIhgWr3YLn_oi$WHt`DW4I9fy~j{rRULngb^ z%-46W&?j(r-Oa)Liv85`XUC`m#UYx9M@^)?_SIc$S@K3(Ei zj2}n+fi}eBp1;93}eFuI= zi(Nh^D_tUPniD>5RukNyAJ}8Lx$!IsF4``yh#5&_ohD=oPP4}a6nKv2Z7YH2N*JeY zSe_c|Rao}aVi!^Q|03)gqw8waZeumJ+1R!lv$1WXvD3zG8Z}0nG`4NCvDMhv@xH6? z`OZCm?sv}^`Pb33W3Rm)%x5AdDpU(lk*!=DZ7G<_|1*@JKwWv@#iLl2!~QSL^WPm6 zn`&#R0c(MqF=lzKSeg9!M-m_Vmex|xh87dcwDMEp5TyBb?dJDe>E}tBi~d6w4c@YV*_qY9%e0Oob}Wj zEZlb3{7D94Xzlx>L)cVW;eVNO2<=a$lHO|`lu~?2294uiVpv(&Ra2B2zBc|INsbfu zF~|~Hrf4rIJ(TB)?hkraA`xHaUWB>VAs(>W29$mh{rD4@RO$-=uKDF*D)eS*s`ue) z;mhR?{lJgDa74UahVZvnKz`Y9$r}-^D4_Ag$Qc7)c9wM{NJ~Y5H`d^ z)>i!Q*p{rS@#~_y{n^5ak1sBK2_z_`@7>$tOB$Vs7$(yrkg%-sj!P$|pD-BoI(7DV zjh`FITnxPTCP={y20pz58U$f4Q(t7vVDoE$4W#RgvGBN72Np`LT4VOtr`^FMzx5Aj z?MT^TSioDx4#lV~5H_URb_s(D+>Ws~o+Pl~amHc=OVOuKxF2rSvA%O53ia%Uz&k9p zYLYlNmTHI$E8d@}%XIra?$@|?zC9E|uQm=C8$X=Xnh&jc?$ba(U0yefmt@Y`H8! z+|@whChCpm2+h*{~{=02YY0!T0 zs+H7R)=~7G;t{9VjI#MD25=dEZg07r9qw>)v&^}DC!YLq(piY=?|75hU1`X2d+vCr z$doQHA32$AK9*@~HV4{ljYURDW#!GcQUX&lm*E_4ti#dQ$EIz$G`k)q3P5dyqnOKd zw|P4H`%0qzX@A^?FTUK#h?^35_kv8S7&xPxoOv=Bi2<-mNCbIcaTuk~30MeYzjMv} z4Y4KyC6IZUUt6%#v48j`*5)a2x@L=ORC|{*!rOz}=+{I9B#;b9qAkP9X-lqO|wWroH%PLsQVrcF*%6VJOL zJwA-UJ`Suj4M0&mO!e&y^k!RUg#jUw4>u}zm4FS}%yzusiwb%y-UqMsA5MaU32Dg^ zzs*Tli3<&+C+0XKDC1X~0L<(TM4J z0ooEt1Mo}On%B^_PO-h~by81tr7=pAKNVrfR_aF3W^QS^yZmNIv?rl?La@P(|5VOS zC|rw>@7-}v*C(_enw4wBVp)Q3p*)EUo{6hrm=L~^J7`a;GTKI2)r3ETt+7;n4vY8@#uG<)kU3tTu z9$3n^9k74T0XGjd)r|HIZu|^E*ZgW1%|zl=G7KNu8c|MMj%WN_%gq@rxi8;nnH=DI_>Pd{})mm=cM3^@|u?N+$tI zHxvef{XGSEH=QJL?vgZDZ2K(D zrcVreKgcD}nwZDfzor>dE_j?GT#NXS=}w7+tD~Wc{YjRTFG2`dWL`<*wU6Ej`E}b_ zHcX8h=l1IOpjaXrPS^ssWLkw&lt|l5le3|5k0SCU491z4 ziS#VAaUE%uy8tGSX2zFICP4gk;4dR#M(tDyIh^(RRJcHxc-wjT^@ExDvUzjTkUumI zsdvY=fFy-RG0R4bF2;eF9|{y60Q*8Zr`4C*hKsNr+=+o_e7ZSi zuZ25ZoPdHFAwa2nr>B}e7ln2FyCU#)cwO?HJ<*HSYNPMr{Jc1vu{S|c7Q3WWQzJ%6 zP+%uff{5PEHsiRPbR0*aMA(wRMX$rx1DA$e8PKUK1*dxUi9y^wwL0mw5dcoWyrVVr)Bobs#DDAL!sS%#xemE0l{Cz4$`5n>y1^~B_?5;!V=)*&~aF>^!^c@x(;xx^n52`_RjDRsu}cwb`}w%~z&qxe zVrJv~FJWy!Es2ud0<srh*R2R~<^9K`m**-XL9g(p9Vx!@SCTg8TrBe;y;o{P z{_}=KwJ#C-OM(XyPLaKknx!Q{v!s0JnFrw#H&`pbo2vejHn#3pCEGw#Kx8t}#ahco zGCCF^K9LMiO49y*dIQvOTL0}ZS0LHFmXB5H--z^wMffwrbfYOrJm@$wYs9(Nu?EH? zw(G;sGQIrXljM(}kB=H9^uQreEXMy_=DDiU@Gl z`^$AVNQX)NRTJs~17tA+-Hbp;o6!^y(UdH}5O|;kTpqEMGWA?$qzHj!Q94_|zk>wa zf%NI3a_DmVt%FWssO0ropQ59CNUvI_w<7i`KArC$NB~8i8WftE*OKMc*^Jb zk4Cni=BFf{cBg!oZ7)>-c9V^+&{R+XX3f*P<7hk*Nd=w4wl?28yY`z&!5L<*$XN~! 
zj{e-|x^}fcdwV9`!H`Dx-@akjAP*9=3ZlG6w};vLDnIM~kut)<#(d;Wzr~IEr*gL5 zY^-v&0Ahj!bjK;z-?Adsj)5=Y4=jzF7n)~H8@>uJSGubJ1Z#m1OT>%13FPu>|DlqJ z@3%}d{9~E$t!L3W67b~0@Y)RUDvCx)zo1GG3Ka#v%rUOl+s>)hm`VNp$N#dyreNwN zymj7|(~@Ru7;B9P5fAdA-!jedNL7owKEJT0lJCwGn>x9F2H%SJAD3!KJ{h9U2F4-Wv)+L_j}DE2*9@r-cs7@ka$bC;Y<{%WxA6vZfD>VlTv|TMuYbVq!IH zH`@<$y5EkVVufEB9sxC?)!CLe0>9=PkrmDLIWp~UK?TJ z2R&cHV6uWp)%JALd;P{1y?3%X+){riq?z@E!X`21edBL(TGzW8&JBE$tLbFcb0OA% zkv1PkPu#fy1D2A9Yb~6W9Xt;;0#=0RAEfTjRwHP0{V{L;OkgqolJ-u{|Ky|B2wy^-X^UHv+%iQY-Z9L~g0rIC6Id|}^D za#$cLS(C9;=4eo68vxsc{_wR9xZDyS@cmu#u}{zyo7p(hsd08o-^11Uyl!wiM^$0Q%NdrEtcJi=56=JP>9+>Ux2<{ z;D(yUQ3zew`xGe2_mB%goJ@7e(nmMqQvrd~o;4N~P2Sr_#VCn6)s!R##V_x?k3x87 zg}Yzm00`veD&ierIe8PyDd<`B!Lpq`GnK~cmv09gS&##EvM%%R?XF9}021DY?^uXw zAEl`4`z6wkl1w_k)b=h4dm4)^1OV-t<=AZh*Si*-+V@bS!(U-jw*x&JLGH=~TJXin zEtiLd40Qc5T!2O5tW~4imnxb)6V89jT!EUu4Wk=LhIy33@1(#XA z54a5g=KqZ+gp*N~(#GNHfg|R*{Ppff9q?MN1)V3;*! z>k<6(WU&tj&5l|%x9jh-4woStJ&dJ_kgdKi!4R8$ulh>cpkw0!yi_=46dm#Out?r1 zCDU39FoCak+5oS{h-qelDCiX+hIKxeeM>v}K%<&y)K0|XK=(#WhEg_~4eyO-O2rt@yAjgHL#5psa(rH2nNN7M1SGWicY7KsbFzTA)} z3JTqd!2+8f1YJYMug?1H;x_!Uuakw_g;_0VZ`-+=uAF|E^f#LnynKIl{-*GqjL5VzIzeh>xSPAHm%BISx{axZH}z}McdaN713W8c~t;# zXRbzm4|@fVCcy6RE#lNl4^Jh%7h^vRcy0d1BgXrd7Q5-Vl65~QXd!E};f=@Y8J!|` zQ{&CvaYeK1)5nu6#)J`h@{4CsYms8V5i(kc`{*e8-XyLC2nX<1(m!M^Av&c{l!2na z`U?w*y|hfC3&JQmt~}qm?eJBjwK&H{WsklEDHRpB0;5#b?>hmCx2QA;ydUVwoj42{ zV%Y*7?X@YSm-iQ-z^M1a-$(Z=0AY(3g3#k$ayp-DfwqS~;#>#jU`W1Un`iW*Ib{Po zYHNrlaJNH;!DZ43W;g%sGx=S>qg3aQ7gWoCgk@rD7{kUjFps$b#6}%qGDc$uF(!{@ z?BgT?RA=WAc~$C9;5d2xjP~U@h%}IbEqx=`=2}Dj@^v#Ex=Y(Ssk4UY>)u&ZsjtUY zMH3XOd!-e(p>FYu6|iLV{0lTSO9-AnzE?!&^O#(uofs6b5W|NdcFNFuILkU^w43KqmB!rT(^Cn}s}E7@_Qq+5~vT zekMwYCk?yox5vua%FfvoARyRIxL^+tpZ{CxprKp?nijOGAa^w@C_gVdN#5V6E6<(`y^WfB%DANy5E6>FS6rVD^en)G9<2iv7*uZtKA&9} z-MgUZH#upJOZbiyf^ej`xKc4fCNg;$Q?uxHw2aN3fY(*h2>d^$bAQL#fB(!-ZH`g`e0L2>PCj{H*GV1MRy6Irv$`--9$tOu=e6mc9?7#TmV-iQsvVS7WZm*JEdeE044$y zw%{Q$(3z!nw9zxD$rOODiVmB~CG_+js8<7cIbX&L6D7lt^gf+fVdwVtqLO)<0>r`l zGk4PqqqNJVlYRg~Cl)$XTd#94{qSwG*y5VXbSEzFk4TcjD*LmDSdG5;#0;bPR+WP=XY(7ccb^yKSN7DeIVNNT)fg+fghY9n3?!|Af&3P<1f24uM z-~@*6`1y!p=sY(G^oCu8;@hpl0Lb~4F{ic4LXg~XeKaM^vAg*2WyuksHi3(>S*g{; zclwn+Zt1pmmy@yGfI#7uQZ6^^Up6Z2CJk0F?IO=ZV=L zz)ofk;nHca^57`|W>wIO_KTi6p<|%OrB@f!nO?Y_;753iDRwsVgrX9l;mNp;l6RBC7*DKE?|Hal-jEMT+>0is6AG#yDTzu z5TA;CpiQk`pMwjq(x&037K99~vS(LY3=|?SQ~QCm7x8c3aKHuSwv|=%f8Na+@$TyZ z%z@}byG8LI1UGpVI($|N&K->xo#Gq4Dkht)a#wt;g)Z7@T{bM&Vd=aLUO`1D?PN*l zBCT$CB;py-HZj4ki3^A7a4YNoTAr!}VsRzD!c_Vaqbclax zH^5b5Co&kIx|QpyRa=RBHeN)lar!nkAJ(@&6-8N;#r}BaB5NUw*gptA!c7W3?iy>D- zFMK5L+zt>2h90g^+3#VT8WnF=>`}{gwy`&olt`zBLlVr8{tkl(2@o(a-lRu zFfPHiQ&`U^h9bp8r10&Q$oSWRh`a!1QW)^|$U3Y-f3!QPY;yp-eFKN~FLbzfO0`go zttf=2pL}JCrAf^B(dd6WZEgf7h$>^nhjUDqY2VT!WlUTmP>y|_8%btCCi+=>vJQ4u z4Gdg1Ae#Bp;$D2aRM835u8?*)IpT5h8_V)yiDC-to!!DXfDF0USjPDJ$h38h76=_# z`mOO%EmZL$R{ib)|6H!=i?ffmsRGy2M*^R^ zpYPQamT~_+kxTT;{SbB9R4#{=`d`Q2P{~C8o^PiZHwn4zBB?#>{FZIW|C&%LtP#?i z`svnl60GRp8tBseXL|hC0`^6|yEU+3hhn94-qcRr>~$NH9_5ZGe@Fi!zuH)y|>PGA-y>1aSy$@;@G>~4dB^;y;IHdpA=Za@;_n?@DYxsb`OyWx`5Vs2KEEg z)kCCW6oDA02fTH=Mw&c$qt6(G?$O_+IQ|u*9D+{)4Ll{3aLf;W&@WkPmEFB*>+7bEGm`iWIqZ0w+Ozk; z=rcZFjRP7`R~s{2!LOsOY(IqdRV;mSZC4kfWp96}Wz+*m#d}&nK^JrDSE|D^u05Q| zgApIl^%VW{pOe|n*I-siJqtCXN(Uf8EbzJ6n(UFS ze-A+Lj*r(#bg4f7nkV}aLI6%xiQasWB>*s)Oc366I6txClxQlDWDJl6m9SpF(*Wtj zU92q!qY%xSa!D75gEpO`mBmHmb-Ok~2spn)3x=BR%SXtZF1NU&20qbWwKqDg9d=vi z{$kxB6XJVGo+j8fwSD!a1z5X)nWTmJ3KwI!mNFc|A2oTU`K@fPSilw^Yfql42e3{x z04}?spiPzb6FMC@VDx^Et_yW6q$}MR|8Z=6x*T>~_?bTKpqlFWk#@_o|04Jl*Dtyw>V|>E`fba8Far8P% 
z8f;W{V)J`?Qd^^&o9k=Lz&~D0aS3i&p3;|O;u&3<)Kw04hLYp7PcUXzk1`8#fo9bGYKCTE zax=dD*L;*iw>thJN#=fBIYBY^SbrNga6f%BwqI?PtJxCU@Jd1b=S*-=_2G$1MIt$; z@>~DdpORBVR5TO^f9N}dYR5M6r&dlvIRUHC9D~BtVF#&?>pN^8)G(rmn$FM zt5$r6Q~&vZHyhPO38;8c`1xl#0#AN_l(D)xsj9hoqsE>5`gG^f{OSn~sFW54x>>&E zSgqe=&GK3^$mo%*m9PRaYpB)|)gaIZwbg5CU`kX*N0+c!ptPuZm8wtm?aT4%BDyEZ zlz>7uJEf`=5VamP&vos3-lDtnxX$p#7&j}2Q)0$W8U&ndU73sWtf#hU8LccKy*6z7 zDw_1dlHa? zrpdD>vx7cQ_uZzbOEw>B3)6Sx??^cL6&eG(Q9Xbm0=~W>ykT73JGA)jRBX2;p7PIz zzlRl$hG!Gvd37y@w1JQS$&(g+E z@N~MY2OGlGuhj(iWKG?7Q^qcqIiXq07n5VchB+fJj?z9h9HZjeC&a@XC`wgdI*t1B zgiVHH4TciL?-gqD-bcH)eduyzN*%8&obUFQddJ#^UA^M$&uFCd|+BgcZMii4rJUPyBIg(kTl|kTFUch~!En$znhH^_d@P+TsL?&(Ogf7$( z`z-+b1xx|4RecagPumE z!SZfJz2#VH^L9AVbfux-w*>Jivf56+`_ro(@{?@J!RP{Uw;W-=#&n+tSX5G>$iKa4 zPU|Q|Gg`at(-CFhmKR>#m_3fL;@w%HOoG|t^?r`;u)v5fF~1v(+N}-tTvb`LEr{EG z0b8NJUOEDm#ASke(fc%C>jRQw=i{GDvxwK1o2lsDzLf@hV7%#KN!2=+Z4)AsL zjRIbOywjY=&f8f9;e%%aNpvDLm^#xwxSB6hY7XnI^K%ikWGXpAh;hML0)21222vmuk!-4&La8=s)+6zu^nw z)lTin#LEFs^d>fQyIv)po0?YTca?G15;rA(*X#Xix@b`6!OPeEksI0gc@jZS>>!SN z=J{s2Tr?5zi~H0Ai<;vKMt<_CE8)Z0w zKyXWzfX67K;L(wxGCutY`P3tTXQ7e->@;hpwB14>v^|f2R2L=kA$uPs(BX_^`>74V zJ^w{j$F*MKK}pc9QF;*Hl?w;-{a z%sOgG!xN$ctgyOrT8D3Lfa}-I<chozQ2J_A!?Jghuj*e2UO+mOpB8 z7pGI_qt!4D5+%udR-gE>1Pd*f_%T-rqlB+XLS6elEV&{39U3{i*8)tBLS#p+96`X` zc83VS+J+CJ7Bi>Zgj3)IDWx8#F|gL$(`MYa$@L^o52$NAf2KFdo3nJvANExyb@v&Q zt!@Mc;I|6=+}UT;D>4>vvHBNv=|O<909=bUu_AN*)mT$gnP0)@cHE3W+Xu9rl-cQz z%?3_DOr^rhphxrp6JDdUR}>icwMmmjezCnp>qWE>3;g+by4v(pKku9UfDWdt6V!U^ zk?X3Xn)ikNC-2L=Rd?fLBb-8<(X_?L&F0`scuXz8rqrvgca$$4Efp&g2xvY1a)v=Q zZYh7bpH{KcVfblab>{*W5J(O6hI!r|TXwroWrTsW3^Uviih-JQGXQow=4=NUB|o6i zygY+bw58L|y1=b~Bl)BEtH(Ql{9xVVka6A^?9Wqlp~KDQGjK=T^)QTAwlbBwgS zhgsM>pcA2Gj;8nSfUJk~k6baHLirEGtJ1xPbhOAw#M=fZk-{*@S^<43{qF9V90@V!dW5 z#Xjnl%tJ*0;1U-&+=PI-Oceo}+~7udRoi}S<(!bHM(0;>a%VJjIN64tB4W60Ic3a+ z$8Vl=4^ zXMEgsHAs|`c4$orH(|c3?KcI z-Au2a+lLvhkj72_8zr;^u_KH2TX_NVLF8fcA~ybSsw+1qEeAP$hcg?XY1PqGs7t7P zgS_3MrSnI?%sB{#nU5+Q@l$MUS^*n9A2GMxH<@PQadx}n3?Uy5QtvIuDu;a!VW%}d z);7`4*enSof?Dl#Pdr=PnjB;!dO1^HBeIy#6~yEB)!3d^?5o6;T@bj8=;jR@<&ajS z0ap>DFWegPnB19J-XGPb=?AK=NmbGJ-hY)L^?`v8TM|hpzmRfU>)4GKl&8*1v6MA+ zkyA-jFIL9kJi55o5RW%h)A{)xg1MxS7!RnBAE%a#*cR~m=>x{x?~aOd@4x@f`_@>Z zgk-$$!MI8^`p5v=t;VFk;<6b45;v;jkaew_{g7gE69~8&0=8UJzF!qSW?6jXxIJB- z))Adw)9mTsf38jW{X`R>9hm1 zLByp_f0%OcB8esp)A7(zu9cXqXXLYwlMt!nJH)D#cyJJjLB-~>)vacnKs}ZMa`quh z1G)PcqhS{{KjiqG_b9M-e3G_VE>+Ils-u{vt0IZ!7l|(?Jw0ys=`IPnXSyH+AuWC3_1qz@bN-xwU_-Gn=O}3QO zZZWd@*&_SA69GmvDjc-=Mf)%Eb$B@h5ja?|Y@@FC78mg@=i2z@K*>t!deGvNrNbk~ zla`4AO4@Qi7zQi5ZlWca>OGvjn4%rh;>#?^V#n(ZT#)(Wq83fMoIot}(wg4FilK|3%5S zB7=^VFDI3AkIF@Eigi}Yy*^UjP;00pw>mWv>mViuUe4oX$IWwWW$0QeEvIA}y(_Se zbcXQB>FU9ULg{6*fx1X;_LA8BvWW9_l${{ox^x(_juV=aQCQjA?FgRR6TF9@7KmkU zsj(;_KTs+bSTqc5B<%|;dQz^q9o$`(UV?wTFuxN7<}CDbmdwaL`Dj}rA4V*48hTsV zW~Fpl;Nr7MAfUne#>*86&Uwr#QPXB4!j#p6+TQfRM9;p8E;G5OF2cz{s?7<4QcEXP z5cf?KUIyV>#I%M1=_h9{2&QH~(Q&Os0!53O^tnS{(@6McepD1I_e6VHwm5Cua)Zva zU5_Wv6F1D}k!NGdu%XBhWmeI)8r(7fvFhUg)KuBafMu%0%E6R0%+pRhkG?83QZ`F=Mkcwyrh z4`ZE6MJQzP-;FxaJ1UdlR`E<;9ywXqx#{j>Zo8zOFYaE1XB^Hba0jE*JcRaRba2@w; zN_Md`z^~AP83h6bVy$}*DOkOH%Mc$ift9Tna?b&l>xNVw2f=beZHoK!?!t zd{c?^FXw*}HqJI|ETA6Fvq_+7*Vp_7 z@MjK#SW*50@ht(QC8Tc-dr=^~#1--kHRVaW5_v$*vj{w4Md+?o;13*^&>_0Ha`=I= z7cq*n2we$pmk+N|-azn7c4*)n8tvs&l@?vuLVu3d`ZjPevSoNK?o`ETcv$pPS92^s3uGq&Tod>2eb3IH&D_ArB|tuJ-iywgwo(dO6ZbCPIx zgX>)?B8Zp1w%04fUZ0aq$YptG|r9=?Q%csaqTJ?(aI4 zWU#{BH6b?H>TWVtzlwZwZK7UF*fQ2HdFTy+;W4J&jFzkxbi3R{uGt=)eW=ogo*6Ay z?rfQaFw_iQ&Pw*vG3%?aVq>oi*e|y>hdVzuM9cF)tV{nUaxJV^uD_#VPI 
z_;JeiBP1?517T>w_7Jgq?4ST5gjo3w=$RXi{Yz#8m8mM6#3PgtUIIiUkeX9Gi=`rj zkw73b3MB-Ymu<9VKRmB(ToFQG2CHAdBxZ+@S%IV(FQf%Su?1NwHvHfX>j+s`sma!4 zXQ1Z@e~!c1Xa~XA{S$tWw{4fRRv3W*l}8*R0`izU+50)N2GgoveIAw#S!~F>D7h;|PT!xqh7j(E-OE>_(Kw9tarm6p zKro+%{1|kw4n!$zHbfSh^ha8lOq}-wJPzqa%F(DG@;|KAl`F6Z=FlkVDb`atj%^bV z9HhY74l`)mzRFKs{VuW9aKZY)`fxgb1&*eyTO(lgYU9g<{FO-Bo#)&(@kLVlMfbFm zcC{%ia#Ols_P633UwLSFh4HzX1!!{7CGXvuFfSq@<8HkUpPN5AnR(-Ad3-$NYk|*a zqe4!hCyBrHpluC#4%hG^;DTjQLZzk-6yQ>Q&nB#22^eBK*W=n*sx z6)EU(grZjH?L&1@2EcBa7VLe&;wX_pJLn40bG#_m4fF-A-$JQFec`iKPJz2UU2FN$ zC8M!yL{@(c-}aajurO8LV@cdGSqbaW0*!X-_G!rmlgk6~<{J>CynhFJI6%Dl{+quW zGw|i=$GDEcU}h$!m-8=uRX?>$=G#%mx`#GjmInKSnzc*TF667G9r;1n_a-KKPGOld zcfltiE9a1S-DS<8Iqm)otv&^mDUv2CnTjD7+0blsj2v=wnf_!c8-Y~p9vO1Vlq!Lc zK7<#8@PlYX5TuYfw?R)yzm(`dJNFo@1P&k+M7z^R^(i z8w8W)V=6M#$Qkt_(K5cl$&P721>~#`lOJeF!2&zJK594mNS$C<@cTD1sax3N2mW9J z00?SX8e^+gSB#bX1%+HF0-*>=4)q{IEhQx-*oRtn{c<%#M(Cc!VEAJw;?oxnxU%x3 ztpSo#Et5?sfKDqN_iY(c;zP5A|Dnnc@*+NnB40?8djOa!_PglN&iz0@Kme1fT|Mv} zQ4H4vx&B=(q$(CgGGwYcVkK~tJ>yCwtHkPMAPTdEFToL$WK>MN3bldI<2F-(89x7} zu3XbE8+jF0J#Et7KxrKG%8f!QwB$kotJrlF8a{y^?h1x-Xb61uwXAdPKovM7M_Gb} zz=jaGJ0^%wf#u{n&tG|Of(}#nGs9&3@A3wNcsvHhh0tw9nQn0B@oRD@mY^`CRP!jw zcbu&4qO*-B;lpuUJiE#U02WH?e$9ci+5)!)BPDEPtag2L4gSF{$L!@01BfC89W=CCKT?oZk-i<*-%y-y9o?}j46a6Bykhx zpPCRPqR$YY8Yr8!tfs!o3VM_EKGY)xqotv24tcX=$OQ&=Za-73Oc%~wxKi9-WbqvqTX&}7@i4{MI(tw1<4sQ3rfq`jr;mwE@(e(h0ak)^sK z^|E&nR-J=J6U|ccJN>9$EZPRzfC#t>d-D4%5+rosxPlK3uebBtXx|HouV|!Cd4wQW z`T{}e3R?GxVsjj^l9P+<36b;G(Y1@qvc1}|J6^7fEj&nJ!=FjhkhX7;3bL;uIQwqT zkpA!^YcVg*ZAGDP$@}F>XmrZ!-jI)WND?rwbNfB0=!fE7{d+t^^LN1j>PDi*k<$l` zEBi7nBOu5(&8e9pc@as81Drgb8_LH~m`nBTv7~uv{m^p4-ihV!d_yUm_(UsZVLyAy z_gr%S&2JLD)c>Q2><(J8;9y*S_*AES&iL=s*;4OE9~9Ygx6I-a(*11CbPjz|>5K_| zyVza`c*t&@*Ij|@4iKtlCr@VRxMyjast>^#K4&zc2nf!SYgs$DT&U{loM^?hF9;mp zLcRw@#{(*dgrQj&|HJJ0gnHWihKBcX|AZ9-H-L(|-?Ww?!%_#^0kMw!2bS~Ct-;*= ztkdLEEaif=xv8JAKcADmnuLd+(Y$$e^dx$WgYu6zdEK5E0EpAZ<@Z6)IBX9b6(S9hC&dx@N?lF(?IJ(cBf6CBwT3!RfNchE-Peq4N1ZX!)O zPLD6s3K;2pqc2q)44j}VJNx+YNmeQma&;B#*U3lI_vlhlE9j9w{#jq`k9Y)n_kALH z9s7f*kwmAL5iJ-#hAaC`bW;$23OSvSUoW?QEE^0^ltwS_qo6KJgTF!q?@;k|V*LnI^xt<>AX@W|4?a6wR+IQGeSGeWsg^%fEZ97+qM+a>VrMAEf& z0h%H$vfghe1k2M#)sawJ2x1NlA7HZp*fzo*;wSsi?eTmJ9$IBlJc-~r%pt^5n0>_C zFMk-^T7bqVHHM`?207^ul+F|>h@BpM2qH=dIC53jLt)?5+ZUC>EZ)s3u+EDuAFh(NhoW9$iTS6KK~lmq9t42aWgx76 zS-d=4aXBopp}drhCNl}^kW(+12Uk`Cc)w$Oti!t}0p|^&)}x=9AGD+mL?H;#L>^4k z2Z77Qleqy{2coD#ze#H$q6HQ?ks;OIKys}OVt;!%!rBeL8L^u54{5BnEhqZ9BSah; z{~kJuf8~YD1M!q5TW2^dxyP%27*}xY6cD5C(wH4I^RqhG74x*;)aUuENs30*;k>&} znbnuZV?07d^tE1uh*_^bayke?_YA4hbkCjEVFhXYm)sZ29}xFCk8suoK%%igQApIjOY)Jai$ z>`~FJPYApLe_?%2#wu_f=F-QEiN4zsv4tq8OQ46VMsw;2rZF^v&T<_ z+F%8D;cwy} zj638Z!AkjzIB#8+yi`)QkOJ|F!9enLS=H~8L&G*-A5J?| z=cSwXO|H0heNB*5S6r&jmAniGKj#C{9$qO31skJB5Yc@_9$%NMDf(fc0^d6HCc5Hk zeJ1!`&vW?lepHUe3mCMWWhNDgWagP7>d1&pEI&78Fugww6S=&tMu1A_-lYS(lgNi-w zHitLdvGT8~?#FO4j=tmO+Ws(|e)WZZ zC)21=bvsl2zNlgU3!VDY;p<1saqLy;Tf8#>d5hJZ$v)Dw??iDtb}6xeKVtx7+H}>A zQ`wK2@IAG&sv{jujAsrczE*TP``S2F`3DJ3`4xT7^o}^joE#b*mhN#JvvLEgRbd*7 zvZAP^O-@OQzo#`>3`-p57AUO0KoJF1CT$)T+4Vh5-`lfluB`g+VC3g*T3ZxIfA>aN zZ7NS1I(>gR23I2-bSFGtZF97X7PHUhxsybK<%D~ds+8$qhwE`t<@(_7-efM3a&z!~ zMGS6pNI^p1HpwyQEI}HV#>-=z)fOz%` zq;dTKgo$R&mlFbSsz& z{KRiZH#13_#fAROw2WwN29aYEL$KDX%LKMuP@2K@pAzdv|18P0+RJCbR&L`Aeg!o- zz!PKy=+^g{T%@0}J|jS{@mDKi~NGbPPbU?&zB_G9^s7@W&w{d&P{aO~C@h%X-hho-ZPit3Hl_zWp6 zAe~A{H%NydlG0t0(%m(Hf4WOLrMsIMq+7Z}y1Vlp?p^mId|=HQ&UxRxpZz?)4Wv$v znFv@Yp2K(2)cpOrwp9IyOh}Ai#=AiOjKS{C(hVAZ3y=6Y*(>>qzkAKSnjdNOKSJHo zI={K^!T*&FBfBeH1f{&?%noZvmKW@Xa8_f)#rc=#lC2cIH|3pThp+k(?%xr#^(S=q 
zHMV90a}}Hbj-ijOP5S8QQGI1wHRywpB?cnBgk|NC6iDmD3q9%6^;R1BLP(Ibjqd$C^?_pGBPm^Y7FJ zqwI?7=hFL;PLei44fCU$muQh^;IY>CI>IwDtg!I}Zn3g&8GH3tmef4PEN)emy44h<8w)>%={{iG%+qBNzWA23YIGf?9QaSYcEXJEJCIP^^74V+#Xz4M#Od{h zre{+VYqY25)_Rv2_rI8;pZU z9r|KhlX)p`Xj(fO_48peDeu`DSDRH&Dx~@sHOY)VS#vgeQCObNU2Imz@ap#N+RX>L zg$}H=rz$=@kmqS8)4n|~U+E4`$+_klrzNy56@Ji^ljSu9RKd_a@7&a_7yIg7Gm=C@ zG3=7__>*PcJSWfggvpDDA3i^kA|{@D&^~azkefMuf_|ypp?`X|8-+&KL?20D6pZ9X z0CW#}PMo7A3$FhdCIuuyJH9CZD19;P8T<$j1+?V4+l_`CttwgnYOF(7z*N9#nqdYD z=}l)OGF=P$9W~*PcU0cE5;pM}Sn9R&1Kr6`rS^m2FkCz#yjETfm0WXJ3KK(Z7wU99 zrUty3A=}44J73_o1=#Wf>J@O<);U5x%Y;po>$j(}K-05~1K0=?-DrJFs!ve{lo;5U zseW@O2hw=#<^Kv1+f$A!QenprMC8HOYOp3cDe}={3aCcB9{hU!mEHI!&W1gZ030TK z+b)m}R=w<_@RQh7rwG7SY!QJ?p9MoYL{Y;be2(lIJ%;?C2}Dm1=XHLF5o)eic^1udQviu|*9V8iDS%G*fT z47H6LlOhbQ15wntS!GOt!vC`!fMI4o)sUIdOc+^-{)5=r5q`Y~OdE(V8oDvd$Iyai zfMBl19-Ytgf+HrrUR9iIfbhmW)nHJH;qjlxH#7Eh2Dvv5}B`fLTA$I&}Hgtj9H zCs*#sV1cH~wl0A>%hB1zFLemT^5|SM3zNVU!3TSb?*l1h{<&e%R%Khd&%C6j`v0`I z&4H1;^!S$W%p!M-f}&{2y_Z8Oc7fam)9&VH?dM_KT$_&<$O9*NgK)M1dE~c`7nV^q zDg)RCgcsQ{Kui5dI+)Yew##iK8T-2gTbivKdoiMWGCN19CQTh#2!KzE@!#*t_y$M# ze&ao0LB?3`Cnq8mql?cQG?@%%HRF76=8@7E+TJxvnaT_PmEt^7CQ-N{d@wiOcTk}A z_#oypCi%;my8>AkYwik06>W0inc%Q@wo&>CPWRC1zv}5-RXRewPU6QUr$25lyZiff z+i7+sd&jMjP?`l7n4a5~|%%DTqtV{8$;!RT-eDB&##o}ANNxCl3VOk#Yo?O|B**gr*n~wF44GM@hJ7;Z%w;CD1`Iy8$EOyNP%SA6S;^Tbl>lx_ zljj}k+Pn9}P3kh?k_4L2q!K?}I{ad{F%wycQL%mJG;eI-+DT0Lz4Me733VWNSgX%D z#Fim=`eG!Dq`Wcv%&+OIuIh;8YAna~oU8>n`|`YyHhgbo`Q~@706C>yM0ho*iZ_`f zUhw7TOe7ciy%m~T$C=}A%U=TO{_j#!I19X{OE!i@aX_yBs4jpUuzcr-SeM-PxJN*i zi)#tO%n1h%#vmAu>`fNp)*{Ic$dExmI5A+6(G(A5Efysdkktpd78#JR2T)nm#Q+Nk zJ)d^R^H3mU(tyQDQd;jb@*1FjZEbCFUw#6E&_yW)K$vFzTNEvAsFk=7TO+VvNpA@r zn~ur!@E;u3q4 zqa*;J*x9;-74mThvNen4L@bu5!a!!sf96agi5H${4CNXG8DT z#t*rr7W?RYITFQ&R*r+7RuYN%Ud2Bn)WTGk{uO}Xc8RN z22NqEC+aG{ScDhqr1f+v_4QLZ>4F`d@>V^j^W)NR7L=^Da~eBPJcp(N(sFn_3}2PA zrRKO#it$@!fmYwAisV*4WqyXfWcEP~9Cv4QYAtsE%JDxO%JFYR$gQNON^kU>Sy9EyaE#U#~279X$8)(qFsi zJ1eb2SF_xWxc0u7=#-Ox%gVfM(6*6fICrR$BxcKwkfTItNOj#WPT?|=uwH$zR{Q;t z%cUes_^w+kyEM1tcM(|5`(_y=V!-H5*77rzXIbIt;n&%MBTc1_#<(365oJ}fyuT8P zba$uIlLknC*||S?bx*QoS8}waq5rvwUUg3B7VP<%_LL&j^Ds( zDzoMs^{C!$GkUs2I=wuc;E2Y?y^Cm(QBK>h!4ycZ!8^n)`0r(4O^3^1IMt&&Heq@~ zxC;xG1EQ71S+cI;V8HvD!_xRor22ZWaVNx+({xM6v3W`OTq>kW$ouP?=QFR{y57~? 
zG`g6^B7KaEY1|XrR+W_)w4Ot}F(FCPT2Z;u0~yR7@Op z7Xcqd<30QL_wsO#K|?=Z0#I-$_1hs}yU}#MM=+H}mdV~@Nohqc5I&RK%HXxrPhc5G zg+x$SnKkJ{zQL>s!a-AIaBPOUDeTNnyAF}_ng(=)FQ9d#kjW@eM48chm1?GggS&sh z6cRDJU}RF5K&{)Pxke_dcHLL_oLx>(8@yoxQC?1m?q)M;-RS##s#^MJwHqU*8{vI8 z(3d!Wcw~mY-~rI!l#R9wAKmpQnzl^r2KMJ_)OOSq9QI`ifOPAG=@Y-%- z`Ex-2SV|fWME=()Fahl35bCDD%hN(*TPcuFqlHta8Y+$ItU>9ukcQ!CkswB#G9YOP zFtwewcEix^88j0n#bAS2>RXzKW4w5u_kekWJcaLVPL6b&oAd-A=)=XA>VaeQuDokG zeg*u~#sOWbm31jUS7G}82}*EZf&bH>{6s+Uoz47ffH|KxMd{kpa_F77=?0*L7N*&C z@XeSV4P#xO1E1ws5Q_q0vlL-*57c&af*Mx=J3sO8&3&3r`FMV2_*~EbnHfo~XafsPje-4t;JDR_{&Q&d3rV;W|BSQS%)ny?Hnhd(Olm zqxooxAtTyD!W)!4-4RyNQWD2oli={Xnk*p;nnBfZ{8zMD$Sdi5!GS1VKjF;FlHRVD z{-2${Kv7jya-Qvxp&pT+#`$<3g$7+@KH+fVNr#T8*O94UFf~k%TaL%}FrSDM^|sX+ zeAHZc=Oq*Q2Qv%uy;(jD$MaGt%M{emC0~TMoyv2?LBh@R)AWqLvYz(kS|1hZ+vvF z;S*8}44G;8z{UUR=CYPvU^E)#4n7zc(&kzGyhT@XZ&Iw^E}I@0DUG@sHo9o)$KjM7 zHvMi36wn0t&PU`z_6}w%39iiEJAexkMY%u-fn;dt$*zbn1C$W>nJx?xo;hx-(sa}^ z6l3#Dj{@dXltDOejgM72@NK1V!Fi-}10c(IgzS*faT5l&|7QVY2*^q&xlnzHc-!^< z1@Jq{&^CPCC~+V@@B!0`SB-*;aOpvk(rd^4g09gd+7a{X5)_>UAHnrE3s$VD9HwD5 zAj*48N)p&r9CezHx@DjY9r!~m0s5>ff~d#OO(acnC%oe==<|Iz2!|tW^>B(Vu~Rcv zwA=7L{Fj(AVt*io+8AANWa!Lyr_gSgWxoCruvRx=6Yv})OeHxdYaGHA$=;y}!vfNS*!?J3c=uZ zML<6j^16N(E_^*_4E$FS`ZbISG?qXAb5wsW*(R|LVZ&Ks4JTm5uU&E+V$NL&tFc*Z zZ~*g^PsK(E0s~+^&U;Ba-5w|SvMN_ZLv;znDn<&wM|No?6t)f+q}-1;JfFO6W7YGZ z+z1=b$@kO6`PNV|=nYN=ID_+j;!aF)AA$OZ7FP+|x|ZbsYm&BFk>Az|{mI*(6W z`;g+9(QL@7h3J7hZ#WPW`=B-#Jt;|Xt2FGRpw@hX+l(lq*^(fp*IIQS+_3J>&d8Ew zmmyw;kU&~icroIeBAr`1nIiW^C|wv;nNX|pjS!okFL$NsJyR{m^+}eP)y(EOQU%kB zu9aWeNp18ma?g)CwJxK4_}IkWd@~go7L@wGEXt3gX6$BnFvZQtk?@TCN>EYLL;OjEE=%b;tbA80!Uf3+HWdXcb zy;k|xiOBIBJhh+%0XJI+xJ0eZzbtIt6`mM(&uta)F73PJef>!XxprMqOelPr$Rh6Y z#LFXH_&{{ntxw;|LYc}@7dH`KO@IPleuVGyyoYGJ%8^_>nKf$bUICMK4RtWIR5R^` zuS#IqacFg%`1z63VeGf+LvQ&i(71pf$iFTxhCT5VmYMKyzTdj-&`ly;X zcZ&avwL)+(CRm4I%F_tlCFV(8ygK&kLt^~dGTrQBE8+5WP+C?R&8#iIo>BleFDo#7U#Jobr5dct?CHG|y+Al8d_K~f z6)1~};jG|D-&7^(f&UUQ`NR?WCpeUY#i!69wuV;W8h=^TzIy!J;lZ803ccz8g>3vX z@EsSe0On`C2#-nvp-2vRm2kUoUz|>^A?ZJk^ijMG|u>o63iD0N4Wou`ZD1 z`vk!X_U9f(L;gNN>d@7!IX8=91!?4L$Fen__KfdcL3?Toe43r*L2oBXJ^^ycz_BLK za$qGF{|H|Bjg(%8tE_MLrVM7tJc9$pV#tuo^|7cmisQHtO zY!g>?$aGph15q6hgy|$4~gM|SVQOq+m!l%4N72;-{_}6sF;=}3+(xtK)-+6 zA+w7lJLBOIrJx@8@!zf@A8BIe!c(maBRg77bRIV~QjB_IWy_+#Ec}en-?rvoy^)+q zZQoz^>Swi+CCW5(sOWf%GCWH-$EYAAznB+Ywj;BAQI-3m^5qOW`x_AY5eZBtVgBdrKX3fHq!Jm9!t^a&a8q$7=jUqlC!@2{;AFcjbOW(B` zg6G`b{>Fbh_Dn-HayWJ9GySV!u%*mI(hfsktYktWuU?s*-v_` z1%u!is%Vof)j+zX+iF{2&BWhfyu6SRWt^%A8M!QFea?)*8<1Rb=>AFw9*jf_OPq?r zvHv}B(Fn^f%LF+jAx^W|4ZFny&;gZt0P=_w$W|*>I-XWZT3T9F9fAsk;E3}Z^Az>E zoXt)JFBR{6%z+T`RI`(Lq}Xf}>qHKEFB=aJgyyp4phAVix_3eya`asT2ZfDpOSX=M z9HiA`0|LuB(P(z6-8eRt?b`u=P*u9X6Vgi!reqgcO<{8^k!RaOhyC)#nE@9ezip@H zG#ieY%Ee5hp#EYjND(D){Ha>5Ch?W@CW*AAxiH@9W*7VK-Pjs%UylEMH<#uBHN1cU z@iz%W=Oj%JjL0{kCFYg$x_NL#9|FYM0(RhdSf|aTfBUK4q~rM&Z#TPdkM?`A({9ZF z_;~=B(fX_?kIy#xR7%y$Ka4E$Hwg6FsBPmrzTT|=$WPBbO?_yoVsb-s&P%q2jNs0a zM!c1mtfPOJdpKC9BKu4BpnYR}%AtlSCfVbzU%^~v@9u6=!AC3_Y}T4coD=330$JRr z-`i{)bKB@4jo>_Pz2Zr7kdr~BzOXF1kIjPnk2^=u$8_+xbkxxTcc6M%e}3&~4bnY$ z7HD?wP(WT9x>!x*U(=DlM34S%u!+pF`zEL)PvMjBsY-P$vzKyR)6=gDLr$*+t>0}` zjnlnR!q`Gy!mTsnaV?PcayYc;mLiRY<~=d@;Qe+h0#6V3Q#A5I{ZMwkESkm3(adKr1r2aN6oV*VPFbZSv`0II}}W&Tk=!w9-UO^J4yXTL+|YFFwq1WnGws z5%5@1mJU}z6h-?w9&LM^8g`GNaE#EurSySXIknpLp+MzZ(hI6 z8xI~VGh4wtvuM5au;M*RL*8c~7?Ya{_qPQP!~zP$IiLf%fmF4z*#>0-*bxiBbO<1p zff@Azme_?8iHx&_-Py`4oS&Fbsygq!C^A6AJ;CQC@7D3NVTiU089|(QjP(6_;lyyi z^R5V|EIJO7xER$9FRtSMW23tGB+#-$b-J82D{7}J2m?3m>U@aRs;t^5R{;gdD(iG~ 
zZ>kqr&!s^@jRf0>9m&jQnwEi4?zVc$l!kSs_0^5fUJAS;Mv=CQ4dp*r@o6%f&p5pM zl(Qy1I+-dxp>BUf2O$v<#*RvL+ga#tIEBE_(<2#nlcv3eQs}Kd1>rJMp{bL#Q|?*X*y0Q1hqhOrM;~p!B|$&aqPOW2z|zWOW8*v7t&N#a_)c(V%GE1IbNZBJ-R_$GqEwkJ(5 zRVzJw^<%34b7h+{OvzdK<`^J+ z#sEODVBWR-hVQIU-JXbQnzm!J$26+?ayg%)+Yul!)jU;`U+)nNPcSj9p>fvBMj@pj zH>{iNS=q#F|9aTqV@*ZTsWJ)4h_942NX5rVL%^am8@+E~4GOY`%<$Fk4l7d7|Dc~S zopeKoKWm4{W0gHAOd<2Wyx->}g*)}cMlCJM)Rf{#cyD%xAUfS>Po60JDHd?$@H|Z) z5tbx(GU2;OI6hqBd98ara4S`6sE;OIY+duqW^wNeo{oflBzvb-9&K+TxRKfaqXtVU zvnmz8=mRLxji$J$NHL&q{W}r&dCvZ4e!`jc?-V*6x@L;%{`r4wysziYyMno&qe%Ib z5in-@P0l2GXwejHY7h)nQ4ZhOPh(4wV!LswT1OgwNizBO=fAjA1(7Fb_IK6E^8x1& zc+9s>;71E;AVG(b_)F>}2#K;So6>j2HLvB$0%ZIMq+p5Pm;&RqNBAdN%uW}Em)-cR zF_50tcY0NSo3fRM*VYvb%-i8%`w5%p_-bqc2Z)qbp-&oFtav+y>_D#g0;3cOat0*i zwyJ<8+7Ra)e(IN;A@n;uZo(Ty5t!u7svop>J=_XSloMpQ#{fDO0E>TqQ5~aZEL)57 zqyAz*pY@wF6&?hD*s{V>aIB~5)uM#1H8G3vM`|L?Te?dauT(s5Kp{@2Yn{s13A;d` zX8UVm-vz7LaFx4K*NajT*P80$X-e^_FY=lSa;Csx6@#h z5?QDWJXObKRA><~Pzpk+rWekzdI^0GzQV!kaGgl4i+XPX!dHF?+4Oha4j^fUaJ@3A zAP`B+Yb)0kjOA8`Ih*{y=JJ330sdX|X<`iw#rNl&3y!_gs?QU0Hb-(+r!__=PsCya1?Qcas!UmBaGI(M1CWL z?s_0QjtX(h{h|uYGwt|jEuxW5ba-U5zonJiir;g9UYm^lgNiYrdc3u|()VjeTbJKQ zbYxuI68ziYgCupyZnmah&(Lj^&^spI8f7)rFI$*yr0d7jP}!85I||pL@80>1E>lxR zry+UltooKIoj`P7tMbWzn7 z3|nh}CiFC33oE5;tBU^!Mi)eTu9qzPU~GpZ&rePCz(rG)Pb*V(OS*I!>?#DFDPOro zLj+_z0W51xDY?qTqLRr!wzkoe#fSe|LvOp>eyfgp1)^3oMG4aCv@-0BhB95>u)l~0 z=y57V9)uBO{95Go>^bs$ktfqxZ+ug|nC^>dS9s*v|TjiaShVTNkAwo_Nj zZmaZNNl@T4yaZmGzRBMirZh=H)FtA*CrrC*{(~Y4)lbFZ-{G0427m{Qwb%l{FXygQ zC?hVhU9=AxsCSmDFQ9!fnY!r05_t>rO5?RN!;dW|^)zcml0&4Tk76|T-qu8H?p~Cd z=0EhjFeW3Q=!E(Gx)R4gT*i zpG+3LJ9caGh!D0909Y-INj69f?H+s59{@8+fsT{>B0OcDWqg3ecfBBBI-^l|_i#BT z^78J=Y%mCeB-Ipq5}d5=UvQ>tzdui@J}LuQakU?2hAdm7rc{psvGDu%v&uiqUO^TL>@{* z>cqK?OefK!3M9%U78OSfO5{~t7vNvou4Z)D@c>YCJO`9PJg^25iBLOYq%Oby^wya8 zg{`-1!~1G~y3S^ir_^l8`*kif1;(19aO1ThbHKC{G+@1^t+QcFlxzEM(Br7=G%lVuUcgMTI?I!4lsCi zSSx{UjFMGntG$xFS9MOI;1vmBL2GFJEWBZvUejOF?$>%#=+0z+E?=kNa)K^v({kA2 z1ZuaU4{?7FZ1Pg`VvSmKoQ1bI1Qw7E8^!2DB9fMSqbt|#aERJ4*9H6Lfj*4Ot%4Ku zZ`BuVyhv&5^00P*hCq6AtEchSVS|AvW>*kJ+oYAkoibX1pWfwyNi&nRDLeh0E^n;7 zOEm7Ge{ZDF<&x7Kdd0V8cl+G%41UvzFg!`Y3Wns2hZ8pxy2D<}z(50bV{01tF!;;M z$-76rzjq#7Gd%gm&jD~)LKkwFE=R;`9!q1JO55e#1@(YwpPT3I{b$`bS2v$qgeZ@K>MP@$+#~*G8;2x%iTI!}GIxz@M-z2I$Rar_Qu6@3f0J)!mwGl57 ztW*Va;gCqKj91z`1{u`;nI|GzQy&8PZyuR>#@ur(SdzjU7((1vkUof5y)49R5sC7m z8#iMak_%^dSQ~v-fRhM~=#8K9YquSUqq&??`8Ez1fIsO@eDSHF+A-v;|8RD#gt#WO z_dC8!)s7EK+)x&T$L0!um|6i>U{>Yex<3`l1F+_sQ$WAJ+x9|r?Aq0gX+<_7IJp|M zmnin$Y$GbZ2G3{3i;Rum%n=^M%^?oR2rGiskM7Hq^1di@7x3?8axib&Bh`s*VFi~u zE|DfX+BN+{S~*a6swGKg1IuwQbW>KJ>#}!ssaN`c9#;qhry^G%tO8 zId#HCYNO0~vd?U>d436|#!Dk|8N^L0qCdM>rPk_-8wHQVPTzsZ{a~By?0g8^vP13! 
z(p~K!y6na+hCc~vU#-ds-X0ms31_{&_;QpACF=6{@B-I8ltr)yeJNiy%OBAVnhQ&noT^<8lvJ8`eq zZLmReL`ZS;JmS8%6UwZJB8u;V@aD6}8?UijSSdDW+R~-aYme}~T zlHb|B3^cL7I~QmSXWA0&!@+~EL|=1B@COD@xoO5~L4!j@-rHOYo}L5@|9XxFH4JXk z=G_R8PJ(=r7Yprft9n-ln=7Y3&1R`~T$SkOXY5=H%T666zBeVI#6?<> zBoT1lg@^>f^%|gvqAOZY7RX}^1>u6fBq1DDE1*JN;*#|~7t7VSfI8p*yzY;svhkxv ztO5{*6#0M$;cYS#g~w*>$G?U(4|o-JdlUIn043m0J6aXH=FkzGQWwwU8S5XI01X3M zURdWunSQHHEqo@Im2BCbYo|^1Onk_ryE$04K_A4H!EKeKueYHKcWR^yD#~J6r*ObOc3LKrs|oP`uyg<{6tc`dnIkW3q_ZOW;>tm!ofqJ+n5t#LKL`QLw*YkEy~83JA@N~~$T9}1AGz@Ts-h?UcNJh*<9ZO=IE zCly^`^fv0ur;O#Opo!dG$Z*pi49~H*86Wu%tJX}7bEuJTZKa5uzP#%1=g&c(&>Gq| z;~bHkrjPH|H=-NuUBfurEpYS_<}^e`ZGM)s2QFA9%g!p{floHi@hGo{wZbRL1&631X12=h^LiY}e0;%n;od7s?hY zaVWR5(&N(ryKk5hsohDb834gHHA$wuP6e9(q=f;$34Jh;sp>h9voDQ{%l(EAAuyEx znLHD&V%_+Lxg73D+D8>!n1SS*3my8!I_st*zzgT6e%jDm35~*JO&xM3v{|UbJ|d#0 zQ%EoJXJ0pU`SR5XG*Q`pt&P!5KaDb@5#hzDv>dK};!~v{12SY=CkGySv+2GB+a2Ow zRj~k0_ylgi2No%f1*-*s;fve>12+J2HV=@i$7@W7tQSKY*MQu4x4?gklR$PYyBa|T z;$7-e_hUTZbABXM#4<4Fx8|o|nE3%|G{UP&nK$Kpc zOAMMUf2e59*6rmF-0%DXrXsPf6VoEanLC5Yy#s?601iB>02p9!Qca28uZOc*8!!lB zMTtDSA7xhCbTjY1iWTLjkjC%EmY3ZZ36ob-O$%wWZg014D2RVWo)=PRB#jAJf!-Ts zFos^igl<&Fm)%HSl0IDwqkBG{EnuzuNB4O#9D{71y=%&(lfAOCVR z^arG6)oa*|Yux4gNxQ?pnFk_2?BzwLBfll)ACq?$stR{EJm0jA+*>GW{iU~EC<^ni zPnbNcG&DJ!ER5i>F7mmMba4sc{h@K5>PQdl0lnymYU}X1H#z?j=DlcD7~8PU*Rh*B z#gW%LdEKJeU;t&h-%k7B+slvk`y1~nsjDVZpm_LhZn*N|w?m4aWZ5cm7dyfiVD9}K9;)Z=nc$$OAOBzob)>24p}xVqj^WEjm7!(zvl%U zQ)bQWC8>KwQi{jVcw2QF_d^3Oow-Fk+Dh)Is408vY=eS__JhLrnuC^Z=pN_i#pfd$ z{e^~E^+vqQ=kB+8p0?4PMMk~2+-bef7tGqC&C6c1Qx`S7fK;2fN`V~qVz$OH;4J-FP4J9Nt6PUF+E3$>Beg&@wf-!jYm^$mQk)G&)b zjp4VF^DsLf%tQhp_H3|4{Cp!Y28ivtMz6kv$<(&e$uG)p#k|CX5J_kRTp6NQ;O-@51`eJ^D{U*{eK%pPMuaj_T`W5mc$B z$J!vB1#r(D)i#sLJ+`#3mEFW2_{=3iOZyw@l4OBkndA25cT+`nT)DP*dQP?aWRo6{ zP=-fYf?D?j%yUR{fSinqQAkdcc%~VXwAt)QsxmW{UZJas=hCS@?E=t7HGyJK@YL z+rx=9k|=&au4rGLwNxW`w-XuOd0Gv>!?gGB@6T8xYabO-E=L9STa4KXO%c&HED96H zfei@fCyNX024&i6@;ezC3R!-c?x*t>g{J2V-^2!aT?H>9vR1stnh(eiKFudyG}>i} z-7No->LY7B5Y0M27kp&q$bvSTOdt}dDmplDDbQ;fub=fjkqVWk>iagsdX}=UH2sx#6jIy~T3gw})Lk#5ipp&h+hIRF z8~9<>NtqbYnnXP6ogs)+7Bh~R$ITF9yZt^`xoeMAir{M;(oqRMsO>!+xXod1!C@nD z2G$BV0Xz(y20t|+z6kx~!2A@#ahK(m^|XoQ;B9k|x_QmVNDLdwxNH}aLO4}t01Y>{ zkJW4vW5Ig~N}_`ZL}6EVT@vbxdCc<;oLUzU)Tz)3Gnj@Ix5SRJKN7ekI@0Oezl zFM2fP$-G1={FY_=W(9GPI2i=gc%p!(lj*4A;n)**@B#~{9pnU#n>Hlc^@;2#;$1Mt zZSf!)aD96ZDN#FysYk0&=rtCES$F8}V#mr*cX$Ytse=vb^Ss>6slEa)WuSWij=CR! 
zWg}&sF;Hq%WT7hE&Yr`p7wTcZf0&elUlDtS=rN*SaRh5r>6QAAsx=OA^njwC!m{Rv zAW-8V+nAnpKyGC{eOGrw0cZj4&N`o}g9U>_R64FQt3@J#y$KSp0{mZpjI>PXsbDr4yvxnm6{dq6T z^{{Es-1`?kM&(N8#1oc1@)mS3WIB2qpIPLVb9v-JUD58fq`}yQO6akl%8*MgX0cKp zSW+ZFwp7oTSCT5vGzhg9331;Y2u2aQh$=MLl+Q5;fg^3dW%J;Cn5os`b*bd*RTC5MQXUn0 ze^?|?4QJcW#yTz!?GNG7=qUiK{$+qw^UvL7FLwy_fybYnJg2scA6F!a9v;8k%j1Fr z$&%L&KZo{E$FW)<+ky(CG4RfJlYU0~gjyl??&k7YF*c|%OF+#z1@K({r+E@y)!r`7 z8Eq1O5=DJZtq}bmyMetb;JkSsbCFXp^>vzCCRdPpX1w=-7m+aWEJ#Ci6;mE9{XHpBl8iW?O>*_P0`Aaay#Jg86bC4}aOrQae&YZ+JdNc9H^iG_bI z1CMd2?(2DMhM&nF2llg&6y=kT)_zE2_TKbW={Lj&MdM_w1J@EuFX`>SD&>uqPVKF0 z7x?JT6e`9c5fC49oB-eGG*Bj&jBnpvu8KsL-@k06aSJ%Kw>jqkSj-Ic!ZB6(w60j8{s#8mSD?E;ly9>z2v}eZ=gvg z=$sqKuu&K{6UPc;TdHF^ro@2$IaI+HHEQ3wbQf)!lq@Bqy*xFvl2Pk_h0oRx*$@|Y zF51u+M9~b@`|z{CUDlfsQ}?^AxpUBjJqMJ!tn>UyNhS9dPCjhRr&tr=9K0+ zvE7pR^7Uo!bSrEFa)~^wKD9*kF{!&2guB9kzSG4~jb5NgnVmT!#)N_4m;cCPVP8Zi zjs5o$z0b}!ukh&ueihl~ag%QHJ9Jpkjb{e&p~w*`?@x7V|uAWk-5k34#W!{LtA3TXiaF{}y!}m^`{hVbH_9^!f^>UcCI>9iF{0h_*Ngi*+P{e%wNZT17h7Vd_-oSx3&GIMf@hR`wQcak5uF|ZSZJ{?Wr_W80s`<`O81; z;6TQye(jxsD1TNI#N_d^nv<6-`^lTFo4kg$gc#+nOJ{dRn%}voQ&pxNh_8C-GFHQc zi;Z^g3fAcHOJF+z!2FUB1Qjy%T(F)gRv}-f+7%aY?=}m|81n;Y>JF0Bo`1qo%qPg% zQa~U}Mp%ul{6rFdfy*M9ey6OHr#vF^-OI{DRu4S9@QZj`(QOad%pDjXCHSx@x1k@O zJhJV-qmFw?az74#yISWn!J#sPAkb;zrTbe}ZP+nH%z+zt|JU}tbU>SH4YzR{eE*Yj zn(+Jem`JTBKi}i}OlFtrvUct=Ui3|#dr zNk3OY+QF*3I^Nwbv%olSMKL*qW|%sfT7`YSpH|Lt9Qe2RyGwt(BJbyKgqe~H<;D~p z=bG(=>)%M6=%yLjxLv?U z1dEqNR}7hm-SCIfc=pe|%9w;+z%NKt}x_Q^EcqFFe z_YgCth1AE^yL~tW-`@-EdzDvQTn&H%8+kn2cG=5XTfYt>a{-sQILfiS4Z}YALZ2~R zS(6SL+iQfL&fM#b0b&reLQ|osco#6XFJS%G*w+q2>W^OfRjIB49bY~iGPJO_5w~e* zs!uJ-4rQzjAnbL>Sw&;|%zZa5Fk*FAiSyle1Oj7Ck#s29 z=;_%c4s)ls0rLPV=RQu_GAPvNVA7Eiwrdc@+&8QUlsT(I0@&}^|9SMl;H6@LEB+j& z`mS|r=y~N$=sOd z|8vYDXjtk8c5meha!taSrysX+n2NBYmEWFQ2L6bQtrj}ryZ>iF$Gi9qW>HgDC!2#` zJ}Y9$ttjGbUf{!s@R5b6{&@K?p5p>U6$T9CR0yT=^F+nK7EIc%>JtBwV^7L5)Bi=b1#MA#}_>x zk#ZhpUEuNe;J4LIALdKH@Y{?w%sR#WG!nF}daz(gKmI_Jy_k;Obr4My z(_2X)DXQhoIh)jUe78A79cQ;QG4Q}jp3OO2Jb;26P!q3+rHP*$g4 z4;vrc_y%q`dOI@dJy; zy)U`V=v2IBY-;L)d@F0Bg6?BnXku`V6v{;A7zEHZ?o^{Mu29)M4j0pOYD~CUJDCQm zXF1*?vuTo&kx|)qfgIW*+gu;k1Ap1Z{Uxz)#e9$CSN#xD)`0>S#tK6vb;?{C$I(Sc z%3;!v5DBLLJzh$_gZ(O)0sz3qlrSC&1{@y&TP`XiMAtDMdGYgW6_DoCuKUXE0si{K z`1p8JmAR@;Gw#So%5q^mJ)h-7GO{6pvO}QL$QJiJV3)R;#9^{Fn>m}5F)jNpMdcm( zR1-Udlym6?`IlGhk7f=$FA6nk15}_dSlU`W597;X7sd^yU3ojy{s{afDsC3g=nW?i zCv=1Uuto$7k62iw>++%0>^JBlR($2bSrl4J9<4P-m9BvPNpGFF+Hh7cjT`5(ir;`c zfx#)!qq4Zz8eXmRx#Ox$HBBcTwO@Yia@F|FijQI+$uR$h+07qM{!80aCp}p=D<#0a z?S*t~yrX{kA=7uD5L$a|hlSD`X}!zo2oAL#ND&i?ki8Yfi;lO%aNDtOoQNrGN0H^7ZI-;KN)t~q?kcl-CYR~c~;d_);(K5CE;mC`#F;a|Dr=0 zV|I&i8iIUB-ueWEF{LCl72w+mDD2zc7JVt40Ujci1Zt5Iup2LdAXXv0m9! 
zWaV1MtE<5ZX{fWlbre2rij8&CPjl_n(BIp7Qke6D&F7l#X2cA^b*dGa`&lrZs@S2> zH|V54y(xxt!!Ym)f!3VL@*lop3bZ|gMTP~wMdi%H*#scr@Nl_(4vKAGi18sbr6xZ4 zUr6k9KY+Z7gnARCYz)MZpl+~P2HPKy zV?8#W4fI|%Zy*EwLosvS-0WZAuUmI$F)@^yJHcv9q%0Fx>#bER6O&GQfns|1 zndAg_0ij-po=l0;l>7reLCJgcXSy3_8h!68*C4Gp5pb7G4DFxT>YcKms}A_I4Ct5Z zY*7E;eT_=n=wJ?O;pV1TV-`6)NPz$% zdiX}DbyBq z-|}#!B%PXwL2MlFeh^^o*tT0}v7TdMybsbR2B;j%k0*dRLU-TbGz%^ljMc$f{mU{* zij3HYJnm4)ekLCSj|!6E<53Q7s>t^^j|{DU@p68B0-ShKas^-WV3v*Ios$RL0NGsA=@qPZqc+E}!E- z1doTW_(L8Kbmj(mJ@gziV||_Yamh&<((Lb(O-MGJFR}{fU3{6j24y|QKtR5i{uX%v zsRw~8BpqaRN$Yx?x}YQ-M_ZXU#ryq&MzAFH`jVFg5wLc#zc7HYK{GP?UBQ zovnSs0=c_u4h#!tey)ir_x5rpZ*;oFl|_EHG&wfz&RwwH?;de%dAmbMZj>ws z0wWc@mW$`tZ)G4#NuIC<8_%{QaXNR!QeBs7I<%H3pJd1x;`Z-ZG4|(N$9$w2K;hZ{ zR)8u5#9>@p3oa0{+*3GE3u6lD23Z$_KIPlLIt^;*{qxt?cbxk}oY4J38P2g9?uLNJE^WO|vnj|)YPIEij@ z&q_ckHKZ?Qw2EhysQIJnH#%9{MT#Tb%!VOlE6XRl*|6&Fhx?l}k0|>K?&c*6|IJpw zEDE>r-*Aq$qLgrs7GK16e1w(MlkH`!%OP^?T8|a#$f&nh zzglj!;)SgJ_4R6uKxGaIVJ;VP?;~%O7c+5hy;ib7|1y!e{wVvz=*{(Bj4K|=$$Q7u zqr%ldElaM*yFO}D`;8qd^c97J6rwq-%`}kXN-8hh75+w);reux=HyQ39tS}rl1L(e zf)(HMoNXa|hlY4V_41=B zT$bjYPpTp`-QA|0Kg2ojLM2W8T`@ck4P+gD>~?$kY?AIfJ8I?$e}enlzQk4hDS;Rx z-tXtB!}y4x?(|KAFkrUpd2LotMnd24(1GOc%a7SkHuje9VrsP6=zt=3vX@^J~d#rdEp)QYPu-K zX_>S;d(m3`#Y zXT5cz=MwNc(X^qdw`(cYc6<-t%R@ffeIKGY>-mm#bmB7H-UAofiGH&}Ir%&MWof;S zmxcxrn3YYWROkztd@d4robnZm?E~~R)}uwsPkmrS3EOIc*tg@5aS`&Ym=to>bO>;r z@=%qv&84^o@^+fp(e1uzv)j|W2vths5Ej4pzAH@H&=HNTvdBA_Iy<>v#|`T-+%7SJ z-@j!ej_klWNbI;Zf`-e_cB$t1q4vLHS9l`lz=3YUY(LE$MkNftG;HboAgXb1Y-IR~ zD(8ckR+9S7AIiS7XsS)Ch>=!HiPj{VbRC%n8(KS2OjaHg?=Q=~4$qN%bYBfYW+!RH z&s0rJRF^8h2&tfhe*}8SKyo} z2ClDpl1Edfl{c~qX|?QcD`cQItARJ;g_@$&4dAkMgc)l|MpX&7ghdjkkHIgJ2eD#2Is^|WMhPuG&j@Mwn{TYtVZ8RBO>_%g6Mc^1hh}-e8A92M3&?%;WMu1Tb zd91E!%8Oq3`(a+bv~8MK2EcH6YjA}q(7q%f*(*MNk1cs~&``YT6aTcz*K2e6pHXRl zrBecrQXu>ve#x@o@GaA`#2>V1KfzYxYPar^z#9(gAJa0Y<>BSP(#6j01cC=vevm7a zbZO5#AtQ5DI%kHqs^Il=caioziu3*Jfbu2OuvxRgb=9xDG8%h@d8jWYelNZgP$N1` zdG6Rt>V1(AhFvpZ5-=-=kI8FP&KLTkZbJOc@V6YbMfb#}%3_a6(Mo}Z(UI=;=Oiv> zOGNBEo?00^EEbkE&ZZcY2ZzlK<*u7E2mP*?xtu)^Zw6Sa+p%j=rZ`wRbYU-;=pRo$ zdbZY2kh4Ku+4eRwj>hYQw@br+7itgW8>Q|1-rePB^0#)ZukVF$_$`!MSxk#={iy+M zhjKlE>C5Rz&_n0bj_#T4b#7ih1B~>)9mZ_m;f1Vy ze6VuRz}?3QEg6$t31?Iht$t+vw=9)uJlW2QdFk+HGc^uSQm1|^QOlu%*RN z0_P|GFedWg;9w(IKkv0FqwI`jS+diPLZEMdk*iS!v52WJ0u6M`VzVnd)U3Q+spFlcb zumOvXv8n?@m|<9<@S_neYm!i0L3|Y>G&i?j@0c1!-pD-Aw7_y&*wJbzqmwH5Gt(yj z@zaPPWiZV6IX}wmwqwU@@cqW`Xt})k#h-s935bJFU&6?kh?|}-WRm#!CFMwi2P$|& z7LEXqtgJUXa!mX)_KVBJk5tbrKWqliWT9_s$lilC%%a-GPVVA62M*$9sC^xJ2$n)h zWuNzAgjVdVxtG+-H5qXyfAJj&Ss{JMR8teztLJ~KT>b0cKfx(kL*#O#;I=Ydw~T}|BNJv z-SNud@+d3vx07~zv$Ge8GLvHnkdf67+;n^zO|a#gx`&^NQQI-mn-clk=Ih z2ZJai_;jAXChvQXi`m{C#vilM?iOhOM%pJtua?E*A_*q(3^ce#P5fTJnXwHc%atXn zjRd(BJ~Q3j{yE~)6@628rFr|S=5M!Y?b_TY>b_??W4Kx~3*<>IJSSmg)>&qHEm*1| ziRS%_tXF6OjDMeyMl-?a1RbE3!((HG0ZDXH15PPD1YrQqDH3}PjuEWt6fl|=TBl02 zgTH`rY%RKsBE=xbIe{{twS_7Y+ZH&7aZju4UAdM7;3ovw0dLYm)qr$Q`>Q;D9&p&E z1lyMogd2shE5P4stN?Dpmb{(lERt&}a0bnQ#`5VM)sg%9_9Kqqf2d7S{o>a~WiMz& zU3%a*a*~*JCN>;}pSYYv%`3Xl=MG$moFn9Nhy$<`-kg5|Yb2tPfZLNVpN4bK%+Nv^ zq3&3T3iGvQLtQsV5`tezc(|h-E5=b^KN~_Fahkd+tJ`7?8ecJZ{?3&HSco?hVmsf! 
zaaIjKYQSb^p9g1N!W#f9p;EB!*jf7vB&Y~sZ8X-kijz^Wf$f`U*D6)B$gp1%)^=tr zbQGjnben$87GY#kzeeO1c2BNxVbZVE0tj(Y**Q<7JDiL_@z@t(g1Tuy3rvP!y0Sx5 zons%*IUbGrF)`SFCB_E^f=*thnd<44Eg>aWxhH}y$s`u*TJnfN5QEr-c~U{pxFxUGHojHrwLh6Km~BX84@%O772cinh*@ z$fS11D6J{xNO+7qva$|Xl?60}=2S9n?4^TfcgOf(O0=tT5@cUkbsA+fjSZf?H-|{M zjghtG`oHBf^6VL*H^2$>2fIUWSKLOqk=J`sWYLgY6>WICZw2(%``!#h6GGQ#JJBZ4h-v!r zeCPp#9ik<7KineJBzv%e)|TqM)eS?c-T}2)VGPmjqvA!3Y?`eQA3(ef3&@;iVN`1np;dVypth%%&%F`rSW+O%7G!>yoaZ@HOig1wix z0<0v*&|~JRUS{%z{5e|odqPdvcgz!(?dQ!MU6K=cy@$572p~%u8X8kmg=@=5*y}d9 z)Ff7(U@|rm+GGwD@;Y1;g5nQ>$hrf*mAQXb2hmlKS`^G@EL*r??zt518fVXS^X(}C zk)%*2f5#WT#6QBxo&}l-U7r@icWCp1zThLzcJ#-JVMd>?OcEn>1}!JnjRT9i3x4?m zXD9~qmjI@01ct&4n%)WilokDVEt@d{d+nAL)d*tK4f8eQ2lla*wm!`izMq63NMya`y)(afgf^QGe zVa=(yJkU};_{(-4u;sn*G47|%z_3bzBDYCLaKui^%6)P+z3ZEulxg3cGxeDop&!l? zyvS^HAZM>%&wEx+C-I~DtxgW9cX2JV5p;&exvgBm`y19lzjnp^zy0>G6T!WSN=u|` z1Vsq5;#%W(`R-NO?MC?y=<|XX?e@H;M_`SLi1q#O)7(t%j1x82`es9)IPXaSJ{JGr;1h2gJkuBy2b}w~?JNuFtz} zbj*<+iAcNT#m?hVNBZMmBon4Bjp>x|c6lLoP(NrYQqiKB2r@tMPYvdTXUXAFVOu*E zFdTz)B&8~YxUQsgft$&odnTOE%(x>vU=ftEjUHj!fc>#QiXdYfFI0<}nx-hr70N`2 zhMd;+UKA=cav$p{4L*yF0NWd#9e=s9JN2=6uLi_#m63R_ zzm{#1!#?UA&y3Z3w6`4xC|a0sOoT(ctE~JVk>r_z9_mCE4mzFqZ20CYLPvnbqQF;0 zcD)mk@_81$eY4n3b5ARdGWFi;2xvYIh#zX|8Ba3Z**Qsuu*RnN5XqE~GAS~&6GKxv(ElWlB0@~r)alKzTU3QOVGTRwhmy)IEQWT-f#)ITq% zI`D|e^XAVFZeNUax^BW|y`L^l3+$g{ZN|m_RTo@n*B6|m)T<~rH7?!?S|wKBPrg*H zYRAkHSK~1yQ$aqs$nPHNHBY=W^)H~yh|DGVqWT8yv$I!BokE??A^mHLC(2!)vv&_S zum7ut331RkbQ%HPHjSu*ado=Z2Y_KmG$XJjk1iB3s3%{aulW77!29u_YW2nMPmA4X z`T%vimCfS75?fU09`);$gA%9ZSad|f^NzO{NNOxkWgpPh(nI@k$r}VH)<))V`OyZ5 z%gV~D^7CaqIEs${9R9>9zXUUAd=mJ6>eRS&L1~&tyj$wm==U4 zM%crAfDrzsXiCoLi^q3v`xhklyvMnIM+G1RPUYhgC>p_gn95WOnB)cZE?V<*D0lV1 zuBbUT04t9ey?nI@Zrrjlx&Y4Un{A*w%pe|z%^x_3!X+%jdTDQ zFjjbM?H8u7pcw#ZoReWM+64AgSOw$FSGGLmyhFduBeLcpEpj#el- zfNCoeyTePbtebywcdjPyiTJPst%PT4?q<&w0}?NWzzy&n^`Azr=U_r2=FcH+A4eEz zCCX3ZtVOqJ>Td|$$sL*TGUBngY9BMC!8NU~)~F%`yBn_GS3tJrU(r1Fp`y-|)46Q^ zH=2$}u$S(w@H<@V4I61KQOAf40~@o*u7`WS!G)l(cYg!F3v<0x>l?pXY}zNd5OP=q zakh61;ur9X4HzSd_lc3m;aU6Kz-24;N7Ire&Yw3Tf((hv7LKNT46p!^kBbYbKTW!b zm>D?eY=A}s_fzk(C;K1Z9ofzuDpB*mR;S*-k$v}sMg8t1N`*_l;-pOfyV7iB>2^xI zWzKOT>&aM}tBy4g+a{;XNu*I-43Hd8Y$prb&DU~)d4*8S&NPNQlqnesfmfB$(T`LB zCakZbjy@H>3J;pjCpWB%ev9?5x>7%6;}#oxS-Aq8*yd)p76nk^Wvhr@>uwq~GU= z;&8<2p^g=@sEoukaR&xgR4ZsAC4|;GXfYlvn%&eInw?-gR#^Y)u2U z3ZK!fpJd=ONt4r|l>cpFm&EN$#2b8;`{HEX(6) zDXYqbjdS#;jOr}rnt=yE_uWs0(M@KHB(0qaWg7l^8ikKOKDrO#&>eyO#(CCBR1wWV z`n*N>H7GjgUvvP$HGAjzw8}IqJ+D73)NzoE4scp0gp_H4reE@sC7M|vOx#T^b=DzY zd7(;QQ2(USewM@X`yfy?742w;ICoxu)n zct(cE_U=$0Gb2uiSMstnz{oE=w$w^`L8<$`M`2B9&WP#a zNpKzW3|igPW22`(t*FgGjNtR_PKKFV{?8Z9V(#07Muj)oT-8A|3$BbC1^a&9HR1Lx zJD7M>%n6Bqhd?3kG)CRW!05Enk(K}G)1sp{*a(uWI6Eptp=YC8NiWDO@zy<={eU`0 zlr7UQRMhW}ZRMN8awdUdA_Jzytglxv>HW>TJ3@3bVIX>b@)DF#5yVn<8)dm#(7#a< zHlHh|=8sDknd7{OX8Qa2y)oVf?8ws9Clpus;hofd6*fW<*|2=akeU~!51gpIi|P0~ zb2VOZc%=L3KdNcP_vA|tkdaUN9)?oK`53pDsM$GaF(jM1?-apcN%F$O|=aY_i|)iuW^H5*<$<41QyeIhOm$G+0&p z!q`ar%RcFJT=s%&S2|k-oP<6xjS($v4|eMd1&3v0wn%e)4gQ9}5LyzvZy(s3&ME1UB6^gLVN z1AcxuMqdpjDuY(>F=ih!WE8f42LuUWWT;!TEkEF`-jVgS_65SPwuuyw{yw+SEBg(` zFvB=`(8b_2NYe)FQPP-6kbX{B#J?hXnqBW(7uYG`i5$SRF*Jzxl)9Dq-?oW@bw6STP8zdc27-kZRFsYVb>m+ZR^kibOZKnw)!9#UsAD<(~@2*12Ogi059T+f0hfF zVs*Sidh7%CHFDW&cexNMF=VFT{BlOCE)b0|1Cprvu3)zFW(!5nN&5VyW^obEv(}fV zz~FYXGqRPE?;7l&)7)M3>E1@lMbM%-EQmlPzpSyKWd{?`h~{&nAmkh6idzVHsp?-c zUFELTjQoBK(F^V%5L$uZo|%so=XX)Z4q7JCzvvWIdj;}9KdutO+C6vkj3-5((tG{< z{Edu4zOvP$&d4v(=e!<^Kz>#M;UOW_T+o6;4L2PBvT9Cjk{%8oHOo^Rc%w3}odmdKl@5ZYgj&D^k9^1?& 
zId<7r3x5!Yqk-7K$J=2n)4Se@9e19ez0(J_v!fb`X@((1{3z>fWn`n(8xqJQ%9@`> zA3bV`$XCSz(jYE{{Ltp37`FOw-y$1A3Q3&AUa-iHmo;Gcx5`#s!rP3slsf;)=H9Pd zkl1D(cHjP9oFxYF0942`kGphJV=oYzmTK-dhb$I&O$y$LJVbrzETPTXj6W7$ppjSV zB`3y#fjHy(T|LYt8oNv_NB)GiRk?_lC^V$_{CABS$K#W`>Z$G^GLQX7{`g4(x>eB~ z+-;txt>^Nx4;S2>wUE{(JtGu-)HoRdY9aenv-9p}VVDHM%9dbDEcN0lBF>l4G2czh zbwgpVj{)vb7aZPl{~3tpi&cytTIwy6nQL7t8TEaq%Z3?p&kF7%{yA>#h|6w+ zapMN;Rff%KEsRDEEgnA}Bd3v=rgYt&s#IuvjBJd5h1Pe+F)S%C2sS~V zDdRd}XiAbT(oq6@)tpQ4&FhcGyG-oZIGc0&B51%6s@H?2LfSl&O)Iv!a zkA1!niM=7T<0Iqb0g#=*s?c4DHW??cP$&CS zI`+XkH^k~3>{sanJU{ZT8RJir3=v&L;c3tZoWDIaj(zgB%+Gf-xDFRm!hx=!z67)) zzJW^TB2&j@+Lh_`v{2p6yykU-Vd2PvAmC2?x|>Jg{Ph6LyvXQ(IZ$q@O=e;RQ0-q) zIBd}8;=oq5^#S}o!PW4ui*5v*(ygT!@&Qn%BB=z0?T2y0hXCE1`b_OSghRW6V$(DJ zo!8Nl-4X(Nu%REZ6V|=8y-js;ht*DTiCAceQWM`}eqes?8sEx%)D*|lrYt^(^0+DR z?t18~9Sk9dGRdiXJr5VH4cs-;xvF^HQ&cwk;BL1O(HQttao9%ce>}5d3_)I2hJhh0L{!4Gq zUI}C245Sn4>{!sMzDjmK$*qE|o9eD*K1EhxAN#mkUOPR0!hdP4qJ}2?`t+_Au^Rhc z=qa0Ij!%j7(T`6~1@J>t`P(F83nRP+FqHn1kgcHaK|Ro|`JU=gfIym&#I3Mw48p@2 z@y<`?n^oFzDPJzyU$v%7IZ_^n`A^L@RQn$+Vjl~YDLg&gzQ1WG+WF2x7#AROUuM!s zQe$R6#pr=0lsL;-oFQH*&3XJH(VCXZ)~6=rRUHG=r+X6>H4T$tzYOeo%`A+s?<%_*qVIBEc1H3H|zn_ z)&Hbo3Yf^rxswvSPhB|AsrACrx;!d-II-yM#Nnmi-LEp(628a! z`|hKn+-kk~({?@g1|0QF&vjanuoZtnLw_$G;@_|Lz4*J*-W=$0DfhHKs>Y1?7;#3R zBn$2N*aB%so}rY}gtxj4pd>b@kU|*rRCT#oeJVl}bG%jqbeER8U&N3=D?kenAM&A+ z={WRP9J(1NZjm;jLMO1-B$BDd^PrZJD|G~ zAV_IL$nV-fC-N44_UgStKWfnyAS3*Gbr*Vf>}d0D5S1Tq)3#$3ZeKv572=o}v`)26 z>us7>akfC~R6?`rhV~BvZYfw~UyUa8dNLr~R$@B`dSIDYEa>{^{5J+IQFLjL)l`1Mn<|XLJI3$SdGP~Q$mUNYSy}s+?NR?3<{8Qu&d1mD zyI3htgekr{cAG_;chFa0GO5$eLlaJ9y6DTQz_~u8q*Z1(ihIV4c&QHZzMgE;prfPL z?>_4%B+#=Itl2S<^3}ZU>GWLonDfCrK`_fhDMA2z_ZuR+VPg6n7qNDTNTpi9Nawci z2qKW$I@g$~peEOF3=i8RNGw0GmDV|bY#(bl5x>mt3K@zhOZ6n6gY!!N&471xy!7T6 zd35x6wj$7SNEEktrHU)S^v|S3TH?;6*W05e7cem|5YBsl)k0hRORrn_*ll}TV`xDL zwO@?+2qHUmvgi;Op%>P{v0LM{@^v)I?Tu(EQ8XUP+YUilsLv)|)I3GjO18a)dqkmD z?C2JmgPYpAMm@Z0D@xi#;?gMlYU16y^FEp>xioj{e`}&(B9NnawA`Rd;_Y<+xszh6 z49XG^Uiaa92I(M;YYMhhFw$Rw^~v)7-6GF8ArL1FC06=mun>{|G!lAT(H#Xvc>G@4 z_~~3n7&2^5z*-PI?pg#1zh}{ZFWHG|8gMb2yj&I*O&Q-2ht5>?%h-3XIMN~u*Erd4 z+@RTMxe-TKrySwCk7Z4jEV356@B%f43|qoQosyu^u$c#>OSv>;bu&IcRmW6MH$2Dze2YtEmml|gyH0@%$LlXy?{ln9jBqO0V zxVdO1{wIq4v`tz5#8TWYp$7Dj4jcY6Cr*ePDB}f&F>Hsj;S0rA5U6={_`sQQV{OT+kQ|FYruU>$A_w6xFsN6 zq$;YZN{BQ?o4qJ$pVubOKiBhtiNc3`Uvl^cgW?yY zT7#jgNqDk#$nyW61@IbbQ4)(sEB>TNI^vH-KSxKwz{uJm%8SptH#!u;oWWIiv+HJ& zKF(#?S#qrSN-Gh4;*WgRr_j#);u&u7Onl(k1b3UKt{V)sJ`I5dAZxD2q1s-N4=95` zftn-}!XYWlvHrEar_*Io;@9Hu$|5Jucj|w=?sYx%Q>1T$EY_zz!$0=K;U2Z~YDs1n z>r;m8enfQIss%jD@pJnCq9p=u3Rd1#Y%kR&mOXJ14A5D=wi5h{$E3|4=hc4whr579 zn@qzjw6H13$0|L(IOj zY6c^TgFano0BB%?85Z0Y4uK3_IhebLrzt};vmW6ESU7x>M3kT%5!48IZ_ z_Sitm@&T~c64r8idp4O6^oj|b^v-CtSeO9*E0I_mSCXhf0+6LLQGn=Rn_P&PY?uN* z=5a+xs)~JHrnUV;0*m<^U!GC6L{78K7HX>=H4uvo%z!Ml ze`=?I@aqtSnHXihu@>}1p>aEaogWStavY!lEu);gJ&1W$w$pq}yeR4S$MJ+8jO(&6 zUtAVYr8b}759kqCu{BV?mFE^o^@d42Yd@wNxDhiV^(gu$#!RTAe01omv#-7CZ< zH!qN?Y|IMxIR!D7`H1cZlNPybyYrv0ZdzFn`ta9|DSs%S|OA{rSS}qBp`3 z@7PjqdIIx=7uBV5PM!7-Nz&~x#nDYfC#QX|-H!y%JWQl7N9Xz507(z&{ks4Y?Ha zXTpPj?oO%F)SX-CWoCWfBU`2V@aA%v#bUVvHs%#K_`X%kyBn)J`KQxpzL>jP-=Zi* z;NuVf{_%fhOfu8AvbTZnY9wdtU>UyH0bH8V-EQu$H~bXv0kX$`XSwe=7DmqVEUjO zB8y4Dr_<6d8aQg6Zg9%XxPv{qwNW$8x-Jfy)nvEw&1aR4;G#3S%4u+23d&4lX z5U-QZk5>lt9%WPI6H?;{2wZP1*yhn94S-thDRAjo$hM>*$Ap5S2RdP~6#hHp<*QtN zl4?xljkgpl)~o(!2?Yc3((yqp+q*UQ2%+;m*jMMbGc0M}gI%Id2oK&nR1-ePsELjPHCc`UVxDdfRV=*Anq!NiyC2 z^uABh6S!X^_XW1Mym@?bwXUcqhB96i1Uq9=jD}_Y&Wbwv**rO)jFiGty$r-&j^{Vn z;zI_RqzXa37}|&%JoEXg5I?*JS>Yi+Qgg?>Fj=_}L;pFgn)+r3TXM_urCKDhtyGkg 
z1jXDUCA@5C)XrCRw=y#1xvtJe#NrPH#jd%^rD*!v}6MV!^_Iy$X}&A4(C&zJcZWSWU2B=d^~UM5hJ$KZ|Zv-Fm0HAKv-+JrWTSAKyMVWLQQwhW5L zBd%CJq)%pNFl~R{juu)^{o*6XC_?8u{8+<~h7Po;VDp7KdJOwF2>cZsYHT z*Z+!(960k3k$w59iH6L2TrwQ-%jSPy=l}kQiR=Nfny+@h6(G;{Mbc(*o&-nJ^gsqE zk;~XE!{~4;?vFq84*i=%{`>D`iJA?X7+DB`c>Ve2m#8BTY)T;q9cJZyw+Q@$L3XY& zmCAAS7cTvhuCHcb9Pj^Bt5=L+Bh466;IOe+W%+z7Lro6qHmxqxs=WJL&%(If2EyC4AYaDN@lr4S(L~n&y+tMv z)em~AUQ0xOZ9cvt^w#L?lW)Fa)lhZc8R@R`5n`y2k$BUi$Ccf^rAWFpGa}i+`=)%> z(+Tf?O5J~N+yB0l0v57=RUpXGMsWi=!c(x&YOMt@0~!34UC@hq7F-mWhl8T2 zj~54Z8Xq;^S>^JPhj8{m!ee?uP_NNE_RGE`Dq}Ux$vn(-=bf`0FSroScdxz#e2OyN z8jGi5tr{O0sh*UDpLy#Jo1Z#~l1Fa-{}~Zp2#MOaB*j7^a1Km1D-blMW2M6A zVy0u1!vyMnHp7xjZANi{b)VA=iP2UtAE?lh;m#MkfZuJ97`Z>QcpAC;JP?Px4Oe99 z?XR48yS2iz4AU93@^@;C6~+=C8Rj&Vy2`wUI(Lyka1bwrqNwP!zt9l=LCs^~aB<;r zTFmx-`WdAX)RhQ{0dCv8l=582zHIfv!cjR&_RXmEFTCpZLmf(CbY4-h25-QC?Cg1fuBGz5nL-^#P) z-Ez)%{y;TVbal_U=9ojTJ0dpImedUn_+)MbTgWFJPB~M@X^Nq zTHA}Cc((ZEYCb*!&WDp*hs>{k|F@5}AQ$j_AfR1_C(4SbYQuxK>XRApMnMr5wI8MtM4>3Ej9#1-DW560Pk0U2zy;TLI)IY6`#*T zJzZ^@Uw4#@tl_VYD2opz8AXRpllJ%oxXHCUimVJjfRF41|NAQXr6c%w?fK5^xF)M{ z!v%DMpFQTM@JPIA*8~t>(t)@TLA65Xuoxh z{L;SZ{73hU|2UawGN2%6QDv83LyusC0v6mbVWt;gzy@(&z7=1Bb3hak|x`0`YK(q39faV_cOwh+M?02haa-9ethr()KS_%fBr$ z5|9#J2HJZ-Ewty$^|Y#G8qD-en-Ub9P(T4b!A|D>WlF*ch`fdY$;azI5Q@KyZXPGp zl;S4xeo3@0ap=MUN;CmMOx9?*>-T4|51}a(IP_4An@9ycytWw;05Xg8E?H#3pd0GB z1gLlyw}VJkH^+vuM;$HlCspOw3#`_TbChzg!xa=0dBAP6=Y>O_FwOf%--N1=jRpuL zMRa*%Qo|1* z*X_^^pUjU?(#@d;V7ie?W*H5<$>uT36?{>=8|e;|ff*vd38S=YEUw)jQ%-$Lt-Lu_ zqMK=z+;*LJf?LhQ>5wN%C3Pf&Br&*9xL^CnmeGTRx)zqq3zvc{&G6Q|T@&n{PGz_* z<>6|M{2(<0nlQY7d~B@n?BY5z;??ADprr6%_66k1H0wALROvqEv!tvkcyP2T&(U@dDM((a? zlCl3Z?O)KJC`h%LW{skhwVT9NUp#6#fjX#}D_J@MDv^=QbN81h9_P)UmYZ3xmw`Nf z0U^Ki;ZAgIa0+8*pokH+1kM0D1~(hzDpZngaf4 zPW{cWeDD+C0|0-yplhqGMms=;Oa*Aq6)T}w2>#m}0bw~%`=y}IvCzOrS*S|VslZ1N zM{$={+l=2KLE>k*U6jC|c&NEUCewC_I$)xIfu+1SeyTXzHE_RTS+G31<5OuaIK(!n6o37SJv=dF>8{nL|w)BfQZ~oK*x73*% z6(TXpdgrverao|z#?Zlol-4u?DppY>B2)ZF!3H1-lK6XGj#a zj&b_lQ~DSfdX8LTN<5A_rBIZ2PG|MWWqyPpT7c$Z$O$x6WIbG^15#a`uk#ho zVYcoyi~gTC<$&stoLAvEaCx%;&=1xk@d(-BaTH$^+$7%6zsx|406ZYbaIWiqTwI+> z>w1CRX>f^-cM8*ID0bWRRi}GE9y^)iN z(((DC&xQRFdq~75^UVkhUS}?P8fsgi7u(Lpk2Sn}eQ8z(+$2EU_ff4)g&Nm zA7nUArn{Pcl8`v=h#l3TMibd^>lMzG;Qn%DdgIAI&B8<&s25pN068ahVEv^mpac@kVd9K5b`+LinCV{3{abIYKWYF6_Y z+HrpX)+P@d%8etnTC3jhNxk!E=HT!>9n`@sRWx*3M113EV-~@~|)a zo~ugCD#0P#)}?#O?YDhscGE^RsWN+DZoCh(p{HaXQOBy`f7~#{duUvjeVFD))A?)w zne$KccHhAGMEtHUTL+L7ngs|~alQU{aVhY8fCYrLzXLEAM!#$<^bBpV|Ju6(^vPy~ zposajw824iz;&7N530A%y$OPvF(6ZfwJD5^`sO*`DB;2zeFknhyba#U{uqeo50j7&#m7r`BwG8#Ip6!}-^2ub2l#jXbER{1F|6ZN4>9)OR@7Y`=XpGi-=%b=QQtGn@KHG1uRj%Ew5U*Z z=q70W$&a77A1nbP^s*?L zGOVN8N@)lY=Ydfi>9rPYc!%GREl=lATCE(;164HmTxoxdj=vq6M#Jc;V~vgO*d}>e zc+gwi4JLqXpYg$h~t3QKpM4AGnZ$1Rl;B?Ktjh%)2?0v00OhAM-X+$4YUzz&X%#ehOZT=GzB z){ZxQlh%a`vj8CLVMz6z4{4$OnA`e*OO zmnXO)SmgP*Ve=i)c|hy#15&TQb0dU>5$+#nz5bZ^0I?sMH>b;&F6>7c#(2eYtYU=hJ*z(${@>CEXCF4_-E26DIwkvpc;Qnur5& z=!da)!Z$6YLs8HH$ANC&GW(#il0H)o8?bvwpGtJdZT@-Z|K0Az2*IlWw1d3mG>2j8 zYrrK{_VMxgc*%|Nul8}s&rcp;1w0U!LZraLDxYX{qzRsvZaN}`Z3n39Uvpd2s%QKH z|E^TfXdw{FRxlwaCxfThe~gnW^FSubD5vRx*RMxBHs8m&t%K>i+}-Z?fU=z_C~p0| zjq@T*K${4cl{r^rFZ33!j}Ved`uMb@^c29&o#Ku6i* zCpOG%sYZr9_x5KDte5vT{tEJcB`0LxN zkybes$D$BL@hz}U)O}9|n|)7|oo%p&U2{(KS*>%VL?HKmP8;q-X>R6t&&!e8aZeH| zk5W_UyvD=1=4NC+Y~yk}5E3maN^BQIq{S1}|BpEMpFy2@xDXX+9cXZy0Z;|ODgDjn z;TW8$|Id~9gblf7So02M{qg1|(?Zi)G8MrVegUUi= zH<2O8e?ndz@R~2NJhu~kb0gZJakNJW=}KXLe2F!flN&mw)6Cf*{#2>A`2;g;TdoBY z;t9>><@V(~t9>SK8qDpOHk&1k?tQJaE4J@(V+Dx*0y3QL6^f=~xl_5dN7FK_RJ1aJ 
z`_kn2ao0j2K^VMRW?+eui4z{LlK=W_>?S^-}4M8bv++u)34VH5xx3~D~`2T?O zPi7s9q)uU*!u*mO**BwLrgU@s=z14~fRbH;_mZ?#Qp$4JxZzOj?)cmUblzlh5fMsp zEjQV{T#5^A7;S5FGNk{%rTlvwf`R7$GZGIeKb#iu4qnu9C6Iv&Jm5a2T}Rfh^eJ=S z=wGjtAHd_7SVBiB*$I&)9ePna(V%2g!2^rwwzt@XLX;E0NqL_(TJ$OXSIg=tW7@MFkAUxz%C&Q0b0wKR)3G37lK7=8;m12> zjts9C_j@rcP0b-34yP{&_2$a5Z)1Zj0jbE4>GL1t&DR4&?rg>%afVc4ddCuXFo8;U zCt_nX-zPB`n6OCreM5RJ{G74Wn>#bZ#sJHKKhLEfJG|FzGWWK5soEv^#s0y{;TUJF z6c_fV_i>A_PaA{FVPkC@-if4fdpBoZx0-P6PZd5d1!csUAGtwp2^kKFad>_I@qCNn z`U4HAGr+C;rSO9E3V^{(0T@Bj*+QI|KV#~%l=F%J8w>Ox)+-QZbHnBOuS=g3?l)fV zl#s!hU9K(>5@|8_sgh6#y|RwYYudkI5{pao{?>9=plYL#+yqP{)*$;V5~dJ<#s4)J ziZKf)V!yFi)9RdM0({kOYv+>19C43d5+#^q1S5&CI?^ll1Ing*C6HVAXuP1?b7v&i z4G)L)=etxlrTz+Polp4a8Z9cXUJ*uf(*<7${5g43gBoccc<+9V1eR{25)r;bC)ezD z{+sc*e>oBUVj>VWDypUltXiENkB-*Go7!a>=oD8%s#919z_^h(47mMh zCgn6jOUdObLE!U|hhlk33L1(mhgDUS7n6lbf?7MQMLnrR9*Q||5U&^WdHfrl`fL1F z!slF}iNoBxp1`~x3o%ghU8yc=n4x$;RiEK^c^dACtGruf&?M&WIW3HFQ}^e+8%eg_Cbz(dQ{9veun}*V;rmTpG6_|M{~PTb({cWD%2^pdxSAl zzGJ6JmFJN{J?}+L4Chwco%n6D#$vwJqr-E6{{YB=p&3OR0=?Y7Sgz?eYli}a3|Fy- z0&o^$|7Gd^w5I%f0?7b0{oelzh8rH+q;Ey=iBW?QJeI7RKWOAo!_PqRV1x5;(LUvs z=Y$T=)&GEFja#9lxWBYEFTR5;}#t5pqOMoPy8CtREu$LCB) zxj~CrF5+*{h7n!6LmoetbzMymbK1SOxEk2^rZL_@#!M&B%S4(vlkAk0tf=cBRokxX z0Hbq{$8QiD!gjMmrQwI`imfh^rt)Q&$C9#r89a>qkWoew_NE7j4nX~ryacZ(1pY@e zGQj5%m3sZAS=A+F^b;#|-7aeA&xsZaC$>9`NnQ6C!S}zb8No1q15V z-pYJiY)sxqPHTc1Ey4GhJ(0&H&X;wmWlnBEUw>4ukNul=W{3oqyhc zf2DW+^%b!waSCYNASlA1B(vtRz#_?LEmP%sDO6x=E%>!2IP*44}Q`ZAf}~7+9sq8c$~@LXN`5uGQ!FG=)daJqA-04aO?)$a)qP zDRxtEAehVmvy@VtS`(=UIiBZIkzjJ>RyS%j#S0~$aQ39szo(Y|nOFbU75wXuLV56F zv0X@wgFHqeW6VxbZCv4l-d0*7##3Uhx&-m8APN>q*JI^fWmu`ITnS#)DN!Do|YyG*PmkNwu9)j;A ziRiNdwn1cW+YIFs_;N=^l!EH+@Vl@~;ywVwJse;hijrzOc^xDT2C|viR<8=<9K;op{?XPj8{+UJBgNQ!9VGQFGNCImH!`(^S>r84L~A75PdlD z+;eG2su-CU;n>zH?JX&Gkcsnl2x;ePzfPcN9**FM-u`;MVp_Tyc?`OQL|55UfkmGi zOD#WB5DhU_S|U!|>Uffdxm#X#P__Oh(IoGUyO-{1W5L)OCk@}Gi9A7>dcO5G5~meivQ3wpTMXP5OTX%O0B}UQnUO=dGu26%41}^38Q3K6KOOj- zzG)LWfy zUj`c8?=g8;I`9fcMJhTZ(>3EU*81}Y^i{&bTK{|Z-Qe#6XvT=Y49-;-fOYNegjXpq zIal~v?NMJ(RvxR+0A#sOq(p(L#bF@o4_9HDHH0=6nDw%sxWDLeB|-C*uI3xalo=mq z8_Iozd7|bHaxmXOy1mz^{h2a8;Iyxt;m5Tmf=F9Mp*L7Ko7R}0r!h4tk@Q0G;0!k16eMSAp+WTKi2o;`!rXw_YU%j3w zS2BHSY`VLwba%dY+>a;>ReS7J=bX9F+)V<5E)R*QQ0$f#YFT2V@wDZXL4p@T6hNA+ zV-*rw8m9Z_V~zKFqWr+UksBoQ;F^^zDs+~BYd2XR*gnT|V6Z}_GT9AA9q^!^q0qGz)q zz(=xOq~!5KRv2=s{Z6auiB2sGj@s;01CCy=%vZ8krYnuR85p&3(}1N;QIG*x$jw2I zca2f2%Xv~Ein0u%KzkV4?rP0-7j#pUrs1f0;kR!_;HFg_ymUwT5aI_@guM+ufCl7H z=i|+-l!#BGulGqQR=Cy_=4xHG{t%+r*GA=P&rIO{z@6#fSlJoIVmW(@ zuoGwmKU-c3Rm{*4#UoI~}>_w>IrmuLtj zA6(ZCxOY1-K;r@{6gWdt~eJ*9zv@FNV^vWw( zaaOZ(VoTIoyv&7WYTmO`;AtpX*$Zb?P156d&Uv#4W**jzT;Cb$(2NID&_QKXj{?+f zpZKcyXr+D*o9lj9vVMSTf14ZRpGu7pf>BrmkLQTro^?B z^+D7DWL}1^>z;4QgeL(BILWKI^;m^eR_o=zhb#b}Xzmx-n|_h=%~PDIVd_zh7l!6c0#WO#?KB!nThmLq|EG z04hSfQfEB*zdY>!G+%k5LXsgtmcq4a1>fX~nu>Ab7Rnuw7OMA0n6)P~3SH0EL}6@Y zFQ_>>H=vCf^}8v*p?aZ1Z2eAioP9Q*qe)bhtnCn7vf_BpDxuKeH*N6xGF1Pz-a0h` zh2U5J;uDpQvlzH;=wjs$ScbiJ-1a?CaIY5KEX>p)>l=!9>BoK-&{pUJONIF>tWLa3 zw8aQU&lz{IO89kMr*4p3hbd8UBtblH<$8;HYk(+Sq**P(Wj~=I)f^Pc@9n=>~6n_D4#4pbt`ae9IxD#k4frgC-Aiv_S zq-1F|LlLn*!6*|?qj}}S=N1D9aO}RH3GtNciHv5x8cog*a8o18V1x-kb4?l-e2wY) z4Uulw1Gz=xBe-_}E=0t>ok=z$%nCt9omN#RDDVW+XC>;<6zKcd!(%i(Hovv;<`MwB z)}UyJ^WF^pawX-?mq$9Dv%1|ZN`d>V7v7)1ysd2Kfr@&rp?K=Ww-`Af-yyy@2<3Ud z_381^@(OYAb^qrpB@5J$eJB}AfDY&XaiTurfwD}CYI;Le&3ce*=TDQiL+JJZBHhNo zQ}0g32iNZJxpv0$yonB#XAVGJxabcty!{OX##w>flqSCI6iHx<)70Hwv2yzL^WD%) z<`#>xS-qvUC}=7kz0%PpF+y6M0hKBPp$#&ok8QIRe0_&n3jTTRkSF%JtPAfqOND-= zFRq)z{iP&sx~m?#Tv`!jqQGn0i`Pnn3sCYM++YlZHF;<cq;ng25O=5O1f 
zoCbdol^)72_Gm!+?YR?aH9YtgzgNt{mSn+XI1b}+6WUQsZM1%Wlv)N**P&EQ?#~9` zQb5k$Awbbot}}^aOs93MfimMrm!VNn+GP@n5i4R0N%Al+7G+Y?i~x(~2hgV&Kq6el zm<`B8e2qku-}3I2^L;%l$RQya6&3RNNf7`{7qVF<1pbVo&u#@Xkra281f zJf6P)aqF@#6mY?Qp!@HQq|bIdqh~YNble)+G9tgn>Lc)bFr*vFHW_(iF)j9t4D*y3 zXpPUJ{LU)GXZJhZrR-=nrUIEm_|bYKCyrz!n^i$Y+PoHMwtalrf$RU;QlMRf(Er!o zTtu7QJImldJug;I&v!@lewr zF#EIGCpI8OYqzj=x2{cslb@pwr`b+|9B5cQ{z=n+q4_htQnrDXTQYqooymO333iX5 zQKV4SU3Y%svD9^S1!H33J-izhDv&VsE)Qr&YEurlcNicb2c*y(f>-uNbb&Ea50aPm zyA)EO6>kX$2Lf8lf!;f@ZwS|5Zg2y6k8R9Lf!xf_LZv>%TyZXuHl>L6k9HIo7#oiZ z;Vaq71wC5PNW2r(-143h2NPbo%nZi3X33<@m?-e;Fu7{xN0{FSS246 zGU(P6`8@BY#I8CM>gyIsU z&;eLT!t$T6W(;Lkae|?$Gve? zKG|&H8es>T9SUfv&C_aT@h~Q6I`=vJ@VpJQx}O)(EEau7cpV*%e}t)3QQM@~t>J>G zDwBvDg55X=ZZ3#$*UsmH4qpX<Q@m zWrpGyO{t_prN8#=^g$AlG%T~K-lDy{Ee9D^Ek^~L##a{(a|1TZY_Sqiqcf6-TXg5C z!9Ynas=>E2LeH~2X}N*ls_4`OtVaxgozLuUDrDmM^9@}67kiDSL81n#(=6`?5g28lt%YQdRrGlH^wN^9CZrKln#Uz7EVf20FVOQA2|SC+`g z1%8jXulCWVo4F-Sp_DLC`;xz}cp+wiPtM!ur1;mPtOH-oI$zIoq6Oz;1<%_#V>hLj5Iwhot zW!+V3ox~E^TUfK#N5B{PbpdQ>fZGizPfz_R58mtL;XwZKCHx|vz-&D2^GtHv(QF+s zCej1ITTihvx&P>NO#-m8QKk7k$wkFDU`%0|I)mgZ1UfO4m>)bEB#98{UtXZ0fG{w2 z+l96wFlMSa%yUdo4<8solf~9orkHiscy$3o|V^1K_KSbNXjqpnEEHLO`lk%qH|qu8Zg5@#I=h)p5t zC<*ObyFFQx&X(kxoV)Ox++~c4(fj)Nov+2WF7yh2x!OOs|l#!}@Ms+X3dYKNJKs9L<2jd_q|Uhn)dpUY?u62L(c8M~BG3!NJW{N%Q*m zNo>@x$FrX~W(9)yJ;^8&Q1@VsW=A;#Y_-i+gP8KR4;;E3m>h0Okg?)>Ld&|qg121;T;-0m_OY@CoTG#UJne4H&0RmZd_8HJ)5v!aJ zV3}OQugroN&zgEOzFj>k7RnPO%E=qK4zyV#;WC^$Xb=C}uKvple?@X)$IX6$t?Od< ztH&3JPJ5$t#jc1Ot7T(T)KhzEFVSCVmEB&?77}o|TKsU2JU6WK=H2{IR3Sp>;A*4a ziD1bnz9b5Mp5?n8wb`88Gtw-nSf|A%B|5G&!HvE*(6WGLArFCv&3awe%}EYG$-((W zBxqmmFU4GaHR<(`qid3a-z=owQl^Vv4oc|i@ZP>c(EfG5L$-;0N{H~hJZg$WW;$K@ z9lG*LV69o%XPw*jGf`mAQU*Wr#d+)ZX|UKT;+l~?+P5E&>CK_2(SFQEeF0cysGYHn zdn`S;`iJpldet=v4053ayt_|^yC3%+sOSBeXub?#K$q#!3uYLv%~h{-h({P^D`s!{ zn9WrXKq?3lY$@yCo+EohGV-j*XQaU*;k@;FUq-xgAf1=3;n53A%J*QLhtaR}PK;Ov zqpC_t?CJn7c}pKrr@6;hZ4)W!ZX-IRCpgg4`Y!+3WW*xd#hU(fxxaIRsp@iQBr#Qo z=jL%5cEGw+b*82pX$nBw!BQzpAYs!4G3XC;Oz}XR$?+im%Q5_y4g2Q-wZRxn*Jj9T za*<2MB|7Y)MFIp>UXTnmUuAJ$gLj)*S!%x29QfU5gN@5(0?a_hA_7kP{BAL?pN4C- zcAJ$zLGoQS7ssWrOmE}3Lyd3TVksOq`hO96%KD6ZC>LrqFcU7G%wl<$Q(~pguNB*c z+eLJRVg9;Ob6Iayg)|v<0NLUAk49CIZ8{5pTux&)eCy3~q01vG0rV-o8sG&Vp&B9L z&Q}#5>w1|BjGj+T7b$(W3b(?IG$xl%{(iOz26g-?TB7MWdy-zeapX9R_@v#Z5_2Ycb0)Et}>AF@a5ej15 zKiOU~Co>ZHl|oYbSiQ$4$N}MtLjYT_{+(q(9U161V3nL$6}$1P$f+@Kxvb}PFb0W< zhk>C@Oj@O9EZ9eNS*1NP&1rW|Drb_LBy|HV0TcmzB12fE@8WH=KsYGP-!H%yq3uc@ zQKy4$1GfgJ&H@|KwzHhjU;SzyU9l|bZUC9bDwFqMdp7|RL1ei=PNGQvRiMryzR%Q6 z>43ipe&pr(7bJGh8I=GN$WD=BU0tWsCIkt0)b6%y=jOl1Th~Wjbpx0`Dwb6$ z(%SShX7d}`bl}!g6pG4|HB0!-&2cC&Fd_=cOxXP@z7X(;b=8@#^M;JlBHK>dcdjQ1_a0@&{3;1%piiIZm^_?lo_v}u zpGQ*f2%nx^QKjRWCP2SX6Ogc5;NE7&*d-#nHIImWbtP8FXjLp2)tjX3*ud~GbeS&Rn5D(!w%xF!CY7$1k9CZ7 z={}o`14iDf_i9DhvWU3QzzY>9x+|IWP=li4bxN}L$_?u-=~EoB5K5avxZ8%wcQ5SO z;w#0?sY-paGA$EB-yfK&ifgmQhnpv_j!hyk^kq8l`hz~1$NV7qGU_|U>zcB4IiPHC zX^6E4lEnAi^n2Ih%K7mQlEZbo@1ts%$eyt%>)e+*p^c6qhP&W0*IKgy7RZfWM~MW^ z81r&1@t!#9?I#s=Yo*pAEM3LWH&VfSBasy?Igi))+`oO(Rro>i>=)POm`~}6Abhp8;gRs2YU>Q_7-e5VH_ zQ{eV;1sNK#Uo0BE=W+B2PwK?vUZR7xn1k zhFd=SA1r|Fh-3HQ500CWVUSdcU9YF7Qn03DZubyeG~zeA9e8i=e`qo2Y-Wx$$PLGR z*b0b*@orJr94C0%3CDLkTgny^#la6Q-x)ahd~>W4Ef%HQa=7Sp>yJ5Ts2hQMQVwl7 zz4xQ=7__53Ga$aU+{RUsq-Li-CW=3IVkw0H! 
zbhqRH8t!=M<$COBUX@_IV4-+t<{5r>*4ptSjRtGLi9hCY=?FDQGbm@1C$ zT?b=8mJRPKZ&#*}Fto=GD0da1h7IHxN<6(&879?cs5Y)-SRi~)+ zH?4*`+;PF%B%BluU=9!Y_Y>EOOT&aDAtZ-t(|W`EstL2qNCOf*gL8XWAzU#{p#v}i z=Cr0ytZLs%dFnXWrrUBpY0)o(?W1g5{Tqk}K-y{ZVMrU>r`T*fX)_Qt$zy!PFQ>(R zXK|1+J|vBsh|4{L|76|$WHGW&>_(SUy^^Enq_WV@iYdiNaJEA)?`z1|B6|L2{!F<=zh9=gmN&`Z$Yh&TMW*Cgo&YCE^j(Ge4> z2j%4CWO>{imsk_AfBUc4@9&0oON^LC8oJpL+A(444$%8E(O=H-QzF~!$N7DY$+H$T z5E0O4pX;ZIsNHR`z4KvfExLTiHs4fdYhp>EurtXLox+LZoTz`?JtxF$kK_ep zCK#a9$|jj8*V!6ZxhFCkg(fm!_YaHxU!NHK<(Zgr;$f+-%Hz1|#&Qiird=N~W!2B+wR?kMR z+%DWVr)Fl=hFVlg+ucMjrekesoRwF1ZMu+tKa;AY{t)mg4{L|9fjffWon7;0r_eJk^$P z)yuU{@<;~uM&vH~aT8)PVOeex_hK}u-U>Q_4u4_8cdd`|sAgGyqdQ$T2@wu7=8lu= zrEWSHlQCmd=sy^(Ge4Sa_XM#r#Wd_IwfETfkpN)1Y(OXPgCQ$n6hhkkGY9>ep%cYqVIRza;iZLq*_2dhP;+c>?K zjr2SeYkERddHU9rP_HTr=R%-W2$Ejg=fYfjJXUf9)^o?tHj)I&F|fM_wmxO62PR@p z_zB!8cd?*v%HbG=aB)|xRl!-VEy?8ia*;uRpE(jqNdNM((liAkl>pSK@xn==Y%sHI zIU1FeB>p`k^AGpupN2lpno_WM4r58K3|C1ffZ$Ht-hazp++HvmhRgi*Q45H$=B1@igHzalKI8p zgu@&5d)L{@Z!kzZb})EaB0zxuLl0n=}C0aj}fu!fV&s}OMQ3yZP;?53Li?yc=yk$ zaMQh|>fX`YY?y#&f-Du^;52Rk0bVO|*WPY6~9 zhGF*G5RD`xi)eMFavd2}t+tqpmxL+!-G3(dM>^?saoqi#>*`u0D)Thqhm7tA-YB+>mT zH;3a{v>Un_!z|u-L-9e{8X6wnjVurXPH#Y~Yk&UkIPzo@^Q$Ki5?s0@Vf34(>b0n# zayt%#>G$%fQnl)ea2jl*L)7({yIeQIIy1%Vb36h4Jc^S{dIb?E4FV*k!5~92)|tbc zj|gD?AFLYz~N`8R>RQHJx;Ubl~j{37_izyR=(f&4{i8T4a&L`U+0FAz;hNm7pnBn`=?SssRJMpGYdk74apd2Ak8+q3&`+Jq* zM}yw@Z};ypQjKS*-X`NAx|w_)!g!GZEpb>#vHq?`o>qH~n@YQHl=Np%==kQWrI;WT z2>VMN3SiiJp0><9B47eqX&*BjufOCvwTx%8qr}P=wEt7)hz^(wKnZzN7 z%5E_TzA|2UqD6*#9!MQZ4<)WJ_2Ze$7&6^$zlExwzM4Ih`S|aAZPT^IbQ_g?wtlPt*F3 zZ`?0!GWgyDJ~NcSZ>AJo+U>3Bd@H8UB5%T6P2hCVW=zN5FcuyV=e%hyu~!5v>40dq z$gcyW4-3pvfvIfY@0<_hA6TKKrsBjvWuTs){&#>PLY2pj#XW$XPm<~Cv;r-%n^KSO zpBQ}L#CuF))EA)AtRVxK+=v-IkLFas)W`rTy-;K6`=xjxKlOXZmsi)T9elUi%z=-_Ia(ojD~3jpL?0ta3Z6v zN#Hk!W)H(-2~t3z5v92JHsPTOh^?u%>TZxZ@Erjv$2`aO3!uB0o+giHd1`oPG4M*W(6aF81)oXhoe@x!V3cM|c|X%!Br zGQyokgpI+x#ly4YpTBe)cn&%EoGTIObsllp5`5qd3{9m!?o0In`ekJc7z5TND#bko zvxbgjO8;|_px}?kuaI&5lFx{`gsJeQd^?O*-K`j+P+oHpNgi{yqFewHzOx_Ike+(T z?lt%OJI$O73+SoS=zv%FEWf+jiN3vDzeYzh`xe1Zb{4nxJJ*wi(f2S+0;cD4BM{mR zVlXBi^2yMQo_#146$mez`(PIO)1{)znCGXM!S@P&6eL!9v=4aM`Xhi z*)H=>V=C*F9q%DyDJ2nuXZ($^&YjnQREE!UTceaj;t{oSoVzU^)x7IDmp;;3Ij`?a z7vgYiEd6LYM>4mbCu5aMqpkFA@5&&HNu6RvTGZHMc6IoxB7{a!4D4{i9CM+K$u);` zUHotiCYSqkAFScm&YfLtWeY0It}E>R*2Bfjc#%NZ`!+eR@_u1ytgn8IpxNirF7XM) zBp%y!zPGHVZ_)(3wUY4qv+qMmHh|KP)>ZZ(7*M>q{1_A}vIeLO{FI6dhsmJ=8Y~NX zINW~td3)@|tEX|8PvrzlLg`_-_y83mCi7`B%hz;7V{|I~ww{dNI~xGb=JQrhu$ny` zq6wfrHDVJt{ENepwgM1sQe$@BAI+7F2J$(cjHJhaE+Z2lpLh=t;~@xQ37~&wC*i)j zfY*@$6eV@5J*~mo0iXy}W0+$;-~rtp^>hp%aq0;r+gg-#A_v|w>a;i^0Q{x>xS*9y zW}=F0=aYE@CN9BQKrHg8f*Z$u0XDbWi|`pN5qW0hy~r#}FQlSmyT8|6CEBhPcb?`(jzaIP%aj zN}C&)JashAJ>PHiyWDQqN>g=$cOlHn)2KUYYt&G@4D^OKE{teM(pI|bn<|&``_^S= z^Nu|OLN1_;Dmay!VgdfoX30fcLTfNn;XsT_eyQC4lZYwu-;=Fbm0GeGFR$_5P07wf+RU1%;Y z*V~WsvDU#xn>lrb2Ggjr$#z0pxmf#r;B|#P&_wKH$HEH-?JA_Gu97fE9|0O)Z!i+B*wX2?%E%*Les4@LE5f z=kdC?%XU*&!3plCmPaZv15NYw$5cmk-QTcP}SsY-SSM6 zgSMBCqc`53%6}V8`&|h4o%M3vkR_zqpx;GS_F+k6fSF#Gk$y3g>$#4N62P`P_{l4= zT^+uy**0IMRUaNHdwU8;gxFfBkcpBnlh_dM=1$@|!1GrT?|*Q^F9iLk^Vvr)Wpq&G z5z3qnS2bS(j4!vHDB6@d^zxrR(@wa#3FgtgfR~U`m*`ePV}Ee-?KfIgl<}CF~eLUsyP16P18ZrGC)P@f&E|v4y`|fYw`zQ$%x+3nr z%Opw!uWm+{sMe20a;3D_elr9gI+bnsuT&py^EiD0rE#H)Zm)HHHv1_iL9y<_vFXr= zuv82EhG^VH?`!Z(;lqNc`kDb|Q;lYH{{po7isz3G6B!-=!B<3c$?zf|5jq?#7-d>3 zI?%h;y7YOa0c7fc4Gr{cXM4Hp4(i_VEm3OHgFC#*)J83y@Zr9JSKp!LOT%2i3W>*% zlCUY}hG-ZL3R!_NDArU_y>Z{PLzM}3ft!WX%;+iyFR{ub2$;P#xglhWLuMn{fL)w#pr z0ED`+^Wj+9{>d)@xU*Ep)K1LIJav+Tg3o2(OeWy_(zaj%^6agvV=MiNpRVT)DLxEP 
z@OXE-KeS<;!hZ#w;c?m+>wil;{pC^XaCfd-+O%rF(q`qx*z($fcxa>D>|la+rmenW zCNGKp3@DqzEAZc7t#)|xI%3dc{$>>c(yOZKJ{E@ES7kbBHZ(Xl;7*+4C&SC_?W<4K5}<72-VDK0i#cps$d-WMGr$>i0~za~ z0~v#LAB4RWh4gJyl^6oc>{Wf&WdYZUuw=3!$J;a7cT@cw&R7>U3+zH1$rcZn*9wV zTT%#4wx8rP5$|Sx78m1G!bQZ>{(p46WmMH`7%vE$1_5aS0YRlxknUExySux)8$?NI zke2S=lpu{DAq~IbZzqKF{;ZVzWAw+s7Ar?Q?KU+HYz- ze%$Rh=1e%o$Qf<`(2))o$XinMSnBHLIdIcdJUFIS#Ni&yi=KBPj%8!USzxcVp+FYu zIPIv??CHqKz24MU?tWA$)6-dgL_i4+&{J<6SJhud9ZaZ%HuN5Tm5hIh=6CbY$R76$ zCqdH?(q;m++HM%>79mtTn9UVI$V0H+vdi&3`*11tO?r!V|6TOGZ&o}x25w6P@I2pB zh;Q&Cd=508ATsd90X3cMA1Wh0B=Dc1wm}6UJNB3^XsSkVAsvaQ%X`MyfW|4FE^K&z8=h!EEoXo*>R<69hrZWX+BA z15;T3G+UBUt2UHj==fJ5hRa-{(5LPo_?id#_+nRkOrfftVU7tGc_Mtl&s0-%y?sI2e@L;r#P+w{0Ud6wP^i@<&{Q zV&nHKzE^ZVAs$EkeGyeD;4k@O>uB%do&miwBFgg4+THbwxh^4+AF)xU$JO;CMpI3V z1(vWdBG+oeplo8{Fo+Ab7ayO>iXbj#I?S+Q|H08-;k zc>hD%dd0N_g< zR>&2f(__%q;-N`Otm|lb#an^qLYLUu%9uq-b_;TTPu2`)GpjAywoTBp-P7!}@jX}x zpUI%hf9WRm>sNH0CVMlk$toR*ad*=DreIygPidJ43({?1o9ZTYXkSy6==8nL1)W?6 z11E@__d_Jy{&(CN!De_*lM~c8GZPS-bb^)z?&Q)f)Pl*uqUarH5xLf=ojJ&Jt{AJB zce2Mp*M;zMa$)nzod@G?uY*ieFA1Fl75kom#DyjY3!zM2HVv$YRQAR;;MO>x98FLU zV#yAeO_RXVFFt9#0KH#g7kkD7>EIPz@MbRh_HFR1h}Hk{{{7E?3Cuy`zf8N^>_6QY z>Y4x+MZpr!`FD7W?WMjQAM<4&Z@J%$Uk_=2qh=KYvR`lq>ao0er4Txa zrd9IjxW6=(pa>IMegMaG5mSr_yTkz8gM4w|tk-p2QV2>2j4|v|VF&LMg&zGL1-heB zETV&wfe|>n@XVKEutjSJI`! z5J;PtR(Id>-TCGXaymzp$eBRk?G*j0%VLRW7`h?|FdndMb92SG-z!JK?mWbuLywME z%JjK9G4cdae7WF9!+osK`;wy`R7j|=tw!O|tb&ilgR!U8|6q>o62ba8*XYt9XcL)3 zBXwFaTzI(IXSozJY*br}87l*)X^3PT2|Ri#xMHzEBZT1;ZVIk?K~h0gH3SK7LVyG* z6PUf(n^axBByrezrBy!RL0uAwPCUE!gPP#U?3?Tsw(7k=y(ozF9+E$8j6B~S7t7$Z zYaZ)JLs@cx9n2%d5Hi)avTXmLA$UaMCprNaF~Zh0sPdgx73Q(~>7L^$aIn(V1@ubK z|9h?ny#-w5nN&Fn7VYC|r?CW+kI^fCr(k(mYxlK=7r$1Q+j^nqHUy7dFcFPGuf{s) z(a-N1I#IaVw5rm+FAwKUu=bMLJu?$tP&OTlrS3o6Z;qcP|Eb#^`#HY3-rvudY%P{M z%yx9$+c}h`8Tnm*N-%yV_~R>Pd0ju{&)jj!k%5uWQWf8tGHAKDs_a}}0x1pADFI!+ z5mu5jCzP=;u+vY$AzDmiT4{jBTwx8k2Bk7Mmz2ZNPo9nM?R)f)o;7CoGV zDs*@6@6DE4ot3(p2H>MT4E%Z0M;#9`O8zX|=rlxEM_Q{9_k5O@}BxyXn zCd6b;8kM@~HAlmupa5<@iT<|2&SjG$CjY}CSAxLhi>8mR%#s6U4JiC6lUW3M`CfS- zeY+Y|zHw4bUUaV8<@{bh@f22KWVu@JiCNI*?j4c{HqyBVS2bbu)nqrxQoa)60sU%2 zDd?l5rUHm@*hKLcug&)BOc6)if-F-jIGHmc{JdKhl6ER{YMEgro0CLe3 zU{;jIJK-iP7LeZ*U2wvey(C2t(- zx;94?mmKOUwuLOuR5Y+UQK1>y9_*+rA%{f!P{fbwlcC*S_OmDY@v}$9{I#}e zr^{$W!-gQChc5GwlmES6h_RiAD^f@>YEAK^)l|G2fp%3WI_&O<$5QDlLFY+NTIIlf zB=+jf7&m@@K06~z>Y=9y!t%ew}s;gN;v1Frzl$bb7MHrCkqM+T6Xb&(!Am&B4X5p60Duc)Ff<1 z?Y^F8eK+5*1zKhg2BvgxgCBInG|U!{m9F+jLjsQ$NI7i{3*xR>wgf>|752x1@BedP z{x2enz|Ww3bQ&&W?&AwxG@($^R_UET?}G(|a4mS9dU}wNd|>NZx@E_7;eG+4Lw8y; z!LEfv!aa|Y*ZQ={tM7vdnf^jTF7`uwp5@V6&oByPl&LKDi-bIL+>8z|_>{#*!%VTD z(Oi9nl-+)@C)>dKnqXrM!98wj*2cC;o}ruO6`o|?>N~m*zKh6R7t@dioM!(e2<+%o zhaegN8Q4GAt=3=@f`m;zFufMVcrO}}D_RX>O$V#-A zY)yLT>v`EGv=M1~tTHthzlx1i@tQSxI19JD=lY0$8e&Y@|5$`*=tog?cbd+oFf$2W zrf<{Sonzl)eOWDdg9d+^_;#Ll;=@kbFb0?lCMs5Ot8ZHfU?7RJy0izKXS2iWoI1n$ zh_Co+1m|#}DIzp9^hrJqCR8=v8AY+D22Qy~)fTTu{%aesb%kcoP*eBB60!M$z>w6)VXW8HxhP%0$`kmr{L z>@!ZnaJ-Cs$gZg%m7CLtdt==$WF!2sqF~tZc)6pc6a|~yw17Ndtw`z3e~?LFoUIx_ zXjzk5PV&zsjq}g2k6bo?RR_V&wx1dJ@7~4sbaDJG%=m)4_HgR}uQCPK*C*QOq$2WK zg>pEiKotqKnefEBR9`XIu90e@-*%3*#WGM}$YOl#mu<$?rpgvHW?j#94XxOaznKVX z$+A2-YlAPZ_bjG!_21uGz>c4-2-V)uGS#DHa#MD0ZD@GVh9%)+_}-LG+N}M|m!RhM z)m+}C6r-B=p}XdZ90A&v3^(@Z4h8otuA~<#I=x?@S$yU@zZvGY$MWD99hN_R%ALCa z>M7RIFc>VULTC6`>yX~X1gXCbeARXEa6y2uuq`nm`Q)i?@iK{l=CNZ}l89C&4X;E? 
zW!^+^#HqB^$8B%N@bC|}W-CX$uE)}xirLWM@&3b&j6cP$Btg>K7+l5srKy<;oq3M8 z2SJHq!bWthdS{45SXyD_v^M~Az~}PYn=W1S)G>XTk+TCrw(Bd*eb)zNT_65&jDWV$ zwx~h9=@jCyAke^71OAARRuOJOVm4I~gk}iWNsW z1c;XN6i6MHqXkBRryUCby4c1hCN1-R0^==V%KxWtf-q?BcR%*gWXCQPzUI}B3eXpG zbD7-5czY^^r`x`h(~2bbS&~Nn1;P_X>V+%mi;B}P^YL6I{}F99sW_QFDq-b(n+b5= z-9q7#3Gty)6a-CpA1d_i%!4r=BWIj$hGU5OSysWO-Zc8?qkK*Btj*&OK^cPzmaEtK zLjUB)6EtaGgYR)|-fI8zk(ui<@a=#(8h7V<*!7{lcIp0CNle@9cP!ANiM8Z`^6b+} z#JqLD`KUCly6i>Oy4`5uIHNqUN#O4aB(!;Y!%}3G*6k@p20_UwGW6EKp_hG}J0ssq zhj33$QMSW0>dyPG-F5szGGl2N0mHiCIZ$$_4R`Ekm=M=zU!ZbW<98zun|Am|s3d?| z?!XoQn2;Q}R<5Nyd z{j2n{HUCUkA%R?AsA##Bim4oo{zX>>i4f2PWQz`H^xhXARQi`2H$xRIAd@(!p%SDQC*{BN zkE!k>E()UvxWoDNYmtH}%FB!SrzdkhLKSVUjl;)}(JId3;)s!zc=G9QacBoA!0E{~ z07`tImd{48x$O~XKmV69NSKgXv7`0wbT3=r!OJn>NZM>TdL?D8AlPc^?zQDVSC3-5 z8v?*wnl3T6`dLK!?`bK-Y~T*7KHQ7FBCo+m7Xl3$`%QwY7cL!9dfSx-wH10yl2q4 z9Lu6k{iC#wL*Td4108E<3qu(;vx3GcUA91JPHgrAmX`R*OIT~FMNXs+;FkD_S?m`=WDlXBt5#ZkZsj>R=efM+8$?jc~h@$0GAdLLS)jiV%T1QPzU z{E%XkoyC^A>12ZbJldCkkqWO=OB93RRNctSz$>7}?RUdbWu5AjAoS2Y%wPKql>7dr zK7ZlNj!cUyHINr_3Mv$u-(McDP?(QpqG?y`DTvwM{>Fc*1ASsTRlZDQd4oYGf0~av z;&XdxS8S@Wy$&$vxpZQ!KDCMKmhy2$^0Ect(3A&Lm zoU?Dc!c5rbshO;D39c2EfvyK$vJ*2`){yHNx#q_;X&XGMq2!3Gyxx%cmt_k>0b9UPW_!D%%j$ceRM{QCS|FoS!uMqvhU$6NQy;i?+>_@SW9sl@dy>p>3#<{Zh3w0qVbvepV zF*}N&8Gd^DL9+|+=^z^$i#p_Z{n8lP7lQcR(t8}-Rwb*x!!1#lL&@9ETnrj}Q|O!IiV%A~LRyGoP%NR}%#HsA=(aJ!Pp()n+2 zo$QjOq~HMUl#WUea^n23EofpbTi(# zx*dIwjdXhOMQA?_hsi^ao=eOwVGBl@!$wbSs}Wj?TYF>ABGGa^I~5LKNEMa>C)MvRd0!tCR<4{k;Cg2y`QgBq5b}O!|+TI zfH9^TEn(M#uM>o8jQjD8LSoC+H-&OtA`Tnd` z$DmJ;IbR&j9>VYPpXZ zi;1f023z$>*LBcplbHAPM$?EB1k7J~(o@ugU-4=qV3I^8tbF^1lCMd?@q6zXr$aWm zqU$RRS}3otW{b;MGO5mF{@O*)pXyfAiyzorqo6TdztR892Fqj25M7@G4jalp=>p)(J(?QzktRPM{`OmY4ou0D$d_h>I1~HtZQ;yfY17vcWZ}$gb}9wTWjS8*e1@Y@Fb`1> zj0!?0*m;oH)uSYp^}3)(^Gx2#QX4wIx9>^#R;kxU&Di02y8fJ)xO8v@uuv@*i=r9m zMDS#6I*qowN&7>|44@kXx!7|6HIc7`0j9?&)rS%m>D{4^!#U{rBbGRRw6U_SH=-$T zP$V@qm1qD@_WTTq;ppmgL)}A9>z+aVJKSVvs-2XTF$5Hsg@RsoUNhDg&p;)Z5O+VH zWbRHj4TOm2)_nP4YcwNWgFR(9nIX02dtb0Ll%dQZinsLHf4>F7^11$g*RR!GkuEem z4;xTFS?vS0A{eQN4-&1XJCxF2c_;rI&MhNQmN~HpQ06-Oy_u*jyfa%K7eaZs!f`7F ztnxDoW{s143hCKOt7sk+3paQN)dCucwvhD@*;g9Om>CUni;*hDcd(_`mC4GYiNFbX zW+Jg#TJW{9s8B0yXA~-%OwRFB1*TUp3GusmF&9YVlYUrz=fCskj zi9EXKMqi-*G9M%4?I~o^`DJwNyz;CE8S9^4c;fl?=tQ2_X9ZDX4Zd2HVrjrMz+3H@8K&GsGT$Eu7xn8TQQ}p=DL@;|xOYe4N zYr$T~Q$9ZYX7z%YlHfmoFom5U7kfXEJ}op}E5K>xhZ1J{$m>5!Q4ptpQCZdhmVtNN z+p!h`e|Yx~Gegw2YZl)^9*JJ+$>Y?I{z%rf{BM29<25j%Y*xL;BCm&*D0N1$+1lO0 z45IR5%s%rR&kdm_D@=vC7;Hj`OFtO3iK)q zz^FTzn{%_RwQ76$GO-auFo?&+cx_T@*XLJ`4`h7Pd9!|^#^g;b8R_WAn~l}6tOIqU zha|e11K#|IgZbT;ux-IXUH493M{;K`Ffnfn`>{lV)XvgKc1uc-$(*vp2%fw1-e zbg-EcK-~7NMPB>6kLw!`BL#LklkloOe8-fa3vyMGyR&?g#`1vCx?cFoXrwqc4l%we ztN755TCpY{FcHs>3+gPG98Yos5{wjhad7g2KwR2$GNu*lyDQf%!M|-+np@Be8f;ylJ0y7Y6&8Dj7i%lps-@S4j zV$oFW^`EuQvs8+6BPLTe5j!Y;mhqA-nrOCGtJuv}Jzd&80T(ldXrR_coPV(^Cks<^ z2swtZzk9(!N^Ghq@y(FO`PDxu&h$W{{nteL0gBj<@6_&NHo(3mvV;(2kHsf8pzFGO zw;scC@F!-WO0xz6YV=Hr9CX%Nj*Y(#N=!Fq9kQ!l&Hc$x6Iuy)^lNtA#T`jw>qYAZ zPd0p(0pQ)Xk|c4m@gielLY^q%ry|HEw$>4#Y@v{hePQ%}|F5)RM@x2Ep*i{c;&0k| zm9@WTvs=j9^X2FJ=46NIuu9N+ zyT0K&4p-j*)mxGv3{=FIi#m)qBBvc+$Mi06z7ggjY_9dL-=g66iA%+Vt#`li z+z1bg()l&Vh;Ug^qWZJr5{UjOUAJQ%LHb+oz$b&40Zn{O?FrNS z8FXxdkjlu`^X-GV`KNE$K2ihBS-fV(b?>jna%G4*OeV=IfIVuc&vX4&D*@eQP`CEm z4E-{bp0=P)Os{$UN5pnWuDP!^Ook=VG)U~m2V3iZz_<;k8_Qv*ZbV@#{dsi8= zYNpj>BiS$2LK|$ZEE=O3stmZmu1E;IMU$2JSSyUqzA68q2JRsxHVU2^ZblQucD)06 znVookcZiy#cFg&76%RUMUob&N^jLpT3p3O*xfd5G;`=*47Zunl; zKl)i~5+5xMH1|fIT_ptaL6&RH?lpvxUuGlTJx+-_|ndI$SI}m 
z+Y3)t6a*4A-9OwOw?LMGF6HV;#}>^3h=~mAyh0Uz7STSMAiew3@fFH|hr5SLs=vb& zT=p;9FJ^QCjrV_-Yv_cv$7hj%d!EB)2D}aQJ89zk`p=;*lScyU+{FhloZ?pW4ZK5qW;Wc=CXHje}y4f6V1R8iIqGP?i6oeSRALuGei5LOyCY9@dWRlQAH=`zJ4u^ zT5rcdbW*9Ah;`gGeh{dNuOA=~oNke3I7NMWUaC^;Xl;rxlDWII>XCVQ_TcAKJh?=t z8ywbA$ayqxHA@DTMECvrd_hQMU&}2lBmI+}-(eA9X#{k4im)`WL6-P&sqIP>2oS1G z_%N9b8d;F!9w)VyvrfkNB)MG&Meg^jfvwlOJR?VpqXIYR(_2H1{1bunj;?(Gp;rHp(C@c4C>Qt_i?wCzq|#|O;52`YX*EPvKnb1*r?5~SKq2X z8HURUPol+J)yh?U0r!##96+{ZI7+G8d26oJf|%{bqCNJ2=b-)(Iw zZF%M|+8!6k1qclQ4OCEYOdN6w0*%1ILKAkGMwLBLm(zyi6(~A9Tz5nof9)ZDN2Q_5 zh!D2m1@saU5@2E6mm{Oqy?h_J7?Di7FxxnJDgIwZ1;~e7FBc}C%h#1o_uhDTCpX7R zw&z>CC2BGl0S|P9>{t}`fio2a_v<^xMwf1|WiPT5InTvFqvvW}H7}40OX7v^Z?p|f z=F<^yx~y(tx3dyo4a)fmbGpXT3oLrj+z(C%+I29lE_LvL2OW#0ef9d92ILs@#NeY* zz&qR|h8X_jcy07#KpO5{c2-e*|3CZ^WPE)qXtjs5O#9Ed{dN6gSDztRKLc?jMx!L# zjYKS!PBy%|Zu_#z+g=AQvz5G@w`JQaWY&Kr?5H4|kmHWjA#<9Ooa!<4@p>yOR>MH@ z>79q@{CP4yUkg^xY7d0IPm@b+J8;>yZfEsuDtmQWb*NXn#oXmn#mZ*NJmOG22o4TO z8W&5-Zqbp>lI_yc*cV@bJ=CnYCn~ma`bqip#GTR0z#*;GI^MS)YroXGP9n@k{0jmR z7A+E4kH88$(zpmCWhiWm7|V&L%Jr_Hs!zNa%c$Hppd`-tCwu)~ zOnA8c)B;>Ul*K|afbPK%yWdB6NJ~J2Srb!0aE$hZ6SSQIeM;6k00&m_BXIKH zy<{J#zQ&s%$Rx5~YVD7?gV!e?;6}g-x?lsEkO&<}r|^HDi?0}PEPsik za0}$q+Rhmot1#Ta;2$D!J|kC$lL^`eAz|E|%U(Rpd33wpyb*tIgH*!;BQnbMOvatq zL!NO0)W=M_h-;-wM_;YmfyzK)6c&=#OuuR zo2awvs+|1gu}1>$PsQ@Zw~lwscUO*^X9xBIAGb0DmuT@<)XV5|yl1$`{mA23AbYTxI{87@AA3Wq zoQ@0UBvv^7rL%x+QB6%Yfow}12mEZ8X>jCrq7?bsO!nu`kbwIOA}*%1L~sGD@wzi> z)%I0t(e8qI1nACQzqB4oqI=~mPyWsn zRkz(Uv5ic^TQZk4gXmeVB+PTi0j=-YCG*gm&PE-s#1I8s_-*dADu&CU3?A>8NzLgg+;FN?qEBHUn?wY4&@GHc2^QkPE1*7mSrP##Q-oLnx^Xs^<*rzgVi`{J(*5!cuM&i?uK z#w@1|>AQ)lk|J8kerhU0G*}T`Bs|mXRyzH#T0**_dq%diea1>Dfr=`^X13C-k43LF zNr2X)lkL3Ej|ih2R8vc)rxPiz`U2hih*JG`T*!4=>dm(gUz#gKxTe7S?L0;MiyZ=V z)ZtsBS$b5UkR1s~y+&zmXDjvGw~Lta00h`5(Ig0vo}Z&Yp`mZz-jtZXufN>x6(%7m zDd{|w^!#KQf27YdORnW#}0Yl&CHP3h!d#{SZ+a6#&KF{eYbg}L9CE@(^ zYKW&hE^VqFpo{f7~*G0cE*E0Mq90m@;Wz$Un;vNn2_@s zz#8{oxh`i?wV!$YoRs0%#krP0OF0S-9FhS+ro$3f<8d(Rv%V<2YEZDu z?Q!(-Tk)-X;&zndOdbRv zuJT}DF{6U~$58)7fXd;(95CH#9479R|2%i5!tIrdNuRZ30w?|1*&p9+&c)l6R}cQoJw9(mPTWLS<6_!rYEH zjiyU1x26x58fqL?@TEUGH&MgR)T%0hRl>x$-R6r=gCaB4Do;?mR|EKaWMsGf=d0t< z2!$8$^b?72LKlBEfhY|nwOrRK5ERm5P7Zle9orkl5V3^!^@-bmElvcSZ5Zf^5d-G1 z^kL!H3%pqxh3o?>C3Pe7JkhYsMt~8fzd!m_C{t}VOj!KS{qT>-<-vT`x*3jl2f6j{ zvi{LC;C6G~5Qq~`Tv1UFKmuaWuwWz%?Q8yG!t+FMf$9B&) zhwCO+H^cg)#2-LWe3c`q2W}ERO`8hE=6`z6bKM~gHa2TGQQ;R*cns1^2ErlQ-(3oV zYvVsm7#@FM2EtZghi_ZMhzS=-dGJSOt3(?WiiE{W-!-o9vWKEQT?i0n-JOiF#ZbVIbswg-zeCZ#w9MxDcwHwI#ISHOz}YtRPfq8FFKR^tPL@P}An7v+ zC*_i!%RTCNkvT)gV{CM+)$gWJUKFn}2r6d4)@TSPMtu(wK1gVldi1}C&u-S59d6im z^d48O^=`rhftSH>QUyNjJ+yC3?JkV7wub_MQRL|50z=XE$HseYLe(=4`MrORGr6bNB>Q4lCTB*#hD| z^>H-mlUly3!nM2?p~RbN;ljEvAG4>_y_IqAA8KoI6<}y38?&k3Ix2LUwux?2DOuYw$G+pm#1bB6BQ z8RCgLB`P9VI8=gj4`7++`FrUL%!H+mx0i>pm{Y%gt^WP_#rE5xgYu-|59E62+d$*f z1i_og40m99*qJKE=8B_Qq6zHz4W0Srp>Fg10A3&>N^J(KCzt`Cu;i2FucUzFdLHjc>@6McM{`_bDgs|t+ zG6)1N&hUT7h1fG*2t{~bxSou#pAB?Q6&Lr`n2e3nF4<|H#g-bH=63;)Y)B_ofMH#m znUdheb76RgZ{ltBW-;k4sa^q#S8c^61Cq7G_kUS_jb+w=$i#G2!_SgG5!=HsY|=KW z+L=F0mVF#zT>TA3T4I8ls@iP{n=?ea0$IBeKLED6U*fiZ_qg=h;ca1ABJ}d`Y$IP1 z^SVR))eVPr61?}nYL8{Rd1VZn8Sy$8|7B04^Cri0oHbv)<=TEkmM&XE_Sk5h#oVBj z4wc^#qi%YODj$DZBYMN+&Fic_Vm-jYe~?3Z!Z?4#n_F zKU-xGCZ=A8_xp?{pJc zjEmjkoN(|uxq#(F01|?X84NHqNA@bcHbU|E@mWUPGxba14{X9QimAv?E~rVjMOJ%SjuR@ zcpNEL@Tl5i0VW;$f9aTlSZPicU5s%E-2M8|TQnu<_$dOur^>5E$$tw$@qv9Ft~SAV zGoFdgwsa#ra!!_P{Vurx0^wHUMK2JC4-1STLfLvj*dRhkK~NYJhEXub=v=D{71yTb z%qm}mXA)SV%Cc_JWV1+2YgczG`pTS%aNZxlPD1qio5YWIOrcotuDK~;IsVGWUJN!| 
zh1aRjMmuLEo%@^hV}!@XXRqG&L-g8;05o+uySLooHe&YX_*v=^yohpiQ+)r^{-}Jx#XDey(Q=02T0ScC^(O1mPKlx^EV(O_bgVp zZ*hVGaiHDi=i39d^D=iZfrTmqbiDnkdwZxmy;@&3&uXAP8>;gvL0uj`8~nBb`+B>` zhi;_3n&dLw#)#-#fLym^D*^9i}x482T` z+F!pn+66P8?539pTL$vz5-l>hK>ZEi?K+cCX=uzqyJbLgSzW;60~l!ANtk3!lfPl0 z-cH`?NIZAyFo{m3S*u*F{Kuie)`(~3=?lZ`-KpX_t73j1t%&%=IS|zLUzkN?AUjY+ zx7fTZ^ zmOtAtUlVofdf|7##suokY@GTTs$P7Hx7|626vb15(#7;rzHqm1*bncdL-RZg3>`%0 zJsHwxtI#qD?i1e(6{8Fe`??z(C<`6tm&tXvJ6_glEtxSpi^4N6QDY$Je0*4gVJ_>8 zW|l?}v*&+5O4VuNHOM7HYLCFqn9og8HT;5uj-Wq0TNxLe$>mHB}7ttk(rSU%lw zX98dC(&zAx>*ewpxol<{dTHs9KNp#hy>XFM6!6i|`#hHF!cMi>{M%!2jDz<;w_(q( z!euKhM2T9hvVuGbj9FM){vjRcS@|m5>8s5-@h)Yz>Q03n{V$)Zv^j!%Qb*BMaf7OL z^K&3~1=u?aqC>6r{s3BFYVwv-Ut`C2EgG&Ym)-yg03*|O-W z|0Z5`f4+|9yKc+p)MsEdjsQ!r)BiY*2Ax5>z+xOg_y_ds{{g|{w_8Nd?O8y3wp2E^ zw;#=f8ha;JgG>YV39yF&P`tgZU+et-Y9ryK2ZN=$G%M4AvtS;t9DJ(HYRdp1?H%+x2IwZ#j?>B*|F`kn9&$TkJ+=SeXhB4oROGD>4-piSq5v?!4?!kg zB6QRG{AKm1TH{r|De)lZmdaOi?RZX8m4*7rAY0zSrM6MWy`(zFk8)&3vn-$GrWgbG zM#wuvQ<;&4i}m!m1F|U`hDn*R+QTbI?erjWW4h0^Hcvs81S;7Rax&-h?$kp8eaf*y zmpJV>dm=BTLC1S7n0PD;&tSU@WRxBULu0hN9tFv)r=`PQOOZELxLRSdn?#!*+fkIg zX|!_@%w|yc^~B0WZPuz&ZT3=W&%q%01^p4Hq*%Yxd)U`P2MAg$_%CQ=-@gRO)$hA` z{!^-OHH@4^dTgRxj_9~Hr%$4DNf<@WL~nAqb$9T(sYe5oF?=Zbn4SIo_I?PHBFF91sGl)ae@4_!c4p@PZjUYH3Nn+qtq1A353fs)Oq- zlP{L%tTfQX6Vrd1TVolCQG}6!2tl8&)Pm#FWKtMGpzmFx9QYuC4I*H1Px&WatJx7d z`mW+P*j9#dL?kw7y@_iaN%vw;NCk#-iAvI96A?^O&QM^>;+6EN*Z(GsllI;w#W4d| z(&zV}QS#Z9J%H(-i>x5DC8_aR{GNEgOdw%{pY$4F&c~=^%Ci+Z1CsdTij}pDn!jXN zI{G9{g?y_+P$!K3DAjA{i6UJZmV_&WDn8c+i4OL}v?&q;i@;F-xy55JAadDMAz86K zZWbp|?1}ZC$DSNoY<>B9p}}VRMRHB2?fg68pkv_WXJ3Cw#H`oaC+X7ZK*s0<_Pbb9 z2fZP7^uc<^e>+NZT>)KFD`2C4w|VBv7ABCjl^o-gjb27((*+_as%`OCLHyhQ_BIB6 z{{X6o4syh}l{K3u*k^SH4wv)-{^DT9lDzdW7fqc^e67bPy2d{1b6l6+$)P=XdCip@ zm7n_K=qN&Qm4r89D1Ppji4V#726K-OfjB zqO+?&a8TJG6akq(#E+L(%JVGdZZ1x33Flwz{#*TM*u6s48(t{7)&omhQ^aM|HZ|<= z**?$En2jSfH+M}!RuD)Y428n9e4GpkS&AK;HU^}`I=s(TJn~73WIu#_p;_8bD2W|8 z1$Sv0pBE$Mw~=RIDPqykPn46kgpSwj=mxz1HdB%J-TG4BS?W-&GQ;DZw8})Ewa7r* z+Auw2G*~I*;f3t}aFnw%R%zl}bzJ_jdGalkXw^}xR%rjGPZ4w4Z*kd9V}Q~LJMHC5 z#C%^D&?kPu1-$bw_&uv>=MS&0t~|}BmV=hK#}e&(UG|dgUrCFn${#!ep0az?Dehib zB0`3g2p@O-9eOfoMYh|l0~!>~I&(}9pV;Zz2oIoN2m|jX#<^nfHliZFfXkL_`Nc5<2($UB|mCF&e%Bbvckw%+m4A1R;h{EYG1#_|VkTpre<7L+;v1_rtT=q+Z z{%EUoN^Sg%c=FtUfJZ)UY8r$#7>D*_^=aWpt8_<$fzS?;%WMq{UiXaf6^GeMEjY41 zY;q2*u$ug1{v0^dKUe)kuhZ@O_y#6G0J`9DjI3M@v;VCf@XG0BpcY#wjN7S3wV+)Ickmm;oEKlz~~>Pn8pDfc0YN_*I;&8l2KB0(v~(a)VPG`DIM${Vgv_{p zSsmov&GyJ)zvPCy;_|t)H6mB)0j`?)ou)^;w<_(}h}N@}m|(NyeHzp9g{y2UZ5|Hg zk-z8?sTQdMxASGe?OeMvT`o?m+st|gb`SOLI{>dXORVQY7H8Y-5Fg2~uk!xeVv*?_6kdU0k0c2_X6&jm7-`+q3z^>GveVp`+;3Pt?M<-VrI& zQs1pbxLz_2K0*c~Q87tR9)PJamnf>$~f-#GT7HZaHnkx0C<}m)g z;8VJ9ak)F6{Q-!SZI}KOLi!v}`-Na?4f`kCAc)1Kl+IP8WO6~B4wg=fstkQXLmave zACI%2_oZ%rJ+O|xzPY$(@t$v)>RX19q^s=o?%Q!p{lp~3g$J(aS}pnO*?GH#Oq4X$ z?<`MDgx3sARV!@-N&6V7p;FlesEZuN5}G_Rj>d32^X1P1Yb~I zc0;5{y=% zLD5+5CpA3vJpXmf!vDqrTj!TsYD8!HbMsNxiNYA&$hv^8Fa~GY$8G#K{%3-wIsAC8 z649mc?C%GKK&U5&Eho)28@o-4CjNeox|haY8e5|655eoYv;Enitk*l!wKb3*ev< z2I0O1K>;F+8SsljOx(_WBeH`Kz7wdB0GfSiX(w?jO4BKPNfz)k24?n`olL!*?7~-y zjbN^&ZM{${`P1xVGFNYf=lggM6HM*jcLUxG&&e5Xpn8-7!<6-4t_I;Nr`?{)DY#Vy zu_**cB+a@GRkZ4BY#=qZS3^(A+mWlS3{X(Ql!JIW^7GseJ2bXaQr zP45q|u;Q4-GgDW~8=9;wJrE{w{mW^PTAr%!rvnLRq88G=zbrNGhh_hxO%5oyg%Cq1~coIJO|Gx#?## z&qA zXtt7k`iY$;CyWnHGC>|`s&_b(FGKn~x0+WlyV5{eI_ZToqIP4fX-H)HMQ2V=CJ%Dj z`{Abj{G6|Z@A`|rl{~^awT-N6f0Yn59ECo&O@(=oiyYbh{QR~ee?cq)O9U({bC!zZ zMGiK#FQPsjcRCJ=F9z|5Q|VNY_hze(o4l1yn!v`iZvb56{LYL&F5UO0 z(c)k77`xpv8g#a={P}>ifCnl(i6cXoV8<&F0Qs0i_3|;bueulV*@oXzopfCLlFisA zL-fr<0tp8L5fzF=vM6fAmambQ%S 
z0P0&ivtzlodAVAPY#N2nd@jZ9O)ESul2PSAzvDd2B(jb7QM5em_(Svxz`;Q}BuzAt z3K3F=_wKL4{=sUThi-JBDIrq9)yMhQYK;a$ZWxP{bQtu*{X)Uchw`?sLb(u~-2cj` zD@{oI`}E!jH|{I=|NlRcT#(w31{VuA6qVgjO;r(I>yE2@tp@JY3FgcF?Z8a`*$x{T zdDnKVEt{%{>oOa)Srb!ZT+1oR1nb{PVGtSxQCYvTymyq*FZO>=VQe5?+#3)+POI1( zT_*l6y1z~4V+g@Xlc?eOYI>-7FDEb9dPZQ_y1tkBc!$#kQtVxKmUQm_u)A4TDhl}P z41JXnT`jMg9$z|KC_fgHf1=1 zK#GUxK0(L5PU{H>44Lfszxevb=sMSK?Z&okJ88_uY1G(kgEqFYg2rZJw6Tp9yRmIM zY3%RW2k-gD+55a>tRHJUWAsP5?q}ZDob!U4TwRnLoZ&9Zk#mWWb8rVa6he@K*gh7; zPQ9r;%p&sP?kjKKmZ#kj6vmS$NOlm9O*DxCq6(P3`#-G0u-qI zfM6h@M<(0_z{r^~Q9Ej^u{1Jr0Rs)R&=7})%;PXHJpjABcZQ7%ku`eh&FAU%FKWR3 z)wt-(z$W0*5fK@Q4D=Klk*t-6Z-bv}04U!*B+*wQlTY}UIMtLem1d#cr}b^c9v~oa zgmV!96f-=uqFMV_U&HbVJpy~_&yvuS!{>fTlp(od;bj#CK zJO(sH)o-jR0|X$KVAG~C*v>xXbcsq zSa!&@A`H`hYB5NJ4;#Rwz8~PSj9Co>2qm(#vY@Be!??*GTK4ZBa!N?n6lLV-(IEWR zQhZ!ycBDAF$x*3Rr$Ri%w1v3hmGKR?zU65j@{gy>u^M(J?>;@&H=5YasaBIj$|^v5 z-MXs)L7bD5isAXUXZ5h$yB^Hw@oN4&R_1uM3dKlDY24GBh1rSIkwg~A2bLsAry3=J z`xCwyP4zZP&WF6~A<8fzhoxx{bFd@H!j21g%KSbsyFYnES| z<|w;EadC2b$3FNySHle_dj#eCX#zgO?+j4y50>W-+*Y~M1F;Zr=lVfVB&mYDRh@L) zIPUu-T()iX2WQoYrEDp2Vs!}2`UlviOOaI(7>y664O?G;_>-mT8n;TJl%JoMd2T=; z6RomPt&^qM(7Sau8szA1QPCkryVWBMs4ruTYl0z5Wx$AotL?j+5^6nj0UcwtSK9AfPEU1i+NdCM_!H#}$aAgr>C>b$n@-Mo=-(-C7D7^guImGnAKI_hop1UKGz zfjyrc16c^^6Gf?`3IeTIvp2%py|n;BA~hsn6eL*LoMPZg(AT=JvI+UWtMPx-XBK!_ zi`CbcS;3xlPx)08^$5?M(fkikZRn&Wf|EZs{Y}Q_R%XG586MWiLNXE=wF10wuiTNE>}^X0BIdtdZgvJa(}L8vvV}h8 z`lIQThYpwdLQLSn_0aSp!ZL=qdUKeq{9t+@RJOM@erl;6bhk{a=q%o8`^&xYz?CUk zT>(0p_!5!VDoMrT!^|-Vj+r9D&8(r`im1xg08zkI-ER&d53w)n%dy@o!1!raj)6AAHJB z4Ok9uaq%!j3TBz^rvp()aN3`wK62#F)ksHOi`x^nw_1HGK{fJLK~xs`dJ}Ow`#Zd^ z2@%IxyNMk^tze-SnK~}J@b>&}>uS8dwc)o7)?rSF_Rg3rRsKMtPO37BOfVC6d0j3hGLK+O=6Ej}9u5dABiB-entOx-Hk30srZ5 zRO~?3-Ica>x#s1cqBoD>=D*jm|9DbIIZ>5M%A!l(5pV-fGD>7u)VldqBjv>V3*61J zUAjtx^={qO$rRk35H#_;rcF!AKUMH|JWx5n%fj71ovD$DQTE+^{d(zg zLOX`bn3Y_nz4@2hmDzkH+VfS6GW^nt_{LU0*LaitxUf>)*UvIHM?Ylch9sYEQ7I}M zdRWbo&lacsAFf6h+E>G;3vKRSy_jf~c_ft7SnfWQ2V$c!1^?NlAf6iqKrV8r##2qw z?_EbsdR9ogwq5kC#fxl;5mV;r=Z9a$)sn8@gBt;b49mWw*&*?n%I6U^{fxxlrxRVF zEPARzYHnWwTmJ05Em@%sS_ra;1Gm(sWyM)Rr4;{2Ahcs$F@cyaYw@3^kaeV9nVa}EjJ?tekhvgPGB~D2?QBt z%%1_Agna#G$9f$y(;EfbZWOMvDD9i*(R}J$d_{6&GN$js-mroV@)bO1JfcEq=N4$ zI2x18g`o(@MCR(mMHHo!2JUo9Gh+-@q3Hu#-Q*W*UPt>!!)(09>mp>FeaO=c3}S7b z0xOOY5aS|Nn5eC|o7Z0fijv$y-4Ly0J1+P>kHF`NkOGcL4#TLfXQ@6IX-v>%>hrNC zk>`zS$9lNyOR6XWHV(w$wScSj7qu>VP=?T6J>qCO(dE-Q!&V0Ra^5r&Yb+FjjdlYw zK@yV{T>`(`SO~vUozt!N9ziK>MW?Qf(VhgPl$;nU;=weXz&>Un*1;qdiQkLHy|9!@ z1J~Se{62Re#p9h@_1K4E-^0U?96G}Z3A5V-@{qw86K7*T0hMm#U)OthT&Dv^1{ml` z?VHE^F!()zu*@=o2-?3vdf*bFXMnbCRb1`5Uf|g1LF2BmLWDRvZ6tEITYV5bQ$w~; z8K^Phz#Fd+-NbvxSYRwki3#K_y!onDMMdCx*dKiIb_qyk{Doy3g}XVUUB!L$#-s)N zvxwg?&}p=hU!$?^JpL+O6f;KbV>oKQOlbreVABK;*%%&=mA59NUvT9x`nURymLJ3D z6nsCNk+|VgoMQmhF7LApR2nxIphs+Dmtd;wdk=^8BT^3x{jrJM#i2~P@H`*@JV4|!f~<%OXr@G2`@!v3aa&8 z#oXFt3(hlS4B~VKH8p84K?yhm{HKsqnuG+g6%6;MQ|h-DnfHvo_f^UVGE))W=q~S< zFh_-U{aInnfF)}&TDP*&URyDza6dgpj=xqIa!SHGgkEkCAl|66T_x;zO%tTicMSxH z4}kzV#{6xU_kY9W{{+sGQU2J7Yd(!FOeTwWiV14NVM>IuHDA@*Ls z6LMm+Z`nlHzA)g?aDqxJICC66?~+I;;XU^g9gRg-4;HFW2^)IQya!1NDWVy~HY;ma z2>9KLSXA$tBf2{oQy;`se~zLzGHiAbhx@LQuOA!0{=Jv>V$p*1%NP2_%Wx*=rk(hO zQ_f__oprx)oC_eyXaHE;LZ|JDHTOkQfbOX;`iYb_LrGqRQ=Fl_?^b^nwV2Ca9yZu8 z#In|?S{h}1Ky*8AVJZ~?QdEMbekGk-^pqEZzbmB|WImk*Ay7oKA~kL~G-Z8hv(41w z>s;or)i2<~4c{jaK%ML}NGjLN#CKOm6cnx;`E6TId4%pa0uQEs zMc~^Q5CGRU`ZTQ#OC5?bP|I?Mn`8e8{Q5)ZeK6jU1wwUQx24JEDm2{sBc$7YCL9!I z>&6PVt@q8XLPR)}y~2tVx@OKmAbkl)CAU9sJEFW^tX;L|n-Q!w&7*)}bQl+=y);x= zHQ_sF{Eu5A!)hA#ek|*fcVOXy6W?lXZ2A4Gk{IL^%4M9)jMUNbyjO5)=|QC^La1Ot 
z`t@G=Q9wt#&+PbR-)a+?-?ca@Zt*9G`wb{AfV~r5qOsU#u(BMyi^l?L^AdKy-yCGd zC{(J?Rm$8r_v*IE_3j6jtxT?`A8}k7$Z2R+GP?DYlikkSrW|--uJ)2JeflS@YO&SN zmy!fK$-#sc<+F9ge>wFVHBt?UX3f8BA>k472t_2bj3dB(6*Tl+1OdSumnXHi?zXtc z72-LnG{VWua$_D^x$*DIitg5&HXrtz-{`yYp(-(dljkTxK@>g0#5Z24Cf+Uz#^2oKwdW-@T8)FLgQ|M?BERLrq8 zXiNRB`=W7z#8wnxRac?r>WgRabKqpr{B8YXLH=SyQU?iYQ=^?qw1t8==E3}U(NUhO z`iTIUA8|INSo0gNCAvPG-e}rot4F3)`zSyZ7G8A&UWzn<1I-?@_D+ zK7Tyevsv5<#_nW#}V5 zLM7=V*W7Igjtw%P^PEeV#%Qap0sI;- znPU!bH)^&7YyhjNH%;{9{s}Lc1_46gBioPiDW1L7;tzr}5hX{B>$U)`Lb}QO$YrD= zM({M|B>S0_0y;jyrRmAm1vGzLi5CMeJf#>LAHOSUQwuY8c(=YiZpPQDF$S~?kHynn zj%tM`ea}kvkdBC3h1r^-jq;+AaAY$|{)rp1+Z7(Q=Ed3R%Z>Bf-AJ=iYkjK+5Kxib z9`lsM?w?z;`bNWc=~zLB)0y{Yf;XRa%5QdukAbx8c-{D-{v?*t0Vp)>m*}QaExL$( z)LQ$==x29U4MbCOO)B%zn$3w^^)kH_%AhgaL`EEswXS#kUUMNZNOc%*LlD`*o<&BL zfyHWhzX-T!u`1h3d}DwNTG!3G5du?8n`Mi}LMyEVe0by0*V4mAEwFF%cC@qgI%zhP z3U_cEU9CZ@cUngN)t4XkqPy?R>&4&&b`d03Y7N-+_vs%HwxsNz7Xiz1voAG481U(k+#Mu6^3BX?oo z#{tPODdT&4s-Rmnj^{rSc%m=Z*_qiuN<4>;XEIj3DU?*NmXSxbwa#<>WSbyLR^Zc% zBA3A`@%QkgnY5uMXez)05@Iy1*bQ#%$2O4B*t_1IX`KG?YA=I-yQlTMB;xfuss>p` zuE($LL14Ul^THd++>D6Oivkk%5^~>(8wovPf~iPi!h3W0iNzZ~Z)DedTuFz}W%HFH zO};CVfCrh>x?e+!;8TVECU07CaVJg6fy33P<3uijU55=;ZEeBE_2HCu4K;y8h;GnA z*}?MN5I_a`B3bv4%W0GG;O=ap#b3r`{tWrP!oEkYlZrX|$u!F7&B9ZOo5(u8d(jPwm{YioQh1 zVjOqgib6pJaSEc=Y5!ixf~h`iSdKgn{=OH!qxF1D%+vF}{AhWeI>R8J${VReQ^$(O z+S31d2Zew@WUveF57>_L@Ol27$7@U~JC32feLgvS8%t=iR`oU$hP(K{VZKZB&-eV0&t_-h_l0Z02AH%%H7lhV>7VD}s zmz$?UtSY0jEmo@t2{u2<#4sYcAGQ-wtYCs$lveH;&UI+;k<*nRsnN-x0kV}cV7~|Z zmwh`bnMg+Elxmfr>xnipKD$_?BZ4U^*xTv~?Vnz}cL?ADD^Ew$FUT!;D-VlO1?jQOW;K&5s~MI!uhv{Z8Wmm)2j!mW(>26ni^+I>xEA- z(oxP^R2?_nlPYZHyAk78aUazJ!~eK@7pV=fjt%|ZuMNYBa{n{5)w~a1f}tP@Ns|HO z&Z0(=l7K_2$yxaaO@@=PIXXe&WdYWdFCA``G8vyWq-BFX0rV7ehtu6Q9m6lcHW=u_ za!;Qe3u3eVlyK*LJ@NO^GOE|=?aL*s~jY!TJ*O(Au4$AmLu6gs_hXD6~7J@9qIbe^~3;$e`$5qzwCYZI{ z(*gkIzYmY8NkM>uWwQ$|SSnV$D*W!b)lE<3*T+leIx(84(I{Nc>$OGij|OV7{i+LWoptem-|J_wV8mek zh430xDfKxSvgXOcm>R*K&W$ULmG@`7;qXuq6rd&=@=RKQ>&48;*=-d74l!!3N#r{Z z3(WlB1gTcIblC~zs`zKMib6+78OCjO_Gc&G;7@~Ar!9Upp9vL{(Sy}xNxZf0PQZJG zL{O-k#wpdN%l2yUOzpK2Fu*#m| zp_t=~{wDOjdA&WW{zu?d3IPnDDws(n->66((^SDkA9+yydr%!OI7tw z^T+_DtHgPk`fs+KUB~u6Z^2q< za2b;`LBa82#2B$q1+b_i@OogTbkB7k^KiHPrcr>)COQDCcRi~`M8C%JPF`TJ(tv19 z_%;BBl~^FLY5oqf=;O}%cN7$$_fYL7;Sm@m<7QMtmX63xUp#oP#;x_B6#n6d|I!Jr zV>u!B$A04vX5x_uDPoCAR-2k3$8rDkHvHH7A-07X3t56eH3Jh1a8#@P{3hLmJYz_L z&#xIoy|<-OIt9MGUlW`YUhltuJ*nUFvkd{HXF8N?is|9)?GKFoqEgbv8)8)P>gKpf z-L}nW15Usn{JC>my}|CnxG)uix#7-dAe`+bUEHXcQ$dR-Vt=%|#Y!wkk2l|_sW>^Q z2g3P~varH+BSCqkduS}mBFiCHk2l1=aui#fO0Yl52^i(`6-gBqQIBZK7J`$(OYak#TtQ4#6ni_Y=K#nM#1rDtPt4(C2j;zwkO_4Pl1WHRtbeo!gQOP zXBYIdQ@!VF77|+!tFESoLz9oZV^X0q?_XFLL3V$sas3t^B5?X#U(gqf1dLe7;cTc? 
zcrY1X^~gw$v&8Stk2u0!twe4el>=bw4+ggO;E&*vlmBlR*hfTUD&3;1x7DFZ(+!(+ zx#P%3p0NT;LI#**9^0gpMF{p$@xCZ^s`KKWtJ+x9PRXs_T1JLa66v;Z5VaNPKE*ge zQ9Tj`gOp+DBvYadDSh{3fRM^RNdim|JtW81aQC^A1GVK$vIQSL=p!pj&VkBH#~N>rGVH zF~Rpx1gk8B*k6J>dfPe?IN6t(H1fp(Zsj}3n^Txv>2Alb1@8T(*_T>e2s7mL(F|tq9Yp=+l{3^0if<6cNHZ|*71j!Mus1`$4|K?+L+Wik@)>k+#y zii$?z66IzQo*w*=Pi|dgOriAYV+&!5TPL{k7F~*D3QJ%;n-m`Rew<8*ALHvZk-qOD z7xd+KHSCHV%-o-+rK-iLM=(cA4!jr;+B4#FGPjA5_kf!;E+yVjPR@3BR`TG@nt9s^ zHfMVCjawk*`E z|K=6G4Gl4i4z-~kh~x#2uTa+~vj4bE0#hFiY^YdUzXKc^k3RS!#{A6z{Oe5{|3+s{-tzigTNAl$nbGdHMPMnV6vRFmvt0W#R3Z{l!kwsHiyBS-Tx@W$7O>DE zG1sk1k^06{sN##q?fBoKDqzNbM1Y^}9xPYW!r@^zwbXmcrCvOZ+ zit!~f6Dy%IPGYX%?LM&ON+G*k2%l{yD=-%Nt-kx?yuuF(XtZcZ9IfmR%Nl@xCLN6H zk^rMK{T-EwzrVP3pF`Nu38fMBd8z^2}O!Nd4;+0b?BxAj)j1((&uzO7yvb<}-UJ(Z!bm@2fGxrYfuvNXkji4YfXEBM#fuUWax+askqHakva_()a$ZgSX=s+ zw z=aig9SZuMwA%Ku?RPPPA+Brx6;Zpj$;0vdRoi>qKH3mHuXR);wOVsi|_VKLcsmI2; znYA1lbsBja{}7I9rziDAU0=kG%ycH|!if>HyAM0GggTH0DnroO%D{$z;AR&U_f|$g zh4r5=ic79Z2I_0sCpwd+b)KA5+^BQ&wfQ&*>HdNl$H%J?NLY$ZCsB>`A zWEyBH*24ch;l!z7!5yr2f2wY>VZAtW%MAPlRcN3IEzL-K^)<55<=IHROoI{sfGqG5 z1JkA*kDoWv-h2wHL3{-moj$q&SnaXGhO4o$A_On;K9Y%4`3X_+4Lh%&eP=AQPfJV?J`x1;roeYG$d}VRr~U;_VB>9t?>X_AUb(JXVZ!)MAS&av z-%W+}`7Xxx?4+_|7bd-8L@rXTYART}hnF$HYKlcnmkM zV40$Qhe(29XcxH{Wp}Q*tN=RnJV@V(=i=q<=^AZMh9b2R>Xhwuyxcclp`tgIZ{9GC z?j$_djx>YK^{|^PN#hBVi6W9;>6Zp^TN%f9A`1_OJsX#^pucs>;*R+oUw8iPIkq(s zn}Hk-C2|;$s-m8=6d=f+IQ(r$e$;LLS`HHLL(NBM1PVvAmu~9gnw%mv1*&-XGb8Qi zZO3b!9%6sm8zDSFz~}u+ThRjUV&4BQ`rufRSTzu}#twZK0lfsx=(S{3gR^ABD ztp0_~9)9Duq30-yc-3I1eh<;i7ynCgp<;lA&1-fATThiUmr6F7oFr`yM0YvNop_mZ!-j^F0*-Hx4Nl zMezZ5pSOItL~eR(7e(9F(C(JytL<4<&~_?IUO`g!u^K%~ad&)LboOIYAD1J4Nl%XxEv<%klJ&HH4z7SRW(mp;ItL} z3P2Y2e|RUL<{v&(dQh0CY3N>HVqknQoR^W2_>_qP2gio?48^Q972ZS-me7q5J~9fn z17=+_`q{HcR9)%!78v<*M7T225{R7l^=up*ePaPCbuT6~pf^nJDjtIk&`D$MChsGy zadfvqp+o`LG6_Z`Na1KqT>>g5g#Ug-;g&QOkrbRFkuWecQynA6a zliT()w_et1RBvSF+oxd~j7UqQ+ecmR@RDj0uCF%sdiHD5t`FPvY?j+;&Zf*?j8S-u zsZJW{=d(4I)eG(vZEugM7Zx_1f0j^Hm1}UnE>|w(aT&IPSW&;@Df4zq-kx!b(rcK2 z+7JCyfAPZ0Ic~;Y$nW9PekZH`xvXJ>TVXDipd5CAf`^~t{q*-Be3daVbbQ9+6X>qB z#G7{&UX?N~aLs+{%0HF;f{RorZaJ9(FY0%yKU0L3#tIP(Y(3mS#jas5esOe}!ZsG` z5#}4Z$0>kr!w8y0tdGlUJC(WMy~BD2=#C>6CsNMXNBByj0o4+3IX)APjFy8CwAmIx zo~RE|*MJC_ubd`0ZCc^oW*iiR;HKN?S$+-h6yw|Dg?muo`?XWuRIYmAPh9`~k?vGi z!vBfrci5;DR1;P%krgiS0vhljqEC^z^BW5-?E~TF8vqgE$ycdP766fh`ez^l;3q(0 zC9Evf6%LZ|>`oU9fA+g%xO!2-yzAn|{KJy0Iyy@G8xN zcR!Wz_<4u1LdpAXLh%=<5&@J=<%RV~dxYMS@kOd7ejxD$US~ke^BT$EBz!E$j^%tu zWz2mq>!#G(zND-H;S)s<$4qkvBW0Q|__BwMOGe+>b(?+b=3^fNO4?sle8qs7oxSS@ zYM1@R(WU~H!0IQ}L9f($O?SOh>;a)jPN%g$UkMjpM~8wJH?bZB2Vqq%uWYdYNQean zlmc$`%kdJ!Su*k~a>W61ol;PIwl_+;az9M5VH|G*7yY{U$I48l z8V(+bKOO$G7UdB#vJW4;V)Tb5gQjPgt!4omf7d6}s~e4n%Y!aRZpcAv5BF&!AH}(qt&gJd% zXncS5j1L>Uhsro$^g8T#f!-8NY&FQA$d4r*%&Hn@2UP{#q6A>Jg9N$&s>}4@(01jj zK)aOEPi{dW&2u^8oV%ur(nrIJGSFsDXROLv{pP9^M_>ux0UCM{iofXv_)5P$6#;sWF3uPnAlztv2iuHWSSO znIvWCVcAO>>N8~;HY^L-TjnuVj1o8jsWJqe@Nb(w1f0?iVP4A>O%BPLrF{@!U0^T5 z9--gFpK~EA>Nsp`988{FOn2L#n>J!J9n};((5|0JmTva6KQuBYcbvGd^{6c+iOnSL z^WZ>QtaF~YAEIdej#l%s^V$ty_*HuF25$fzcC6NEWmgpaj&YxHUEi~MC~R3*7V5Sd zC!4#c+jVjR=1sGAwKok2+e<}m<>mX}-mSSA`SM})HO#`hQ%+C+xmH-uqkgmR9`8ix zB?FRI`CxsymnROBMwjkG2>T+2Y94Vs?$8g_X2mfVa4W(y*zC_XN6TKerp|{C2ZA4Q z5=?Sm#}~g$g8DST(P3F28++QH*gE!tYFYHCo!6`{ULQ_fvZUf5TT@WHwqbnecag5; z`N8#$f)xw1YLmBSp2JzFg;e+cMt?>SU!dklF@}pR&!;jZ`{ue3+XUa-U?fXMoVe0r z)EA_3rAQ@>&V|{7yn0Urs~CzK(5jp&_Lc{Ph=)8r!(EpIeLGrAZNp}tfV;+kcc`A% zRL^jI2a`e+6^Vv2`dR;=L7AOd>&rCRdbjH$GYD1r?w}%L6Gp*Qb0_v-8`rdreRB#4 zdb69xVO;FNI{nE;+@~pzcM}@h={~34OwX@YL|l(u?$OcP 
z05julq8b$~i)w1L;e*(n#ZL+k4C5GP^oo|PQHMF5@{eW3CdFd@!Y%0z3&QIjXmHTg zAD=k?cmvKI3s-`_9}CCG?}bI9bpV1xBjn{foJV6A8PC!~FtDB%fsiSKBG^oqzF8eiXF;2d^Q z#^(sU=QD<@)2W*NA=$K?>U(?Cr4>sl!U9);XcJ!}%HNe57pO=Ct1GE(xj$EDTZa&W zokkqwR@8F0=V0rf&luFt0(Vk=)%mlFF~hetK|XT<&2snyw%r~7JRVLz4a6qYcR8I! zrwk&v0)|WX#i7K8BqjQ+j3!E7xPS4)`>k`JN8TP8Z-FM%v+* zFC=z#3D*QX+!F<*PS-9}I4VQf(C_$Qv3pPCmjlJyCV*^Vi%Y`iXsAln-m)CY%mSA| z{~A*W8_s8DP!2rdq{b1?e&3OES%jZaWlJikq3GD9`5yP2*M~K*tC3vqWl4oG__utv z)&J~;O9CYo@vf3FnI5_r7=GHx4p*R;q%VzIq>$E_8c>8i|GxPKtecc`s5mWO-i_u; z?|{kzOQh5(aYv-n#T#H8knVYd-0sV}DBtC1rHFF}2?1q3U(_eJ+uu5kam$ zL&@U-bZ=!c$FK9oq;4@F(V&7pfK8H3eF@1z7lAQ3(AR1}e z+GOO1-EzZnw8Gx3!@zd=vjQZJ=KZR8VXcE|eI6m>vFjw)m+;e>L85+<#oES7cL`rG z-J;Zx`;cuW6QC}68Ep>|$adBKcQ1t2OI+4(hxw=+1Cwi{>n^TRbASBkoAG*kRac_x zLhBbb81q(uY`<;R;o(TM4tn*j^z31MC_A)q!EWr?T!XHG!ApKD0WpvH)RkoJnhvcj zXiW(&t=bKUQid`OU?;L+#!5Ky!R>kpFt)c^;&MNEPEYDMnSuprPO-0levFWkw8#RJ z*?9FqBy0%-rY$q`)kY^h;-Vh_R7#g&i{o-Ehkkub+bmmB9U+b`2Kk@XW-t4-jr}xv z66D<+TO;;{c`2o^H4z~~F6(_$7dY3mfYVX@EYo>Y4|x&MGm$%A=%Viio4P_9#{Hzz zW#C9z>qDkN9@c(<83~qQ8Zwpdr~arU4CYmO*szErR@Vmn=5tZ?D!3mrzGC7GV?%CQ(b+@bun+GlKqduol06iq*A*@!Kb=yU^@~+O4KNNdV@kSaXz>I45JDp zHpvEPIN^knL4gf&bTTwBJ-UM7c_ny^-c|XEW!RmtfF_Xmj=$4ROhkSX_LHL73Awma zFfw*EjPjR1Xm~LX6dlizv^NmnX>VVc6HrYpG1Pmt&$n=Crer-)NaBsVKI^ZNn~>SFR7r z!hm}2C>G6IBA}}Wd?(sZ(_J!PF6pb^vdM399K*FI1SttiAiB^6My6fM+%d)q5P7{o z(JP90%VWZ$8u|hTl4DM{bZ=_K`8gWtW$Z+7&bzTGD==UX{;taP;$vd(SMB#dBzcRF znyL?)Q;ImvEAYF0hGnjKMNbq9I62a_lx)5SnUHQSr1`F)9YC2&lQy`Lh?I(aCt)BW zK6^RHp9zTUs}fjx?l_Yh5`j@cJm8e7iz1N)6-FBw{|(P4CDO~?O0^1g2fC36&+}MY z>rtTtTiQm&vO3QO_ml`%1|OK%hwDkIMFDPyo&-jwhHa-m&pVb4h|Q-~dyaxKrpK2z ze|c@?j2&QuUW9WI?H8c?snv zuo<~}zdlOWrd8R;MBi=XI5*& z$S@|Y8Ft$lOGWierPl0&zDFsJR3TsfE&09&vs4WKGD@{hZ8SEsRCMH}7@rbohLO2lh~hc&~MG|nH82IUjl5vY+oWTU_a zjy-1n(*Fcp#0Z+hU)}(Y4<`~Yf&MfWxxnw{+y?MTCF1Guuw7!oAmrQJ0Gr8)R7hi~m#N!OO3%BmQ{O2RwGo=4g7i}i_WQ2rM;I%K>`JG@BD`*6T%qSaA{p?U zV)VJ*K65cM%F!3?IK|;dVm3sp+ZSSyQ^JI>p1ORerd*wO(p2xysO)CY;;V!o2}5rO z#W?Ld32VPIE*95=2vnN4p-#t(!Pr^|2-SwOvIF6G{-6VmOe!!RXH>r0QjQWJTesJd zrK9)60^=PvpT_oUA=KYGvb$5~cj4clKv&zl_%PM?{Y{pY%h!42p*L}IUMBbAWDeaO zQ!O+(gXhw$dR6(V9SN-ZTKpc$DAI+%g0!BR@bDd*2I70=l?v}`yhOK2PVC3C&IRK0 z7AsH*U+%7Mm$hN3BP3vd9gyZF!@Cfly!AxAy)9 ztbLAaHhBxYo%lH=C)lu4Lb_V5@1LuNhXO0zS?YZ@^U7tHwJIh;Y=by5g24B3>H3Dp zPS1UuPmNY7J9QLz^KxvqH5Oj5USEwW7pA+|aS`&8%7r1>*Jeq_scYFy+}#it0Drq* z6K^m}bFJf3s0zQP{Un>%>|WL2Eg7cg3*cU)w-8d_-m7s5xaalrem-4LJ>=o*z^pzv zTg|FEnn(OI%{@}gL8V_ruRDzDenD)nG8Q}a2tX( z^IEh+LZ!j`1Vl)sOCvd!Hd7pT9yn|@YrSVKglr%AcG&}lFYMYoR+P*J@A%dI+dd!` zXtDbFKTr08C%157yA5`V-KN?^&V5;%iM{PU*+U%tULsVyn#e#}$lC3}~(NtWd!)VOey<)-Wb6*gY6`3p-!B-V<6fluRZ#zZ!APN_bI1$kfK~LNs zsqEcC*q;=5D%Cut9hgv!hHA>Mc(m zRhXnsAJ~$T-!D72ev^Ih!1LP$^mnRfqIOd#XBgf)L>mU%QTrgzUUm9>Ym`w{Ka06` z6Z0hIJ*;3i34-?6f=|#1$jf`CptAS2O@%xX9+}}6yCUdH+j4(q>Oij1Py+NIDMHH`zXT}I?_YWO$v_MLdg(=%cmHY*j zyFGlTOP}?L3`DhPH1D z2zC>&<2OCofwr{NY(}T5qzfk(W1rT4Ruz&!*j?5(bkn?w zgRhWcM>m8w1?gB1Cc&~D^H|ZeIQoU!rx)D&HzF5v_*G9B(jUuL!tp?=6tMD7Hqlud z+qvA~T`g1wVbM~3 zu=GtW@_2P|-|Eh(>|6j|dgesYuo!cqL2G3Rm6yf#x9!W7yS-cg3v3pBfu|hJ z+ZY7xpUiOIB_TSJlvgE?u}9E%fSLJMnLe-T8%;3|lIUv3`5i_W-rr4G8jQ8#A9}Zd zWv}8rEY=pz%jm3TwkD8xBi~{(HMs)gxwPJ#pC4|A+!`@VuSzi&$5ywB$15hQ?Kp}R zO!9{zH|zKZOfFoR3wR#c%m*CQK>drN4vwRf;LSotCs4(01DhNj$lLh#2_bClWtL=3 zsn<7md1R2NYgmo7J6NmVF`Hm@8J1!FdA zx#F=k68dovaamw7y1ikqT7NNiq}}oDI9TT|jvA$`)uG}v3z5OlAiPGXt?m%Q7Ei@b zyg~j{&4sY$e1;0|8Gr0}{D@6;>whshweQFWqU{$HVI%=C)xj{}?o11ev!}fIy>Ozu zy#%)wfeG8FTyqamR30h}1{G_pP&vn$^mG3gDNsdm-<( zsWUZ@i*~hZ_#wJ|%&?bnGw>Ia63|5cvrjOMX+j=vb|wlmnRkHJRTDlwo}OVkijf`# 
zO5n-XMtvLXRsi;-A|c0|H&<5y342vucv7_@0SqrEj%P#fF&FCqfTm)wP^0VSpru7P z)|JA9+<9I962zDY&5GJSbLw)5acbd@gK0F-Kf<~IruxTTIP~pX&_ka$wYvmNTP9w? zLEcBh?+cpEyDbJGSM0fNhj}b5g;7<9|I9Eryj4plr}iik0zv8x$#7(f@6pzOjtC+L z25iEZ(?DDY_yZeZ@$P|?%>qM{bMiPCsy1Dtvv;FT? zK|{k1pE$X!{Fu2ZJ6Ru965f+V0XS+>7wwJvr6=vePu>MIZuq;%&waOnL4*~Ltu>B?Hk2T-{j-WMB*cr_!a+T+w++uZTtctCq8O7)2yMDBXoEeU51SXTgz;lQ9~ zzWN3Id?2jw6kj-HD!%~iVyF|PI80aO;yrRWlF3x2&{tCw#CSRqsrjqgSW}o2{4ZO| zE97lDd|;=cWp!=RG(Q1@gM%Ll)>Rtuu(hm*(iOqlDvViE z1SIo3);TceRJu3dk9tLP$FEV?73D*-i!OKcYQ`naRVyvEj3Nr5$h@w7}&=+5YUK?{LOW zpa)C~f~B~OSoNflPx+Zd1_F#puFU`8=`7f)>e^_%HknS`zC@JxB^ z`a7KpKkMN)Fv!(kgIboa{KF}IlBjBaa^kECq?5zADabG2SqAAG`VBE;!(NqF&uI#k zQwtOgeQl0bJKjC|EgK-dqGADI?Fi|knU62yID`K!z1< zJW|dBZug-k(5sARuoh?zHv5&t+VOkA3pby|UWjRDQ)RgY_SdD*jn;GxRY(v0%{P++ z>=0`=&kKh!k7`8w^vU+SD8Y`Di*Oeuaf>d%br)HmTnN0hyU(htE3t4QQ*6^k zdMUn*sjgQWw{qG1LQ<1C#nFutIU`yQ+OBQM%yxh8fns{hJg%kr$$p&s*|A^3qsV5~ zvZWZ=X4id`sea2`ftgo^v(9Vifld!i4OWGjo#%D6pq8--uuvXtd+*?X+|d3>a_OG` z{m8nlst3i@lchyGgrjxoF-eOQg%{G|kZVP>rw0w;^Z(v-drDwk?P40(W-Q{N$Z}hL zWA}Pgt5W(kjnB2sLmzIgDs+P7kLmW$d3{!X>~>k{J7vxk*M1gyttH7mqb$6Q7wfk8 zr#B=u%aiX<l9;z_#q#jy-0Y=}=u`j+M zj%6)vKI|Z7KPvL6H@f1c_MOo08mk$pX^dVPa@#KZbg^T7*e<>l>`B8#Tabj`=9j-0 zYP|}}`8X?|gjbYksAN7x8YQJcNBA^i@q!!>ggIT$!sqborI0EGr|0SKpQ#&I%xTC1 z487O7KdQRWVQ^X45xo(XGfal9@=Gu&!j^IDL9%czMO??tCPi$^{j;dWKy*Cf6Udk$ z61v$h3%w8rUtgXBg&||e2@OrD!3BFQiyU+FH!dwr%Srhi5!!_D>z~PAZuQ#~>2kA-Ik3EenR^tySO}6F;s13T`d9+j zod7Mz4n-qXm^%%=c!=>4Q*#>>NI~v@I25yJBEwCt1vIIwqp7^L?`G#Lvm(P;-0rjB4^%zOK>LbgLB2WV6hlk{+~Jc? zdg?wolU@DnmwV#A5%Vl@{L1KSc!AOB&u3^p3=~m~cU?4zqmUu= zx2rUIjQNqnccPq;WiZxD@AURa*MC~-HTF7%3wYwdvRe)ni8N=HUq#Yv5I53COjwKf%{+Te@-p@2&I$0I&Wp znOR63l}HLY%)GyOkF$YF8u`BI4%<*Ups%ccnXPW$AL9{b>?;py_dJK))TiYNkYP9$ zWokptz<;Fdx-{^hePP*dCX0g%I$SE2&bAARE>U#L^-#&t|0}E^@?bQ7aY7Q&a00&7 z%9(3U9XYI>_LQLG3H^{6+!M$v+|BUEJ6&?C|DjtS-1cU$=UMva!_CySZ>!%eR~8$K zLL}ULI_y7u|3F7ViA0AkzHLb^JB=y}WR||YRb+Cu9q5z5Q%YxRXC8U_ZIFwqKUKrG zotxGt&yCqO@b(mrq@%@=w^=J=}4mh;=lQ&j>p=lNhi>zsuaQLpQU zBqOdWVTl0Fe{!G-9I(BNjEuZ&Hp)p{Tt?Sx3zUWDtb#f_qh6wtej2WGweg4ozN@y( zo12qyX)4!N+}{Ux*uBi@5ByXDYGZ_rwp!ls`%a7(D4UznR?OaAtVG{Sa4r1MBshk26K3=Vqe}%M5dM3gpin2?L=|^c}(5oQ@#&b`ImA1hfBeO7$({HZt@Nl zGqQvodgY6<*7lu`dYZFRgACHs_erbNM?dPUEuO$XlvC{4M$7h%u1tjf>q9D8K28eCH=H=puBBuKp^exsx4c2U$+kw=-%!{#iUpU2 zJ|&QXxRHktGK6%Af(EsQwQ3knLCw%w-v;T`efaUbWo67y67HVcAZY}4bHU2^2N#{9 zZQBK@Eo>H}Pu&iN3VM5xfa{p1yhe_RZWV5GTwGE<&J>!2QHXco*Uqava0>9$B*M+5 z2F=0|U@;X|lP$YB)BYm|wq~<3+mBkIRU46OfQjJi^h}DS17VNoW zRw$25Noe@K^E`g#gQD_O*-jUO-A0S764rM97R$r%GVpO9Z}H`=k;AKvhwov}r+Klu zqW+9ZQ9;kOeCzn9&RzG{-L|qri(T(yi@b~n`fc%UPBk9R6_`Px{L?MR3f``@d(%PY z{?A4%xi9>-(hElW&b>bDBt@dHDr9(JK+e~Pe&77=&UY*54r6-#>=s5On`b*01yjw( zvN*|nWg^jU1S&0}eEAhZ@jVemMt7HvKD{)47}+v!_(2>q{psMs4KL z)8W1SinJ3-K_O!8Cx-23_3iz)>qd9gYAsV3wEDbD-0j~>FtCeddICzx923g=L}Jxt z`S{rR6!L>YvFL<{tesK5RQ*(;duT%X&>?-|*zcV?hZMe-^bmO1`v|}OOQXggiNH{A zUazMZ60z$m2z_tM0lee#J}A?1m>Z#6Jqc9e*$>}&3d7#~*Gk3!i1;S|K`&X=fbqxY z)zC}Up9j!8OZ5gL!^a_1uaIhX_E}CSxojxJ=>MfEe#T_T&azVJ+o+&FqH<$>P2tb+ zpem%=ffk_Z&OOYY@cVy1puRM0`u0R8`(WbO^c@<9)W|99VWbwA8{gsmFDc3WAtO#QQ#tl8~4D$%_)_L-#0egmIQ*i>Rx+Qq4A~SX}WMy|Azyk z>G0Fb?+;4CmWHsf&s4LFUm;M1h64#C!-hHbYOS#uq_gPB?CTVOkw(jXGzYs;S$n-a zMOme^?LXwJJwa5E6gMJL)ERJj2h;eTmr8s(c`Cv-}9>{a!uce1l1gy6xCy0}|U)oolH(sf_gI<%1IOIm>Pm*ual zTd0ysSTCSWVuvf!cgLE)F+~2PUN~4fo4B821(o-a3Q+;GLIu1Y@=WG z`OUU{l>yN4mM2}_@zM*{)bj%L=Ze+=_h`hXb@UgTO)MT>cwqJ4S{cwNGnyeWXNG!U z-nHZpd^ehhEhnFze!Zb^QatTYLv{KLg!gpi|3qPpaqlZ0#~{o01mCS$blsxxu85zG zz0k<{A%nkB_@q>}q4vq95NeIq(}M<6g|=rXm5)9zOT9D1i+wfSeF-*_Q^1Hd=czS4 
zpIH{k9E0pQr(7V}^LkrvYv3~d=6%&4NznT!v`F*!R|I$UE=y@Mc=YLYpMX7uh{AY_ zVxS#ivY-8P!}tbc8;WVF-vliIZ3rPvk3jac2`0%`RDNthKZBsvkD!zc+v6qCP;{Qd zixeGM*4{i`uo&;-#-nypa(}4ulTCT$>P%4bS17&i&R$1KC%rNe0!r^ zRy~=ZQ}8RKx^bkyHX+x)^}JZd)#TE{u;y#z-p!RmyW_`gT$+K&)98RGo7tqxiso8)G*QZ(yPrmAwq4_UHpukw=wNKLHNcnNnO%ikT| z$J1vJoI7rf_)V7BE}_&;s2rd4SRhgFK0k5dr2-e4Oq9w=cAhL5LG-5{^4l&w#(5jK z%$OKi@yUPfDc(ZGj6Gs#UAR>jEYKh`y`gQ1yrK#B%6LxgFbqqt-gYk!+eIl zSjR*oN84|X6&3WM^RPxiMsX$RfCQZsKKOSq%NfXw9yBgjA04|0#;c%JGzY!O6sv=0vW%+jx zXPD3sIEdF3#xKwsea_HzM3@Vhs3vtE$EZLk1oXtKK@^m43{d+g!kxf&jO88DNoDZ< z#|rVT{iR3UBSNa0`AQSGUTec43$H=(;};>79wZUv$8gpgK)-oE;Yf!jl*3L)XZvo< zIrzqRHdmH_vw?850QD`H6W-HbT(YE7jJ~A9tva#5gVK`uG)_0krH2CFJ%B92dEd5h z8GHa>2Cs14j(?x5*|i5%zpIei?@C>AUfo5D9pY%vA)CLCf3OStHBRJ|-N{e#UTNbv ziYi2smWne66A|@2c7Tb|RZeIT>-%Eh!iQi7pRufGDm7#G(34l`6Cqq25yW!FGjv)E z=iWk;J&@Gr9^3WpdU+(boweio)q~O*ndnEHOe;*@qRAo-ImtbTD4RszSFHkV!}k}L z?SotrP<@?_<4yyI`JvHnqT`$FuxxS02*4X=o#dTCwYpDtooCITpVcQL_1-s29N~p_ z|9%aqbNDsmM;}!)jaEs89AOE#(@7zb>F1uAkaN;M#(wNe9y@;nTurRU`kx&i-Xib? zqN1E<|8`+{tiGO2qn53FM(AWv5lQnjamjl%>itL*dL(*r20Fgj*A$z@&u{UOz4Nsl zk>AxHY4Lhbm-1a%aM!m02d^1U5tHoKcRFG&#VU2l^jC&j8btTc=)mUMW@p(;qg@zH zfto*-_Q+@80em&8=v~~REIbfnaP*E;XOiw(!Q1qkD(R4Uu7Bw?axFUPE8yi`bn|dZ z9Lo)t+RCaY=v7KfCGkVcc>*@a{T6tUlv)Z5fyd1}Zsl#GUx(^AjSOYZKXZPZ<9p=1 zS+)So!cTJA=&Og#@4dnNT2f@-Hhkyl>0&*?E(=c=>$mzKO>`qvs;N@Oud{P?)_y@0 zh%xip@7C(?g#u649G>xC(fuRe+dAUUsf+buW#Cy#d3k(#eS5MnbTezZ@Xkcgb;V%C z)iLO|W=W?Xcw4_wl?bQ^xXG-zkEtJn1en$?$sgTV)cd#Lxx@H!R59)V3At)_41^N|PoWS6r z=6AiY(vpOdso3+PlF<>`Lq5Fcrq`Vn`8>N%NRFHTpuH^+m$)Kgaf37`0VV&(R%_VN5R&+CAyjDhOYD8PgHOn z?2fk(+?o>yc=OmLT=fJ0u%Wzk8^b_!9L5ffEWXGpBqS<)=q#JYV-j#(b3>t&`b>9W zkc0`m@kdS0Y2iq*mlo$ z@+(m)I}+lg;9;9G{))%g}YsISx^|;&CD7c{lWaQ8?|z#hs9$P=Jaoq z_t8>yVS5aonfmQ*zP`+7g|j!(n-qE}!UX5a^tI=tFJk-i-(^^e!W;0VV}XBo!D2mD zH>dLf{up-TBg2kfVj3jsA4a%nWQ2~PhdJHF`Lg4L3%-mJVoSur1n~s)tq$uVDJUd5 zB!}dwO$(m31NE{JVbPGA5P4+$ZauOJa&(~MaP#o(<@xXk)b#oOaNKD!Lx_Vb9I+*a zt`9mc1#**giYM#>c(JO3|DrlVhLdkl>MqTaX3fPbsFVGGS8<-tcx(%yvDEA~Q{cp< zfLH*~a}l*!zXQX7ejI6Xsh`rAa&YvmI?%DqNZ@SXX_!|HH&4ly+xC@({e|LanIVf^ zT&xCJYF$)IL-(I(T#7=t2RU2AsY#Ay;FREi=RVCjRbdds&*s1~i^CfwnD2+hEsDi{ z+MO!tH5{+%sKUA<5A(DJDF9YA9;U$TX;mcV5^!jHtq~#89FldH%pGpU%b8)T-7Q31 zlRsbilUR#Wi8-^lv9fpOwefz~5P_t|cbp?GbMq<~5K+49awlh5`vdvz4#EP0*0V7- z<+N~aAs%}}k(f8?w-u+}J1sXB=e*J3hVz_Ny`~4~RG_#Mu$?+f+ZMJvK z7J;NV=;bK0(r7b?>tT>0AhngqI`jZWk)>znylw2pPL(QozNl37NBYc<$DPs!Z{>(R zc|HCqBYl)d@Hk4SqV(zZfS%oUlrD3+8SYf<0Wy)_QvEZERSZ5u#Odjq03oE!W9b*p zYZ;mZ27e!UJ@_hQW0p(zMI>wzNo$tP-lfk^86%Koy3+dh*K_*S&zZ`ss$kfS;ihEa zs>5`v3~o`_(F4G%si|;g8F>9~r;)Z2)WmR4HXzSqvr_b)(X1?1CK&nDw zZ(2mImrV_P;@Z$5;P;$n7&5ClgQjPI=SzF&r0rTEEN{xMOLJ>vs*m0A74DYNox1d_ zxQ~>e^H;xRb%WiR87XFB1o>W< z8OrX|z>AuUh0Le*yWwS2DM0zB?l+g;pUoI){D&GwYWy_)dN2MZpghz1d=CgD5@hX? 
z&~;)wm+1eBYRt!MGhg#F;6-Sy-HN1hinTICC$Ko`6gw&La(Vn4Flc(70VxR;E+gIEV>gTi-r*g5a&1tdd0Z*b zU3@@UsCB~4rnfb{4&AtE;Fs@AI7bJo9f1a{)w_g<-3F&8mfs@Ejk<5tQl2Opk5{Jr zvubuR_0RpC-6NBw6!?H-x_}=k+^%q~_mbiMmd8YtFN#B^BtpLS0Uo%*e$N3xM`#w| zy6d{b33*sw28SGvco?j@}fgA47?n9#X}A*O}sOa z;KIkI3O6R1axpq=H!7H}sF)9_W)i>=~$@e0W*bpw64tf{$KD>7CN)t^1%r}ku|FhMw>KX zK!oT&7<`Ev%mA}!!@P+A!rpj^g5u=hVj{nh%l<@TrVR{fbu3UNgpgBkHm+oWmcQ=B z=zCR@A|sj{OFg^0yRqpE<$<9AQCVv>lNg&9MC&rC#%l6=0dn0i`}3>2)JunpmaBC# z3F62EUOVv=0n1LlK$Nk^{)mPq3o&aTp>t@^A$~YK(>tj9?jZ}K+n7a#jmKSCy3H9s zUx!*|kq)4vydk%MI+q|0fq>*0&u$Rgk#ThUYvYU$LwJQP!zX%wwP__9JCW&6cWIr0 zeQL4&%K4&`s$(Xv=pVLh4(r*gi}riYO+3&V%wc?wQ{Df31|6%C(!ksXM9p=gFVCs4S!+#YJ_7;Kb(9N_J_1!aasF9;+w zvkAH>|Ad30x$zbe#y|-6WsG2R$C&G40KnX)KpN>MUODmB?BAfMV%_V&r3bgz&=Pda z#OWZ>{Bm3B2l}^Z);P$S^dE1W#g8%j>~R=FNXbi3fN6} z&mfa|7bZ=k`wubaqg5AYsgDb#v}o)X0?@APm_z)9S5_j6m)ah`P?&rY{reiNKEyfo zXz|auM~kh&L!Xu9=RR7MXF%*FB^)aw%LhA*jzP>rO_!^B=VgpIH`k9@U%w(S(Kp`T zOsI0A3EdU5*(}Hy;Slc0u;VMOo-Zhd$@S8K$7XtgfhL4gaS5W^}GcYZ}&d$A5ne1m}g1XekM_1U?UF1({AJ`X8( z1-44_dv#*2yzy8s9!GNhUGlx&uZh?g1!beQr`adEE}%@~#|vHI{^Eo!v>xGfNE4?0 zf+A5=o892;MJ-?nEc?9m#pNM=uf!D{GX}%0NkeEQ#)%*p+S_y8YyGUQ*tqmAO8OR_!pEJb9fripK9ruygH zXNsG}Gh8qIcHi4dc|I~)1Szz@yXCa$m3$40ssA{|_M_|hI4LT~DTsroaR(&s2!;>` zNk-!{aqg};JmCsXCk)T(o0PHScdM#T6dhF_6mY8F;~>4$&R&g){k}4Sgn79@>+OK> z)6yp$1>v-)dmYIbG~jYtY&BEPaa~|n@Zj0_=vej(Qr~T|bl-6U*QOJURMso6NEooc z+&h=vy4Zv-gvxA7snLF$o?1#+ugK>1xZFt6;On6#Pi=My@DlX7I*n)f{a(yvCm#H+ zkevIs)xk3`?=dEQ+G@&W_KmbNm_@R}hDo@`%s%{E^=nXy?APpk+|!+tyV?7;pm~2M zsrglO>es0jS$VO`c)lWE2%W*hOXvMae(de$M0Ik58m$G6$`TqEO_!A2-6)rNrRVxV z*V7Bf+bLUx8I=}1<*B&2($f|5a`OR#FBrN>L!+au2iZ&1xXhVCjMFB zf}R`Hm6i@foyjjM22g(7xH#+4rhoJ2*_AriYT-kvwy>~Yr?)@OX2^+6BE zXe8h5OGN6Y$^kB;8K&)9x!$MPNGgiOR`4Ry5GXZ*9IFquJ290Zq&aSb8P=twJ$F%F zJ=iND*ZVC7>nQ#D@&^qZ$!9S{u|Oj++Y4Jn3lrAK10vh{r~M~hBsz6dqFjGDK6)`g z+%3Wkj9)Tzn6(aBE;Xmq@~f|<+K)TFUIX*oS1?2csk0-p|Kia~E~1yyI1TY)mmjE# z{0w>>IpuqXqg90YnZ5Z2Q;RIl*eS(ppS$erJMOr1cJA}sG{GGa4-g%sif}Rkea+RQ-X_~FU6^rm!}8`-a7TzhOn}f2!KLQva?#f z2VMM{*%?bsT2HdPrgq|l?dtk`+3h$$jW%LBv>(!BV_8U}z|+h@ud}V~i(At^JDs?p zR@SMlXir=rH)vugkD{njzFni$BNa`?V_0g|$+ataMM}}VHNvT(qpwzb)hIo{_xV@> z4joBem)`iAzTgxU<0n>!`!ZcQtSMk||H`z4DYMH&6&E2ZcbRp&5NdCFeI-A<#v(e= zWUg53m=RmBGcvZb)@{!?oB*07fvnHZAE+CQj2%|#n&DXM@N;4wu{z6%9`wvkx>U=f z@`*^0TphKj1TS0}$%7qQt}fWi|J`}{IdEUldLKReD*j8R4WtkrAX?P&tG}c(76DkI zg-GWMkQl9&PIjJQRJk+qIZJ5TOtJc9_PMS`zS=#sC=YfN@*~l2aiA$WVd?%VYALzI z)bO;z?{@GUm6ut3-KtUvZ|bwmN7F67T&#j!*xS(Lc3ZMMm>yPCFH6W{$iv*Lwm)9)*Q=^)iKdgEI^11u71qEGMSF*K zKZ$srez+*|iM%=D>{n%Lj(&A}oK5>@;#=cCnU+u)4%wwS$1cn9de-1(L~4KZEdZR$ zhsfqG{Qs=c(OF0~Mx;((<4v_R8oFI9H%GNF5A-y!Z0w9XAC~*QFHnxd8*K6(7R-fJ z{QF3UJ+;q*?Yg^IjFfyr_qmW2p4nK)rV)TE|pYpX~r>Wp&s6y z#+lpom43X{7dr^==WiAqD~R&l0|ktR(eK>`+3H=JE>S*`JrMe{8Yrkan$a=M^tkMDZ z)|p062a{#zFJe-^!k(!A*Vz*c`oy#`G4!}*qym(Mv60Yina+{zaH?>{bDm^I(KRk1 zJyas`K*NF+y6xD=C#@Lri9~|jMYeN)KXrEVFdM5TnEe6-R|@RNwr4^Ln&%=xuAmvA znp;pZ`{*lI0|}etA8Y7$uRdm~ByiHp*fQjf3Re2VvZzi2w;#A`xr6wtO^8Z)8ps?g z$m4G7-*^)&)L4=%yc`ORi%mIde#cNo8FpgJq{eIM?YtFr`HNIX-z4z9gSdUcwz-iz zR7_^Y8MyL`fG22X`E{0}{#?_urGVeEGa&;l|9H~DE9lnY^01MkmC`r0Y#5Ie6j}wX$YNyiZc~mTxH;zO z5=8?!lSiHTyj(YTa!NuQ6R#;L_+HAa(zxmh0#~sokH>)(vIhA9F@yBENEllx#ttJLvTIE_(HQ2Mi%+7r=frIBDS4zx%k{*6YxqD%!dC@uL@L1d_4T zj&hDx?6*2XMd%5HlJCA_=2Wx`5U){6bAk^-=0`v&+F5Mm$CHtrRkEc>6BRyEOTjRv zrqDdP2Df%=re{zr92!!^$0dz}t_TGJ^}wyURd~hQNx?<(=IF)3 zSC8GY`U3qEoS0+}$aWYNNxFZG4H-du_UIK1cynp&N_Ga0KlR_uC*XED%rn3yTx@06 z;>iU#mrW>&md*xUA?|r}A$^1#H7rmVBX0xIphCGM1vS(!Ix7C}a{d(zd^O@XfW5;i 
z<%$o*Vh$incDKL>{RK|MI=W<`@AADhI_vuQQ*7b2-$|`|DFBC|5iNdxh9g#itW$P0lmVy~JHT94%#$JV5G18_IE~sCAK9f<7GH5kRUJ5p zl-tG*vD+FP3${+)Pcs$AvZQ|N+`-yFO#q1#4LlmL$>s0i9z%ddUd*JLp`?Vhf;yZd z{%BTpRN_M4>!b&y#`PZ^NX9q^icm1eErxA!&G@7ZJ5jkwwDvxc{iHc&6< zuROOK%knoso$k=J+G+v-jrW-+&&{Trwws&5ELz`s=K=ns*&hVhNdm88W2Z|6PysEw zQiWS5qqFW;+XCGum>tUX3K>7Q_jmn6^j~#Mwj1c}8K(dvfQ&%MEQ#W34 zXIunjUS70Ego(stZmA@{R-m)EwWv2{{GtmY;bs0yuI#VvcQ{)szyv*i3o>s_>1eA< zy^l(s31xlQCAL;b=FQa|JoW-Pxg7+&m}nVs2yZR39Y?jYziftyGw(OsUCnF*7dhJR zt(*w1;~kz2>X1@!7Ax+z-`~}k79`j-1pX<1^Ub34y0;QJACD4@06U0vqo$0J^H(yiK`ezj=W#uuq0cbe!3sB@mdfZoN8BQSn`Wb=Y>!Vb=wl}cZ zT|u*4d}Ad7?4eC)Qst?k*M6<;-X^&F+;B@ofRp2wA<86xe_v?67Tx(Tit~M%gG}%d zSp9y+^|aH(?COVMP#Z^Yv63f4meinzxDD^CacpuH#}t$tS@q*1ApW!oqVnLKNZeGa z;W$a&0JwXSa%#B;ZH5QrJR*5*-Mr{|wo?SWYkCHb{fw+<(zO3Vp6r!>v|Ts^RY z>&VJ)lGbxwrWiIw13yCr-iOg1SNLkUZ_giFL{#LFA)=OY+KBk6zzmr+4;s(khP6+M zjkse}v#%o}A~ecjB9RUwa>eLOO37KC7YFH{{#cf?mDv(H|4`wV6m(^`Rjz{=N9DQb zr7oh7UF@x4tcU0ssJJd|Zg3&cb#G05#Cy0p3tG>mw)5gpik|P7{@KULGJ%)l<;jrr z|9f+yD4__A3d3L^cbi*~@aWO_dGiFyQZbee; z&ry1U*I#~|{S4J%hAM7GhQ4UX7zT~oQAC#L#7TMy+Ha-vNmtn{pn!c|f$GR|^ZSp9 zT)@ZRbUvpSX0K?}sV1wHTWcV|6E3;6*idXrpRc-VcEZ1=VY?`=p;&=_q-@_4$w%+T zqLyrhMv52;O)<9W zE8!*07qC2btW9sQrByrGno+;r>$(U&s2NV(z3w7q=jc>#(#YGIZ7?&l|NhZsVXoKgT81b0 z&%QOuT9}sOI%|Sa^!NDa&sy)q3&Hjq@gZ39)~e?u?2w+50;Pknx9Uw|eEe+4dD%F} zOBQ{UM9w$WFXcXeF%JK{xs~asl7|8v7U1B<%b~q6qko7VH=fYUmX`f+CJ;qp$%D~n zZz}LYeES$+OCaUNZD`d6hhn1zg$^zx{E`S`{g-mH{m)KpI?PZ0;h*l7)B2ngcvtXy0k9PRZ z_5?(ni$^7|hcYgP)1M}}ZcA8v+s%oNM>c<`5;%x$EWNA!)g`0jXqgwxxf zv?rBC8im@JCI2xWToyIoCUY}0swHd1u{9U3KRIhpXvN3Oy$8^`3E45L{R?8M#7g8(bE>iO z8^XxDGsI5X`Hc5-+i=%*t~xl9B*2B9>cMMy7o5 zfUwEX_{79wAVi||cYgoBn^U=F^bsi|J8+$?RVp(arWzPGY@}u}KC$p&?08_tkby4d zR<4rTW@bvQJGAV@?KJwK-qtg2ac_=qS5;g&c~a{coYwnwF&dMBm(3I!lXmd6-F%@}9y|~P_l6Ua1zjN>TD5}f{QQi(}D=)3D-IlZ2 zysa%(GBH)0DCh?Kvic9bauq6lr@1oGo7z#cErA zhCDw;mK^p%82igUI37{Q?d{k5g`-y8{|$NbkIi&i`Y_&g4Hu!lM%l&1Eqx*rNv9*n z9ONp?u@SLZFc^kW|DUlLyo77k!^B%0*g$J3(IfO8JQVmq_`|=@cLSblaesF^$6 z$!CV?rHma9Q5atan-o>Qi18taxB8PUKc>~ zd=?wqA)wrSZ2^vzKOfi!b^0Q|I3?j+OvTy1tyB<3~PH%L9oS zI}osqe+#Vg2G!}$zVZHHdjtQi$Ws`%-n3i(BzN9xXMnvTZ^`kRrE)Zw$a0zd{m8!n zZ{jNgmlt{r2o=!W{)QW7G9S0=&@n!O)l-`%B=_Y{;Dmj(iTZtNNfQPO?W$L4XyqHM z)u%iI!XGE)G^u+Y_QRjIZOdvte@=sl;s;jhNrXgFRJpL?KnqW9F8 z8lenpDYdq-^SBx0LME9Wn_pP5ctZb-h&GCH5r@p>g&c}5Zd$F!mE3ad*#2{2(6tMU zRhtWz{Rm3jSkLAZ(bM#`>;=PPktX}AS3jp)=ZA`qx;0d%;KPhU1XsO*dvq0?FCKfZs)5#{kg>@Z& zmL($*((nVvaWm3FKZ);pJaM zPvo&hR!1hha1q3j(e|v8u6w#ac)A)toNkhy-_$MG=F+@~nlpoxx!=}=wf3L^??WPv zc53+Z)qfgog*iO{XoXnwf`QMXs|$TIuEQEo*b33u%3e#L@EFAoN%rA*I97v6)1l)b zUKc++Cg)@@M68i?tV~ZB%$&GKT>4Tkpk*|3n&ngCxN;4y-Zq@I^T(mozlIgO z!1FEN`fa}R+YNn>_{b}+o??b;dW2i!d(0iIYf1Z6)O88-fp|xoz{g4O^h_`a4B&j_ zR{c&54fR)LKQ8w0r3f~+Buuts^f;e;KL!4^eSM@pJ8Z+(!s5?V&S@!54i#{P_a#PT$sJ07rMiTTtD*JBu33ig8pF6#)1CrN z<##GiL8xgG=RrW!Gu>;qz1h#Lj~HH4!efLC)}&)RUqzWtH>nAdOVJ4TzA8V;SjXIK znXTDR0B{R7>#z81$rlL5G?IyE`?tI%{|4lQe1ApmDJo-`Pm40BFi%r?^QqSA!Qc{= zbfiJ;=f9L9KT##5O}58}DAvYe)7`jU+bj&M2({hKt6R-jluQ1pwQ}qu!{Y$UK zueYiPvHMxdFiy=B^^_zc(PPJ@@9!8t&I{a2f=iAS@v^j(HiK57_3-$mw3BWFW?f>-DMOebQ#@)Z~pwU2q;8pfX+Uo0i0 z2$skh`1$Y@+L%CyVg=6`p}c!e(>;I#mR<9GoRl%1Ih)rg#1^;i+eu%Yq-dv;gkb%Sumb&J#1 z$Uo{z^x2M#SYaE@Vi#n&EMz;ue z$}$yBen$C}(KtwxEV-ZIwQB4dVe(AJ9mc9dxa|~|)?|>CAXI^xW=zuQv6bFyfK458 zUHh&ZTgZ1zls0vi`6$Jzyvm@-=?7RpXu-xty8E{(pI?x5QV*Z&OLSoI+?Je+$q3S$Z!*|>=zm_X`C+;XbM=)+_%B7$MuoYCFW*umfT@gvboP|Q~ycc7pK~BF@*Hd zPi1w`Itl(NA?c33R8wNnci&1^pYJnJ;%MraQ88#nv2UUANWej_Hmc5xoY#@oef;n% zljbNYxc8Kz?*PZpp^;BME!?T@r)8=8m?Zg1#O6p@{`ulbw69?Q$83!v#cMFT0BU 
z4?L1FW?$$oY3Oq{WglKGE`<9zjlqTB^F`n1Cyn%dE)x%}8?cn7pk=)Z5p-94Bu*eV zM$YNxPq&e(BeA_9>OeN~l?vQ`5j=GEezY>9jOAS@GQ++T>-49uAC&-p;tU*LL^|Db zr|4G*k=>&N7p~S^_lj5)I)%fhLSKSN5yz@e_iN(KK$(P$=pZ-7>MPpJ^^-te6yyy< z9*CKknQa#U=f*obeM_+L>2$);*_HWa^{)=knh9_X`KTT-5i0#8u2fDr3GO%g4<}|v zLscUGXte(oxj`OiTO~hGPrzKe!W6jq@;)Pc$#?y8L}^MDRmM+To{#{SOcL>HdEmgo z*>`}Y4u2%!LtVoW5-<_pLR`+4hC0xsMps%^f#t?};?_FSimGTtHxQsGtM<$-%b133 zkOIR|{??n4{>e{ZLs;q3!ed0v!54}<7v9yS@q^>@w5!`dwc&D2&ndk%B~R(Wt%RYR z5LQ^vR{GBd7kC7|NY6b)HC_rUh1q_4D1AE8gLZJ+qyIl7JwIu=+$|>5B42g1489Q9 zR0U&3IV<1Y;z8UuhUl6;TbpV2pDXr5Rt`Fl&|i}q_EmVKC0{J#7{9rSqEYc^JiafK z+P0#McTvoYXv7vuPvkV$pjiB9p^h*5vK(i}1h_F;s4mrjcw%1D6W<}Zm7ynht|Vdq zSW3X;t%<-KlxvvsWYW^6Y*NidqQpzgRzIzlmFo8VXKBh=| zS;ix*SM!z3N2>fwr}q6I_pvwtDaUzm*V%lVj1snG9Qr{<6-k|^jHY>t^bh{HIYeK3 zebBMX3$9_j6d3Sp^rFykQ-K&9O6Y?D@OtPJ(%hwF3O}~C2ltGWn#b8;EiAidqe+C~ zr)}3C39XuZqoNkI%TU7HaLz7n(h8NSd>(z7{aaG;%sxGju>dtJnD_A~9CJ-F+$Um$ zh2JG{5~V22k*K>C7(EzrJha_J4IdKb(X#Bt)`2=^{;Zq@hohG#8H1=l8PD|>1H!E< zl%OKZ4!T-3^w=Ew!okn7qc6=*j`iU7Uy-9_A_c|;<=)^WL#)XSaM)@XOGL0Y==4R9 zz*jeFIzwqd^p}0}em(+P%2wZ{~@_}8@DrL7 zIMr(n=1hHmO>>#N;u!Ir`N_lF5m_iya_#v9#-mbJ~_4})>oMY#a|7{L?O#R_-+aGc&NnU9596y$~; z`C(z?>wh}JQxEU|A%16`c~GNf<;&pZ7JB>PSJ(PPkSG0c`|otp_OKOaa<)p)#cYJfe&hs8vwTL_}%H>y-yQY4+L3SVY zmM&9Gfrn~BdZ`KRw7jp34>EisJWfgjw>#NH`~Jz0-SiPYPZE`IdEa@qJ6S5DK^n1UMGF6dJ!r=@_d12g_4~-iCK^g?jmb8ReS?d<=c& zAXcvQv+-zfL>~*v(Gi~TA|a!tb|DywBYtmQk1KLVp}9l4C_S%KAZxIhWOVBf;-wS; zSwG8ceFs?{h|M@@{puCI`=HtXVeBoVvW(VsVR&f?DFJCgK&1ugR5}HuJEWvL1YWvJ zy1S9??(UZE4w3FNU1#mR*IxU4V}FA`FdT&Qna@4%dEHkW5c%w6#rDn;$4|ic{eGX{ z)@=>!THMYyDKxgX8*s=c39&mbk^?t!w zVQ)~-gc7mi-61lxaHB!J!NBjYETxXXkK0y;=~xJ>41N7!)!=-0eDiYK{8)aKi4i&^ zj*PeS#>`a41!7=S2>|kMrm$JAYe1F zG5Sh3eg#C!MVZswZdtD3(|YNQMB<(~_^khWy#Ht|5WY-+d z+191I5ftWwXuXVYWo^BY&S%@@{mf}Gn?Uxg$EeG)3gTh=v``7JfwgEp@MEeRx7D@S zD5GWR zTz-!^-YQ!Sf*jsM;2bYBs=$ z1@2ye!(ra{<8GEREMFx1`}IKefHxnt)ejvJuYWq2*E7!S zL})A9A2u4BG&WttNQ@OY`02@hvFBkdCKmiV3nCKJI!QILkCXbw!}F?7KV{+q;KPj@m*)92Me}WcQoD4OA_Y2B@58}uXI)h|5yn}Jbj>R}@Uch`@8lsZ9Pk+MZ zCN_d%BpO9H=@i6h5O{}L15+3vrgxhB+r^Q>;OdNqP{3ea0nium#Cq=mZ4yX;b2KIW zt;7`p8bn#X3)n{iUx~k>_LmDVd0Ine_68Xm=@9b!yD$7;7g0LXHe{w;Ne+cYyuQIm znF78?X#|Yhdh6pw=`&pp*dj;y)!n1M#I}0NEwmoFImeWN4d;mXg3ow!GFiwPW*aZJ z;h|V{Gb0F*%W}|y4JIpCjn|&9!31QnuMif83#kvG%xB>o5>bHxzom>tGRXkdlJm5g z6QY3CWr`(mc;Cvs;X!f+Od03PuvT18c-eTw$W>}i(OlRG>n8WNzaKkUhn2%Z+z6IY zWMI-AT^&}}H@^{VD-xqJ60nTZ06S`OtI2kbAWnKV)2~Tl5Abb)^$hW3PCL{xNvomy zKx;c+`Da2+0rm_tjmbJ1 zfq#niJ5f^UDIW<}Vd`~$cDo+EVDj#DrYT2UZ4t~MHnRz7SVK5dUotc~r~2o~(qRnF z;2@%rW|I}y(vX&e*Ot($*r;ziT;x0Z(|hcgTZt{1Gqd4j!krh`)neuimq%YGi7%bq z&&SF9gv>>@^oQk2#M6#F2=)=*HH}w61_k02gg}K>r*qAb0s~+Xz{|hcoNP!fHB3ia=p(Z6?EOY;|MspoWTtWOhRP)e1O*iu4kwzW0-rX7 zLF^8ZM{V-xzmr>>#D;K&L^t#jRVvszDZQId%^d8P@>((+bFgH<3Vx@mga40IEzs<0 z*pYdF-3bqoZOcD58geIqHQoYI5t$<=NZaqoeD3}dQVIicXezLP9-QC43rnEU^Pq$Yk8rkAxt9D88M?kc+uV^V)>O{YK{WuI&l928^>`$x9K$yLB7 z>DZst<;~P;ox_pZt(9#w7&%DM?s)?>FF$>i4}%K_?cE6H zAE6dhwN40cC)h@y?`Y;l8rXnhnsxgKz)-%%e94x*`k+;192t@LyeF58so&OXM+D+$ zdnz5T1qV1}Un!wK6Am`!`bq?t96^WX`D~+E#?F>cv!QI3$+2nkhvb|AjH_K5;-Gp=cn+G7hcaUL z%3ZIT&F)yn<*jeyF~;B&$0`F$fxa1e&;2i20%Z^jTKLyO=Y;B%_!i*lja>)ZABEAA z!JXmRVVy<0ExY~x7eG;7EK+8SW#*yE#DppRG?B~Ilo$U=JZad6?gg8QPNjF1Lw?~I zwzxL+;$MV29ub4Lipv>eB8lj@LGt%0X>mYP$aH?BnMOxmv1QA+x9K1GwzJR5b5impB zzT#2la*%DU?@T(H(^==XlR|Y)&qjQ}&}MAAja$Hdoc|K_JGY5^0e}xzk9OvDI)DsB z5w7u&VwN~*O+w;+2h#>a@g#^S%%))_f77-rzoUr?N-qBeh^A ze|6-zI$yE(bR94HR;qp{7i%Oe?+7vQqNXxK9jVxAK&mG#(+3GCo#h4WB5c`CZjyyO zQ}iv0N~;~D4WEVy5l(~MYE@t>4rziCLnBWDP2SjpI2W%ZOk?gCJ}?ei0HdpA)sA3_ 
zT9^Gn9<(V~`GE#7Z=mR`0lFwdADAS(nFDd^2LH}s>LuUm!rV$&2()vcz*UpY!Sy^= zj&MnOM!reQDJ%O7+Pm1V9iJN4w(rutF`jjKAy{eoW02ssSthqB-hw!0g29OXEg~wk z;$+EKI$aN7<}MsfH<{=M_uJ-y5UDOyff*wnm9knL25)VDCv3wJZ43o%JsCIZgpwd? zyas?Q)^Y=#s}O>8KY+iegfu=q-Yvk){1H-hhiQAWEXTxb3sIg22t*c1{E1B*tq*M9 zvn8DGz zmmbq7%;y(L00K}qWYHc;{2&P&NrFPtetHtb&w@X3JzYuIi~cM{?$ zb6gh(iNTpJbuX-?=0+;J`TW#lYTK4K&@B?(69kOsWs=1}w>6fjrGD8O?9vBxEoJ+? zFWXc5^X2q!gwx+Rk>xj=NDfHq4?us0(r3+6-h^8@%?~{K`_pYXT!-Naf!>h-{n}&| zh;`3ET5?+nSu$~3Y18+*<-(X{p^e_4xaVAbLPS2grA@{*e<-Hbiz$dpARpt%}gYLC!lY7Z}*H4M3 zhWZ{1?)*0L_`+CYy6yWuK(O> zY+uyu%9qPwjWtu!{+^L0>;7BNWp+=MU(#2O)IybW<5ftBmHsotQT;GZkq@R*`N^X69C-s1XcM0xeH02VlR$rcQts%2b$PaosK! z(sT+Jfn1!6z407oNKmkFnM;V%Uk+upG=aq^_qG3>GnBgSvVrmwqYfJ4Cu$ldjE*w+ zEY6U@>oo!$T7q!?r+eGX{8Yfi+CXQhCqaag69l(+z5KG8>U@8FN@onyV>v~HsK3ZK zg+h+H7zfA(6Nsdg^e}=Jum{QnBD#+AW_*ZuF9gEJ5P(xO%sG-31Db1{hf_FAGF%dK z8;ukD+8+(1ASf*M>NCo<4=^xjFv;>Op2V={A*k6GGsVEo=CU!vwih)58W%W_6u!cl zN_8IUL^BHzTM^L}z@=m%Lfbs2g4yWMK`(>2@6xm_zd_OHkO84x1{iS@iVhshL?Pd> z=Po**^H+KD!hFiK^mhLpmU+^}{j9dnNfh?a7Ia1Cj_EO-U+g1OOS_t{haa1W0=hKE0k`^onaYURbg{1d7Tn*CjF27{fhSk#Qk_>Q`zJrNQ#{KNedATo zm(=v8frLI9D*UoR5%{pcpGDiD4HXB!3PTjMMwo>31SO2$?|p86VyGb(u< zHkNNuWFyCOxy0{Oje^nYSY&lPI?---<_+6uHv%&1? z4@DBll|RCAjJ{05#J43Hx*9_)!_v3#E|@O?MK-xO0^0i8QZl5t65b;{HLU)thOz2S>?%gT3*7qkpD0blZc z%04Yr=@^tUv~93{cao-HmCEI{Y}FK59>uK>Wfc^Xqr#@qL6FP$yl0eyLUsS3&6kUtF$NYSrFJ;iVQ#)^0KIN!J2CaM((NHT&_}x%E2&i%t$p_bUOp=gXm`& zBjyt6A*s~zjCwFN0lvY*!76q4g+F2s;LY?7KG7vb4uDSC*Om10@w%IaO*XhqGGkvz z2iWhxo1w&n<0dRxRNiFKgCRpjbg_JAAhUQe%f2+sLDkd=utpt=m2B(l8la-A6c)fU zcXl65vP*#>#`Ao@F8UT9V$1@+Kn>yZ?eC$0YZf-MH`9`D^!AzD#tMcaXY8-zC$QD1 zfU!qZO$q$(i$Ursdp)$;u~>3*3?lzUbai-QG{v-RiplL4HPQr8na`LtE1X40_&97K0o2T{cMpco@Ctdg|@s7j5G}|o-bk5@s`_U8cYFINV!G6_Qcu#UrTRz zt=ZUqUIoDpV+F4*cIDtq%BY^deC7#W9y8qbkqBQ$SeI( z0*6=x4vDx-dRBI5M;%P%N7kPNzU7i-QL7MIaZGl#uuiR6WF0hKbwrPj zj#kY}{@BNn#)(t{yyOgv>gtB_!^YbVN)L9-m9?9F#lL!mAimG?q?X>S$6h{SY`N}~ zxw>k_S9iOkX1~lBee>?eEGs(E?dWu_)Gxyi7R8ouv;OvnbqQv39#!-x=2s`J-5}?4 z^E^r@MT%4eE@7%o({a3nY&g07dsg4(QlFV>r=L01V$x#TK`QD=^~2TG$=ya31NYjP zfvOd1yhVEhp3oymTq!I5c3efTUQ_kUxh+-*(Qw}U$37XzlzywGguRJGO~rH`+1``l zcBnsvH1rl8GblxQ4y}#P9^B6>``c^~{a*GDXGR5}xdlP7_JP}I*-NsN-*rzq;qbPf zf!LGcD}o<&8EVw&2YDCAOi@zjE_a7T-5UoqX%ptKKSHUvmcCtoxQ}FcR3gh*Xymo(Cn8RAHn92XwYttklry#XYgR10jm{+4ZOW>Gz(vO?B@Hsa^8~|0Z~s6|hhM%wuYe`O6o@#S`*uSr1|!tC zLIbtk%+))Dk>vI)>NfI*T1TFRM`4cYefGQx*1&Bt8sj&=7O%R6Usfz_xh)(XA*9x4 zBJ)n*HktX#q^8I4s<~I5q`mB}9f@qtY*Ie?%^INna@Pwam0Zq$D}B>n`3}A50RE!N ztGYFgb7mnP2y$iVRmub|juvJOIeIHMUkv3LSLU&EI3NHLWPm9Qt10C5 zBP+863S0T+9X0Mun^(!3pl|Tj(!mXxUaFtvn4n^~nChbor*1C&@{I_t;c_W_{}f^n zV6L_jrsdJhk9YUO8!_+w5;F1vq20}vuilQLQLrh!8hlsdjK;vQVr~6i&uHpOj^u?t zT3!m|Q1{f5dX>f#1Rd$k3SX$~)@?x66SB-w8Tiw>?$q4eE`PL8NqXtU$y~fJpO5Y) zaha(x7c9S1_nZ?hZnNfgxT}t!3nfjFKHJ#|;xx0rzWlcMn9BB3=x}kcF}-f9dnbp~ zlkmg&1~i3x`bc_zT&$EBohX)mTgJxMcWd{oSM9mMCd){l%Beh z84+1Qr&6RW??v?64YDVuT8(I7=#e4tHqrW(!Nkv`T4$v@73J~91GyDw?YkDoIRg4Vmvlf3g*v|I z?uGlrP}xE_6760OR%3FZ98|3fUjNDOn#24ocpqxMR~|#txQlXr_)CE(Kq@=8xou0Z zs|cGR(+w}yL(^~bm^A>4ybx+kYVO2}L6HRBTOpp`53g&GIo~FxV{fY=*IS2wEG~S9 z&mP##8^mN44$^DqhpAFW1&PvalmnX)%ii=@{$(-R(mDjBl-Io`^#2*aj3-D2qjjh! 
z`VUue#y8tIHM!waYwiLozU6Q|l(7o*@1e$QsTdn5$k|^oP&FNn7Yn$OCmvM{U?4TnK}O~LRZjyw^)oe_0}fjqyeGBZP9Rew$L?y$ zQIx(;SVeE=*%@csiLa&mPEw~nh&OjeHZVf!YMk{}|5Yge(2Xz1yzPj$lwTtI;Yq9c zJZjIfxw|;??b+5Kdiifjl8+lbFIc2=aWGKT-@Rs=`%IG>ygm3d>G`|m ziBGl(brdg=(qp0c!#A%giuMq-HuMUJ9v$3_HA{!vXar z`9Mw`&DqdSAqF~AaBhWIG#;@Ps#fME4N8o*`^W%Jur#eqd~vaHQ3U3TCO|ZcPNDZ0 z=K@^I$#qE+L{@%sC+r2wL^3HYXDl5`lCriMz`1e(cniu*)V&q*Rmo|yp#*H-QN>Op`r=<%l5+c*P zb2bGU-ndnbG*qlZz|*JTCByroc;t5uiA%-Q0+X9QvnN0qwyu=EqPX<=jG>nVrjQdL zLo_RO(;?xoE(H-5i+X7sDTDD8SoEOWf&5@86`NmrertIjRzAb*J~uex7{G=stJDJ{ zf_SB)?wYOb>-Hi{;yu3)?^V3RzVP4`HRagUk#IY(A~^1P0Akw-R}p5VARUO4;E#7x z1_O`*P1LnmDFe6+#^VepE@bz=G8B_&V2K&bX&6H)g(FKR>q@`U8a1%Am$*?QLHvqEb!prIpu*n1?H2fd)GW?0Kx8| z6lV^GixprF^8mRvLkLk?J;*@8Nv)zMyn^oe?)b=%U`XTzc{+XU3?mXj5MbJ%w@`{sTUSH?&uC?(NndS+P!BtK7Zhu zE_)g=5+><&qEb$}pFT=Jd$5aEkMVe&kuw(5S7 zV0V9d8wk`%MW5@xi8PFJpGiHi4j+`_=MiCRpWSuiyTA$y2Syz#Z5X^cKSP*1p>^c#ekb0#j^SEoDTLidYXYeJJf;Jaw*5$dGnQK5mD@bl~WQQ*&JfkA%6byZ&P4o$jl#gV#1y zCdK&&x=Y3&37r)L5YM-`HP&BzsA=`f$NMN35O-<8rW)s z2ank?k<&K!S5GF!I^4IIc!An^tOKH>K&i60LR@fiof|G?)$#vyz`M%7@N8SQ4DRpm zz+IP_B12AVL%c9)B}n-6#&=#I3j$)ILxnkbF4Q+KdnI;o64DrrNz zMKPJwMjxFFJ1hBb;T^uhmsLhw6o2DAS#O3f>KNeP!X!EZgk#yYBm4qj!iJ7<+R<|4iff1HM$$1?asyg$(r$sU(176 zR0VncjuKvS{;50KVx#9~y*c;0sdBMDE~4VQz^hHqDx+=~`CxEbfgGInhC4q@c$Lb0W+(WYnHm!ZQf8pud<%QpqD0EW)_=bd zE}20webnPso@xv&v|uX-z=@nv-(xzA79jL>|85n=OXVqDasV4p~= zotn3-O(Q8R#`o96T$gimHHu4(JHhwxU6AzMI-lIMCl*fw3Z`#Lf$^dYpQ&Gdyj8c1 zA*%bL)rIsUwoTjPr5=7C1tE7$JZ?>TwDY%F25&rukm2h$CJ0RuYTu!Mm$D5$9tKAT zA_czitfCofa<)}Se)$X%-1kN4@YCqllN5OhGRTIRjN)qv zGXo%*5i8tZTyMA(T~`Q<$?)D=u`PYKRT@H{I9xVI_C`YLVvuUGl2^hzjN^lExv%h< zuU4p~eR_0cTXJLPcg>Lv<%pl8SNGBr?WC{`d3)$rZ=>fe&FrWoPVg$IDNRlocVUaT zP~}(J|9Aw{(3F>YTRNe{N;RLBUg@O~s~j4D1sC9eRXM5B_-TBJSpiwI{LjZm{u2cr z0^9LAe#Csycr2dz+1H%o4qP6G?iVhSMN!l-m~`LE&qP3$S#Wn);Qlw4^S*q{QwDjv zDY;veR{p0i!7J08>y!mDtG@WGFVqa&R|VYEf9!wf@F);XFB?k_X0V}I?lzPm^B7v! ztgY7R$9|~GlqQOetoz!(b0Y~8tY`;ZPOUs!sG7W{mrGk`X3aI{IN02qT1sCe#mYrd zBki^$H3vH~){#$Yv^fpZ|6;~JpsM=3fotKF7{b0%Z*^Cng?7P4EtVXJ=U%5)LM#P4FHV`n;tY2YIysV zYKpRMR~8S}e2MPx4u?!#HLIp_3e7IC`^%au+qq;PMy%iC^o_FbHl01&ZN8eiSRNpQ z$DhpIlFyT&ldp{DaAVXgB=e}Y7(NQb`NT&`1ewe`IM{w^DKiJ+e5QPQdvXR7j~ zSufAHne;x#r;FMnmAL0dMTg1RhGF_AQLF1gR=C7&7YWFHgn!B)DR`)IS!k!n;A-*vm$mpu zU5I{d1$YW$7R;3O2L0IOy!{8PHk)D%=M$2?F(n?NDJxI+JNy$&ptqsAFp{N?IEK%a zP7(oTrs{9~02!$Goc-zlR+xX38v#~$aD}t<#m6hQXpZqMc=-~R@VyWtbqZ64&rC_E zMg*obC-p{knmb|kbkQ^T%=n`Ak4#k;@VPvak+aR^yft)#$L3MIX z3ld<(WV@YzSMa-X>F#&mATKRn)W5ATY#Q=Zv8n9Ozh zIAY?EBzDVOARcdFwJZx+0-Ugkr~kHN$1!&oIiqebCS?gbD@bb`r5znFrd93cbWNXO zQlq`>$n~7^83+T;H>12Ry4N>5M)K9B7%cT)1#cfwA$lpOJ%%!$@d3+K2a-SebeE>} z#cnGGBj%mwS+BT0!~uf&`(js4pwf_&-KV}@NFjH#LSPtUtoy|N0Bycn(v3oWMX_>? 
zvYd)ZUj${ZN1&{QDB{hjgv7v<5$~pJ(FVANiev$;GZFzG?=E z&B^N33tXS@jSJELVmUpFXdXN%|l+E&L z4Vz@@gu#>{5Bb!#`x`;ER;?wGR2Nf35)U5Po80fhV%}&L^SHPtZ~ypCvlyK+(4Bzc zCxhTv?+B46mE`&Ng{rm7E+RR;1xAQW@U6?mkChV#)FvbhOnSBK(v6C9zVpwKn2!Tt zPG^8tB4F7R`6*e9w=0`xb z$OM{aiSPm*pk$?CX#(b`VzSQbfi!QtjRbPc=2&=9aD&+wfJXqM`BFnQZW~zOJ)k0b z`!LG?pDhP0^(vC5A3BTi987ucIUJe^FlpsvetlO#~oXxg+~nd zyNo4%7;#(7@c%JK$QSoTkB@&cCP=s4CV2$ZTJxZ==VZYoo(U||_BfEHRNgIn7CVhC zd)Kp2BJcD2v)h)Lx3e*{IH80w^z&sr4d?O?mEj61?!-^KS=e3#tr9WU>Q5Jx55$N# zzoTGKogT6iT6EhX-y0ng&aXE(?fw^qaXLcJ5vK{%aw&@=m6^aCLT(Xa|8!XDq zq}T__dnc2XW!yip(S5a(zBxl%!GcBx}A@+=CuDZ{6{nQ_gCmiua-1c?8WP!NA&{U$ZP7R>8Q(c z8GQ<$aFk1r2T4m`$6k!ggOam$9u*1$4nz|I=1eh_p7G$;<3`z04#fUvs5U2BZiKYT zGb@a`uoa>M*c?`E>EcnFC&CeXW^yTv_1WvbAK$9OB$wA=#np2ylf3aZPq>(RO~6chyXjlK3|iPgbBTsAAjEmCHRhNDlxxA}PG`uv zQXs+j3x&4E6|j1w-B^VKJuga?$)w-ki+9k9KPaF|g_HH~t(Fu1*(!I%3utKx&=Cxb z_xwncwL7Zz#YHQmIj)Ay?rVSbsUqLm_Mz)3`Yn}L-tvH1(CY8#&`~~GEOqt$pCA=L z&ChA1*r|Nuc3)rHCcU7+gE6 zfNS(SJE#5Qu?%l#hT>q*flCRcqA$Io0&;#>=~B*u&1YwfHJpHxFh~B1h4gDCXsy6;4|(>&$_MeTk1~*%`|j?< zMNWLWkS_HZeWTq2zjbDdw825+o*Iih*S(l5qS! z{m%RHNFi-J7yV3e5}aPIF+-v3teSUMQJ6D@zWbMFNY8s99~;BZED!14o5+X7m-b~T z{lq>=^@P^SQ(>z&(`d~n@BMix)#d?_*+A2!4FGFq1zFTncEYlP{7Z!cH!Oi$?7rdQ z5A?eLP71-`+{DxtfbVKi1hO7{#*3ZL%`O+;QKH~<|MSKDzu!&n0+HnlH2-?Bc7LEP ztmh9CUb%Zq9(8OS$Bi)-QDA_yib0^AO}!M6jnEZrjv zE?D(^@K0g;H31@Y7PSGFjKak76xdKEw;r?$UIwj#0O9dKxjdXmvb?v}D7biptU3a) zp#*Z9b-y9?#uKjvQ1Q1>P%r4r18Tm+F(}r)MQ*hJW=Kr0-?Gf@35SGsyN+tV1Xy|b zpO7r=ZI^lMos^#G!hQ$cG|__zeDt`1|kC)?Y9 zyvm%;cCN9bU{dkzF}M-S^&u~$+teAA#|yTu|KmOTkGEQk6QZiBdJaN?K@?unZmR2X z24xrrMJCTjw8c_w{!=egHlRkP^s)NI{LdcXKl;rG4tPSgq4Y?~3%t*|Bs&ANs0DQ@ z{Qa>U^+D1SJz>MS`r*^EuOCm55*ETNf30gdVJ3mWTfQGtJ-3d)D{#abeQxF&-w5Yc zXA4u7sj--zwKUTm2}1#)M^aRTpB9W#V#NJPdhC6_N!Vb`)la5R$}*;-;IPIl#OBnP zvlzE~@pKS)X`aI;7yqb0Sl)eUi@c07ltMJ6wthTA?`>n#bm4fmy$7aicj&gFsO$yZ z95&~>W@@zz;b171R8iTS(=AQxp2FhcK2}wc@+NrZegNOU6F&1*8=g~EM(=D-Fig1J zhH&D>zwaIa$#hWgSON22p^qC_d2(GX+U^0OTP^@zHC!}BMEuGm?5fJrr+mS3a{k4@ z<{C_Y+-SSNy1xInMf|)5?}sr=L1T^FMfIG&80gMBSg<$XzmyQC&uR@~L15j4)N2T) znq6Vdmy7Ys_CJX)XL%#O7+uVZ5jh<(s24b3KwaJ3q#lm4Bj z$JzjkmU=Pt{ikqX8InHAKMGEtNpJkz8b7qRnMSGBcrMS0`<2nwDKT{PJMpP`Y5#W3@UF0CA3sJJc2pH9Y@ZlKG9KrzYc`W@; z)hJQv#Qfem=_Jzh>7ve3?FLp^9$O#nt4Ucw(`AhZIxjwlv2;2>X+F$W7l4T+Vk4;f zrc5<3ymdLbQ#jC0_14pIg&wcb5$~PC(}X0a-NkfMRvtDcSpcf%@8x$4<2JAV`%?W~ zw=4KUrJoY?YE|h#)1{ztU`k0{zF+1v%ggug*Q3|Cp4-DLUB^T&lNG`rD}mt#!uhQB zTGS~@OacLo;aIv^!#1H&k$w`}OS^fK8@(j!W`s%t8g_4^Vc8&@^roSMD@f1S0KTBLEGKfXFr64Cr6)mU-|I7QPbf-eeyVb(5-W%;MRumOkU>k-;?kgr)IkyLEw{6>STD$7nq%}$g@9<`@<)VY0=rFIQ=Y$8>fTK#-wZ#tjbvt3W5p#I_1Hcq|f?yf-2 z^?TxH@N+iVE7iLF*lvvzx#(B??kqdMlTp6k7P!dal<=KN?f9A%s%#IVKEK-tmo%QQ z`tw+?m*uB!(Vv;-KEQ(SK3y1-d^AdvM%>ifXt(C7sz@`(&x>-nSfGuo(d=Tge72{W zSdjG1qF$lajMM0UUli;g0wN-9RV9vnuWmP+L7pYh5EjsiP47IejwdI>G(WkQ*rBeD z9iTo7C~2;VxV@6)J?PeUSN%w@zCC&PYfBhE1MN2%-W1=W9w9&cFlAp zUHD;K1HAjgG(G!1-~x*61NbU$(`CQldP3@0bh-ZJ9l z6%hy@wAvN4Jn5n~jbm>gP`+YvYtT|zE{Z!-k*OBYdA!FicS@J4^VcNSahtWlJO6` zqV)gTE&u2)#D!Pq^jVC-S<}n<{H46uihB`$3J2nkO`0zS*y*x-{W&6;vZ@k&}5m-G!LHI3>ZvF%YYFHHY)(ew*lXyhLe<|9I^UaF~JljB@?py)b$>bRz_hIy^KLzp!AP-{+0+YH`2+p@wFj*liVu0W z+`$t|6+x`4?`oNHu)xlIP|Gm#=Tvg$a zLTo zk!`-k(v*o~%Qc0V$Aj=xjk(`kAO)Afznql_23fH)fm|5}xNx(LZ9r-EmHZv5eVgtjz# z%({1LlJ^RCBF%%^ANe*f&CBkz8-XGXp~@_wkyW`6ez1`f%tY5MT3ei~BkXYcFg1xC z;vcodD&7tDUx>lrog0o!p*p!<8N!x|7JqjkMlRMO!R;vjp2#Gq?Qtajl8r8y?~x-e zjKgbzmpe}79$37jJG??;Bn?Ec$CwNR?f|z4hRrU*D=WiEj!a=OwCLqoqNJfjai1>s z>&Zm9w_eGGdHm^buc?)cn;KNlWZJfS&FE>qFf|(GPbxv@BD{^|Mx1i|e`A`LiY1upR{Adc6LwD?ZzdT)vYr%|iKJZ+NYa29zatjj1B#^9!5tOmF+A^GTKh_lMn1 
z+qA~(R1*{@&`6aVeDAk4>le^DP^yN4_)=2;*PmYIej-$;^_TFU*x>p14)R~7O#iG+ zVz7`DZlekB=Z4(d23-LZE+@Qrr`wyvRT}qWpMHgP7;%3=hU*P^o9l;u<~vsn<-5)I z(K)T8SOQb64n6(yvYI4Wk2`g~=0_KlnB5EFtslVr7>Y;NmvwoBqh6*%fGC|>JUXXp zbXuiZ-yz+}Pv%ivO6eYuFo8*H#3=%QdC0sAT#|P8i%e&z?23oIi5ElU1K`A!`#n~y1L7aV-8HhqnUSVx4vwN(%(P^YY+QLe9Pzkko^ zX(L9-z%o+E;*C6af zn3{>>Nm0fQTtYEj=bDI?M>XEHw6TP9PHV@d80kvGtsk?2qR0k-5$-d>nFj?q8ZhX@ zDgu+aF_>Ldr&g^`7RW_|-BYF7uq5_Vu!KzMp98F#x~rk2emT~BJ-Zo``l z;(b_gxqx&*3Z14}y23ie+Xj^qaT@O@-r7sXMt>fRy~(7X4IVj?DihP~|G{HAPI0Ew<7KRr(es5`K_fgk*V zB!vI8$%7vy!xGkfAvYlq6_wYXSEu>9@#N2L2kOPT(F@NM?!caK#*F1=)O?o!8p$r?!0RI9ke0`&I8wg? zKb9L#swqUmg$wKu_is(Z*%iL1tNa4+p~m>A6}OQQPWX!J6=9WOXhX3K53_)?vj)TI z8`Q9@5KyaJPM=;H)t#IWvX0Gyf;D~7N+;ZZH>me32X(OZM=dVrRyoD$>s2FI%i=!U zIxUpn%eY#Br#Jum&Wab1LMC>1f>O(lYvbEY2A>VWwrhTR5 zE3?nE>lOCHO`dKKfcai^X$mT^`slmYA1|@^ta6BE(>Mz~^SIX?b3NJfeHW*(u9d%z zFNr}@j=Z`&Sr6I+rW4*J&hH^8T|Z#vD~d|qVSdzb)2z0HaZ0&dFviEv*sp@Tob@@BZdt-9Vb4{^x6lsk}nL_(M!PL zBDz{wRAGE=Wt2f(?09ja{f2IEk7j!UMJMJREJ&2HH9nf>X?VP)8|W85UYyMrJpbMK ztK)~ImVA(7a1ffyhwar)G@|B6>Xc=sVy-s`a4RS9+KI6mr{XcH@d^jq#7AYzT4S~R8 zMr5JZGEXArUEmSwL3|ppx0o0gC8JkTv%x=MalrTo)8v2di2xSZ05)CborkN7sCytr z`S`&UcbsMPb-nzwQH%Y)!Pwd8&RAchDyxGTbMx7Foy3A(B5cjp%ux?!#BG%3i3bZm(XCYr#TR!Iz?qNDZGhPfif9? ze${-P*+N!hRIk+xEo(^nM8fTYv-+!Zb>$IzOm%g5n2KvJd-F*DfvP*BJGd?6>lQ-Z*I%(2kYaU?E!7%@K{8$tzd0r@7ZcqUzl z8!*4q?E-9HF(VM%9nbH|rt?0keWSdW6i2MHAd&pPbB(a}8MNgwrixAHUr!iB1>jSLLNkjTCZ{FPZ=^6>Tjore5Q z0yY8%dudPid8~6tq)yPp58tSH4V}Y)pcao#MdyM33XIrCKLgs_z|AQcwGLJtkX8GX z>HoZ_J$D_7XP(NJ%lgMGH2ayST$ac#y)pF7iZzgVQro#~$Aq#bH zmEUy_5!J^jb*VWEcB~YqpTf;9U&qkd4FVn26QUCm7Be$DK6;b7 z{jA+zz~PwH7=$ho0Idhwz{vKWKd>K=)_%bBh}iBFF1(;K+n?%BBDl!U3Lx*IeG@^W zH%WcLOWsPs`Tgqn1E9101LP9M>6bH29&o||+roU2L*V7hMWmiaDGokP&uq-!4B9Qp z0O4pZBepx3p&tt8jTd-_eGLX5e?cx?RoByQ{3s>hbv#ay`2MSeN-Ft~(O>$BGvqIw z4!BFHOMP)uc!aKNGZAD1C4-E0bn`#Z+H%S=u)ceBB+QS1AZ`@`%&%JHwVwJY!#T7@ zX|fqTftpSd-#?UqnZhl@YD6Ki4Wz=os-bwGqYDR;L6kH7@HwouYmLkQL)Uvp!yUeB zzllhg2nI1kjUJ4agy_B35M>aZ=q-9D2%-hiqD3!JhUnd>(WCboMDM+w$KLz*o^|%S z&VP}$u*}T&dG2yupQ~{L_%A|qvdCWE?FpV^Lz)jjzgYts7QcybX`yq$kH&c9A2I(7 zro)$F)d;PAg2XA!RnyUe=M9I7A!kpAyJOvp%aB(`1iaQ3^Md}YwJ*SCyoVMu;$1tS z`j<|~$PJuUPFFYP$ht|zM7kZ07}5t!*Enp<5dE=RvRoF^U+(yk5cA@P+Rvly_*fFm zox3q{lFiaduVM6y`MdKdj?22*TIO4sY`kM{8fBK(${b>T+m03DGqKW_E`)_3Mu?PEUxO9?H?;r&85w5h|ur~!W^a57<}lQzKc|5si=kAesf@_4xyn}?hZ z2puzCuL*@>YWLwjLs3(c=jSNAGW#p-S!Uk)`MJU6?6X&&j@pUn4Bv3i#@>+mcV$?P zKjB>tljprYlIJ~;wiU(pQNf#j3&tE!&*L$d0Uieb?{C&ZmM7xGKL!*}FH!LPn@MEZ zq?3}N_hA|@gs4PP^oM>ZOr~&o?+aADbcl~8@vi8}==DX~4mEVT`Pt52a+JoklXpj% z%Wf1I7OpozbarO3I!i@I5^-H?g+;80T0xfjoXREZ!VEaV%1kaK;GL8MrwaAmpO)?E zo6&SlB|RVg&-TdgF^w-vOGB_r)F4<1d>M_45?D}2Nb(r|=cNDNRgf8`@Lz6gtzxJL zZ1Cp#J3)K8&sC2}@kS!ZiZXhoGGh~%Fx^0DCPW7R7+ugK9*L*?$5u!x@m*wz2mpwj&B@G9MY-~TT5l;9k{kR#gJ)lvh0EQhTyQ44 z=h%bYyL;njw*R+i`Oin`zx#OM3<9%x!Qt|Q@hV%t8l6H`5E}9X&(`-5&R=sVIm09e zru%-)-{=D=pWE3VMcUZIpC(fU9&JyGsnR~_CAn>4=BRutw8j21@I5k5J6F*cEMSfU zYgZV_ud2+HAqojUs@TWtT~L^T#O~^$&p0a*nEmBUCr?ry&UMEcKe4H8jZ;1fMDT` z#&5mP$q)Yf6CtgPHk_kCK=AwkV-naF|6HHWE8QUz9$}Ltz+3>pR|H7TvDAvRI3+^t zD*wMP-TzTYIx^UuOeO;16OZGlT3|Lx`58gIO3lrMr~&8L4{aL*zo@V%jV2OKv;bT_ z0l#Q`f15^4Js~P@k28bfc=zaahojAs<0Z7YB|)zZQHd4D>B{HF8)^%(^&V|_#;iH( zfBYXB5vu9hEC(X+v20a$zP)?-g2w;SiIDQ-G!0zpbW{5tCAv{qR-~XMFhMLR&|$>c z5x;ZZc*;|yYKU?(Kubq=gt0%-aNf?z~MDhC`6PZ{i$ztp4+8C_@qTuBAsC z;IAppW&Jg`$Cd&(}}V!^S{3Ihbe)`$}p*&C!Ye zZ#mLo{cKqi9`|Ha!qkgseXafe@3&v_V>{M|M`0hMhF|pudm0E|U$|4HCULPQnhhk_ z+wJ4RgDIR71b2UbV^d@%*bBV}J1Ex7^rp9*;}_P{^xQpbPihTG1v-pO(=ICTsfXV8 
zyGz#F0YDriGvEVEunV2&qpi#814P?UAuiDN%hxjtL<#)wedB)~p|2U}HjO+a{iKd<+Z6Icz8nV;VSuaA9bVS?{$pRZO{j4mG7O- z$>^#iABa;mqs@skLKHCwq+g+Rk~zqKUrG4YgI-TCfpy4^e`kRTVe|N}w>UaYSTVx= zr2x_8B=Y}mwkI%W+%35XVffgT#YFN;_y~#ipK%Ony{p?9)=$oJeb8@!7N>`eEo;fp zDE z`_%8j23fM5s4or=?MIOqUSq-N9d!Ma<9@yrqGrdyz46J>b@4^>f`I6^*+@C7}WP-4QdK^c@|>!c?bZ%wsjqdf_1jW4+|?lZM#!kTUW#{->W24ud>J z1%$Pc1HBNw-ihJeeu-8Htk{y|UfsVH7QQ}EaL4<jG|upfp=As#IRTLY<$sq2D=|^JhjFx7&Wp zu$v8rzk#Vl`Fo#!dxv%e$U1OprQ{5U6#QU9r!htCitdG*E=~o(Ke%YGKK$#H0i#;B z9Lqsrfgn&Z(K_!8p4NE(eMMo=o9ya*xUUtNY zce75~A&F~-lFni>+YofpJow=W3GkJh6Z_T;Bd9QQFmV-HlO2=3`5{~cC-}re2KTcB!&I%a+1yxbm1d} zEfn9tf*N)$>MBpT>F>=HwFAaQQK{khj4Y+aj5A!F-`Rr(Kr>PgxjUagbB$wCop64( zFV*^Vzdx;{vX@}@$iefs6sX{zr+C=h=V{mwI-QXx;bvz(f7!(t!Q;nic{K1_A*t4E z&@fdt;(07)Gum}9-bIZ=eQFXvPdwOzwzK@!Isrv~1tut4r=tr^lET(gIk9%9M;Lei z&bgb|=4JOyvd|mVD>!sSFwm z#XIz6iqn{;YQ9Q#!cd^uGBA;5MG9U71Ja3YjtNMjEO*(}{d6pzl@EY*C|a*q7ogR) z$;l^)D{q?+`NnD5PxeX&q+AkGDVzy6C&k3jP;^AR90n0k%y(HbK)_64X!+DYjQ=r4 z>0c1K*(!QAJ%97zBc=e-ep%9eyJEc8(FVkd6<(ZA{*8-|mzv;h1YbLX*phc?f?ofC zlmw@NpX&Aw0t~ceB}2e}8fX%fhUw%qUaaE?rU*&rAbf;T2QaTc=yY{|nJ)_Ycne^z zW&l2VXS+IVJO?$Hg5SdpbCAT<$7>Jbr8MN_A%3}|} z_LIWKOZ1e^0OY!*e)ns{ol(*093WmfcWVZD^;2w>o;(@=KCvnfwJzHcU=+MTARZ%( z#;kvOXHuA$nkKD;ylnRZWPXi}$zKd9AG;Y1sp*{AFuukC40qx&i4!((8w?2b-P|(? zb1nun{}B*7`AZRqwCCh41MfYJ+L`dTvi+*3%i=)<&uYRD*S)4pCqO$*nJ74j{Mhott4e!b{6^2`+Q}UhGiKmT z{2(+43Sv=@5KllO3Xj;9sKI=HHCgZWD!l>6DX#=jwy=&P&TjA2E5P7uFir_5(5u&Y z&dn_kjxRxa9_Ma5fL69xtqpdF`l@Pf4VaCd_owhBUCY-wZ}*l4iy@WCJ6tiADZ_Z! z=Y-vX|Jgi_Z|Gr8P>9KB*Q*YpBJHXi5QdyU88%7n^g`%c=ImnQ9pGwPm;UFS9U!1K zxSUFHgr!{R&8#EA9kXfXyg;a{C+yiPv-5+M>}$R$kubeMMH-*#NcL5+hW6^m)=Gt) z=>f49RwAceLJ>XeNR^Z(xZ1Fs-{IzK^l|G)(*IV|A`~6nr#rq8fPZQZq%o ze(h+4nX|L{QBLuXsErVgNX_69hgO*n)8}~QtUcj=?^~}w6X`*DN_3$=b9p9A9P+ZG z8k@3Q0jM#*Cypzo@FrUOiS0F#NnVCJbC0BB2bW*BOi*~4<%zjTT8NNb(-ME?EmzXL zGz>6_#o8gi>cqIl*r4dn27ziHk^upxI-}39ll=GhO9EZBpye|j518v{+gG-Dv+uGIfxZZ2)sn_nn zRd2S^9lKfwnTa9(X(S(JidUWlrX?IH_8rd*QM6nGnXdGx2{I@2W4vq54XU9}_7BJ| zq>fU~e)J=Z9`OH4PwhZ0x~;})=yF_A{G?dW!JiXac=pKp8i3oST-6=4s|=(1%~0YL z`}MddW)J>wX>a{`r2qay#r?cP@Q88l`s_(*_x)GB@YM?ad2}0dVg@& z!hRCKh!TdJ-oJR;3C1aLEJkmFG9yzWFGPTn_d+D9buaX8;lZI(qI*;R2Akwx>tsF8 z`NMh;QP|*eY~kl}=`03+!3;e#+Bh&9l^qx?;cZ!D_e0>HT^*KPio=vHPu>rhyq_rf z1i{49M%D4{R6h_fpI8HlzpM&%32;u}D&9gm7>~Co1#3Q`>Su4uk}xOszJ`v;g@NOu z3mQXFJna8ER?jSZl;l^R4d*JIl9<#S0*2^&^Lm@^1Hj_I zuB#La!dXEPEr~6IWS&=TZMG9N>Vl$8%HsehU;%#cM+1pNDa@>JFz|e4crfY2`xfJl zl>X64zbzQgV0>9HU%Co7%a=i7;>&>SHe`8V8TK_>8?*z2mY8DhhuLa23%mk!N-<1d z{#+?}by7e@uPQOwr85eJu^c3?zRfv{tV9)wUGDGt+)fL7#RO&_5}-X^23kT#(%2(@ zDjZ1YDVWQq zI{ZCmLkUo#V(OchNt5`%Kq}H8Lo%9WmbR(lC3^?_3s|Y0Fi9j-qz004 zL`L=f_1mi12uM*q#6HDujluxC>~*2|B~{yZklj{qP;ND;(%s!1xkPSI5>NSz$kKnfc~)dd1^dIRyLp0MzEZYE9EP3T5nt^L#@(qMYcPU6D3%O1 zf0cT<7;mBDWYe~F41)H|2>Hrx+j796X?qvB#!Kmnb6_W07A9C{E(9YXkT&2H)0aw5UNj_6Sm z=H%k*|LTl{da8yLeo+bsFvnmTqGs#AAyI+HDj1^gHH>j`+T z?3{Qg-TpRCNc2;^x)1T|2p{z#?@%G-`X*g`uyeeg+ukimxF1FmTfI9myIN_|;}36` z>x`i%gvL5GHV;G2P6dtE_qT|O2Z%=J-G0+&Ll{zp)mJn3#4RISFMHBb>Wt#cC zuUy9oSjT7sj>Bby3?>KOJVK!g{6grMucR9TD@cvNwfY$uy)0K7z)AS-Exd3>BDR=G ztsr4h|I^%02mHOqSW-C&PYF(=*jqVKUrPNT-QxBygn|pG`y!Eh#+aL4ADpkA?k%M& zAjn<^m}CYIe^`N_u>^Ng+m}Awdn^|z36KZW{b@+v512n2qKtpeaK19_50fS{-v`sE zpRW3o`kxcc(B{BcPl(ArHCC#;1%we=@z_LNwV*J9-xb=?TuZv!`Ls>9Yd2p4+`9OT z?bM{`ljj7IHH*0fdxrMm$ibajek$UlVF*^(P<1J02OC3Si0m?S%t%Lq3ZC-}8YVt$ zn*pbie>!&dzJ1*>8d+)iSbWQgc4{6_76@XV&gs%`pw~mKA)z@cCuQY|3G5=@t%D5$ z9tw^xcA-9}AwA7zJP-Rg@KodoC@_?S(Ccvp&-8gJ5oXUFP_h{+-~OHGMh9A+`a>1f zy-^qDHGy0wmZCmbn++WxgF0RQC)_hJ4H;n`tRpGvU$S=#I9Xf)qsEL3nu5t!&%d@E 
zrj488u%YSNean8Z?L&(bO7)y9HV`5-;wPPq{6q?)U$P%&RZM)@?(2$(r7yXG?x$0> z{mPE<03fLB{Usg^`VN=JO3qgR#u?jjAFsL}5_*FNaWSdiyINnPrq-!7=>%hGs5@5Z zkcwj2RltQoA)4+V?s|;sl{oDuY-me^jsZ77AF;rs6}0y3s5O1D?wqYqKc4lqtm^%e z3G!xWTqaF-+LxA54Lxgpc>8~D$~ZV97tp(<=2g-c@;USCTg~^!xcye|-&bS{m|II; zhR=c3(SqM!qAzVpw!G~@=ij%ioJb$qvw>LxgC9fci>CvS4c?%OvsT=>HNR_lnjk^} zzY0sa@)ggMN%&Ai>dT;oxMn4x^-aJhx;b|GslL8U!aeAd$z{WHWZ9MN>yuH$CQ3dUdO^#_?~X?jypw|$B+o2{Dn zX2up8iLPd%Kv)Zaz{YHYuA%J?Ty=^}d)vnI4{g-Mxku?j?cAuMVeo5}2N?x7xeNDy z!`>}=9~pR{A!6%c=qHUt4flm!F9 zaI%5ae$>tPO$k8s$^G3yn0sq67B<7sEwqSN)BllPY_?j{1{-(R!9C_ZrXW+xQHNMK z{EasNuoU{u`ZA%`(d!XeTP7N$YPzKkcoz-QWQ98gFnIow!)BznD+qmzqV0K4X4i`S zMtU=H`plj}{5KMw7L2C*t*P*OFIs4{=AX+ulU{t~hfASg*deWjmJlF#bGG-}e3F*y zDnigbZU20&A1gi@hRMLsgeD;&!XXvS)5RL?irvg^&F!!ZP0Jz^+zWN2q5pRkAAU$qt_$rH*@nI6O8GK z8>z?I@fm!PQA1oh;a`lE=*`clO!bpBDJ$+fBS+Rhyd-u)8!N51V!{2BrEv<{XxZKH zx9Vq+7h9Fv?dM2$4CdT@yM5>$N=py;*>(0sXGJF$DZZ9IIc`0NV!7Ef-7o33&@)RO zyZw_XndK_=i+?9 z^I*t!11-+|bST!06&hEqR2XSP`Vk>h=}_)%gr{1LgT-TPWOU?DyvxIb*mbhZe-;{P z*Hl}f!sGbwZSFrK?<wVxM9JpcX=|g<=dswE<>o|}25sWmp?<#J6RcF;ARetVRt4p_Vd|)FQm-beuJlCdB zaC$hjT~fDg2s%<{%~k{s0!f{{rM;a;SkXo7j`PGqJOVvGBAe^*G`VXVd*r@1q+Ws@ ziOevK9L>cmyQA8BFszLK-jzg0HBfs4C<%$@U0_Vyz34Sjbly3;BwRS!3*#x)L8-p;Uip*b^G^ zR{c-T;+Xcz7*|nQDsLE6B6{#ag%Z(&5tR zl6Dsyx!6{n*i?Sn>9`w(4Kt2`zO^}JZ%s$NMqZ5!gQS`GUj+{r*)|>o$k)nPWle`7 z`)u#C73wYCR2>+3iP3(RnTn#3B0+ws1MQE|{!;rRdexGeRT3!!(kW~<9PL;w`<_Os zjz(Zp4hKB>c&!6*?Le-_)ZA4lG-fL{^U#y`m>6oJ!ofy=V1|6!Rs_~WN(8CkbzoYXf?#3orlI}#0UWN@zIx)}Iq17k;{&gu1xHq`$ZaP&MH zG^kF%cCXy)!B8j{uJwaLEiH9Yd%eZmdbh*gqm2v2zt~<+;6!!yFYnuet*C%@M;fQ4 zddel8`EoNZw<}?cQNAHQ#KDs{JfMA%@I56UAjEtyLfEQ^#6ino7Ko=)qx0lQH#kxU1|t?G9R^YbvJI9S z?q7JoN!qw6*0jvOdU@ysgnM?OO0SU8WZN%0R^!Aj!*0-btySadiZ-|BbvxSC-~aJ8 zy2|xP^9`hS1ucjMzmFE596$u3p4NM`7^>!`P`phWj00o)yY&ps{b0tDT3Mk))3k|J zTX{pxw3eos5m|8}8eaa#apKI5X7?nNZ1V z)b0ISY@yljhibJ=#lxDK%eE`GNrA8YJj>s59{7F3-!sIYkr~9~2(=k{r0vD4AUVbS zh~8u?=Jm-u)ImSGQxIa-G3(5R>D$6PY2*)_fY~_*8n_~)zonVevQzjjRyoT**$#2aka7%u7!pVRJ7jKPE z`ee>*h2D2GNT0h4k?cm=QSxaXNxV;Z?PPhb8c)#_nP17&Azn3Ii^)-9k)&zoVyO(x zfFd-j;$yGhh?CgR$0~n7Ui~D<5%okUE7IK@e5XLrR~?q#O3t-3=g&$cLr;^zoWC16 zm|6&AZfXa{Xu2M?h4To{Juv!7q-GDEd7el5+U%=awIQpcHycn!%d&%xN}UjMT4Pg;-3>Ai%o`$SQ0;y{v!i zN)-?r@*oOUwJwEv4!YRY!mH}iDJzqwPMBCpg_cFTVD5~_4e$~R4s!0}9lr_pL}^t_ zd!~o&d*1u``^0XmX_ntM1+1BdHZ#e!^LN-r`#D`7{mPxa2p7b?ZqUZC&n$corRcCy zT=uNmuNF{Zh^{yYwylq8M%_zk6(ty9yL394?@FUP842_^g3eEsRNQtr_{V$PUzSa2 ze~;*LN@4}Jme--CY;QtqKstnRHF;}?c5`UyQ<(X*n|9M5$;DKsp2xe7kYCaHAPDfc ziDCBscazwQp6Z(MHtgZ3D4ONRfs44ZjjH+RclE%|X`((-=NP5kJzGj+=J*fFs{eUqkxmXZ z?Yc#5uuDMUliW$Z?i7@xYm)-&blPQYa#4vyUwiQkk`?AVUX<6$M2kqkO7TVB0J74` zm^tWnY}A&Rf_d~OJAT%I-uc`5r$F^LIB|}u>WmyA1>YxB1H-rd)nZhUHX9wEqDk+w z1;`-Lmd8XA@|PliZ@TjRRjw%Ff={@X;)g)gQ=lUSKW)TgphJ1$4-@tRj>*|@V%(7Z zR@qcXbs+h;_)};$=%mEi2y&pW{vG@sD`Rh4OXn}V1>(~*X4WwoEFxCbF0Bs<6N)-EtpAG%vRE#{xu4bZSXB zn@&-+a;!Dt-8f@)PB_$-p$h-rRCZTtCyoUgk^8$!ni$~S*r3lspMTD2zu0wp{~;TT z+CDg|$Zl&M`ER|W25%3gk|Epm**>y&szjbR$Qf^Jr@j_wVTHd-{T&ZQ{5mb4%hZqq?o8))c-{GPOuGii=9^hTFN!GZ}hEj#V8jdcZ@aZ1J@iZ1G;zeBG zAy|RGP!{%lQTNE(rmeEf*WNZimF78x(?_Uj&(PmWf%rgNyd1bZrkEN|+x%~Rj|=Z2@rf0Or=P7VsXj%=sZ4X?Is-Tx%1NfoHiGC0@k@7qYkX_U2N z%ekswM|mAC`580+X;awkPZ8M`<7@J{ua&1Q$UrqN<&;-xR-EM28*(Rs2 z&wI@vGhQxkSS9IB2T@X2L}7E@b%OoIL=WgOG9xfct@G&&$Ru$Bd2ou)KiSQL4d##o^9BZd z?Ds)%xzv*L?i@?S$@>yrf$W5*`@(Mv@CjOFt4ZZ<%Z@T^Rjd=ki`r6V6yD6zt_*`^ ztF?DBDsV;jnaf#C>Ay|dg}~~epZ*IfarB7V&=HX>R;=OBF1Dd+BR3rhOxGyNQRn%8 zay0fJ**&Y=vlv8|TH6sTOc}RYUXj zODDoZ#hZVY6$${T!W-r9Yj+8s22K+6?hQ*{nBNEGyB7}%yvs|y-_XV^_k5$iu;`PC 
zA_E3hy+wUcyG0attS|*^R88>%gc$2)=7_P3U2PXHc?v@l@#l;O%%C9SKQF<$IyMxj zJ}+%L7m)WP)%EO@#Em#S!42JANjR2~Le`N}=Gej(&A}jEkEkwqOmMRg+ba-nN77S< zw;ra#)1ZR?T`^{%-A9xHN5G!U&Y+CorH}lVfz&>N>HN1hHV4R$CLzjAjdi>#+1-Mr zJ-Fl8Z{D9LzeMwYv@Rj}($dXDGe-)>dfkdN;Nu(rKcdjHuKFKt8- z(L`k+!IHs`$y`rcnT%7XF5fYDw!hqZwM`+~C1NNiQm3(Q%ON8xnvA%Z-^9(5lXv{A zLtj}=PbG_eSmlA~$EuJfIJ={Ep^fm!1S*Nxo6`faV4wBH%rktS|F8gpDC2ag;ds%| zWtqsDU zOAN9)kFR}yVyQh;22&$z4HAlU1aaunkvwi+Ac$AiqW4}1sHtgWF^I+Bn^QyCiDL52QK#fL$u zRDcu0Tx*cK=zX)AWQzi?0RVbLUfZtQmaYh-vxJO5@X#R!wMQSRBB;wtey@K0V9>7q z-KrEb5GWpK1y(x-3l_Jb9<0u7&&*B@e)Q?5iXlyderH%Oa3-nEcut zA5!{ug7NzDltInoOU$2vFGX{$#jTwGJOX^Fw-QKkj}(AlKPr`}kYDziMTx_kzVwyv zan(!}@>5ZtQcVnV>Hq?6h^jYP{PIxRU@f2D zc-e*Oo`&GNEuh3uUF31)uRfe>F!Pbr9QMqibvySyEH{&HV0cPWTg&hArIq+jQ`sBqmu-ASXtgVGqEo*xHpwQknfy-Ym~-AK^)Zyj zb#Z_kW&*D(dsd)fxUBt4{37iu4EYXHXds3&ZIRDq^@RB(8=6KO z$BJ!1V=+uG=8+6)|0c|wmh&(UM$dfqG;%hm203D-za$iC;xS|B^ET>febC?C$0 zk)#^pUY9V;6j9CMJJY#b+}}U38XscaK40-x|Ith|jpMFRrV*$s`*lu4AkB}a=R*#} zXzKIxcT#!#AuXXEFR($Pg+GfwJiL%X*hDFQhxGVH zd@|K}4=xir%r$Cz9v&XrN}F%Gz{;hb?*r!#e8_0-1%){>euuomZM zN_%gv-{ zVQ+(On)9!*8|d+FLak$D3c0Z4WP#oZLVaVFTwL<0x+lC#jIgP|Ry~E1`4%8a69ZyL zkPB1@eC?c|b3QlWU}gt121iA@oOXdMiz>&($;eqC;|OSE8WE9>Y*bkBzS@_HuE4@% z{w^UOj2rq>HPqp`N@z#AQiQk_O%L;s-woJ$=E&~S#lJU<=xZB0SzTg2N`ks}z6;Bj z)qZ&)hKVbpmUp?}kDK*rYVdIDR-;HuLN1=Q!XvSXgPuS=zV(U)cjV!y!#!Azp~gy(ONPFv_ZsK=OYdUi3nr#$@-I_L2o47R&;VZeCkGmN zF&zjw1K>HwwBgXDi$qW8_=7J9Rw+090*%a!#8Qf_CA4n{;)zT}+KN zZHf>GM5f@;+nygY)-JnGfZ|B{K@)4t7D5w4%^ z&(_fEqY7r9<(4PqzqBf4kBd@zm(w+$SOWKxzM zm`aV#Ugd9)oS34)?$=Fp9Q^XpC?dHjp7Krx*p4c6^ZNvfMSYWp2=0B_c~Tfb=@smL)sgiqj)a<`MQ>LF5|dVgbm1wqwUsb z>J^UEy%8#*Gpw=y-i`TBd%Nrq|A2`8b4H?_NY33%7JvHB=M`LPj#>1dy-fIMEq|%t zZj!)}x5H}=1zYTI7uu->jv*}OnPs0%11ZgG3pY*X?{+c0(-mpmUkZCK5;uhfQqKS{ zT&~0yibuUHO&$;c%@W@Lmor_%QP+WEWoA_(h-SZuqtbL>O4H-1uD z@%qo+(K|j!l7~n4%aZYRRTx*7WxMcE`bOCH5bE5##3b(Va=%dE ziCV*_=V~ANhP;cC=Al*vg?23)7G*qyY)23#zpQwE(XXE`6<4WO0xKcvQ~2%q0a7oq zd~09Z&QvQiZMC40rgZs$op`zXv4#0yLAi{QI=Fi>@cTA9$fREtLNiMO&BU#S6@Zw9 z9!~qE(k7k!Z3Pc>QGA)2urzPz5FlH@{;P>XIU110XS4&wTkrhQCCAreLiJ1#r@`vQBylL>0_MUZ8MNkcz_ z2zFGusV~Vd@T3KjgUkoLm;}!7ckSq1uls>HJ+S(2pJ|LHBS6wgOW%h3hg+V$pc1Nw z)c1RBhIsWJ5G#*}=`FAA66|6$gIm?JL119a>&L;KfV#*ku&Pn3uOqkGcy-Vd%rcmd z`qoZ!7^8{P=C+?KuL5xe^L0(Cai;%hl*ef1ihY0l__6#x3%p^YFzzzeJ9h8kG=0xhuk=!2 zm+-27w!&yfcYleQ5GJ8Mqw?yN#RK-l68vn5sYm)AD?laF6|F<%jrd2Sop%qb0$KlW ztN|oi4|%l7IA%>1P{ch-;hppy8K{v`g`rU_1{q5l1ua5~w^u$kW=cIFp>L}zI1&2pG0#X2)$g7;S|2u+4h?vl#os)@A!*rp1oTm8){X_2y zJU)(<;X+M|#TbF`iBz{m6o2W9V1`fI@6W2U8EwA03 zZh`qkhJ{EvGup9#qs-)ULe;yp@mkLKitjj-K(=X`mfxvLY?B{FN7iEM>{E_$k~JP?qpd)mrQ){z%>x z6|dj(ZrmViYTTC8w^TMIu08iyqqqrustgRGWmgS({@6z1#s%Ny2%j=tZ-2;WI5KX> zMTGGJB|V|k=-h;%`s#D7Q-^3}dPU*-s4u6+=FXc$);gb{3Uv3ztY-`Hjq(b;Y5>my zP_v88g6i`shXWC0q7I0^WP+a6nkT%vfBrt44(LhNvt~m{^5vGPs^J}L#p%8AhDQIc z468(sw?WxCL=<2PUm^CEM$a|lbeH2&a5wuKJWk`tqrY5bF+5hnbQL2<&jt!!`#hJL z+X7$*!BZ1pA@U6sDySp~b)aSxBT}|dI$sQ;l;3PDi!pZ6a!&2VH58kyoGWuM@rG`6 zlMTH#Y{NuxjZVmVfH*wE(7uMUcav-#=yoMzdCD|!7$P^Y-Rup{_7$-4v?^|o{v>Ka zEAG$V(GZ&E9KA01q8&dc&ilFY1K5DdQhB~UZG0yPg1=Pr0}+Yb;}ozC-ZFbXy3{t- z$LToB4zr&a5CZ`fgKrsr;gh*5!f$AbkmZFI!!vPM;$<)IQp{Hzoc^67vsmOX+FBH^ z(+jR8$w0fto)-Sx)f1rj---5;h9B#&=WCRt9!WH5gGuAwwB}g0$s+Hl*r-D(<-mqC z?$lRR=R1kJxsy(u6vIiGpaKqrsP`TQ>_owdU4 zQUdDqR=j_R&&tPccT)`h4}sUG=1>Pvj*Y%6rES~}rZv?atPlqA5X(fj7}E^l!t zoocgoo%2?=SUiqk2m00Xy_0E^(*Srq7F#d=A?Eu|6RQa}draEn7uf1LS9UB~ltM%`N7ZA}4Y;p0IL37_LVI6g+>c39q*Ix#v`%uR`r$^iU`l->RA{z| z|H-dfdDZb4;ug!(H1#4Fl^g&mJG(w<i4>3n|8rw7NfaU#Bg<$@aug-1fT%xaxa?}VVFx$EwM3v(r#PC&XJq406jrB?!f 
zKFt#P6^{Cy!B%MemgL}~7w4G@R))O2mALv}XADe0Ew(fR(}}^nMkboD=z$BF$+wXo z&1%N7B;B`XGW8G8fIhSSVXet@55knz1Q$FhQs~I?GGQzai&oY-9#(i$CorM_G)gQr zQeFpEl@%15&gFpQ#n3%|eAdcv3?d?vW~3+m6gl#!D+}7&YE{AA-Jj)RVd(zeYwk?U zZ5&d!27waMB3|Y#QH&P6rbc@d~dolP;tdj0e(|J|lb0 zM2lUFS6LA*{NRFfmj=~&?q*ZtBbQo2t(0>GWW$B3CZF1WUIKZZ)0aGT0w23Ru2epE z#*hO+BrxDFXekRt3KJN{@m_Cnm2_txyurOjwX*@n>YeqL*J-k70_U{{mh>^vxSb1; z9G@n(=nxgoJ0VGVv%ZQ7w}hmnJ~dW;zHGf*i`x9Pzzo&JyS{jDaH*3I#WU#p_UHj@F!n7U?_H;%1^b_LLJ=AI*1kL4jGcd;#=^ zO`h02L5>S$txQ7+Uo6+6vQ8J$I*rWYg4EzmoKu! z*!|ctkSH#**VX$}=^)wk12U&t-Ed4w?Sq2CZ_{B_tE=nzo>S|f^6Wyx0e3iPo zE1A{N@1JS!d0UzrLGrc|qkBwBg#6XB_GBH6%=c>k@Gy0SxY-pO^DWD6n()}1-gbSG zNR*J4ks}Z1?T)$Ct;H1-Xtegy|AwF7A1+H#ym$P{pIOhK{Swrl))znz#?Z?VoaR+4 z=3Ty+mm!>8OzoG0rf34yF%=1Pt_Eo{cjMNx9pHS2+SkfroLz2a%&fOEbR4xowqF@M z)xmw(h6WA0XHgeoEzKtqhSMxjKe(z!T zcC39;d|R59i$YuVY8y74Pr{+p_5q_`D-(&YYT(W=@wh`cn`Hi^ycy47f0jr@lU0mK zcK*|cTGw0JV@~o(sEu9|3Rh}pXbbw_{i1*%qtDNm>hw6cg~TgYR|_3IT%g1!hgXS> zF>!V?fb_kWMad@cJ=As49p{`&c&MztE)=XAq{hl9k=5C~mM(ldU(uisZDQ$d5RQr2 zI*MnSV_(WH?#cF>+^5A8+@M1G*jg9gbS-8lWVO)~5OMC_%`NU*4i~Dce0t#SqgC72 zLJBwvGTtN$73_J>==YvshNhQC?(K_5RXV&9IucxhRn;&a{T0V-p$Nr2kOJ2WB{Dib z46u|K8??P5fb;{*1YH++$>OEMvBR^QJCngGY)$B#sf#u|9o~rx9>>VN3-)Z8yW30Wjb5+ZZ zH@N4v4uUOL&p4&nOm2d%2v?-Q^D)b*XU0BDtITDLusA%~X)8Aj%Eygnq0)&}W~dP# zf_Gn(WE3w6d|8$cb4+&nQ?v9i-#oWVIhifEykPAJiCB7cADrK#W`Re@Q@KuRy|c7K za;{V=mj}AdwrFl3Ty}2?XCT&zJ(j-hsiPXzidiTX*R1@U7Jhr4W}!4|jBP1KqatgF zHD);d8XoQ-k^J1HvMODZHe?f`GaZ4h@+jVItKU&AV{z)2KAfWxiaIry_6tR|jQAghs6$`Cx`kiXw=DvWU^F}YJUCdR4 z&Ht5B2+l~SG6M5Xquj15LzgeA;`V77Sp&M2DvdbTX9nib!x)i1}Zj3#B5 zD=>^_rH!bySvJi+V@y0PmV@fiGlf}K>!Cwuhw^-mi45S4P4{dgI0l7M4FE^1#?9>s z;mqUKA+47te})oYu*I*sW@~P-jy*p)XQ#cQ-5T4f*+zkZZ14EF7?M`mJJ_~K+KWm& zrrwVJev-qrCNu6(fWu1=kGKKmSDkgn-B?dS^CbWaSc~%^$-AyD_4w&b7EGIFPcj0r zjYUki^VN??CGRzh@Z}rScg`coL&UpRX6GKCj;YO?#@Ao&P>1=g&im}TSl0!~TqX-Z z?`=0SO;6Rgt>ZISL=#nUy||}cQlk>W;83|eGx;oYa?W>MfCcl3`Kt#gpSxdG?Y`KV zY_?z0Opatmp}utN~kDKTa6W^d_@@)*XbXBY+<)$^yJ z7OoaSgh~SF3!A@vb0okr&kv*P-X#Shc(-0tkJrcjmLJlb9|e&VvBQU zHqpJn+q6}8{b*){m5%hax+G&b3OC?-ccHxcG)YN%b_cXLu%9d1q~n(b^INn7Zw9ma zgkL1W{7HkQSUuyiG7Z()W;!o9LNmRpJ{hnd^GLsMb{;W?b6&eqB*YFH3aKBiI)ssJ zbfKj6Z|DCi2461p-)lZE`@{v?+PUe)ci9X;7`ce6bk?)Yo;Uc~7S}4Ps_nMhVSdjh zgf|Eyfa04ZII^0$sZ(K?pCvKW-<^^_($2VUmMhhDNT>zhLt1q%l@A!A2j$9y&;uM)+}t8JQqIO4->i3M{vq2uQk zQeCIc-+TpBB}skQ1WcoT^Bhz$m~7nKJAsd>N_5i>x3xyV>=msp-cGJ*p}m!v?4r`D zJ3lykHGd>IQg-|E>=i)THv805tEToJD*4x-8d$1 zPbId>?cbFtw>$HkBY2=wnyZlB5>0(v5iWMEcKD<0dbZ|kz#XV&QFJ_YY^HR^wtx&m zHGNvoR4`76C`Vn8ZERRx4x<~2xOJ225ZAjDjw8lWD*8LA90A5GuZF;_{I!%zT$fMJ)^*pwj9Z& z5{#_6J`F7M_<{y&KvkPieJu-rn3ZD#3u_8im;h=IILOtx12}4H3=_mGn{uq zF^Ql=yN;zY9P`4=&bc;X(^O}KLZVzT-D@A6=7^ER9oYv0f6}1VB~Ryt7!4e zaM$-w?2A5ZnE>9b)T-&yl0y}j+q_t>iL3hh){y@dc*Qi9lZ zP@Q9*qklVG`E~+CgExcOTyE~+Ho7hItR3n-kaMP3+#GOyu$}Ex&Ie4SMbAMk!reoX zWDivo?}M?OPt1#D)vfg3C_%j5;@1jwvv2O|Fm!0rh{ytL=l_{}!+ z)TadjLMgd}iqWnr$DU|>5L=0wFCI+r)>&@%zt(}e$QbB@5$De7?ob6a07IZ%w`(Ve z>ZJf;^2TS^!n#i5)YU$GXA3LlgQiV3h$vc}xp2Q_Qh)bTS?2+KekbZ^AxX7gBGrn#!Y+k8PGt`aKxUMgZCnSD3ZXZX>oBO@;IlR<$`lVT9gF`xtgU*0A zn4!LUWP~1z)+zunE-^pvpgH$x>eB`()ay4a#(L}b+-0wNm9prB6kc<9N{vuxxLBv8 zTbT8ZF#rvPj>9R%3+jMhDW}h8c?_;7pYsW|1~scltCIm-DXhIC0gf}+G{9FbpCNUC zG+0p|#cjsk{`k_?Gf0#%UdH>L=x0XG80ytueb({h_X@U>ylV=gi_{EJI;fTwq{&4^ zs;VgQi4e|o3d4*j7hAx7sdn)2hjxr@=xpXE^6AUgI-|!4sUbMMz)us9fl@o&O;Dr` zZ`MCw?kU|Vk#o6)Ai8}D-AhSavRH@J-C!W3IgNyPbnI7DKfCd^?>N`3Q|L!qX9Ciw zOBKfCc;#F4`8d<>Ny_?SU7STu9mhNLX zrZek4i{b%9Hr;b}alhtGkPN+uqTQ!z6u#OhULSS~8aOy%!o9JOb&f^wAx46WjU8P@ z_z1=gkI&lNOAc3hC%3T~u-XFmR}}XU7ua|S&>UWcnBD;23myH9cP;$GF!vqH`>k&r 
z7;BpGF6nBvF&dI*(mWpG%UQ>@j|Ck)u*=!uJ%Q|=VMG^f)qiNH(qrqoLw=0e4RFn; z9#Rq)E1$D12lyonOEyPsMD6lHU$OR8Opwo6>AsilP-^7gKDC5n02~?1`UzLNO45$) zKzZ>SkVnk%8VXAiJ+#pUFHkb@SoUL)Ll@=t?iKWf#W?>EGY|j1yxg=~wXZR)z!c5{5J&W8-jUU-vR3=Kgd25nc{UHVDFRs%I(!toieXva!i_AIs7jXxX8%9GqW=QNjBaqP%a3!ISU zHa^TH-1MJYe3#|>`aH~h0ipDyPOmvsZ(!+CUAUJpM2|<#z3}NGvO>;ZV0*9Oz%FqH z65g`f^^WFEotOE5@8-dPVaM`>=nOTw0d`M!)vqgAprKwahuIzjcd=C-15_T4OMA_s z^k1Ag&L-OLmiyha$cr82FVisxMAfati?6&ve8nTPWkKabVj`Vs%Frx$rE>S(xkIIA zm!HPHS{vxqP^0Sse9Tb0hqOV^hm6)XI$y~Q7LTd6d8;Bm6$`|I)u|>ofQd74dZzAJ z3SV2Ge38O4uLhlQm{_NoexN^m12?ZF%Ihrpetvn-+<)u3|2B?;kEVWN6VykRi9%t% zu|~YFBg9gp0eke?fY0g?iApKXypqTUsCQ1rZ(m%YVa^x(VIB^LkpQ+nLMILy-vkAE zSYg4$#W9{64fZGhNgRdeXl_i6&|V2CxD_uRc!yHMu{Dmmj-;^8z{{Z%u~#}qGT(dZ zl^+J9a_!944Q<(#{7CmUx7G(<0v_+}K)4M?gP7>ZExCa`$B2?N!?NyW;96yleC0wHK}sq`oRj z^wXoY8~cG4x!oKCgd)=c>mX6S5l^64P9N#KU8=8 ze}DYDUs~$r1sA{2)a1`o#e!VZS|iw|8AIQ(0~{wt9F5vSzO&4~Q-;s%zx*;X(jVIs zxGzBPgaH6~5@%6>r2RY+v=qu{5k2{hIelN&_m`Wj#opfsgE?wKuHSE9rV*hZlcAK- z+>JAhjWx7R5MInfeG44^{dF4*sHvADA64HydG=!oV0$7xya2`~aV~kVw>OT0>9_%+ z%xel!2yE|r8HfeyZ^Zt?nqSn(oAXp`r^90?9~m)xURu%wE-)++u&|8WvAUGsD=|(*N{3iPAcfM3;#eYPac3u8AU;1S~^8DF`LGQtC zmjA2feG!3M5!f0g7#OqmofiG0+&;@tG-EKMC*%h?{tzwx_qs2_fjDa5xX&zqzrjCV zPk!kL_43BZm9ig75B~8gru;x0C|0H3e_Y7-uNp5+`TG@{<=LWK|9$L#Ska#!eZB+4 zAvhvlt@ywH?|<+3(R=^?T4aTK9^F4KIVGd)O;#l#d#@xbMMCz@9>+d7 zzw17#&-?p-{IADDblk_euh;b&&)4(yyq{^PDV!mpC4nI5jN)xsO$Z`qgCNoc3L@~z zcoj1<1f62FmXXo0wy=Pp+s|TR9%@ACQ@5_7MdyPh&q{}A$h^FI#Xp+DfW0U0jpXkX z4LKWB1^%_?D1Ny}IXr5^7?#I!l%%17_*BkMIPu%79M0d7(zhSGkg~%dQG-qJ>P8+p z&5k;C@7d!qqn=ncc6ga^w2X{vs{5X?@d~V63ki0p)-4sv7Iw`*0HR z()wVgqN2uek0a0-N?)@4*{P(iKats?LLPx_dZ?>cXgJO{rZ@51&tpmXYdz=wtEt5< zBj58R5Ix5}9U1m1)@k**wou^az?&~NIqHtuo;vZ&uM*07PBF%HY*JoUXknH0)mp>G z957@nJjgAOwR3D|m9lYG5k~uw-Q?rtKVkB4{qv%7#6@|mU-v^fliF!dXut_##?*OE z#6Zp~_>%kIb=7Sv3!=Rt;gz);ClaupZ(`1zIq8r8Tat(!|8Dvz=KusRL2Lt+5JWwW z=PgAs0PQjpUB<`zf?sJ$%nr$!G6|i0C&^_6sivP#CQ2C>t-$~AWN4h5hA1L{LM1Rx z^2BHW;mN>4iVLr$_#)3{kUB+=gjep@+HRY6AK zcCzJ}1sNgVqkg;5oPHW$X4c93HJ(|8{=tnL8Yi0Gr?@K(c9 zv)&@Y0i`9yr7KGmu4gff3ZFP8Uf73Mm>QntCf5yr{MqQ!nTZ^GQg??M;Zgk6fQSZU zQ`|V++Q_cRuHpf{RA2*(9>V*SLl8p{iS)GzTC*2xPj6p-N741n_>Aen z*&^BEPpZ$Xug1QUa!HrhK)-E$qx<$8e^8FnRL^cnN(s@go4+nI$UOY+u_@$x_LLTf@w8pZW9|qKKCb>}JUiUhs-5aol#uV#(gVU8;0Ou}ir_aY#u) ziCU>#kzCm?+N@pZ%cqW|xcO)gme5yyv0JaYUWu`Isk-Wfb3f#0jk^{t!nJdoF}6Q; zCw7d}Po>VpS54|pqvA!m49$smP48J`zUy39D^gj1J^A9SYMTLv&RdoFS3RUTm!q0h zOm%p@5?CxfEFaFCo)MnGn;|?O`#eo4RVk_c+}*&d_iiecs+20+&C1D|{Sce?^Y!*^ zhJvp;5qBiAMsvjr8hcKD8_56u_IlT=2kUyb-c$XIXVveOzj3$N7+pstADVx@z(sHI zgTQ;+*9@6P`92E7Kd<*a>$~;-Mg>W2yZ7Cmcl&oUt1bh7%CQSwsbG}pR^?GI&{~bD zOY|)~@;wwk>?UF*Iz{6;?x!A4N1po8S<|hDy$f3@U@dSpthIINU+DiV{Q2c) zg%?cm-&BTGU6VHBz4>n5I3Hh@_%;4*LU3YVeAvf%8+F&~n5VaX4S)4c+Ven@Q{_<2 zQ{{3Uef&}SqTHq|*|^!@TxE_~S5VCJyYY8ZV!kC>Gd%M7VPKVMbMAo_65XlOC(u;( z-Sv59%d@HE>6(u>3eOFc4s|(kRottHTSj=XVWcs5#P6P@$;32kDV%S$YNb+m8TG99 zL2k!atB1OChhH;A2^rJ+Kb@SocVh345NkBOTTEaGu~C@f!-z-vx()}E^$`5mve zUwy~_Nch*)eaTIg7pf{Aoi?0;WmmUdlAR1~hrMeQ4%Tcu8dSP^7D&c2BU&3nfLGjo7 z%JO!4kqtO&>rcnPxXwxsU^6Qb46j>Z2QVG-5|0UEBjW~T5bLl-VOnSW#`AH zkLARIj5YdphQ?kfd-kcw!lwM!d7Al-CNt)DRr>5pja=@9d3U8I?lmJqLc#|at@H1` z;d*!I&cFTpiEwUJxM$e@hDE$ZLTjqvuHfe3r=z!T@01n2S?^uXnrb>X%I2BWnV6V3 zlca1g9{VCTA?hOA>X8lBNjJ~#u@PNn{%=a57>9)xo{ahz$*y7}PEUPuw%+d#Cb}AU znGydedMoqeLNCV>$pWqDoa2Ywp^w#vy0s<|X}aPjK2jUfZE{U52cgPwj@(^FYL%j1 zSN7XwyFaJ4>Jk);u5K+bRLpuVe|BkI)Wrz-#M$YM2>JC+&Q{)Ax|dR}igepd+APOD z*tm!BpOAkt>soas|8Sv3=>84onbgDD#T}0wBzXY?BmFv^2QMw(Rd0%Q(|xn1 z=KcsiCN9xY@u!T}yjhO24&EO0?vIuFBF`m~@Vpp`*pWbcY`X_atZzh=NKmn%)1uPi 
z)Hzgds?w>Zr8y*5V$4TE+u}HQs2`9XMei2R#?0mw`wbrT@sLoe)=||M?^o}JbtX+A z!^XBdJ-&+h#O>Od)veWbV$LxQFzt&Ic_$luG5Ex-o6SKu_O;rf;WQ8FAAPCwAFZut5AQxT;S}=tm;dlt5wok6QBMN>&WUj0ri^Z)| zdOO45l~eY&^_?IHj}!OhgqjDa3?g<`RFxy1I*osVg8TLO9a#t>are~LbJjF-W43oh zKCrg2WOnwjw`9fvHND`JINMkE-=2}f7L)Jhw_<1$+z%8fO@7EDh6}!JT zw7DptrXR>tUoio)6*(l6dU(ZhW>a61X z{aXe}>*qrr)Tilnjqd#1OVFLw$BM67yxB&63EO+tLPbs?@#WQl`MN^Z#*JO77J^I7 zyxQD3zpjxI$tm~nxkL>e84T)gx-M|iiqg0X>Y)qS&zw~)8cy@I!JQRMQIn_p=kh9OUj%?fb})0i699n@i_ZmKj7OwSy0OhqPs=9Qn; zB!04UCPFFcMaLbFlSSu08r#>KW}XZUc54@@;XZfC-SEXN_j~C-19L5@Puu#An>*Bz zkAWmZ(!uW(R2RBGxnf9aik1kS?Gwez#4{#v|9m|xOvO{6)Fx_-dGut3N0!Z! zIj3IsbW0IcDi?E+P|s@{Dhj>(R*2Bx#P{wVv*gkv-&&qVm1&eDRSl_(Vq(b0PWVPi z@kkbaSdwfEc}P4$R_1NJt@{A#zwS9|BKJ|y)>- zTdQdIe&x_#F_E@k&E{NZ7xIJ9{?BTYF5)Iw#?K|%_rRSro4BmdNh}^rh*)G{nGI|%&!hn2Im49Q14{UzuQhaey7S< z^mM$-Hm>WsAdz6|QgOQlGqZ;GT7I?Ay2H-oiCsy9kgMYR z?44&Bl&=w6l;5s&cYD@WYRv6l)vpv8W>p@vmNZwC`N?QYCi_!rQ(l8;fbeaCkC*OJ zj8A{FsYm6EwZAjS`zb7+AC-3Pkr;88{`t{&_IGW6K1L@#3T=(O<0avIHvIGQuWrx2 zyTnl;ypj78FC^O*wVXcx=&!k38FeuE(lzg$_64F%p#|3k8!0rg4OMuu@$xe+`BS2= zy{_6wua7g^cA96(?PpB+%RU-WO}ETTddB=({1N&=-;7RcI#ckCTEjNPHD%ht&*eMC39 z8kUeR^;I>qRP5d*O9qqqmj>lS`w7l``Db=LRQ7Ka*nY|o;=g$(u--1}F-i`(FWX!* z|6M>C$6FrwkA&D)8(aF zk24Kug=~3Ms@Z!idqY_oFF!korcz1Qf8=njec;XD5&OnV@(jXYX*nL^Y-&G^tA+($ z6dG3;m$E`WP(54tVwTIW?BsIw;3bi!yv!Mn2⪚$9`fN??J7i&t$^M&sypR=EF zO<&87DQW-ovDHJG-0T{4(cv4$cakI-Po&JqBzdpK(Q4!p7|=~4%#5D+hm95~SqygP zw9nGCOQk#t&?U5vv3$bqLGdC(`|Q=!&nE)4y5s9Y8u?g>CN6sDx0(+cyr8i-S8Dqr zwCl6~r-Ls-?q1G04#Px0_&@AcU{13$5%Lh=rS~HGWoSAh(jnfBPl^Z*pGLX>~6bt{GEeyp+`3>vGq2H9kEIKD_2ar&^VfHZL3hfh zLX4*na+Y899p$0ux%b_^9IjMhrSsv&&esAaVz<8Dzg{GiOLN~oSL)Y;U0OHISZi*R zb8pi=*{`h4<6EpPNg9RRWHTRCcU*b#)P~mQMlIbk?FdniG+O_s%2RL2Umf2zbUBIQ zByVkMN4?He`+SnlE{*SUtvs>iiC)x`t`z~n@V?YF?xbLXK5QH5^-@BMGdw-_H0 z2W3bvL&)(NTKg$D8S%?fB56)AfAgE2mC7H(^r_vpSxHNpD`2%xa>F}8dliZ;Vi8W1-+=@Jb%3|OqC^`I= zA5nMfg4J)s!xTCNzcZ(Hty{a@R<;;ucYi@ssrjLjRF~GxY(q94s$5A4au479&APr zTUfXG>xZSixqlTTlii54mnacsvm!TlD3;GJiTo9pS8w#%G=5zECx_h!~ z&Z*_H(e5NZbor%#sfZZWfW&1=O(W`_AVY!mrv^Met75axr_itZ>DWz#d^Y2st$4|1 zn6M)@`GT_aBo)6SD4dDQo_ckkd3kQ*m3-!{gd2Da`D9np@rtwbN;dA@{K6K@_bk07 zL*8!8A)@(Zw5{wu_43OTUzGEomjYA^DauM|yZ>4`l!!_kto!Y8^v20t*nH*FnG7_( z>v@kFfgI&&fnQcrv8JuNcQuw>R6ZfJOZwbS2N+8Q*4Qbh(_MGSe-`*l%oX{@@iUFZ zllBWvY^B|#743-(-^(-E%&9FJlsHuf-pZ&dP$xM9DA-=EfR3i|f zE$R;|TeUB9ycvE}N|e{Z#^V@q>hHV$``y$nDC+OM?Mh0w?P$IAcD>+mBWu_bfw;ph zQPD0Y@ZanI{_KxArv`Jz@ERA(>0Mn!AV%zVJiwtGEr{;Khbxk>=K4G4_EuKx`5a#c z{zb)i&!P83fz{zvZj#&4VqT=jIeO$|hs8Qp8gj>7de`@;)SwpQelg`r60*XE&{@ zuIVeT72HOz`iG?5=lO!ydg=+F0YP+ov*{lj4LM<4A~*NHEZ2{fIU7q<`W#8XccWzK z1Z-#%@z3!Z+N!GdE9nt;3i|y}11F+yosMaL7>7V)e*c|^LOt60&iS7$l6Cjt7OSC~ zSH{mxpYRS0zB^SDAtWB{F=NelyfyzYN?FE;)~R(Nb0K=Nkmj+cuR>Q`Z{IQ+CF99e zE+B@WhM`kzfBiX`zHxf$$D)yF80xoks_`@<>0f(FI@EBPx?vtyaBx`-S@F|`eV*mx zawmn78PQ7OPd%>Su9J&3#@;S)8a~?h(WJO#l)A8ZrTj@d>@s<_+_l``vpD|es6w~4 z_L+Q9sCc#df;mRgp?)jxo6k`+r-`s<|M)uS;E$L82%^v;O>=i;8^4^xpBSR&Mk{T% z)trTY>!18<*|mBkoCa)ef_KL%+=PT}$FtVBpxna<{8}yGKgRvNlO_?0`t6z~)-6Wm zT43PAABv(0lQ)r*)xMf$G|PDI_~XERwkl^-t-FgdMi4Hs$Kql^Fr#`1uV!-xlJuR8 zp-lL*@FCU{{a8(=@?iWbi#cC{ zH=^j&|8fXr@3Ozq{yr`WukQRFpBg3`%P!#ZCWWdkjN7jNImmIDtbVzw5xerC^D5kl z&b|_a(Y+pi5G2IdmOs^Oi1JPR-InITGZXobx0R{Xz`SBW8inajz4%ZYtS)yTu{ODy|` zbKQ;+mef~cy18}ZVp@!j4nYNqKvjSMO7>ckV^0734#=h-x0NJ+BLh?){`*27B;F#6Nc38OHMv)5P8dqrju#Mj^2AbYrgl2#3cl*bp`G;)xBBzyC0ysXQ@b9dDcA)5 z_-h{s8B1kY%kWV-K!iDsvZ9)yL78+ehtLHt{bc90jEc@{xKi+(t2VU<*k-w`UstUK zb~7Rbe14B+7|%e^aM=Ic`}>86`p5FJ{7EL*>pOZYulqbVK(f-wQ)OYH4J(kZY^6jL zfr3kgxXu`nB*+tS^NnG?c%q!F3TqS(i-6QWUm5?Aoo)mSv4cq}G};%p7t&ng=1PBj 
z(FlTLE!o>wl_}{*47=c!exKXaiz#shJ?NQf!~qnj^1Aj`P0MxShQ>VjZ8=! z=RXGm?z-G@9W$BkCYPaGz;);Ur0{{TO;6?GLEyOv^d30wImYvHFTb|pGQnI8;QCtr z<3i3~zaH%sAd(u8b~77KKxX}JwFy8%{;|K8U?c1ijplEG8I8t0Wre~*%lW>YXiHK86?BIA!;dRYC~ejRzZHJthS4X%`i z#RFrv$x2>;{LTMUZ)iE9lbhNqvrRYd^X-8%1Bt0aEXU_|<20ql3dmXDKNy;@x9y|Ej1K5;$ zvL!lm9sK)V@C=^O(u`P7iqRm>|&jnO< zyK1nFsaE@zov&J)q!lOK{6xz1J0#`CT300iooLABlIOHerxjR$>^`3LaEt$r-}?OtRt7e1^@ zb)wJz8AXjiJZTJNt3*bz(Ba|`7#d4~aB$gjy0sI(^18p;ofh$ZUbOa$7}Xz}K|;3H z7yE{!swt}ozHkw)51j1Mw`wF8=?@ZC7v~r8ti(_P>uak^gJuf@Yleqz})g{K@R7!68Xf|1_@zEYp&#lQAoUUVNCFb${WR;B+P$+&9nSpOxK zg)NDLNH0zVqGM(Zo9cBsWa%4zdsJIm9SWG1QQh%>VNBM044{xtTye26&B)NOl1=F0 z1<*r(s1+Pd*T5Z;MF1l#{4s(d4sDFqh!cdQK*U|-rpw!^gaas6l7RhyxhH8?I%~<$2 zu#KJoMgp$?T*h=x^P95}ec2@z;BrwX8>LLW{!>)LGXY#jT%@eG<%mzlZ|8`-#}9ip z0s(t@Wvk;CJL1UPUTdGN!@FzlMS9622UYA+%lrj{K{0v7DAmYYKDI~y0A&uYrx1{; zK1Nf^(8@d9d82@d7q=WEh?tqsO8%JZ$7r=ojyJ*LGH|U1?tc(tNtJ}Xd}yo&v~+S> zR+QrwegBQTrnc%B{K~nxdbs9|X2G7#GP$wAlQzq!{4%@MnuH}+(y@y35seqE}xpD_@Dcs{qgWaEW&ZNwnpcVNwDHl$e z!y$i5Zo_D{y(0-nuH?8Wulw1eQ|kD`ckeW{;e3rke5BMDy9XS=Zf6iZY~d00f70ni z!Vj%e4o+KJS5e8Bbq_1JKqHb09W%ev$i=h$&0=y_tcW8OZ~>CkfkUBA5hUYN05|>Z zzG@;&`Eqd?BOisspunO4E?xOshaS7>+c?;jcrj&^WwkX29l4dQAC=1q+U6aLcIP>F2Ez48igP2uE$#}K z--y6LH(>atACpwTUEHLnyW$aMpFcdj1N_!Mj>uz0rGrNmPCQf=VV75 zP1>+Kva$tk>-_&Jgg*mb29Zx&-w`LvOv(0GvAm@a=%*PyYe81V~d-Q{Z)K2#>y+ zl21$mUSlWJ@Wf+57WmUj`%hhrKh4CQLajd6fd`EnDRq<%6W|GXJOOYsLQmi`Xt7`a z_yCYO>djry+RJ7Yn5EgoL6nW=TX;n?NT5Tw8s%&@q%PmV}3-55Ge#FHuuoKYS0;h5`+W-aUP!!J&c#0}P0<8;#`R^eWc=QUhh@`p3=$pSzz5j^hK8YU1b zPch1KttR!phx=QL! z$JZCX1#BCk_aQBpDf-NkduCu?RtWa*OE@|NL2fJfxI_49eKL%ZY>l7e=Kr`VK zK@=`S!Y0B5f-cD$_;KO+s4Y4LCK!`^+8FA47QATsb*dOFl!-3~sK3=F##rp&!kKsL zRsann55xxoOMGg1mYfe)wJ09diXD9%9rfGW1U^qLIOON7gDiX+VOD)2oq{%xn#}FI zidyXA)Jcde3bw}X-Mokgz!SzWa1-?mWmR79k>It|$~}-t!g&yc}OlXTtlfcE8c_lsWrv|tO_7}>ObD9!N|}kU;N=KDuMid z-NlfTLwNSo%@Yggkoi3e@v-H(k$_1M;N#-QxF_Ac$D_*CYvdw+*#qIy)Ef|l)HV1p z!+6p=(q-U*zLCj_P}a6UwkHUG*-sS!$g?Odc38pHbcOC&(>>yNUwz^#x0$FAko~C< z5^f;SzTS=^>)NQwMpu3qIc{0Eh;H zhJ8I8eSLiyiwq;kN3Dv4@nWU@tIG_9CO$I^0GqrP>)LT*(uLLn;HH89@YMd0UXjDdI2^q?!&vpL*ccYF7hfp0AS-ZP3V6HbZvYG4WRs|sk>{pK7$4nac930gdAu# zxZ}9uJ*$2WwQiJ3G7*{sZ%1i#F?GCd-+Dg;;`^e}G~?9xNApb?Nbpnp_vR{FPrzjs zh1&D>y@0q*8wRQdjA}3khJUs@?lX(-n^V&K#?56<0%3URKZwu8r$+o~yI@Y`=FV?_ zazJcLs?EmOAM{L#5GL69`C!Is)e{SHIvhPp>(-5S=;p_CVM2Uks2p157Z9LH72hTLF23w_n%ub$lnXsVIzx;YW5|Ve{cY5E)yc`E>!h{ z9#{%xB_b&z*AY;7uEhd2PHd=HePq0^Nei7b-t*3NQb@7~w2d%Pn!D9Ev$IuwAb~d% zq4idKbg#;?9($khm|S9^3Qha!7%^|uvzyp8>xKZ- zNFJC(oIpmE^?TO(o(EksuCc}0$COP|;|;Fm0vR1}K*7E^!fz~S1?CgJe-RTNHv-9k z{VZBZNc1?FGS<)Y5r(bOO4k)ozE+&7Eb#TK_1(CaRp>}En$@xC6998c&zTu`a?!RJ zVKrh_V^e0+*AgCOarCcU7R#UPPW;$0E?N6@IirbVA*{Q4j#Nba+$^Za8b8rab zGq=M-_XDFFERzQZ2d%&>Kq!1&Zwi6=L3)IP?-7RSk^q|PuWQt4ep4GD^Jh^aMGAs= zD8BF=u8DS_6F>qGEBP`(S9(3GK{-)E+EJ3I9!@vBt)R@}bJYW~$c7l8l%kdj9U7NFt9v28@7z)#Pxz?7-JuV1NwYU{Si@PThRhEpu!(BUv& z!uLz}Oji$D$1_#^YE1h~Qk~zBx47RFdZe%KuE*9+)6dl&0H`)*`;R-ozw?=pD};(q zo44kapm8Y6Uz=K+pfe(9y%64$gk@2YG9 z@K2*@q?a=|f@G2cVkw6#^dk;a@fY(IS_qW2?PocSo)t} zN=qJrBj#op8R!K;UJZZdab-!((>uFRb3YNIh+Zv8djwx~31AYy=j+T1{aBRJ5QTFs zBPk%-3pYS9u{S>vH8#KR>Mq?gXddZ2H$xM3l@%jSF@*#UHK~0Inffb5t6GT< z&)~@3zw{P+EM-|9X!1QrV4K7Zbvxnsb->Khv%zKnGZY(jPtlujxN*eST$`N&+h5Wo z`B*lzub`p*l^hSUkFw{(6p;;`WE~6h-*C_u&|&NRINuKfXXReNYmgf>tP+~yJHCQ5 zo~_$}hG%yPp>dgP=m80h?|awkL$E)X3#%;9dI`pDQLn z9e#r_Dr2|DW6@ch4D#<~P42W))j>jf#_vaOw}&7!#Lwb;Q@#XfndP24fjA^61d!0~ zZP0$XrD6SGK9S`$+$}p)1#}Fx&ul29l>gqrj0{^4yT`TN-;jfWf@$vDL%*XvEQWc< zWh7)MV4U(c<8iYLPX)|(%6Bqu14e#sUhWfXR&rp^{I;;bzsXJV2g**0GJ^5wGa-2{ 
z**)kCQ)+J^aALd#K@l$*1(!m2$>4oN;=i94)!V0KuiLBwv%OqZp@bBJ@WieMH_SP_ zU3vQPW&#AK8Kbvc>pPw6V-`Tge1(xuxBp)R+pBZ%#!Ck=T_cPhTHfwRGC`YZQXHR; zo{%`lw?t)_k;e#(A89tY@zeSwcyWfXi>>C2uO36ci>3k75mDPb$Lth#*$=aYy|feW zdXX@C4nFnPza36>ADqlT5USvf=TLecw|CUO!PQ`P;6KJ!5VWIg<`5Bac=j`>;ihyo z+$Tbgn}Fd=ASV`_xbJGd=ehN?$IvXvB1ZZLJ!*K!Pj&p6V4)HCfRH9op&MdP6Fp%Y z+P%dw-M{RbHdm(>VgvX!r-*>5UMqh2+yl(u2g-M09f{c^zfAYpaF z%5}zKzn#^i5%S8`Lgg*b!w?h-9vuGf{)b*lU$&m;fu~bDfB}M@b8xMtNV|c_nh-r8 zswtlUJUR?$)0z}XWkwF|l-VtCK5G|`sY-ei{1CR3a@RUqXuRYUTd`4C9~~4+UX$TW z9UHYpdJsYXg(1-LAwc9e$kMsjJ|O`p7dlxflK@c$!Uf1y5wFwP!vy9w?2I?eo;GI; zh2=jkHQ;eP7^RG~Els_@cetbj69#_<7qKk2hxXv{0EV^(@(bz@;D?}Jem<2sBf$eHA$;UQ_s-Z9scgpHf6{F;g1%EBO# zb4YJ5+H=ra0}>oRIK}fE@tpQ$O!KbB;vo1BhchSgi9E(4@4SP#hZVYjCXgKeIcGL> zzpAEy5(0e|57O6XX%@YwBmKR1z9bb>W$*o{^p_6fm|Guk)XIVz)w(zy32{-MMl4Uc|8DisKz*0*Ejqm-@PpmT8T zu6d;u-Q*m|>5+{Agvmg5qyvd>&bR{!a~h6qvBm!~FT|BX?Q`$sEguO&;$CbugMs)l z_i}JV$xO&zRWhcGQD|Mauj%myv}P|$a_6D54%_XxGsKj)0GDzjB&fpjqvxg1d^?Al z)b5i+Lsy`X`;%e$5L*`;Jm|5&QMNjftD^k$B1lOG~v!BLo z5%z!0MgqvU1-H#oz!AyHMt6{xc^-%XifeXurpSPkDhBvE;EXzbQiK3j$}R6mt6zph zLV$5{V)Aov!0M9rNM7F%5IP* zVa$SLNNzM-1dP4EcjvWRR|0EWUP=1`Ut=K@)%tdW@D*Io2Zxi#YO1bodsfYR4wH%H zPD%S4YNAFLinx!@ zygu6ZSa(`FZqTIS)KBXY*luk1lhfroFogrlchIOQFJIxCF-kL+SL<ql3&RJp*Y_ zaE!+VHnq}jsGz4QgVg$+U~#~cV~LYv#95&S))LcOEJ8-sM-!s_P=hr)SHB~95J3^y z6`r)@w6rt@PU;rliTV<<;pkKV-4zc6qP_bUlcc{draRs-G8u~l6c`m^3#6nrv3I@2 z2RPaaEhesX*hhM*e@H9{K$PZ<4jwpVVaM(r_xnoZwUYReFoEUoqlh;S>p~W}eSJyx z!T`2}?ce+XSj5$Q@AX~lahu#0;Q(y zq8Jhv!p}v3RtC4m31A8Z`=%hta3Z&l#&F%PFysqQv1(^Jd0PS|~ zJpYc>Z)csw+EbH&)Oq>!UXF%im8c7l2f)3X4iLk%zJWCdU8wbn0S0DMx2rLCbr(0l za%5X9#Dg?0W8=Lb`dCX3m)B)$jngj7m7C4N_Qhl*euXaX6_h~XLWw8_*d$y}oEA)9 zhx<3mKdi>BU)}T;v)jHDT;?PpexkUt&Gl5nMSTfm%M@UonMq+U_lgblD*ZLZ#8Spo zrJB*TzFW<6N#Np)%+0j!yhVv47itNKU=&?g!M3xHPGeh(`5k?edUR6UJcfhjkVs<5 zGM#u9$Sz#;_A%s#xXH`8F)~g7tlJh=wIcWI0y=f_ZlX{?Fom4i&rYL8@VL5N)1mFPg;pVW?!;LpV z6W+=k{E9FtV6)00G30Ju-Y5+i{VED2R4Yy-wzixmV#}r!_Vwrnq`B|W9z4SN)Uv<* zn{qRZ+}g0-q#j|~QBHt-X}2?soOSebiUNKs!*^bdNXRPr_sfxbH zxt^)rwaT`6Vl4?MF7jOdWUGHQKsus9N1Jv6$5b(?WD zWhb<=<6%yt;;&_BK(GfMRv(Iv;`EOzcO2ezUNiIRyw-KiYz)XjY?ZqKrm8$0k~b<( zLV&iz9f&+`;#GRfBr2R-OYg$CPtQ^F5p@MuS2xiU7wXfHf2KB$>}@GKaCMP(90a=9 zso~7M{2pN|EzdGC)^fK4qMDY4p!SVW92Y(_*^hj4?Rigk-nq}&Fy#vdeyzy8D$pfe ziY4EuyN-$46#9MK#sv?9hJZfWg%=$M4-e|G5g3ixZpZ_sf4?kGH2dw}`?S*ZjGtot^V*tD*0+h78541z{(5Dzk$GnW6CWQ2`493oO>GoKgCCBPu(S1hesZ$j zJ0oskC=_nl$AA6duW zUZSDsZNF~X&bgexFaz7I%1p{cWzv8#vhM8Fd+Y~Yn(o)QULwk$E??FpHjfb405qCCnwCq7oUQsq4?er$Vq%G3!~%Df%6kwNWd$0uxNeG!$EI?z z(D_wUn_$zIJ`@0{2V3I45;ck>4mrGRt=6JPA1(l8pzk5bs+FXT_gR9XkVz^>o1opq z>8Ac_lIVq{5NdZw#Iya4NP4qZNu_6EaB326WXM|2m9=};853>yOO4|WX)&h8Z5(_B zoC0X23JJ6l*|R;XBK`9W&_LXTeP5JSdFWu25LL?8M@D0mnnbsQ76YnjL3wLr{yUWd zf{P(wh};=ry)r}M&gVJoX2q|^)Y81gV9MpX9pF@kGjBes!A2r7O7-`jcS=}{dA<)j66K1 zApb~ls2lIwomD#mT$FkvZZZnW(ysd)Zb{#v521YLb+3ZkCvECg)9vzRH;}U9iF4VP zv6{cRgI7(d0p?vkYysfETk06+Fpo5*#&w3C%zL|!ZwP8JUfo(wjvJ~2?h?`QV{ zNE&3lOREgIS9YX_H_XK%lpCj4bCEcA-|WLOoJry+EoKkCgahuTZF%G4 zWa5kU$oP5%=B)I?3jRqG|B8;ux|W$K=>sVrD3LzQA=K>cZ&J#GF9`)$0`Kcj%0W$4FQS zr<8bf*@Q?h1t;_9#C50@;kS3V_`iu>=`Zr%5I8@3M5Q(jnxBSn6&)UL&b&P8aSO3} z12wx^=8rSf3IceueXB+vc$AC5jN_~((Rd+9uKpI$O-})J2a5a_H=t34508Lk0_7^o z=*LiWYBQ%}eiKvSu_wT><|XnbSvr;DL}!R1-VZQ+Qu9+Mc~owo4jz+tO9V5CEhf_Q z@~kkB8dkg!Idq<@4gu9nM`X!TWDu5&_Gu&&lFx6!C^{Q2;J zZw%GINMYC*?aS>-vEu1XcsHKn zdy#&~+u}T-@7-t0l>L|Wx61skWul+oLZ4qb36{xG)% z*;ByIeoFD;)Uoylhk9%GTmb1a6LL^PmoeihD|)10oY${rRC+K(#V3@0coP!WEMghhS8f>jlEhS;pq-;ghU>4(P{U z-Wue8weuB&nG8xKh8}(xr6_^tk1DGHGeUMtip*N77tD36_ytIljB!gImqe&=LhsZ< zhU)dQ<+ZKF{sNQwaG;V0m!fyu%u!DC3 
zcE{IJpL_P%uqMeBYEc#X!@dCfx9MhcyX^rZYLu7Pv=>AW&F~>aL`kkFeLHFkzB=1 zM^Qo|n7MlSlb)kL!D9%&=2q#*LV{0Y4tKDJepr?rn(=Shtu7?)Ifuw9m9dBQJ!rG< zX+T>HKX?M3%2=wCAa%WBn+C==k?5zs^;pgw08nvDh?0u-a!AQHYm%oUWT@==0&SA2 za8fpo;N>3IMEn;VHCA3Tk$`92BKfu4Crr9_z4KaIBS`D_f$|?@5eRZbsBjD5Sqd4? zuP=F1FipDh?$u39Igh4HMb4i2BU=O$=OK_Q+J&=Xr60>_c~*E5^k=Yl=^`UJ6D>(* z8E)?uto=00wY(!8v% zC$%0(yFjPWa1STSKwiu89)cf*`GkCu3LazvkM~gBsYze0Te_B>Y&T|TR$s!975Te4 z1iM)daMq09UN81atNS=*mMM~?KeyG$lRkC9*R5Zm+pAfcIM1s#G#4i*lzwV;%eL>) zR@lW=Sg~>3rF*>)3~QZwuyXy65jC8xCo}^@TsgQ@gy9yqk@bFDU70;c2q)n({A@z+ z;EC0;(n6l;NNG3R1ap@0Oh{ABD<9s1>G%X)#}}Q~I0EN?>{rXNdJ{l4%ECvySdr8C z?e^Z972;xb6GnE#QF(pK?}>*WkE&oY3 zk6=<$>WPT*#Pl-zZTCWL{WSNtLU=M@Xuo0wNMiW~lb=!(7N@S%Z|&#sT9U9!7}RDE zKp!2!z-oAKIdTIAtF?QFwqEycmk%E7M<5WOjb%fmgGNWB(r1h{SQMy0R3&uT>7GA4 zr(q&S)(y1kH>GcT4B@D!$+ODo`a`#glmT_YB;(c10}@b zKk5cBOTmO(vYi51!78{yH59aji}{a|9JyD34@%c_c*ijk+n?;gpW_h1TQxUs6M(r_ zk1MSWHD}%FTFIe?vaBgGvW{i{kK_EDPh45|Bl|c�h;65d*g z5p|2ZsNF2%pIivZUD0dy$qJT#2}G7nlZ%{-Q*<2025c()^{$)sS!G!#e( z3u|`(ns<9>+;$NcqGeo!BGhmwei5S;kM-xHPX)Po&TE_-XX;-@ytf9ox*yiV;^6R5 z`;E8v02q0*Y(pX|J7{l**4d?y+{H{OF>5p!@xvtmpTpo}6Q{(De zk>K&p-`k(+6rVp?71lcq!JaK)&Np-mT<9__WcXJFXIj>y`5VNoQ;|g(n=|(!!MkM# z%QBUO_)3ssmfXTjfN90CaNigi=;;JQ6TG%A9nu5BKi&6Crw?(Sbcx6}^_@QzEMg=? z*p4X_>33K=oW0^a>CJ#9L`@4z#kw{o82t{MH2q`K0aztvhf<5XYULkOU_Tam#B8W3 zL$c2CY`1hx;(N?~6>}Z0ZN)DX&W9HA%A)q`Z~Kef{#w#PzH%*Lo6_|nME2gwgKY9JbQ5u3 zKqg(OYpSJ_mcj07g1jyYJ}j!X?>!vZ>sJ9{)R`Efi{3fAR1jM4^nH^FgVZ-a-9G8h z5(lt^k{Rmrg9d zM)?jE(AmxIgQT4fkv?3nT0dv_P`KAixYFonA$@M{JzL&)P8M_p8ZvBs@#yo;NFnOZ z8Ug%Z%Dkf+@9E+NMB`?6rO4G8!`Wj!qU_rHeg2SrNUl_9)?L$BaJ3HQWKskIZdPwH zzw+ApBmeo$A?zM2?2PiD9vN(5pS^dWA9xU{in0W&+LLn&gs2bxgY09wL+{3Ed#yKt z_inRR`==t{&s1IX!R(r8SNEf^(3ZgKM3L!@9`}aYNZPHse?mT=Xqad93r0>+`l0T+ z9LOAioRm&FZJpf_kMiw*Z|6an?@MNTfZ<-6yQrMa}b?-4wqs11&vC&u6qT za>#exGa?rJzqwtJzMOU(dgc^Fli??w$FirPU0km>3^(dOi_?}(ylN8_mb#bjJ17Ag zP%pXv>afV!;sg`1_Na%b;ekcYGRfB9+&o2B=?*fr89XS(i){@9$UNeOH|L>K~)alLQs)?dGQe5 zWjScspUgH5g^yrzKi0}B0nsVE|6aJ(PoG{19(D@|Rt2-yP7q%s#?{LKE5>Z!vh+!} zAR~#GFaK0YoRa{ZCudyDY>nYzC?HixuxIkdlsbeqE`dw`1*fOmQrjc87pIJD+H#fNB?F= z#By37dpIhx-&qvuMQW=dT9X3Gg=YB+i`Uku{VqX|MaY;EoT`y>1}S)+&fqT3s6tny z8xUexrmU983-#qv3`$Ib_G{=FV_kx*6d|e1Q*OWb z{dahV(1J^L5df7sazPU8wXwep{^87C+Tp-v%wuqBCYyMx>O6yvIQzUgv$_jQdUGa= z5jlKk&vQHS=fg`oMiY=;1R69flLP)`*P}d6NFz;jjX|$?vz-{oH6{#a8Qz|2Bxx z=10j966%K8RO4?Dvb^<%R&&p_L0`~+ybWBfV`v~!L2Y%p=>Hqn#FqcYa-bOwHrExqLVp&9%81W7cI+o$F@1~k{{+$mF`fMYtZ-oez zP5p6x>XAO@3fI5{@YOy(K*0sy*e1lqIEV^)Cy|P?OXS=>ocKX=cyR}&n2}e-ZkAf?aaGrHP&J^? zDPbQQYyEnUUuBjtFyRhM@(zULg|`lbl!bRXaUUqqJPY!usmu`V%VpKkph4s-9G+iU zRrRCAJGkG*y&Z{oq}o>fH8Q~B`CkmjNYp;&z-EhzHs$_-^ll2vxAtAxk`#EB_An@T zzET-+AJ#j_7al-#N2md(d$SeD*@PuK7j~6$Ovz2&O2ZFYwJ!~U$Q&zhiCYa40E6u) z*Y4FLbX0eoSFGK7$@{L~UDK@%a#F-nOdTn>G2gMzBdR#z=KhyY{LwkFJsFVFt$ zeTPxEcFD-^^fuF@V5>@cC%gRDjQrnJ)p|^eMrFm9sZvDiQ*y?l%3F8uVtmd2$XuCN z443X=;73&Ag|M&t>??81bY|OQeImBSymqi;&ew|(-^K;yQb)Sh84OzW>G@APch36< zg8esx@M%|)APZ0dR^Y;}Ar*phTObDCBDqp?=_;na8lX3!n<&O zd$~#|Rdp>c9x=m-Z7Rbnfm5OAt5s>TQ_Q%j8-BvGhs;A5febu0h&kd_e}q$(9QxI_ z)c@MwKG&;p^a@8j@kVW}T!(i?oxx!3oRTkP=-EtN1|iMXxv_}`WBVaXN__&BanEz#h%mK`JsO|!3vwNNxVtW23(3X$jAwk? 
zUcR}8u~{DLYcP+IP4Vh=*$oZ{UGs* z>Aoj$IDw>LwbqeaNz@C%ytK~Ocn}$BFy{YuG2nC)OKNKeyRxuOL~%c_WK*>Z#ypih z_t6Kl;Hy>zA~EoX=T572b1P4#@Pc&1L$60=@`Ey=HFQFYBsT*^r?f95@VAXBfl~_FE6qU=Hy^p@ z%NNDqLHUXjQ+mQfj^b)OS9N0Jea7c6Q+q@$B9$H~hG#g9ML21Y(;$Gjo-<-G687!S|tC8Um+$@<@0_)fsn@!PFmo-tMc9F|Dyi&v@s$^&d&RDotg@Q!5?_uP`=_0Q zi#eG>gzJgvT-H7yg;>ZS8eZ~Wt?8TQ4-DcFDn`prfi7nMiIc58tO)f)=qL1#-ZH5C zLSYgqNxEalu3bdT1Un%WJEJIPx<$Yn!>JJQ&Q?|1Xz7$wSZ7{6uJUSKM-XBJ)j175 z*5ALWLlw2EX}O@$uk8!ZRJA1k4+vA%(jS+Omy=tY9X}E5oNNg1*f5wfvN(PF=u!v# z{n8)Tc(^t`yHG7F! zkG7LTsXvytcVZ?Ir|{_PS}HR>UGfvvEjM#fm?I1_pF2ci9W( zK%(|{`h1EjgG5#I?k4nHNTyr8?vr18cCxR1lEVBF-SXg&dfRG^%Ia#j(JTcwJHLGQ zDRsv}gZqkmybxjHyMNu&(KJe5|DQYKOK;}>6!j}{3f*NDj^A&U%5@#HS6Dt|w|j+> zh@MkJs-UBF1rg`Po}N02w79zO#d9Y3;Y)%cFo7xqXpDpeioIBVTf4WxKN76C>gitU zeUerL&%pl<@0;5DaNBq;G>${Al_7baXESN+{i2XUM1H45ez5#rp{`}F8W8`AzN^_~ z0N(0q=mSQ`yQoVOciFI`yooE(jI+eY;}yHQQwvo!r*rdiD7RX zG~ebh=qkR+3uA>f4WkC_HPGTA)1s{~`R{(_h;}hzvLZeR6A-bI>YFjf6qF!5|MBqb5b`DA7@ERW>}HP`7kE z*NL@vMAvFKrk{+8vb?{i5yX63Xj!*R z;%e%8O^fE6T^$uQ z`lI#QD%Gv#hiUzid1sktb&icQ_W{ki(lyms8}}#dy@YHr*B=>w4!6uYPxUJhRt~CX z6ii1{+(NC|=Y%*UXG~*klBei%N^;Ufx!l&77Vq95wXG7j5=IY?S9H)0#FALTxzmV- zfNm~+!nOf1+nvVw#>z1n&~yh+0V0xgUcCxg=)b1EuIpI_i&|l~TO2%jN!iR^M0U3` zWl~+8w|ywABX-KC;kq{_{D)6I;$gR=Q#^7^FilIz;AR4d!Ny0Nh$<4Me|0X@7~7nh zj+Ry_<9ytFN@@mfKzC7aELD9z{9S^kH)W?$zS^|UEtL&b^Tc^^YsJXz&wR)4fZK z7I-2}s?MMMsStdloL7A!ZNZ0X)9>|b;Y%S|hRpWsDPNXO)4X?ePsi$<%C;+WS}wBV zZb_IVzd!;BK)4tO8qd%M}9NTImzle|g$>~l($TBtRnQH&92d}us zWO2HR1&p~rys%lIlGn!j&uls1@x#PkL7i7tPA*trUp8*S#Qa3>bdf%wG~}iW5#3MV zO8V{6+jSf7w?2J~LD6=HlkPlm@5cY_yqr-@sJR*_(jV4{dpPJi?`OlaR}@}w893wQ z3$F{pZsDPk-~F7~hP-HYQ1>;T zu}Q@UPF3f_ky)QfiJf7!Y)vyf#;HVNg9Rxrc&QAkt?KCy47C3GC>MTGGOk%*=X;)C zFT`fo{%Ec+)-d8(l)wcwyWQsv*lt`l*>WNri`?4|>vGZmSP)xu5*8xLPkkDv&Mp3w z&tbT%tMpT&p&->yoJNYr@Pf1M#X(ns?q0o70F5=Z__MRDAuLYI-})&cK$8AX((&`l zmR&-~3kG$=`U?i(@M7y&SO0Z}iB^}Np25i#`+aMuz|GwDZbZ zR7xkZfqB+-w)p95iP4rb#f{B5=C>+B>L=&Q^ag94!0d%CReBF#F;^$xJ8bSCW(0IIc_?B;coe<;qjT#8Lz$VOeK)iRV^K#Xh*nx;y^wccf=DU zTR9d=HD?z$u2=X4!<$1XEZ1W9)kdpqu?A0zhh7CCOeldmA7h0Z&(W$E!Og zEpnrW@L|mhxi#nVN`LEmAq}&*00oBWD|E^3_guxm!b8?0@kM56bvt6+b&tsk`2p36 zj#vj8h;D!IUrSgHKLTqBk02n53UG(-g>mJ|Lq}eH-;t zb#PemkiF89%3Qpq)sBbkn=PF1r`gI|oGdl@q_Kt8-HSJb+hjh|I06#N=va}$#_!+n zJ}ZJqo`62j&r68Ic9m0#BP{TpHH-$6ex5uR6P9?^J;v_2JWU6dRcIM8OOL2I?$?i_ zOUbO&aN9);S%{^elP18v269)7erVT5LmM2?0L+74#;}NBY89II0;3si!he(m%#u{l zC(G(W?mSfJvlPGrPs~ob0Eud!h}k-n^1pMh;L;(DTa8RBD?hBpA7_V9+h?j%;_8JZ z6e=m$Z3kQ{n`wFjf$Q}@*Tv6#Y$4@4o325THhc2XHTjIfuJ9?S5Cv&Ma-Hm%?uFRd zo_DBECpUf-Ix!Xp<#uFKnn%$@j5<}!bTb}h<=|3ZR2K4hD%+(S%x;pNbEK<&Dbrh}j;5}7yLrQIqD#MC% z5kJ8;Vg>5T+8%Q07XGMkTHD53)Rp6m7|@VLZ|}KeVZS)*KJRyshmR)9@2V*c@f4VJ zT+N^twuPpLxbENww5oG!T=Yje!=q}u7_p5}n9OYvQcgtVNOXi2w$5>+*?sbKj{6U) zWvC0oUgPniW>2azOmQIju8UHA1b5z1g0BLe4c51Br*>Fogm#l0tkd$VI*ptnmb zXa4feVcJ)vO=>nfC<#r?&F7Y^!9URtiO9yAEfQ_iMh(aE|DBHT)0${Ir%wopkjFY= z5{O4oQ2IgoWpF$$?(G=A&HVCTMO5QFfnb5PbWK+r2MzZ>C;sNNELT1@_7sGJYmnfb zHs-*xtNWOCQ!qYS*^{RmF!~J{zKEl{QrF||Os;EW`1)nQvED%ekYNYf|MO}gI8uBCl;Eyoc_GJSd$Af zS)8z}LFh30OdokDxHd4sF{MZ%L&6GE&KejQdiFIiN^Y`kPzF+ z2DZ;L(q!+FckJKpf3vt^(okZZoTY0>y z`+S~N+rgb6235}~Xp_M%#Bz6Q^J28_s1EPQo>XuucQy5gqNQb_>8HYrhq{(DHgC|| zI8i7csKXrK7F_-naF?2fK7L(6=8RIMRKwu~s>+o}it9L4B%bT?tzbatLBpRR0cS z+-Q_C*zQtwHk;&i{S!fZpLoYqy;;#9Xaik-~!EcL(Z`RZl)Zu*lcayyC2@Dtv2NQE-uruuDiwyldnAF-bgqZET0 zBJ(gOT66i$)TV`qvlKJq(cM}aEzQe!IV#^3i{^<9$?9!cw0X$G&pidCI)^&!^x{Z` zFmt9QP~Czv4$rrYTs$^?)5}3CgNV$TS2LtmBU*28tdTSR*mMTl36qLx!fN3fbPt$)?1u?NieNms1Q8q67+5B8?7=CLkzC&=aPE(ycmz5@I;R`7_v)0_ zyaW&R&rPZYryf&_&UsS@ezby9W2m$h(|tauBb 
zcNQY@2$Bc$EuRh5x)_enWHf%mlspZ4_O6pmX8XF^*dn5D(~2)XtMzV_tU4cddabMB z^N7FZZfyR-bW2!xOW~g6R*}{LwzK zXsi$+^@*4K5`KQc`XYIdS`w_ZFnMS)qMdf<~ zF0yQf@Gf$95(STPKDdMUvh8*|3L~a{wC43>_K)kuX8?a+31nVsqw)SRUPoIa+~Dq! z>2rf~Xnv9t3rxx4Ie0|x=gcv+5n2CGA*OH4U-$I~2G#io(>pI6>Fd8xoU@~orDudX zze4>HJ(BMH@^9K0&99F{e`03wCE$c*;jJ_KkUBD0C9EKpmY{v=c%qFNpCQ>6>#3k( zJ*)Fp?;FYJ#7E18pZlKcSx1F}?!?4r>X8)-h^|rFHmjgS#cSK1ikJz#Ft}AZ704aI zfii+dHJ9^}uNUf4JPM29v^V+k51+|5o2$0(_E9MgmAr#J$L}SSb-b)S7+b=dwMU{y z{B)4HBOd+TKVapC@D4Qvj1)hnDTUoPqD;7t7n8f=){o%pydKV$+*j~m-P%1U*q8N^ zHqcWLQ#hK(wEnRMad?2bd2PcOlF_UTCu);&@LaQOVe_lYc%mP+Lal<5Z<9!!3|)_{%7)*A;s6W(<;{NdBmBCC3xYwqW@}wimHJqvkX7 z3xAH%x4S2`58d{AOyBM@zH;G}J&_!ZRgxITIW)us=uyY$?Jb~8`>3A$&_ro@=68h@ z!*8mj|1AOKU4Ufm;#vK{>FjA*I%fy#lyYe-6tw#Ul)GdZLRY+=f5|W zffe8#*`DEF4W)%o6u2 zz%k?*x#!GY;ht4*RX-8pq!&6u0UD$`-*#^5I`W>-zeb)f_ugeN#j-eF%!}bxUR(T9 zzg!UCGQys&yHOVqi@AJfzK&k9<2rZml^wf6QcWU{%PdMa`6o+z;>;mFmI5e+I1(oh z#3m6uw%kYX0X?ZcXUB-Pf+m?1Kk2?~vgN#B`)UN^T=|e^m9-*VdEI{v4s#@^d(M}+ z<)ALFEQdHRO{xd~k@I2PA~(F}+rE;$h7VJjW~i5<6>2DiVCr=B@wRrA)RG$W@EttQ zGaH^-wTS$96*#=`-uSK6tg%SGwN_Nv2QPi<*t_-?s5U%^j3|4nSS6fjjUfyb01*380QLuNELbW`_?na^d)=*rHhTt5jTZXF}JrI zD8CRpMjjl^UrYa<81!Y*wd4KN2V5bwy01^Km{;MAsLobm{e`lYcW-zlZt->1zW36a z$O-Azy%Hg&U(Ee1adY0-_=w|t*k$9KnE;R4TmMoq4N15KR@HoeB#^^ zqDf&QiX0Iy|2u(JiSy;FzA>U^=IfB{%X#ylK_gc$dNR%8daiK*#v(i{;vHUdVwMgu zujI~@CB3icgGs3u^E#bsxqvc;JR#5H_-E5tDe>Dc)2Q`(@Z0S zzG#{+g?SKFZwuN6)S}i?4o%lx4`!7txfaC49r3vM8(#BlkF$2}pgi``>?XwJm#j$twXBBp=!`;iySt1JSCIe?hNdU}j4d})X` zdG=^lM>9kDGRlB8$>Ge-kvF8B>AQGDARL7k*iJyhXiK+iL4TY?Jz(NkCfau( ztz2l^Won8_Or@Z+87y8=I2_&zq1EKyyQ!2;IwM$ugn%78bAG5OZQWU>4@is}e#bQ{ z3ocL2riJHouJb3K8Je7}tji!ydr6g5`kz`XHg7}hGIoAMrW+ul$b%n2(6X-H-ATxf zomDBi8qZwm6CR&my&zv{&(VXdjfpdlnP&|KPu-j!bHE|3-Z1z=uQ7tU&A^dE0_4Mcz{gm^rey6>qXw&H`#y z^?^=&RfWD6(QW=~1K?lQBZ`qhKMnH%PYhz)9HrhawaLik7?A3<7EA7k<$pMas1cC# z5;5q()duXmLQ8J0m;Km$2$c%Da_;+7R$1it{{ZQcW<*Mj^JYjC1mRXQEfU)z^t|z& z&jPw)SiW0!Nla&}MnR5^%Q(GnZ>kP=MNY`9Z*{{rDua%w>_N%liZs99^SlpwjUJ^= zjE0X=nT%z6YHF4m!Rg%K-R5(D-=29g}4)>@VlP4_=wE~-qD^S(-2=h*pMZ#&1X$jO;yD3*puqN=~yzwRP*4 z>XAi%0_;k>Zl2$FcaztOZ>+iEVjM+k6gjIk*@6y-PbRk(BC2rKxSR9Uv5j}v<{3v=RoI<3~LAJY`xNZMTtgTmN5gD>{ zCKn19zdjuWiHB0KE|$pYCC(aJL5EkfK2-!!qD&R{T<-(5FY;(iOV zO_C`XYsF=I5?=VpYuT=2T_L^KJng9MLqz5Z0g@4_TO+LiPeg^pbUK%>oobpIHQbQP z2U=;KCiKgF;}QU}0}S+7e<>)`VOpivMO1g4waa|vG`eO;DTi}pA5{E8V8D^uI-+^P zlr@o}?M_A&Qe8f=zo6jtyN_p7M6cvq;8nQBLZK)cd@qi#>5ct9NrR48juDl2WsXap z@#UZ#^eGbJpWu`m)aC7Vj~$=6ScFufbS6ya5JMn05mZy76nAKAQ z!#h@+Q9zgfgq$+4ba)#pb6+7#!D`SkDF3T@KK?(5N2=`L@X&zY9uSm3mG41Tc&gHc z%6GZ}Pt*r4``F#Z$RT6CxDtp^in|c;WZmDJO$0YF|RA!$UkExRA)H8nowX1mKZA zBI7n5ZlDKVI2{7HxX$Ty@-qVC^P@4qDU%S^T+PjqyFMP|=bzCd$Z#)k?3_bb?Efp~ z0pVXxM%pQKXegdv&03m!LHRf^uae9}5a=Kn3C|Lr!EfK|JSMWe`UJu_c7w&la><-B z&Za&3k6tUpW$O<$DX~A4$X347z2)X8G%jBnq-jvOs)LG15W z>dp`NKL^CLFE16%6+xv6THR6ZTxUGE-bKw;9P9tVXZyKhV8#=j*a_*zXY~hyq%}I!(-S&0*@4e$t!_)kh7Q#+MA^*) z(#&VKa}C!S$DMye^CQJSEjx)df(DvSkWe@y#g7`|bzy_*kLo)jFQ7(jr$Q&ra=%b- zR*Np%E#6~OJuKUf8+4lrtsk^}@x`*BpAe*mckkb^qQ~u;7-Sx?JH}Dts)Z&=j?hYD z;Hg6ouTq$Aq}W_ew#CJ!--`Cu1R1K6xr?rYb-X@?bJA34=}Qj9!=LJXp`1QB_O2nI zPpNWMnN07H01VrRCO}q{{@eS_a`qbHHhnFFr72 zOvJ5%mWER%TzGJ^u3KIUsPI#b2}-5jXl4%T(LeiFd^Wv|-k(0XB1kqc}dRs3_$eZykjlQYkvT0`us)~4QBAqtRipVN|@g6*|+0n2XE@%%v zc4uJF25{KOL?OI`~YXugQC>Ycewa05lcxEQJ+Ov?M?E z7mvG6Kx$_+PIX8@DQHlmYwRTcY5DUx20jLCzir})$b};^!y;_W-ET1z1;MQRPyF0; zJEZU*N=NQ?3i1}pk!^pCEc@AyuS{DfbkWk=Nqs18Vjg%YKrgYzdx^WZ6`A-Tp0zQA zvEJ=(mh>NbR4`-EO1=^yg8uvi`2tenZIZ~mssQy}{Qop(@LOH8U4lC$8^8(zaGgT! 
zqWhjPM4;C?kY!OY)Cp3BXPBgkiue?y@BcM&V|0#v#o^F9a4V}14rl3wMu+bfLhDYs zwzd^wr{qhF{&qpQA;$()oTVO_$;rR5Y{}w$=&=*m8)LdXeEOdufvNqR;ZZ+TGAI0g zSJYS#+&ac@mFG8z{ zj6|faiUfkwdN#vCF}&lG$fX=^)r@+<65H+m7qT$q3HkY^NDieO5B#+~IpO~*^LD%< zv85k{`_mmFDaceJT5EQ|@p+k5kKm^9KPo!z@}#l)zq=E{j2R#Yvi;%5DA1JXTHeoD z6Ct_-GPQr*)7FpSk=jK$NGUQWWJ;WW&t@)LgQ^j5unu}0AT~Xd#wQ_Eiq<352wRJa z5?jaft!jL-i z;FhJWyr7kEMr?a9CIbQa7_FP4$U|s#Gw{kUk4i8G_Vzvh-x-Ra9?=zo>#PR*3OSy34pb#vKLV`i;(P996irftXK#SUFf4B^Zyjk7q}(xE&hm*(M3Guq(IAkWs zolT!%AcK#X#4$x;$8@3pC;+=|s~6nLYq=zq3MATqax0bv6Zo12?6V{8ONSsS%WvzO zql2}tid&+b(vMb2N_Ad?e;#5TXu+>P znTsX^1twSW@d^B?WAYbrIk7&RC?yLxpIKRRh`2s6pOx>N*_f?!F?VQTl24H(^xAEl zH!iOw3;Dq#P{xJ2x*P8~uJD#u0x#01vUA}Oy}nGEQ=xCSuk?G%)z!h}*@dicr1rJy zgWH-7@Mw@aJNK6&_*#&8z zv#x|WL6+0f}SbCY7&4!wY z@*8nu^}{niE;BEcA4rSYal1DkOzfI3kg{(pV@)kuA1N`Kd|Xk=W;*;Jxtq7I{tW~F z7*J*OjY215lAdICIA>Fib2fJ2b23!Z3?%6TFsZb@#D+HA!$vWse-i96Y_tIy9uO{T zEw2Z$b@=UBd^GZ9HEHs2&QdBZ&y^isD7lj8DzJ_yi)$W^+R|`+N;2FV;YENKv3#u# z>aaNc5Hd9&W+@o61-I^MN0iCl!FhYgkic>qk@ru*IMMhL;TP0|sj(8PF4k*d4pE9mF+0XMa;9ycYwvxl*j|6ymJ3J?BTf;O637g^!^^Bvd^`qa?@BHN8RlW3u zsmf^<%REASR+6rZ7g>N?8gY#zgr!zbc~q){1S% z-DGL-i2}cCnYfFd;|tTCD!O|LHZGNc`183MhA=ml6=s0ef?^lsmfSOq0&otjB?6+p z1iV25PH0M2m@6mbD20WqcK&Yw>#47E{(1AVd7b;Qy3pIUX@Ma)FEZIO(5X9SN0MZ? z4Ae~2%Jfmu#>N@_dZjE{C%1QjJ+Ze;HTtYEeH#7EKe$HvN6^!|7cDZ%Wz~@yw5y1q zL1S51J|(d+3_)$ceW79)K2cGQe&EFV(h(8iz~nYyj~$w+sEnfek>wR~_c?JnFwUOr zV9k1MlJLA8Fqo7hHaj^P4E>n{t=Cv8>z!Z08~d11)*bFy{lb;;j5S}^aWrG&G(+a6 zed%>84pB$N{%;Q@4jIR)k%LWl4;L_{R=S5)aA@h8tl3C?HWMCG;7Wo@4>393V zmD3S+Uwb)^!!yhVAPnLeVDgt{kc4&%9tR||v{(E0T>;rSF z#5;_oG=F%q?s`E1**L5N-+1bX+oH+GBUVALkx3Cv67m69qyVodS1$9&OzWx7ISQ6L zjb&Jh+F>%Pf@PD!9 zgc&$4lIq?5y@>0hCtZcx!b-T4Av(ASMPuTpn%N5YKWJ0Wn*8z6C~q4g?#LXq`ZUt1 zg8zR3M*ZL}1KFh7@GK3Zb6O7t6FIj&Vk{Ir)nDZ;eAWpP) z&ZV&3sE8^_16DH=QjTYl&JmNQs2ar6+ccGdcaC_%byvV}{a*CwKT#d)OezVkn5GTZ zb>5DV1mXaQD*sRHprlcn?4=4xI*|TgDTtN-SB2t!)92lg^93jaa3X-&-+1HRdS$M; znxsq!5NZz+hQkwn>Ex_U6zAR7hm-cqyAd4#U^H>bwlUC44tL{Vh0m_U4!e7C`eo&6 z-0o|l!pNnQ^_id~71pdZ^X+yiboDA%>T$nNIYu|QF;j=kS2U2Xo7a!+*RQWc#zv9W z`puM-&)K%1cZFA<-8Va@q=~@&;C2z5F#Qpe6YKLsq~peN&=2QwnO0K+nRr})W^DSH zkC*DSUey|0MEL)cQn=PghrkfS6G>*;8ezb-LLH8Jf4jF6W^=Zhm|NH<`+)TTM$yAC z><6S(Ayj5JZ0VaOuR};)A033V#rRI<3-A8@46;gq5!R^k&?ZYi(_-SPMb-M=n)6T8 zw0w1iSmqQex7)3d z&l;RLzPC51W>Uo{XaX58p(tm_^pm$*E3wL9Qdwh`>~^0u=9s5(9Q@xy|;+fRhi!?;ON2 zY*tc#^McY5aXTOexMsB0MF%{1!`yR2C|8`mu-);sXiR0gj~;cvdAvt3_CaQe#u`re zY|se^WK*=<$8AB&7jAU)&t~93CqmOV#mKIl!hDI6@hhQ#gJ+sA@I}aM83!J-k`fkO z8TK4=z>iGDhh`3glU+C5LQRkXp!M=QFlUDO`I)A5N6=9xIZQ}u*hiMh%h`I(vsM5}c9jcC*3J&j zmk`bZ-#7P7e?=f+LdB@|&bZ&caDWiAo&n$Na6h)p$aeN_7*8^wPK4pIAplKL<3-f) zC?Q0(KTY4y5(Ru#PhK%O>#tEREAIl%fi@ca3`a7;OSyybU4*m-Y)z-_QUeaa^l<6i z_q>uEKDSoW&RHo{?&un9GJPEO4DrS9Nt@@w5?a-F45$bNKOU#&E_E&a_8W`S%~L#p zFlOo#5C6hR>p56ovc$eH@>*752Kx5}dsDgQJg^)CuCSQ3)A;oFy+8B($b*f^t^f4v z(K|esnJs(}(759K{H{bVE~0L5JAjL|eR?g^Z90 zE2o1Gd%8h8ylgrS6hjyVTA}XqW!WgF9$HOr*Mb*T<&=v(F3fphuX?%tZ2D)CPrv0j@tL>#<94NlSedg%JiFd-ZoRUT@nhh7D0?+W+~a6-^1+GQ^(UhLV!ZAi z%X;m>h81sl>cBj?{2^qla^K_lcAjmUMbADIRg7z_b*Mi7;AblhhNn0JIIx;fa9NYt z(Yb~fAwk<~mfr6}n>q8`ZDuMXMPu!|s%A#m`@GJ7XQoJneM?JOQU--VE5d_AqJ?Ri zF}ks`w=8aDcif~k=ThYS{3qw6vc-IU@bSM-(XCV$u>i0jpoz*EbAD4-*4F_1H`?%R zrJrWJ*kPzD+giasL&aa<-keL8X&)~>i^pmu^RckOw|t;TSRb@i68}Ccb#V9^j~N?E z>+&Q2T3OfGU$@8oNX7S;O~ht9dhXW0t33a#t7l?tpIu#`u!5P=(lYbU1*n0Z`6iyW z8r0S@y1++CcQoKT{(2>1z^$FZ6jfN=7lLE1(5IK!T->qR1Anq(?Q2(OG{ryd-lndq z%CKx=a#T;4{m_7zJlILoGtb6tc@2iB!Ao0O(VSAy<&Hel1lZo@OFA>73IOr-UsAIZ zaIBfqQ>sMUenKkHB$V_UJ{$f% zQSGUkKAP?%+8%UE3Fmf^&GCP47yeM8`OF=TV?N75MWQhbrh?w~Pu2r7TQnzzvR@d@ 
zA|rvK=es*rUU_OFB;^?;y25Z`6joEpwhcJK~&}2YG1wE@?xp{9EEAH?5k2+x4N{tSyN3L^_SE$1DMivy6L)m z9`DrM?`Af5re5fK#^y4CsAXF_>Nh%z5|OV7!36%ddx+t;g%Xet2$G8l*Py-sT53@N-a{dFK zCBg@B4I-nb2RY+!y=`^d&!XbBsBgak2VI1>=72NIazOoYg;3(ByI_~pBkJIGrR$g^ zn&s5V0d8dW!fQvi4zliQ)XSw4Hqgk}cepud%;wZ&I>G4RqX3O562z&JS^Oy-N^8vA zJHZQUh2e?)f=7cgQZ`5Tw^ftx@MYdJkt&rQ96rG5&2^UsuJ?C?QDbyQbkF;QFEf0M zl{(Bo_heszOA>oIZK+1YxDG7AkHHdL(tGA~lU=dB3t`x$^43Z$xLjpnzIFU-lV3iS zSJO|QPEAWoxUHJ^01({M;^MNhtHG_5IX|JGBUE#mfB4HJbtn8Gf7{+R% zbKx83TQ~4!9u(8cQj7nOt1l0Pa&7+~N{S*K?Us;GqSNWMl&#Q0A(E7}QYrhs&$Nis zLRp$q){2y}XJ1CtnUGYnHP)DvP$^ z&q=~Rm6i{f!TGTY4!H+U+lx_8HPI6mxFBY(jZavH5U=A~kq^b;sJUp~(;Jmth!5WH z$#~`l(pp#m6LliT*h}K1(X)^35~Agqls%(iDCwgzk3#MGWSmQ1CzOE&LpPb@dOzM8 zU!uv^ow3EHdt{Us5YC37#&2~F2yT8c8p{&d8+0}cM-34u8SJI=Mc5O&H zpcT^Vua=P1HORGJrid5I6Yjcan9~+4nJ27eXEQUHVPy(BNUGU(QTfnPq$($(TM=l{ zR$KtSV<+pdWlWJm@9Y|M&&z#3*42?_C7=vlsTHj;!zo<kH*oPYH#n-o{*Ezd^qP46*EX|j^407H8$Yd6o0 z{{*VB-BkIgLq5|JE#sx>)zpawB7YOQKw@HR*RJJyO@uI}ECT)#c-gVitSaH9b{8v5 zguJ=S9ZW@&tl#AUKX#6;4QDbBw1`QOD-X}ZzLJ7Z1#k}1U?|XAI&d>rnVFCjE@0;S zk)j&e`nu>4*5CH1Wp`BnVhCO1ZhLsS^~Y2ZWNqI3Szp;j^u_Dh6&bt9vi$fxEaU@i z0ymmyja3giq3VN|si)T74qe4MPG4_4HAv?k=Lo~s+1ky${uxZ__GmX>xl16fVQcte zfjH^?@Y$032UXVGWa?Cz3Uxfmb!Ff@cY@qr*!S^rcF&gOpqZu{4H$b>;$UL%&ifLU z|BOFb95-^#p5?7!prqVQ(+-vt`&p;J9hbIh$~mr^G?4sfgt(EEh`m;mwlMY?;rVYa zYT-9Kcl$c)p(H=;c7LzmlGJxEwuxSyKzl-d@q*6@@T5l_of4}4Xt+;pXS11~b1>`= z(^NyVxaC{;YJxF)ky0j}OQ0neT5cKHv43D$px2Gmvfn0a0At5C6 z@z0Jo=VrN+vZwt?AudA@k5{ZQ3yXMZ3sRIBR!!q>?5Y&~_%zNCo4@Y-`JS2WHM45; z10aoumqH?qq4B|6=H3;Db2ILRNo%3G*$AZ)F|_8dzf#v}SXrehG6AGclZ?^~criVA zFV92vW@4-Q*G-@?W~`M*yE*)UAX_SBn6;VK)9~hUM6B_n*4<<3Yh13@bu4<^gOb(j zDcc-^ZKgp0^lotEnAUqO4NxB@d^#xeA>UZ023h0m zY;ht{6|#(`Y`g9RD9UK{*hE=bGl)=(y3;>+{LO@!`pCT^_M5M4KZbSh!(k~adw}#` zQt%v^ubj(gY4>iwl(tnNrr2wX&cCT%eEOmb5JQBXu&Sv&rCjeBBEvY~0jN>^NRXap z_$}!N4m(+YqVM@|FbV#WlT2cwHPdYM=y7NMAf~X5WBI`T4YNDeJdF%IRP(`US@1b8 zYp4%A?zAxYTt*JvGg@6lYir0~>=S*K)ITMx9z&bDN$2l)ftV`geZBWc@1TYg;u&*$vc3o15_UtC%4! zrfT>T9@h5;ANIJ0U7l&LU*%U=733ga8rDG5`{xcU)AWwy0t;6An>)yy98q)P2D)3DKEiS79Qi-mq%=nAmZ5YpLjdVbow>{ zm`fBPlvb;g#u>ZtYfK9b!A>9<&ir{R5gIq}$ye$_PCy{N8;Q9ypD%qFIJY0YCzk2( zk!2+hZX`rN&@UlOLl8`_S^Axyb!Z*p)xBBVD!3IOM5oz3#Or86x1x7<-$3W_gtc}? zk2muvWowQS?40A|wYCn_S{50LR6BLF?ip2SeML^+?AUampi9)g^#Cdo?+`2e2)T9X zzI?JDTl(3jYu@GZ)=j!=n(Rw1pM1jMQU>LdU}UEx&hoD$6Vi=#fsuN*wPv$4QL*w| znUQ;LZQ@@WSBHeYNWEx%r59+~)X-Txs3{*0*ctv-V4wRrEh+9P1A~I8v-gA7X!tpk z{+AHy57K7q5=pn(v;NST8o+W#(sJiy&S(341eeH<^gdC`+Q7SUsmmuKg7hX}#lqId3M=BfBP_PAF{R zbPj^a2)`qYH}M#|>r$&-*eO%^_uA8?HGXy5Ki99yyt=i%GDUk$YoWDGSjDm}_V+e# zJ{gRPrt1Y2Dg91|+=y52GXJtC%H0a`q5TD0^Y=T}&4!p^Xb^4iWlz3M3YYgNs=|rMd|kFmD#}*|3}xxNa|!+0PjFrUjZ6fIVJhUqRVo5R z(3^AE5vie#R#&Km+ZoJtM4lUe8@1@&)m%wFTy-s6vlq&JnZ-qyH%{KUDyr-Imte@J zu)QblfOK!?HL_<(@ks{z<7Bmdv{T@$w{rA(3SB8Y*8BNwxX{~V&w;g$^}HkQ*BFX7 zT_ay=FLA?l|SOP&SQQ>CR}lUsF_iB?@M9g-G|D*O1VO4wzi|%SDa7rG_*39);CnkE zU0YjveiKPtxk_rcjy9(OG`kDGpsc-FT*t)TpR-FlTUd6m1q(?_N=hb)&j;b&phXDv zCY-`$rrGsWNOxSm`#c>98sF};82tc|#?w`_`iuFI6XY4a*Ci70D@g_i;);>I#GYCL2TOsk?NFm2@iMrXn>UMpl22LsoBGA%^ z)LpQbuOCLx`~DmdnUW0Z&D(sry&(fF&ywg%6#93sAedZmFZiFNlMLwab_(p&`sFY! zSN?+7FKX9)y|f`eNm~u=rg&~sC^t70Cs8VngUOc1cfQzjzangnm}gKB8nkh=LbFlZ zV*Lc0vsNrxT0Cs{gOd;ifV&*2@xd)hqxVflwsx67{gn26khQ%_M=AAPV_I*;9YykH zx~lo6saVt5L0wtDSN*hyXQIh1F;cpN?_)jwdky|#~-BfX@FJ4$= zzM%_5lQ|%JPoNxZl0&rv7)uWS6Q?epm}q>c_F%_Vc{Bb(nB;Vknk@U(ygFhVY>2^- zG+9)8sqoCR2Jmim6wR<;7-vNhBa)!}x0msZxk(_lnYy)Zf^)n>D#K`J(<{Md=>~Fw zgO_s=d&Fps5dLg3$wTgbacsjkkZm5QT7`S-t`#=1>-5_9&mRD;5E~iU+)SETmnXa@ z|87kLi0)Frof#fIyX<@s|GmDYz>ey13tJnFgV(?QsRM6c^YM? 
z{WHxsr(fc7-M~aaWMph?EEjBJjK#tG7ma9FUe2C^G4_G489#r!I(X@)58l-LbQ0NH zU>(&DJWib13ZyQO#J~o+9=qz{rQ)}@N{6unrVRAf6Hq(gtf7JLqWVKZd!F1GJN+|s z59_eg2Y>=f&j%ca-ZIF2yBN(@(No^%3r60{`t2QI|M00w))}BG-cZkAC$dhYjY7{F zXZpv~8R*j9#?g2cv0}-fsvOL|hG^hg%ySib%BTLXZ{EBi8EDP+%gvpZYR3zOMq^BqDq^7;tcKOVC4DQ1Dx1l0aRS!B^$KpM9lC1LYw0t}@S^Ff>D7Y3=&=}~@y(vN&BG6h%JT*P zHCBN0!?cf~<+P%n-N=7XbjWZX8=nam-C9>h%dc9ILp-a?ucxB)f(XW4^zxHUF6Epy zX<0v+w`W%9VP_;0U*6psF!RItb-&;%aHS3@J?_XjVKGiXWS~Oqp^-nciTbfc&wbbu zWq|y${}5?7J2q{Nq7DZf6t?a_lzc60x1O}g8 z^FrxZVi!4~`izhkQSG0^*6mI!-WF|yz_b{ZvkOEPJ6uY6$W zkQaM0Yzh?S+b{Ks5-#orowJyIcN7#9xU2Jo4Wt0A9d~7{_X-%MYr5J$Z!uMXKkm$c z8C1UdfIga*aT^TvDeh(S?AkeTlR@Cf^>53&+iKXZJnAc+F%q^sz0XM^0Tz)Jg z&&BT>fOqwn{4sIF-w{Rm&7IBYU)a~w-H%gok$?HtRic4u9#c(mI=?YstMSw53NR9S zS)2Ltbrnywuk3!-IzOEy#` zeKx_6JaP3+!zWizCeAxwtvgj1^PjZa^2_2pZl{{s(T`~4wW=J_%hLg)QJS%7 z4@F-6=%;E9wOyKF&L`hNGIvvYF{zd|LkGb&>*0Mj{a2K^v0{lzw-Wc&QOw4j43nil zhtc7lJ$=V{*1DZM?&#QdsfhL12S4c=th>#1hh}BAdCW?G=YZLoBXPsNb-zlsr7o}j z_u-PbSj>+0FH@2Zp8WN&rgkK@=<&-Wy0OsNC+xv0Kr`K?`hzr~#(qeKnrY1DU?<1A zJq5BPW{XArcDgAbU*pZs2Sr_h|IJ%ueZ{4Otv^cysMOhIdhr3B{FAxo3T((QnH9Gw z2d=S%h}_BT)tsr8jN7%m zzC69xRcQMxb4VctBs*VIo6(AFSPSu8sC;CiNi(_Fo_lI~I$GVaDf3!PtawybX#Uh2 zFQ+(VpM;|5$Az}`c42udV(~V&Ke%S0>G7QwasLdehUjITW%nTy+Oy{u!E5Zi40U3< z&4`=)^yB2P>{kzWQo;`wTUC}REftmm1ldhZ-f;AqL&HcYe~RQpBuBUK3`lsHbiId0 zh?FDA^8yU3p>_V{;q5FrhCu^jVknD>{E=fkw@JMLS#90A)`m6m)o6Gu%nx9RSC^l# zIv(pQ@A2ZrHb?FYK|Wln7N{+ddZM4UMZY}5H+&?cKr_tsOOBq%l`bA``gf#l?~NFx zQ#(cu7RR1E#BW;XB*Z$_=oefy!-P(G7k|xc!Nre9MA>dNex(Y6Wwt>!5y6TLoC6bbl58P;$eekU$A>!V?0vWt zh6vV1+Gx{TS<1tvK{Klk<~;eOu@k{%?ODd3cE6!R>4dkcXCY1o;dW0;i^oUbv$pZ$ z$mFv8FE6$=WC5nc0!v(f293P*=5y+0K4@(G8A|TIA=QH@4jaWx&OPoEcx}G*)Q+4q zz68PI6^`-py|Mhk@ynQLxy?7>v~-8}s-&R6QF_r8k}HW(+yhizR~EP1uKUK~&r*r> zu`rR)@?Rw){#N(cP3_VH=91E(Fr^{lX!7W!WW367*^0;a+>$))ohv7P!SX47U#3hM z@7b7{PEQdXpP`0{aHblLyLO=-fs6oBk9q2}8LO!{UIkyK%jtMFvE9SNJit#U1uy0~ z$wwUs${j`d)_#M;#2H$XSj)1l(!m;fx^(224X zTB%CJbG1$vOK(ru)dmMOId~0IfgS;`RM1(Ft6NT)fFP(V$l%~_L&iR(Tr_g?QOsfn zj*y?5{Xz}pvINSZzUICUp1Ut69C|nb5?gk2(*dd*OG%#3OcA-L^8}6>P}s%o~l?BY4|#a zDq`DxqOR4F*w(r|UfAbJE-fFNysU~Y2L<8t4Ry5R5wl> zK((-Y3b`B|_u3W;?dML{1C|t1dty-E%{c2C!(Z{}0Kh*EEY@HgYAnS*5f!OrScdw9WF~pV9V}oE93nFG_ z`+rUj0c!Zgq(KUolPO+Oh0+lu(oAz^+|Pa=&m4r)Xdwdmw9{*qmko}=_6IwFUNN@_ z2?>pABXWx%ApxXpGFQBH5eVP-lqd8HG1-D%6(@UNr8(peM!+A3VJz#7l*3)?W&QPb z6rZevYfiay()!9@KpJw@yU^sFPOuTmPTxey%eU?RkmpJ~P(`|4c_Ch|-T!_tK3|n- zESO%j!m+LDzDY**|vUOQB0)!H}Z($v-XC#^2{pv=qJJ zmY#Zvu9~3xqSsFgU;6M&@T}Jn`pXThoZ`#uXb6?Q*V8B3+b#uhM!yVEINrOD^i7NJ z>t1rJhf!SY*bjfmr}Ptx(i}G9RRy`Ed_@mm=P4n@XmU>Sel#xHejxeTa&v;;4h{VJ zJmGaufal~6gfHQ3s%v_RrO#x;@cPufpEH+OSp;afHnHy&MS#5(1PNVQs6C5RclJ|o zq-MuKN{7!h-3qBD;Lk2w1EX<^ewD=RE0f6ZVW%X|QZYVqj;?`Lci92~g0S_FWBJ7N zj_$R>I6bM>_|efDH9xnC>;-%>8BTr#h++X?XM1q{rbCVsCK-w3XW*!M9cR25e2Ra^ zT!*m_U;WxAO|#nCaIyAhPDkVP9hVJiYx-MaN*WSL!y?ox4t#OAx|NX+G^ntp<=CDKAIZPB# z7-ac-dV1dV-$LLZR5ZF>ASk%9n{>e7MH}hlfqzv}%TIE|#l#>6Y9kG#$U<|F>Yx~Y zxg;QqL7|}&G`Al{>n2jX?AJQMxBx#in?nRRq`!hho7Y1ld5-~b-RyPWvP@Z|H>W>F zMWSSbb>CY_NSN@Zas@2jelRv zosnTF#~pz7zBN5C@&i1BC>rNA>HOMFcRnPow&dSZop9f*~#0% zY9(0b^mr#$WQm)p+-UTi_ym-OlUa?Pn$0l!7N$R%mv`1{zs)P1aROpw{U5WF-{ns2 zUC&{jf+`9%`83%tzp%1rwy08?Xi4W)+*>IhPP3EyXyQi3dox@Q<}2?lecYqt3Zg*% zB_YVPoJ^+4h1Xn!`sd7XnLpk-1e2y99;?q{($0~r*=b)%AD+MMUp>P7hycI?ubDq* z9Ma83*Fv1GAidPm>zIxajYt+6k}A8QB^5AhF5(nyS6e%&nB`qevm?~T)?`709#m$u zZizb_aiju~A#W<+QrN^eooNrf1_I(N>ev{GyNBhQD~dt(r}3}8(6D$dGD7&+6L!bY zkbPK16Kj36J=FKUgM5Baq-N=T@nrCGO=^Q}6VEeXJm|oWwhT5$C3lw1){Su88JNUB zn&aKHZz7EoD)d5FY3^QCSG|Ce_{G(pUipDNYg%@uxOdyfi#P~-T=>Mt(VatASi3rj 
zkAFz20Tfe^y;vm4m+F|8ty~P=)vZ`=&CNzno2O4+svM>n;0UiAyhCa}K9%5zxP5y< zEBjTUSy{zC|H`S?W7md+{!-Lg>a`vTe@z52p7qvVEcBXqRB}HI?`?DHvLBA%H9gq5 z2uCIEBYGwW$HW9|ys^!{cW4aFyD_l&_uEV6OP~+;yMBM(`ejLq`K0s4O+g#48}EHY zlg2+_$NV%YGVYeQO_ABLq_~wboN_V6E$s3=?ZmPb+x}&(#y`{+P~4cHt5+9RIkva( z-*fC7j=VrD#wl_5oS8WyHk$%36(A)BVpiKRD1X(mebDr zz>mLPyH<23zN}{g4C($(&McI@-pk6C*&0tJ{+Nhv!z54>W{IlVNu;0nw6afO7omHp zjj4ZNwTTJ4QhYhLx?e{6P7Nit?%engeE6u9=JVA#w(;dXI7Ko?dtvNBkm-B-i4%rWJ&RXo;R>BmNK8EBy76@Rj2dfmoHf1eh~mIl ze+3?Anc#ZWQKXx6m*1=5$akgixO&D;frKKymIi8}YDw6!>DfYy`bNLH@|PT$b)BaU z)jT7PH*PGx$e~OBh}ybg^l-G5bJD3Xhi`eA>6^aOTz4=JN}FCxanYyVK2l=ybi5>u z^I2uI!7gyN#x;#jRZSqLH)!nHv!_8@oXzyBOkkX+v^DXGms+XL6xva zDDy2nueC)oNK?N)>z!7LK zM5FWzN;ThidX#7~E@3CXG=pENCZIwy>Oxr$!WKGp>b0vsUX1GxZ#!vu_0j>^@CRO9 zFb*Nx?DL+MRthC!GwXb0uI*T`do8OvDV`d3%((B1zBMVsH{17|(&ub}tV>|5LZl9C zJRG(yE5jw@fIOYpeZ3&i43M~O6yFX?u4cqD9_R^$=d4yDu!K-G1x;X@*u^?b9ta|g z&Gw8mREClAc?P9{xUUb(pFK5!Mc>Sx;ntn~xF;_$F=R*D7ZdnF&0Es{iNn z49uH{h@lBVJzPmKPtTtzDR!Iyp(=abBR#4~v(|651wxy{v_iSR#y_!EUnAW|!CeQy z1$f*4)aF(nfynmkDr}yTCBwNyRnSHd=arYOrHuFkccLdIJ%{|%$=qR^q7-y)ovG;fKduh&C;hnMcLDWXe2` ziW@RydhZoI{Cbi>a5=>Po_&0eB(M=8ql+^9XLZ_YNYa&dV5}meQS8+m`Q_o_43@I3 zY9W2XiNkeszxt_^wMgq}@vDJgMs$fh6eX_y3b6d6NcSM|Idr4F*A~zdfl16sVQkCS zDlUp=Cv$&Hl#!E80pqAi`YRjSb={@T%pzNIdhL$N+QjHp0ZZK6cdV-jc{`G_?@iMC zHgKw8o3-7r*#B#j{#()2k4RZBs}2(78WU6wJoqciG@w}ATx$7yV5Rd?38*lz=$!@g zJ{9L-q2xh!Ja}0EM&ZNjJTuL8NP@n;Q&^~O6O5E0F@nyu-z4!~0)_py$VO1Rd>`+i>u(nIi z+9qMHfsl8KYqq*b?L=>xgV(ua(pk&0K~?pnJy{zU$B7zQ79SvRjQ73~-+9O?@!0{#8`Wy*SZa*AjSn#ypkzgw^m%N*muC~s8@X=q+D7#>lI$T&7B=EeRt3}p6Q1V(=IVl3Yd*K z^d?1hsiO4tXhvDpB8?47A4)Ka1iJ3^+7_!@XcPWbVnOyYZ&{&SeNZBo-aMe4&{!4t zt^xEMQo5;J7?RFB^L|-H&xKvPqbJWS)eH}_7@Z{A!{9y7iCv*u+Eq`Qe}!(i-K64W zU$pZed+H_Q1ct<`C?{o)gz8F3+z@B#U;v)%lB?rHF)`XijvdPIv9(n({3=f54J!Ts zoaSJ^TXG~NA`@~fk=t0nagD4H3FvJ3yTJqoFy1t?Rm|*$!X7Y+;ct)109@%L`fBM> ziR*NFfc6gJKIf6xeGTp;9RZ_~1Ujdb*$80&H>qy{pM@OLMDuOW9WA2gw{GoFo&1-F z=$|rTH;*@}_A)D@OlkSW4+mV^$1kMQ)CE>HPkii#gVdF*^e&`!!n>nVJx+OZsl8GK zEhy_+G3zEh=!9bFfEK>)kxP?3BSbS>JKJJKVF&Z}RVgbzH^p!XikQi|9MJdOaD*7%6JviuDwyelNK z1i;ZaS?0J$8EYoFMH%0Jzlhow1|UqeEnNSi6=~W<#d~u*`_4)ItF|jX_Ou z*2e=xSXMP5HJjP-`&!9TS|JYZ`o610C3H4WsE-xt=(rxIO?=_|@htPw<$m^ci%ZVj z3fqi$#%z#sc20_g!~a4n(wY*?FYL$~508Mxkt0CoVk%iR&xX)n?`@$?qR6LLcLa{TWV}7%6B5;c5 z4%#QacF9Od-j2N~%Rh-m(DnxGN=WF}%h<8aObYIG$-5!6SvYmX1QANto^G-gZjy)> z|Nj1ErQ~z*FfntAs$&v6&sOKwCwF3H4CkwZjkw|6;H4!Qt_0O^3*G4{n1VIswDvpu zLb+~0@B^J>lj6%1@E{pOBs%rTZd9^Tqi1 zDzo?71dcXy9y4qGxDQ&Lm^ty^rW$G$Ipf68L(z{ZgfjY5=mBBVcb{nA;Lv1V%?vuI z8NNKQc>m+R6`xo;-~uzo*-*-I&-Bm)@Xk6)lP^;?%lJvNJE9fDBiCqjAv zW#<)e5%$^n*MdWlNCsPYLfJ1vVu|2sL7Y?qTuWdBglt$rhMIM6Q{hR#K%SUwyJK1= z?FZCG&qZ-ZDj5M#UvOeJl?rMEQ5+6r{Rx{chMXjaAh_g+W6nyU%Lm5Q(wIPoI!f9O zL{nAPv6gYQ;mtcr*A_JZH12o*+cZc-Y;>-)XwY6dY*ChyPRP1LcQkS&iO<}mt6s2p zcRqMJf^&*{-QB08sw>f+J(JLBg_HIQ4fRFElyzD&<1=T*)s7|GwM}-yn=1a#1 z&gQ!hd8rpsj^YcXOJ@SuLBmI+GfHDmx_gtdR#P}kf67F2o(mq=k?!F5e8R~9bqPBH z^LOWvt0^SPkY!6vgq420U^6_#mT)4JBG?(reSulBGmtZ^(*vqjFN=Fdj;bC21_lg= zg05?j)Pmc~{m{1dO+qt$XwZtQiyOHor3!fgOj8_Tfk-GIG={HL4kA)!OmJ)Br+(8i z9Tb5@c9ns*<}E_`y4z|a3jMiDUHEX(jb&pytf7a70b(7U`-{sple;jvg5-?aGe21B zNsoPW7b`wL=j|)iZdqrfN?GR^|2{*GwgU&wBlTP?&~8h=GEIcTvx@Z z$0O=Jq$19&-V#b~+W|`NAL`?3x0pe|s+0SAk8L=6jXq|QS!UZ>zsgH;t>MuDDgItD zThWW8k+|AiTl@4K&dyT!ZCfg(V(b*AzVm{C$S>+paoxaBC)bY?_dNSCP85u_Q}1GA zG&AGutf*n1n9QR=5{D_aLfyh!7M=BzKQRg99a#3Vvf!)_hz&qaKeGH!$eAh^yR%k1 zWM_(htHS6|iA1?BUAGPUOk})pZW(sm07*|@+p(0C{ll&D&Rt$F%=OB;5Nn2q=0{FU zTQxmjIM!~Mo<#x5)DVxUw#s*R0t><@tzESG+Hc# zU52K-R);QCAQ}qochM&oUUK|9TBi);^Jm7JPYP|$;DRV-jkKAR(yy15c;WFGJJ)jE 
znF|{Yf;aB>*49?0Pz5F5`2`MN-UR_Ug>38iUX-oB2?{=U_a#u;4qa25l5Dcq`Rdt!(QJO=kA^_g?XQ(H5GmpI2Me>=?)FoY)eS7|-RBWo3E3ln?|In3Kc+YGD?yR@=yTG$D)GvOL6B(BW z($K+Sw zg4vYV%yPz`OS?m*(7P#UBMfxqJZu7mi(uPiXv3(G5aWd5OQJ_C#NRl-Z@61DBEAxz z0%f8gxX0jZ-v{!Ev`pL?<6)5~E$S?w-jVWu|GsL9($f6Pb8b@zoXj@$k-vaKfKgLq zF_&78Hiz!kUA*F0gs;4rCAizhvQ=YZMT>9_DNmXkJ;zY7Lg{k3F-s2R**K|vbgyl! zO*og^Sp5dS9{3cI4YP3S2Ba=>FDgkCuW5g_m_l{@$9;B`S*!{KL6foDKh>%WSKQ^p z<(+c(1;ec@Ta-Z@zgXe9MZ1c$qT|W996r@#3Dl3*6;Z5RigOy?_~9$N+y}of%qKPI z(LF|#14454+%lgpqyKv27_#BhSm{R0@aV4MjEsb4b?ZeXZJeV@Ck{JPV0gOO3Z)DFUNNka4O%aN zdIl?Ue=U1!^j%-4|5;n{Tb{8Elz6f2xl)&^K;!eq>6I|GFo$bt5o8yie4d(*W~ooO zv6XJnfAU|}1qrC3z>}4Xk~lLI^_*lVm1Mn?HqHT{(M*M#7!0M{Ll5)XwH{`t{9s*wk;kPDTFwkaHeaa^8+uRN@BV+}FoO!2HKIFVejIWltT`P=OR$^PT0A@!Dfg@zWK zU=WDAUs2V}HSKG_`f~has5Y2OKt!5~@o}XR+k1h0QHDZ`?QLexqnR){Df|AWeWgF-qc)Nw4kuXZ0r>U;C>)`a ztC*7iqS@m6{{;TP`ao_1G6DyYZgY4ec{u86U$KYBR^#j^pF~ zv7K#;amT9JHHkegCf&)Vd#UBwB=eP;S&ysmO>LERq2bk%0kiwoSE%|d#~qLfpMKD& z0QEF+@TiI75CHqVmaeVykH2zz#!g7kzueUR0Jb4rRQ52NS@xWlbU%`~ZI?`<(YC}M zQk6Ze5pvhpj&P<02+@TAsoEY4gjD=iyTEl(+n_`sIfHYC4sS`vuDM)A&%{a7Y$wzl zP{b`PSf%R&skSFzqrEcLR#JjAqqn9d5;#IcTuly-WRyIZac3+jX|N>LY|BvCPSp62 zhxH6ZKlUpHF;E=i8EHztyn*fvO&qnJK6cLZIh= zR>3GX-LrlrSto^BLJOu09e2&}FUAp8jm-$)dM{4|H8ECy=qkQ>lRE>Pt4UksDt8_q zx=*b~L0~Dy9b-9deaxd`#m3?}Qz~3tfgR+2tH6f3wnV7EB*~1v)J~@h6TA+-8i4L4 zDL8`>@l|KyeP^B9LmBQJ#koz2oTtc zEtx-#1kI)t#@6Xp?pRVT>R9V&0DWR}>QZ7f|(&7s#y~mV){>n2qhaWn_UObaP zCl_ZFhaI7^Ss^n=7{W&B?JCAuepR7zuf8LBdZSH@>>gEt9z>oj5Z*#sQ3LIK_XB77 zaii4`v_2CN-*NwN5Y~wl9GU_%ojPYhuS?SWU`fmLSVW|4>hzNvk2ES9+jQ>%C1)4L zt0l1mgbbnZfzPY#6#2f$jDKR9OoOZq$N{D@)U^rC#KF?@ABSReJeBN z!!5$;BBy{l5VZ_ftq%YgY*NcT{vfz={=VAlwxz!NQ|$1PD8t8j^mL%J8A(%tvr9O< zA;JDw8e!}}kG+kJjX_A<`xIaX{EQzT*~9scsoEBCxP@5I$7@(3Y=bi2GyZ&N@&|HM z5a*KiSAV{tj7^ZWoo;Hc8?!eP8v?I5F$zXF&z(KMeh@fWBV8wWCU0z^b3P!ocnoFFce*KFu5<27k2?vbqHKd2_EE7x_{83 zyKLRL(75W|G%JIu)8#rKA5vAPy%AU3HpiHqY=VAFkR^e>LR2XzJs*6~+14j$nkX&j zM}x=UErJ&)NdU9_G!!(CKksZF{eV%r>VEGy)$!mfZ`j5N82XH~%_Y7iFJbK80Tooj zPUy7f{g~dn5Y#y{)_12Px)ijK(YiG_1Q3;b9!ZH%Xpk~7c|{j*1y;o&RhA2`vj;_E z2CuzJXQ0LOxXv-G<8L73~-%fEZ{D$mWN+Z4zrv1 z{)ZDW!+JnYm)e9us}AIjcY~iq?83003&q@NeFI-9;=B(hWL`uq8mAN|+Lw0n;n)E8 zCSq+`?Sgzx^@;*90r9>>GAC!~Ote6TU`hwoqmr#%U$KOy<>E{%PX&-;AuaRaFKK0Q8@jnu1s- z6pZ5oLgo$>&h0sNk2)=IWW@Rms*HT3c;gWy7}4@@26&9`W*i%McLS%fhZl`?XmngT z#*a&_2YN>(D{Y%pCYZ7Mwvg0!KlCn z_Yq5Q+61 z3B>mOiOm&^4+h|kr~ZSz^4ZoA7Nopat~jw^06KRT|1G)i4$S$QI#LA<>6X(gKJq#p zX%o}2odvi%_CYoWWvIL!PWCRW0S5T&_(+V=PH@AwpNC>V13irnZ8i|kbv9RfZoL7g8qPSV<)i5- z*xFPO#OVhVlGG#pEt$GyHk#Ydo#YxU#W9RCn(ucH(2hS(G4X0D6SZ7+ZPBQP`3_Bi zI0VOeHN7--uH2jomzKB47cqxq+wR6wkF&UY3KABd-LTE5?Igw@g$QIRhdtt$?%ua# zez5YVyJmxV;gm8~djE(aLyTvN3F56F$NI7h7Y*IF&9o2j%(KhpPpa7}L^((EOHoQ3 z@|HV+=is^aV*CVl%$2)JshsnzkW`eOUqXo3i8y~CsmUd;6;sEDD3|}548^<>YCMO& z=mlg!E6s;6gHZ;;>64YHhr`d?ln;08vah`KY1?Hjcdt0WQ8)~d7HA{>gSyYPPV;&y z;YEYO;);ZG-1Wk+Nd<7@%E!w7`0w2^+lk$xDgDbzIK|WoX=LsbLTrzUXP?K3@UHwE zl(ai3C!_TImkcNw(;_1=r*A#K#e`5ErxW?EU%z%C6Q$^7Y6!s(Y-y=5xb%wXxyOw~ zbq9}kJf7zX#B2C8{^d+8e;ulxtzDa}z&Cu8Y=I+OTseObQqQ8&h%A~?eY3Nm^YsK_ z3>B~#_cNciTx@h7^QBFcB?lkJ1t?=dVkGiO7-OKx8;%06d3&DYN6^nl4&D-a_X3nL zi5COVS@A2E%tL0@7kytJy?~Mx(AC#}+P3ZpoL@m&b*(x2KSxBNjSF}t-WGbLSa#BB zk>`$GHA5_BmqPO0{dt22%=X(wz#olJNSr%^dWR5i0b>T4dN+Cfp_Cfp$87AuGzQ>P zACp&yfp3PM58x%<%4bD`jtlBCbLU(D}+&Lk>EihDBSwju>7q2ksYVW-=7WzziDTJD|uin0%wWOv=b4 zW2j#*Fi;Zgg;etV#2uWxx>PiG`=cPo>i>T6%v6LD9<8LT2g&DXB2axm;fluqWIE;Sp6{Yw1LvH3;S4?p2`T&3JaPDvAnfkwQ0e!2XR35B}5On4ui?yD@{+Q+3d7Gtez17J^Z zA_)uox*C1h{RW_x1Sw4EO^j6?3BrN;*KobixO3Z*u^yM23BY!f9N-n=$Fnn0XmxGk 
[GIT binary patch data (base85-encoded) omitted — binary file removed by this patch]
z2JrhI1792IQ~R_kurnr@X}>GmW4mb&+_jQ9b2?B64&3}}CL8AcMYS_m!q-LVX_R`6 z1RPg0$`*@Z!T*Z_tdDL$4}qndjwkwTIolUKB4{qg@m{c)oG)3M#==H}M2sbay;p}< zS{|Eas0^ET|6l-icCZj+ap?+#3Pwr%9vPMD$@~xY*=AQFz7_DVmwzH1$v7M;B@jzH zRDXuN%}35dZvfj>c)VbLP3??+QNsS4ZJzN;orf&-kx~c1w#qq41}udO+(Zgvi-PjNd?I3$zr0ZDpjWA#&)K>n0of_LV{&&=1x(EN=KdGc&p&oL-KZAlP)5h>hN}< zIjiZ2+!usIDaT9eKVCh(?G2Z~Q`X9q9v=O+s(+7(`u<8koRs7KC0_LFOk!65MT zj=Y+Y8?g?xU64S%dyVB^M#UU+Wv+B~JVYrtJ9FIPTWr0^7UhNYE`+5@FV?=^4AuZP z@#}B=e@taBuJq3Ld8v5m?kcob&^PM^BwFFBKEIc!ML*i;C+zf>~M;!?6MvJzvgfAr&7#YaA!sbvr z=lZ*>?17l#6SuFB>|3p=0<;$wSdsUUQ4-(0k!kzvgP>n9>juP1$%2| z!1VZt16CN(2b|YWN?DhxGGSeBy8VD_Rj9Ued!UWE0OpkpfiZ#=nLFwYpdhg~cFefX`EU>2@DZu71FR5j{>)L6mA0sm z498u_2ag56$stjfG!FbFC`pX=*O>#c-IKGcbkPlC{S6OU_69kd8~Z>eH`9y2D9|m2 zQBQdJmU~JnI(@=Qy!R4d3dQeojM!J*dT1UJxN)i<-(6-D_^dY}8c|X18KQxmZY8=Z+#G7e5)#xhezMW3+)f1XT=vXLXK%pYHBe`8xWF3H-jLM+Q2yIEYH%UM`UCmq}-L(Fu- z1$8=o5o|_gL?Yp@WS(y~L$_!N)f*-_y5fN9D+qM+7~Us3XpGjDcSj!QyBmPEL+3rB zX<#CwW~pd(^I0nN!Tsg9^?n*P{oGhy79PDAt`0aUyTms)LD4Zm0ng)aFM;3cKimEBv@C-{*ZPcS`&nyM{7G)g4cd=#}(Dc@!W)n0x;D`tfDdc=ayrT0l~|6{&1<%>_f z(a65VYVk9S9$Kh;l#kdP6sYgo#jP>&CW2Xs&Y5h~-#D=MbK^+ONbkt!4*u&hcmmhb zL&ux7ruyF!Vjkf_!j^jQ1Bp1$iS1dynwH4UwF<*EJu^}sMyu-2|f*ayoa0VVPQw0o%6|G`w0F>8D;Xm*#60D z{$jFtBEhG?QJ#TjD*P8qBh%&g%Rn=tS>Emho>cPJ{M71Hry%3cs?C7OxT?T{65%%?Cn{Hc)A(y z>uGa>v)#Ny&>5CJ2|2NHG?6BFRv7ry0Q&5?z_rI~!e1{?rrOd!%MY;n)B%KAr4uB+@86}; z;lGiG5=sQ++XN9+gCW7YSJb$^ma_7Ed6|}QOI`1q(o04~v;%af-Evv_FFo=ueScrS zS(>`)=uM0{4ay50VSi0s9K%i+Jjy9~|A$l03a@)J=e zOF911Cq#x6Fw`o6i+#~Q>m8WT!CEd$jVPx~Y0O@;ugUY*K0RkhL|28kRWiu`&I_#E z*cW0MBmVl^<%^iJxbK~Pfe@g-+4L?x_@j~<*TA@{d}N1wT(D4Df_HjDD(0IbTskx6 z6##?2ldnI^|1f(_q$7`Iaka`7egxABG4Jly)y;=!=T}8}3)Lw7CKxJ&W&_>k7XZEo zx`$o4WFc&cT$IQL64v_*?$a;S)Q=ob9-*t0OTffjAcN}b=pjsw^Sjgc)AeHzHBVD*Dwe$P6&3=<4VAiCtQ*2QM0^L{ ze^?$r@1ojvxfMGCW~^d$rLq*OtBIktvqm4LVhC)hxRl=Zj)gA9AJI@|>NGgncp)JU zRCMg^BWB2T^b`hLqpuAZx&N}5m?hijGq~rKOg)d7XYTemXQX+~7?91Wm93^Ry zq?pcgD;&@|g14DE1NC*jdl$ltbS9X<5S**L7Wqi(K~9XIVgm1ExR%@gaJ2+82RV*Z zvW9~$gjYG1`bM=XIiYNZFl2YTq>tF0-(G1OonP#fkaO~z-sTh&3!BEI)TkC}}!lNk zH5?c4ok!yg#vS2p-nHdt7ndPBH_O){pDR+Znfi?i*rPJ_9owM+565qCH_I0jS%F+_Zt}GiKdobJcsjR`(VlWCqDM5pFr@?`d^>4%<(q7vyN1VXFdUe_5 z30b)+o&R}&Xt@lNBdFr}sbcP{bGxD|ttL9ej_b&#@|$p@jTlp;Fq@|WkqNm+-F4Gy zDx)?PcT(h8QZ?J;tB^iZXze)NO-H$ReK<1VnN?=3p{k-*=G(6Sa<=JesM!n{j8vZ2 zUdoXw($};jZO(sHLEK$Fk|=ddDn~dgcc@t$R*<6K#oT_`7=-ae$hA!=x=8MY5~&m} zv*mLB!(oLCl@SgGJAYm45NRYzoORhAV0_9&w$zA1&o8jCrT_m|7TpIc}S zvuNP_H6&btHsdJpr7L&%THwVO!qa2!i*$Szjy?Qkwv6(AwVWVLBy&@tK@afs(ePbid_PZ8qo=f)pzy#p_z%l z+)^1_c*EKS<>YM_7l0q(@?yD;g@4{1af#d)*fQF)r2ycr3t|=Smwc?xLr_P2OXP+Z zA5=_&=xpV(e48_lB}rk__QJ9&&+)5!`CQ1&|I}I+>>6c<&>w9-ehPh3Da%7ib5l(^ z)rv)W=0+OvR-eHK9s>3v-5CA?72ZWl}fEdEz_-C$qJfZJL9 zrqU^DrhYO(vf+ZF3Q)CvTJX+GpHo1TdzvUY1rHE|1H0B;6WWRQ7~GVh!*gQoMnZWa zkU$QgbP9AYfSmnffV~c1dVtPwgab5J8-sE9pKwNMe-3?Zn!X&qV9^h*>b85DZ714e zLy-)tE8-@S;Xo>!ao5YkN$rO;<{Q7vEBEKzIEhs>SH?uzSc<%Rb(`W-pHZZ%rm&=*)tcM;ZvEZaA@)|D zZf1Wyjj?7w*F5IxM91>ak6Iv~O)gvXqn1AVmytY|DdWYmOtxtRhc!Wxc`1 zMgKB*pAb+O-5wtSO~A_Ks5a2UMO!c@0Dilxe+9>7w0sWZvJBUcg+Cav>E1c%KikFi)rO=5eu5Gw3v9hmX;Qxh&DUT<8vNRFyqrLiPKQ z#vlhOoob>3cEp$^#&uztP}nbM-bY6cu+s+1^2Hsj6dGE{B}4hoe?y0B^l?ArA!%y5 z)Yw#hLW66EKd{HHv7o-;%D=ON#s9Nn(zE+J**1lL7sebf%Do>KlnECD2A!Ea-HH3n zm9T4ClxxaIJnk_KGF~qH2J&jI?rAiX8<4O87YjW0wKUXLvEB~nn1eORRwscQiWPp$ z(J}F+Sy#TGr&HS|d+Y6X1cZ<>7$%+kb+4?8O>u=vfZ?>zj&r$dP9CXx5W-=tV$;d#J7_w2`ZN430{ zmJC0$AA5R!uMdFT$0A5^2z53Y2%5h`$>{)oZHOBJ(tP(1DujQdKH{&kYAp<`KLWu? 
zi+=siPQD2N^#c+3yZ}y5$DDoDbR@xZ;#9C0r!SRI89GYPTM8Nr{1gt|i@W-l#JLYT znZ$fU4PkLhtYGj{L`71+2-ZsT7u3*~i$YXF-_|1e*( zAX`jTmg&uI5B}U^n)ZfOpBVx#<_ei09QfbP4qRz&c{yZ#JS4t1DMr#JIzGN@{I?gi z4FRDC=6`KI<2~<Gf`b-^s)RNORFtkjN3 zX6oI~<4TrK{ua*sdbr&+DvHAbK)Dj^{w=OFb})z#n0}?ey~EV30&}4y%8_Y1eA#Is z+_@umTwyxxiQJOzl$*^e>i*Y|+wp|oGv7H6j(RZXifzX;96iX3td`i*%31380n19? zzO(3a<^3;u|Ok;MC9)ttsm^02{ zDj2o~lY;2TXVuJW>O9kQl>8Q0=gdl?pC^+Ci@O+Z{e9)!M?|~@&CNnz-xBiyCFLr4 z#>$#Twlp=EYwTGZ)2)G-(lP#bE}RB8N&z|m# zwY^C8YFI%OPrFGV1;yk18iaI?@L0dF@>>iu4i@j>vYXGSO{hJLJOeVOMHK&@CRfXZQFC|h+xy$nb)`M{Xq;V z^*aYWFyY{e7W1@=`pXRqJSolCVcbu(svF&$o7p$NG96yG(&9o`SRSDZJwMnYyJaR-{^VDXYBDWXT z2r2rA=ay6Ppyf?;P`nyYJA2rnq8c96Jh-3w;;z?K(tMJBjhE0&D@NNG`#fMICQ$^I zOL^**&Q56_C5k%s;S4rOGo%M+r$U-(#DNq#RQ;PDC|ddwxe#KTv0}h|s3}?Mxqy64 zDq{g;p1o-dUUXD9x0rt`zi=(+zv{6bWn;f|F1#VE-rL+3x zsiqaciRwcSh0$!N9A!ISMK8*ZDFqFuz8=Pm4X-^$sIIR~atd18SG*7=48F1WU01gs z3|3Sxdf3|dNBIe%aSMx)ozLFUj7uNhw+Z=@5lXk)B2BJSkm1DcJ(z|1%kv8GW-k#E*pDz>>3L+Tj?udPnA><%dQ zh9JH`alYpV3?kZ81D4`Y{Z-Z#h>N(gv4AD2;a0s0t>*y{e!$%`h{w5eUPu={b6TV^ z3YaXwGLN^n7jL>bwSu_#DEQGKN8*xd`7vrK#3O#$(sAPu2xP}1xdo4CR_97{ZbRSv zYIv`%I)-#vV=RN=$qiFMM|RMY{Zs#cw@~j!)E?4;DU-txkydt2S|dT85ni573lppa zx~ZWGpBWyH(d7yNud}4*fSHUCZ5;tCmx?ntP#7<$WP*bBljg*tvqkXZnO_Um`=eG( zpvXmCou-qqdvYaCuPWK4ZWkWQt6RsAu&CO1uzjm&Q+#+(#Sg>-Y;$?PuBesOfPoVfAi0tuPMJKs=h zK^q@0xyM~a-lHumgG5W&12=Ux1-w7I&9nNl&9{duz~Voc5cwDN;4zsxal5kc{Haqf z%cSL&8>D$^R$5WM6I@+CzV@<_NEBhN^=ItZpn{K#es_x@Jcg8j;U`B zT1v0TjsJgaeFsnz*!Oh+!2*i9iedpQC>^Cr4cO2Cf*@UrO0SAi0wkgQTv=TOC4ka` zXh6DBLMLFO6N(V2!KFmN5FtPaA<6fWxbFOCzBi7$?x?eQdGEe^&pqedZo8!7k0XGq z=S1&?Slo@;SWn^(F`9pzS8stQL5rB>5b&n%w2ClJ6kBh7uQ6(2E$B4kaaTR34KsmZ zm7`V(bB2FD>&(K$s`$j}i>FKVoY$MWTO)5AW)wNiIuSRTeSTeZrIl$Sk>R`-E8MLW zP=8VshT-O^W}=p*4|D%+ISkeS%US=zY7%T#b*t43u6}`wdj@hkecJ}SB(wdI^T?%$ z(}p|>H5iOrLe3!7)z;QNeBNE2f(vvTn9{B0iP5_WPSRZd4u5r~^Wv!+yVfRuxI2$q z-vnM&FFkd)7cs$FVw09bckn(6Oq4NxIrMVr>Bs`{STX&xXuRPV{d{Jp> zsfih}YpCHrEtNhWesH^^W>jOBT<{~xIzs;2^t#IJh&r^@KT2QTC0wQOXB^D@(oq1Wy|yUR=;&;sL{C;GVrj9V*A)`wascv#zFr5&E*uMHg_ z*49hMw4r50cW&MG@^YD5%D7xdBEgSt?AE(fGl+zI1QAahc)u?{^Orj{M_p+V4;b zBN_r@?7)24`>C()*76YStCn?QW?7q#B)E>3Zd$8$4bW+1I9CcY>SU&$?taq&T27#s zgVCQxXlc8G!4|8lE(jNpu-hLF*!sEM?LhM`dQ$TZP=|&w$++3jW5A3c;U5I-uWJP+ ztJ^lk^_-V%66mm;L%c-`q5=XoH{Swf3T!&D*wEL3623n-L2%81GZGH12*Mn z10wl{!!kF6+yQU+cKgiV&!5nN-U>Gwq&ZZ%;hY`ZO>#>8kMZ2er%x)8>;_T>y7LAk zYz5j$L`gHj_ad?HZHGaW@9l?GcqJ~aS)6c6#H=AliEgLXG{8K^4SN8{fujzB!4svP z-W0bYew=@~X-K#Iy|RN*WAWhNQFM;Hg-B+l=9bqLGRlprnDhWVs!z!wyjTOfXW6=X zhd#+WwGa3_q7uvF9rgp`{*j}Ysv&~kJgut2fzapKyTsg^U7rD&_9vw3K6y%r>&p_Q zClmfUw2vU;VO&5-A)-bwdG)S_oGV}*Z5OX~MT=cg3AxUd?}M3#9Smv)l^++LFslN_ zE;&uD*JhN0uYO}_UGMkFFyfV3mKat6`Zk%)4GqYA?0s_K-2!*jU6Y*a0b7y2ZcF0h zVJ8wn9hUR=Mb{v%2nEHMwI28Ko1ZKnDTOGVlMqvFIAy}khULU)Mz(6Y zNd-ycAVTR;qHxJ32edpUwPV|x*!Wq7tz)FJ2@7_R)o*xnFTAn$BGy3L%WJz=+|OYN zv2<3W(tdDH3~k!|Z|BF9F<1bl`$oaoeJT?|n@R)i@lRX_)4~v{=2l-}6bABH)FkV? z8MzIZt~oIm^EW-_rID7ghe(7w9AEAAd+gGRmFI;=$c?e-VNey zMT#kZAMk;dA(b>An}Rah0H|JF#x_?*Wwz0GDy}nLti4gVYB28bW6)Kg{gKvwu^lsu zhAbqfoEHmP^KQ(>?2?SPin!g9QydX}w(8*G3Nb|b6SJrov3zw53>wo{RXUs zWhPO6;N-DmnRHH{V2icLr?wI_dF9ID z5XBCX0@7^_8^5$VcenO}DkKcV=LnY5xgep@c4}euf)YZ4$~w_`J<|x>~8m~t8}_6VG;p& zjb0%rfaUh4$fSvVms5`RbSO)101k&5BmQo|PNFP#H|uCJs3Z9jKjB>TMTTi+`lfbp1cmZ%L#=K;g36+)(dD6g3DJ`bo? 
zX_AfT!%pPKob`7O(RE*_nj_3%R`P~{XJYVd^Ve&hEzat4DFZ2l3o*0FQCD!-F}}hU z0J`|=_&}+*Vmbs8&t?SPXhq!hUwOtq8N0;_b`NI~xiClXkgBZJ)Sb`LHob^RyCmgI z6Krw?+4zKU+vK01S+AXb=g_nclyj50I)95oX;=p%qPgLt7W#(~)8iWzgf1JAyN9Ht zq(HbZN8cYehN7lbOd(+7YAPZdqai_Sl6$EGDANwrH**-UV(P(zCkVII)V||Ise+R z*ecLSD|v|Q>oW-^J^Qk4GuFuteuN9iWe zXX#8un;fZZm!aDBvkYvzpz<|OA#Y*nC=%$B?Q{Kt*h4K(qx9BGCrfyTx(-F|Q?kY> z$=R$UGzl#>;!P^%`7z3HQr-OQU}CR~z0VNp)cTe5^+sfYtjI2vDBKru%OoHnMJJmIlN$DRMxyQsN^06|gV!mY{0CBvQ+vZ6&2 zC8@sA)Tgx5>B)!6hQs_AxeJ;>E)FfVa5c^31k_!pr_v7a$*c@0ZT$Cs^yu^V<7UWU zVIP?Q#`V>WJ=LB}C{WIrB%IiF+e^~n{K0!yxMTk23?IhdA*D<__`hc&hg(pLVYX(o zmVE3Ye%_LB+tq2=Cb%H^yk4O=ZS|8_P+((}1O2p;g~W_J3wbZK-ooSd(1R^Ey>ViR zZD*~qZA=f5xGu*58MGA_!5l=0CG@7m!Y>~2DObv{pQBOoZCCD=KCDNj21{e;YZpbF zaK)La^y0o20~oUb%|@Uu;hfav$`F(0R2@&R2U#LXh9L#DP7#Ohmc+a^W-t5^P0;wsL6H2BLic^1Ioal9a>}aoY z4KSv-M=xcM_1=;6*Fz3)Z`tD0zE8zHi4#-QEi;XIpf^DTFf9^~DwWfL%)DHFPnWE| zL*n8Y6KP8RWeUxOaH$H}#5)5O2B+XS%RL?)O#l4m%KYVt0-I!dTi-?bV051H{lz^G zUp+1(y`6hCj~#P`Kktim*|##<*;I$qPwnTTI_GN$vwebFOzX0w{Is@&+N@g)QJ)h- zY}Tutv#7&c#gkY~z5@^M#9MVSkw!yH>s|A;UzqE+rfJ#5G=6jA=T4oVL|SwzSRiz) z{7Kp2d6e96mvd1>UK%8OqijC#!`&&jMSdz!1B_XI2(W`!@H$-DZ zla;;t{R3?+f<2W0D23u$$qA zU%;y(5@T3#%9IwDG~#eT4*VXx_?RrpPx8>>SJF*+<7*UsQj|b?@Bg0?+@NAy$U-#`q|G%r`d0Y zpXp?6l5cssyX5D+&6_c7fJk^RuY3jxY7o#9{GW5u6ew}UK~wVUi{?#|_izde+uryD z-EPZyRhFViAmTrLJZYJlcEX`r=M;Ie|wPq}@!*(d+dFzVzVYu;vF3Q2+`9p-tW76KW`gJzyPPA{57e<&@YMD7^vd{n z;NWMGwPg7Fun^|hW)MnhaWf69+JtF&HTzpOf|sq><7%hi1KS=&$tC74(`qTUvZol_ zL;TMyJ+Ci9Ish-7iX5L|smDE#GtFnaKAMqHWM z{@?RNh$J!h4 zO2Y*!Rkjzh=-4-UGHRmTgL^0RgNCwHryK1YZwLDji8&quWGkT<>A8G?_90j0O6iFW z*CKsYUYVCW`61viNzU1YqqD<5WH;zGx{JK5!Xk7OF`E-I^Z4i`QjRQo*F}eXFqr*z zVMVjvsXKkD{u>`>c15rJCn!G!Cc=ZA^2R`|(-sXsCq<8^Uyfc{D_Hh!4&9%S_HHk( z9J}c*2r6KbBNX1wbhiw~lQ=~SgiSrjp#ph>ugYu# zYBjG)X!OfYb)?prykfO2@>C02-`_4h4=O8D(e!V9R6Ly28G9qZrhR3}9{Xgj7-o5W zmf~ldbT!EKGVYHuRNuX!#*(_s%!VGZH%jGp>*t0VRb{_dpxulqFT869Hi1)Hx*Rv{!EqPL#?ZXWr>Y8Tjp z0CCH1atxTf#a;GU*MoHRP|s;A0qF_ek20X0h?6x5?e+hGp@rvp#cz*PQa0%h2Moij zUx<^pL!z{$gZ%yd&;MO|LRQ7ZTs7aN`p=B2%!SwY=G_KNl@oYSz^{@I%>3e)!rW~B z#)6fTa=E~kfFyjX*p9EDC~07k3r{#XIh^90GnRMQIh{`;&Uk*ba~)vKc2xPsw867V zYWoBq8y*)*Nb5a_s27`ZTygQAg^5O(fo`%MNP?_llZ0PijrC9UxUE+7tWVH6eHhs> z+3wcgH%+UXrC*@D40Wi4OdqPM3yKbYqKaN;$&CULqw7q#vrACG!72qX6En~+GCDYm zJbanAFK?sDX#{TNaym0rF2tpl5}aw*tysb<_voKqg#8XE*^hq(ly}y?A7SN>p#`w~ z1XnIvc)tGHaLBF$f2=i-aJ!2WH2IjozpW?33#LKaJ&m7P#ok;{L_w1Vr`k6QPf;0- zz~|0|G&1aXUhfz(^_CW-zBs0>TtMm34i|aZiDG3sd5Bhh4ekPJA*t?3R@#A4Vkn%Y z?S5@)Jb2lCp!?wcuX4sCBR-F;h7r%h6e;wt%VckydO9+L4m$JZhoOoAp#>H5#VV5D zhf`#h#Npo2!AMoC@Oa7kT44WFIBgm%3J~kmOS{M1wRIY&s=v)vCi<=7%NzLB-xpP4 z-Bdk`A8E5{mFNE%xf$B%!9_%YzgAe7_Ius`D7eGM{L^#bR&$%EreR^u)xbY(e29xJ zNyVv+bO7~_?j)04{Y}pQIx77X_LSPu91>>*)WINmQ#4pRjcKH9Ll!wn{AiVl<_hb! 
z7)LrlprV9PJKS#5ttp7w%E|=80_J|C9ARD`GqTQXo%oZ>z#D<8rn!BX_BG9R&lj+c za?Ou%qV2%lk^0c@Q8(nRSt!RmiCwNMKjjBnnjZ87iQ;@#OrK}ou&xT2Te!gJx%19y zOxs9@s)!Z#nqj^A!oKb_%|H`@IC9A0f3alqr^4N)7{blQr3ht;&h6AF6%F^5KX|9- zwrTgtnGF`$Ne|G`JxZkYDAh=*(cig)Yz3+nz~btZ_^UpF2fSxTB^i+xpY9&$Sawca zqH>2W9{Y?j$GK?9LaO&{*F))z;m~YU5$NH~>W}A;fEHCT>@qJqzWV3N9}#}NL$RU@%a^lYW9r5b@Cb|u#gD-xir z@YO7FufEH;#iqn}?Jz%*W+9P&gRwz?O!sN7G`=9+m8azNwH%v&M)4MCr?OpO){dya zfvGdL68eNYXdBn4=;`p^re_imKxXRcRDjD;F!@1@GStX z&%U(EH2$|+osfa=hCc#H8HPsn`E&GSB27*5bq3R|llbqK59vA1FNv2Z8n0f{!(!S> z3$4l->p%JxVqu9(?u8!bkgk<_g|LDIS1tefSvMQg_qh^fJ z>jN%+Bj{nI{k7ZXxMR`Gj!?&Z;~(bMj&*;-TsC7j*h_Fb-#T57pYBe>So3*Rw3S;` zn(%b~COJHlDfjqJCqP?pj-S*H69!d_A3!|DvC05OA^h@2?}vZPsA`p&&IlR>S0i)=t`jp}crDo+LATMMMr|6XDa* z!ce)y2;((CCKFCNDW^*usxPA6Dt34(m6^QV%+L_Yx_tiQr%!ebpc_}kOajwq8XD{{ zMAi$)u%f6o8gS&IGp%B`sKv|2xfc#*oR##^^1kVE-xUTWNSBN&0_A(syvns}?eq*U zFL^oz-CThc`I5Uau72i4*)*+K@)ew(`U?JN+mfPz(eAlK;f?1BeG47eV1DF*G0yWU z+$M8RRJDA$Ew=IzY?i1s2IM9LoY#_}11x&&2l=$i^;~^wSrdFDi-_Hs-C2j0${wu< z<-@I~pfv||h=i%ATP1V=v6_!etv6*l(Zr}PbtM0Ppa`lav-ok^x zVL)tV_q)Dd9chYP#3@_DfOMA$eWx1^kXKx)*q-;#lZ_2|Jv4HjRwT^hHWg37>(XAGgkD=m6o9P~KY6Ag$KcW*?%xc_$I$fa z%EivCBSVH=f^V^Y?X1cQ+Ih<#QPbfUo>WyfNIxfz{V2L){W(CYpn|-cq|LnELBzQ} zFi)b#9V%VTIFbXOTo+F!)LG5&agyi<^PzQP@$tO5K=FlO#n0-S_{kPxo`@#d5E-vHj7t^ieN8F5N?hFT^d9MNQxrraG$=ml#)l-)83OLx{cJQZipY4}Scg{)nr=rc zb%AjZWL94O{rfNm+jKUQjA&TS&e>?BAx-tB7!GM3V^CrB4-Y)>mMGzNKkBrSqdaA< zrTtEv7+cw9wSR6$LD2A-11t6epv)#{hkyxxIwkp*!F5w1ys;R}%4M>%{8EjQ(Tfq- zc90XF_hE4h9soiigB$$2!>&6!J71{q8Uq({!@cBKJBVa>4eD>OuIbz_u&&aXD$#mD zxI1)^i}3^r%cE=@^E;;ZQ?EHGpmx65D0s;3X!_^*M%@{w+kefpp}h2UvKBGdoQzte z2b>+*LRkkkoHM}7?(c22@3#0lm7&oh@bIRYMcq7dTS; z4$IZxBzk`+`n>DaEXp~x{$1C>iyDTsrX8TCAJeN9@wY0JG{8@Dq}*)=4=0}>?&5a!E}97?o02yFR_r(3 z4Eg*=h&1=tP3I8%ESY6l-;=#CS77SmDHSVAZxrsQ#WYPe#y@NMe($7{kNQttRAbQ2 zFp*OjyVA*5Wu`e$=FPI?>KU1(PrwQSXh=9nCb>mdkC_e_HeZL=*Ff1R(qqWD*A-2& z^7!_oHdaV2`>0cgV*)+NSjoZLqu1ELVW&H&?GR5nndRm2P*#6@adEtAqx}?W_T4(m zm7{y~pPxU=2{ZH(!K8F~s;T6rFZb-g+SRpPshw80%#3{uNwLkF{`LkeULKSEQgmEPnfj^EUr*6Cy>>$>zUuP*1Hhj*kU3%uyq z@jsPmX2(#a(r8EUmYo5(Ll#|b&bH^Cm`;b2+--zK%Y0*&MlD=WD@8ex)wQ4b(%I;x zDczWi&222JK~W8-g`hU#1uEe;87X@xYXHIfHWCJEN?>xxegCT~;slDwsw$q+F^@&+ zB{j`>pICV~}L%Akshn=M3X4*3qRH%)2m7c+bu z9szJcSy3|)``2O@H2q+FWZk_5ieVlGWYKp0PLz zsy(HPB5&O%g7Veg-s%-lb&xyigxdB9dPU@ndo$XjBYXY=huz(Ml)Y?m3=DW{T+4Z4 zv3`WbE=3*YTBZ(pC7@ElW~pe($%&3@u;@wj#SLV7PC&C<2~l+GsP!f3Qt^#Nt15-^1WvsaESXXf)u_p$X;hAVpcivXQ*=KYyvPAp z%UD^gY^!O1E0LQjwH$|GdM@HxDQ+ccacgy75{A zTvFBfk30!4*Ng*#tp#$Z{o*GlCo7A>8C*qN<=4{B#kvl_c5;%)C6lPh>BJ=o*#FN%xK*V+XImth{#4+933a`VM(8WR&egUQV&JuK0+@*4@Rm zsS1(quvV%&YzbrPw+P*hN{m2DRF8!Y@2fX8+)S%!jY>8Q?2Jm*3bLYC9|D`F)aU

Ra#l?=gxP%}Mr$zbe<_bXW8H8KB1?D4^(8e^8Q>DP&G-UMz2Mh#Z z*Kw^*yXR}6+aW#ftGANn$&b`74MlMOR^EyZ1^hVhZxwbTTfh*ib_}>nf0qmDd^}`D9wdV;(5u@4r!qdfvk1dRfW|owC60 zYbU&?NkSICU?Qf+Q4D#PiMJxf8t z{Ky>9SX3)!*1Z^;+wnfRT;QK;F!(sCmG^}7w=^5N^N3ZBt+@P(gf!U}YdTo-h7u`Z z_-rMeC+S3=k(Q9u;^jN;Glgh4I`?k^xY12@+!^5Y8Ji9TA@-s6}1q~?FAhxA~mW?#M$SYf7@NiJ>s+AR@`qdEgrXL zd}qoY(aTFJzRNU!v;fb!4E(+d9%k>y?`f`rWffb%Dp5&JXGa1fnHDHuhYV`eq@$Pl z=k|G?l2R|PGcLFX3-><-!<{XkxY(#reOlT9atx(vzSXc;@l0_*a~nL}uyTpKWJTLTn2$;|lshr1_8s_iTpodoOPb)(336sj=vUG{@aZd95-(yD_S@_Yl?q zAr?6awc%UK(60Hub!m4i;zn*dt4{3W3o!9o2B`G5$i(^1?aWT+W$lp%= zvk(7<*^ei%04Iw0jC3nMW}cpGEFtONTR~&IRQs+=ePEtUVsbI5%$BF*Fp+{}WFwgY zyynbp!1$8!eQ8z``ClMSx!0-r=xn*>w$5{gL}1IY%)a`h(Nmyth~NPGSzP(L>lq;~ z$`F0D<#O=o|H2ffYdA+91*WDf8BCK2)HqLI%Tj9fS;+g(L!7Sp0fiI>DCGixQ*}2@ zow>K$4s3zcKY&lF`y@A}g$$OqdUNYy**f#!u@rqv5kIt`F1#xsdFfBoY%Z}a7Ho}Z}O z!F#KPeN~wponu0Fp8);ro)i@zt%6Ac5=Vq?nF48Oxy8`Nb;<9~1l%NLo}*kMWbCay zZafm3@OEtS&yw!g_M3T~hbKKnF87Y16i?MSkZHFxm7~9@m@^vHy=#@p%z~9GcINq% zT3*vG;pO>&8r7?%Vogd7DD?dmPobow-q5Un_?)Tmcz+X7vqp@sZOI1JueW&|J8EJ& zOD!qzr_ZMj9xD|mZUyAMip)v2epkWcQX0}6gKDLzw{VZ&1WwG5yBZk*AK74yxZ2L& z!?Q->xfKR#gFbMkwF;lVo*IO5>8cwW-C{Q5qZX}4$m2pF+Tizo!n1rr*a#-IFa#Ae zehtN0?eHD@xn?tFA`a0%UqWtMG2rEM{UMtN7NXKl5MM2Dd7)->+B9KM8T=3M*B3t7 z3z8gE@T@$O#I2|<-fO%Xi#snFH8yLGnw=|6p*MWTflV$S-Pk*xnEOA zkOMEbI;2da;NZNnm~|iyPc0 zOC^M$N|jP3rUtCqSs9+~WT#omq<@uWgY=SR3Dr|h)2$Ck&0yleU9@{wDa}^vy8@(3 zy+sAuS9Ae8wwSqJ4rrh6N@b(TXRV8e!}X}dWyTg-G5A1*v*(&yxnvB^TGaps&+u5G zjntQRCnNiR`lNYXDsk{AO#hsna?{IM0bvW9 zVFZdxKY79l%%bxiVbo??BI(w6l~0~ODLE9=eYGU8YNVKHAJ{<_sJJ?Q60oSiYmowP zB|G&m9JAp1aly8y{6e2#Xv$2V*S9ihb!zAhAG!m*S8a=Bi3woRr5$N8z3b8x-rrv| zyvEzSzi@wZSs9iuwqNXqI5f8w#SXbm>l3)&E@zQsHyHwkH43YcHliXLz);d;nnDeG z0&wOPj^+$!x{lYT+B{`C1slxsx#ceH>payT_k2Zv;=kTa%m6zlb-f1P`COT4&sj6U zXLj1B7;9@QCwJ}Q0^)>87M}|SFz~k6`E5?L{gN{1m@N)QulIn-qn2b83J}X58i5g| z!de;ziEb`SbjODt9H}LJ63EbtndO9Y0BRDk9N3%TTmbk}=GJkCN7fHT$NSV(FUJNm zGQzzr)7|b#fgAB$pnl8zmvrCIv3`CN67>MCz}+hDisp&| zz(1QlM9EyL=~u3N9qFK(XY-LTCvqaM+3i&7?nWmkY0MZ>*e$E#1&5DvnFR$p}Swe5^6iu&1-XXv|6W;WI?)!5D+_DroV{UnF+D zqie8w5!3snnWU28!8i3Fwz6il2iC^Bf=*7JFbR=XhWu0+2>dy_VJf0P-pF-kbfKAG zFr5z2kB=89Goifg;r^6zf!ASySg8N;dX#~v^q>tz*f`dfJHzB@o*hPW)J7r{3PfIi z#V2ns_=S^l2@v|@rT<1Rj1r_LC0a>BF)n3z_w9*#JccvLStD~craOFO&W(tUYir(i zyQ*eci6+gL36YizufpCgKbWH1t+UdZ@+9QYhMUHp4Vc zFI3svleDMwbSt5J8XHQz^VG`oU}aK!3K4_+aC7Sw&t1WznnFEyH!9I&6YDLOGq~I? zujgTa?efM%q-*a2H8(ARbA|7Pst21BYZ_)Hrg-KIj2 z;gS_-i-olbyCLDJ87-ZRE-|Z;(Ajjdf58x=0Ko8W(|v=S-!H2Y9~gwpD8@zM^I@i8 zjn6m9<4cbI)OEGN=ISXPTw|Wf>{kfRR%fu6B0Wy+Kh!yiz~KxH>sZ^G^ba38)Hxk! 
zuE%3)kEdNE&D%ng~gOitxMvVj)TB| zq+)m}qm#x21ftgqw{9$!O<0S}qK^`+1-xlbW8!}rN>(J(A6t3UDU0sGWi}BaG>WC+ zu3&&#;WG0IZl5V?$J?~vdMnP2)3C_#tGV22AwIrM;nCane{O7&yX(|Hqg|m6KBk19 zmJA%^ge++eOXK+pc%PSNau;508lUDyr6`T{ac4qv`ra^__{x8S45P9Ql*mV z8kxp6@9A~aX~M}~Yeja0*?;`3TT)aal@2WgLXip!5$UPVsA>vnBOOHxI(n47Km|XJ z4%;=o@*%T^9slS3VO>Mn(rpMPI_`Aur<2HIPvZ<+t5Lo_$)~DMFv(h;vi%jb!Wj1; zeNY8i)E8$e4LcQK0Bdl8!5dQ)mvaD=pZUXM@R7E2c;fp(>HPwYJE3|o9w z@Klp%wJJ@I#$(K4nlD#jTvQ-tG!B9TirJwc!*CL4>wy7}6Ah$JL!={(#m+B#NdTv@*xc}|K83fYyOy(Y?q|$iS7uS0vzo^ zWeClo(X(gC&gSwQPFRslURRNWFQt)0XZds-ZdmKf`yJNy6wElYDR@>DN9*W6P<9Jv zP5RidUG_0iC-eb1ht&Cbh0&UTwB0h6sbVT}*lg2PwS8kFooBTJw)VA8yt%OXO1}~o zU@lvA{fKY&6`sIw0=9l>e;-RbfDJ*=RIsbZXtlRpR83~@UnzI3RuIS+{5}F`9yg2H z5iy=^p`Bb6)qY}asefr2=U9#oO}cQf8}N_9i@1&si*M(McvkR%=e3|S!<;c6prV)~ zzEP1;*XX^19f`z97tXKTV;2|=IRYNd$5ns34j|B=W}AvA_SHsyllNzi&_U{g6Q~3S$9y7_rj9+XGj%C2?hRW@=^n!aEVFDf7MWq&g*kR9N%ms63 zYqYytVeCXMoXRJ9>5m6_d68dpX(@)_<=lC7uVW$8{tY1kwJ|A8V(W;$;jFlVA4G&d z@NGl&->ci@igKo39SzVZc=Ap;ZTEi5s=nB55@QED&T3&1s=xrP`n*93!qCI$ijVLM zwXbGxCvT!d4d2=WddE8aDgBX{LT0L!kcC4LIHO6?&C~77mU^mb$Lg0mNmlDb$@nXJ z@*4WC0g@+QnlYjbxcXaG!5ACK&{wTI+!58~ViMvE6>I=kSb^)Xqgu#jb`g$60N;}? zi$Y=$%8|tw!Ps#EH_8@X38#GrHQUCF&ED({N>DESDR&xcI%yDb57NY(ikKa3;kGEN zIHUUB>eITfG9Ebdx=JN2l_kI7L+|w8&{*_+-E{vW`j~nto2O4Edylst;~kH=y_zp{CvfNx?`L^3{IR( zm+d(b;>J=cegjbNP;26xZ_rUTQ*2PlV1f*M){hc6xsnOTvY@3*xdpBL=Qq}g0CCxc zLDkkdUc798Pw`+JTu zlTkKT)^kICQqIAM2KZ)6s{|#xCs6c(9_Cl3KaNn|Fn(`vrpjr&iWa3*jQOI$t0S@$ zHO{$^bh1)ehsOi1P8I+G%L7K#^-yRYg~lBxRL zLf?&P1}guvFotzu%tRP6YSKM2#r`u&IfT_)0G!3f;%DoNjQoG!fNQFAhP+?cKcut` zHbrG_I)F+&OCwX+_^}sOwAsTbw)vQJMF3Fv=k2l(H@gB->SItK#w!=7FcE3x&Ao4{hFC%bpNX0w7Iz9t#@i?wBj&(r1H@ca?j}R;BC?IVwe0M)nlqO zPrV51h_3C6ZCcXb`(5;r`awC{+Nkd0b;PR*CL+J=r@POj+)&^kHQ!ETPs(l?eZ`im zQW5yB#Wz+{E0hSIyp4vL@I=UTUK^5J-{LiKdTHH|n}$4pa@P}|SFt*hGfjU+N}ms} z?CMTc(lgQ-d9m#i$hiaVf=BDH2}%4CCR8mx&Aa~l*hkMq+VZ^7Adn495m_Ml zMX4pw=l|{E!Q9lEMyEbUHov2!G;2t9-i5<;;03nA5@i!-v&0=Nk8u!A)8sC}Bl_Jd zA?N?X&I_La#*~|qcgsA(x4>C!Pod+c3@O9oqThch6gnm%*o{`AZXSBrYuoHV=?GfC z_@w)g8oW(}(86jk(F*vR+v0Kog5x~yl>Untc6Zm+FREC&&3T2`%iy78N9?cmE!HQ(4xk`#T zI7Xgk8h1~6w%tj$G6=nX0h)0_&1G35Hc7tu$C;sTU{Rp1Ar80oM1xZY$30HZMHqVO z^}|6g3C%?vmS;|ISx5A)bcOG8(j43zsh$NNpi1pCI0h+tSQm??8E)oo$twb*hurD8MwH!c8Ng;cQ6b3_w zy1(GFj}Kw?18LG0;rB^gqD2WK`A`+0J$t9|L;e+^3+ksF-pci8feemb!{te7Ov-t1S71v=~MbX>>8E`upAZ)mI~uRt+i5T$iG@CAjU}i{^UT=DsvQoS?M+Y z$rL*C0gsLKVtXf6sa#lbX{GT#i$;e!?LQ)_A{Vjuj@m}L&A^B+HdBAv>F)s9CLl&9XK2= zc{)?I3w|w4o~SPM-*GtqgWJQb2s7CCZxFVG@-tr}q5s5e?jV=t(baASQ3uE&D6Qz# zR<}#WFpt*QNy+FJ_vOk?eL5#}u3YB5fAob$ltiOK<1JT#15VYk_Jct;Pp24gp0Q4o z0SfWfTM`|08wj^oO^w?i($9?Bfijm)vB;wn>M??l>1scYWB2Cd+7h1ucW!@x$MG~H z?sDP{5a;cNeliawhhz(Y2d7z|1j=$;Q|`HO}Sm$Km%MK0sJEu#kXTwWp+;#RxMzo|-LuU$MV5LOJ07&atan&}z}nHoJmwsj zYM5)j1lQ!z9Dqoi+pkvgGd#ic2$1BqQqv|&vZp|T9nPf9QCni60t?DiOcgax8)P4E zAklf;zQM!zGrGFE51(`E?);U$E-gJmigCLR)oHHw(!a;ym;C`G8azu}%`2C^`2RZ& z+ZVRI&^{-zoA7=YH+@Z|?btEOMIY9_QK1}WKG| zzfoSKAuo*+_DVShFfux1(ygXgdZK;;7O8}qADhuC)IYfH-R5U@*qY|w7f}Cd&3vmg zl%TBnT(dM)shVrKO0_EOzt9ot?ZdLFPWX`kroa@eiZ88q-?5FiAKx(R4)Y5F31EV( zoTQu15;B5&B4!3ztGot4!NMycNY+u}tLY6NFo-l_+kA6JB6CM@<|3#Z*D07{=D*tj zSvbn$h(q4=0?UWzWF}C*J07!y49kj|cpdQpFUVOgedS|?%z24HLiFPcdv_9yzJQ~k zbp9Ri>4q6_IBihINe(E4<&0fSxW|F|2euVl33irx>#G3a>r0HK9vPps8{_0#KE%7b z&xp`c?{^IEK8_Y%aeJY*_U_AsJLN-}haUb5J0PFfRfJoSeo^X=De}EN_n`BM$+`5Y z5DZP9G3ugOe!t?#O}Ici021#yV?ha1k(%uu?YSR3QHve%+;|y;vMwe^(LBparai_? 
zHbXi;{AkP?h1#v_zOgx7=TZiUQGxgw3>goy6#lYJ{dE;Y{v)x1V^D=#?F~8q5p-0k zb8LSI<)s@V*j=Uzj?Ago!#LyeA^ZH;+hsO%m7xo&+*$2?Piu=kY4xlSEU2!q7IR zkq?1hH+A=l%(uykVph37uhFEmb)Det1bp&D*IaNRV!?13Y zb1U4&`bAXO?J&3(XrWY;b-)(st!LQjMfb%SZ>81f2*i}Pd8z?9Ab5G6UsK_#- zHFi7KLUPv~V8R7xRZ7vG(3BU?259w?cGf^%&N zlKpccjh=PlDf!Fn*JQuCDa6}~>~Hl?)OrGKM_RD<%H^!(z7is^G?FiR_M=4%_!xei zTk$Gmjiw;wY45&6JYvRxHZ_5QU@}pvlfAvYxt9-U&Ct=84v9H6oqi?NXI7fqBG+etpHlM)j z9s~vg4gi^5I%&rZ%}?lEPyz;31}- zJVa)QP3eR6cYJj{SgbS@k~Gon(P0}b@@KBnsi!TZ&@f?>)T5%vjqfJVbs#yp<9`cqt8@)uYLE3wi(@qzLy0(&0`I9zEpMkS=`9#Dl1=w~? zd{eV52RCVaqasZeLF*{MoI(O9Uf`!aT>2LvHNav~R8P|Ak6kVcLiD-bakd}tjeX}M z1&sp{U22`(BfwtQM%lfV@bAt-uwfA8!Ug!@Xn zpd41@f@T+k!GNEK6aG?)zngj}pnBfWzP5hH@KU{Fg$*N1EjqWttp(Iv2y_XBqLOxm zi3-@7+Yx%ZpDGTI!dB*m z?(gJlo`k=bmXwqn%{AWZ5U-~_SS4$JS~Qj*lI!s;dILGE5;qCcJb0*QHi~AR#02`O zU614pFhn|lwHoImzxlwnPjROm!HF0$qO{ofe7W$A37pjkBFvc((4P`|q6;lNQ6_C! zJA^z8?8`%YbEa3W4b!!pA1sUFbYuax_$VGjB z)t5U;630d=O~9gs#f)1N)fcTN2caan903yh-tOlc+(*}Kh$}E@G2z)w zffnZc1=5D+dOZX(tV(<_vthNcuQNn4HmoC&7E$9;Ua)?3g?TKO){ZTn1kV|7MceQn z#ut0tzMkKZH8g)j6sj=> zrcn|UmGKYiON8aS>oBq zLuA+4CB_20YCqQCdFHjFnc3Jq*?VR3CT5^j4+ts4}@+MO9Tb zh%;)dlaP;jA3#H1@dV_0&?cDH1;o;D=JatnhL?>VhY~aoj&Rxx4-^|%h68JVkzcu; zNBfFNO_l=LnMaKuhH{3rbnsQc3`_d-m|ZF~6(`QKUFL4%@Q({)ytty#^hSY_)r_A^ z#ioRpwyeojU9%~jBq2CdJ!y*^x`<7Msn8q^#**Rv2f$(Zx#MLRDsoeu-nF#@x69Fc& zT;ScpzDG~*pM~kwc?#Lm)_Cds@V+MXgEBj)R=(dbHa@9MM?E6nRgf9ZWo~26tjqR) z{;)`iaGnnN=-bLZiz|d31Y-t&_(R3PES@Th!vs}xkzAp$4sxW#&u+4ZM6^m}rmkP; zb?p{t*XEszAMSk6=SVA(EgqK(L5yhJ{z`(YxnL51- z(W5)Wp?^FV`{`bDl~b~v8GC71Ug%_56PN+9X?ztJpBaYx3asP~@nDL0KHIZ4VJ0xn z_#Qi zB=6h==l|b(-&)REx`st^zkAO;XP>?IkzMk(^|G+zm_Pz8g8%t~VXd6UYT<_*lG*O;*rM+^bJS-Q4CYb4%96fq9rN_b=<|8Yk zq~OTLVjrqiCk^2B(ojx+{0X??eP$opuYSx|>F||>iDFtP0Kg-NGK3Acd<8erI__k0#XW8D_S(3K!*v^=8 zMji|Pw74_50+{J27>Mb4jH-EZV8@q2@hZB5y1Y_a=45uxK7p&JPFc*kSS6noLrRmV z*XNx}3jwSj5)%4{QePeRb;H4c+D4+8F-v$pUHoU{LD348=QZClQ_GX0dRxS^zStU5 zUv!Y5CAJ@k!s|>lW^ON)x8C{9p!hF5dP7vyenK@;`UI{{z7vaT3)FaXXwFwbAy>SF z{RnO2&qY98;qxDbU~t@%Z1oYjyu}f;y4or;QC)YiZdJU*Zx~65ZZCe*_Z9sE4Pg z=dL50QM;7OC{;pNH+09XkYa{6 z+Rfcz{8EuyZJs~*Pd9O-l`_!RclGB6OyWrPP=Q1kA?nnUN4=R&pK&et!aF|LhPA)$ zEg-VVGlEwuoZU*ME;*~)+pk3;lXW`0Tg~oO0G96TjoBq}N=~ysG0=o>rgMR&*>0)2 zoeebA-iHy|(&>U3f>OE~GW{m|_vUJ+g^g3kiZWp5IA*+Kf7^&-f(NEICBui7ow(^?_@qC>25sSkCKyH{VU?TUre-qiop>7v z$zwzY6kxwCVkJL>37DWAGKKzm#d=J^eQn3&nN&rSZZI=2vVI9IC~w5CIX$*f7O0nz zuBJuvQhzp$5CbXbb=>yoBu%uaH@D&QEBgz&iQMv_Qp))pZegX?pxBHsXoio+&BW=x zX^QcV$1Ipe4}E8$P2vR=xu^Pqu3d2R<)3ZSP+1{MEVkkO%dc4Geq2pSyKo{YDY%|get4boQv6AP#sQJ!t(iAX{hA|HQ(Uf% zuj>7}<-RWHsh4_rd#c!ZMf%o(VXqhU$n>7@PMsO0(9Jr^z_fZiV*(rI4HIm%9~_;G zX7n`)v{a)USwzy<{TLmoR*~=nQJE}+U+hR;Y@ob+XPo(FIPlj{f zsc;okm`GwT=#|+NHu)akA1Y>y56$;CwF4_~FOWQNpJPkSQ;Mwl>3FC6xV$tDp-?>S zLZi*ieXo{A>lVrZnI3(&uAxN)I>g0{4zcuFkojmKy$gVO)bDzD2Rd_Wu&(ikEuKa-h#LPT@k5&~djp23KZLGZY#Gq%vnqT~ZvLmUEUi0uYEQ>7 zx};8pIWTn~H;Y4i81G2>7*nEOK5P+TdvFcs&p(jO%}00c^I1n805a0 z6Y~R63wME5^#5M3V zrn(7&C7*03m`~L7NwFVbRXrp3WbU_6UV2$5tHM9C_m$LEcf zw#E}%eAT;x{sbhQJR3JafMG%+ZnznL=D?Tz&``rh2J{bejrooH7+>t}CS3_9C-i0ZI9f#IuWrJ>re0HaP0e-`A5r zH~jx>7mvj%-0wCGx{@$0heV>2>6a-rVeB$GY!uV+Ey(CVW6;MW@|qwwBI)lt73Oi_PjBS^+d(F`qq(ANGZ@2NI)Rz|82-@p zbBdPeSyZ8OhKp5)YaXUFyx$+V!}Ec|`UrO=o7|8hdA*c@}|= zxqdafXYRxy-32Q9LGpGi&GP%1WTm_T-HF8@UFk0-Jyi&-cqsZtDRz?1-JJ6-HV-%d z!eUYpr}zdo)&T~iVvS4zebZ{dJ?{c~LTz?VZ8ZcKuih;~f$oMHLp%(vWEZ(eeJY~~ zJA+8beJaHNrd+EgFs^siO;-MgWos6#bcN@}@)3$0GCQIh=JFPLBWhPx+J>FUejrem@zxW1?)>XemJjl|}doGcFN=1dfdG>4{B=qpE zeRE3Aj*gD%TAOm)95Q73BG|ucCKSdTcGy6M`?U8E^*UH%_I?$(f2@&a#`{fjRJk4R zCr@^R5d_<3zu##>-00HK9njdDWD>wQwP*`Ax+)Xa29(R|R8KNHWU}L%B 
zh;fVpk&dziSd$S~yc?pZS`0S=Ht|2Gw@%Lg#NZ*wZ9LavaLn75h5hM{&UxzMIKRw% zM3xHS3Q^AS6CfsCjZjoML1J7Ncha|eAN}7-6e4x%*Gdnlx9jVtX{JyuV;Umv6@zis zkSQ+d-nd9sR@c%OqPg7gosVQ9^2|h3VsIaJUy#RLp}fnK?7mo$=ida^o#98>pHHmE z!7IKr^tv!H%kla0oCQqMPz`UT$x?3f+Xz99Hv1AwzFM;%Xmt zwc{3w9xwYzubdbStiUuoJmZC?3*iQ8T?7{@Rk3rlXZNDxdb8{N>=%NzvjFO>*upZL zKeeX-SgHpU7+8drob%EGe9+AxIz#@9Hm&J*-t5H5@zwS-noPRS$YlyDQ8^{FXYO@N zb0)+n()}#&KdWnz$lO_G zShfD`H&Il=cjpf5^s_5JAQUuHfBo+_QJd#Ar~I@=pEFV-y1FheVw>&rR!S;IOTW5* z-D-r30A9(!ttT(pfMXWy`0J)!edSM&uN&>zQfGL4@&fOR-=x=0j#Z&wx1>DYS(2S> zg2mOlt6%Ng36hi0GM0#D_{Lv2nCB7gFhcpiAUG>7HXQR4!VIl11*2_pr>rU#F~G^C zQ?FPsdOhRtl$qH@_n^Gsx&Z&5@Lub=#_u?+r$02gKUd*W#^K`$^NjMbB}u=Hi*ID* zSC6NnfQDjYlD97CqB86oFWHniwz*jCYGv~eJ-T5fvtUCqe%Is5!H^gnbUu;x(Z9Cg zRKEwi;mG~pc)Wcd?|#a)KUVeq8by6j5bV;^=eR7Zgn8p=9dwNC0Enp`M_HDgv6x%k zz+!xu+0gZG!2bkuunl12VnUd2Alh6F>YX~2BL!=8zHD=^vwQ!`aZS>C)~nd6w|aNv zJj!KjTSYP=AH5-8>Jx~meGlP#Kf2lF<#`zW+E%uMtg7K4dum&>l5<*`bWWdF zcdla8y1G%Mz>Eog9wq|p_SkWtwb2smiz z*fab(z(bE{#;Y21@OhKW_CtMQERKfv6$xyWr>R!d!$CrE4aL$08hEKZfA{v!kAdGR z=)o>Z^m#-CEBXU_=Q?+m7znPl!Tp^Uu=zmo=B%^jHB2A2fHBDoF& zMUGS24W_?I=2JWVC!F0DWE8W>7h`!x$_Ac=Jm-21cJ_DOyD|bA95jA&Rx_fd8!15$LnKG#Ig#qb6gdDGkS=%E$YU#B70y7ZaTd@=n|LJ9S@%N{jqOGmBjjcf84S7Zn?I~o;> z6|g;jvd}SpW3e%Bx}$wfFTNly;O#GYy^QSCoQvby)>Y@An03|np98@R*aQF~nlEE; zB#8H@Cl5xn72*5Mxn9{gYQDYc$L5PD`<6yZ&K*Y*v@_COv{<7ii|lp=H2k^h*X(Hg z-;^FQSbE5)R&17fGw^raD$_OhyO&-Rn>9?G-7kt=xm@Tb9uzTbb#~*NEHXb=FL(a$ z^*1KJDJzhxJ9~YW(%1h;^p$0GZ*T0mfRuXG;}x$*8v-Rci>t43+~2i1=&b`GN?_>3 zg}?LN?Jfuz_&xbbjWkr1HoyM7fM_u<>*jO%woHL}i>q!%R_xJfxjo5T(=vNNey;4e z2dLc1S}469)^E4?#AU>WY4AXCPWb`yl=3B~8VSxWM2F9B({K~y$5da-Rw2PD3<-g% zw|vz;!3a>Sw0MvRcd`EgBnz07aDVl_DMZ49HZ+$GEttU>vdPKFGHI2$y;LA}K&4Se zIRk%cfGgu?(5y}Pl@uHNIi)71B8X_U=6YRydY~xRop+1vLoOd(o$|vl*I`G$Nvy=) z8kZ&xVa(O+O)(7%j+JR`EEv^xHnJ+$buQR{vgBsD0j;j0n9bu0IZ8Dko`o8X1>DC`4e zp1nsv($_sXmfvOY3m2!(`-%G5F)ke^&UFpBUU~d34B`&INBV_@qrF|NHkB?kp;moz zBIs`0;T$04SdKF)$H85O+2037vv$Vp+amKWU%*Dep!Orf_N~OJNBN$6=^-5jbMRpm z{8K6Y(Sct7ObiSs-q4A>^ea=}xcCI(9eZ_UXdZIsaEoSf#LuQZ=r4ngmS*kg0~ZLB zD=QBt`*O&6YX;XtFYU&{LC=mJ=Zh8EbdhuJerI?$jyEwY?#@jE)BJ!b zta*Bdxwz_cYTa!3hyn+6<`D2udwIvB#j!WKxKU;&B>6n#p+l3c8E#t6en)%W%m8PY z1yfuVzq7?H64{&aAQBXo+M4Bj>rKB+9CZxt2&o&pVHElTx^LVB|Lf|h>}LQtMrrv2 zVOfeA=x%cpJoC_3SN*6P1k3dW4*olB|kpYaFjp3$?7>6YV*otB3>&WSZNppGif zbQyw6p;L`%Sb&9bSVg&Qr*9@XY+ak7Z6ejs_@U@4(=t3>@8g{)vFV+L8v;MoC6DwZ zvs#Gvi3EPS4i*>q%XM6-4H%`~w;hIw_=1P|{z*Lov9?TqNDL@Y-jq_k6`pWO8*eSk zUdoH_mZ|zg9r4uc`}{BT8M>JrPxenf)cgrz+;tQXWqY7GyVRUvNBz#sk}YJ0L}Fz!|}`O$VXbpu4SA$Q@;|?fRB%~bamhOB-}eC?CPo_ zp@>PQZfQwcEuL!KfU#=W{0+U2X{v|Y`fE*8^4RCQz4or;l=tzj(G}&~>{(=>gng&6 zjn@=ZUwK=T4;;(u#>;;fT;LFm8kB5Fmha{R_GQJ_9QmQyye|%TkLZCZ-RkWzkr|_T zGj8~+b8d!|$9uWY_0E@HNX)@tX11dN-SSnB8HhTx9;W$>+gQ}U0j}!P702d2sj|8{ z^{gfh<59oxfhp4I^c*8srHut7(r?UC#4!Hqjq@=}u_RNMD(mc%t6yjr8zM+-pwk~M z_sb57SJG};vx$OJd)7wrG9>^7Ao5Wdw;}gg%BJXc$8Zk`L6keVUOT4MV~?0+Xa;d! 
z$-Y|(8R0FS1Z|N6*YwAH1_o*E1Pv|L2;%6^pU|uRGlzsl^?9D0im`=n7t)rWS=y5a z^vD+hwI$_`7L#aiLY(AZ_DoV|7bl^i^1|0da9k(n<5@~QZ#t+Ae=a9-<;>0(_F--r?jOPnr6S5j4*sT!E z6e3D5HQqkC9~yAe%1*-lrQtpxsdlkVdrJ;{U8|TaTGUIbnrvGF0r7`b5+ZVICJL;ur zqWgAFz*$CvsF`IOlrt({ax??SmSm>nl^$5VtK6j1QawG1ce)M)6s}8&(ab3@(b4qD zS~uKTjHofk#s0#Y%EL8QDQx%U1{h8a0~&X#_U(vQRLM^Iv@NE*S94>gTepGX%0 zPPWXEX)&C$92D0@$uPvWSo-g;-cc*LIuBJngZIW z=$RH_V%oObe7wnqM>VHB7BaE~2<&ZN9r#|CUv>PxyQ=B1Nk+;l(GjVs=F0c-`_jaP z*XW|KMpvEET8Bo;EQPKEh29#b-8tbdv9l(oF4fdC-FQt*SCi;B2WqcWmYdBj*OUHT z+G?Qj52m=b;qvx-=3}RCyYTf6X1ke6ofWI8U-EXI5XFiShJ!bVzOkh6*Ba?2zRG-Q zwf>p)?L4B$j84y!9s+?-wMu?D;wxTzxd4>q{@nFLZOkM(HQ;qMH9l3Yrl(y__l$p+E*d@DgNNzH??=s>$4;H{bKbh-N`WAu1z=AQT1g)K0 zW`^P8pbj*;MFlzF{YkOX7?w%?FqEZ@i%AK|p<1MZ4xg~F>rvdfGozrIkeYR*nIsitbhNtsTDE+zP);pgi90FXj-qXlN`2z0(wV-X z;lE_u<(Z5Q@6c(mMP_11Z2zA6$Q|?)W_Zab%^y^uJGWa{HbhaXxE{17kFH?h1%HO) zttoWF%N!^zt3-N=m+wL_YOEwHmt@O9Q>o(7{N0Sh-Rmv=U9z1Uy01{!{{RDuU29eV z2L4Wk>M!#NxnCcgEa?n>sTG)gvLgL14hFPf7CZvp4@5cFV-7x_Z^0kdqSZ@mM~*$c1C)se&2>d%~g%y<>UahFreDH_!$-*Uzh z5|uK#=9TCfA*c)yT)i{DtbsFGU3yrRK4zFi|0G+z!yu6a&+O%o2pNx)+Q)UHt(pr1sX!y-TCSq3KkNe0FHi*=Ah z$siZ_l}?7$^pkY!zqF9-NqWQ#_oxLy7*=q8ID?6*jfpXvo5=WML6P9$IQa_fYXI;e zbTYPOk5S9!HV-?!Lv)&q&ZQwzyXA|25Xu)4Lhqwb;}!*fKt1;9<*4u9tUqG71L|k) z8Wz{OeH5=J-suC=&aL7}p%Qv;S4&H+uIJ*Lj+G4B{zZ|q(Bu85nP^H=bmqLzLXKJ; zmat49%aEgnX@(n?PLqRxr|4~EF~{tGUmNBhMm4a69>*}EBUCPalHKY76L`&6eCLU+ zxXaBdcg?ze3uM*dw)w2zc_PIIcVx3(;^r>iG_FRE|F279;8sF-ncjf9!%IK=gWsjO z$;ldL2e+(lL@GZX1WoMZdD?lgH_r*=DvXCYL8!pMM&*LSu@8&3+Ya1Gz3Z3(BQfwN zD$+-0^QF?E%x26nDdm0wGYWIVhKNwfFb%h>Zw`2{y1vcI?61z6p?V?FCZE zLfjCL7>5&k;kF<=H`R7^b938qdEQ)@O+2U6L?ev45#SGxeZq+AxzM_>=T91$vWCTU zeHrSVweEX)U@?*F3XN=ks|!ZS)ICmR2fpsRTHOCkpTkxDic8C@S@r6Lv7o&0!8pbDdB`oy`lOwiDaiV@Yo z6OXE52PsFvARUu?f?188+`N5zjMg5@t&SY+V4z_}YZKInhoe!ks9R`8>M_?EO97(> zBfX0p;)1X-mFenW@oMYB_@hjR9A#VYIFp9R!0P*wbcd6N8QJ<5lak7_$$)vl?%aK3 zps0+YF_h~zC06Cr{yWN?jH8^(a^m799Y3+-*c5P283*^2@h>(N_8EF_jJu*TKbvY$ z(4Z#XS?Hj8C!rj29Uy+2>jbYD%{;2^uB|^sHT0x^$Qo<2?Z~30Jui|t7j}=+Grru9 z?cj$MvEmQn$AJJGYCBCn9%OuUapT~d>_9`qFNbn!zgm@T@69Zj%}}59xmRX&@~&C_ zw8ECnxe^KjpmAaBVR0dQf$1bl;V*8u8z-Lz)T3o)6s^_mmS28>4_E!eo$W%sT&Zx0 z4xp>`A4(sn6DG>D$L=K2b$6Uw(7>4{)Jpjs+03>+(TQ=lK#8G}^;v}gwJ!kq5yO=M z4Vie9@T7hId!H}^#}{U_orD=JuvDsVR@tA87sZtRWcs%x8y-lneRcj{Ul=4go?%bY zB~ln*VuG{G?(F9$`oh1aQd1(2t1vxR8D0Ve`JiWTF2J+-EXFpN$|2Hv=-iDqwo2tD zhumAdy*7nnsxz`!Q!9&mEJD(_>_fd0NmYiTX^t+kU2gqcWX>-8xbiLerY5E3Q<`yB z{uljI_fgNqTLIRhNGYL@SpugmWH? 
zikdYt)*K{@4y0Rp+EhHa@4aBR!>-fLK=qD*ITop8ga!tF=|G<^0K#Si!~x+>@&+g#;1cCt(^8 z8DCyyXN;C116$YKv6s5S-9GdU=(T@#*eGl;*kOrg>rwrK``z76+RyqqAowKg^D69) zwT8VOmHYDtM$%%U;G96+&LC8>iIe?G{JhU!Talmtt))f z-1}ZeIW8jG_qnmCfnAWe*Bi1InhAe`07?^ zXH;Wtd*K5w*gf9hQ;{U0C|zZNsv5I_qzPq&vE`Ls<=wT_XXZGTU$^3XKz8oAl!6OG zfA(l<*^$(6MCYDRiov0_B4UgrI{u5wNSytXxso~gSXZrlRys$3lx!4UPxe28A+#jP5uRqnR@PA2TQ#2WrpHo=Y z`P}KB+CcNF6|U1;Yd9aIXr#t!-34ZlDK6s$}*68wdzj-9*yeStxV7BakGLfZ~DrynW4&w(d^Sy?kppG*4E!UuL7$6fh*^Lk~x;`10+i znM#xQrzw=CoDD(BE#~N;n>^D{0>Y5qN0={L_+(dJSl}HZWqog!RCjwc4(fOI`uGg3 zpR^vP4>!TY-rk3=dgAp7MH4k11y^~;>`1Q1S;ON7lPn5H=c6Zoz;rboI(KUK>7zDp z0uD9?W^_8xR!56Zs!4?dqGq;|zDx%1ztAnc zVnGo{!%Sc|nc;KR^4eee$J2q$h=G=olEYN!_OzzcBed~M;Q|N^{5);Y_bkHUtNw5v z^n%xxA}Ba2$~1c#JdBmM|6MEI=s#?eq{m~Ju%07OC2oa&GMtX%I2!FNy-Q-h5u-Ha z;Tf3HmSnC20-Y#J9v8Vqn>N=QO%tLkvL!a;E_RKge=BeG~}sCJpW7Bi>FBEX=1 z{-CY<#c9KmQtx+qaC&b3#p=IYI_`pO82{0~VzmCU63R%nv`y?0T1&R<71Q#e8db|r zfGJ*s{nu*jo&EMfkV}?iiG2RXgeYXdH1Y+F!=F{S$F?mrIQ-d@R}u-_)6yGRrhP6u zrT~~Lyb%mvE*mARP@U7W^ji5AnSmoU-S~JF zP)8e{My`g^N?K~jx$NfVXo!n1{2Vuj7#IBdWVMw)SCP*^67%dBpqP04O|nvfUlfAQ5Qs2iUhw z`k%JF?Y1z*%JitkP^y*WEL{>)B(i(s6#m_84(;z7w3_;pCvooEK$|sJhSNtZMEAPt z`zWSSSE&8F68F@Sj)Clvk?wm?$(iZ>X-O54%g3fXPPBsIx?_K>=gNX3nrX=;S%+~M zgwVnj_z%oXDv)^nE-!N{Xk=L2Han&T$7rP{(}dr?e?=VV4jnE1ccrIPX6@2XYh5eBV_sd>-F?UBiu#er-7)c> zr9@H$$vcWi843V07%-rGl7UT_PVI?^Rja?aQR@=lV(A2;L>u91mcTjV%#R)WSb>yX zWzKQJawfjoGOg7iV5=yS@5=);PBjY|HzQ_-6!?s;GtZ6t?ZFcM zq8aW=d2k<7s(WazOa3ZlrWMp4@D{;?ao-CnI54VwE(C{c)`s~R1`x`FBrH4(9T0Q` zMjPxel=vxiUk0K@%Vd%(!KSD4`PvdCb1}QA>M-i@!a}VLq&naQ)70908}3pS;U^I; z|3Hy?3XsQlsEdBpzHX!mE*?fXXSK??v3`doE-433m*#*bk~=Jq{&aX9R;B!wiO_U#xUR=kC_vpCm{9kQm?oEIjswW> zkM=V(!*7UeuC*KhmD&4wnSwB8IF+DI}1oj;!T)p$_M z_)vj0L;8t*5X^CisC=0uwYD%r%)xf5alb~cP~CTf$vj-d&m-M$&y1U&J%fl-5;GR=HN&M`KqCpImxWaLesi+4<2~%eyKodJw~-`O=L*G@5AQ=U`DTF#U?dfdln zrCxXt_ov%hKElyM!jR$)_X1@NEjBHR_E43c5 zSWMx(cO#3o8)fH^a-LFu6zK@{9+sblo4CXdb1g+ik2~2@()nN-FyIzRr-_1H8;3K|JvoAxt5`(|_jL`5#+D z1#b68eI-)><{3>uL;(n5ypC&g$`Xs1GEg2&ub6zE-#Ol6IVaORI$6xg)RxCHC2EyR z;73zLQs%H8<)Cy8kYG6>gv`(PzMy(c-3$#CSz>~~vfU?}MFyl+D0Rr_e zhnZcsaoE)krKv(&mY3I;@{;Y8bJ#^#GbC$yASK1mxGqfR(Sdnzjl6qUA7a)Mu49Ot zxdJsmB4jIGooDw9WTJXqR#588N&bFyeyg{}){}=O7+z%8CLel|!J7;^`jQIfiPUc{ zbXzz*Nt>>LKU~#?>*_gI|s@vhXR0y)X!* zl`#+9g1*HGIHJjLBVrqqQ68!dB?OGG2yo22EseQb+-J-b? 
z8#hn8LBd}My%rXpe(Wl9MySNaXEg@cgpEG0_h;~WMs>eYJ*Ro)F=tVckd|4rVc15P z$Q3vBj6HU)@UYBnht6L1+;iC?7U01hpW`?wg6Y?9@g80DS#ZA6VuKPsi%6y>gJs?T zo#gyrjnE!4G$bCEF;y&d~I zUcU-4bC`YwAxOsvymq@P#3-9_<=0>oVpBEPi9X(CE%UEa zhhZEL2tWKlBq?(hZn-^mLNR+|wR07;-~DoT;0?69^&*-pqG>^VQcqvywps7)g2+kz zno((-d6W8NTk1*2(DP9+J%W#=UJ*!Vf1txxhmbve5LcZGJA$4e5XO0Pwy7U9X;xOQ%kB2 zQeId?*Crn4Y%bn-8<0KzBc?@3R09#+$d~QocR@MY2+c5(+BMTPC?xqjlY+O$;n0#- zU$MEmb-xdz;y6+nD={fJ8if&s`~tZM<%LkD|F{TEz)+$?n1ZVu2MI+xV%OO3osjZx0?sy#&A)U?DBUX;h*t&Ka6jHv!De&U z`{u1}uiUlEpM8z$nc&MKGA%<(GwM~L{iW6r0q=$}QW1qJoB<=V^!|Cj&8le3HRzVH z@ux}d+x(Bu?t$GjQNMY_k$Z1dPLD;2{aqH(^Sn+02XA>>xF~C*I?0L7bunVK7QY~p zZ`xN$+b64GX71=aSb_uF`uaPo@!E|`&G;uD0FR9QuQvlYLYpn}OnZ43YL;6GtM-25 zpB~z6wPRr4aCcP?Fq5Kj-&?TvrWuDXcQ3O~nBI+?B_V|4>wFbJP|hHrBm}pr6ybtT zjF!?FtS!50guA<+IrFA{yq^XHO)?=Aq`m`xx?lct{|&*AR^j7{52z_OesfZ+9>&nX zfX_3;xbp=@MOi7OVy8I{fbYr*rB9``vA0*Fn+Lp9=tMfK7`wlFqtM&_U0%^#*XlR7 zJ&f9;*VkoSH#Cm!**=w`?uCUUnzHZ@L`8wY0luJvlw2JD85FC3!=XhU~273jQ$3l1Z@5YNG^9YwwSYY|5^6lvigS={Y21G zuW%9xp=}@(0t&t*PKwCmy7!%tDewrb8h|X-5XA6L@qz>aq=TSKfZSnpmO4Z1%?6^WH6wTrBr(oAuo#bxA z9L+KnZzUw9OYptWNU(x*K*_rL_o6oP!Ah-Zzhix2qZ@LYINPKmtpGb5xiqo)citup zMOzhikv)5PVOY3+#?IqF5~~PX_)!jQs4XPr`)~e=F{vkI?RMN6!*!sK5khwjNjgFj z&&YW*I{Z>Lqn~X(9Z;X^HEQIIfg2`)yEfXhVKX_CSvMBlrcjt*u zJFB+Ea205yWPp0wEs5Hela6KNNIN7xBN{6roF{wG|>W#ii#pYsYIS#18OMW2yzCXmjb>A*khmN@nQZ5RM->0o1rsV=w5 zwFiCL0Fhv`b^K*uY+h$l?bYS89S)C)pQk$ye;XWE#F>FbUTsegVFgIoU0t;{AXWL0 zrFnbUG%xwR!wYtU156t|3C8r@f8L8-)Nfdl&ndLYnCovQ+RIIFMgGdM!ae)c;5Ulw zB0U98@#k6e0xJYuB4o6?M$k>Iy2|Vf7&og_ETY0_0tt1`Wkjj>uV zTgD&{xe;fK#EC!4VxnXrE%JPApQ(?`^)B=OnOPYez9BUK>pr3_lEK@3Twx7wha*S? zKcaOOvo5b@eGi{llxO{^>0DtweIO`HPhR@D6>P4ckcIW~gH}*A9)+k4G`5Ah_uE;7 z?Vh3RX(6-Pa`2NcR$S*W)YIaoXV8r?MBfYE?tg80GRNIJ;6S6_V)hnm>JY$u?HM!u$F$p_a>8Za6EL5=1R)C7NM z3%qhOXT#$hY1#`%o4wS?1y=nUYRAz>HmF4Z+woYAg8-44hc-9e(Pm=JJP0F^^3!Gi ztVlf#HHFR-Cg_vaVjJA?puFI5E(|lJ6{keO?eA$wBYqBM2 za&2R*jgPD5ZstH%$Bk@agjPszMuEy&m-R;rt{?uh^wp!08--RnbF;IvA`e%le^Y(i zY%sao`EExC+!mCX9(uomcyLo)tEP${9d6&^XxtcI(l|d~=xpsYSK0$dsdCk|ZxV9< zGg|Tm#MWx=!UJ{9X=aAKyzz?Ea!2YEN+A%A$ek_6_J1`;Yd) z&-;<%?ET!(ZT(_9Go-dxkDxH|Bw5>EqmlZPi6M{s@@g{P6&dn6V@TPq zgzFG_2{exgF-qj9a% z(D-dTj4rde9w=cFrZ&pS3sY~sKe$Gn$TVVX0M*Z~QNde_6#(1X2w(nI<7%rbLzovu zWa0{)E5psX@YTmr|LO#_)9dX{DX=`?=1!vDkjX9sUpheL z`C?Eah`lm-@L;Di%4?nI*glFmUOn5U*)38d)OytYw$I_6h$$5EtwSc(w5zP(xW1A_bAk2%+_m z&H;G={j7$uhSs=%tUzNyFuyMian3|UMu-6A_yrnbLM8QtOSo2RQMZVy}yhCxV) zjzoLHDPNc)VH*0y2km+{!5}o&1+of6-~t!WLbIvKRsiT?C0WCnFu6Dy2u^%HA&$9_ zUs6)iGp_wvPYrc7x6NGcNB{u|>|+?<3(#>m2)vfz;f=1McCu@zcgSF_xhs`^LyHT-(oyp_@? 
zYK5EeHu`eX3Q0vQf4%VQ+2XErN}<=KV!{K(%IY5=+Xvl#XK50mEX&H8atJZn=+NG* zNx*cF0OXokl1itNffoZ;c2`$^H^j;e^o9>Fkd(p6BqkKKfS_ouM=>{qF+`(Jg&Vdv z*doi?4^3$+@9)mAmUfxyYjOKs(#pQJ7P03T-j~ui=*n1RsQ?;k3!8`%`u!y-dh)=mggb(@!#oJRibjrUv8R3+zdI7I3%Za*3VGO-HA4 zrq3}^CzwmKT241 zRVg#QGA1eq?hvf&3!uJ)@@;PtbPmpH1wHi!zOyi+4*b&-7ay^yM&6L;ApJ%F(O$ z(TfNfuZ;95G)E>*@c18N4q=cW?3XqSo4fItigFnq`49HDnihxaqjsq^TsQ`kF?o3I zAX*Bf6p*c5Vx$zYQ9EMdvx*s5m5M8Hf&l0sVmSlrlpGwSc4aQ1^A+Dn{Ow9JB0E)Ls3S};Y~e4q*i2MZb!Obq%NW`&oP zyrtUOrlS&QCr&rZ+=uoMYBm2ij17=(ltlia&w~_w0N0V+Pr!ELcPvCfL+pmBGN_0e zRv(uLE<)rLx=~7ahY$f_KqaRa2t?%{{W)r7Rzh1bAi96`?ss6?E?qTN91Tct^y~tr>os5PLSrb9m z4lZ9-D1i3l+Yc<~t0We1)1Gq>tgY$MZK_e+=P11yp&flz^W=2Wa<{^t%C$vF|Sl?;PgEW%fo$VnigW*ICI?aGRYk!m(*|p z;hyvOvwsO)bPC;JdvcGOv-OeGr$Sn$q`UU*eeQW(3gRJGI^7#CFRcG5G52AB&&OX@kJ@_y$A^z-{_(a;MU=dcOVvyjso9Do5|2Pi-5qad{ zg0_mGQF2LMWOvAFos@1e&2PE4*; zRO6`3*-w6-I`!36Xr088jF%4S@EgA#UOtQivUinL)DDRClwoWCnTt7b&%&3Z2zSLB zNr4Xt*NoPO2iiDq^VE9&y8D)qVB&JWIc51zvT#gb41h-rM)Nvic%0AJh zR9()kW*S+#)*9Ph$Ful$U*HrEj3^SWsJx-q&zrn*0vsgN@xBF*aaBgXHhhT3L7QCG zfX1OvM%k2OrQ{ds!$X5^7zd-UxP>W+FQa}P_Jxg;tEBLH%Wdy)0rlgnk%9Gb&DKXf zE>`5FW z&kVMTsq8J4O%aKlBntGuQRRPsOW_o;#}HFaUxXWi5#=|d6P z%VuQQ9EDOrBPMPLNAoW;mW*!PwZm6MpYA$kFS;q99^-JDd7w~^skq`et3;k@{Bgp3 z)NpFzTcLfSQP$mul@aUd5oigLqFA1_|BgGoS?j)xrtdEVO7QS}>UeoEiCX!)WDPJ^ zI+TM1nn^ar1JOJ6T;TZ|{9raPqg+{YH`I5{1Ou=5k zV@~n6SB2VCEhpwozj^hY-Z-yzp|L(q?<%okH|bs9GQJ&ZAD=cZxm71pK`_eFBX#y! zMFu{tAox3;PdeJ-R5O{fPR`E%v4x&!=x@IQ0#^svDZ(lH;KwMc`%U3QnEUGft~YIU z(Le13vE*CIr8n&gOT`NLX(gImAu9oHH&IfxSsCe@z^>-EgH+Uy7i= z^Wmo(Ps?KW{`YbP!L{&2f{6KtuhG8D+V1*RH2R*FSLA)i1`JmcRX2@#;gQ>nFSN9r|33O7~#)G)PpKg{m(rUtmqe zEZke&$oP;I^_DUvm6FRN*~+b^n@eDGBqK0wsD>IC!EFiz?IhV_Rn8cH-#ZSgA9F8` zuSP6)0i4#l@4;|QNVAvD^hw5-#bCZEGs?rgscvB;H%={Oyv2~QQC5(Xep2?nv|h2& z$+~{7D}L7`9Md`^9E%_7`u07=S%L31wX8Vl(wluJSTLHS*#Yjbn-(Sr&?7SD%ZQi~ zVgF!Qd_kbFw=CM3o`bBR&zvy~ow%xVh4Y}c+9;;)StSuhqMr0P?9l2`VltNfeQQ6L zRyJ7t^vl&~D@1kMEek-#(;uWV3S3epV;VFAHgn5o{R2Wc4%D%;f>BJUc9&4r#U9h&^~Bd$FpWOi{} zqpv*4njGX-&^T;3Rm+7bf4n^{Z;(_;Q7zy8EtbXPYVx7r8}AI_$Fi?`e7)@3^W;+d zPn>unYFPC#!(hXV+E)E^&SYCDZK zRJUo?grgn3UOp(oCco#G4PD|KmhhTj4lh}McS#29Rs7#sNtb-b#e@{HsN2J!ea(1A z_aW7i|7sLVxQZjJR7mH$^|Zlok{f2eZM|qt(6d9Kuwzui=Cln>C3qjIPl>zR9p5{( zKe?it&qVm|0m^xO z%E&B|w_&HoJ>C}9vgk?zyT}I;Ag6Ph)3v{8cl$ZZFZHMh{xV0OtXexx>tHyE)EqSo zZ057vRsC!`S*n%MP34JSQZ7S1<(?civ1r}p3Pe4ruVsA8snZjETBJAa%Pk$ziCu<~ z74$Wz2g4{^sw+$RViZerejYKKmr?B@8*hER@=E*zBoj@hpClz1m;0kyZI#}!RoOT} zlk!q_%))>meX&+$u(m3k#TeFBLimTl!uxMK(Z2Pd8;_^C`fiUMXttO`CH96*KLdQe zQmR`2UaXYeATlYqN%o&PXeRXQ_Egn2nlXT#kpdIR=p*9GxNPMnIF+xfEd%ah_P&Hz z`*?~H6w%IzKe~yd*>j8Niv@+sS3#L-L4Q3T|LJ$)MO7Cgk!OC1v_CU|b8(Elnj6h_ za9Yo+@~Va0t-y$?Vb8#DSa94<3e+%_b*l#J`~}ef95|2#`p*N6*^93@6&g9-p{gW2 zndsI?=t*J!9atamT7pnCPTu&fn6J(rvn6;)$EMv`q6;iq8@U;Vyi7NPcCy_xkSK;F@`Gqu&EkgTh8N0&J;tW8!96;(aaYkoCPoD_*EU3 zBNZA|2N|0WgV`(02rR!MJ2T;^vmc!9wdap1XWkK0_L{$}!{!8&AOI`=czpzR0Z(LC zfK>DG#i5b^>y#Ny4m5^@xKrhcC<_a_3Xaqo4!kR=eS0&OdnK@3;R$)|-G+*>3;i+bk7%6D1)kQ=yV6*}PGP3@IdIN@bpsjN9-T@IbDh(5qP?H{UiVs`^;w^_?##cbVZ9;y zf((ZqekvAPve@tDv9Q|MyXDM6RqNhwi(THY=m>Of6XuCAvdB{%QHRtJPRS8LFk;H0 z*n!xlp%qnl`-@zE@jcpt+ueG#(88~L(|izZZF#O{sp5sv@{iR9pT=00@y+D7$m=XQ z{0daRh4%$f%VOIKZ&V#@?J2Dyh-39vpN?HQ@>pxg(jAx@>~VqnG2jmi|BU-V&^rAZ zCZZ2|PB}$&y(+Qs?_cy#YJ)}*i5Ii0VzX2KhQ-;cM@>DC<`Ut)vu;f?%In%a6#N=j z8*KsS)pe~xi6w*8!1DtXN7OS9`t=X92=Iwa6={&caj5>E2MEDlPdS5_IVt4TtD-~KJM#Nb^rOFpR-E< zamm{UP%2Os4^PM>rh3*W%L(^4pHAby*=%Pjki2GQBal{^SNAR!Se#GR^@zHv}d z1U;p7L)8e({1KSBH+i!&!hi>kx#K+Wg*8JMQhV^Z7}d&36%Qr&1?i-7HpI{xglQ%>FS;49Spq~&+v zrcMG*XefMVEv^F?Xv+^X3F+K%rD7nR-ni_}Kvwi|j 
zEn*<>hE_dNQfp~J8Cr|;{O=-X5H_$m#Ovb0#1@D8abLvumqq@_=)4X-cF-aJn7b5^ zvxi>b4b`Q@*rfyh(6M@)`San)jZkZoCsDo?7xgDsf?i#Yl${<<2=As&oNh?Tbb4F9 zMlV{kePCLV+$eS77CAf~T97hvV|{}Wvht38(2Qm<~(d)#g}^G z@tFg>*+m9SUmu^RAt1;5S@p}=ACV18nY;jyI`D3D!LK@f6L@(iZ?2Ma3G>=hjoi4k zM@qe$1J;&)4My18UHPka$COjqT8Ho{=gh8viy_snu>X4sHl0~eXIU`ddcl|c@i5$b z(#Ii`dgJZ>txnRrLz`-I<0j`Cma&v3;sOIKYryqt>+c}E*!P&&d2zYCHp}k(Ig-nT zi#oR=jA*pj3yTJsc0bISZ;ztn!qS~yz{zFVsZ`Pt+#9G)v~9a-P5@IeedZeW42-OB z_Qv`x+SGdyesCSQ%FrU(>eYO&aeV`dQHL2p60JTw6H%BCXdZ?g6F&X=D!B0{6^|;i zZ$z3cH(%)$BNi)#JDi(w&AXMHO&#o0%Az&B%H^id6zN!}T3fA0 zygW8{fACFjq5rUi{H|twyCeC|4m~0ZWl-bC;k4;JopN1u0n@{|w_USgP4r4iqgMOz zd2Kz4UgmI-*(nTrBu)CJX$+9vg6ExLX2(y5#X(jzztUtxCUBiHMES)L+q~3U#k3!J zUU+@5yTg7kmHf%W8%K@HT!LPs4}!;oQ#$)C>2LI>A%5-o?(r_HHj&+sbn3yRnZq7F zD+`YA)zG8kzc&3k^@~Z!e6<=++g-^>J#lTzn9f#>VZOBFu=44pa{pbN>b9p7)Q->p ze8`9)9UV1mgNoAoIp{!8t67&<6=?(Y(AhtG&~~zKJ!7%o%+%FGEw`TG6ru zd4K)+MjGTXW!zboN)(a=UfaoHQccrywg$ zbeQO0lfx1_I99N(j&Q^}5m^tGkAi=!rRfA1(T8i+66p^s_@kNv)s4a>=zDz+;U{6v z;X0n)_C1lZ*h2Rw(A~JM?}l&5$uv~^Lzn2xECsga^DOKF+kunIX^ReAiumGeR*hhX zZ*sEwd(b<_oP#4}v))G8T}bN8cLh`n|7Wu%U3TawIE6g7j@4gRW8V1WqMwDwBLK_9 zKFF|@(8@Xc?rTqlRcAkBzE5}B)2g7GL$SOmu2XhIddipT2yrW3+0K7YN-}eRR93F! zLy_VOf!@I12;4pE$RthRWQ{olld)u10iTyX{~Ld`xtBvVw2#YKIC(xgp|z+qzBXcu!V zPao{%i8Zf1kJccZqaVOEi&G17Go`CYU*Rm&AJ9?B;nBxq=L`N)(a;N!AFlSnYAbl- zI62PJH{1z1YEQw3bX(iaQqug@byLS%96M!$iTT9a8*oo;SxoGOS-|3v#mnqCw^XPG zh>K-(j46l>nAfay=6Q9o-jQbeBPaU(Wu*9HEoNGFS)Cgl3E2|W1L??6?xo#NAbS3} zO@?$TH^eZsNyB28TUN1@8@@@%Wd?Q)i_HsuTp?JT17Sco(3;gh889OJJ+Aa^5%+b} z!6jyBWMm{R1~E{^etZ7!Y(q=!qc!@m#vB*Tig>fbE2XnHP6$?;z4<&t!41dy-u?NK zGAi{?j*waEa-e$jORLwGtCpv93wi#VI1@+>>o7L+^9r6g6g&YaM_04SF)7TdJ%jV|LJDk&l1;A9R9t1R76Y2#>2}$ z2vk|0duW!mH{lhQJj=q9kiCiCfkxh%l~-_0Y+1W+{&$EnLuo&|{AYr(eY0^dvy#2c zhqg8QTQ99|oSTq`UiIk<+q7Bpk{Tk!%shp!>%P6DHhxIel)zt&8Drtc!adb!2U-KP znv8J&+N|o!z*BRFJuz0xLjXI~ngt#OcA$Sl?!~O*ueJDLzd$KA`#j2DUc0&PNQJDW zy7n0rF4t*mX;smdx)A2|*bR|K7-E|t_o08er#kF;&t|XB6P*>4S|bc%>~n4=)v$Re zyRkRE+p9~SwjmqaKtdVxKt0dPdaF_lH?CW0Y-I$J?7oqy5Pi+ueYu}rx!i_fKPuq~)SGG*>((rBrsdpg z<-x=Ab6q)dfxTpU7NvJ=W!YfNe}zKByPWjzz27+aE4ycY#gO7|lK+Ml%veht_X8|m z&g#^a*R)vcJb%a>T(Nr0KX3N+^&ne^-8v_(a9-7r>Gi5l!k#F7!IyE&pO}^|;g3b|;`*ye z6DyX0j?GJ&T0>j&R=p9~amRgb(Vbn6tVX}#ZG7{4bRnE$4A{I5lrarX z=cD%H5IvrG;=aq1*Ot9`khB0- zLmJd+Ez&r?)oI%{F5~Ww!Ur>Z9A}zgu9DwhqJpJHW9vS)<{c15R;9aAlvqY^AxiOK zF}6zbeq`q8mH|-mwf5kLN9?AHYJBHL7f)Q+jkB}82ir~^&D(Tizz%U(;fqR|7r{0D zdIHC~GuCI@EIv0phLWT)G%X}TyHpia&{tj3m+_4~O1wmt&0K<9W;olqNDo9fjFzuL zgIMskxo|-BLsJKJa(QLtLm?@ed+G3)Y6exJ!n4qQrnoEGwoAn0Xvp$N+mAbGU5x<4 zYRV1r+KOaFb9&y|Ajb?($OdFr`;Pc%UID+BmlMu*Uf(*4zMi}KKe#$IK!cR+?u#Wc z6kY7-U^^Y8W2B7ch>Na;|EjXRNoVWE@wNB!F%_=Gta9n}7SFFgtDb(k_4cZ-?X(O4 z)9RMDL^OF=$Bp}lVffFjihXiQ9H#X1GqaSicy}5pSR9u>jj><6P)ol>1+G8;5F-g6 zDF3{{jucDn8BOmW1m9}gs>swt-lRo?Y|TZVtJVg-kttVcuzS!Ny?MF{IWt1xrVE%^ z*}5_(V7OIbF_lTOCaEi)Anmd26U3Rf`Qq(}R?2foti;7I`EsF+sA~?;YuJ(mz1Ogx?b){RPAe|B2=&{utt;Aa&vyUR~8n_T^$H6d1om-p$XSmG#<%J*t=F-Bc#d82*sPHlHJm(>3!$r&M{1|v zk;i9p^STFB9cBUWNn!(jeMh9{ zSpFlq+`6)yTt%(9A*UGnrA#JNsdK%xz%v$hN{%Vfeupi9-Jggc&Fdd zmSHcRQ(44Z+N|6lRN^k)CnQw}hRo{LIkOw)8?|dZ+tkuhDmblBZVop9Bdv$k)$a*Mj|HNSH+uL`S; zgTd4V3O1>7C@VO6^yGfZ`$lI9S&v=V4qVMX`=d)ry|5p`qdIhIjWbUcmkVH}sEO?n<@6x-l2U1xgTg;*c zR!$A`ka%-M*1>p4sl@*!QT-q=|5v#`)o5i zmc?K-07W&Xm}Oh%(&xI$oS7t}5oNe7`Qv0-=+UyYIteLxm8#&-UVZa|;BoK0RklHG zxI1pv5IB7PR&WrD7FVrwc=)mpcO(j`Wk%)t~9&Bz96Cz)gA(= zcSxX=hNA&>f+SHCp#rg@S>$yv2mxX*rw&cc*T`p;?I|S4 zW2 zBf%|R=8krM;Vw42AVe#MB*Eg9HkU*I(M7f9E*NpACgE`0`r{9=ao`cRfO&#GG)Ud= zZ+rC7Q>V8CJmyYbpy>p8$pm~_?Xbenow)uCG|T0e>6CP-I}oRH{e$)avlwg#CKWP~ 
zt}Z1QviBjai;m7rkc|~b;I9{@Q@=)Qq;dnSf=c~raQT*wPR19n9YM?zZyWHK!UYz- z?TF1-k>$}IZ*znNH7{B5+E{{8BP^Baz7Gpq@WEP3tEf+5AnnR`$9ij(^G=krop1{> zgkxAf{C9+cjjAIeUYd~)`~QJkV%o1FZSVeZv`w(g87J!3QFzyJ8)uv@mvALyj1e=c ze+P*HxnovQ-dvL%auwIVYNBMt6%u4EdNBV1^Fvz!`Dg6AGHV^WvYGmC8Nity|4;4n zljl&zbWJ;?DSQ#tOmL+4KxgF94*TvfcEj_%T$?0op*anm=9IgaKd^vSENXrGXs`!QI~`YGl$i7D81ePW#**LWGSlspIoY>Tc? zUl0y1;}#xcM1uQ(Xsd!BR&aI8VaML}lRKKHQc@#XEEjxsZEdBB>hl5UAU0(iD5g*= zGHHu^FO!)Qka+AL|LmaH6&@3U(Kucw=yx9Hu^Zc1W!bl1oN9<>(JkMO*o;m1i5}xP zz_uT_-RPU11PFO@Vq#8jeE~+@#MucftiEI}PB+Uw4pc>YsKpLouIDXFz@}Z&U*1$bG)|akcA@ z5LVh=XLTn7!YAZy$x1nDt=5R9+YRW&E9hwjm-0dI@xdn|H%QN}z4Wy^1OAPjcarAB zaB(Nc+k^mpMPO67%uAT#7h5PRq0K5gHtM1wZx~e;Psb_8$TjWHN2DN+mbSiBB*KQPEc?=n+D7;<$xBbf&Z`?tllfkvSz^4}IFMB^g;b0IW1CHv z@!dg6#K=p&Ix(I;sDQ$Y3Sc2@IF~u<9d>MmKZ6q`2lJ!D&)D5LQYVwgxp8vku9s@) z;KED(E$XmwMBQKnG5oPH{Vg$PCVLzr$Vl$Ms&uRm>zP(g$7a$0ZfENVhd`bi6E4_F zkdLS2D6wrfoXh@PR;!2SR9AyMVb1ywDkn}T3ceqCq^5>w&5ASTMiBDs7GZ)G$cgooxm1wJ}N3E%?wq_XUg!MIP+ zj3SLVNJQp;_JBk3whDU7?GHq}9?nfKC>7$qdv4<4$(PAMtS7JV%tH+jdxRt~gkpR( z(kFY-y;()?%@J_vtt3;8BmX}S4CICA*6-WS>vN;TmiI7PtR5bIR$TX6SDxZRIxDV3 z2{K3Cq|gk+WApN26sDq`QhUgt!oN$OQ%^^WDY8VvyQqP6dQ8AwA~WMqEb6aD0RP=$ zcQ^Rl=Mdp<9Oj`p1_e!7Oi44{ zdV0fJ)%9GyS9$OW2v@ufG>fT=-{w=(g45;$^n%zNtKxk8H+K0B`gZVbAA}J^MrE(m zydKYbTXBUJTy;JrqapbS+$>Mja$$}u0eUB0@`;=ma!4ZSnuE+1OP+Jfl@BUzSG?@( ztNLioqM^<#a%*?V))d`qmm%u5i7VEL!}0hN{(To)LB1sR*o+>Mp(?@Kn?B%E;!ax* z8hfulTG;N>)K(UWF8+f7^y&gAD)=u?W@rg5;rzVNB zwmd&#b=vFtszf#xJL!O_i7s41aa>GCk8m$ZaVHN>vW9OahM8oy+r-0= z>&${b3ofd&cp-ekmdWFziH9qy#_ePcIQQL1XY^Y8q|YMBIs74ywLfXhuT<)(QdQ2E z{f~1~GwSM!U2ynS-ETy&VBO)0gx;5d}~M+ouZ${rg52r~fyr$(=qz#=eSStx^{IKq)j zUS%dsdSw-T6(2j`yT_HYNB+M@wxUpD`S@FcGj4eCdze7Vj@8aYDlbHUx|pJetcLQlS^bo<$d>My9F% zL1~;V-v#b#vkCp&3Z1bWxH!h_+NhR>XZK73tUmQ$32llyi5+Q?$pIRpXatI0ESm}$ zgsf*lOt1jarEsWwe2-oiXUpciIH*n%`u73zrFMQ?$Ui|G{hL%-`yZv*4JjxqZ2jZJ z`LQ)?L8~O@o9??K4&EvuwFg^`Up-S(lT_CwWv8-EpZC|v<7w87fvbx8z;1J&aSna= z1;XLt7=PCt5yYGV&Q(tvJl1~;%7IN(AJ3q{HiBfBP+s-L#s5$^Il3*gqW9%Y>#lQV z@o3UDh4VI>sLuy)l!iirut~EagGUZ6|19#cn|1O$DxE4MJ*S{Lqj4Xgx|%(;60yE! 
zjO%YM1bv5(=FPS^945C;{|V^Maq30%=ie!oE6f~>j4*&(Up*%J6phonDSr{)05ATe z;&#u=PA3QTPwd?f!O68)v)qwjD}!qJ1&B5`$(oJKyiCehvQ}UJ4F2xY@-2R0q&?)j zLX6^5Tiv=3jvlUD-PCo0RXgpnDly51XN>xQT`&^|Qp|SBU9W9vTX1E3A*hS>YIXvGeKT zk!{OciHmxuekCRy{Z(99!tu+2MH#mJ&=No^F?VFm{eA_!otK+IV<~3}C zOddMz%h@;7oyp-3JC;kn+>fs=>G-;=QY=+PXb-;r-4HAyn@+4WQSuNxV|~M$x%N^L z#&Hd(eJdeCu}#SoDispN``2QY^c8 z+@OmD)UOg(xs{>JS-V#!+ze|R6f;?DuVydLm~+Qm2O}#wyM;R%lDp&hMp@F9DZ{*X z^X17<jiw&W<7UUk0k@@2}a_I#PI!-{OlnC=$3etwTG3UNp?K6APfYpDqs$_ zEF0JC0w>!nS{y2;}YmXbhs8BB}dyx70y`%QR;T2Gtw%c zKdVq=tjlkHzV~Osm3^d_$-EBP{(`+`vhjSk)+h2WPMIUc-;V<`OMSFnA5m}8T#f#Q zpP3B~sOl3%RA2LiYqZzIvDtSS9pn1zGo4L&^EqMPholZ{9}8tga4jP(P4#|Zi-5D7n{_ybBY?S(h1{h3g`qt?Sg&h zA!SorQhxExMN!6xk*i5GI@emKw*h+mytF{x(G_Z5NHS&0KLiWAt6)q*XZ=Xlv?#SB z-#Tj=iK=XRxlA_BQ8%L*Y~uWW-w-d|@Yq!A_RsMgU=7Uo&w0!izy5@F(hJ{x@_(4H zVm)atBI4qtHO!i+<=Nib^9dN zEVA^t`r0{p7r~#>lKjD{I)R9_n8oJk1sDdkH4JWjX3U*AU7w&5E0QoE2)1H_D6CN-fxZa{nLV2Pnv zv&x>0*R&&GbGPj>rCT4~yFp3kAFed3ksAH*w%MciPqK8=G{rTCrO^C1_Ys@7Ez7ID zBG_p+2$?NDwjqh@&Q*A{tJ+zes07I=JjOQe0Xr=1#Br;3pi>bSgL5SQqO$H8h?IqY zt*2ZGzxZRJo#qN|OWURIH0X^N#@l$(k_r{tH3s?45ZQ$jQ!94js-00v9&9n6l*2kS z@syFdt@=pVjv^=5efz&CI0CAf^DDYYK{HlDnDJxrcx_Ts+e%CRVw8fvR?%Mc3WW_$ z&1P&@aX5E92HZV%u#t0MBD7fQgOcx_yt9un6Of*28-EJR_|clJBoSlXPFGZtZ&n07 zTPNIv$-}Z?o zVGl!BV;3yp$}&;l9hXo&&weg=C^)Tlh%bearfN7$eaC5tehNmDM>*)~zEBVZBsTaK zQa-EuUgj)9Z{}6JA;n!7(DvLjIGzm$H%5Ov)nJ{K!6I!kGt_KxX>6s*uCr~tWC=sa z**ngkN|JtCvd3ZVCZmWN5qTzczepzkPcOga0Kc1dsRzB;&S!S*^8FvDma{PB!01E3 zOjyGGc*OTUu{+O!YaGc%&|3L``*+tEnoXY@8*b^6W5`utdq;AIO<~Y$hZ%4U9l)jW zCM{m(I&WxAT=1Q4gv1ziNkCVf3%dwQtt=|qQAc@E66c>>AvlFD>I4Ep2yglfBVD?0 zV`W6Z_a65cs8B5bSA*PLpS#%>5WPMeqHax7XVz@)i4S{7-6*}_NDPV0?9PNMH|#Ss zdQEdh$waT~p-q(j0n^-&2_IwS68L%eXtbp^c}gE3g9{0oFJ{T?uyMM$6Jur9ij=Cj zDC384h;!}0wUXLX&TBU1!nM zW0?x2e~8eG$c21^-z-Z1Bb1_3UzY~|8aO%{i(8V_Y_9MrtPy3CGD7g{`^KgpmLBF`C<=K{VOB~KU7BTbia%6escfaY%Y!^s&hCyUQw)ib{Loaf}_Vlq~ zHC0g}CUQjifQBE4^OVY;T!@lo!$nyO#-KSqybb;1$<(yzPQLsn?CD&3x7XwQx-h!j z&lY?I@$@Z1O^(99%3GDu7<$q%CJ(r~19VJ1tmJ%K1`SMEf8&N_;;uTOO&BRQoXc)E z=Z9wc%c9-b^w0) zX+=atXG3{>Y2vBJuFJHWd%gtf$Iq``N1lDkY3i&lEC8I&(hlCRr-rBD?oSV%K1B}) zrPJd1hu}Q^fz$+cclzc7lflJ$m8)Hk)qE*sw?o+~%UNlwy5Tf^-mf-k7brI) zA|Mt<$oKCISw--n3&Dp96#JSB?djt~gP3>i`7eaHRmC@h1RG>p#lXtEQvwugn-8!Z z$Yj{z$R^CZ5dj*kRS)NOBo|BoJ$3?&`e~zTN{PwCls;2b?J2&+5$O{->s}m)cYirM zQG1GTPuQtdEtZ&ha~+4=k-b~SjNSR4)+3aAf;pR zduENtPsW>dZgYraFezZTKgAYgK zO2i|8ES%wILc6x71>)OBgHRUkQq58P<>{8%&|Pq9C~c9`u}F?gG6P6q`fSIlfrJPn zYGnM58nsEFop_hMc$Xp|r{2T*4@)hNpLNU|^{`c+!nPEk|HLqnjIS$NWq1~Kv7ljL zs>k15@d|$R2S2U%rfyiE`^$qJ*FU~{GajExt#E<8+x7qKT{v>0%KO}RCnvM-8x?$H z^zC3G;T6M?_Is2%ZN}W8r&c{`gsGp(qo+~K9F51IISdq9*{0?qA;)>)MNO2P1zjqi z$~xe<8!)Q!K?T>86UJ3PuHZsuhPH2I4tBG$3Ztz5KvyZROp4s?$(8`tBxS+`pQ|1= z`2=Jc5fPB!-0hx!p6-YGB+#?Uh>Cs&kb570VItFb{e9;k^=ETrfYS~e`f-O%vj#Tm zWP+_4o#XP^dxD1yjD6-@#pk(uJ(MS=XKCLj$;qcTx>t~?mgQAd7sc={gs%f*2eJ_e z-!-j7?@s;@Yw#H$HAotyb>P|ophk9Yk!CBE3yUKge(n{-F0M_gdp|Y`?Q?u*pk07J z8_4w^#UMhO8*(hzV5svAAa?LZNBZEqKo*805wTEt*Y3SLUMBalRZu*f&V|3#!+Lej z6=BjN#UPHAjRP4H#PB#?k`^J;uT&xHT%4Ky(xLP5pP z{j^J8_)0col5)F1D+!i*lKvz6wFi8+OpD+62Zrnb06@4N+WX4OGygY~s557BgXE#z3 z`ui+TC7zHqclP9(&soc|YFiAlc-z98EW}8pv>wCMbaW(b2x^fL*A?-lSV%m1!#3bK z`(BZX2K&ALprS?pSGU~}6A%=8Vr1l1g6QcXCgIwlZ(t+G=5$k5CqXQrU4J#jQ_y2e zttZA><*<~Z9;1>@Ad@7FB3&WrM9eMrPR@<4wY-nP3Mm$z>jOOm^Vh`6>QB}?aH}Ie zn$1}?AtkXZTTSQ}QE+`^j=db@*@;-CsQI~Rkt$KaB->jB>nBs%bjupN4L<~-$E#;6 z2fq2AzdBia>a#Up4k^22Yk9R?Sgh@3wcwSrTJ`q7TE}FkOFG8letNUvx(SYTb{92n zq0)f}-A#7^n~-BCM^aS&7IF+XbuPDd2$-oT%-uwA_UdYUOSfB8SVB@~udX5Msdc}I*=e{#3H 
zdUonz!byjRr{ZU=0h1eqeIQ@>f1uknbV>>=L?7{Qag+IZA>_#F{{eAVAw;ZPrqhe- znxWzY(c|2|KanV>-iSbF0^uSF5t1Mmdn4N-l2{&dj_~K_)-AQq86MW#yv`LWZzD|| z_LhE65QAkWU;-}kZ$=7&kd6SwCo$UFoDHPI9Sy8f?x)9nkD~@i$?8V$t_`;%q=Xe1 z;!K7u%9w-Ar(atV!7cTP(gwkdn@$wr-+!?I_q^~02LJ=YThc~I`9lx0iBj?FLT zL_{FLu63V#X2-5!6du*fcf}?1vu3b`^_bOFDvpG+dr9Y$5*HE2dQ!(gb%;R#C?)0j$x!zPNg|;1^lg< zX>W5tzZ3sJbbk+%Fls4ja~EOreofZMd_9@_Y$z8hsJ=&aRH!?IeiprcY5{ujhzL~s z@17TVbZujIC!R2N*RdTyavtmFgnFliL2-F=s1$0D&bcDr=mmPZ2JM>OZ0q7Br_)hW71Q-^q#jwUWj1T;}k0+^yZ{-Ma38wCFN zF2|2s02iT!dTB3B*^8N>&~*Q0Wnu295rx;4(%;v~G0bHcyu5Zo+8jp>vRKdHG?b0h zyLRHAd5bB%W_#83Am^&}JFv}Muw#(Ik>eM{I{E?%5{X8Ftgdwj(6W-Oxt>(|Ju~Xx zhjX`w3_a_(thEHQ^3Cu%yKM!#LXyM6yluPWlIDZr*#&K(u~kb;=xT|4?GGrCK}#9Q z)WedIQ$r37``i9IymA|0fWCoR7aL8ck(0n>b>iLJ0J4L|g%thpGJ~*O^&Wqzz3rDY4nYry3KQ!{-J zF=Tq$Ic9}qbm-!0TQ#l5UggwY!MR-qyQAdFg6zOIE;kR9trqnoE64wyUyrYDYwqma zqVbTqw3`RBGq5=L<%*%w`>P6wtt69K-%m-)tLQSll`8cE_D8r<1Zpduzh+8!AqY|h z2nAVkdpO_-ujq;|7B)WDsq5W1lB#W#MeIoG2EEIYql~Bqcck>48PAr<$ks-w$X^-= ze!9@OwCOg>qIHeknGjD%EmONbdW5eK%pHFWp`kB-F)fk4dX*_O zqft2hpWNVDa6|U~#tGQTt^I1~_8<9M+wCmdsY`rm6Sw*+Ovu_?CbE_Uw%P)nI!lx` zs}50xIbI;{W9=LR^9cgx%a-#pud7CsNbGx1pAjQ_pzi&u&jz-<0VJxUO^s0MzQdyw z+RU(~fi~0~nhL@zPU8v50!55=|GQVL)=#t0rx#hwt!Fwjzo%|b#D$TF-U9}T; zZO`|Gl=t&hak;zSBFeRKi^mpdqZ0IZ-Lq6-LO8V|e9)N=A%>yjg<9Y_^cwBGtbc-6 zx+BNt6NOPnW!aa=z}GD_>3Ukv{BT#&e0N9o#3w9D2lJ+_723Ln#dpjxmtdV1h4v?a z7?gC&y9#L##6w!x7jk^@N;LT+RO3!*A)defs`_8Q2SxU0{PxWr0@)V4ImQ0Tr~LM0 z!e%_2{Ff!Lt7ep9arcYNomes&2mii~!;D_U03>D<{MtK!n5cXxHZVJDOL(@bg2REV zVWdNmZ66sfZIHX6A3y+|5VSh?WStjQgKkvo*JkGCj8*DRzN3a9Qa5XQdfiprHr@y2 zQN^9E3ArNJ1d%0SX35AenH~UH`3qEnm2bk*fPx7(8L-)|;?x5v{>IPtR#`hzo27}bPsjU2Uq1hByaVN! z7pSP$#P=9){3YJ|i?fR!^QdXgxG(jGpGub2J@?{g*L5c71V1Q6+!-Y4LF+|y-W@M&$*C`${zGhY?M zzDMLZ;DnxInEr(^BGG0j_cn1B6{-opd@0k74D!kvjpJBf;Atp-*$HHiu^Fu9#WAmo;r=tKZNNZ2_w`j9(6#z>~#$}_I)|q>bHW^BmZZ93?!Efbt`mFuGTbx z>#3atIjv*;0)Ks#?9wA8ammNMPuhsb#DGw*S;-_p2OI_^Q>-w4Wcw0Zt*3tIqh2>( z8Bdrc%Cunl`Dyv`vwz*s>ag6U>f%VzfHqdph39JnxxCjwmv;~-%0HelhTG2A}?yR&lM0G0O&xlxN#}zswSIx_t19I#;unwoN zw5J{`&H5ffUtxEaW$p5bkU>Rg$>HU_f9kT#V^jwRk}v|5r0(#r zJzR3O;5*rI$^Nv%aAb2^$HG<~m-^=%qNu00D z3|(*xxDwDia~n+dKT#d%gZ>y@#6?+-A3>qjb!`<4fcu(n4H+^^?PY#&c?160!9c zPROn>+XR}inlbX7%kd*S(t^Y7R~}ctsVM)ph^D(iH3uO`NcZ2N4h8rhW0{M$Wv*cr z-uUmqZ?|Ro8wo-^n(yo~1vKU}A70F(1b$Zm;?*EaP_(Ij7Imn0Ca?2ftMD*-)m5R6HwgAB$%GWJE!^`r8Kd#mN( z1*7!G$rpTo39pX^kB=Atej!EGCaj}kBo8nTP&3Kb$D~v+&q5N~euv|Nc~HH)zhz)? 
[GIT binary patch payload omitted: base85-encoded data for a deleted binary image file]
zh&34*Ai(;+cM5JLVUfS6mCqw}ZYgD-Ij}9fSE_lvw+;ATj#`KHHdJ96f8mi=hcB>3 ztDn+^-x2T8;1xRQ{&8u&)8Dy?+h?`Y64{jgsnu|L?F#`E$6JR~?w`WB25<3Br>v?Y ze*;_v>0ASn>pVJSfp)kHbg6+kiQdEroc)*IqHt@ds&V!SlQFGlGR?mS5Oj&Z_c*m% zqLA(no8AaSQky-e;AP9dplZVRIYE5e92>S}(Fgb`*?YFC`t#R8yZ)LnRYxmoyw?2U zGiJh-@SJXO&&~&=B;KF@OhF*v+i%mPHT6pilj4pFOOout`vZvwMQ7?S;*9Btf{u+M z!}oGoF=4mC+(P}uMVneS$jn1iWqL{#Y%0-_4;91jH#n1iZ%Cf#&B(|gxK9KOPP~RR ztN`W_UddCc`a2Lj%z~KDv1_k*5TNhjp`pC3er@mzwn2HCdnbm9Rg{TF5`TU*o~{hBCTuY?FSNsY|*IhdnJF_;>6jXBXi$n%W${Tq;u? z&gBuplj+J;rz-t(PFIU~{UDIfhuYyIbfzEsRDG%X>z6XKH)6#bpX8*R@Dv^lGRB=J zdlJAExz=%Ak2wg~DKa-JJDqc@{y2$A$hps(-TVe7mhXKq+q--@VPF|7#bk=7U@5-DETLmBytAY!sg3#Onm;)Q zi$J1!;Id&(%XpT@X}|ctH-XSBo2hEi9xDclif^7qeu?H_E5?z0<4j%{38GD+_u9Tg zN}ivmV#=?UbF}xYr&ePR5YoiNr*oL?~PT%Pd$noWMBTwzrbYUiRe3%E^WG znpGHQ2HOOW2HFlG;%ss51XNK(z+4J*;c!B)y}-MtpL>z~rkgTrTA z3U`g0j6yAHQqH~ia+6ulotkzbvZp`tI@Ej?FM=QfBkK-JX_Xc;Y_J}a!{Hx365*3$hTQkvRyR_hcfxMFnVjA$xgRggHBy}Rq@oHpEAn)!2^0)I;s_r3w zUE|tXBfX>IFgyaM=e95VsL+vbG9L<&qY+^5_1WwYNVHb=P~?c0xK?_DGK?6UQ1M;f ziVZc3CCJ?45_Fu%X(Q+?_h3^M7^Zpe zE7fqegH1wuuMJx*H6ue2Sve0_Bk`eaLj5+|R_b!s!Zd4Li=(e}T#B02J4S*0_l>7^ zJ#!_}wD~tdo0)Jd=z>`mr-E>*IoAe~gFU%}q^?x?OP75QG99V9aDzOf+Df?nA2FG! zyT`3eUr_5U3PBR|cRLL{yVO#-8UuJXk=~y|9b!Y{Y(1{F`%j32;PGU>E(*eeJEFs` z!0VeCf?h3Im~pTPPITdSxa*ocJG!&r8+7+*a@synyxb0O8F8w5sG%^u+rHe{;dSsL z2qZ9}7G_%f`@N}8@c%!lpEI#U~_et$@g zB9T;{gFMZ^fY8#l+mgq8*osGtMMc#pT_PR_xLe>j)H|_b=^!i1Q!ia!Lx&HjqiU|> zd6QnvA4vAw>$@vc4{*-j4Z&@bbRCD0PRLqvqqB(Zn7*|Tu(}h(k$?qJanZYk6O?*x zICe>X@}+TI$zYfc`MF=N3u{+gxcy>`lPb`x8M71w$aq%0Nzce|lXG1d!p%%Y5gWMNa@7eCx9oF*~SO*1pG$jChLk}Ey{rfNM~d<*7<5@1KOG-@>@(Zp}H zfQUEyCHXnm+3pzJr|gPaUw`cx}0$nUL3Ob3V?_#=;_+s0_~Qg+;E|BK~6QPe4j^ z!IFnydO;9O->bAO&!s~kYnus#I%tE-x3giX5}W?;IYyxSCD#G#A@Pm~5*08FY@fZu z-OsV8fBxJ&xgDBlC21EO3%BpO%_~c#$1>Q?(*g0k)oERoFOvsrtKhSM=>sqwOhc8d zD`N&WLHLTV`+70L`CjnO2)J8P?oJDfMvOq0@`hk3z5EmkGHSoCeh_A=>n3~q(9g*> zh_2~2HdNd=6Mc3ZD4ynDh=+eT#_=z$D$V3suW2~>N%jElxvGneHxOT0%BL3 zUVi_;@oDJvBE>;_jSE5dJsLi4KANS*zn8qLF9bftvP%0pAAgy}0vq)gVxu-v-ZzCUu*v}@QC<7^6Ygpr4kjIkpV|3(rZumc z%UC}#m9c+5xcb20wz#syEx7o?v$L=Q_qo;EcV)lmQt{9q;MlOR0FwyZoZuyVF|}=S z;s#R*Hf^#v#pwPcF`qgCIgsyFY@&=vKvH^r(hR{~POcnnvv8B?pCoDxh&=ZU+GYAv zgXplT3b&8<9H}E8O9Z(5AskvGYT>!@c{dup7*ktx_(YQmq$Q)KF2(ej0RSaP%;=AX z@nx&yV7~8O48r!Y#F2^bG#=KC#!|yW0A_rWg?mbJfir%1fc(dgH!v%&UTDq&BJ@I%eM@BYXn($|=DHW zXzao*94OOCq@S(wexvGXr0sx%fWAL*nc9>{aj{&42wMWhAS=U?h*gjV?g1 zEs>)qjU4$>k6BQ=%x@`jR-6?+RmEW`ozG;IrQf9L4XQ14?0RPVl*U||KKiYiT^WD5 z&W-2K;`%%$PjmmemtTJoF3lS^SWt)FHZ` zqftM*39cIC*uc}$sd@e-(;!uDeE{2AB7)2;jNoYieL0EfnXL(#Bh6n*n>`*uj}R#8 zJ=5mHnuGoTM!R1_?dfESx3gYHyeE_&u$#3C%5G^baEldLg4^y!ZK6za{Z13e4jCMN zhn``(j66GRe4PJHLK4FIXb&;bxg%sqZF5I9nv%bnErn&6mfMr95HfRQTU}MGGGN$3 zB~{O#KLqZ!q?*A9`W9pe&dz)siw$&TG;b+V(8Aie#VMTuNxn(hv zL8*J;CO4+rUx0Mo!%-fyHglJ{yS2*o}O~ZB@8rwB25)!7} z4G;%vo;yA3ochM0P1Jx=*UK8Xpf?RYrI>oF?CpUu z*q>RIM$t91X;Td~@MfSPapl0`a<{Y77Y{(XX{mUi^BD|SjC#EFuZjJasod$Qc_h_- zq8hn!74`xo^TEC?&nB*^NgLk(4RX*!^%n~u6S!8OBM%|nzcA;PiK6N~2=d;#iiZYV z>Whx~^A5wH8SuYlg3{dnkuw+mYTSk=S*nyZC;a*95b1pNkXs2h!qSroPQF%I#BpTk z@LPFm@+BxIF!iW)UHAwmk*A^-y1a5f_p>oE6xr&km$97$I_;fH?Co1MF_5$y3O9&? 
zU10b%haI@&D9t~5{OUZCOGEC(w_+*wu^`1sSS{Q-q6;Pl5Kt_MI-(b5^=S9znHDnU zPhRZ!GBAEf^Zaq_Oozg$FD8@Tb}UI73K1-Qa;^OJG!l%hx4jO8vru@{oe-DfFH9sZ~Fpym%oT=Cu3#ow>YiOcOoz7jrj1+m=m!|~hx~^qwD7ncyL91#v zh*@p^vw<;Jh9~>Yg+c1>hfhbuJeHEQT-t*hsLENubcUnEx^Kf# z21q=u{?Nd8LOO@Gjdpt0I<`7ce5RtgVMyJXu3_RtX_keu$@h`Ip9{7w56ShSo+l!x zc>3G4945Kn)SZoY5t5O=7bABB7KLEA4)hF3zYQ>~ZH)3B_;!@R>ccDk*KwNH^6WXq zNJ-`g5M!o3*`D_N5>#4|S_?KQPq}_x zuX)t)6eM!2tj)r{?AtZ>=aI6lP#slwZx(&WSJstKQt|GF^B}@D!JgV_{T;&Eq6KuV zeGt?StmkmDKcE0DKj!ammNcVAqu7zVh{hJ#4I>W^a);u!z(fZCwve|akzCT${VE}& z>aT(OGX?+)CZ=J2`|mgA`V(6$#14|WgX}CE4-QyKD3I9DeSIOZC^G`H{qMMI;2i+# zL)$_w4h{~g`Su^*__FLk#hdh_I?_+_Q*J{3mZwk};d=I>N5dqPP%0Mwc2xxkEGyJm~Amn(<*7F0Uz;#qbGuDJCde%0TkoqJoIbatF?oloq!ZrV=Uo@G%bNn zA>~&YkmQF>4$}^fKbjDR9a2636biO($w(e1mYNM>aoEvJP0u(3J-2PJfZ$k zmj)-HZM&{ASRhb7fSC9#jjYCD3L?i#z)$uY1k|`B_x`S<17DO4serG+G1C9-GXaWK zNR!ts{WEkapmbnCcP@6t2LMaJ5dnY!iCZEI6fa%e0P4Vww}~I-PD;o;o2uPpGV=Pn z*9=GI@}k3bbj}o7-h|vSz9HsBv9F+zfxnZO?A?(!9B^pxG)V5C7QmDyNdv0cndz3H zF9-*ySU}pg@=rO8ak6@Vg#`ajXbEZ(P6P=6VncKlrLkeoBF-H5is}f8AsJ-!(Hzp9;dmmsci3j3w+^s9MA!jk_cs^H#!YaQ2 zov@_+ipE>O25NMsWCwwUyIUt=oz=922#~YT^$2OQyqZm1tPY@hf+%7wZ(Q?Ofb%Tx zW?*;F@x-h>%WNbprT3A$@T7k=QT3|c4s z$P~{NE*kV(lXcEP@O!}#r^EMcV3a4xZ6UEf6^*y;{U^&!EK*uw_b(O}6GpFGE{(F7x8HYVsoVGc{w-Z|}^j9-<4+%oGu>hRgfOnCt`#)fDFoyvgddMJ+>kvV$ zNjSwP3dFB@`hK90!ufX)i#U-3s|0??MAU!kpLma0k}A=?eTS&Uyq`&~+BT~c=(<5i zQ(VvO%j@$|Lz{%aLO}QPJbj6evb;ZYqK`e)PZbxOR6IcI?Q!Mdv zOuz+34$rq{7si?%gt6LX6w*SjxUA@wh#wuOC6cr~?YxyKFFX)+Na1i?N>>|mS4+2{ z^a}QuL3`$kd?Todb14~kx92|MVW+_QbP=`Qb#%HFc@>WIqNKdwf(erj%&R!XZ{G|o zjW2E1VeTxTKXKs>)dvm{=8tgqq__j_V=*bue+kQn31&>pU6%9o!lE2R|M&gLiGObM z7Ac0;z?w_2_N`bZy>WNs2X6`tes7a9?70D%hxWdrEwT`*1rrmCBN$B$>=4S>Ky4*4 zA=#zp0+a+D*oLG+Zex?xT!5z4w}u1E)gVAbr;IURp5dvAw^ts5+X@-=+@#tq&&~5G>w+RW{wwS zs+rDwSz`E(-_zIE_bX?KIQNulJOzv_bZQ*TV+>4xTS>V)fy_0N;W+?E(xh8E{ zp5!!PIaSZ)5Mns{nYE0kn^7#ci)hv*oeu((>HLKP#n&1$SRd%3KJwK**e5`8V^u#5 z1|@JGgk{gGk>eq$c)tq-H@6mU?rjFI{)I4!Sqv zj>yKwtm|nfmnVS^5QI_2#svCmvs{~!BQ{Uv1r(8bsAloyBN*@zGGY5BfKc|fXPPBb zJdCXJlUk1S7$}ZWaZ+Bl4Ww6dxp9n9HXl+gc5w*Vg(9p9z}>UZw)rVY_XKm#a4EyNe0L~<@yu^N%_!SfWLK}s zC@})f!rt7hrBJ7lg7d+LTpi9t;o?((&8!Z}+`7!(E@hC9=usuzE`jr{Locglh0b&f z_o~bO%Z=z3johPe0}^%JwJC)^xM(c7Cze!=etf~cW_L>*tTX99B&EN@4wnL5lg7RA z-sjDt0!Pml?I4}AwXlH9jHL*`>pHnBFHgge`xj;V=bOf2N-*yz}bC^*J;yh(O~=#d6XLAoT&3oIDsx<#1@8Ya_~P5;AB1x4vk- z3wP#oNK^0Lz|Sh^v1Qn-hZ(QiLv_F1_4+@CtjE}E0?RM&+mCkbf8N~O>Y+sPfk8@$ zHxry{O!2$FA#*P;J9B29yP4;F(ugPYy2NO`+?_nHA?#ezwV|TU^oc*ggs1suK*gTi z{J8LlBrCq+UHVpzpibPkMtb;|TeFKZ`CYu>Pi)2a#-i2@?6U#x5CgKc>b*}{r%*&h zfNaL0HoskSVslM}-{FSm-bX}(=faZev&30TC!Xnj`vPsE0RIS*qDD$iCAgI}PyvT- z0;Gd&OG$%059LkG!?f{1qu9w>mqKqWI+pizPlNc(C35lDvlR7~F3DhV1#*@Yxhs`? 
zqgt~-;ehV9wD?;&VUZ3CL9kLyHyAbsrZLceyZnp*xRJp+TU{&shX7RKRA;NraM z*F#KsxLh|bN91eJo!qKlrI)91Pa`~SM?MEwmx^H#Ra;ZD^;$<+{B5!6xeeMqNi}R; z63053Yg7~j?4fHu>bms9=W<*qu0S>b!ka>%G>*@(V31611Y()#^;|T}^`Gq@(rxut zQf%-+(bmd8T^BxvA+X(}Te1E;x_>;QVP+^*3mD<+M@!i?33xtxzw)Ny{7g zrX`fUtV!+i+a|~5EyY%%^vpdgS84BG7ZF_jSM|LhX>dTLOlU*WnA#wF4*(wEC}5Tg z1({_@NvJ?TWgH4%P}!dat4I>0?9D_=1#P}wVUG469FzCIi;6~o|I?_dvK^0)FlyTP zwG4=w-RnvUtv^*qbXk~YC)liJ#)kcPTP-}I3((M}AXv%oEg9NJtrs%uCkTQSA*J>V zK*S3udU0aWZKp5`x18TB=mPEj)QAjz=tKDMrxHQ)-v*ng@5`VhoFZ5CfVR*FRZn=u zgdw4gnl`>TmM4IP$(-@U$g~2N2x}^3*2_7_#w=x=%UHQrvr7V(Doi|%gT;ZhtZICJ z&ELkn8>GiAMu*25K{J@TN{W{9B_yb*q@?7}&|Mai=Q_W9q0hn@6J7z;o)vQDm}*y5 z@h6)=hQp4dc6dQ&#&TkQ3Fh$JwJyzC(1!}i-`Yh5AsejWur)By_8a4WlLZn12u?G| zPJu|?&r~m5Y_F&e`~I6cW~h>fku|H@kaAHAtj=I>5G~2HouiTheWTMAd>nebv_@VF z$^){U(P;cLvvIofApwU!fsS(QE#P&LW$kSGj2o%=i_Pn4GFRJXy^LZ9NZ7b2XwoeN zLy@G-^+_*NWrF6xDUx{3FRDH~{$Za9oFW4tCZE7jT1$vS5!fA9-WN=Od<#V?vJNgc zKx2sJnAA)yrJ;)(P+z6SFEC@SfI?}N(+uHV56m6N7YR*8-1?D)=K|cvP+;{(F9^78 zB0CdPcJ&~z*Z!AkPc`MK;0EJdU=hetq)~jG#e??Q_)o0F6`P?Qt;Pp&+Yig+PwO60lyc>kFQ~95G2LhPAG_UodkN8^ttR}gd;`8lwk|7- z5U-Rcp7nSCdqov^gC%68L?c%D6Mo4GI;&DPhJep;x)!%^$97Dl~hR zh*-YQTgYmhUsX@+T$nuTVHCgpNBC2+yWPk3wtCF2CmC5}D#~P4jaY78J#!3DA8zpN===QuJkqul^6rg!kam1O%=DR)O0gEI1o* zyV9^20Ng$gN>^=j`{w0DIyyNhR_=N__sAe!Q28~az@6PfJAn8g^}UmI#4r#3e2f^Y9@`>QE78HrvF!8k#C*^ zqV!S|=Rv5hFLgW9%1u+bsqC`phd6LNVD%L}fbBqhRMCB7U270uDe0~6Z;kfyiFA-hS0zX)BS;g5YEPvJ+!f@LHz?WYpk~wv&p$<3VLU$^tT%bKn zg_F&wFT4R12z-S;2)6W0HO~{s>Ozv%5E9_l6N9PFRU*6A;HiliicLbz#;Rh1ONaEpkLw{@lFkJNZ_)@WQ3H%MdxuRf zdJvf)`}ogz1=i7yR=U#~zGADv@V&R&a4y+}tzG_-m_gK(pLLSh6z>u;N)ZamUWkgH zULPzJdiy}VMtJm+RN+mC{`M2`CVt(#!>#gpIK0_EL#aT0v$9sVW@ZLygf*AS-3Lqt z27g>A&g-}!n7Kj<_SrwbBQB43*K+NoqrFOQo5vK>*mWN0NtPfGj-PC|$$s5$7_d-j z+768f@@eB*a~t430exq!mVkujB{%uR+hW7W`%XUxUlF{ZMoMlaot#wS>?R`*G9br8 zsOzz}OQv{%O(dhftN>2=A}%V=bFcOGi5?JTwpxA@E{}_*k^f|R6i0`N0&ZT_Zl^sQ zPKY51-kHjgY|izX-!ie5l)c4B>5%cHqp8a9Re`%!8<-&ymu=5IG7q_b)~5KV$(^kd zKa$>Wa$8J2B%5#HPbXZtpdLmf;7f$qLSGyqqCFkx9W-OVu{Kty{AEhh4&irgx5T0l zlWg=Q4v$O~(Wje77C9##htz;@ZO;bMM)py9g*540(?r8a*@=}UM6>eee9EuC3}3=j zRPF>atRR4hb_5T`MWb!4nx<|$V_Z2+Dm9AywB;<0WiN4YhT(oOpvP(a3WPtI{W2HS2 zewi5y*jwX==P$XxEnPl&(54A_JwjCjUrrj^FYM*g^(;|@(YyrK3HY-MYc@sK4Cr7;gxdM$K#Nj;Fa~fLR_U*AJ>jtJxCM*$nJEl+x)|D z0K{Fkd)abHuUW6+tfit=d7Dai|6*>j16m6+5mG3Q$S!WxJ^w3(;z>22zTIe0fo*_T85vX_DYs;j>)SisOL{#M`dTQ0x z89%%~AdtEcd=>#NZxM^CzJ~v|c|Fmh6liS@Y}597&JIc<7!36q z!uY0aYslje)BF@PKT7tp<3#4tC~%u!0L{%8NA>JG0NWZxtc2&OVEKUotVMB9Hb1P_@I; zp7Gwe*Fnb6eQvBEkQH!ro7L^%;9z+Eu@fBJ{e`^rYPcnic zz<6~a+e&=p=g~B}ZR_B8VfVNhP*en0~8lq>Sut zI5n?3MBcRX20TD!8jX7)tIP}nPWdSShHVWDAd7?~;HH1no`<&@&EefGe`y)xt4?UR z<^`|vDLeLa84SzrF}T~ztV=;!11$rH zzJXvLxVJfAFtxOElW8VYgam>7-g1uK^uW@{K@YR9;%b>Bwd7tg(goVT*LphyUTafG z7E$R?D2{>!0|P?ux~ZH}uyYacAb55yl6{f!!*IylkttP?Mks#q7RuX3s%F0uo%Dcd zRhuOZYa{z!z6By-!cw9mMweQmC_Lc5g?dV@hs)<;s|3<9 zjFlfktG+|Ect73wDBEQ%YP(s}mNE{)qg>dnvznst#{rNbD(Jle^nspPqbCcJ`7>N3 z%12E>ci*Q%3?x)C?aU=w1&%@mGLe8QQFUFyLWQco|4DFMRnN=+4?EDAtUh&R-eP(+ zN4Pm)kap%FGYv@9yi${Z(X-Yu@BXzPke?EOw5&~|?voS#h@ z$4QbTpKC(>BbYIit0otQyJ*NBv9OTtS&3V%7_vt3CBvkd7^d;D-i{j~IDkk_PDf}( z)OX=AMjXi9yH8?LHB^+jE-s&USENl~`dFTdx&=5!;kdS%Gx-I2NT8`Y7ecuOCBpuT ze+v?Jn~xwL5v_F#k1}(2fMWQ0csoP)_`Afidl-=_`GGmQYVQ&jlL`)A9P^RKj6D*v zLxxvthVNe}E^seaMvc-(~&K z7h~1^X_)IB^IRTW?}^pF%i*`-$f8UT5qG9GF%w{NrQpNKQRcJ|<{OLt0knWD7M1V| zhEia$j$Z&x;#4u+PI@t)l}M)Z0Bk7w_=HQP4>WGo_CGadY8?r5L!%^iX{h|Sg!Nc+^l_?)QaLjRXR+NC~vk-`P)Ol{ch zc}e0@@@`j&MV}5*{0BON2K9G`zc2aV*$H*uU<^B>mJ`*72Z9RxQkswKuY8{6tgy1V zBqO8j8tnNR7i9em3=Rk46=WUm)xjLFTf5;i?l2)iY)#!rxXy+{+s11P=?h8a8&ZV&LqjXO 
z_JWTLbf`f(ny9B_s%r~7ekG0LxD6L)a4M=UL))h6jk;uG2Yyf0XqlwL^<^80Hi30d zOP={1?d8|o%Zje56s%j-J>RLA8H*7c!H5?fk3-H(910k{e)?GRz5n5AVq^IuS?i?c z*KMGY8`@Sp;AX&smODXMaRFZYiBPb|JjrU;)WUjwqWN*K5>0QGkV*_n=H8Y}O0N zk35dE1l1@ytbCvv88+l!h+qc@OKJUe7gEw}{h3`2BBhv6{BhAGI}y?r7$xE>ZRkLZ zn1EyS%7Ks*MCKA>bbu>&w{|{oGPx2vq<_q(+&lrmw1`7$uM@s+sEgte#F)`UD;VV6 zjRJ72MPrV|&WW?ajgYo6Z!%L|!zNfM(|;*gCzKKJg<@|} zr*TEyPVbx#39Bz|enhBVAiV|sMsT&$F^BHdj#0YpuFr@?v#)8}`<)08)A& zf1H?b0o(07RQ{b)+EB3UnG1bzjh*1>=7Vw#0n;ZCN4giAWHRs>%2~2urX9Srq}j!p zZhedY-i&Leu3ad+eUiZz%RcQIb~}rY;y$Nn6-d$w9HC~6WD0f0-ddLq~ zfFRl-j1B#6@wY^%OaILO4Zs6Qnm7&bni<{6PUh$t!HblXbcay(m)62;7tGON?h(Ih z790S56B7BAL80Iyg>n!)AmO81GpAure`cgu9xyf558)I}Bb)Sn>LWQWOzx*&OHJUx zxh%Vhg&t%Ir|?c2jh?fWgy}1`!vPxMDn^r4cr4Pq45c3w_ngGv{kcq{fnszSVyvIf1`)iMW@14kGE#uzyhQx` z)tPL_ivegbOr_igm{1i4DIvR5h>t(;rJ(=B*SL4YLKi5CX-348++CFXzBO1V1 zQq4lq+*r5@4s%gu*M%`=+r~SD2A1|4i=TUe;LEF#vVzou+;-Gl6MH;~GMTDg=(13x zaQJ1Hw0-RdY({dQl@Q2))MVXS9TX3h0s5ut+!Pj!%%6e5p)@`1ssv%xvlBzDyqy~) z`isO!t?No__p<}y&Isq#bM z@Xw>-w*>(u*7yq{#&NmtVcn8X^uU9sjO#{h2|nrD#d>>iKwh_(HWY`-1{H47(My4m* zP|<6Auh{%IS0S60zT(58O(JsBvq2n_?{Z8us8X&$NffYR0wo~4bf!02z|BeJHYE4v zq*U`gmIR|^F@wErsXsaY?rpk#Svz?n%c$nAO} zsP!QPOfB{lZO&COg#?8Zlv-xB3yP^T4?_$yHVCP@A&~TnCAA^8Uyr)t{?=tqRoBl# zMep2E=zRI@ek{!v>AQN{0CLyfIO4PFPr)BgN)Z%+MhE6o?}9eFd+ibT^UELK3A@~; zn6U9c$hHRVf5p-gg|C#y?|o0gE{1*rWWlhJYksy4(bi1CX82OxZ@qXX@xDQiJVD|; z^S2G0{p&@aOFU>2X{YnM_<}aD{2*OMt!4Fp>8kt z&n;7LDZe}cW2|#4CN{!flx&*ti8&@QjVbRJ7``zFN6E=)CYZ!X0`WzS2hrYD=9R7i zO=(hGR{G_)&KA9=qJ^5z68Z-`O%DGjtBc1LrdPbf5Y3>BoWEXhKwiiq89$%_AvtuK#*EHSodOpGzV^Lh{WbH9(r@BPQUkDIQU z_xtsJo!2?f^L#!}x)zk@NS-G!@FkPCkINz7xT~D2rHDp)Dr-{3ehXvLqyrUT)4fOS zhILy3V|bC9rj2dCguGH9-ly3FuL_TfqMKNq%!T#2wNCoG*RicN2=MH6)Luqtfu{9K zJVHA6Q4|hrUx!QBQxm4@p>zgh4}HW{b1*Pv95J{1Ce6WK4ZBM0?F9O(0h06oeHO%T zp-DtgeqkMJ?$5i3SlirSrMp#U-`7S~5#@6!vRLg%^(V4HT3WUrqnjdu*!&mHu)jzd z0S#{OWWGFvO~p}Dg)E+Oo#1i#8LvF9e+;n+J*GM&_aRy~>0OcHf5lDu&|qnhM$DMA zBreoqG$5deQwB&BRnF-Xk?|4NA&F{)eU=GQ7<7fRxkHMe?#Y0pqXB{B^FMR(Auuu# zg)a{z6VNHckU<)Wk$lcDEeNol0v+=H)@xBc)N-!S@CC4azDaY2=NPRt37fhS(aB|Y zr3Tns{Ar_Ih=m|MU#Q2NbbB}McBJ;4`Az#Pa?W`XP2B9Uikz043&|Ad$C|jCq^Q}x zoLA5HiIunvyj{M)NKWkhvu1Pk+Fk<;1jQo7T5h+_$s~jIgJt#u_9VT?i~lu&HiL%O zcEE{-u5pNBiT8zeFW2>Qe3TuM@Hh%nk1g|TkMm+<;vG4k%6>@d=m+R-&({7Ta*uH$YEXkGv{ z1zw&1t;*nif&R<7wT`$VGU5eKo;S~^_x6x3SsMzrKv*^m2O~x|LpS9_o*N2VTIs-) zw86^-J{qU>XXg%b7w*n=Kp-hZc%~HexxTuW)c>Nnkr}Zhd)x!->m2p zrTSq=QY)eW*5#uc+4}fxiLH^~Sa??DB+%SFre>EnM<*H%oVpMw?F&;!bP-FJuh*ob z;5?60sxrf0BVV)JdFQqw5wg{Tr2iA5$dDGJ(-ZLCIHLp8KteF;hO-expVd>v+z?!m z%59he%j1)AYobdTH2H*f^7w$&Mlc)P42l#t$~82e965m{0&)KTPSN1@jCwm001a?@ zSF&b+OYBX%pVA8Ih8K(=Je4u5+jp3A@y~he?BuquA<~{uK#<-UCsp^6&T{Q`X7j*$ zm>n?62r8|5t7Ww(yv~B^e;4|B1npn6xgaWKiNLCS=GPB|2^E5)P;AGxo5*>>JB*v@ zgwsAiRr#xJpFzlC2yBo8++MZ#3pgYLw>U{>UhV!Kx=Ot|?>K;1Ed`4;ztduekCi!x zT}UYWm+M>0Yg?e@4^432N}Km z_D;0-z?Clz$`e(aaNSQv;SlWGJn-S%)2Dn(`3z*)&%K+As z=!XFLA-vbZMgvg|I6yThGC_4oL4i*;0Gc;qp;wRe16(3nXKx=J`7|&AqvM zYE~;zgSr7iV2exWWgm@K{ut4!`AD^TSNi@n>nI|cbva7=yyWL!$63v4xq&AU3^PdH$-vR~+W1+ZnF&&BXdFJXd_HVj#V`g18x)mgO z#XFFK4l=zGG$qIXD4CjAj+Z2jJTrdQqy2+J;p&KRNp*cK&j9f7O+MO-!&Gi9k> z_~8_*Y02V(763CR#t2x4$&Rkcf z`a)qm83texCCn8TutQ)EAVG&;OdQq>HzH3K$dd}_<%;|c@v3bZsqW?8S0+;WLz72( za<*v6dZsivVgpu^2slv@4+D&= zK*WKugwg#1Z`AxJ4{a}yHf<7{d&rBs(+G>2ZT=a+EGYKiq69!b_ab_woh%2@YjHs| zkLU*if)1R==b&pUC^pbX#~3r6(vZTVo~;B}a9DOOxf?h}0%ca1;oa27#sx*VSkUQ`bsxfE#HDTG-)SZf!1gBXxGum|NB&fyF* zXkPWi3;YhWtdZ84btp&ukQ_lny+_0aLs-m8?|j7~LnVhW0umILc#AYGD9QtMJrk8} zX2P!pJt07F7Z_rM0v(Cdc|VW7w)z0Oor6H7g;HxD=Zq$rr=^d^* 
z#HIm83_EVF9Kg3SOxHhh3jUWlP`B(y`lYBj$t1olP76i9)Y0o(4vwd7ZY#lhWc$OO zOQ^Pey>`j3tC~wQ>Bab-V2*pSdMjV}MO226ZyVjU`lY?aYWkD` zCj^C3(8Ovbg}0||7!HQVdx z3k`7#1)SLyvrmFc%&km>pve@pb-e3aCU}J!q`!N~Baa zfW%x<3ma|Rs5@-jp@YeDp32_{VSveu-Ubi_ckRI%YB$l4k06F?sq`>I+9&(!1a<=S z5WS4zTjRTEkXjW(G~%0ljGGPY_cTfRyArx1&gEW-D>_6G_Y%FRo5Qn~lWlmmt^c;#`T~4| z|1KTB4NfHznTPK|jgSC*LD(Q5_mx{&J}!+0nz2aaIzHwKRtL7wXVE&2X-;L3(;qS) z3$2GfTYUi6CuMHOA$-D`?)UR)@CSVt#bYr<_k$bpgNgC_hhO#(0E6xr9T zw~y}$I;w^daV>wrNE4t-U5EK1{7cj7x+JiQm$ESNO1?2Nv;LLG$20-=h63?D5=Wo@ z;qS&>CzoQ3a{jrjjLdrMUbTZ~doeMS5!oQXL4Xb%1T= z636XPiOc@4<(zoGH5s))p=Ya%IIVf9+}LMR2d|=PzEZBo%O(*@{Zds`g}RXDE7qw{ zwjn&4O}d|=6Hlc72Dj9YgyoaxJ)O^#jm3$+wZQJ>P^nra7S7Y;;+h>bAvdJ^>6|&H zk2MkjY($sd!QP~cF3HN2EUrQg#Gc>7sy%?m&YHHsdK2)MM7cgyj(sK}(Arm?ILM_h zwn$fp#er%}xfv6K@8rhwyr4)-m6LdXuCjD!(Qxd@30BYdov^b?PM(KVvU&id7qR8B z98b)*oZ=a;rO$+khX_6b?hKK_Vd~QMi5r(M{^dQ(=w;FuYy?%0HVM|1O&mWs{tP-5 z5DPz-Q7&->N|fE`7z8ySyp(5gUCu}kvNa9n0NDbAs*c+^#s4?Sa*4~D2n>BZkt701 z)@{s^c4G6shZ(acTK99q{PYe?d2}1SkAMyicII_z{cxMrzzrN;W~(lkMI2!5nD7m; zp}(3uv0oO>xg@M-MDKX;c`!@(Hy4(ZP}cIFpgrgUZepU~J+k{rknx_%LrAxj-`%`N)8IQXwUs^B?*fTNBT;uOeNS=)N7 z-o`0URBwkKo(HO=WvsHF^`IdqGM;UU+`FM1f#8T3{_bx|Sbc%z)881JFkfBBro?f| zF}%)mhc!s|<|+?4PtLJ2=KnTI-YEnJd8*r+{ERGdeeDvpN}&1qUJ|g)s-=l_M!p-{ z{B;1zfrq_ei#jBE)7lF4^&xF-Os65&jjjHb$#>PbtChR!>O>BhKJ{*1;U9CQez>0L zxAd}J=n996mP5b@ltu`NOgfS8L%0WQA>Tcfq~v-l1is12F_a4COQlr-+je+W)_^4kmDs6aCm9VtIZ+5eVpcZCP-bGWU3VbP*Udr810fo|;;bv7#kR z$j=}7z+GQdh+0L`Kz_d`avA&k0nR$IfSPQpewCaH+T`;P^g5xe!RZytnO>AeAyZ#} zS=&Ck*t9aviP8XTiE+14T!>q<#QgORZ-Arxfw0uPOUSp!Yo4Kq%tK{FYR^c1Bzi~q zEO7b{HaaenbTIXq>iA0|@psyF#slZyV0`3Zm1N^opm0sReIN0&(bipECG-opSA^!g zHExfTHvhQ7MKhO)0?UW`TM>^wlYt~q6B)$~pG2HqM9FErp2?-iVAgbc%bc&yM!`wF z{1#;9xX|FL@Tt=h4s{WfZBBdnW`_rurkzU6{BX0RE`R_2Z3y6Hia{lJvFjtyWvA0n zKs{MW*Nrp$r(+;(@5FF9uS?NkfpS8rajcJe%v&^LPI4B!Q%m zfOcSA$w27{#Jlt7I^}|cfj~IRZH%5}DKyZqQVR4;sLY&z&qY^&32HTZ{Ay?C&P4VY z?#H@0J#j<|LAs1d=l+$@zmQy0?biQZ%Qzt^Zl@Ot?nHnbKsh$K5%J3i#eO2r+)5`K z(cWDq?SYmPPAo14$YXDbjrCcd4k2>5q;d_dz4tJq=u(nJTIdy&qBA*D1$0oN{#CIc zim5)RiXH1eYld+UJl(?zkgTpyzuIfE^Y`!;!lol;sXe6 zsH|xdWaVS5e{M?7!jC`6ZcJiV$QFgLiSXTkwZ zlfkZu_77}+BTW-n#X4tyAv0O$Shd^Bh%Wu~`5n8DPM6W~4ofNnZi?l>Y* z)ZAg9GQN<7Ko0!qN#(c3Ft8dS^_(4nTWksuBxh5ji`qrdLza1taJg`Nxi4VV0~S?9 z?l*{_aU>a2L^SF~Ok9A#cw6@E%5ncnsR{PTK8DW$>iHgB*0Co}*}|_C51G`Os)@7@}K8&Dp=A#UI{SL@pR%F8;Lt z!|WS>fAi2q@L?D`E3QjbJqlA#9jmW=I~PLBjvNYzPB>e)vQ@e5>xb zc0BFS(S=&o35_i{6^LE(fq#g;D?9v;o-q^y*^@Tn|KW`3X=;yu|7Q}_T!@n5)Ys0M z%Sv@MzSd;aSGZijd;W*qrp;|F3B{h321k>L%>eczfpQie^J(GI9?oF<6&t{b<-QfA z=tj4C7ftePU|CVt4*C{lK`uU4mDU7xG$wPU2KyINkS>JE(|g|9K6hbk8;vjZ|s z6@!=jXg|CceWyEwViN--g>i{@1TLd)9C*;K8)Y|o7Np}^N^GiO6aW;&V4<`;(HoU* zCA6pevv;=ZC6tIlsolLxIL@9Kdc))7%O($X6E37o@q7S>hCyoK@U+%~Mqt7mn3o2# zA6|u07YkMn6MJ-!5yH42|Lc;|3D-ZyRX!NCe+|8~K2#`&SHr~c41WpNQfE{CJMWZl z-UzyV8qOb2g?|_wNW-V5h5Yb)X|Z<`+S$;OXz$GOR9T4guowiJ9aVb6Sy}zC8Hhr&brc^5;g|IR;_74#7s#u z4syxrXIG52hn1sqNl!}C9BSPN0gtV+-%Le{>O~T1vwQsfXk!#cMQ{Kb+i)2_oO3|K zMuollS7oTGu*wipIIQ0%P?4$}o z7`oa(QiCk=ONKu)Yr+i`wsR&LlF$0gWfITp?t=V$dvY*pqxYWCIfgZ;u*d_#dMt7x z)1$?2ATTe41{;IX0UeYyZAgedzXVPSv%Oq;dSo*CD{Z$9YDzlsI&e?44&*O z=Sxsnsk}`i@Y~X1+Ht4o^IKIqb&i*3IvsU`W(Ex>OA-U8Ig8euF-LY@)X%*ZvPS!l zJ|A9AZtmT<*Zgczg~?nscf`&SpQv&L8A;067xEN2CP2dg!w%Q#^>D=x_!}Ujc1bA# zK|W9?&K<&SPuIS71a3!ByuqMFZ2>D53@B_DBi~OHL*vevF%cNbqF-y9Fm+IV2^C=pAxS zJG|xhGaHXS{gXp77cJq@Osl1}cOQQ0*rrV7q;kILE;VwZDuNMyC1%}*4GSpny78#6 zZUWwxD0(7E+Ih>hwOXJN`M)c!lAo{oBSAy2D2M+ZZhA+1T7K4X`fDsNTJu%~-o!m0 zh|wUZTulddzVhL2$)fH<-XTR?&YfU6++;ZWT^VC-wkFVT1PP?eRVE)TjC!ct0IOI0 
zFH~hME&Z2Iwrefc^~oQWZ~$P6@5vF!tEz1#_J*NCyC`)f1QX)X=B=1;bFF%R+rPsY z^-1H99+JSfRt4_@NiK1F$Kp7N=A;-l}UI!Z1LFshc!-}x=*JdrA_gACzDRAM4S zaFk`qp4&@F4Wk{2&hc96EuihIH72EC&%GVN(kNa@RXYUOe z?SmW;Hv`RPq+R2AOF_7<@T4)B2N7drq{4JjKtQT?+81&L`%a=x8xVEK+b0ehGk;Ai zGtn3_cD&9iK<^ABm&m_LYMTK{1$38)n0|K4QOxv2Ik0LNz=%gw#{wa1(J&H=D3Fl1 zuX*MjsZ#D3Q|qTQ*wH*b><@rg8wV+pe<%@jyC6s$d(wa1;E98y&n4E+1e%?z*RO3L znRhCF2Azbf(OF6^JWh4?*}Pwv)#vb>>!ORRrsS&>XjiJ$KZk1D+*Hues>AfQn0RYy zJ%|=sT68b&H&$r&@tM7?JIKC;Baa?&Jk;UuWG=8eD{1tfr-ygqN=|n>z2E31vRl9= zm1`tw&5c=3TD063vP3N*Xkw4Sne^xBNk7VUOpKJrVqPaz z$a%L&3eCO8Xd*rg-=Jw$5A)Zz`Z-Q&6Ud7s|4KQU77$ohi^3=+SaMk8Z3)lX8!^^v zQ&1v4Vlw==@1%zyX||BVs9OTjr2AoY)EPTM;X+XgD>fvj&K@Y@A($F;2t_3Z)Cl7M zsi9UX->D!>?Fruv$&_Yke+bIda0d(p?_pKh(laG>@n80yLT7e&?ta$Bqy$(Q=}hLV zQL-BnnXcgWMY_P58JPC=cbG=fePtR}{y!0#b|5~4;$MVIb+_ah{5#$1)j2@63$w*g zl~CxAuOkv8;!>epsHv^ehw$ItO^34)`fF3@4n4U3VmXP>(eTZCjpltLi9UE%(_c{) zOCy{m8iz%?t+t36dVHQnIEenXr2Ld7(HmK}mnu&mV!S#yabU~Mlo5azf92MUay#Rl%78z?0a_ya< zz0AYPN95WZs&)4-6TZl*3zI>SQ2QLq+qz}#P)})-JN-8@QV>yvl;3Md$A|{AzMb-A zymZ|))7jkF{+Z!qDfvJsOElFH$oTr(q~P=-yr_@vJf0T0^x)qG6BNutv~*`GLU)Hm6IouMw|lF2^6R05@G z_M#@#FL`}DEp{X5Vf?T`M^9R1JG(WjwlEdCW-HX{E?rC_P&iqvjncYdtTX-5pVY!c6|TT>LO`G`uaR4Fh15vWTHIx=o1wmU^L>S zEwFyWF{qF`!i>CgOzZ^wN@6t<|jZBI}jfUy$6yON{b zx`S#5hf4s67|-;}RhxL^*&VbyI~|2wNEXhbIFWhot)kZ&p?M{)IZx+&O9LmkSoa&K zxhU7UUY^!0Uuoc4ZI|q-{l3r*>vx?*Iw)~AR#QCmf%3@C2?-P?e)K>V@^6^9uKtGc zXW{{u9l}xGJVGVM&hDBf+OruLH`Shqkh1dI)YK=(lfq?PvT+9ddU%JjTx{!i;i_9O3xw$%33IhPF6mqvhM3q2 zY}UOyUl94z8&uaR1;OsvDTAFi8+W6OY?l$O4jB}n@S&$LT6?KVx_cvZAI18Cx{R(n z*7ByvqrD;dLEF1i|IIpG8JCeLR_7ns@gP7gmHvcSTa>WiS%!G`_6c9N?y&MeYzS$O z%vWqilk`P80x#UTEx3R)=0Zhhr8Cm|nU%qD$ApRvuY=p@P;q3BUfj*_}V zi8CQS0OK?t(SHH8b{rJ4Z3!xRN;PYKg|MKT5*x2~olAQ*^TuSOx01jS;?NP4J-|1ysgl49w&> zM9ef|X;(BFh8o7c-b)DgKh8fZ6uTo8I^1=^o1jte&j44vA}M4V`!tvXR+kzXK?wZb zV+$x3F2NPMV^`PaYs$Euzm*;aB>ZO~|26)vdBfJ`nL|voqsoP=-+}riK^AYR>Va3U zal~~cRTUH$bX4aoBNTe(nURMPWG9vvNo3AXap%24j>X~*HPAWMF$Z#PibkgIDWjh3 zP#Oy?57AX*2ef=+0^ZktGM*z5 z86anc)OQlteK63|PSM!2yFSaFZ^uRzK4yRe)t|jY3s9PQ^UP2vhER@4oe>3D9pV2WPa*Q~@g8)_fjZMRo~k97y7_=lqqHbU1y9f{%Lf zXeq8d$STZMp0Jle`A$baHM`>jQ&V^up#VNtEb`@&)^#9;DuK?ta--I7EZ7uq0tKPi zUzCdMBN-ei1VH`Tk}RNO>}mjYLcXg626FeXFT-c_wtct}gZx|5)g?Sfc{PN4d&}%o zC0x?xwQt|plyXMp(=>9XJNkm5`{bC$-cwol{}JvWZXrG^*GwF$KCg~gdbPis?T&L* z3SPE6zd#291DdWxeP5!;uPq7!O$Q8Eff>K>xdyhW#)4qY6sG%mDe`HD{~R`gZ&n}z zy`bYa{2UatRc@vOz+KT-h?%`MAK_B^;Ky*?!JI(9jrD&1LfdP)WWznf<*xj$0^%;M z#UO^(JegKFCx>uip}7Wb69CKn$QT}zbf@gi8beK^-#m3)9T9g5=V@Q0_#1sgG02vZUT7A{_iLj zcO5d)ewl_+AJ0P*L^gcwe?d7fz+o^xWn(WPArBMA`&KJkn0w3vi44d*eCnA zh>UbWnQYZ@BL+B#|AIGH;!%f6%U)bo*D7tX9u?UI1u{r~>ri9%eYHnB#@!Nk&;jkg z$YNk!9dT)u$|dM?p+t}WiGfBV5>WtOqY7aOHsX+f!458JMylP(tI=ilX?!Kuo?n;) zRppnY)H=*lR2DJJXlufmZry2jclDX_hvV-Rlp!2LK#0%1pKBdou;Q~-nMwbF*pgAA zj-;?+rH@N;a8qD5N>?!In@aPB>duo!9Z8A4?1&DOa@I@kVe2FTD!fmhsEBHSmMNR?S$89hB{5yojuF6dZDlIWAfes|G z|K^^lgHQL4e3i=0VV#5&eM3S(aJ8JT0em00y1cOb+m}C0+Krog1MpF(8~qi7Yw~Xl z7Qkd=z7IK&Z^B$Iz(F2K??kqPGlffSJ?Dljj?<8pj^-iVpAybs@d;Lb_j}l5wh=e0F5OiW^~L%FLNOmzk*jgo(tlhnr}*C< zOH{CR=@$NgayQm$Xa_9EF27EsE(XnwB2u?myz&P&jUF$qOOv2IkMYUA6n^Qzd7+?& zTm%^pvnE2o?c5k4TC-r-a2{o0aH<-b@jOm%V?eSIofi#<5IRDkS1!&SSMw+ed)=$| zXVg_xR9snXD}VVK9EL~gSnP=}D8pDuI)_fj~xkCra$RvW_I4p5`Mk~5yv+}Gsb3Lz~}8SecTd= zRYj_pJA0YZdjgFps!UFNFB5LOezvC(cqI*~UeR$K=Yyqc;abY8tJ-gC_2i8X zfF{B0k1aY$U@Aok&Zyj(7k6jw%??8%p5O^D1*Nv<3e+ATg}& zQg~qZCyBC z39C6n(W-${d6z0T^Mb~9y32b>Jp{{6{D$7@sLuLI|3y;ALXgH|fS|$s!=<~nF(Zd% zgf>Tjv++MHKrQ~I{Y;zY49s#(AD{s!`oQsx7jnh4c09&43PZ%6jP#*E81yrPGVE>aLYnmhf) 
ziBR>xlP+2>ZEVTp%@_Z`i6#{5js~+ybUSb=B;634Rh&7V-HQxNEr)rR+oG-3Fh!FMAYO3;nLQSI!HRaO`xUX($ z;WC@}mdM4H;`g3k=*|S8L~EPwHkcarxy(f{E!pf>{f-QttB~Lm-gZ_+KU6U~d*Du+ zPi60){!MULr}`71p=VYl0vnsz>+cP=&FCcV{+p0(+OD&aC5w`>5gJD$NPpKJ|o$3W=go8o(**A_IXIH}Xl@7W;&T)ZrC=ObI~sf3a>bwltCO{B{P zmrZ(z z47f++nE0|AiYV6?5!2(_?J8DSegc_!#=|q>L0cMPR{Dy_w^LLt@}192BfQ1F8csbz{P41i%Biw|DYj- z@M>jeP2NTZx62oF+3sGHZ@r++O(%V_kv-XiBnLqSMMXu<*pxMY^X;%(UjYk-TaPV) z0bt9!(G92wG6^9|Lq-JSH^{jjprafZt(ipR%JfmkcD`dw`r=7H21M( zPyJEO^{d3Niyw%uYZ>JZ z%)T4RlNqA1*<)WCQwx{aVB;W1NW`pS(DZ-+28>E8+DvF=f}j>zI4QKaNy5RyUB9z% zo1?$w%E8GA9zeAA?}sFx>@_AF5ls1ES%9n}XRPuPp$5B@-bh1(gwo-P~kSz+kJ3^7OpEWf5aG@0#n2k&kKDCt}#fqdK21j2%0e^z!<=dM~BJ^yF zRW3i&g7&^kbg9JhJ{*I6=-wfyK7Ir8J~+weTEBg&R z9Hob4dAXb~iCm-F#u1Zi8qV{#X(-+%LPejfd1QlfzM6Va7kMAd_AmklLm>@HkQhiE z3%N>pL|U$x0r&mQIn#(oxJw2l{HpWw?Tn{nj90)yGqYfj_Rna6i*C2G(P4vbK5#i3 zUgpRsx0GU}1GxPsG;n#63d=wKkO5Al;NjD4O~O|53_;;_E-^#9UF%`i_i(uF>bCCa zm_)O*9_)-O`IFDDh)89GEe*1do}JNNlfn5;qiy^}S_^9?ItshL_L+CvBa@DpB*U`J z?QNUKuR)ya-JGx;e_dsQf&dMnXYg8R20pVEs{)c5=1CBL_n8n}DAAS&s zJ^2nM@M~~G-2rzxj<`YT8a2~j9-#r4+x;%N^^j8EG4Gbt1sB$;C3aaxSSBiD zyX-N8&LYPXy7$xn6WHVl@c)1H;Em;X>TgBdTWw;W^bh60Lpbv6#XHEoPy=$*DTh}D>4S&K+Qm5T;PGaGu}QWC#6pp?^fFE zxs(4FK&?%DK0qT}w}owkv#`!d<;uH+aj|(Nj4x)iLiq9ub(*j7z<1B^6=JkMVkTC)RS3_592Q+&m-4Oh{Jpm!|J2({ z56B5KsI6zYH&7f$rb2J(T{Oa?4eFv1i;UCBfl0l78rMbRG>+hrkz`(>k2MeZ?r>(t zyqy2t;VKct1-6CXJNdajuE_J}FAwomdLE^=Wrw6p3H5u5Bodr&D{XqPHt^RiH+7=B zR>@A;lG*RZ7p!spyM@>9^Pm`P*=F;ap^NLh!<&3!t6mR$TBRE{x2wf)a*K@2mYbqh zVqz`B*Ci_5Ad`?rP^cI(YT5FXO&NTT_eL!kb_lw)IXnE-n2zAYrR78s{wQ}BKOQvr zOONUX3k1U3(d71Oo3@{>EMAdZw~(j%gtG|rsjPTFtl6l#tasFs^VM(pgxUeKQF$a^(7x}2rQE%;l<%gP zx^1wOv5(&C{iCe64(FL(ax9@F;hf~=S6-F&QbPs6sOfY~9w`6|SQ0WzWfb!i!rX;&owdE9^Hb#87;q>orm^v?Fs~Idc$f4slvG-ZR4IwTkz8 zI2{4U0QgQ$5=)yNEY8DeZ1|$n2S&leU~)~D0t5AIP95dKT!)MD!oG7G(yHB(TXn%B zx}-Tbt(nqMxf5$BtF{-mZ*h4dkLv`GG_lexY8@(M@dWEdlzg3I*6&BB zjMxyv9NkyZ)=Y+K?QOJrV^3lSF07nP1AS)UK6#z4l2RiDSyd?4f8Asg&V;!tSOve( z*6(P8{AZe*doA^(kg@IQRQ0_CPa`4I+A~!Mu+U+O7%S}mk**+-W@-NVu*TzSA6u4Q zN`8NRd0s1w}`dykKN ze}UTGd%x;?ab|B3Dvu$=;V6n9$peGXX?ue|BKec?ACgsPi@cTdYTiR=k$Uc;{JLsY z--HeetR6ltPRlUQ^Gt_XO5)6Hy?xHXDO0zrW~$#?d?&fY)BQfr<6A=fdn8IXtj3vo zA{{fe zq|+`~e@SGYBrl>TEgM+6^*Z=YUfBl=_YZ{Q!D_ZA^CMO4sxMB;DktJ5JfVAJ0bFi& z36Fhdttg*=33?IL`aMa?&3HbX-n@_C?(9AW!xPPT!u7T+SQxOs!qJ>2f~E!s3YUM1 zRYO04pE*DnkX~}u-ZMJgpTmorHpi9;Gw5jK1hH#pzbc|TLd9q&wK6Zf8ljZwKyb#! zKaf)c;a!+kVD^8O9olZ$r=XPgnqP4`YvY5^pnk}J%2%HXle`l;jqoR}RlDKH+80lg zJgR?ccbjh0W!BAq1YsmWSe@=02$1hhgxsN5)Y4npdyG*306b$+G~?L%6N(Bg1=BQg z*C+M5mbUs*VGeXbiD!bQ5|Rv~U;|fdVF6M6D&(EAXS%N|OxT;?3l-6yLslmNklBxK z(o~k`_4^M@?ioN_lh$g(^Xm~2M*N88(I5C2&6uyLeuL+?IFitDZ#Xtiq2-zp4^lf+ z_Nl7{!9MqqAZ^X$H|fj|8{qlNYwmT(^!R8;wu!F>N`gv2{W~WL8O7HpR_~s*M{$La! 
zZFde0LDl2rbv5Rs=#2a?DkUW&{2QHv8t%U4^pMzybTYhmww^a8(O)v3FYwn?lJ9yb zm9;p#8Yfh@NI#JQb?MSbrSzKc1&r=+sP`!0@I#!t0bxo|z?-KN&`kNudfA<>1SbJp z#Z%=w3jyRoQANpWxf&A?z~I7Nb6COVga}_4EBvRp_vQG-!fIQo(PiPaasM`?2zBzeTR**SA3A0bl#r^JMoiw9-Wmm><; zGD)5859Yy%?#y4KfQH1|dN$w`(t^PNU;HQIFnKr%25NHGKdA1{4+L#5T!D8N@MhAx za5#%I3%$83xAYYuAGAWgJ}LGufronHuWItV+F2ul_#KN7NNCc*_yXbjrfFa7BWg(+-4s6`Zh{ub;3ylL0zctYAF}GZp|+y#?T!Du>gl=)qJ1r zz0FTqlJh^}2XBcF6-Su_gBil)7Ekasvb~AReIy^zG%;sT+zVYm3&(EBHc9l|9i`pd z*bmUxHzoP2XEPNt8PMP@NY#4B6bWv2ed#F>F@(do4$W&4;m>Xef@=;`T z^+oqRhkeNrC6}EY{vz)>gCgs9AzYMJekG`jdnXN(s^i z5z`5)L+9}k9%?^mOA~eMa*oR9vB)0Dr`it+9{ss$cY_fqN85b*VWFjmL7zQmE#$Y) zNcwDO$QtlU1fIY&rXwfNLH>N{_Ub}XN|HLzNbbW;t;zo8!LUJ+kd!nv?sw4aJ)1NRC&v1Nq zyuTcF1BMHs8dxz?dGmc?A66%D59V-U$A1sz1hN~P4v`Y{pueuywE;bPYNuYM6v`XN zMm6HUzScT$B~Mwg`hDf6sRKyrJT!CN~cfs?TT10pvLDA2`ioDTH9^jm1l zMwVBqd(=Ja`j%V**n4r7cM1eg_N>MOKR9ZJ_YhA-@-wTwAB5RZwp=0i);DpFHY6;p_vn@Cp)%3Z(of8X%PJuk(P5>kYh;XL>9%c7_Hh*Pw*l(s%*YsF1eeJljRk?acABMngtZz2te(dbmB*1=1C zMYg;@S9a*RK2E??)w=nBLj?cs1|swU6n*-afN2s3zVclu?N=pf7Gsokk$gO3NXZB( z>ekHKR9^ow`>;TmS!&CE`|6BAUgvt~KA2Wg1{ssA;l1e987%P{VW9K~%I9*0FMjiF zA4d=v2=_3Cm=BF4d%-G{s}HlylDX$lZzs;%np<{WhH7o@$K^@aihR@wRE}R8b{gJT z{U?qi1+b5hu;_8?B9NEUbkMtY;;jYb=EDV_c0hJ)g<==}6|d;P9}!K0dx9M2MZ%!I zAx1QLtHw0Vn#iaeRwsY>KY1=-d=ZhE_$lPt^gCTYyvneUo>^U+&1`G`a3+EX+$8yL zuXjP{?((u+bIO=n2toG&8OtlWtVg#wk3|9v6pv=n(;ZKmkInDFZWQ;p7z)Fveht0( zcO)AU$@I?+rC#x4tJ0*pCjwXl^9v02H)&t%1|zQJB9tYY&A^?>%+^f$3$i0EfOf;< z>)ir-RN<7G+%h-66ubTb@6n%W)A!4hM^=x1qNfc;?m4E?&n#)u!nVvq!d6D>=jDwg*9Use40Dp6QxZ$9WT!VhG23|fAy ztIdP^X@aB!yQTw{0RhPHZ->Q`6H@6|&G_;CPkJ?o2J&f&>L|_D2|QHY=2~itzv(#A zs?~xQj@`j%yRV_Uh;O(5whXo4Oi}PFgm-*0-3uz)QXguOus}Mm=+HE0W{ShE<1obp z;a>}g7T@=Y!_fc=7JP)~WC&ko!l@L}dYF|P78v^lN_G?7+fgnmy49~BKlC;y@RgL% z8YND-Yu7lvoN3~~qKsn>PXHA& z&l%WewRniCv@&dBVfy0yS^{l3v63|ql5Z8af9M$qu~Le&gZYSuQUC@BD9of3hnVL4 zw9X!>g{`l~fCCX+P{X`989)3ks}_D?cu&>k<&*4j5+Q%;>VjPM6Xu&9o9h}vWY#^dv)C^w?fI0`$ zdwq@3&)}!KE3R1A-}e&Ojds`SH>E0%=UcK_H{gJ5bgDSJ>X#0`W`UsVTAVRysG+s1hV{|z%QApJulsd zXXm<11M*`>)@(c*(C~F3CbgD#30uI`U-J?LlvJrZ6S#|*fe!Q+yY+&oa8TvPz2Nc8 zmXG*ZH2=Mc8>Au$P?LI^cpsn~$vNTeL+(o%0;p=>Qzl~oHDT#A>!TX4(%XJQ>+|}2 z8f3?Tr^_jWB{ck4d=jW!6Mlm@%0IBPGwnxuVB4-fay*hA<$&DjaU&RxlqI&j^3 zY~ti|0I*ib4!9RKOIVuisXa11J>SA1$$dS*uehB3gN>u{$n-yi)wuCIbi#bsnHlNr zoX$c=<$=PrQZLZN3HRZwI0eQ@6o(u*poJYUx*D@4hWBZ10W4$eKrV@VTAnxN?;G$7 zE{P@LhVYOiq@qN*s|>4+8u0Hurq}4<@2h+*jrF&1N`)x;D5;$09(DyxB3we#O4s2Y z-$gb|j~$$R#x9)@v_{`Ne1@BP!qJsd;+V`ElUGI}I_8+z)h%bt?`hz~_VG(&qTZlt zrqd8YC>O~_ranRr#yk&j-dn+3iu5C%k@N?&a`Sc|>TVu@B;mm}*$w)CLF8dfos9R% zH46uo4<%8gz#)X{cxBefs+bjRTuj~9Y_ z3y2(u>jv*HnF5j&7_;yM8uE(i)xO%QLyOBu*f^S{{8x4%-v&GS>@&f}t9N<23Q*z} zT-NG5|E;;V^DP#S)+6%*?A)C})ya$9Hgj3+N%zcJ_dehRsgBOyh0-3$es_K-+=|7A z+)v+FOV2$NlYH}7*SraWui7JO%{G~(nf+7vh@RRvUbyDeAM&t!Qjd=7D5-;6P-^W` zDz7)rRS_SEykPZ%_Qwxz{aP!c&Q96}d>R-?i(>yx zefp%-PAxq7E8+mW7#cyORVL@MU?JZ7F{VrGvr5@Z?dkzxQ<#SDwWY( z>{*j3Rx!@AAeJ7BA^HmfmKnmWs6mWysqP|UDr2gfBH&d1A#a12XtBmqA|gDCeG6vr zrCrVHAnDwMf|f1Kp(gpixyUIbZ4i5hP&rgg_X4@mMqhs$@Z_h?`6wY`mU=5P(U zq3TZs<2{wfuvvt8h}f~f^mpbgAJe$-Bq&@Cq!f&dFELCi^FBI4xF`MyP1c?LAzGOy zM@6ZR`*6sI8jicQnUq<=rR3J2j49e$_ksoc8D*1fg|^$@E3y}Jb9I+sZRWPsPAO)r z8OiM_|6#&oRwNiFIxDmYk+{u4ksk_BNl>CjM)e88(fL0>hcA3gen^8^(4Jkhaod?2 z-$Q;qE|z9%=ItYA*03IOuuah?^jSYa9rL;b5-qVrK0V8rg&V&kX-&#Ggb+ zwWwlpAR>}SEeP)lm0;1ZcLx4paTxOaf1>|wXpCd;$a?=^FWw8~J6nw4QFlbOgwo$L zj8EY(HA6%~biP5)em5ttoq&4(Jbv~|bsoxCsA_+5=k)X{?Tz;jC2*jSL6=Sfre zda|-ElRr=IzWvRqzzcJiBAQyN_IVt>zyH6;Br5e~`Npk;#xu7d-F2XF;9UX1Zpy01 zf3$yKuG+)b8Lz2U*%=(1g64Bo)R>9rVTX`95rbSQ4TgLHD7}AgkSy>^*5Mnhb4V32 
zPFha4ai+q^W$$ZSWnPyfhRG=hiZgcTRc>itOi<;EH-GCRWTgXv>IV3Y2iMR^UUsv# z9kXAMi3#)k_d8}wXM(o@sKqu<9Jp{f4d=!ivm+aDidUD(s7WR!#$L)sC3iS+w^}8v z(gng+nwRI#e0L0jwH**;q_kH*xNbIR)njl0eaN^EFnybkF1(E8s;C(5tT8sCX38Oo zG-|_?)V{zQ@_FS}-TDdAu9LopFi#kjNYRrI%fFez<6%N4Z~1zB6~q4NwQ&YecDF1+ zKArdTNI|;JD%toQ3^ADJV)){si1FA#Ej2Z@s32nE+4&^~Agvl$x zNw6!}rxOPTiR5s_p{2N)#fTFLeUpxNC>LH4Di(g{HkB;* zhh2d}>F^>;_gA73F?Q$OY?NBCPBt-4RjF;1jXzgp)ZF%l_mnZ8YO)zX1=yALnwxnAw@w0WrIXUIKF!{1G|6zxP7ArBMqNQ3Jak$e-TpNs zV+OIRmlla;6hDHmH@t>Dc}CtdwfLfVAJWMXsl)fT?fW1WC|Fa@iPpXHZJ~~H8+G`0 zC~XF;$6=en3i=O1a>$i=D1Vsj>_jf1(4dYcO6&36IVtp5{wTNoI1mrja}4AqE}=y? zqFhQ-gkaW+{60yiih!gqEiD|M1t z%M9ej=_QLxC)vlZd0N#!WtL)KYPbhfaGNi4`x>98iy7^MpH#WZ{l-yQ`8#b+NF)+wP|y z$LU?mZJ80BmZBntY{SfgLhQlKXZXYL^~p)mdbDlHQxP4wq9iD8XW+W!+3B0U&+BSUp zNDpXg+yqb_pzjcHPEy}N0+dLOBP+cXa3beQS=`U?KYi0yj{(Q# zm)_-M3nB3CXa)6OU{7RdH!602OBF%u?;SFllyw+RQq|evuo+C)hX$)U{N84%>a4e1 zJpqL@4P{#Vd#2tXF%J9Wr2Jdye{|H*4wl_x0BP!#^3RbmOyYKe+?sVF+j;+eyx=Hv z`~OjO<$+M<|Nj}|C?(mnZAzqrQPHam#79r*3hL+BVyONVIS02mov*zNkPE+rO)PaF5D2xkld6 z@VEVEKgs(LjrR=#W}LfvKnKlZ?!+y;#f5@VSCZ_t+hN^*fBDY9P0~H=@1#Ai!*@x@ zFmLMhOhvx}oRJ>|%sx`;!}RpsJI}#4)c~#_&Ss?H+s!@pL`A}iR%GQ<@VDu`dBk2Y zGuyo_fc-V0EdvXICHwYLIC%4mA4JH?8Lo8_0{{>3jWw^UKcffgH4}w+KJLN$%@jZE z-yfC*lAsUI%x%Uqf$n{|gT6_sE57kn@j|bdJQO$r^;fTv(3GR}rP}a#ZLRGttPVCh zrSl(mfyFI#P`bd`nx(YE-xIi2_?bpPrmp@Iv!%zmv`f5U0OMl;q2~m=_|~-46AuK7 zHe=2Nq#`X|`jdAEwX1|tjbl`q%c)hGSY|=@;>1W?`La#@&nIxe2j&oGJ?9{q1>h(L zAjU5p@E>~1JOfIOUigtgRKilD5(0hT#y=2SNqxrvN-%nV0H}=s;mkn}o#9eg=;y3p z@jDJ_J#j?YNnD7vJOnH_zD-unNogN=BwmB*-foUVCZT0#%6>VA87SeBDa@k8BohHE z!CVC_P={*$g+_P<-+XdKtGN&kM*M4feF+cFq5P8G1$c15v{l$*M5??~p{DO6jh=&q zQmUxW3>yBcHvgf^x8HdgC^+8%KI8aLQt)*)nV_jay;b0JzQSZ9%LL@}olaT@57djH)x4{%WgQqXW26Gpy#6|V_%5TT`qZ9v#@3r zcKo|b)nC@@9f2C<6tlErC|VN!l?29m#>o{{e`iC@jHB7TXhOsQR0wMj_u9Fo^XHJm znae>rup06JE~8O~sZk)&tKx$eXd=15(-Uc$=ok5RD9%^F+^$$9WHstj*%SBzBvWE9 zr|go;c#KF->;#St8N1uK%CoN)8uzy++jE% z+o*JifFwv3b>UJ#Q7xcufoi8@FSGg@s(xpYtA3^#Q5DuUZ4j$vV;_;>dAy@d5J>nk zoKhQsksh*#g8amT8x{Y2fp>G^b%w%v(m`J-A%- z)9&|Jx51{JV2|Z)0LBGC5BRr8HcRQ=1{(~5+)*9>N2O@+T-6QbFIeDaAam0zeonC) za$r?i6QpM$#rY{Mh7{L-z$0M8K#z@wdPpK(AHCcaxCtA6;}SiL6Nt#w)_dmWbS1;; zJ?)AltI4JgaE`yc7tKCyjBy8@>jXp(uC=*g?!Z5n%0-WP8$9MrRVcJT`)9U~ywMDk z&g#TM!~cKbj;+|+{w3=yo{N8mTzr*ex0DX6Ne>9lv?Sl3o?C=N7 z8$=ZaK~IqZ7#v&%j#LrdjVTG6e~f_n3tu;?<9ZDZ6}aTHWa#CdIwu=)b)@nnNS^UM z&*bax?}S0ZQ#*~}vs5P3H6OYew^|>%w%iXVfq9;96xcHlJHHhjjd(};NCBWWC(DMm z*F0HwJoW)Y#Pj0xBy%nnH4SZ1`$Fg$m{pwYtK-k3!$z(YRLlX$^doR+HKDAK$DD%A zO%kNNLbU^2WYRgXMK1YcZ1w34n0SD(G!?;vcggCsVH+_v=2+9^~zP={h?5!@7jR9`@bX*s=Etg4PN z487g2ef#lBtEDR(61Z&K(6G_Yr+7|M!rCs4O=paIqcP%DdJEtxh|ZY4K$WrYv1me| z7%sOdu4Qi0`C80jWDXGQD#j3Nj(?N6N1HYI5l%4Fwfx~}YrTYXwR;Y62z#-+Kg+v4 zkhUCx{8D6Dz!kEZu`4Aj4|xR6t0`b?@_h(-t1wzg-r+5VAe@FcSUT+ugcN9q9=jmU z<);vk{6pSQVHFUC7Zm9dJ82FSQd-6DPFmtPSqLbX3T7;YE7M+-!O&cMTfh51PunKB zYH6m|M0h4E-TLoPWHNW8sSyrJe>V^0Bg>ApLtZR9bN4DtD%M|48Wn z5r3?So0yR zwzLHb4845M?Am7h*~>zpKST5w$Ou-|dB!QxY1g_-@wr&=Ljy55zL^IKKJ+?F`OCQ9cN6e4&u7|#$a9;_6>JUsq5)XJBV|{cx>ET z>z>E2%{Ge8m!G}g)!Os%fJhLICZzA)HdCQ7*X9RYYo*|i*=>K_wx=a4t}FDDk7Dz% z{kzSX8}ZKql{NAYV)o@299y+lkb!gYD?ypv#v6;CHKS>o#9! 
zYpT&HRn3BqscX#>H~NHG(=`ypDx=u0MQEik^br9?sHEnp_#2Cf%%Hs>&k^>(Av{tl z3?)oCljkkea8|ZifoO3JjV6J0#^6XO2&y2TV#nzbn|p4jbA}#{5iU}FHEOC2^H;9g zGM})Y;t^KInyof4;rcBS=AE$+NA$~j207cF%4Iv=S=GDw!=T4^$?(&PWGY<2UF`JU zBfaRK(y?kX?KKrUBJpTX{o1|Cx!A1+LEkIyrwExqi@^5fdef|Fq+6D;*b{vgDP!lo zyy2R$N;~%_ zbxvsehLPXJ<34^{7eHY;H2`f4S3j*rJd2S?`-lwueaPIXW0h-zSX+qlZq z_7y7%YV6sI9+xQjBnQ?y5+x2pH}jg|CN)yZkUpw!B#V_xg_;3 z<8^BewvVdOyKXkJx0OwYxI=G|4N)6tP7|ArYIKsLmhN@a)r9d~5nbz!(^HgJWLf~^ zc&53c=NQ&=m?e6tnd)`(ocE0dc>RQ_XE#q*%n}I@tmz#NLn#6p8mctWdk-7LdL$D9 z_V$Tt9-tYkj*NzXmV&~{>d59d#f-^+%0H~56(WR6bVh}0uHNfxBXQ;PiY%+<<^YMa zi}jI+N+6N(?BY8;5UP6q*6yC!4Y=H?_XC{T%G_YOCcO(B_bB}%q8LJ`U=8!>L zCkJmfddBo`7_D(vq1+l57qZ?=%)y4PHRNSayuyc`!iNnvkpSfJqd^!0agP4x=viaTv^bC1if?k=i$3t zHjR|!iTSZ7*!LLoT3pftFor+ExOP50^7%-1?G_v@nb6kF;1qO_wR@PJ<Yk9OPys_dQ#wTlz71|6f}f~Z!U{_pUf z6R8M8ck6T~k?{1$=ER1{g#{Oa6MG#~yYYM|JUN6*{)t>%7pmZJXl^Qtxepw+513MiC=>?cN_{03i> zE}NskC`*|=L|Gw;C7%~HOzK_kb5&uLK3a&k0V~=dkQohls@nCLqEhR~=JWqio3n=}nl+<@zxo*2 zc5GePQg@;GVE{~aiShy&OMuRN~yw{E`6)L{&$=pGlItG1{rtXi6wI$i0X zVSWBPYV=lGx&r}Lai9IR_?;Q7jK0+Q@So&WfrK*dA3EKd-4Wt4MH@7{Ti(oON+d!~ zBeTyvTemCME+IgeVFB|U?CBPZbZPkLg#Ts0H9EUc^4_-fpvT7Hg5+=ZT!XaTa~KJy z^$->2%Pidv!zq_wh=b_-%`!*z+Ykr^69I|3&hFH%QpUt~5!)slHqx}Z7r=-K4vz~n z$Hc`hTzwphj0&1s2`2Wm31kxKIub`yN!aTY<=dpuXjayCt0{2Y*{U zm%IX0PIF|CB;{Ala|ChRs8>oJK0yzHl)-IQ@pL1xPXj@`%2dX@$QZiA;B5y|;T^i6 zxeYx`*1Q=PWz(%MRcoH$)EF*C9(X^F8b$Cj8hlU)shTeQ&-r8QEM8d7vJC3NB(X-O zKiM<-4D{Im&~!E+e~KR~aCO&fi6&Ol1v6vRr)y8$HenSypyzL6Po67ujP>r;Ph+nO zq5UY=>0kk5X#$?JKLd`c&t*H!Xm_twTAfXxeXT4$j`0X|vH9zk2Px4=R(Aeo1>l+Z zMMeyd%Avl3efQ0Xks`e`kwI^zHs7jA3M%{Zngqs<@$XarC3H}mkGV7C&rszV_@!NSNHm`CWmZedNg_;71 ziQiTnS-pv^_?|ixd@(6z1m-~Rn+yX&u6cbb0C5F2ugIP+F1UKYjtRIP=an=Z^jP?# zx!H9|Y-)v~FB4H#M;`F5D_m*Hcw%f%c$pKhl}4%Cwi0R$ z#d4w6;dZTCLBf%hLUbXnqNkTl*s45eriNVXy5gjpDhc zQIir;K=+qh%tH2oQ$Drm?5DFo$K^DbiJT_Zbv3g6FEU(<@ClhInaFymdcncz46_-~ z(4nj67K_#u|7FrjKdcpRFMOg$fjPW&PB4Y9VPR~aS!+MBN=a}>_dREhOLdB7YFgjQ z8--BJ4~;(_n`hHqz#zs76joq^CuZrF8&i{F9onPU{}#Hu>gn66+B+5R-ZW`j?RVBK z`4a4oSeC-d-=4+aAz1br6Nv9IVjc>Di8kBdaJ&`GOwKg?C>;pBFfNJ4XKXO?esM_0 zga)V$;j$=co`55DEZ3|hLpdL-@y?oV8k@daZLZ$Xkrl2Spu9S}koDJg*zUus`G81u zNmxox&lsQd-8MZ_LhaCz!5ZGh+c1bXW!SxyA{kf~Jy|GfREujgBh;g_RCF-9daFgG zEc_3q)Mxe>tvQ7FD)#hqtHD@%X_>}K*ijjb0+77Q-WSC_eUm?W!uoiQB8j)Be1>F$ zh7k0|-XinW<$XrES7;9M|#r2uZ?%;i9m{ z!BwK{*G>=VQf1_pOqdZp;WJ~_-z)S&ifrkOCI;KhZ5h@}?G7cL)3uSem;F8@nL(JP zJ5%EBGEAY8=S4zS<-(5CY8Io&f;Y^8GWG)=ho5CKJc!P}-g~_I!UthjnP%LcA3@9{ zLtj26d?y)wofohQcl?Peo9-O_V4bfrH2p;+j*g!61242RAD?QC>wmSP)rGwe#NiF@ z>_gE;w8_tsiBT?>oxx;xPfc@%W|5B`Z^&)$M=XHLL^5&I#oBgdMLp5X^kPmwX=Zws z+Wc)6%W$=(=I{eoc$kMJN6kQm+PHX8>iVr8Ldh2@$xBlSsDb`EB=YXr04wt&P^IAr zMZH{p)+ET9x^Zs~2t;h)-fK?(|G1pr>ZYV zSs6Q)N9_@vL(?@-sD_a}DSK!J6W+Vmvu64n0dmsWml0zNF)u>LA(l(NK>2W(HM&q> z5p8b$A<`OKgBJ9hFJG}fBD`$!=Eab|%c1bxX5rPCXC7HS*{vIZK6Ruik;`HRi8DwR z)7&^KBa*qd-9aiOl)I8Zh5i%m^-;2{ULbeh|fi?9D>>`>bdb-~2Q2k|KX zg>^nVTKriu8QeQLi?Tet4{{23r(pMe6d+U3(6o|NPrU8g)zUF5(wqohUq*KL$DHVg z&B5jqTW|>Auel)d^)o;BrnsA2mdGoecMcmo4Kpo|>N`Nw3l(Z3*1N8^{HFnO?RD0I zR{K~*hh^B@{*2~1@HA2y=h}qNayvM8hMxl3Rw&p2=>P3p*YyS89YJ+~5v- z*=3fvG%MsV^qSI@RZiLTk7F&q`4LED7ysHyC~tXM;1UltMH>%{&c~!{lsb`a4O~3& zVklac?bwq~Kepmec&FY~3qD4P%@d_BC;=DCT%E&I2WL_2IJs;Jy@XGKJF6keD zfJQvYjtA)46(;I zO48j!Xppe$`z_IPI zIEkbNkGl@!J$y-nYV{iyaS@9I^%43}!YoD$^&PC=}s= z9xJ{V&O5O|>1ei9dpSReWfh2=N5}X5r}kB+BX!BS-=6*17L66$*gG?6q6gb4`;I|R zhZtwn;3Eq&@Y~9)d#>l~#=2LSJTi&-9(^derJ+%Lu=3^N)JVM7Az!_? 
zkv!KV9NM`!{8j0*#%`b8-nLcg+iNG}y+F-?8d@Q_)?nMf%)V=5ldEZ$akEIrjX_$& z*OaRTg-V&nV8k@vG)Ebo z>^ss5%DvWF9?aafy66Ce0>FB&6~`j8+4wIH;cZ9R!;lPXhUAI(7a~^Vq^QsJqAuxY zE$CPB^+ZbMipOcFKbK44f_W#gpdT&odkP{RHrrkVj3G+4&c=-~QhuyxnREKHBUjdJ z+y-Mqz_Ft^0Wj2S?rrwYU#*_cy#b# zdQH^sINxPf6|2RDQFqAX7yFX=MSn0)I#a}|yOM3em3d!y(WQ0`R!#bHGq4t~q? z$L(nmwDd>iRDKFB9~VmxA;#LbV*zbAzx3SS2kHpq#@$#enk<%&tU{Og^OIXO$LRT_ z-iW8lhgVQHI_GedH{1BL^7H2IRjva~iBnMLEK}AJahm=e7DyCGHmV&kb`*&Ii=KzG zK1+Q2>;;T0j2nQ;s&hhYgy=H}FK4rCKAL;gCRkqxNFci9kq$ARiD+ zpn%U|uM%#~UMpTS8Y-AblOPeOS(qarh_75TkemSzj+a~(95jD3uW0Bi{dcP9w~jM_ zsO{Q$&V1(lHEA`mnJLnUve(*wQ26sv)lAZsc;&kZ9Oag4_~(H93?6rdR#?`f@NgBy z4>aRE76M7bAcp3L^*kbrcE1Y^3~~|rYJ*vOOX9`;1hf*Wu8WtKHx{l}#Y2VA)3v&j zNb7dwLE#OBWDu)#&nKfb4osbS>nVlYcLfc>?z3317|}*dCQzNg zVYZlW1+Rlj6wQBNSo2KMM(t?NE4_ss+CcD%C?Y*X6COvn`;m*Yo*Zk|^?vW|ytJko zJmE5SA&6jLb^Hm?pVvch6c(1)Zlr=Z(l8{^mjk#h@XO<#RMBbHJ$Hq(P$aM4aP0Mo zQ9a+$-76(ggpG2Yy^U1o}sjW*0Sm%DI-Ycxat}85d^*utJO)t{b-f`*>6LvmD z&Ol;o{e}1?svRk@tUtdI8mgU>D##Jin$j`P<(=m9I`Pu`6n(GfZ@Y zfS`dowLjaXn>{~~2dQL&(nzWPlwlATFQ?ql3_GvF$f}Nj3rJqwh2O<_r z+N6u~rn5V=n$s0tA(dOSsl= zBM<}OuI+YX778#cAui#xk_dqJ?*gDJRJyA66;`8`^}_^*5!Hj=?KqVzzuPrix4E+q zOCsHbHj86j(Y;pRsrd*r{9oQpR_7@;LG4WFm0;{$!Nv%F`r;3cz9M8*n!xrMTIOX1 zE;;>Z3q`oNWo?m#Op2o|0Q2|*!&Nx#{q=q?!;3CWlGr(9T(~K@;;@bDx>ykT7XD%I zo+)^XIEAe`3oskY<>rB9I^>Ft6P1c>Fb)3I=K|szP{?TDnF-mhc;$8Xc3_0T7q%sHx=ZJ$!fQUb{=UheDky#Z*KR3cww}CFpzh$U0uypqlOF+s-tR+=H3?E# z!Zf#ABTILX(m?$Nk$BfVVxyjEY);$dlE&-Z(Mqd{HBw|H)9bE*(t!0eby1t@N2JGK zGI6g{W47kkJ(y<5UHS7l+TKBEEw_l_^?JlzOd>U(wjt@_6{lu#HDl z{TsAWmeHMa;&_h0=GiqowS&~n%3Tf-}}hne?P`HPt4 zgAj2;5)1Un9iW{J&1P8DHJ?0~Jl*WVwQSN&8(U1LT=OwTy|SJc#hPJ~h^(O2&tefr z7A4*Te4{(ye=1(Cb-r~eI`E4Fm!XOkT|F{I{Q$h)2OB3Pi{Hz~ZsF1^EupTZIyl1Q z@adc~V?;WWf%;0XzU0D;a&h71i_D}D~qU)*MP6YY{(yN@;Lh6O75w1v7?#>1io?(~> zlUFMsOQemIWIPW=BCD9BxaK`z5#e_8%s}UhCw9RNph4RDM<;)`FklIM3O4p|pl`S+ z8_!`V+|AS6`XI08hFo4Y-^S%3A!O*t#=wVJm!5KnZfDmMJB~PSsCfi=;sb~$ac8Vk zCRQ6g7GqzOSW5cb<7w(pvG0sl#D?#TO=dE5U*?YA&eDC!$+F6FkzBz-*Qi~^E%YBU z?t-Lnm1*ruk^xX%>v}u4WBXqLj^RChLi_sE2j0>-Y9ZIAX~;Xa9uzO~8^gloHc`;F zM#Co^Z161proHSQD}BO_6De|OkTLWujllt*;uPc6Xm7YH!Usoepa$!q{dy=UaQO3K z#`spP5>WZ!5~R+(>=^-I^tc1m&}Ph@y3EQH%VUkz+L2y9@1geGVvI+)2t{T^jX*tI zgNQs^f*3l`Gy!J0QE{yUIS$5;wEhB0kOp!`fCBcYuq4>aO>Hk)JrExxJ#!tY(!og;Iwr5?-2) z3YJ-iUNOh$o|iMB`5F3R<8jH1U-#bskziR7hwtPrxO-s!V?H&lDIY(>HPYZcOwETO<^?YD(%Nn6WGf^ArKjY)_%G~h=%tG4cy4GS(n9d;G! 
zu<(Pd4F{Expb^WxR{|0}FBfu#D(F&@nwRWruU>=|S+d3+vaA@n6Swa~y>Q{~Zqn7J z(^hVrWL6m)lnICGp_$Q)k3gU8heEX|Y!LL|CX)PJB4 zy{7(=%}cWPLzS*C(2>G`UIwy%+s_V&K>n|$vd{4R*IZ1R*iW)uBe zMJm12O=y#TgSm7=hZvz8k^1*mk)p}MdtwU(qBC@mkP&Y692?&wOxn-_U)2w18=}F` z>oQUsu28S9(4>LZ4(%$P*pTJnPWI80g-;Vo?Oul?$18d3>D}m z>_u6uRQ?5UBGZC-Q{QEZYiIVi^BnMiRh2N8tW1w%DqCbg4_!GGWO;XFWD^`*dk*~z zU`ORsj)Su#ThC#@*WTvw`U?I)Lo1@;FCiJe#$W{JDM;Oos~vl-Ql)MA=L$$l2JW?_`+LiS)1Iv7X4aV>fa`n|qC5@B@jvilyi>xP4iDO*Ou)BpwQk zI`4W%hWYT~wv$MY0JrP0!Ce=t@yrsT^bhch-~OswI-t$6lR_svZ8*?*0MAl?8<7XX z_GBzkvm*#m9MY>SSR;^;l-4jMQA*evH1L}@io-2BGyXDJZ#cZutE`yMq{LY=hV1r<3tgxvBAg($hy7N4#b5HYDeseN=?sK)<_wqkE=^U?C6nb z4*CLWBQZDdofvoFilFV*5i}{&WyzA&4oiQ(s{Dpw9=|p}<(kI~ywP2s3YF@T$GbS9 zchzLs1U`M8Lhz;rE`g6&j*);_5~Z-Pu;4kg0z%!YbfAuF!Tt4XCYr#vrbnez{3=_y zb*U-LEnA1FO%xB&IAtupld!44BRbSv{TwK&Bane$+Zu<37Yx-7-fjOcr-JeA9AXh0 z!6G;p2)iZzVQ?C;&I4K{t|7`qCTr|cuStCU&nt(mSo3ooVK}T8MWJ8YrpsGXIFf52 zMp(~)ynHN0dL)fr4CB%CLGp}1%4i?DZe^yGMP#fMdxHtR&!9(; zoir4$Y>|KLx3}1fuuwz~_|Ph*Ek;H@zy}%vE0UpPPMOszJ70wY?)PlbqpwpsD5mDgXq!Tbc#K#iS;@^KczXFhH%JzReV=-9R`jPGT}Ivxh#03x}JEqTyys2On_B(LQ@Q30#m zB}_8Nr~GmHk_2w}b#kZ-I$#QQXeLCk+dlT33-2fuY2lAFP0Vq26VUuAD(0+eiAi7{ zKFgs94&kUsrY%QiaD`Dx6wJn%6|91nwg(@}EOObTweJI@AM!i4YeyNGqrXNZE zHgzA_X3ZHYaiib-Yirds$0~{4ECx5urB$0?it39i&3KK7YT{E6^O0)`)zU~Q3Vv4a zTkt-WW|xG#Q!8Moho1l*P|7m7NPTljrq~Fc=x>l}O zl4a|S1sL|yau^fga;fG2)c$u%YOcDdL-~L$!|N$!a29r%u_xJH6-;@m{&erRX8u8; z3@U1?-3l=IS>zxntt>2Y3Pb!;@zQ2xtUn!0Gf&I$DoP;{`qkM|F@QP#u|3vL7_wmn89%FP#dnn|jFSl2WzMQ~V3sT$sda3Uv#R4c7uUjP z3&we4RAS9Ea(V#@_lGX2vkUuZ4ssO$kWM3|)`wNq*mXddpabZU9Jv-nF95kKLT&1q z3P~|SL?7oB+m50W3euA%#M7sB1n?f>Vb(>_4CP4p1f?5! zax^OUR!K;4Go9c}t5~pSFB;W==(B;m>|=UsxM+nJfF)8JsQUX8IIS!5Hhnn?nF>_# zsnzXfEPj%$xs6c{dlU2rZo+yb4N7RMgitk!TuJj1xZGU`S0kJ+h0GZtPT_K$fR~zm zi)U~wHoDRK*|bLL{8A=JZeE2%xOPeJIWW`R@fbGA&&jU&h`G*orDYh}6S*7_E;xUR*95LgG39*!YN;~(&-&{=&R zyPKqW9XT>+KGAJXK8(yVP<)(xO6M3kII z=c*0F3H+NwDGj$+QIIRo&q)YgVzVWS@QoYWc}x5}-O}2g2gZ7Opn$gO=o^4>VUTu0 zztFso6x&Z;y6~m3!?Gxre+jGSV~MR%qUN|0Npyr&$tIqK>TCV&oltc3i)W_rv5Nh+ z9&zjaEbliHA*a_=uteWTt-#m@!sgA2pF8YquSoUsOYiVpTN)>;NMbY8jESYhq79^? zsuFH}3c2w1?C8I*qwgjzs^|V5eLVqZH)MFidgcPP=NzJyrv7~OJWNfIf;ykViGYIi zf^pY;N`;~P#Ega`i#Yz+UDvUp8M6S?y#?lL>bUz8pEJqR?G7T*|!DVwk# zLH{2T5|UhzI6=Dg{3>gn2-%51_RZWSymrEzj6=&jZ>OkN+b zoo79@oEfHGxwsbMPv0I6i|?63yw7>=+isvT{-PEJw!l;BPm!i`I@m{uqA$gx^xgh! 
zU<*F~UOh8CBjwG8idZFT&4m_L2&cp!8jAfqN2y8w1ZG3CMC*Q^ieg-qwu1Kfw>CD` z%aNSnp*^1vH`WU-(BrNu{|<8_16ds>xn4}7jd;$UZgmpyLfYB7A4NQ!74F8TLJ_8y zUbSHVG*is2AAOxM?_i%F{s4zoAs5TH;3qYU98}xIyn=T$CNu*-0BbaQI@4vnKY+K4 z%(!?Api$$n4PjRrE@bI?)m`m#Wre#{tThJpzZAw)uoh>JVJ<`_m^<@j%Yn}&__|@I zV4gb`$||{Z$wgUdq@JUjay?$ z()b3ncV+yS={eZl49HEF42IBG*c4aIV|CArSvbTVgpid(eBON$?>*KMZIS_S1Mp8F zNwii;7XTSBXWp3#Q%f^K>)nyaS7SyvQ?{ai@{78t@pwX1>0pw%#l68E#PnO8udcR^ zi~a>YX>h*@j^@Zv@%n<@(HTAN8R3mO^@aE3QrwzrI4BO*WnS{JkdTS0TRY+0E!*cm z3_i}@D}E4~Im!5!emI&Z_)ma2V9b{c0(@YQqeXY?jrcY%=OM|(z5+-N{bbe4OJTU{ zYznyT;4Zpd({_RLYr09tV+%`qzUekn`)1FwzgU}`7mn|yUcy+Fg+w%vyPlWxwMaCBX7YR98lm#oXsEidsLp{;@|dO}^K`!_hZF8UqJQax0*Ecj;cgt5}P zY-z>{^_V)TAKu8Cg3mX8H6twb+4-uOX<>97OzR7wuGo>YvuH6ZT^a}*j9q3X(KmKa z8Xfv$1=R-}O!xA@^2YJtA3ge^AuEF^APNZuo0Q1n*BSB=Z7>VW=<oKI-{iSpuyq5xaFXaZ47~LIUxR1w!6)rf-Sembx;wF3`?Mz30Uq**964fH zWT5vRq{*KoT$4|_GE06GfR^cYL`U)!XrsL&mXp;vdul~kSaNrxa6CfpP|R*1+N2?l zq-9bP{2~vUfhUezv@Sij4Um-y-wttOZKB0-AsdGarw=dst`qL?vtHQ4mMgF(fSBC$ zfjbLXyn4jS1Zg(9+R(U7gt$W-H3j|D>G4LfZ0re8tja=?n7}lT%PtxHq0;*%F;#)l z6nWRrfv+NJ<*UPh6L9XS3m)C~W+|pX6z_#v#G(WZLB+=l#xk5r!?&X$VI16$-ergu z#x57F@iSmroibF@QWEAdLiQ2QAnE0AAH2JLJZ4TmOh4VDj1`dly55SRHSAAt8PCCB zXij8LsHeWE1=awNUKG&qSo%sl0|AAv(H-z;gUpBgAdjHULf|LVHgUEFH8d7Cp*+$b z=blHTZG_Cda+eXVu@BOM{kWyUd-2d!#I+K z2bLk>-xSaF*gMlDee(o9%Ks>>744YJ89UD~x6`+$U1p8#AWX6vi6Mfdy@ek@1(X>f zS9ED!_i`Ij#ns5W*%xR=Ma+si$!Q>P&KJ+UV+A$SOiQ%S#1(DTy9Jlzho;j_eu$iL zW5a9>1g;37cSGG+g4HYVVvlRq@oEph4*xVW$}h@LT!?d^`i zC(Ovp0?#Z|g|TIPgcrf{_g{A)b074@?cYV>uB5Iqrm&H}W+6g*?giF8K1c@+>B`HJcVLYcMV8uwjDkiNW7CD&^DiMjJ67&Ft3Bcb36Z*ZUjmsoMcd$c%V z_L1cxmB-}g@_;WOvb!mhqluJ8mEweX+o3^V{16ya#Txa%trt?ZxpZ7@P}`Qh0E7c` z_V(#>dVX?I?kaynIDZOKW!|ba|64@6W4V8n5c~^v2uW&L?c>k89%aQ`gg?fp__pUT z!}Jrk$3s4ii-0IU(P6A+i-H3@ms|(w8>~M&_a22c{Y&!_cs4l2F~9FTlOuWQqCZQ`oj(eRH2E)>am3)5*0%wLY6^FR0GBZ|Ypn#@pLw~9Q@sLkQ_P>iicZmCo|jToK}qxr)|Ng@ zdbZ4KaBT6q{NEmWIN`HynPjr(NzH>$MfX+=gxFEJ8(_*PSADe|{}&GSU(LIfL$-B- zi39DKS4*a^D+dnOb9}FIq#+=c!mU^VlTH|mLaP63;7!$&jU=VQ$jwDJPTSd5pcl=S z#EC0YV)cyo#~CJZG*p?anD%lv&rWZcAz(H;pB^R=%K8sbV;2Em)7kPb zG5)$k07AOmXD=SXdidnI&Ze39-5hg!f$c_9#aofA{IKLl=};*_lTrzDfJh!Qu@t-e zhf9IfWyb}Qs1%j#U8b08v|>$^L+o7gmG07UOS^4dyj&Fu`_<(03j0`la&mgML_216 zRYG!<Ys@CnPABJ*IoAZ8vLv(Zd!0w&P&^OPOf|_eq4OWWtoz z({gC%`i*587^DBSZg2B$yqy;hguxiMxf;EA0a&7ir> zV4>pZ0pue=vGSzwLCLL5KMNd&;qw%XaSV5gte$R{OlD-y@G7!0N|6GEJv`CfjGri< zXj=Wwt7B!WWL0LY#@m7+P#(3`{y4ZB<}twQ5J@;co1ppAkG;IEsI-Iu_<@&VyG61p zR71qKr84bh&+yoDr^Ak)lseRAj2lp~!;OZjIqX=*j>Ku`D2phH`lAN~7qc{#g9)9j zM?Sybiu)k({kj|AYaJ8Mr1nC;v){YD=q`vB93Q8QMed2+_&C#obuTP= zWLm9AidXR2A;pcj)^#!CQT0JC2EVqa4?Z`&KPsn#utEK*Y3?o$rURwP_rE4JtJGGL z+_dam$ZY!~HP4>dHGUYI+Dph{+0$<3@ZR#B+H?fG=$t}|Ao!Qe$3|1mY4x1!xMF23V06c~A@;p7?{o7FNeHV}j>X;x92zkGQu z7xhPuOJ>HL0wQS{^;}9u6@RE|h|PZHB#e@zcg(zQoN4kboHs2Qwdq4@pD)skXya4p zsl(iFt2#)qL4yrxbqm<2LNb0I2Jl6}qSQUVxI|AOW$M$u$&|iSp1Wk5pfhn43J_`U z#xTNuPZBZEVcGimi9GP}Ig(<{M%Iu!cUH)*fCZ&L6lw`sC4|^cN7yS7vhhSjx>fUY zy`t^t;YuWP`fr0qt*KV=s{^!Pw)UsSY-Z(mKvYyOZ0<3{2D1lcKT_#0@S1HKO=}3P z(nS_Xnu+x?G_<>ucJVH1Zz31 zBAJsy(uAGrk-ZbvmPUdf&8@7grayD7Y0D{5W#N@gAD9L3Xb=m|iFk()t()0y+weD< z1hq|ot1vvFd@Ou)BPVhkF?x@qd65xOXnYRIZs{`TJ%RVtbHh<>0e!X}c(YkuSL&IT zi|hX4^HR@#;Ah)hpGAIbsGJ{sNM6zXaU~i|(@&t&zG)e7OG1{08QRPQ?m)~Ng&uSdb?%zjA_jh>H# z4LWLW`Zi_l&<=jY_gV2pAtm*^DT{?@dh%0Ybi1M+d+AT93Nhga1C-Zi^XieY%#=8z z)wbR^x@xkD#ao3QB;ziQYoF!a&E;5~fin6zSZ;yH7_>?_grl6hO5BKhcy!Kq9W>`M z5j5YX4nnPL;t*t{T78z91p5kmoHA(_8f2px32F*4%8~4X?`43rrAC!rLCO#K`xa|D7Z+5z+PYNZ`86X^C>LE(I$U{nO6q)JJsHhpTOe)epJR8hnq3b`|~ z-cp4t4p2UV>v9UuQ(}3i=8WxT{K@I&AC*fBg4~}}K$kpy+ZvRvkAE>KeOMB1MfKrS 
zXPAA+=8S@UA8#V{U_?5o27LylV5KsyeTR5+7;&D-fRQaMQX4kHJuJRfJ)zRuhrkDG zw##6xJ5DSV5;fjk#;rY`(6R_eSxVv>E7AZjyXRP@1XoCQcrLlX{6}j*8i*(?i2^aS z?}g>3l-q#w=W-^qkdhgB-{!<536|gP`8YtPJq`&^ZR%M>t8msKeOJ{S%@v5#bS zonRKVYHDlPVre!!J{_5l?4RGQoX4BUN{|TJ5)AL0#E%1BjzznO-s40T5}F;kg|H$Hvq8*M~j4i zFhu)_dK0ct_U>dX8kQMBd68J5+%rI4s(4T$(k*g=jIvi{tsG2)QA=e_4CC(KaIlx= zH35XRJ(ar|>rAMAoI$27$AR#crIpUYnkyzfy#>ULkJI4!{|=jqqegivoeRdc-8LM` zE=~4v0?S`(7+tAxV$q8=cRu&e^6h{d0K@Li7Uw1COjdCl^KiF(W`&{RR)wkic z4JMVN$M_KO!mjjrFgHb8-oSYRXPTh14a$IBoicpqjYdN$5o>ONSp$O|P>2e8O2D`4 z6*7d=JMvI+c-?J(3Gy{xs7S2YVR+o818NR_K}_Ux&_=w_XnIb`A)4H3ARpoy=D$^L zI|*qt`!+~Z!SR9y8-MOH#7~=bT;L9oQ8bN{HO%`~zJsh*Tu5A2KHFCXW;3l;i zUo(z*fIxQHUU*^8%z~H^Mt7+f(h2N&Ud@2P%%3sH)?@@Koqip(SN6Jdmots@_#UHn4tyHv;HYjftJ440_)$<#-anNZ_8sPDAo7BwTr`& zl#R0py5MwP^QxJ0q^np<$Qk-_2R9cgq&V*qn9JqRK}!U(dg*u2gW%bX7wlea&B6LK zz2N{xSDetIP~-40SQe~Ck5mM8Z|r75Aq0=CQEk_e@$|2 zsK0-O1}xfj_HN0Vp2E9&#})0;EWURRzaPU6E=v6?pAl4JrhV5mb6%Uyzls~vs3_H_ zuSikN~x2kj0-4~l>sV*wN%Ax8nWz9neh4eHt7EbZ)-z}iM>VAP`Mj)lh$hz3nFcyTB z1qrKw^&WmZ1Zspch;9m;irDyt66F3MT5*$t8`CEnvNfo>Mf8t4Gt@=b*X!Oehq5s* z?{h@P$6jV3KLeId;Ga+#OC~PyXS7~1aA;rRH+zm?3twf1OJ9+`yo#!0_Gy6&*tu>2 zjkE2)P@FW<05pAE^`ukD`5MRQx|%;>{Kf17!>Knwu&~i?N@C4;au8@mZivV_iOHr@ zWbKlZ9n0_w-*#gnvTqpHg+ELbj*7pq-i7?6XJBG?+oU^X zuEJq<7XN~(D%8W6uQjyiwRr(tS&6arqrD!vB>~_nA>V%ByM(+wI;kOGYr^v#WPYlD zM7IEWw|s8}1p*bYMx*12XS*M_#7)edtFtuKPAhm3kJeG(!K@mHE9l^E>W)Q`r8U3B?xy=zOQW{6(tQIFOuES zkzGz>dSP3J>+xeXvjIvcm{D7g=0OPkN%D-Z_~#1L0g0VNjlUSv_WqQCIs#zVLZA=< zzCk{EC-2UgqUkDJqdU<8;A|W_x@#zZmDc$8cyY5i;cLdX7I?Lzj9+=ds5ON@@z*{T z%wPu~*ZX%s?%c%)uJsbap4=3moEu9&2QRyXmY2$%y5`hWles#qKv``FqC)pIh(}X6 zKTrg*^_i<#il>t$<;wj@a>4d#@sqti#b3t^53cJqHzxU6ev1yUbbJdVG>up@022(C z$ELS~0JL;+rfYo2eV1^Z#@0jEAO~N*%NyFDWFU(Dsmp%o0&Bb_o8T~{bq@*jwT2Pt z&7|}e+?mD`5T!Ntpt|F#KNiAG(}p`oL%C>{jI;b3-+3H9Tk}iA{|+RcXKN-VO@nP9 zNEwZ(IKkx2{L;rn-<>!b!Xu!#cEhBM77mcRt)y`R<^kl6T@1t zG4^U!%}DOL0ol^F-4OlPiH;9({7X+FS(5WEmYDRon}}iE*$mlwZ19KZH=$i6>g!}x zE>aEGo@2s{+w1X1&KAEn`LbGxMC@q7B@>SmIMQkuF@kTUr?*rj7J#Prv@j2ET9#Gr zd52C`F@LB5=abklOm#O_^QTB?Fh@q}L!3t_Gh6#taX)te+Lod*V&U~u)Dl-Ztnn+#J}&N%>euW>VNv_?Lf~SH}u0U5{ztQ zAiLmqd@0LsZJM>F59aL#@38hngX44`s;xo)5AAkn%N0=Jacb7E&8an?9BoM)2X~0A z-Ms@)gXrzPM#MS}kc*IqE!r@P8T>XqhaLf4-mP;(sthlPt?`cF4eFASUGpjer-Fhc zVF89R>7vZ}>c+APLvcj0k*phr*WiIiiO(*|dJzgS3#c?p=aJ9AV$|SZMI}Ciy&GUv zNBI~M`0vrLX4xd*GU(IViBmAoO3*?71_+0fot@;uVXmn3;!AXreoY#0grYkkdTP7$ zQ-~bq=f>p3abkT7S=($_;w{@Vo*Tikxx+hg)*oyQT+K7|-z>v2Z~p2jF%b8h?P8d2 zEoHM8F_N*fi1u32&ALE8*mtb@DJ2F!37Jq?Zjxl^Fgg-1o7D60;b7Ji6F#fJ{N zq`)Ig4pG3t%gut!7X)%Nh10KA_TEWDHlS0Hmk*d*uQmhx#sen3^aaFd5^=-)O;}wi zu9$|wm<)*PfM~;hi?=W;&=_nU));#NsZ>54(!nwJK%P@Spt9SGzouXU{;$(xo3UXBvS(xNZ*UrZX`8QX z&tv}3a}3asGXh$|P!Mvn{d;s%C)5nj;Imt{uVEV%ED7bGld@F+^3AoD)JrNmYDbYZ zxtwjAuAKQafgUMzG?j+$6}J}ok?bs$!Yx3G@rp7d5c4Z|XBD_Hks!Ss1JN&DHdNOC zzvP)Ot*6kED-0iPfUcglm%duOjj1P2s{gNEoQXB8T+5EO7KGn`AZ7_S;%o-4Rz_>$ ze?4tq>*U5%F8l5nI8g)n4gM_7Q3kWd`^fZpLIdkAC=t0WJ2!RpD}10k??|LDa2kE=HignIw}$6vOgG?q4nC@tD7$(9)0NQIWGNDNa^)C?+1mSJ#j zw4hQH*;=$<3~6XExGhM9#9(YU35}(UA^Z0{$G!LS{{3Em+}y&vUeDKgp7S`5^El^u zp=wP^gDsRy_YOO4TCIXr!7O+Peg+jmWZ^ld1lKh0;8Dm@%Kc$5;>W+y`13Nb^sV&S z^#RMwWajsDDSHxJ+4V|Si9oYQ^HsPH0tMv70w4J`AtorX%%yfbWMYt2CV)De5iZF_fpu8wLh-+s4 zUn3F}nGoG)VF7Iok+HkQX;F5qeo;L8(7mk}HK}#>igJBMN<+D~%yK?;#DTQ`=vM#9 zeSjYqLGM+^{Q`kG2c){d)@~APdDvC=45ehz=RcM;rj%FD+zjpXB<*RRU6skH(8Vli zIwip3!f|VV6Xv&T?rH2`z7TSk!SH@IRFJyU75+Psj%0vzB3%=wx(vB_h~>BcouoCd zRS|;H0vVgxpEZIV{F>9ybL@3eNL&NhfV*LY{Zl`80NT{?G!X)Z^@Z%Qk3ia*&UW(% zyX9lcwf0bJAI<9v&2G-lAjOLn`CG*8P!&-u-8X7{P97S!%wnnaP_jVH8J}=V#Vmlx 
z)nfj@$53}*%pZp2|9P4+`?=|_KB@+;1?g(pX{35=a{>BMzFA15<)kq98bfU zhZl=~pFFmY2eykn+XePy0#dd>;9%EEsH3xnERJ@*v+epC3f?CNGzjO33l3fifVNjS zGL>MpuAjO^a83-Hc!UwcgSodwldfgl$rt`#AfHJ)cMm0;5qWPh{Wqk9N1!QcN}7KWa{` zNr?*DZr1X04MsDD!FcxwPbw%6du=yWYa;{oBIn=V`L zRBM~w9_wUTJa&-vYHBXJeIz_D=@WMInYDt7=&kVY`^*8J^gV}-hlt{TW>QZr#0u9M z-Zk2%RQt%;p{xdpk&YIIb2kolT5lSDRZf7()aqyoW|q4)^j%2rd)cvFP6d=+Q7{#< zBl8xa5d8nOi{n8}s4yXQu;ccqp_A*n6HCmFwPLAU;bcYI>pBAuMyIs|LE6;RAAus+ zGiVP|T2#6UVa;D5)b_DHsLF(w%vW-G;Z~xzRNLm#O6MHCm5N6$4qN_CZ|SeB3Kn~u z9(Y3fTJhTjz7m^$Tw79UG4ZpFmYa^%@}6R`JG-w}FOzHRF4*;??e0~-j}pocQVGvD zUk;J%I`iykg2v@Aef?iEHXiSEUaXE^J^r?X=djdrTi6NKLeLiy59;KeO`lou&!|JP zgJVL^309If{*`vgMd#t(WB2aRWo+dw4}G5U_nTRadDSr4jpFLBaEF#!s%6a}f}=TZ zZG%KxLXOf|?nh?VQv7}mYU4P^q&;Ry1Z5dq<<&~ z?}g$|UjMP7-+Ip$n0LV&zDr-=VdSJJw?(l-)PtVCb*jd6ma1x6J3doA5?5=hx)Z}Z z)SM#X?-rsqU>^r2u~!tEzD4Kf^@M~r1a2jUJsL5twdHhdXvKbuJRhDlDtOHAX5Mr4 zvaa0{bdRJ+IjJ^mb{ysc z969o$%4_ad@t43<&S04Fidw#LE}xfd;_|pGNs?VXE?*^ym3u+k>fL)=d!1D2#JSl= z{om_!*~3%q4g#aMLr(Bf3`aTf@aR1&F4(*iN*B( zjH}yi`SZim=sl@rleI+S(3|TNzv}2wvq4A{L+Q3J)jrL=xZsq>$-R{Kk7C}Ew?oBI zO*~4>AR{I^IhM1H_HE0RFTN9%3dPeqVlR{}mj5}^Ih)*n5?-jSo0FWlx$x)(42!B} z&gin}-!q%UoGCjyGLOk-;^?0yPEk`4`F^GOIpI5VwB&x}(@F~0=ycbvQI>;pCA?St zjNBUVLAvP3qbGkJU*Te`PqYkncT5ntX!Y8$GP6XIER}9vjjkHDb!Z?cRy`AG#NIb_ z$;?b!9@L@vDQZ8dK!SVQj5NEyv=?d39qddC%MSa#q~S=}oFU;@VcU5s_sz&yW_PhF zeJ(5QePZ=yj}|{-;RWX8q+j@l_hKTLOm%SN&AH>O&AKU}p|HgsJgBvC5KG7Vc3#45 zJUiE$*%8sV;18g(!M=q9C+V++3!DB@in3_3q!g3@x@_0DL4YrFQ zkg}7Q2>Q?WSGx__A$D=K3hJ3zo@wwZ2264)*S2%e%I}SI1=}um)ey5#TJb~-3$XN& zq;a1;dv=o?h};`Sx*n=o|I;QZv%e>@$F*bi#T5bkj)}a9k+F(aPOdbWz4(201x>La z+qjl*R5m+N>niKzPvi{Vsv$%|uQ2V<`@$1S`CDs(auYe5is2*cHn7Sn6S~~EV|i%B zn@xC``&wCt*cxPRb1hVE1J6x=c@(|JF_q#clGmGJV`*{ot2LTGrE$_aJxU||j>iT{ z`spnc-K0#X;DRetkGlMTrM7M6y{&waO65J`9bS*Bk&S z9p@5~is=fz%9ZkYVXG20O-`-#Ssaj&%iR??({IE8I3#zxeceD)LivUE-t3qP!2UZ- zJvYvj`|1sLv*_F%S(U@3R`3u543#Qqo&8knczngB#5hY-Dhc~lsZ-v4wxLmk5sqts zXS?%zg8R!mO`NA%>=P<2{zr}+xk_GEm0dVF#|jMwv_IMHkma_-2_`ZM z!Sf_~+ZS3MT6$GI%Lxu1*1z}e(CtdpUu9@Kt3L`+saBU zw%c%V5*^2g9G(j8tAf*EA=&cORBDq6j9CHHT`*oFl>zXq#ZTiDhglq=6! 
zFfW;>#sfMckNx89aC0omBe${%S-b+HGy;bmaokky#`1=ITy00eYS$E>?=aURy`22Z z=D6H%^3a+sLXp9Ly=;8dGbnsQW^mU>;Qrnnych)GrgS?|v|Q6VJ`EN18}oKQb1xT3 zm~D;%_soFE-6z;r-3fB3HGJ6jzyjk1G>Kj`_qD+YRnTuvOxQ%a{PGnAsY-cse!$e~ z$sP5k^AQD?C2moP%u#(CtXF;UnEYj!j&Q@+?D=SfT4&_@nRnHqPJ#_?2OPG!=qA~` zk7=x||KDDzh~(pHtB^`+p#}K)2MJ3F;6Q)06$>0WFbw0>A zEf`5ifsRE*H99VI^Q4I9i*AnbnZr8Gddw&7DB3~?T@iu|j{dQkN177KQ)S*693FXW zL`BkmPQ(`L_Fns=^vEX6WZrDVj1iv<%I?XPg=04UhoO10h@Xgi_rXKzURGVgz)i_N zeOSKz8oqCJm)x3Ed2C+UEfeM|)BGYjZ-J`eU&zN9R|Fq%Z=5PVfsR(!R z&BGXaOX4&_5c33@)p+8Fl@{}#?v%Ok!Qn?fUWKLU1nE)|C1l>h{XH=(Y9`S{`&$t? z{&NI3CVLs&y8SXTF9PjD+g8W7y#!*m?L}AREUAKHgw8Bez$>?X<9cWJ@#8S(!aRH4 z1iDsNbNk-jW>aiVh;CMby6{Ly+Sm#2;RzT|8xx~6r@Zk5B6heBFSd$s6GJOU zuk;Mo>8tUM?_!x0EL_s(Z zIhN!B{Q={+{J(VoL;X|&UH8_isCL`r@Y_|H^%+uo-px|DZcntB7Y8eUbal*ZOLG=V zJUVrg-FLKeBvcmW-8Y2iWkv>Oq~AqImG@ZP)(wp^!F|Xng&>^4i8+qFYP3*;FAHcuTP%9E%XjO|D(b-!Gca#c6_qGHE z2E9d=7Kj%ZYN0F?24sV8#b}=B*#qCsg@%IoLAz~R!XahLeNf$zJnz=t=4tbe5i6R)SJG#?t4?~6?p79XZlw>}U#3j&KfMj4U3 zzPg+Ab`(=T7AwLR$~K0!w*CnnkBT#feP5tcf%Ze{S$fQ8slorguh|3wZ2vSM{@|Nv zAyf!{>D*niW3f!dnbC4qoC0jXAZIob>+EgFeA*4Hcz%on zYaPzvx4(1+l2ur{XPh{^kX{>L6Kq2z?Kn6LzN|stS8%ZL9bY@yG-p)W5+8c#7VZ{V z3WQyg%_J{pUXo#5bJREc7cgVNFdlJdZ?Sr>F@)whle)eqQlD37#FCKPg-$y@} zhrbW~`SS|PB0N5hg~shf-zqmHT{hh`>V<}(Z^C3DF#MKsNAyc(Gk!?H0#=_Z`WKse z`x^Dq9`lY5KPvxn*>7D5r|67!dg`_4P4Zb<?VB3b6-!CjoH1}K_PSrSaaVrt zzp^d7%B*2-*Q#)=_U%8ic$L^9EPNDRd13IO*$+ZHt^J}Ox&2siFJd>j%V$c{->3h0 zownO@+g8$6xrcMzSr6Vf{0KkevVE$~z-DF1F~j&~=0T87P8zxSwu)K+FC!|Wd3kC+ zJh8FpT>ZPNT|;eqFh*-H_|l~`i+ui~xBcOlv2FZV{iRw!r9P`WW%SD__JER=g@V%J zRDjhd-s-tOy}hxshbvKM#EjPPilQ6&I`x`L z_f_G_@Vvsmn#a`0_`ilK;{<08rkPWqIk8ImnX6XHBXC>ICgSHRD$U%lyEZG{Y#R9Y zMNK1v!N6IZmz7}GSwykR24x<+>`wsdk5?(oN(nhweG3cn9UJ7*FC7##F*Ce*ymf;DEn0s5Aj^4fUA&I#^zEoKKD7e;PN~{e3Ob}wsgqucAGiweUN=L(t zu;FVSLU6RGjc$25B{JcIh3O=MqFuOaAQ@az)ow;mn|ksa`7I zk1HZ8*)Kz$mUTTm0&G-%eJ!Tvl!Xa=*~$weJgTRAxgYDQoPXEZ*|Z zU?UuH>>R;eP3;3d#~QuZ>cLk-zOs9*TQ1FopUFpc(CL6Eyno`e?YC4DloqGQ*3SIw zi_i7m_R*K|$LFjW-N+%D#m?c~#|`Fwwf&1M#W0B}lg$vI$h1ZMVXH}Lj1M@cqMt|4 z2=aXU|ERk1c&PLL|3R^BB(hvIN3L;?a!g_jCgT`pey{ha_WOJPp~tRcKJU-_{eB(K=kxV?D-okJkWX_P z&8wNInv&*$XE!&r-cTL6;B_t}qCc~>9_!X3y3g4uI%+;$K!M!!OM&2KWCE3FOvg)U z>77i37Z4OMVjQRUec1dIWii}TkFC5upW+BZO$}j$sh%ED6^pyIvN#K~m9T0=IUmTo z$obqHXtChu<_W(5oF$%J^Z$&`(n+yY!X5=F%p_cIlp{IN>!dpn_v!Rq^z?ZKCo+fK zDp*~w7^Z_WFp@DyFpS2}iF&`oK$aAwyQsbqHMsOClrix$Z5+|JtWIa@qqb(EK>NC%8S9RbgZnpjt%hnhJK5kIqks zvYZcf-t@n_EGZ!6`eu@M2A_5|Ir3gEf3Z8XJMEG`3GYOU6W|w|Jc=g~8mlgZq73*_ zRSQ%g3&Du20NM4J>1Kx|Jz!Ri zGDmbOC5sYXm5%%Qr&n!PTpqA=P5fq!;wFY5fbP`I^Y3AI3z_p$cFspW^KbY2ugYOu ziZmiwzQcji8FRQ)ddzqQ;f-h4d4EoKUvLtenms$&vc^IZ)j6J-+~QB&N*g^lhkq+=L9?oQ z2FsGOD;mSrzhzdg!{d~F?ff+gk$C&0E3w{*tk=BoQG94=0#}tFINvNXFQ0rc6liJ) zyRRy)^Tm6rr`XS#ZoCw&k$UN6HsWN$#h0^~yXdjH;Pg|V&c-TTh2gX11#EvCPR;Km z;^IdZmiUlQ(~A=R^5xj1ux3NdSMMojxMD~{ZD>>~;Y!7+p5rEOK0Zv2%|NqplcI=F zPsGkh7H>q&NNm`OSeB=e*~@$2L}hoRe)4aX^pDQc?d=&~cI?iz5Kdz6Ff<9Zijfw>ukoSDn_o(K5v zUOS_W_2#)CVD{Dp^O&6xha|tZ9bNV z2rAd%(L7f357-U;OuxP8S#Ua2X}Mfm)T^8X%506Rli;-bd0FXIoVX|51z&8dI!*Q~ zo<8AfUB=SPhzrP5FK*m$d-gwlMwXj>`FVFb$KLv6~Xd{8{b}jP+ru+ch@es6)`+0 zWSH4)FDbPc*M};af7x(brm^lPRY`}cB*oW`dmM;uTMl~mEDK6XdR?%y;<6k+^Z6L7 z)RSX|Q?){=n{0^J!MZ6W$7S3rl&u@`>fkquMU`vu*2;A&^DiGKa^LTMmy*q+?edbG zOFd?8Rma?$C!fWdQ$0C+EqjPcbgXwQaYLA` zrILHtYoy)?7h}e2ZSsfiF79p)lR!K6(xLsmDr!dVTZHZirv4-`e~?aoV?B;_PyVT? 
zY(DG6Lw4*W>R;pjO($~sTdtB~1PMb+EOEZwd3Q!t<)fQs zB8Wt~c1yfm8NS-&)pJe!*`(R>`A&-=0!-7w)vwggURN2-(||qxlU!N;i-VJ&7O0Je zmO9mOo0NA-tfimfrmva}X3z#$T#TN$*#OsoIv+d}jX55dU zyBqbVPC%~P6@{H2zU-O>zgz9n^5w}x@6FY|O;zi|DiPQDF;nnFXR9P1+!3=XGa%Q+ zaCBT+c)&R?tfxvJ{+Hn>o?M8I3vOfQo5~v@ao*w7e^%l_3 z`4vz7W$V0bPr`8@U5l{jb&KtGYG6foT;>uXB7#uc%InOFl-xxWZcPl<-R3=c8%1O2 zf@1(@UeB|70*&b+N|LDu@{oL+-UgQwCyAH$2*(S)dSaN}P!08>sPxC0k?=2aZ;Dck z?b&QDnLj+W)^NsWX8-1f*rB#dKRFyvKnY=F*{tWvdn)P1{;Tvw(7$@|Cy8Q8-5-Dt zoyt|ae?a5b=`B9_!;*jap>^X^$)mF}RTZ)YcH)qw2J8nQGA{PFacL z-lO?slv7@)E_&3=Fn#x_NF8fMu~pop8ElVl!@(7TI-TH5IBV60<=y0Q$&2Y-*l~>a zcj%sBt>cP%Y^qp9nhxJi3w#C`#ydzv>S7Oka&v57$x(s8wn{^o@SHnbUthofFIt1$ zjj2rQ`yU?p8bBb#;USk6E(!>%Iy*XT5+5N|WJf8v=uSk)%oTtyErZKDWudA`SgkDD z+{`#_)flZg|7t=2+G={u(oH?Ijc<|@S=*(g0}`3A|Za>@}CD?njF!sY`R}cmoBeYz4y3z zUScATzRr>o)zQT-!`>=8c%55(bZ_^lpNOqUzm%t*lGyp`y)B&wR1{Wla97sx>*P5_ zb~^^F4YPY9idu5Kx!bK^CzQV7Dq)ZI8`61W4-TlWA6+G|pRwFK{(rXKr#H?!^S(q% zhb$x|db(Z!gz9}!*{KaV!Pk#^@PL;2(d}yzG;gkhkZvrGqut6B{BUq(c({?=^yJ8& zQ_q3uoM6@)+=LyPlFO{8+DrZ1xE?C5<-&XUbAnt!22exn+gmqb?p4Op(fPC~SCc~9 zEOcvb56E3H%=3rz)|Ml0DfENl%98PXZ9%Oeq&xKV(#!9(=(G2qo6t&S@73NX6a5cKc^A791K#tnqDzJAEt|{%W}F_&k!)I(>|&-!o_wF{Qx^hK8(9A`CaOoW`LF19&zhvaD^NYBf=@n~ zq*WFy$~3~NK0q($_kSLgnS|S3ayPalfj`%@RlwHUnY@++%{@&h zjx%|Kj;rs9!*02zW>et92Na?rCfQA38AHBCLabFDGHxVw?{#9zBL7Q~=&AYFmFkSw z~^zvPcjm8lK1 zsdpSax5{qVy6j!m1r$qNs-kFwmSU%CkJHI5CAP!Wfj=V)-+p>m;vLikIXlFyeSPj4 z*vcUGK}s++H8{(M@=P##csLaAU0J?>KIDWSNT+W7!}+wKa`uMCW1Awzz=>acbL9$H z!h7w>DFpH7`;q22$WADko{M2URd~^@!*hI0jAoKw(Mn?Lqc3e z`JwLboIjn6P>{}SWJl)w=gsV*sVO5D+Eqi%ES>Ax?fo2ImG+kao)g?6DA!!EN^AsS z_V^p?S8V$zYBBVCVU$X=AamN2r*3{)xT&q7Zzs)S+V9L!h2RqVyY%gAfLBP1QgHP> ze^3-xI$L6x$e?5?BXaA5+TLtmm0&`PeDt}(@R3cQzO4KNpbA_8hm7O?`i^oV_T+P4 zl@_Q@yRWm&O8HVU%_$F1evHm9jc81rqst3@+FAlZR^;~Izb~1?E@U5-ZN9m*!*TpR=&W7!)ssXO3P_|EE9`>`P%p$U~9jg5{KPcw@!48Qm zY-fZ}N$WuCtTu{YgK{%9vYSbFpY9rsNBe1B?(C=U6KZnq-zK}><#X;NUMi&+2JQf) zzxp?r@3QK$cbbhZmKNw5ikGTu3P2*af}n?0@A{U|y&Lj0oLjv!L9UF^=6!X$8hh+t zu##+@!?T`T8QzQwgPTjhi7x>WG&L_8;F`!s-f-J+d>13aQmgp;8e+}I0V``K#$?FT zsqhK8mxag|*xlrvwD_fK)eZ`d`DskjVQGUAFyF!uV4IwJ`m&iNf^eOT4bn>Jz z6o|pBj61|%U_f1?L;ia)%=kWpTJD`0$G_OnhmT7%5JHG8d^f}CMc3;Au%L$7v-c=s zuB#N*c(h#)&>;rlTvO*TW-hmXs0KIH7YAz(LFKWQ_V(W{2Q>LkL?v6Tmu_AF^XWK6 zPI~!udtd9r4!Fappxx7PEwldmk9awdg$mLX(mmxmkRW72)CnnD73ga|zIqAUbkG!v zDynMDV-|Pw8yoPc%ByECHwd%vjNv>hIUkrjAy`k~3pmqzl{d0Gpl*||fzmRz#S5vg z`Il7++52fpC$5uwY`TNbn`>9xpxFs#pwW`(#0eUCIW6VKz@?o8pQS zSRwM%mdXXHVTBA5J+1{S2-KQ#6KJ*o)5ks}I#ert|mk#W(gAP*NFb zA$|&86h>y8(m&+F+-L+xyyRYj%N5%_Rmp5o(Kk6i-0wz4Y3Z8fkb%~bP%^qL>onnq zZ->cxuN51y>Mos+9to59XHwht?w$BWK6~jqGlja7%#up+&nf9G$LGAy4-m9(n{H>+ zI(qc+Hk!rpsoS1=&(ZNkJ|x;TUhr$gH~y4GiZ9sJEp9k~w}|CN`XX^e%BJ(t2}>8y zd_oVe+tH+=gbgmiuVtl2923XRzS}tz=#r=Inz=%*?9Jg~gc||M1h^5`;?m~YUVGY} z>G+FN9=1DGAquAol8%bIhi9X?qfvbT4%Z@`IM|s4XTbiPlSi@YQ%!Lc$5+?K1t%;% zlV+~ZsbEeV%~|^Bmq$eB^B%&9v6{YyqoFLj-D2@bqhH;~vpdhZB7l3WgUuZ9K8J0n zAt&H=AlC{ATk79T$mJPYYP7My zC(63bas7f@)RdfGe{)GAB_CB+o&~XI>S^2LznaDKbN0Jh>ueVEO*E>EQ=&qzHN|ZX z`+MENaRf_#>XR!UA02KTKN<=DNJ~RdM^Dj5;iL~sRgQItM&t2%dU)=Ynq8*n&mLOW z<+m==hZNQMUfJP+GYU=9^>*am2IsX>_Y)FO>we-xtfM!uV9<6$u_7iiDp6oGyX;84 z18sel9|=Hv+BDgEvO@_#C^d_zQ@xq-J*5VxrWaafapPKAA2|h4H;&Hl>Yojeb}kob zU%w5H=ikEYlbL{VS^S2;GFBTZlC;VduGKgbh}P#rqOefR;S^$DuzeH*%tg0CjD+rA<&*;NOeQ9Q>$z05w{pQT z+P+Htu1R_Ku8}pFX{0Ej$vD*-hisIS{dMxX?g#+_E?bpWoVb4PZ5&o@o~w2@He#mE z^R&f;_!SFgFh^KGf~D;*uHRp*pKK+wzT0{;Z}#L8Z6*bh{2B(DR)MS6UxtNq5`|P! 
z+fT}v-PdM?w$u{nxiCGdw|z&IFv~kXtQ7~T0TW}<^qY=XJM2BC^A1H~0BL!($k{U# ziSqpV*s9dY0v13u%fa~6#fJ^m2@4Tst&OL>Pnl3H)w+!8LS9zI+`IbT`lKf}ds5=j zH-)kPM9u{dyV=0oYFKfj=E?jma`d1LehOTl?!XV{gK>o>mK4)#0LzGrx?GKp za)BAjL8X!_c2Rz^+W$JP&hXmGrE(+=>QD?3n*F#>s@T!gFu0G>GLD-+V6*! zw8hGmjgQ@ej2|u@6kCt^f8&)-ew`A;WgvNaM>kpF3))|$o5-{Ph>CU0l`s*Hq2%SU zx#$N@Hh2p)yllC*IbE(X6>6pPruGBB!~-gEumc4?gB8P*wj#pQZ4H}IAT}bpGI^au zHG}?g2PZt`#+H$0Kdmr5Ut-SoEMNMFQMC;s@*1YV0{eWg|3xx|5Lr63rBbiXM72ge z4u%?kwnODv$#l-txCo_en%cAXn5a1)Fg$qdJ|DtYH}?o*xfJ|x9%0jOoeZ+p!M^PZ z{+&>CU<&8G?9wGQ98L|8;N;+WYqhO|C6r!|Q!Hhhq+K9v{kADxDwS~VTYV}!5Al@f zWY_%4X6D6J-?k+DlidOUr(GgIGE_iBq;3wdeq+a*^m7k1RpG-T0l>ZIYuDo7oVBdT zhhebXz^ozjUaZ!6CvNgXSBsG$j4pbob+Ljb*16evAyE~N%2=v(juD?n+KL=KooU;z zjd~_~cP607F0dF}(}sgy7YmkDDz@epB}I<+sXvi(lal{<*3ft6nvncjoAqb=g30Xa%;>z8vih(3Qh45>h-?xmk z+Aq_rk?;1)_c``+WdT8yldb}mfTDA>rWl?2j#WabWcoMZhJnTD`p9uiBNZB<1K0L} znUekQk!Rs!XDm9MCbT#cth#wSRj|`UQA>yyH={sWnfM4o?p0F^-z}AnF0QVwKr1d7 z7B3g5o>W4sXtZ8CvP6xjx`%naNg4kvDIzx#iiRP*(ArQpgWgs;>r1o#JfC1Ct{0$9 zk$Mtu3#mXVwwLTH{&?7Gk@LyreY8J&1(SEegL2lRu#OPd_nd;|WrK*|YWOXHdC}Uk zA5Q~_THngj!KqC3!QFIWf|&348iFz0TF{&ixD8ioLDQ7Cr(o_KyA(~BWwt+-qVS=KRfmFp2B*z4@G zf$Ul~x_dJ+qK!9|_plIo7XGrJpy29D@Ao?oC20O^yaVqYZIO-1W2QusW{XMpSjEO` z-u^y9rAumR4C9HLJlZM z2?sCB+RUwtb6@&e8U&l+R)l279C)zlD~d)6ZPwbwB()NK3^q(zpk40@o`WSELnqSqDT8{<)ijylqC4c(M8SvZ{Xsj@1yYneE?JYU3jF72P2Y+<%u-k=3gQ=mM16Wxt0RQQQ)#_rAG-XRUy_=3G81-n zu#f^1{=Dhc>VVKaSy)Em&4-q#23(!^XlZ5 zW39;XJ+k25dps>P;3ONciC3NbpeO=Mxt^F+sd)i{-NGv`Rr@yspt72vH#+=w{D0#c z-0GK0!J1F__2k$`dPsAuK#2XM{Eq#5m~+wt>v1@2nT>REs9~6z_fZ@$4z!Lfv&k=6MjDCA_G`66L~vV5eZ5hL#>Xh+9v>0BmV1?%p7AQE@bkk`v@^{t zkLsW>?%S|qBKioT^LvHh<^^tuvd#MfFEYDWnlSY!Gb1BIR~Il<(|Z)T+jg@Z+8d;q zSF|i&9Ftygs#D(?#}Pgx-)VdFz#mR()6f64LFEhS;+%il;gL$<=K$o z{|<->MGQx;8LZ04rz?;eS8o6xQI{KAf!&iOW~zHCd3MjB*uX~7pzr`GG!7#@WLsb` zQLhH3k2dIhymSYA){v97aBpx*DcAZt8`PVOs;2%}gktP{%Z}!fKTfnk2KN1J&2DHd z)%-_R=~M8b@&Xlv4_+Ne{nj?O7pNkJ7>sh*VOxaLi6%GgK{=?t0Q2bk;kkzM;EVkw#-~0pwqN zuQk}txl0`VibJ{~bm+MhZwCg((zYvNo8i6_DP%?YhE2QM4m*HW71X`2ye$}_Qe6mvg zuxx^*3W#;z17t4QJit-&o*aFO!k>6b-S>TZKyQ9Pap;$0+3O%G8|y*%1}tJbvyKwW zQ{;5vTV>2;&(}i*i2H0L(#c72?XeFEu3?ckEUH(xt70i>#+DSv(OSGcBnmR87tpsp zl`UH6p41d*svVc%t~o3|N)$R2T!rwJ9$Pk+;qtliLZ3_xCDv43ORRX_bbV5mKLLXV zrJ1_2Fe(D`b?j9M&Sc=t&+}2{5B?A64Ys;$*JjH5&0N9hm(1_9wg5-Ul9Wg{>K>6P z4j3o~IBlO{-=o;^;Jp+8{=>ynZ=EoDHeV4fylfP45H%A@e|-n;7))zHOw*v#cRakg z4ay?U z4C_|jhpthjj)}n#;)k3)(Bf||O5AvfUk3H}U-gMI`Ry}T@Pw{{DneYw4OOuNbU6S3 zC^`dS;cjB%#kr?KOYyerlB;-#bn`z7WFt{HnqcgOUW#l4rlX*JqY+r!Oa{1ffyFAq zwsWZ2^Q#3A0Hv$~wEmxB={fF9eg^Rq^BB7~c(nnMtNf^)Nm9~hi?n{h+V**Nv}R;f zVV0w(MQSa6hE}2_rIbtkX-q!lJhN<6pBLGDv-1YUb)s(l6-$YE1N)X^-C{2FYT#r8yx&!`XwbpIb8`OVAOL+n4-~!DF zj5u3;!@JQ{K4l(p!d(pB)9eT|GuCb}evk_*_q_I3T*=GiCEwRDQHb%;D?$VgL~O>l zk^cvx(@E|mtH7&{CHK@|{?qJ#A=iQp-QCTdiqzH3$e@kD=^Yw-k!%g!140J!d?QHF zwuUleY*m~eApM%W{U;j~^yMw%s3tI7ZwX)GLFAhqH+5XLsW%?5BD-&mKS?OXKXwBaK_`4m^p-skn8U?@pOhM=%w?jz+QTzTJHlr;7_ zx$5a3fb*5*YcP0tN>T#i8#I;Gx}PQ{LL?l$0fm@{waJs8KPGp!r~v8=QCD~{bt)1m zP-JdaP3CjKXTh_H1SkTzN9hYCwJe^EnGP^i_Q#tf{n*gJU7?u0khW$s_@N*r|G5fT zdQA^=_POnPEMx!zoGO1luDsj$$hlYb;{uZHW4BY-mjj+UT_mEl9A*+TfQ#+C^WL+J zvV?W0=VnYiu9?~YCN8u&`s^8TOw9$@4ZDC~Ry!M_aI32Ar}fT?M0940r)HeDJWco7 zC=(*Fy*SHow@{R55j=-~Mi#V7Tcnd*NzN7a!l4|;kde#`39Xnd@}8`P9>jiDY@H81 z2Zp4db*7$KGzhFGb1g=jPQaTOvX;CH8PsVtxGmE{^nO7r8?TS^a0Q)MbVm2x3WfTf zvvMh$pWi68Rh`?t5j=uT0v`Po8ja780v$dL0rbc$g?!Fzoq0+t*fnnszrNIqxa~H&^Th1&AEY5`*fl{92~peNGP{Ka)O_7cP5aNmh&P!sx-?d z(}USfnYdKQMN`_0193`|5NMuTptcq)m1;=x@lqA7rW9G>>?hh*Ym+qwj2m+Fwu1=9 z2vUfuMWOXyDJsoh1_$littOv^bi!@a=3@5t$z+;1WFna+35ncj#vctCFNZRNa3l8B zjpoOG 
z>D?N#XxwkAMJket*eG6*eheA2kVJx(xwxX@%yL;e($Po0llk>xhaKKKp|1q3#W~k= zSTk+u^a`M_HVCd9JLoo?0T;$e61G6Jelo2;&3WbL)KjlkQ`&LjZE>5V388;hQHe}y z`wmOwNcvqXBMX?SrJnBQ);(+8r6vQ2FfXP`tohH4h%7-r=M~@4W`R7CTAue++`wG= z^4Ti#7K^YfpCfm_&H&|+59+8qRR4DJ=kDTOuxVg)+_+>@DnbW`CRGyQuX*?_crd9H zGC0DuFfEwvTxk(^AT6JG&SS1HXweW)(%RN(!u+eJY6`S~-Gn>mQJ_xuDp$EpS@ZJL zaFK7uDtW9eO8lBk=u4HUfYH$5q0M~_#jVbma?v-c2Qa$ zlnX5qsC-eq(kAi36FQwG@)}U~vDX3NZT(fpL#AY;^Dt#!6NiLIdH``eZgZPr-pjoU zraKAkOnjJKiTx!BXr@IuP0IT!HB)5yT=*>n?XtZO=N=;Kend?Wb!Asep&I}?Iso*Z zs+q3}!RvhTINl{qOUaaZS0&HQCsaSgyxD7=<@n>!4`?2Ya>{j=0MB@38|IQ1wdDe4-&?n|KjmC)&REeCA-{>vGFeC!)G3EzJ{4fvgpUwdWw1^x-5P zpr7?yI2k$&#V$Flg3&}|ujowc_XdH_GPP0aVE!iUveDGK;1A61g2RgLFPu@f^T^3? z(n9@FE753E3ayES4%b@Md;Dl|fC@j1F+cbVox)tM_fw5|0A z5^#T0I%WPr4U8$6Q#DlHr`DD*7cn(;kM_Gh6y~Xz5MYc4p?_{5Os z?LT9tJt#uk5W;@k3CDSQoD4&;>d9k$hq2`+P}u@ey0ui{?FB z^eJ8s(x>LV$P}{V?+Oz~dc?VDmi@}d<>|aej^DEtU!{OwFBSLVs7daiuF?bjAtYqy zEoCzAbT%Xm_2N>?^lmNn?^fwVbr-El=ViBbx7sTs zGXY{NIZI9#&y7F3oKmRhYFLtENBrEx)YA7Sy+X5mM6C2&zhUtj-fgnkuixhd|2xiY z2k2M;Hw~1sz+lSawdwUSjdQk9oXFl_xmIk9)BxsEZof|4)cS;=frWIhFHT;ei}9En z*Em_1M~Gip^5^Z@vMY3VhnT-RYCM`IY7MIxjpUiGw`>|ndODg z5FppF79RpJv^r=}muW;N3U^|d{Ydj$qlF3oO)yIp&gdc?u30g}E;A{!93_4@h+&;Gl2x#K+7NZ_dPbBXaN)?WnCu(zG%ldE+~`!bzX;QOx5; z@=*qj)~#syN;7M-#?E#Sz_%W$&CzZ%QA&!(+kDYep(=_pivB5uD}&OJ3uMSzM<@&3Dkxx@8%PhBdRIhh_?b|M8a%* z$gy^HY&l}TrxmBuv#~sfWH)@k_4DrkP!xGYQ4ibRSy#$>4JfG71Y`Lr?o=O>s+}L3 zvXpK&al&LPhpZ@@kGu2*lCWK>^1Ly=1QATC$vN99UmUC4k#2nH$z!UVCv@<_u%CaE z=^UUjJPcJGpSu=P7ZR?K@J!=r;+Au$8cj8M_5nqa!Gdc-;ll%@+tq(9^xFWks{s79 z+LKTRi8aQx&ZU5fbIObLjuLL0Baw%UHC>^j!RLQgCH2Rupj=lK6bCag#yfDRVYXjw z3!-=z3bZkCccS|`Uju}4uJC~NG2`hdJV2-)queE0X?1EPfO``tU+N5QiF3>WSI+1{$u+$~TA5TrMLJjgY z;4NjF4C?FEI7#lcEk0dUM>+D*K1)!W!i}#35f9y%)EeIW6>gaUvTT}93628Lb|vWD ze+>Q$FbM)KfS#KXH`Z|up$Y1gq%H_J?j2jiP-6Alh#r?q-Z4NdShrHF2ERKx_0=;u zr&Ka_+c_F7N7MV3C;NMfUaFqcK+I6JE=3_JqI_MWVOY$%lX&kL!nfCtmtMYj`c^WI zrO_CGnq{BW$xei8&3J8#w+3u5bL(82`^pYeTJLyGCto|oCFuToctD3t)9|*hplzOM$I@ZEUqIbaEB?s@2_vx2*QYVie+pz|PoS)> zDpP4y3;o0C+{Mwa%4A-0bpEg7w{Y*rd8>clBJL{gk|BrP8-^#2zWHrE(NO_z{~7xk zrqHoA*BmvH)KWVOMNfP49(dmrnrhr?Wl(f5w-9n+FQY%~KhN)$fUwn1lU~VRKt)~e zi|&A(7PYP??42-VE+4!mBckzXJZVRw}g#r{Lx3L8=9QL>E9LP=}-V zN4K3t6`OowZ3&Uk+7EJ!pUDwoEgp^wozq+zGu^jpp7Dk|4Iy#2;bhy7kAt=gZKwo~ zSvz;--pFoHIE4Q5`xsn^q}~!LSgsR4PP?6Bp5Tb+Bb8L-#yPfT_g*578WZZ|ZZSLl zZW?DkXoD;bp0l0y^7LIj5bz@_bI>;T&I{Hdv(gVt>&4;`F^46s_SRb0NCvuFPZzS& zQ9}f`f@n^1(E_*3*T6DUz3G-k*iGTZO`4-8vdFv699!%r$(=lBi60l<+S1F2M`vRL zmKn8W6JERDDXS8JA;nXHk9*}1s#G-4J&CFyO5gD8V){ZD-iv2qUxdwJ9dq{mcZ{;^ zU^B`@$FfD6HVUZhpoY3h;GgQTMaPF&LzGaKT9C9m;BEXDlxi#p~wy@D0M_{uIH zXL3ZzE0+2s7(b+YqxvVTF}~ng6}lh_`Yf=ny;MC|d6|B?C@J*)`n`tqo(O-h-^%FS zvbyzf8IY5uEWU0LH|}#Ft-$r=9LsZEuV2e5!^G+|?WoRFE&UDDEhw^SWF@M;8xMV; zv%Xy6?HmsUEVaPjtTYIji*KVP8-Z`X6w6!a7rVGdU+%qJL%BJ<<_yI%2$`el^+PW4HeFJ$2-y|7eMQlZ$7UvdU^ zp{CkdROoP4r(m^PF49~6y2MRGv?PS)5yMf~qE+om0K0#7w}ITB@N7kWB*@h!E%$Z@ z+hvrVdYxhyybc*O%g(Ngp@<0! 
zG_WSX5cwa97NB&Xck;xeWI5O&Q5{@!bj zWS66p$ZqlXI=hi8Img_dQ=G6AgLv~f-!~9F9%W(TDikOGP@P_nE$<#~81WXM%HWPn zBxiik)bH$MBg#$Rkd^v)#%_hv*0tv=^K2|8H*5T&1fjWB74m?+PNF5kUC+H~wPbta zl7GG(mSsq0g6&(SKbQPwt4Uy$Ww-Ld$`o_Ca=+3D8~oXuxG`uMHGhRhc4FuZM6jrh zZYjrqIP&poeE0UhCRrK#6(98Q)>JP7XpSJ(YE*XcX#N9r1sNpcy`60xh+gj*a2nTN zI6LdVMX}5J?p)3x=OPnc#6R%m{@b6htRnz7JVLUu*u+bqJJr|l?(v874&-xORYyXU zFpoI=!rSNS{PDDM)_5wfvj$k{ZLMq_Mk$A}BJ)IT@Izl@HuGa~*^zCHUW)xaM-4GA z$)<|oyVz@GmtDEnZ_PeCKGvAI_0Mstia_FUrW=aYuWJO|J)b@^9qX z$IgU&pq8@SCTi7_7nLI(qalOJ^ia z`93<9qKzebk(o64)OW*ljw!>O?bbmOzf>yizZ5(5p)pCn4S}0mhi?P;OXY z@{a$Q+Aarixo|64HF@y3g>uhp%r85^HzB@y^QahI~_&+=RdKIJ|y41|D{qFy=Vn`43Y8+t}>RsQMy%%qBr?#Yn~3+oqM zpdl;j4wl^Q=>9!0mq7lMF1lO=ja?$v$SIi;6qob-24syMHiMumH<=h<7~TYdB8GK$ zU3`NyH-z%NT|qMQ-3x6)l8!j{EoLk_{bQc!uHi5vR*>$Rd2RxbVW0vmQ2{Jv?GL!X zNwi?queTD0gS|vsja;(WrBCF{h$9jDns%NE#5CvO!D(Y)pk>{W=JX^3mF zRfxlpXwuV$zj!IG^EQ8YHCU<-e3Y+2<_&I$iA2sQhLfs12ADhk^5{z9MXL^2|2GM8B!kn(4ACh3G{`?o>t9F{GJ}NmT{Rj z7E7~a3ku0x^6b)-1!c?Yi9pUetfKzgbb&-zkY59xgFh$rN8Y3g*IB;&TF?OZcqZh^ z-L}3NT9p4KBu;9tIG1BG@IG%HmX}7Ap9zTsmThIYy1R2Pj&@qn(A#|3t8%|4WmlN0 z5WvSWL1x9}=n6rO>J*hlNS{wiARn36!5H2|p7vtDx5k{^aV>wiuQHiMzvxpd4{eAFUGbrZNR6NTs(A;AWnUrFWTg<49L-~oyUCO*2)u~Ecv zmI+Vh#q)GgM3K^Iq7X;)@Y&+A?j*(M@W@uV3NS79_Jot~XT_ETe-`t%&UcKYhIsXf zbFwG#fnWL>_sPuCc|4up;__43%*z?1@Y13OS|{8WBj|fDhyf1yZHeg`MXY*1!5N26 z=k(}sM*(HKv_GS&A-!_ZIaJGvY9i(7HHfJ6RZ$zAnt$q|a#p(|aQD=mp#zWkTB#jf zn+i=fQeFrO`V2rTeY@la^^>Q{a9gaFR0TQ*7EcwlFcr=V&GmRUJo4?!XqUz(j!FJ- zltqDVs{k3 z*p|ZSx^Py|uybmf9G&k3q{nD7b)<{h>mdtsxyR0s_v`0jjTe=7tzLBUxklC>>$=2i zp@TJWyeIEbXO?^0x_Z9=f$~VeeikuvQD*l`d*Uxx3ZLAH3Lr*aw8SKGOT~23=!dGa z$9pMnd1Z;I!Bpm=)os2A$-_?mm6$SC{q}C+Amwr5+|tYR5*k%Fnk*L7C9UeZ^Vy!AB5WE@1x@`n4W8qf(f%isZwfMZxSG_A?wCGk_8s4fiFl!*jn#G-Mj@m z_FN$)uM4``aXQ6>F+f)5ooH=kwbi-Bh6j zEb3@sTqshdkQ}=9F8DcTgc>#9mr=%9qJKorrc~wK_&8ODN<5%8D+e_<>Lu z-^xxLq4M0c!f_^+Qc5m3apym9nSKf2>Q7VRn*F{UXm6xF$v|+d4OIJ`*}0+#w|fm? 
zUAOq=mbHvT1*JyDLs5>`-xZSTO#Xrj8mVup4EvbTsKsMtbAwqDy!J%3PiQ$LvQ0)7 zB|{q5Tl~g@j!D_d%9WhH)M4&pgN=A%q!un@tNuU(sksM7}UWSv2P&O4(+lLMwVV|cPIb+>>REfmsU+pA%4m72F1+0-I8E$(NX~AAVrxK>65Xr16Qw} zCAwR6Hgv?VsX@HIWz;*JNNl*VrCg&ZE)s7D;IfgpD?u=J9C>#&bUl=skq*(FWSVdJspj3hy zICW6oHa5uV$YKQ9Qdkh#l&vbaq3Rj0E@5qK4FewOAy=mr_6MZz7q;qKk-@=x&uSUoQHE~-vU*YOWeb< zNtB-$PC-u|A9lObbhy{}3{bYK%h_iI4C6;{{#xmf(0axPJk`h`h0`^%T;aY)6Na>Y zRlCo*k-c8}VnBoZH(Uo0EL$m%4{B!{5$pmGC;Dm>`d}2X?52^1(O)o5<3k zHy_g}eSqq6Ig{oUH(&W=#>fSZH<~cM`jPtjRIv@EtTAXR;F=e^uxs)YQiB)Qif1)6 zvyDnT;pKyPk08Uov8}Y}4ov7)A z-U8HpVh-z+0)A{1Ec~VFP8KTr@;mbI7R0bY_h)aiJaH_`x|el8<0}qUDU~=s$*77T zC}W;qEa#v8b>wTVwm6vYA+2&EiGzYsCNSaIe8Fj=Wn+j6R&k^Ai0}Z&q#|SjTCVrt z)?wB^4K6XvYY92#@dQp{e@aRa(wZhb)`FOYd*!qB89>5%J>(J?#U}1TDz+g;FRLOpNHYR8PPU zQTslELPZSqGZvr0q`kFQ*{+`AatSm(Cpri%bt0divGvxKL_daw#0UPeUon+V?kNi1 zkuD5F2*uzt5`s%XX?4it2<`Oir@iDqyjy11V|G4B-n3ir$6IFNj{~`^oYLr|5GAza z%Vf86z{@wJet**``0w={AXAr-h$jMGZ4dz0Q%itT@OAsB54{r7Cgn9NQeld^Gb#u9 ziG7M5yPX{x_A1=Cqn}>RJfGo{*vL%z*gVlzC(4z_2-l~EBBGM@d3xwNJYAN#VH!2* z3?%k8oUGZgbI?aKS1Uw$I=|eRdn2t+?w;l7PUr_xJoU$JM;sGrEv&3$J%$T8NFUSu zSX9(??EXru$Sc|_>$~fQeE!gPK7y~3gqndlmvPeikS$A8aB ze>h#fn7{oKQ3AQzRj5nSY^#!ULUgO{K$TqF%~0C;-&ziH1V&E^0%*@9ru=o8C+QB~B`abuVPbT3YApuB<7@Y%MP;E^$BvdleR zw!>)`_0;$Y1tvb(W9vX|JFBA4Msq77!7s`lNMES-49-~Aj+QZDngMZW z%g=e0=Ee-A<(y{egN8iNuA(Ze*TEc5WNwP7w4LcwQV$)R_q3fDw8_1j!i<=iFkLT; z*?|~arY8GB=xB_sn3>!fh@5y!U2IWTD?JEyw=1&ChE0B9KZIs<4!ZRO^`yf12ml!B zhNcf&6!jIVy_zmP$u=$7EThs6pED7I5R0#@Tvk#s${m%fR%eXhXuZ75j;_2ezW`?s zReBmj)svJAAhr8#%_FUoo6wt3Qv;Y%eW>0%;_qY9#K}z_qcS-I5?6=mdB(`4m;}iM zL)*v%xI7BVWPR-nqmP}bN2PsrIVc~re31?7iHrXk>IMSiNji0h<^ytZx|m0Ep%Mmnsaz&wCjwXPj*-3TQbH^4^r4#EZbx64#SnN z?VtVUR6A+ha@llCuS$@Fy1LQqp!US0FK4KEaT|!ouT&@BS0AU5HV;l9`s6F8VH<EywC%vfB|8)WkQ!?(sYG<{z_b)EIh6|*9IHd z4c82O)U-?zeD(Tm{7zf+7tly4{0^$oC#FNZ7d88@gMH85o22ArlhOu4UhCG;y))=0 zhfvFa4W`=^7qLw?;(Kd%$B_yJE2} z(lxpQZV6S7V17zU0;T|=1z{0DSOJ6owv9gl)9ZmXGW+unZVJ~E2f-Wt_DYS5vejGW zRMuzSH_Q)WBfn@9LaMq!E%S7=b=!;$`VDW-TfsX8PdOdx1m64BVG2y-?qU({Hz=tg z-}f_I>_>g$r+C3(u2 zUjhyz9?nlqTMT^Y2TekyoRfZ7hw;YTY}2Z~$ypfb_l<|nknNtcN0{zs$jEa0;bHS5>cBJkw z3kJ=VC81gUR^n)%EZF>?;tpW;;t_(ok=>^>B7qaVM7FSP6>?4|z?<4|fRaqyIK3(8 zgvaUJPvDdJ!(2g-MP+l>=o@C(_;dRTD4~s!EX@C1oW*I^{|U=m&8zLY=_^x9WArkH zdnh#9*rms0)0K<-yLn%cCH*U+nX;&op8nwUKX8B{M>SYO5z^XBSHYt~1eG`4tZn+O z7PP|$P{-Tyg*G`nctn``$v9;QN?xZzgN*zy3e`c3nfiiFjT^(-J!+$eYFX|&v4oi( zvxg1zhyhrNt489q`s45@xxl4-V`Ysl{NbXD$~!X{H^^SUe-0X4W=SG<+Iuxt&U}zj z-bZ#mp94S9yiD~7qnl;t29*t)>++yJ;QJ`LGyT#$zfQ?s8W;mmoCi=dWcC`cFPAeS z%1Q$T!i;>Z`Z#5ZAnQ$HT9P48j_M1!<3GZxSI0Q&uHM+m$bO&9NLSo3i+2xXR0K8{ z!(1H>X&6_`+n z9ezsmiSDt}s2vFB^*}fEc&&Jg(-CC(#%#P=JtzLEywvr_5M=(b88e~5#s%3a`AIWx zc+;I%F0wj}Whiwm4O2m+Nvdbl1Ld9VoKSGntlJ}A-=8;yFY_x-3_TsU{ArSRK&w^^ zaOOoj=qWJy0Sf5k>6|k2U3Rmq%7wpI#$eSmfeG55cahiVC@n`>VtCuk!ROBTUpO_{ zi@D&NRukTsA4~QJSPE6Z-OZ#r9;1`Lot8~T3oJc}*UK+kW6diT#2Q;3x?X#yfCd_H zR}ON38B9R#3ckoHJ!V4VkiIgxW8a?@>h^ERj`O(%p99Z<0(p;2<`Dd=+sHOB*MaF^ zYA?T3bui@j_xC@A?VDv}r~14Z-S;h?l+MuSoZ?WOJp`Jr3H^QXqbiu?6x6My=lNNR zuAE*eONlLc^{m0$np3O$6@1^I+Hj~7fT*W&@Q(2qv}J%sMlEvz8Lr+(q2DEevF>Vkcrs*kO8RV3zJ#XM<-ENeS2vtH6AM0gt@dl1XAWp2{(h-rfs07p4j-HH zBA$6xkZZiLc7~p1jvdN3sPGb&NWR+U2-lv~`0FbPMV!WIp>%#ZkPUBYaw~+I{ z2hTeJhT3~u2o{X&2s-AHxjDNe>nmXy*yD#G3kgg&wP^jN$znE^=LUK=Th5>s0u(+; z)6jGMLUn^6x6$P7~H8N4<;ehyfwoJf-eSWIE=jr}ks~eapszqL1 zig#sZJ)FiCygc3Noh7SY4nGK5qM8k`6V@Nn4)pqRF(;1ZefcG9Wb99?E&I@&x<7>i z!9nQkKzWpGn6{ynTEFs-v^(DsF>HOwv|s1lZU?>MhSR7?3eNlLeBkdwX?C+js2JfV z>T~RKEO2DY32*+7tLu(yGF#d&9ajOd3kax-f{LJ0l@bphHdFt6Ny;Lwmh&c^yQpyZnLbMBb$$vNVyI`F4zgs(~l z;5n=ogO1QHo?D7_vNyDct;uZnoALwJRXLo4r 
z$(M`fT*bh8_@YY7Jj>xjURux~+U55=W-W5z`6LuDog8lb?1I(wY< zr+#I_wTjt#F-X&ci0co`Ep=6}i7(j{=o6qHg@+yUEn(u+1(3dupYpm=A$*w^+3 z?)4&Y*#Bg&`nHBG7xj(0-S=Jq*PG<5#lB8wc^skTSAJxRgJaXSLT9NaE1d*N3tY~E zdG7wAF{n$QTixj{;YX-AO_Wa%SF{WYP|v@rf9`1vAJj;l&~*~q|C!Q>bNi@>cWzLa zC{3>knTbc7Uvx%415Y=3FSi>7(Tc<5O`z6hzxx1pR}77hpCk(2J3CASoO2tRE`U1q z;+dj|#B#k=IQ4f&wBvGiodsq+Kq_&JoLGre1qfzluk|n;8*+@wmvbUA_VKJ>9|G+= z)y_k|ZTBFHbOo5F^cyJ4vzET*@V9i_R@3K7a6BLZxRJRF`sjh=JNu*99h^ykw@9%` zV<%}?@wqX8_dfM57FeYQbgVV>yWKE(b1qR$+E<8Jq{`a{E9%J^N_fXnfLZKpNJF76 zwuyr4k{b9MW6pEV)iXWm26fuv%i{B`;ijx?1OKS;@kfr{I3P{-p}C2}!9La_W%YKq zKE`T8&qKDY^>JVO8A8TtoO?EolCBMph=_oea4rU z$+$d$e4zwt<*1o=N5Fg|d$UH@NjKKPCNEQ2Ozq&I;FQ>>_M{#6k5t;;-#Y8?lqBRa zmkm&QCFdvi$a*7I>w>`7CuWxMp+OU61GF?R4zvGCxDkj^RZ$%RPqdwCsHFMmY-=65 z1Ly=pH}(AQI1cxh_=X7eJ+(en@b7<)_#WiD@t$Cr?|J@a+}BE9l%Q>K8$&&k41r)qEsBD+o`@K>#yvvAW}n?TPirT3>mB%+}oe_dY-dZcGCyvVa;~DQM&h3tOmb|Vcg%A7pur5_%XW=sX#X;3} zjElu|;9;J@*q$qYZ~ob=jA{wWy3)RC9)W=O7`T7`48AY|R3jA#wxJBgpwvk}`R|}o z_I3<(n>#vn^nT8L*b@&7QMdacoO^~Fm%al&W|iC?xG5*|p<{r9@2B71GIhcK%1dfq zTHn??jCme&;)q7e{ts?36d9n#awe~a3b)ipl(1jHBi^@2e`A!3U-c^24|QqYj@`2i ziD9tJ-NFj!6hVcS!i6z`c;UHT0vFnt2f__;)vsPR#KqVJqn7hBsQYT|4!LP_beMg` zM#p|lJOK8{cNoh18-~CftL_xWlr?~{Et>6W4*Yz=;KvPhFr_Yf(BuQB8v#V6CJO!C z*O_knCsvV_n|i%P78~DJw|2e z-G3>*Om`!MCmH{G8`=OQq%3AZk~^Yw5s74B|fukW6+K`meO zkB0Tj)sN|YgvLMCd6$#y%3eQT#+KDQM`(fzmfy{=pRP1I)YTUY5 z;B02vMpHLF^8m}&@~er*mtPUu54UgS1<1B|Ev;_86em&Y zW@N!%2?#5yT%T z-&jMrGW(4DIKq%u&SA%G;G}hQ(Bw-HYak`-F_-UfovMb;F2aF-=S0rpZJ7 z2JrvGGWF|U-XXLf@AV8HKf6`ckHN{^Hhfa9YU|6m8XQY8p%cf~*tD10MfW?Zd0fVL zbtTqC;=u*;jRHFzEbA5t@)MSK{!@tpj-8M^=y_|+Jh1uh*_-apPhie@YbDGe0pk5= z;P2w!5fMf?hnGUUvFvCJQGpDHEPqN+wl-9KbH<07#)I;_%#wcqPxQSaI&C`8v;9wL z!VWcM)m#>3309Zb5FANcCb)YQ^1EwTj3l*^dbarWjsq-9T|HUEP2DX%aR1|Mdr=va3n=V&`Ja(jCk*hwJMD4qO4{L=3H6f*j`(yuncqL?oJdZ}^wefT+7VdIc4wsgRRA z)pfo?h+a4~^K3T@9wmSSba7L$Eo1A#T}6u66|jVe{oKPMKUco;I{TvE1DW&Z;_j15 zJwC3CpQ+33B+=b3*QZQ zgD?NAgYI0HVoy$@CO)A>pC6!PE3b*b#S%lf6lZg{=$DL7E6K1= z_$b~iI^7%&z%p?3EeHyJ5kI@z5%JTOg3gNGGzT>}FvoKV6Qt1lr_s6ta5Fx5!gT~s z89kgIbG{i*tSsWX6q0cY!a5|$&N2Np+LJvmrZ#F9`Z~l-fQ}foEwZ!b&XAfJ4HpGr z7UpX;hf&A^h2M{Ag8@JNsaXp}6HqlFa=~r=@>-+@hdMoz7aeOMN(t7v*_8FezojzE zD+JYf%ngDC-f}4g>!KsW8WETS*%Xx=Bq5F?EsdE0bxd0d;;AcIQ%`Z;z{l~mro>9s zWvHwSSmP>*4GQQw_Q{#Cz4GrAQu(cN)|SMrG|h2YDHBD~kKRDu?7*FxUC394nMNG< zI2tRxBEQ}`;|`lz()rqvenA^qnWxp6VgV+%=f9W|#dK1^_CpUUs-xv(L)HQtY4nuF z#S);*t9Ikd*xcx=g4YAjO+Wqt1WJ>SfOw63@hk;5xrMVLAe308qc zK-~eR`FWI-fVfx&dkPyW50e98c?m)p2%6EbyOfaq&xj$;y`7%DdL-L~h_Uv(DzPJfq+Xa0>6P;T9H^4==VUOfp zB#}s*VeKOO-(`C(KOi|!E3a9Fr#J8yRH&);amZnqh9t@YXyOfu6qIiK*aLpyS~m*% zA_9GvpIjN781x4ZlUNI_=N>^jf`ZbBK-BYF=P*me;rOsc4`n-McS|Sf=m}Nl7jMo+ zzbu;9Ozo&KV~A)Us4Q+tETc0aSb=O2jT%>i(bt;I+-=$uPy#e zo#03Bm?Nuf&lc}>lIFh!VFZ+*y}SM9WGaoZN%JV4mgMZ-C=Ghi)^YcittIujZ7(*W z@A)-6zu;)Ktyym*lTJ3=LE6j&50>2c`tyACGf+clUU^hq{KV?qVKuy9F9=FFiobLTPsv#L zIK7>LdGFyhESvk7?@tj4skQXuT0wt%O`BX;!wf!~swB zVd$e0yH@jM*JSP;fd!2(bAO~}-$1g9pW_0ZcPD(7A9WiB60?kVt`yT3C8O53`D8M8 zuO%;*8%nE$#b-^`doL_;JX@3qA@nBb#i#Z@HRQ2(f)i;)^s<;3l7)K{HDcOqXiwqi z4=+q24ZhUtC%H!qZRdxAnD`Xq%?rDOp}_%gs>Hoa_vS%G}Ij&N8S0`&GM^pbnBGL^Xtcjp7Fmd_M4V?~4af;07<1 zzo^aYb;#c7G< zg+VKqG-#|m^FVLOmFN1NIjA+)J5o`6s^8#LPk$P9Lcy}9ythyUD=Q5gMt%mE=5T!R z(ikomjG)&W3cGpbNzT#SR9@t^#r@Qlcc*#`96u~32>OYx_oAd zJLfo0q2U0;1I*?OPH&!>M!ZGB&)v3pKDXN|k)!N>VB6w_iDI=7Liur0FZFG=JmM(N za4Z;`@f7}t$vk-|c-s2~%DfG-OV6vygFQqXk(=DYlVM2S{`y2jV;A>Jt=?U2tFpqe zwDH8TCH z%GcWG&=(m)T4G7>dD*Yh%WSnD@#;NJaczk@YyCB`If;)TeRkN{sUwrryu6!pGQ9kL z&3b)ywF@ts7mqT)8!g`&bqb!)LM$qXB_ACY&AQ0#EfcR6Fh>%pVyvJ=eX!@2NF6 
z&r?1B8_#7}1ZgB8uwC%W=rui1LBIRx=7W@yOqf%g;6_N%(pW#Do25z9VAx^TR03<%ANRD=zX>S9!*L zEC+&>h7M@|Ge$4+pv&=~huK{Hzoj)f4QOhebpnwSOP~KRuitafS^;Y6Ng}pRMoxTZ z(v5)USW*38wykkGzt;hJPb6$d_d7YqS`Vv@|HekCggPcS2RuiwS2?@GY5d-DoXlAg zDLv&7IX@0yV5WuD5Zio;Q8T{z0r@&z>kl|Y#_WB>s+s*@3Ckdsd!HlB} z78&oL#{@}R&bui0wEz(3P>M{U(x8 ztK(8=mB8NP+^2U8hlJ@q#?2om%(IN2@L_2=xdb)5QilreAM#Q>FXf|MMA4>Oqf?Uv(D`Fe<2thoolm&IWGDsYVpx{)*ckk`outL~STU>1 zw-xUK;8AF~+4ZcerSJy2#%AbdQ9mN@M%RpC8ieTh%Ksa!Q`UVfEB|jPeLoF^5EuyThR#sa62HMoJb4La2Ilqa zRnnMQRU(qIIrX7QPJG2+({jtnMNptWHGNM1SxwMYtV$T2O1C(!S=gs~C1n9B5C@rw zX(xW}RU_;bFt%8<``gc8sPqH+8+OM=mASPc71d6UWwStW840hA18F7xvdX@m`@{yF z zC8M6Y1Z{yxcp-)!1YLZDc=(@b*FduEX7*nUi00^d=!}^KBhnr!HZR3ANJ%nfLw78JN-d!ZalUG(wgt{1>p#cq^XfJ^90X1w53Qb1O+ zYFE>$s_U;A@`9*`ldl^>GWT=bE-?Os^sCn49PkufZkyf7ub1tdj0>C`Io?Ac-$Xi zeqy*S`%IiUCj)Dpa_FN&ZSF^z2q&7!+0P+Go-w?vbzSc9kO7pl{Hs+I+IVQE;?0jD zK1iEH&OeQ44Jbz~Gv1-aj}?1&UYH(3+b3yetLHWL{Z&pe+VN3EpmdFd`$sA8t_XZHx z>+Q3Q^#pm|sz zE%n=MB#I-A`J*mWTl{46?3qipPW~mg3sTxFvR|79WR5}yQA&vMck9fSute|H+ zn^^Bhl8j$~MU?wh-~mjzN3A;vxKNZK4Xv~$@@NMwIHHo(F+#7B$}VNb$P0i9x~UFz zp_GtwY*ajaeDmG8<3D*l*+u6M=rO8oRsJK|t?5C&9Nq={`yk@z^6V}8#x0A0-2ORw zy^w$F0&3LjB0|*{8hP;-_p4q5(5C{$p`E$^!`b6Lf}9jjf#UPoUO@&YbGb9r1& zKl@NvqVU97io3;-#fY9ex^;M$0(=utgCCTxg4dTd*(VCs2t2P@rIoe0*wc$o46_SA z8Bar%^QwA&DKZi8Jd3QB^mYL*ZDc}2Io|@vPCnH zZ>mF0fp<){KWpveK}EY?R80Wwc zt?l*G_31*r5Zep&H(;)5+3Z*uvN}m7+a7v!<)*QXikx|8=iAxVeR|*42<&z5kV9{! zQK-@Z?yF0EyZlDt_sQx=eu4_`Z{4q-XWy`*-5QP59;^%j?)~>5Oh8!jo;~4XAO+_K zF6~Ah)nGjyr3H>1le@)nyx)-4jXORusMpU=g||zg?PKiYY;22NBz#S{U0|z=GAU+c z9PfVddWhDbULV>A-t^D**=a${@=ix^!yws#RJgc7S=ghMb1W#;1?Eu z2i{WS>5GRyiECgTS0UYzTcFP%ht92N9zC%>;y8K>PJh}5RnpkL^ZXl>}4oB_|W z;h+3Jf1e%`Vh@?_k=TYCpE9_ZIpf|ril38A>!9CW<>9$3G*&127A-(ZFUb^*yvD188?Y6I9 zg;YVj>>l93!x1Xm@B%V}vMCvivLmqcou6DDWL;QYTKJm>XB1zt_35tU+*~ z{7TjN!-nq`75g3b4t}A(9NWwn6LogI%~oaXXs|7x{glAgaZQmZ!Y7AY4|XM+Yp;)5 zc=wJMQBs!>Be7)3g5WK*&LSXdZ zRW-AYXC6Cz z_%`j1Ogm=-KNWDBj*l_s>X0C`J`Do0L8m;Bi4Fx9B4g4C# z-+(rrnU)ADbvk(X@Hnfn(&OA?c!_zKdiQZ^wCEp7%Bq4`Z+vEfVut2Q#XYcT(Tiea zPDgwJBJ4?Rq3(eU5tNBGd&ylw@m3m;$;`};WnSxRZsy!az_^lYg*%U7hlCcztJ1J` zPLi6_?H|leN@TtG!8CoedXv-@K_vmqc)0)pX;qY*mU%qMbb9Vgdz~8o=F-!rPn(eN z%(~m-cLw@qIb<_9)69yK(`n>#ORl&2vP7vzx0@^18Y5hary0K zHYiBRb;PXJ`B2>L4Suk%5vK6Ak_tZmHwPL?t_y8I)CvzANt+RzhlkC*}-;6;Q&rDDn6z z@#72cZy%ojYnf}KiaWjU0|N2U?*=eR`l{~x6tKD+a0TozzkkevpTRX>ui(%3aNcQ& zYP7bQ95(3!%=-b`Uq7*KVu`=G(L6JqOgLl4_B8Gi?EdpikLsEce=7$c3FaaCLmv8G z9!8;>InRi^QGnMlz7BZPl$@lWw(mYPqph;|#!Ixo5^_Rf{Fv8r`bNad!he1;^1?mkd?)(^p_4S1J7WBQdrbpnuQz4$UGpyl(4~RD7yHJ zU2L=Nt`c$6aNWDc%k@h{O`ZE%rY3bZ5>`^yZB*9ERi@jnX1VHRMI7QCiPH%2Q(kk-8 z--Z*`SrO%E$uo`Tocp54ovD`(^+1%Pxjb4sGl$WQQhe`X#^HW;r*;{gmN2z{BafM` zPY^od#|Z7dCZB67qFBO=$wuCqn$3RLu$!_KGhP-KNvvldJsw_}f)|ZrR?jm9qD1#^ z?jp>`udKApM}vr5b4ly+}?IJJhL&GP?1 z4>db(GxP90Ga0b^-V_r1yO5$l)tLm6)rNu5?-@Mdd#tkfQNR=1eUvLDk;%y4pVK&y zm??H0<#wnIbWzVu!BjqqZCFqKCOje~m`!RZh z^weoJIsh96@;$*&y3a|$b@;9fpkS#ScG`Doe@lm4ST1=T5q!4mXr!mo` zADb@dO(+nTZFDAhHL||VHx-Vb*WQ!~SxR?s9a<6LCD}YNe7`b(J#qd~H^6?aneD=- z)db($Drqh;lDuJu=u#!85Xr}@=Yfh|D=qADCz3Z%)%NNtNYU4-){k%a*CYY8;IqW{ zSUUmrLS?V^QOEk9r3RIr`gy^_bTKE~i!IF>p_Q5u*G0^qy zB$2JF)6oMiN=|7;VtDE1uSuFn-rY}()}8n7y_JkPO84s*{p(eP2=AIC2nF&bE%(3G z?q;1J_>p-8d}_@oxiDdI`~pHLkuB4ecfk-hJLx%yX>Wr3J}PZBRoRj(X;20RF6LHo zo?)YySs-_o!=`5Qg(2HZ+YK`9N7Y)vx6s=n3PXJ+S3hLGp7nHwK-^6%D0VzYBjG;JLi9@&`CpW+&5QGtY6*4$01_imsBHGA}&B)eV$ zzb#wREa}+$x>vU1QkZyB>JU33Lw}`jYBSbV!Y?q8a`B#MW<)S%c4iOJc#0XFQZX`y9at@_^@wjHT#{VAWA~foyC#MhMJD^?*J7PZhKha!!bMGZ zP}#LetlMpsHmEFf9e>Xbma}Y8P$?vMX8RcK7KYC75&o<%zW~)n*3;VCes-8vOkJZ{ 
z*{m9SoI<~bb%{=rpmRm=v~O&-uwu#VHvdXY(ZD|LSwo=ZIz0X1TZ5j=9+ZlGoGAcj zzwkoNK|4?h0e;%#Ap^fH){MyWg#yw>?32*s_$VM$su_}jQM1vcKLwta*W!YFoKf zWwCzcZf0>mRpYCM#)Xw+q)mqNWox!<4sSefhYX zF#|h9R7&8E#m#;pmsy^Od@uIZ?qq|Q=ccYv&{CIzdrQnDS>H&juXJAPcXIiER<_>} z0O*$s+xBqERrn*PUhwvRoo{#l8o2H{bhw6{scnvkRPhnRs?-vMcHCJuEL3YPsEArj z>doX0@R34a(k2`r5cZ&Z?$0#n zf5H~I*pxc5nT4@LEk8x(bmcK6gK%gjaFZq+zv{GAW8Y^tKtTza&5>Wtkx{;-n~v2U z0x#d4dP?p`Zq^I`eLT!sv>5JM)3A>KDB>-VIw!%YW5M{6puc$*BV?^@aWb`18uWJt zrJ+li0X{I45|E7l|6fYf;xBIYm$eGmt&P*?qFSY z8;>`zRj~^52IlkZLUP_x59)Ne zJqsq`p0p%p9^3Rcvb?`X`~^fy_vveR*c8oZ`>59*2K!QwJ1G-0qwfLqmJzBFB91qk zEDAb?+wEnT-BUCEr1p!&UVMZdnF@p{WGxb=12>DrTLLIn@eyMyc6sza?okEd-8F9?U-MP>BTXRoa zpZ;`-{uRGJ@Cw-mRx$OZ#E0246mN%%6!(0Il2{GwV+s{~#NeEdm)FiaVXtboihT+!B|f=1m9mX&=Edt1KOD^J>;y^ycWVC5Ee5hN_GfwF(uxsi6}p z)*m4xpH!pgbDNdR%Hs9D;GP0ek_RYC`Hw)~Wo7>PecR*zvU0Q4j9oA3=?9A4+cnv4 zKy){dxLQEcn%Fnm_2qS_wl@u;a66%5%~d>t{k6VW1$wR~ZuZts8*t9KzrzXEn1<>v z*v;KPDty2rwE8-LobrlwVZr{%1T_oPKaS?Oa1M8H}I!}chK~K44P*`|KhN|;6x!?{^9Kz zaT{6Q>c;&7H)aGdtxGj5>lOxs0p@`^xA>Bqj{Fe>aZJS$&E8(U_>sHZS>#Ae?6;}j zyTx}W@5=5J(T~@QKcPgzoXc+TdAuUtzI|LTb#1#>AR;G-NLS`TuiLrhwLhGrC^I6y zn*Y6|D^b`~q2Y15Xx4Ar@|kjdcZ*$0xLu~!Ak~k56p%Vr09Hbrii3nF)DZYxvFn4y)=s7`*^-W`@RT@u z1k;=Dx&s2F%opgV}r(K94O{@Z*9P&6>E$v@b!Me20HX1P6rYf8pH z4-5=A5kE75?d*o8(7)zAj-G{)<7SKPw}SFQ+zqP1l^rT<{`BSn=O(_`tm;~lit(4= z7g+pf;(V*jBq0hj89|55MQqlOlwa951}c5uI!iGj9rl;ffSiV2$jeBAt ztl?3nu62TtH=n6QMp7oc0G^@m49PTi`af;&S_qUpC8tWC6RhWdAejYa*~S4vZl(Yn zlETgW3)d2Im$<>}whM$qC%Iv}pCGO(G$NH;p593Df;!RITZFT~XY{oX_A&9LS3-SU z2c;VSOx@AG{ZFYxvF`x=a<$h355*gl?qjgVCOJw-3_d$Tuh6MS%-PZeB;o*)B=m3_69PVylyUG~^_QD1s-#6ct)1wmE}MB6MFmbzoaNd)?09LZ_?+!*vT{vOZrjGil+d3i65(13iiw-@J#28!wokmqSpkl_zQ>$ zG2@6F@_sM&Id8%9ow_J|uhE!P?G3xUu51xm#amzxQhvw=TJ|#5#%r_0_I|56D&a;- z_b-467VkbGyyw@9TdC+#ApOAo@bd7?j5V1NC>E*;)_y3kNzn9xWhXjDVFl3UAAqFN z?ZvP8;Dy767G*Y-PwTQT*}hCKqwd6>WBn@yi3iJ=FY87dc~ zyK#<9yrGr=o=-4m5ZYHfxw?JriY>vn#TwKM)r4`1JKEq!Xv7bu)D?=M-=-={f_njK z`$%l;3(B)D`88ei4_zk!aH%zZddL|mXSpGCSmAE5=MYNrHpXcN-F7Ji78wg&=D)31 z_DUVqW&@^pusLn`^nxh$N)tGq$bZIJslX!~Q*!m9X1t&?>3#wI@yy=v@a1p8~?wApI zI9%i%k*y;t!x-%Qh+2sg8*RM#O2?NC_3_L;W5~#{>OYZO8J-CF(Q!_8fVcWck?YcO zA(+q-7!77q9K8i`=qZ_C*6+5ZL=2nSX=>dJ3u`qf1%y!Dj%EIPjh6ZkC+C!~B)6;Z z^*+s{Cnbt}!3AHGWmiMO&BS%ucawWGArx-d5PaMycXojrM;o>{*HQb4C?(wHJ-~%G zGWV5&Z#|X-IuQ;!S1-Y-%6(?kOzLeA=Y2fz^Sc+i3$87(&3Ex%m7LQY-|2f@U*zAL zqICzRbl_D*K(9*C1vvl>(}y0fdzBIbSvh}?jC*V{9tmmPsq>Na_~E6FQw7j#;2C@T zFYqG8O7CZD>-HRZT6baxxDi0QZW#&9MZXK5=ugD0~0XMX%SgJ zZkjD4_R!eniv1r3fJ1%{f-^7rO!NekJeyYJQgqq&JLD5y#c7Os4i$1`X10`BE=8wG z1x{7%9bGlV^SD^}q)KBSSM#s7jk8N3HrqCOX5^$(`?gRl69Nu6PWScGnsBq7bg03g z1g8K%^TPzC)UNPVAH+U^6=}-qtOC;~QN;DV&i7cfZfQ)TIPFc@kJlT6cRW7mJdr{D z00c-(-@Cry!L=0&)1FoPZj%3xUPCL34P&gNqG?iv-0~@dG*iwbYn5BBGUnM-DrAQL zve`-)<)xB1%e)R>Y+V8`ISG#6{-;G5bHAqhC?hU4^JK6uBSJSBq%3!7>tq>w{xH2} zrk8Y^H8qio+aN+1bK~9pHLKtBsdJr;UM9TJ`3mV%J*+SKdOCo4DcK$dzVen^o zM@s9r=1L#_J8cu~;~qJ0JT(>Xom)ixjTT_tYv1Fks)&l2;Sz4%Ju-VNiq7T!L*HpL z0(G_(v?0yWlJm<^WOCGd;!f@1{$m*u0@w)9Z)LM9uwHq7DxwVlOE1b`9C3Bmh2WlO zK^!!;k0Zy;-C}}_E7iRUb#{bw5j?rz_F&}=g@mhpCN~n?-l=CaiXHIen3JMWtYWN{*BT-oGm%f{lc6biJ1QXJYZ<-V+aQkND~I5B9xE zQ;wL_rMl}158thGJbBHxSy^G&&l(S)jZzGYp)`*#kjq-_HeEFt&! 
zjRODej*MF7ypk-JVt3z7cG^2_8=Z%@%KVjIIG|zvVN^?MzuM&Ax8lzL6v-Q%TJ>}0 z&{e=`K=#H^O_kd4aiN%w@Zs>a zfvJ-FS4C8-F0G7C;&*Q|iv8nwxNwf8+gq}xsh{Q*?a6?K3zF9}*#Q^()C^!#S^0av2fASaO6JYqsvZ2w~Ox z*!OiJ+d(ms>v%D|nH0$~j6IUVzRV}PIIgibma#Z41gh)heUwAdi`t-&ji<`Ld>zyd z#qETGU6eCQ0!p;@i(lf2vb3OEZfm_Yuh0fHZz>(qVOTYY%JC)B;;cLRO8D>kaDDPW zIp1{i?QgQ3n3$>K%37_N)f*~I(}oTV{~GS7pxT5y1$)4~(A}rk5;z-5{%pC*3D-Cj zHGZ0!zh4;4x`^pS{HoD*tk|($MTqd=J+lSWs3tvK>=hnYJ%rwrCP}hwjQnFUodgKT z6M0?n#FiNl5@a^f+MspQWiTnmDNWJ=ut-YwsxQJssW(a{IXi7@1~F>!A@}WLOqlj< zAAx&5Uk64#^5t|UNNpK$Od~G0^=|u67q9?HnNFy=#_#cQ0A;f{Zrem=`?wkN^d`q$ za-Rj?5Z#ZS6wSdA=bi3=Eg%Dfm!L%*ce*Gj6G z8iq+hDLE2WG(mF@U>v*PHO(;T@-41-8Z*HY1*A0^M|mlNKM3014C{81Ld*G>-;Ku5 z-{!>&m+sU+XRWEC*;#F^DJVyZG&Vc5m_L$luf4~ETQr#7aem7524%&kjRW2&HOt;G zKAKKobmSEk6`7>u2h69CZokaQOG$gnGaPS-+ErI5_sztwp|(!^kw&?NO-seJrHIi= zfoVVd3kwPgNR!_uB#I5)W>H8FiKSZ|jzhmQZ!j~0WxucjD9x)?$jhkvb%F4ePoSbl z1KMmYp_=y;vAdiP!na0$W*$G~7Ls(nEirpHZeUNJm#}cZn3`p0`_0gwJF2G_ncyr@ce!+=e_SRfpxtHL)yTJg!U+z1vkkl1Ljv@R4P z!maQ6T=T8=Ai>8Tfz9x{b@ntb-}Wm>Zkmk%*EWMvHA7UtKXpp_ZWG*jdn#=M=8kgx zP3rw`^`tOGB*$i^@?i~YSWb7=q%KBq--BKw7#L1yd^bCix6w;~#+GNl1#V|o>^rIzK6)Niha1-i?UWdNN27~*qX0Ze&hjrXuxa6ga(EkhlQ-E6Y7ZXLPUAXJsrU z_|hfjwhd2H!aabjLvj@4*opSi@h9)wWx72mRoZE2A?PzGeDO2%BOxl`!9uVupX@8? zoB1g7m@w7`JE`PkZKA&OKPSp-sR62a9E14Zpp-krIE3B+vgW4WW6Y|RTPo!57hG3~ zdvYLiJI$x97KPhFyy`(ukEp{t_v&nBoxih&pA38+BudO4P39J8(Elks9Iuwpuj$ml zS@HA839gTFmxm+giDK}Y?6${h)$*qZm$qLaKoX9ClW@?Um9&|^;k2f!o|S};z%3)k zyT77*ble&cjWgX*LPbp=$y0qAihe*KB1WjURTb9s6mSq`4f&M`nSNZO6u*MfN%BvRFk7W;eo#hOhl6v*7wT*-**vm8W=LV2qMT}DMk1+b4BrT&7a z9~6C*(P)`hxwA5NoU)zN zUO2@q65C%0#)&&tQpv5ccDI-ps-U`3w7POtQ0Zz`^DYX)cE&^{FIdsR``U$dfNM1slEISRQ%N2#?IN4g=lqkN>Fml+}YNy z$>#afr)8G&^N;AnJ+5}hY1<%mt=+>~?Ht^?g?8e62;;~6mBIw$IX9?00!aG0Oa$!9 z)Q_RuXCYs}p!8oW_*0Tf+sIml+Q1}bikWJ-G+i*!Rq0*BAKNa8wH}2zuLnlW)h@## zmD+lSuefrtzfAu&p@yf(2M;v!5~pHjWp8#hRjG6lM4_YTqb;k24-yqr?`hPFtrOd_ zH(UD2rlTu;UuYH8uM)knhq?Xdw9rpN3VId$ixUy$On0!G~ILndY6|`Yfd--{ zZgJ#|x1b!`SA9i$A7@$kTYL+fb)%3;pe2G1!I*i_8s5GVb<>Z3`gDVja#AhmKXM)I z9&6^J#0@5nH&?b^ETAYEfBb27^WIDJIw!5LB7I&afp^(?TA~Cs2rrdZVj=6Udfc64 zlXxlgI6czz*l^z@^E2XHe+^2_PQnN;5}_gi2zPvfiuzv6j$3PZd@GtgwO)1QaiYyu zfayjf4J2>L=l(b$r2S@lKt1l%x@Kf;c^_0z!+%AHpKKX3O1*WSPsTc5mRguLlM75p zIDM$5pxhGu*5QlQ@=_6A77YdqF+xxK_ASCxqC%CVxQph|sf-Vqb~aZoUv9p<{)Wj- zlYEukty6>a9~1v-S_LQv*^{XrmFSWPa{Z=a?&LxFtpSj8>WR5wmIvZ@zX{Gc@gMtP z%EcKIw@};Yw`pL5RVOAs&9?g;oX`#3Vl5W|Gb!$S&+PxWG}+&^V=#6<+mURPu9m)$ zC05dx59Qw1CUR;#0_Pk9b(v|`ZfN9d{~kNoEMh9VI4A;6drV8Az-b8bTpz7g_R)|4y?Z1ZcsOpNT zF9WPT9@=0`Md=3yF1fqFpC>(2Qid=ZxJl+xs2a6iI69Lgo?kEK0NA&s%Q2ZM|AaIF z3=7!C<#9A4ZIvs!@Pu3TfcFH6U+0d?V!kpbKh7ovE$zW1>H(Z%QX?pd;N|=W)}~7Q zw7m2Pr`&^oIK7l!$fho5AnG!ds> zoa9G9U>yjte;T)h)yJ5GMBl@ec=H8>EQ`20%%fu;_*&~wGLA{s=U)+wbzQu|5u+8w zu;Ne($C#D(&hC1J(0r_urD zQNG*-_esqTQA_|bqs&skRDu?_{;}KOmTHP*TvW%n>x7rv#Hgp>)-MNWZ$8i}X_FT< z7ot#0EB}xLvG+Xu6K~X+2(IO>TR7!G7)jzfj*gc!R&qzkjZ<>m)js1@`;l2;(TrQa zH_CxyrpgCP^lr6QA>>+}b}?>#Z$*K0+b3MHmAR{*u~FfQ=I0`!#QlI5W`WklJ!lL3 z{X52A_4r$K#N7MB%Up<=LY_fzJ|E^P<7S&d<)fDDG~D;ax{KMMAVt&9ro-H{J z6$L&+8Xot~>vyL9p}0?Qot&dA#}vx*57G$(mKi=axO4=;2&ct-?-KfpocwKEmH6nz zJ-S|T@}GNbO zF73&f_FlbtRiqcbR@9LhI|o&$At@vYlwk4u)=;!q?cXfnQg@tOXhZj?3|T9&URFqK zREj6zc{hFeUBk=-s@yZM6l3hF1x-W+NLZ zU;KOe+K{w_s02}Mg865rdhh!et}v0-?=mO(Fqz#=@|z0yRtuZW<<0*y)#1CMyJc}H zUrY_k!E*bG5jOC7=A1SkCef?)wycl|OW#VM$f(0!-iPQBUH z38(d%cy>^DXaFNj!ExkRt-eBr{JPf z@`>{fEHf1)M9({r8j5_fQUb1z$->524fHpY49%|bOfV!K^s03{z6R(z!Q0+%`LTHC zH7g!b-$a(Y6C(&H;WMqhTKJ27vx^fY>VX=COJ`_$+lOfN|4c-!sW157$^P`aa`av*}tCYg?a z871HEN)@*YhFD>%Mh_xD=YCJbl_b0FpTV1{)NqZaA;fobtXhiCWxh3n~2_)9is1 
zKI|{xbhW5tnE# zGIG&Dt#YVk_BQw|<7J8Za=-A=2+n#N^`OfUWo-s_`?2+Gem#Ap2kw->{WFjWX3WOa zfUaRclk2$6f(*L&rxcAVgkay!@-^vg@W&t?(T2icKgf(LU)NPf)s?0VUIt2ZOU!ja zLcWXU0B8H>qL{_}Cfc6G8Fab1Gr8n@m55P^S=o(0fQUz0UEmCNpA-qlNyeYc$opwm zIHx3>lRa(v&Y9eTb{TC~y$OC614c$NQ*i6uUj{2%d9aM_brhH8$pjah*;ucThItl!#q>B?lG!+`~4Lw|`0)mZb`I z=}nOC{?@y$uQ|DfAB$D$-t~w=`U2Aj)y;(Bt1QLt)}Kn2hAjSmwaX!izl-erX<@^w zaDvIAD1nrsZ9`^IbyB_XMoGxZ91t9s2xQ#gNzoHJ^=EKo5^yJGD*vDP3~WQS0zOM* zMa~-1aLf65Dsg6}pd(c58~})7ZcmzaA7eY-E|P==qEw4ZHm&YJ%_pp^Gjc1)=u!am zp8oBVEe#GzoX32fl0S%{dW{Z!!J+n?cDCkr3M4gXwHu~fBc_%jOLpBfDCUlWzF@DIIN$as(e{C}ISMDpZbC2N74 z*r((22}S3s8QPa`TP#VvhDG5{av2mIKs3O_o%4jRGtRtf^EF^_6}Dr^15Fh`61Ta- z18@LyItc2S3hy^VABHavmNA+!chdO}_mXMH8BlGXy62SD*9BbRaTR-@(>f79q{U&W zh&PC$QZeUk{308h61tR0^A>tm=1XRglnv1Rt06^F+OG30X%prNAU>DSk=-@RqeN|2 ziuZpg z3x9NwX5*n(lq`5cXU>dGq@4iUA%RpcTSf>)!1LzxSSjJM4pfMHMqeW`*nzN#jJ-a4 z-HjW5&7w|ssEkxDm{>&%!Qb%`Zv$`_Erflb<#Kar56c?1$K_H6{&IU|Q&#hi?njND zmcT(qSh=m}YEWpqig7y#lpa9B@YVc$p+7tk9e%FFC>SisQlS{l(1pG?aZ=2xg zP4#U2W2@ak;;DUZI`9y^_dr-m-^S^uUVMt4D`4>ickL~u3CS&bmrPKc=z8~#}TBIEZm{hR41=a=(Oz02!L;RW#C9_ z_KfbIE$E}egS-l#VdkhBe)b9Cv$p@Y;$HkxHK89I)|Y?XIDJV&>bgHeF;qN9E;Z|M zP2J_K3oLhF>V5OKx5E@L851SWIg4-J%UQ;#st*UaX`efbnY3^$_RAcEUXi;5A=l(O zunS6_LT1E;;C8Gx`p=lKjp~6gLx@}=Q2!0~0iCN#L(F~ex!fhEp?P>X6H5m2y~RkH zES^5g>=E2muc@Rat>^)EmAcXec)CNoZ%1I zxui-s32v|8v{k{~66W7l4i@nTWVFs`(K_ZkU-EDuV_!Lyp=T6n2@I=T!}MN}WS=Q~ z6S>~jJba9`dNh$$|AFn5{!l39{&F2wChzLyr2$~IN9dM)s8OrUP>@|ysR8>C)oO>u6FjM`rPk&VE{fe(1+nrRFz<(lrNsVpLE%%1RsbD$ z-a{@yb*`@fTl3Xdz=i${q88)H?@}D;~MBqJYMTtV}KxJ`{RTuo=GDoi(HB%a}-5b5DlLrU^o#*{w9R>&f^) z7z~xr4pt6Y4Mm;0cEQRQmC~17-b$miccbDHXe!_z1yX|b04YAMRO>3r8m~|wr$z}; z0?Kf+o9uNynmN^Gw#V2Qe!<8qO$@Oic~3vO{=)Dx@fcsy9>L9kQ6s9B6BUS|%H*!= zp?!7yK5{EL`zPCWujTt#C~(bbTWDC?14bbJaYKW!Uy+bqM{ZJF z=T^)sLT2opRzPao58LpW&GB67#>i`Kuh0t}7g6M|ynb5K6;o$HpBhK5BOMS#9 zyl^)#b?dDol6$D(>X#y8EjKoA$R3F0cIf8Tg=J$#f#yrBZw?gWnbtBa$OH47Vd z;Kn#72g9(vKL6MzEVX7vY{oDx7i%T{trk$`Noj|CUeRAvd`L)TU)=Bi%O-9K z|5Vq3ZeJsJ=}s6~C@ov@-xoUYXroq>MQt+d z`H}jY>j$ly$F_vVvd8(zHju~zy-+U_+lw`)Q^u%8C03R6_c~!{^DbUlnQYg*5YZaF z1gKqK^NcmbjtimBtfR)odfKC5rD9S471Jfs>CZQa!;sL`C^!zfFg(U9X$tD3nfgp6 zAR?-E2SkkWW%sgGNn!Sb3fEJ*Ou$<`q-c#O6qZ#ob;jxUn_O+|oZmj8LYELHYM_D8 zR3Hd z-TkTtYnNDiLG=w@R}MToXqOqa`?B9U$OM_jwA}T3`BEHRUgoIAT&$E3-S&%1hg6zm zt^{WG%NrP+bq|B&4$MxOujZ>(Tg{*{=jM~_ z`pUGi6y1%$m%dy9Hyinm?7l&|leLn}RQ3#e!7RPRE3m;h4zBolJQM8EfhugNk%OtT zwT+Ogld{lmbSbQj1hg}PR^mm7fa=!B|9L+z@%^Ukzq^HB8!h072*u2+&2A!}VH3evOc2N1gvP?FJ; zfb_5W#Xie8znj-$2AWbfBAJv>h3!I=jnFS0gaZbe^bM>khOMC1QrvK zIvW0@XG6ReHfA!coU$G^Y3Jv}g;Q*>;Df;QbQW$Ly@WHGe@lHis}Bqg-GL9>jnfy{oc3}eH;-!XE~q|v&5vfYwCQP6H2ENre57;@A@!? 
z50}jU(*UqRx8m2|j|Ih@0WUzy6+I-B;`sRRlktiso9o6ugb9s^gpPIVPU!Aeiilqr zGYBgqpB&n(m&~OUOSxTzH9>tvw>m-#mh7J^FvJUTBki929~pvo1@uk|CcP^m_PK~3 z7!Dnn9uY^t3AM^X&%eDimt`gRz3HR?R&Qya_BOecfbwl{&Eyz+<0ltt+~zm=DAIbG zr$g$+@B;)RNM0VXrBG^}7!)3qp)Nb?SW<=z<*=RkD#J3=4o@Kzf$sG|_ZDGvYU-`B z7(+QAFTU`kXTwAVxSmul*)JG-3_K_2(n`uK9CRe?P+;gKP|X+c5SjXnS^Z6*drBp6 zFZjD?_X@P?ThbjG4T}>pJy={VSI{2t+ z^{jnAn7RwbFSE1ayhsCJ43b-`C#=cM6EA@yO5GO-C@5}tX zIY*9`tFN@;`KT@C=sq^`DV3&p<>IU*nM9Pu{oq}!zf-64&g6QiM6?iKL^TsC*~DsSGJQC^;D+6{WZlVxapnWdHFe z<+ET#-QT#iYW@`cCkPDV@LbenrDwSgR=L_(0JY`=hx&hTZmrylW1kKq-$MI6nW~qc zjntor%t;BF&~X85Xf^W|?b<+Ps^CMu@`QV1^Dh7wWjF}bs#t!@mZC>yi|T5=TG7VGf^a_p!=Re_ zxb$hihaEytPTL`v;^|^^x5UNSnb)|5&x#=Ym?a`f>CP3?#6$m#-6+x2M@0k9q-06p zt0h?>KqWx6{f=4pv56MBf_WKoiOc+cy}PQAat%FG)zs02L~B>20gp=#0qpDHArukh zK}2AReHa1Q;X=D)@Nzz}%}j#?*XxXZj%taffSW*|r%uy)%a8q!u-I}B;Noev!{y@8 z(H`E2TH&EAzvKe*zQv_jAvfh;wKd=0Ksj=wv`_C;psS(e>L|= z_fCARx*X4_BNGN>=y-ZK@@GcN2Y5C@yCU%Hmaz2eI5zA47mGW|T0rVpJ-;9igR=*6 zFo7TkvmLW4gmVlgj~p&6g?OEaq$&0^L<5tq*_j2Fq4!VZ8aCV>>o%~YE*rXEh{I!YrlmDs~9)i{%oJ~Q>1=^fVkX)!&gVQoqP!kOwW&F}AX2`%j-4qDF7`R0^C zBWPw_mYl1sP5DN+SC7}DEWb9q0IWdh1SyZ}m#Y*^%smd{Gm0@aflwr}fpWDRO5O+E zrDx$vbKdS51Q8Qd-N&Il>j(B+@fxQ;dm*I|0V<)zeJg{Aw~D3AS27ZXI}sri^2Rdp z{Wnv*um_tbZ`y;re>ddeE~n;ja%=Z0gc$mZT1psN86JZkYp68krex$Vj^L|R@znRf zR(`x9C-?q9^H(?az`?%YpH9z|I7Hr_g&Vm30xPQIx%j^M%0Gy0XLoaR`LUwcH{F7} z&;irim&KQ;WA)$qKNx^^=AO1Ge#*=XUNmtz7DW@cL#Dd6NX~T3k-$k0Q_Dm<4aR2q z+<<+o>NoNs3;x(HJWTu4Cx_~xs9Mpv3I&pAx(Vips01JoIbUZy@KO2k=1*f(lPD(3 z#4tOgeHX7;3})8jZA`kbo=SZAY4YE_cy2L@+eQMtb1RHg@jw=X@qZ?C1{K6&1QiR8 z%+evB1>`o-=%@EwAp|}>9oojXVbKK@%VxYdA$%zhT{`v)#>J~%?l50Q4t^ngtx;`m z*RQdadqsg7i^t{PH=B1P3S*mf#~uHNyaQK%y-m{BiUi?91~K_VCi>&t^}zc6?_kB$ zNzc2~KJ^r^!0Jsz49;1%@Ki2Pz!Nf!eD7#*;hN|Z|3jdqM$Ee%@CP>f8uUN#wq%Fl z%z%aI4Wpvk>~N)}`tLF>XN%%Yo1qPCl_1Vgs`ubQA1!S6MjceV@n!l7?-)TbXQ4;c zfzFWuij<5nT)nu%y9qyLfj4`lb325|xAQ#1PMvHo8zOdEY`ms5C!0GoGM}Nz_51sn zU|qow#|v)fLW9AKy53K-AqAv* z5jw{Btk#mxDu_Uv?Ioevn$jSDEDBBCxgDfHg<>a#jt&G0*CLa7`}+Bpcqbx^0Nbc#8CS_{Tk@9aV?4B|zj$8;v)#{gW|ijJri~ zcrdN?h^7++I;DjHddp|Nrdd(vhB~y$68t;7`LjAgi$%>)lijQ6){aFOExNEHRd6wq zLonx$h@igt_;w69h%e0BVxKxK$NJT3HV}tSkazjC1=(2^&i}Ia3{LjLF%&Ug=B;P;O77y=hp&XK z(fW`VV!p(}G(by5$A@tEZ@9mDMfit5M4n&8-USu)E3SnE2M2S8src0j=(_?S#X^~{CR;5@}-*2k5{87Ms6kn?_!RuqE zyUG?kA6vMXco#?Y5YOHt(3D;PPsePw=W?GGUM&tJGqerQ3(&fL0dod%9ywh-9l>K` zTKA-&mCAI6HX^+wM>q_~s$$M%!5#&guugs!JFvm^9CpY;*me?cQ(3 zN)}jIg1&+(l6MMc7sL4H$ zMT|Wz{PzyT6|yd33dvROnbt`|GH6l)`xUXtrhlom zR5GK;J@A=#7dM5rp(4W@M%Y}~@GR$<=O_`Bc)|>Rhs_a(smhoP($9HXpENUc@6DQv z#c~`rT{U@MPb(irSIJ6vJd(94$NO-YJoa;{c`i)xJ}b=8vQX_eT&l%il33)Osx)O> zV?erFjHuF`7tX?i+5!F&f@+{!XLNXK-P@QvG{FG!`h?(Zqri&Qk=lf+r&hl@qNjcZ zIC)12AIkbu(2;3D=%RT`#ciAGixvrSVU@`QM7l6<8H=B0=Orb=o3QdN`SO}izCZI`{KR!$wk8$JEE`$;gUO|D%tg3uOI+267|9&e+pnvliL ze8`LJEJRo$p}Dn^(_FSTc#~`P>6Q;PdD>9n;IQAtS?={RE17*tx>}WMf*oB)2I%H9 zGeaD@t^a?}=BV8T*W7Wksc{rG^bfByVxLG;vU!P|mgiw_`?UJf!)WOo6>4_-lIHLT z9%t8)nb+P8M>>}1AT9_E(g(A`vVG`dy5Y#r6VK(p>9Mf4O*?$Q4u=6b)6|@wk^GS{ z@DZh9n+@eKXsVwe*c-;Dj&q~2+>aSkQnDw<=S`&FB~?FSIdyI>kxlqx_pD3Ynzl`t zx#P)z*PBfk!^SScDhmm}>m1Ao)DaS3{S1INlmLMl6y{OnaMRxnjPqA25gUl4`gWvEM@VMe~=&YNsWGScF)q*G= zJ`l15ug7!IG%Z@!D|nyS6F3i`(zkdAHM>j})X#pfm8R%4*12i}Gfc4xKYVkZDbVsdizu$h52O8b)S7J_U zV^+{K_t(?Dr-LZMg*Jj68Fj;lSc+(MOFh3(gPa^imHF?U|E5a*^8N=Bf@}E7K!5sH z@!c&eR+VhHCXIFaxiAj93+nKff7i>?u8=iuey+qC*SN!E1@Y{%%t4hsLYPjzo4g{| zEW>yu=##$4(GxOp9nDvDd6`nr7Xi>PniBe!sm^QoM5fs8h|ED*#z2#(H7vEgWxXJN0NRa3pD!dJofBVX+*e4~o^CRd!aDQJOS7W5Hwrl`_)?b00)xABsnw!w zCM(}19T?_r9M#;B*6ml6{KrziQzE4(L0;-{O|bLK<69-USAV;8X5&hO)$5&O%pSNs 
zM`!kSx9oWy36Y}AA3d-UxPO389RPJo{0V$3Gt7&4#FQpudczEE<}W^sgJe*mdhoP< z;2Bu0&p^x4IMT+R|4wnq{y-;gsi}zlDEl=#_?-A5B~EzaqnH` z@-0I5F}FwvG(aK8qFz5vr~MdQkPMYJn$koOa@d4svAVY||Ja~A(7ZY1pZa{oKvFC>BaY2c=qY@%8DhH%d9c%!*<{ZCpz6A#jxkF^e06l z+93x^vhPadE~&I;&LrfYtLENu&0*#7X3dCCV5Xx_l}T;jFgpA^ITqZQfS7ikkg+ zUL2Yt0^~fK3%fn?hGepa9NDw}UI*6e<*!Yn7%m@Bolb z5QnGJHT~U79RpqDu&I--QQ0rSd7>YbXW(1}F-*f2CW z4*wXTEKu>t*EP${$_{>A^NqKrKK40rb3Eo1UY$^)L0v4qc+G=n;~+jsCdBVzN^)fQ z$gD8~^TZ`$X-hU5rq%0Lpb}T{Rg(pxuUm6C)Zc7=T@gqg0>JwwuzE{LQ}2TmY1`F%(`yIJE$K$-WI;WbHna9bCMw<*W#tAa#xofC$jkB zj>{iyW|7c5wk?vjT+K2=s)yo$C7=g%gK@#b2ijKB;Z>F=ovlju8j`+F0*FsRYSTa& zmrxU>3y`(WHP39;+ow10S|&VU+uuH^>GLcAejhuq*Vxwn602x-_BW3$FLOfxJV@S; zAQzs7mDG+}LEwq)Ve-0r7ifat@0m+$yi|t>jlAn4Db%T%6@rFyT=Hu^91QUwG?j!o z;%0o5*zYKgspYc%DD0$he$r*uIw;e<^*e!U?sV&JoX+y(-T;Vz;Z?3OaoW3Y>@DM2 zF7>24bvG52%z;#L?=|$HK@QrvYT1~%%{c`HjPp(Opy~uy`7P(j*TtJGmf0%5xP{#2 zrKWp&Z|F=^cPZ41JP(*Wbxq4 zs#rc`+$`+Ig-0|Y+x;C#=fmnPGMMgPm~IUCx5P`e)Yq9i-&Eo?yjqj-2n32iJL0A8 z$(VcE3w92;U{km}PmhKUuhVoYJOA29@vz7$c!l-8LV?g9#8M(GJ}?W{ybp00|GwAh zP#@_j8UaY>9R=m_GX}c>WW(3zN{2^((&U1fC3i#zRuZOk`dS3A&J9HMGj9F5I%N&@ zAsg}J5R3z4BFWpbJM{tZ5SrQaBX(Vp#5M4Wet83`Bg*ccEAVPO z{aNGm4>m~dWnnuL0GwQ?GQiJMHV<=t4jUTeLjFP-*JL_;?Rlj@R^y+6HqeB_8DE+q4pkKPL-0?udi0?s=I&c+^2_A0T z?DM_Q+|ydH^WzT^(gNE_^EC0ddCR$5@j;ZO0`GWPW+JYme@sPcED*gly%+(Slw0Gf-h_bcBX3JtK+W!zr;}FiNuwNl64uh}0>=Q>+s40n_WQ9eTeC)jz z^~C}cU+Xbne15F4m}qDUq8N;xez^n$pp4Tp!mA#ETcq`98R#8TVyRfJ`vg8P69pwJ9`;k5cBz7GyNmh1B517;quO91vy5%F8+m6D<)+U8W6)&59 zBEFxBoH*o9?r%x=aT#qV0Fk#WyCfgkbhc{-x+d}otc?@K{E$TCOh##Fj^?R8xa4#d z?IQk#U4-CcS%h{XDxRbde~QX4U7q;N?~d4H2~jafnhQNLo}_A#(VI5ZZ|*${P`b^| z<>=k@PUDY`>)qZrJBwGaceVdm*hpHyLBn}=zWb5J9ifo#?0L#u$qm*9B!o8eVWD#x zN0%neAux3K++8fc7a2M z<#gZ0TO!fAL2L=5WA3%~b*QmvRt=TQNjH?;{+nlobZ+3)(ALrovDfL{aOPU`MuTYW zayM9y41(a6?CabsG;{+l@=_DLDkxC9tOgzhI+lOem7oe&6m?M-liG*xm=c45dLzV(_T1mP!Ji@d| zBI1_PHdJmK`Z6$t5s`2-N6CfS0y2L;v#v0sIr%i9tqD@;&>p~i=jZsz^6p0V$oM{f zxZf^`D(jKG-Xsy zK`?|sl)uL_7RKhf>DQ!NF$_|?2gO$tu9>}AAmWw*e*@is2M`KwIVLs#HC6b4GE4ky4AC#dKK67i+6n^T>*U!BUkRlJiqeaF@Ze`jCZ1|A{NmKc2FZH%{P15&JB+`&af7&wW|GNqy!Fuebp3v|fARrW}i1_%^VFf%0o3hj*7#TUm-SjX+^e z%^lhog<;DI(da-m>N-aOp(e;WHz^R#Ij?g%)*PDHOPvZ41ULSA4ka;2Zp8lVKI(cn zC{d*^Xb+zd@(G5EyBo(z)4b?Qo+e|f@#6bBh`ELHRZs&h7!TZ^Eh19@J5r_%k%w>` zsGaeUC-`uuZj!VNhR{jtml$`gMU$Cc+#9Wu=iQCeK{vD1t*(##BTTp%x*3fWbQI17 zIs1M(3)u9DVLP4VI(|cFxxjL5GMM3h_e~aIoo67BIi%$4_rpMx?Dy-d576%*gIY3jVF>*fIHaoV)waQm^l&G+HmkO6*&OW8V4Dt}h~cz2uiU zrVyb-FmzT8O_Y-<}WM)K8IIYz=Ibv%F?X51JM;H=%Gb% z*5Rk`t}D8aj8fggeE*U14{OWhVv1B8*qc-D?9aFdi-Y6_Xl`IeGmovP9Z~c!?ySu1 z8KM5SBx%BeL%Sut9IKV3iS0FYHBy-q=zQr?>!Yox+pZnrHT%NN_SLn?*XEo;b}+>6 zcEJ*BQ+JtQ#l0 z^LAwlfj{)eRV}R6{(?eo{S+yN4~v{=;;*e+TP2%V{`0G{XNqmaR(NCNDwam!iCJ*D zK#LhK*c*1*{YPszZQ0o$C8#TlhSL$gQ(sXl+38R_`T2c8OHL>Mo=qtPc-0Occf5Vf z1^@KXVOiR-YMFc^Z^*lz*6UwGQ3le@jq2sZjy)mN6sO}C0LaWcR8)mne%k!4pv8-~ z=0-G?q?`|num{`L2)ySns2kK00)dGy%Za!g?xl*e8PTp*p0Mr|1_uUe6!>F~zYT8wV z1l_JmUW}<=3Y*VSGQW|ZGT|=UI6U?jyBIZlfNRBDe;^rz$4DPH%M$}~29$eWwd4+(PlyZ2Auj}> zkacohmco;=7sE^zTvC%mt!rN-rZl{aQlyr|;wh!eAQB2I>(QYf+(&&p=N@jZ`@y}s zVjcFf^w)`gvaP+Ly9h`+t#N&y;-OM=BjxAu^o(EAFI`p)0o~uyyge*neV8GPI^cTI z!-2|~{U?P`4Vdgpf$Xne!d8Z5kn_@j{kKY%;mzJ<(HQR@2PGOL2?yMZ<0SA;;YQCF z+>*%bc~m4sejA!dm=pw+MKxOJuzNr2FeYZC_?Bg3-9ksAhlRAHDHA5=D=biwcwX$yVR@Kj?A3xdMz6ZJ6Necu{!=V`^^P9w#`CG&C@l?G(OPOqyk@q7Bh`%Li0>i8RTEsph8V-zi-w^E3y>n ziN{kM3QU_hQDb*`nTEyQHeUHrAl_o2ORtt zy29LN{{_+sJF0UA=|1MObW(x1Kga4PuW1h0v~AVygPan)<1??PKEKS6H~-EwnV-ku za)$2f#5;uB)Lri$R@SdB_r|)W7d*wheS7%KWD-X1NNuDd7%Y?};yX*3ux95HIidj?O8Y@@a 
zlW}^JQ12;jGALcEEe^m5rO|3F>CEINzVf3<(z;VWRqp)*r}a0>5GQX+3A;LVI2P70F+ucY9{>JIz##GNN_-Z_NEbc`1bugkNX# zht{fntiR(gH1}Bg9Ubpj;&f^at&;R8wS95d%l=Dp)uCW=Z)Vi%es0PZ!o(Yf>{+%+ zau-B)$O%|3pNhmeL)&%B=JR>@-lA>YSek0|eLiy9JI@f(!u^vCEhx?Q1a25CHH6dX*Z6W`7no=c_BR zjh=*^#l1kso;U)YsJ4`y#jk<#jEH}doi&aT(Ia_BqOqD@KrSDF1rV~ zTdA*yRXnpFLs~(9n!!qny-rl%j>r&N4bx8=(pkEZ*3GPB5W4>0`AgLw?L@dD&NuFG zfq*TPUe^flpWoZ%8IpWGdtm4O%2PZ=VZIiEfj3c-reUY+1it|^6}P9VtfNBH@Z01f zMAfGB6GMO7*PIOH5U&|fd;Md^a`Ly=DVxqZKA!jT-|+XiyQ%!Y!>%gyFAUuH7)ie^ zWaBGd4sFOXgdQ!By5L+JnCxviA3U#s4V^aJS-E^{rC(Q2 ztVz?uwdbPmQQepEyz5JX*v<#dWKqmgBIb-=DIc36c`a&@J7#sASz5_|+dbdy`Sq03 zBXUDWt6drhYjq$J^|AMpo_>kr`RUR3Tov=wb{@Yse)0?JFuFG>KkTCR{`))Mc6>eF z3w;2L-l-^qY!G2_E2J36!$$_Ep;+?~iK*ITTB#2{dPjQrnjxTeE4Y2v@V>&>Wf-XQ zvNDjT$Vk#>-nt^4%#+$*(&EiDh1!Tl->k33H?Grab+>UnO&$Z_Y5 zaZ9oOG_Q)K4B7^2hhwYukRjn z5lQJq*$dXck-gEvfwF?Toi?rsO~h25nFWIAes>NhOyyd_KW}w=Z7Q0+h)*6*`6uNJ z`X#LY>Gtrd3#xKc#LmtAz8z|lt6wt&F(a`ra#OZ)M;!J(oZQPFM<6l`O_J6Ic63R) z#Co;$xPB^zK5o)*;EUI>jcfiGq4mOzLq@ByCM_{akbWcDd-8>eZiKV%TPEMFje{6))56Q-*j2UI1@fKZoH3=t z9pG)zy+cjcj57)A6vHR4zYF~G8koUSAeG#rj&7nNaHOz+`*l*TCGD^bxhDlluneb? z$`-8XO1HH>t)pt_!v6w4iB zB|D)De@H_tJV&b4KpvBzYkZ8cnpKLf#U2HOta8{Y$$aqhSO{<;(t53;LZ23Wx^J*~ z@@;%RYY8^Ff@K=n9BW_A*`0Q{fV8evUQO?)PYwzYu9)#IY@_9IB-CIP2}pS2;v2~wNTW+TivSr zc1Lw0+6TD7-1x~;#MO2n0drY zXE`KJE7d0Jnj-i>&Fdh3=VUIa4KjGIFOrvl@C^4^;EHMQuFIGyF#s_fhAlPfw?p`N zLAViX0{&v;fcH5_0cYYwbJ6t11Yzr&i{^ewhKb2oDUJESPhg|-IsV#}F{e*HaI?y5 zNJ+ibC=s`Dld14E>}K%ZYuGhxf2~jyc8$E8Pl;b?&r3nKSD_IhPIs=02=PgfIhUL~ z_P92_2)4TYzxkwBQ*)iqBq)0Iuk~~jQaFMOM3}< z!-2N4z-9J(dWI*t_7!xc1dVZea!x2uLaZFKU3a64EQJ*@na)>S-4(;XTbwfEzf-#J z#@ACDniy2Wu>38G_djOVU2~Ywg_Me%jqW(eVvZi29C%ibgL`lCAOnBVpl@Zh{@~=v z-o{gLzX7u>=$R5ou%C_- zI<#wR*BVzKjA#@^h#y_+4PD9UB zvE#C?3qzs(pA6L(^I_zrgfJUVFG2_VZ%}k79Qm7iaWM1eDO(%AP@UOoRMFI(3%*+9 zWZ1ZH3yEZ$^5TXt?gZ1x0eJB5A7{h%FgGqgF7E4m&DWwUG4nj$ae&$-PTr12600m2!dy?HUmu=M9>J6Bhv{fkK^YlI$MZ@=r3uCt-Y z{&oQO8bu~gOJ)vculDPKaRTum0xb-u^a>33pe$6A#3@ym1GY7CPYkeJEj%mYiLH%uF zV(EOHzW5GaCAi6GUAnhoghiw(v&}y8>jyS| zba-pLWdGkynXCDaklMk$bTR>_%^c7yiRV%uY2HTDHeoBLm)(BV2~AYvoI_eXx%-@- zijN5AGy1O45jb*(oOCaKZ{~iz44_iF3h@mg_UM(sinZBd6{z{%WSgmJc{5!P1eo-R|JaE$bG=8w-vv2(yz-iY0WByfuzKW;wLaS3*N zd#JcW(b4&fHQzvNUwEhY^QaR4M5>^X+&AqXw|~!$!3E?^6Lu+4k3Lz-26x2rrZq)b zTKn=>taCO~O!#0I1@@=x70HAN7By_{Tl2ugUlHnIbF(wA@|Z#mBF^hMe7Z>+so8YS zudd42EGEoyIm;6^s0TN=Y$fjBFM1+#9V}{{wZ(5&|Hi_n^#vts)04s+X<}DTjrrq{ zsO_!7^bAZ9Ue}W!8tN+p#yc+au+tz9{R8 z*PlM5mZZuR4jjoiH%MBI2Z_$)%<}=`o}+(Zws$$_8aN(O2QuaK_~ujPCp_w$9}Jwn z9Jlhu`SQ*7hvFGuQSGB88-`qe8}5@<$HVnrJ0XQ5a)KaK?sx0r20yi#-3K3@xEyce zebm!vOWqPF?!d!VW!XX{sm{)0p4$QcbI9(K5EHobZ2ap^)daEn!)lPN#vNG}^n91$ zN^%!4%1SQ6UU^z0rMiI{+0WhSbHn2Ez8B4lpHP2z&wrZ$XaXaTJlMUHQdJsqx`(I-(tpG@^%vI+B@o~VoHTeq0xiiU6Gv|eb7c&VX5g2`F1ywjlpd5py&?`b`K zw)!2uVKa#f8|Iv}mXeG%2JJ`wI`^JGSS6%r9r1mK;xDSN(ZkC>N(#DzQ^GQb(L}>| zAF$0*z;0r-@s;p2_BWcC34Jy(?HkY?9I0|now^*ardfOSRL0R*JVX=eXyU&ECFml} ziGRmiXpD!yr@YZfP42SV>cfv@d~5|Aq_L!cU&J)NFrd);Zv)?BnXTC~NrSFiiJ2XEy+AUc4J=QEAPHBp*vsd{L_Alyz6)2 zXC~c=xk?WfrasOrm0o%_^;W9^I%7Vbeg0r#qO+L4SATQb@!|3Y^DsjH_urcTbOcI1 zO&WP7V$ikopON!M_<^il%YWxlnQEJwCqB7^?K-*x<8w>PHBr65qu;*pIrK@L^PFa_ zu&)G(&xW42@0-HBdDof=sR;P9LqTL=DVC(Vc;}Mi>1GN`M4aLyx@@|gXG_VaodfEy z+9_6+z5m#eS_mVI3~7%tya$2rfng3A2H2K}Z9CPMyjS#7mJ@w`FzoB8a$k1TRXcy3 zOBrQo$6>wAFtx9|)qJANRs9?(jBsc8^7~E1FTmJ)Jdh{zWMu3WpN?e*aq-O;VOO)$ z4Gd@XO$?p-b`#C!kDfVn{&eU>)55GiTa%O&bLa{U8_l5$Z423Z3bTF}T=8M4pmRtj zY50}+&p_C$DNc26UkW;7OAL<&&!ffd|2T80A?O2A+-V#)F5j{9UQelf+Z*}rH%8rW z=67p0@t8qHT8+3)?Qqa`pG=E}7M48#OYb-N?Y{q{ajNK9Nwzn#H`&AvEIng{^K#Ov 
[base85-encoded GIT binary patch data omitted]
z5rrJ%!WZeAdaTH^%Y2*e;oG!Uwf05q7Q^0(t9v@Cf9IUUL%Z6@#ga?W{5q5_Q?@4A z_=mMUg`>R>!5ZCcnEke>*2pjvoZFjRTno@(SyH*Xx|V!9dNu@$9Xm9l+89&9vUmHS z6L77f-g#s*)cCF|R{hNqr2+2$!3`~>wK&$I#$&IsS&5!6Q66$(p$EXtO6m>sh>J|( zx|O@O)8{~&$i6A9Po+qNq7MLPo%Y;zfQDi;=|vtp9_oo3HBYwu1fm#Y0O-JYH$O;?GsJ?Tce3vnw1t{Q^9Ae3C`&I4*@ z7w^}#xxA_Na?4x}s^C=CD#|`|B&Z!9Td}^h@7c*dKdEGJUg0s)U0h%pFQTR6`|pH& zfJmP9%(-?zOSl>fFh#@+oNCycFLvn! z*w9Ml)hkNybX@pCgJNqUjHSd~J?hfCX6x9THSL8^#|wAsKaIN|6FG(1l9(UgKX9v{sMgf0%NCXu|%KLe>VG zF4*RTl|8_Pe7jU%-{p9=5l!Hcz|w{zHl^)Mc*a-{P-p`klwUXiLLPQnfA_KzlA5f-y&}Vo)7;kJa|3pD6 z(bQ*AX0**^xHMJ*nxDS+kMYN-C0Kz|S*>#0LS9H24Rg6l(tqd{WO{G~4m|mx941B% zo2dF|o3C{>#qb`n1i5z9N@WUn->AHvFAY)LKEy1@nsH05;Budy)p=#)V(ddxWYKcV z5U(sD6_quW?ZB&Xs&C4FXu)q-%bK>W%qHwKq9}mg7%h8v6hKUk+(DHl+tu%di9b+c z0U;UT)rYT+)V8(X?%uQNj-4Z3xgJZwiuRR%SP|B-@%9DdMH_lYE>;Pz7qoSY2uW#7 z&^r~a3Ise{ZHT+)Ej$M`s8aIky<90G0lDDHqyPv(bRuP&D1XdqFskTA*@WR4Ztoi4 z^KE(fMY(;pY31k@|ihX!>2x)|Ltl`zqS|Nx(1= z9{uOyFJbMy zX+7VIHTIQ>ULAWlI8X&rrATWxXL?Fqq#rAAWzPd?_*ty=@ZPEPMxC&v z)K4ll67++&Ik-qspTv%oj)c4$Nb2?{oM}FqYvTcYeoEsN+!t8!zBed3OwipIJJ|E? zg)abpJ1Y=ntZ5&kb}Z6y{F`A57+f$1O99on1^0)fo~=r(JQ9%XghIlHW-x&T@NCse z8QqpiXD0znU^i*KBN-&mK1~ixNF}(z<)}iXdABm61JzKtQ)pgD&WD~bukmC>j1>Zr zjKjL8uNn|$quvE7_5nD|d)fvB0J_Z`QEHIl#UK2mCZbSdVO-#9=W|S0GcnHtS}c@0 z=nVs@dzDWrBh1e({!k8!wGMbX5K%Ay@s)V5QNyha^0>@5$T5D?#`<_xVGSF`&oZ_Z zar)0|-vWsj`k)G{1*fVG1d-`BczpuzqN#j>jo)PYaJlEMCID0{ih$RFW z(HF*thFEA+H6f|*_@EfPsu4d+EV>fKo|QJ!bMqAl;TxXw@ziM2o#3EjnZ<#&_>vIS z?^|t#ls}1b4A3Lp?AfJZK!mrfImn9J{fHDAa-bdpZJpor6)4k&tM<*mQ(fKr- zEe(e)G^9;h(-#y8v@9c@VJQA#JM?aOF<+?_6 zx@X>5(#Mw`Egh@&$@-`1S=p4NXDw?4FZ$D3lHn#+#_V~@t%Lpyi=SXuWET9U@|rUq((~uYk3x z7@@rXe~@`PVdin%*ydcyEi3q;aqxGp!Ta_Gd$01Md&3vZf^2TR#N1*D*tefw8!I=Ia{XubbYzkLV& zeAfGiV%C=Cp(`yd7LgZ`<03bptd;ir|LfvS4?5`YhdF<{VTt+DWk%+QcEUeno4<4X K_V#*D_WuAET58w; diff --git a/examples/research_projects/pplm/pplm_classification_head.py b/examples/research_projects/pplm/pplm_classification_head.py deleted file mode 100644 index e26521fe391..00000000000 --- a/examples/research_projects/pplm/pplm_classification_head.py +++ /dev/null @@ -1,19 +0,0 @@ -from torch import nn - - -class ClassificationHead(nn.Module): - """Classification Head for transformer encoders""" - - def __init__(self, class_size, embed_size): - super().__init__() - self.class_size = class_size - self.embed_size = embed_size - # self.mlp1 = nn.Linear(embed_size, embed_size) - # self.mlp2 = (nn.Linear(embed_size, class_size)) - self.mlp = nn.Linear(embed_size, class_size) - - def forward(self, hidden_state): - # hidden_state = nn.functional.relu(self.mlp1(hidden_state)) - # hidden_state = self.mlp2(hidden_state) - logits = self.mlp(hidden_state) - return logits diff --git a/examples/research_projects/pplm/requirements.txt b/examples/research_projects/pplm/requirements.txt deleted file mode 100644 index 630d1b0f8f6..00000000000 --- a/examples/research_projects/pplm/requirements.txt +++ /dev/null @@ -1,22 +0,0 @@ -tensorboard -scikit-learn -seqeval -psutil -sacrebleu -rouge-score -tensorflow_datasets -pytorch-lightning -matplotlib -git-python==1.0.3 -faiss-cpu -streamlit -elasticsearch -nltk -pandas -datasets >= 1.1.3 -fire -pytest -conllu -sentencepiece != 0.1.92 -protobuf -transformers==4.48.0 diff --git a/examples/research_projects/pplm/run_pplm.py b/examples/research_projects/pplm/run_pplm.py deleted file mode 100644 index cc49b7fa83c..00000000000 --- a/examples/research_projects/pplm/run_pplm.py +++ /dev/null @@ -1,823 +0,0 @@ -#! 
/usr/bin/env python3 -# coding=utf-8 - -# Copyright (c) 2019 Uber Technologies, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -Example command with bag of words: -python run_pplm.py -B space --cond_text "The president" --length 100 --gamma 1.5 --num_iterations 3 --num_samples 10 --stepsize 0.01 --window_length 5 --kl_scale 0.01 --gm_scale 0.95 - -Example command with discriminator: -python run_pplm.py -D sentiment --class_label 3 --cond_text "The lake" --length 10 --gamma 1.0 --num_iterations 30 --num_samples 10 --stepsize 0.01 --kl_scale 0.01 --gm_scale 0.95 -""" - -import argparse -import json -from operator import add -from typing import List, Optional, Tuple, Union - -import numpy as np -import torch -from pplm_classification_head import ClassificationHead -from torch import nn -from tqdm import trange - -from transformers import GPT2LMHeadModel, GPT2Tokenizer -from transformers.file_utils import cached_path - - -PPLM_BOW = 1 -PPLM_DISCRIM = 2 -PPLM_BOW_DISCRIM = 3 -SMALL_CONST = 1e-15 -BIG_CONST = 1e10 - -BAG_OF_WORDS_ARCHIVE_MAP = { - "legal": "https://s3.amazonaws.com/models.huggingface.co/bert/pplm/bow/legal.txt", - "military": "https://s3.amazonaws.com/models.huggingface.co/bert/pplm/bow/military.txt", - "politics": "https://s3.amazonaws.com/models.huggingface.co/bert/pplm/bow/politics.txt", - "religion": "https://s3.amazonaws.com/models.huggingface.co/bert/pplm/bow/religion.txt", - "science": "https://s3.amazonaws.com/models.huggingface.co/bert/pplm/bow/science.txt", - "space": "https://s3.amazonaws.com/models.huggingface.co/bert/pplm/bow/space.txt", - "technology": "https://s3.amazonaws.com/models.huggingface.co/bert/pplm/bow/technology.txt", -} - -DISCRIMINATOR_MODELS_PARAMS = { - "clickbait": { - "url": "https://s3.amazonaws.com/models.huggingface.co/bert/pplm/discriminators/clickbait_classifier_head.pt", - "class_size": 2, - "embed_size": 1024, - "class_vocab": {"non_clickbait": 0, "clickbait": 1}, - "default_class": 1, - "pretrained_model": "openai-community/gpt2-medium", - }, - "sentiment": { - "url": "https://s3.amazonaws.com/models.huggingface.co/bert/pplm/discriminators/SST_classifier_head.pt", - "class_size": 5, - "embed_size": 1024, - "class_vocab": {"very_positive": 2, "very_negative": 3}, - "default_class": 3, - "pretrained_model": "openai-community/gpt2-medium", - }, -} - - -def top_k_filter(logits, k, probs=False): - """ - Masks everything but the k top entries as -infinity (1e10). - Used to mask logits such that e^-infinity -> 0 won't contribute to the - sum of the denominator. 
- """ - if k == 0: - return logits - else: - values = torch.topk(logits, k)[0] - batch_mins = values[:, -1].view(-1, 1).expand_as(logits) - if probs: - return torch.where(logits < batch_mins, torch.ones_like(logits) * 0.0, logits) - return torch.where(logits < batch_mins, torch.ones_like(logits) * -BIG_CONST, logits) - - -def perturb_past( - past, - model, - last, - unpert_past=None, - unpert_logits=None, - accumulated_hidden=None, - grad_norms=None, - stepsize=0.01, - one_hot_bows_vectors=None, - classifier=None, - class_label=None, - loss_type=0, - num_iterations=3, - horizon_length=1, - window_length=0, - decay=False, - gamma=1.5, - kl_scale=0.01, - device="cuda", -): - # Generate inital perturbed past - grad_accumulator = [(np.zeros(p.shape).astype("float32")) for p in past] - - if accumulated_hidden is None: - accumulated_hidden = 0 - - if decay: - decay_mask = torch.arange(0.0, 1.0 + SMALL_CONST, 1.0 / (window_length))[1:] - else: - decay_mask = 1.0 - - # TODO fix this comment (SUMANTH) - # Generate a mask is gradient perturbated is based on a past window - _, _, _, curr_length, _ = past[0].shape - - if curr_length > window_length and window_length > 0: - ones_key_val_shape = tuple(past[0].shape[:-2]) + (window_length,) + tuple(past[0].shape[-1:]) - - zeros_key_val_shape = tuple(past[0].shape[:-2]) + (curr_length - window_length,) + tuple(past[0].shape[-1:]) - - ones_mask = torch.ones(ones_key_val_shape) - ones_mask = decay_mask * ones_mask.permute(0, 1, 2, 4, 3) - ones_mask = ones_mask.permute(0, 1, 2, 4, 3) - - window_mask = torch.cat((ones_mask, torch.zeros(zeros_key_val_shape)), dim=-2).to(device) - else: - window_mask = torch.ones_like(past[0]).to(device) - - # accumulate perturbations for num_iterations - loss_per_iter = [] - new_accumulated_hidden = None - for i in range(num_iterations): - print("Iteration ", i + 1) - curr_perturbation = [torch.from_numpy(p_).requires_grad_(True).to(device=device) for p_ in grad_accumulator] - # make sure p_.grad is not None - for p_ in curr_perturbation: - p_.retain_grad() - - # Compute hidden using perturbed past - perturbed_past = list(map(add, past, curr_perturbation)) - _, _, _, curr_length, _ = curr_perturbation[0].shape - lm_output = model(last, past_key_values=perturbed_past) - all_logits, all_hidden = lm_output["logits"], lm_output["hidden_states"] - hidden = all_hidden[-1] - new_accumulated_hidden = accumulated_hidden + torch.sum(hidden, dim=1).detach() - # TODO: Check the layer-norm consistency of this with trained discriminator (Sumanth) - logits = all_logits[:, -1, :] - probs = nn.functional.softmax(logits, dim=-1) - - loss = 0.0 - loss_list = [] - if loss_type == PPLM_BOW or loss_type == PPLM_BOW_DISCRIM: - for one_hot_bow in one_hot_bows_vectors: - bow_logits = torch.mm(probs, torch.t(one_hot_bow)) - bow_loss = -torch.log(torch.sum(bow_logits)) - loss += bow_loss - loss_list.append(bow_loss) - print(" pplm_bow_loss:", loss.data.cpu().numpy()) - - if loss_type == 2 or loss_type == 3: - ce_loss = nn.CrossEntropyLoss() - # TODO why we need to do this assignment and not just using unpert_past? 
(Sumanth) - curr_unpert_past = unpert_past - curr_probs = torch.unsqueeze(probs, dim=1) - wte = model.resize_token_embeddings() - for _ in range(horizon_length): - inputs_embeds = torch.matmul(curr_probs, wte.weight.data) - lm_output = model(past_key_values=curr_unpert_past, inputs_embeds=inputs_embeds) - curr_all_logits, curr_unpert_past, curr_all_hidden = ( - lm_output["logits"], - lm_output["past_key_values"], - lm_output["hidden_states"], - ) - curr_logits = curr_all_logits[:, -1, :] - curr_probs = nn.functional.softmax(curr_logits, dim=-1) - curr_probs = torch.unsqueeze(curr_probs, dim=1) - curr_hidden = curr_all_hidden[-1] - new_accumulated_hidden = new_accumulated_hidden + torch.sum(curr_hidden, dim=1) - - prediction = classifier(new_accumulated_hidden / (curr_length + 1 + horizon_length)) - - label = torch.tensor(prediction.shape[0] * [class_label], device=device, dtype=torch.long) - discrim_loss = ce_loss(prediction, label) - print(" pplm_discrim_loss:", discrim_loss.data.cpu().numpy()) - loss += discrim_loss - loss_list.append(discrim_loss) - - kl_loss = 0.0 - if kl_scale > 0.0: - unpert_probs = nn.functional.softmax(unpert_logits[:, -1, :], dim=-1) - unpert_probs = unpert_probs + SMALL_CONST * (unpert_probs <= SMALL_CONST).float().to(device).detach() - correction = SMALL_CONST * (probs <= SMALL_CONST).float().to(device).detach() - corrected_probs = probs + correction.detach() - kl_loss = kl_scale * ((corrected_probs * (corrected_probs / unpert_probs).log()).sum()) - print(" kl_loss", kl_loss.data.cpu().numpy()) - loss += kl_loss - - loss_per_iter.append(loss.data.cpu().numpy()) - print(" pplm_loss", (loss - kl_loss).data.cpu().numpy()) - - # compute gradients - loss.backward() - - # calculate gradient norms - if grad_norms is not None and loss_type == PPLM_BOW: - grad_norms = [ - torch.max(grad_norms[index], torch.norm(p_.grad * window_mask)) - for index, p_ in enumerate(curr_perturbation) - ] - else: - grad_norms = [ - (torch.norm(p_.grad * window_mask) + SMALL_CONST) for index, p_ in enumerate(curr_perturbation) - ] - - # normalize gradients - grad = [ - -stepsize * (p_.grad * window_mask / grad_norms[index] ** gamma).data.cpu().numpy() - for index, p_ in enumerate(curr_perturbation) - ] - - # accumulate gradient - grad_accumulator = list(map(add, grad, grad_accumulator)) - - # reset gradients, just to make sure - for p_ in curr_perturbation: - p_.grad.data.zero_() - - # removing past from the graph - new_past = [] - for p_ in past: - new_past.append(p_.detach()) - past = new_past - - # apply the accumulated perturbations to the past - grad_accumulator = [torch.from_numpy(p_).requires_grad_(True).to(device=device) for p_ in grad_accumulator] - pert_past = list(map(add, past, grad_accumulator)) - - return pert_past, new_accumulated_hidden, grad_norms, loss_per_iter - - -def get_classifier( - name: Optional[str], class_label: Union[str, int], device: str -) -> Tuple[Optional[ClassificationHead], Optional[int]]: - if name is None: - return None, None - - params = DISCRIMINATOR_MODELS_PARAMS[name] - classifier = ClassificationHead(class_size=params["class_size"], embed_size=params["embed_size"]).to(device) - if "url" in params: - resolved_archive_file = cached_path(params["url"]) - elif "path" in params: - resolved_archive_file = params["path"] - else: - raise ValueError("Either url or path have to be specified in the discriminator model parameters") - classifier.load_state_dict(torch.load(resolved_archive_file, map_location=device)) - classifier.eval() - - if 
isinstance(class_label, str): - if class_label in params["class_vocab"]: - label_id = params["class_vocab"][class_label] - else: - label_id = params["default_class"] - print("class_label {} not in class_vocab".format(class_label)) - print("available values are: {}".format(params["class_vocab"])) - print("using default class {}".format(label_id)) - - elif isinstance(class_label, int): - if class_label in set(params["class_vocab"].values()): - label_id = class_label - else: - label_id = params["default_class"] - print("class_label {} not in class_vocab".format(class_label)) - print("available values are: {}".format(params["class_vocab"])) - print("using default class {}".format(label_id)) - - else: - label_id = params["default_class"] - - return classifier, label_id - - -def get_bag_of_words_indices(bag_of_words_ids_or_paths: List[str], tokenizer) -> List[List[List[int]]]: - bow_indices = [] - for id_or_path in bag_of_words_ids_or_paths: - if id_or_path in BAG_OF_WORDS_ARCHIVE_MAP: - filepath = cached_path(BAG_OF_WORDS_ARCHIVE_MAP[id_or_path]) - else: - filepath = id_or_path - with open(filepath, "r") as f: - words = f.read().strip().split("\n") - bow_indices.append([tokenizer.encode(word.strip(), add_prefix_space=True) for word in words]) - return bow_indices - - -def build_bows_one_hot_vectors(bow_indices, tokenizer, device="cuda"): - if bow_indices is None: - return None - - one_hot_bows_vectors = [] - for single_bow in bow_indices: - single_bow = list(filter(lambda x: len(x) <= 1, single_bow)) - single_bow = torch.tensor(single_bow).to(device) - num_words = single_bow.shape[0] - one_hot_bow = torch.zeros(num_words, tokenizer.vocab_size).to(device) - one_hot_bow.scatter_(1, single_bow, 1) - one_hot_bows_vectors.append(one_hot_bow) - return one_hot_bows_vectors - - -def full_text_generation( - model, - tokenizer, - context=None, - num_samples=1, - device="cuda", - bag_of_words=None, - discrim=None, - class_label=None, - length=100, - stepsize=0.02, - temperature=1.0, - top_k=10, - sample=False, - num_iterations=3, - grad_length=10000, - horizon_length=1, - window_length=0, - decay=False, - gamma=1.5, - gm_scale=0.9, - kl_scale=0.01, - repetition_penalty=1.0, - **kwargs, -): - classifier, class_id = get_classifier(discrim, class_label, device) - - bow_indices = [] - if bag_of_words: - bow_indices = get_bag_of_words_indices(bag_of_words.split(";"), tokenizer) - - if bag_of_words and classifier: - print("Both PPLM-BoW and PPLM-Discrim are on. 
This is not optimized.") - loss_type = PPLM_BOW_DISCRIM - - elif bag_of_words: - loss_type = PPLM_BOW - print("Using PPLM-BoW") - - elif classifier is not None: - loss_type = PPLM_DISCRIM - print("Using PPLM-Discrim") - - else: - raise Exception("Specify either a bag of words or a discriminator") - - unpert_gen_tok_text, _, _ = generate_text_pplm( - model=model, - tokenizer=tokenizer, - context=context, - device=device, - length=length, - sample=sample, - perturb=False, - repetition_penalty=repetition_penalty, - ) - if device == "cuda": - torch.cuda.empty_cache() - - pert_gen_tok_texts = [] - discrim_losses = [] - losses_in_time = [] - - for i in range(num_samples): - pert_gen_tok_text, discrim_loss, loss_in_time = generate_text_pplm( - model=model, - tokenizer=tokenizer, - context=context, - device=device, - perturb=True, - bow_indices=bow_indices, - classifier=classifier, - class_label=class_id, - loss_type=loss_type, - length=length, - stepsize=stepsize, - temperature=temperature, - top_k=top_k, - sample=sample, - num_iterations=num_iterations, - grad_length=grad_length, - horizon_length=horizon_length, - window_length=window_length, - decay=decay, - gamma=gamma, - gm_scale=gm_scale, - kl_scale=kl_scale, - repetition_penalty=repetition_penalty, - ) - pert_gen_tok_texts.append(pert_gen_tok_text) - if classifier is not None: - discrim_losses.append(discrim_loss.data.cpu().numpy()) - losses_in_time.append(loss_in_time) - - if device == "cuda": - torch.cuda.empty_cache() - - return unpert_gen_tok_text, pert_gen_tok_texts, discrim_losses, losses_in_time - - -def generate_text_pplm( - model, - tokenizer, - context=None, - past=None, - device="cuda", - perturb=True, - bow_indices=None, - classifier=None, - class_label=None, - loss_type=0, - length=100, - stepsize=0.02, - temperature=1.0, - top_k=10, - sample=False, - num_iterations=3, - grad_length=10000, - horizon_length=1, - window_length=0, - decay=False, - gamma=1.5, - gm_scale=0.9, - kl_scale=0.01, - repetition_penalty=1.0, -): - output_so_far = None - if context: - context_t = torch.tensor(context, device=device, dtype=torch.long) - while len(context_t.shape) < 2: - context_t = context_t.unsqueeze(0) - output_so_far = context_t - - # collect one hot vectors for bags of words - one_hot_bows_vectors = build_bows_one_hot_vectors(bow_indices, tokenizer, device) - - grad_norms = None - last = None - unpert_discrim_loss = 0 - loss_in_time = [] - for i in trange(length, ascii=True): - # Get past/probs for current output, except for last word - # Note that GPT takes 2 inputs: past + current_token - - # run model forward to obtain unperturbed - if past is None and output_so_far is not None: - last = output_so_far[:, -1:] - if output_so_far.shape[1] > 1: - past = model(output_so_far[:, :-1])["past_key_values"] - - lm_output = model(output_so_far) - unpert_logits, unpert_past, unpert_all_hidden = ( - lm_output["logits"], - lm_output["past_key_values"], - lm_output["hidden_states"], - ) - unpert_last_hidden = unpert_all_hidden[-1] - - # check if we are abowe grad max length - if i >= grad_length: - current_stepsize = stepsize * 0 - else: - current_stepsize = stepsize - - # modify the past if necessary - if not perturb or num_iterations == 0: - pert_past = past - - else: - accumulated_hidden = unpert_last_hidden[:, :-1, :] - accumulated_hidden = torch.sum(accumulated_hidden, dim=1) - - if past is not None: - pert_past, _, grad_norms, loss_this_iter = perturb_past( - past, - model, - last, - unpert_past=unpert_past, - unpert_logits=unpert_logits, - 
accumulated_hidden=accumulated_hidden, - grad_norms=grad_norms, - stepsize=current_stepsize, - one_hot_bows_vectors=one_hot_bows_vectors, - classifier=classifier, - class_label=class_label, - loss_type=loss_type, - num_iterations=num_iterations, - horizon_length=horizon_length, - window_length=window_length, - decay=decay, - gamma=gamma, - kl_scale=kl_scale, - device=device, - ) - loss_in_time.append(loss_this_iter) - else: - pert_past = past - - lm_output = model(last, past_key_values=pert_past) - pert_logits, past = ( - lm_output["logits"], - lm_output["past_key_values"], - ) - pert_logits = pert_logits[:, -1, :] / temperature # + SMALL_CONST - - for token_idx in set(output_so_far[0].tolist()): - if pert_logits[0, token_idx] < 0: - pert_logits[0, token_idx] *= repetition_penalty - else: - pert_logits[0, token_idx] /= repetition_penalty - - pert_probs = nn.functional.softmax(pert_logits, dim=-1) - - if classifier is not None: - ce_loss = nn.CrossEntropyLoss() - prediction = classifier(torch.mean(unpert_last_hidden, dim=1)) - label = torch.tensor([class_label], device=device, dtype=torch.long) - unpert_discrim_loss = ce_loss(prediction, label) - print("unperturbed discrim loss", unpert_discrim_loss.data.cpu().numpy()) - else: - unpert_discrim_loss = 0 - - # Fuse the modified model and original model - if perturb: - unpert_probs = nn.functional.softmax(unpert_logits[:, -1, :], dim=-1) - - pert_probs = (pert_probs**gm_scale) * (unpert_probs ** (1 - gm_scale)) # + SMALL_CONST - pert_probs = top_k_filter(pert_probs, k=top_k, probs=True) # + SMALL_CONST - - # rescale - if torch.sum(pert_probs) <= 1: - pert_probs = pert_probs / torch.sum(pert_probs) - - else: - pert_logits = top_k_filter(pert_logits, k=top_k) # + SMALL_CONST - pert_probs = nn.functional.softmax(pert_logits, dim=-1) - - # sample or greedy - if sample: - last = torch.multinomial(pert_probs, num_samples=1) - - else: - _, last = torch.topk(pert_probs, k=1, dim=-1) - - # update context/output_so_far appending the new token - output_so_far = last if output_so_far is None else torch.cat((output_so_far, last), dim=1) - - print(tokenizer.decode(output_so_far.tolist()[0])) - - return output_so_far, unpert_discrim_loss, loss_in_time - - -def set_generic_model_params(discrim_weights, discrim_meta): - if discrim_weights is None: - raise ValueError("When using a generic discriminator, discrim_weights need to be specified") - if discrim_meta is None: - raise ValueError("When using a generic discriminator, discrim_meta need to be specified") - - with open(discrim_meta, "r") as discrim_meta_file: - meta = json.load(discrim_meta_file) - meta["path"] = discrim_weights - DISCRIMINATOR_MODELS_PARAMS["generic"] = meta - - -def run_pplm_example( - pretrained_model="openai-community/gpt2-medium", - cond_text="", - uncond=False, - num_samples=1, - bag_of_words=None, - discrim=None, - discrim_weights=None, - discrim_meta=None, - class_label=-1, - length=100, - stepsize=0.02, - temperature=1.0, - top_k=10, - sample=False, - num_iterations=3, - grad_length=10000, - horizon_length=1, - window_length=0, - decay=False, - gamma=1.5, - gm_scale=0.9, - kl_scale=0.01, - seed=0, - no_cuda=False, - colorama=False, - repetition_penalty=1.0, -): - # set Random seed - torch.manual_seed(seed) - np.random.seed(seed) - - # set the device - device = "cuda" if torch.cuda.is_available() and not no_cuda else "cpu" - - if discrim == "generic": - set_generic_model_params(discrim_weights, discrim_meta) - - if discrim is not None: - pretrained_model = 
DISCRIMINATOR_MODELS_PARAMS[discrim]["pretrained_model"] - print("discrim = {}, pretrained_model set to discriminator's = {}".format(discrim, pretrained_model)) - - # load pretrained model - model = GPT2LMHeadModel.from_pretrained(pretrained_model, output_hidden_states=True) - model.to(device) - model.eval() - - # load tokenizer - tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model) - - # Freeze GPT-2 weights - for param in model.parameters(): - param.requires_grad = False - - # figure out conditioning text - if uncond: - tokenized_cond_text = tokenizer.encode([tokenizer.bos_token]) - else: - raw_text = cond_text - while not raw_text: - print("Did you forget to add `--cond_text`? ") - raw_text = input("Model prompt >>> ") - tokenized_cond_text = tokenizer.encode(tokenizer.bos_token + raw_text) - - print("= Prefix of sentence =") - print(tokenizer.decode(tokenized_cond_text)) - print() - - # generate unperturbed and perturbed texts - - # full_text_generation returns: - # unpert_gen_tok_text, pert_gen_tok_texts, discrim_losses, losses_in_time - unpert_gen_tok_text, pert_gen_tok_texts, _, _ = full_text_generation( - model=model, - tokenizer=tokenizer, - context=tokenized_cond_text, - device=device, - num_samples=num_samples, - bag_of_words=bag_of_words, - discrim=discrim, - class_label=class_label, - length=length, - stepsize=stepsize, - temperature=temperature, - top_k=top_k, - sample=sample, - num_iterations=num_iterations, - grad_length=grad_length, - horizon_length=horizon_length, - window_length=window_length, - decay=decay, - gamma=gamma, - gm_scale=gm_scale, - kl_scale=kl_scale, - repetition_penalty=repetition_penalty, - ) - - # untokenize unperturbed text - unpert_gen_text = tokenizer.decode(unpert_gen_tok_text.tolist()[0]) - - print("=" * 80) - print("= Unperturbed generated text =") - print(unpert_gen_text) - print() - - generated_texts = [] - - bow_word_ids = set() - if bag_of_words and colorama: - bow_indices = get_bag_of_words_indices(bag_of_words.split(";"), tokenizer) - for single_bow_list in bow_indices: - # filtering all words in the list composed of more than 1 token - filtered = list(filter(lambda x: len(x) <= 1, single_bow_list)) - # w[0] because we are sure w has only 1 item because previous fitler - bow_word_ids.update(w[0] for w in filtered) - - # iterate through the perturbed texts - for i, pert_gen_tok_text in enumerate(pert_gen_tok_texts): - try: - # untokenize unperturbed text - if colorama: - import colorama - - pert_gen_text = "" - for word_id in pert_gen_tok_text.tolist()[0]: - if word_id in bow_word_ids: - pert_gen_text += "{}{}{}".format( - colorama.Fore.RED, - tokenizer.decode([word_id]), - colorama.Style.RESET_ALL, - ) - else: - pert_gen_text += tokenizer.decode([word_id]) - else: - pert_gen_text = tokenizer.decode(pert_gen_tok_text.tolist()[0]) - - print("= Perturbed generated text {} =".format(i + 1)) - print(pert_gen_text) - print() - except Exception as exc: - print("Ignoring error while generating perturbed text:", exc) - - # keep the prefix, perturbed seq, original seq for each index - generated_texts.append((tokenized_cond_text, pert_gen_tok_text, unpert_gen_tok_text)) - - return - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument( - "--pretrained_model", - "-M", - type=str, - default="openai-community/gpt2-medium", - help="pretrained model name or path to local checkpoint", - ) - parser.add_argument("--cond_text", type=str, default="The lake", help="Prefix texts to condition on") - 
parser.add_argument("--uncond", action="store_true", help="Generate from end-of-text as prefix") - parser.add_argument( - "--num_samples", - type=int, - default=1, - help="Number of samples to generate from the modified latents", - ) - parser.add_argument( - "--bag_of_words", - "-B", - type=str, - default=None, - help=( - "Bags of words used for PPLM-BoW. " - "Either a BOW id (see list in code) or a filepath. " - "Multiple BoWs separated by ;" - ), - ) - parser.add_argument( - "--discrim", - "-D", - type=str, - default=None, - choices=("clickbait", "sentiment", "toxicity", "generic"), - help="Discriminator to use", - ) - parser.add_argument( - "--discrim_weights", - type=str, - default=None, - help="Weights for the generic discriminator", - ) - parser.add_argument( - "--discrim_meta", - type=str, - default=None, - help="Meta information for the generic discriminator", - ) - parser.add_argument( - "--class_label", - type=int, - default=-1, - help="Class label used for the discriminator", - ) - parser.add_argument("--length", type=int, default=100) - parser.add_argument("--stepsize", type=float, default=0.02) - parser.add_argument("--temperature", type=float, default=1.0) - parser.add_argument("--top_k", type=int, default=10) - parser.add_argument("--sample", action="store_true", help="Generate from end-of-text as prefix") - parser.add_argument("--num_iterations", type=int, default=3) - parser.add_argument("--grad_length", type=int, default=10000) - parser.add_argument( - "--window_length", - type=int, - default=0, - help="Length of past which is being optimized; 0 corresponds to infinite window length", - ) - parser.add_argument( - "--horizon_length", - type=int, - default=1, - help="Length of future to optimize over", - ) - parser.add_argument("--decay", action="store_true", help="whether to decay or not") - parser.add_argument("--gamma", type=float, default=1.5) - parser.add_argument("--gm_scale", type=float, default=0.9) - parser.add_argument("--kl_scale", type=float, default=0.01) - parser.add_argument("--seed", type=int, default=0) - parser.add_argument("--no_cuda", action="store_true", help="no cuda") - parser.add_argument("--colorama", action="store_true", help="colors keywords") - parser.add_argument( - "--repetition_penalty", - type=float, - default=1.0, - help="Penalize repetition. More than 1.0 -> less repetition", - ) - - args = parser.parse_args() - run_pplm_example(**vars(args)) diff --git a/examples/research_projects/pplm/run_pplm_discrim_train.py b/examples/research_projects/pplm/run_pplm_discrim_train.py deleted file mode 100644 index 43ec5823e37..00000000000 --- a/examples/research_projects/pplm/run_pplm_discrim_train.py +++ /dev/null @@ -1,526 +0,0 @@ -#! /usr/bin/env python3 -# coding=utf-8 - -# Copyright (c) 2019 Uber Technologies, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import argparse -import csv -import json -import math -import time - -import numpy as np -import torch -import torch.optim as optim -import torch.utils.data as data -from nltk.tokenize.treebank import TreebankWordDetokenizer -from pplm_classification_head import ClassificationHead -from torch import nn -from torchtext import data as torchtext_data -from torchtext import datasets -from tqdm import tqdm, trange - -from transformers import GPT2LMHeadModel, GPT2Tokenizer - - -torch.manual_seed(0) -np.random.seed(0) -EPSILON = 1e-10 -example_sentence = "This is incredible! I love it, this is the best chicken I have ever had." -max_length_seq = 100 - - -class Discriminator(nn.Module): - """Transformer encoder followed by a Classification Head""" - - def __init__(self, class_size, pretrained_model="openai-community/gpt2-medium", cached_mode=False, device="cpu"): - super().__init__() - self.tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model) - self.encoder = GPT2LMHeadModel.from_pretrained(pretrained_model) - self.embed_size = self.encoder.transformer.config.hidden_size - self.classifier_head = ClassificationHead(class_size=class_size, embed_size=self.embed_size) - self.cached_mode = cached_mode - self.device = device - - def get_classifier(self): - return self.classifier_head - - def train_custom(self): - for param in self.encoder.parameters(): - param.requires_grad = False - self.classifier_head.train() - - def avg_representation(self, x): - mask = x.ne(0).unsqueeze(2).repeat(1, 1, self.embed_size).float().to(self.device).detach() - hidden = self.encoder.transformer(x)["last_hidden_state"] - masked_hidden = hidden * mask - avg_hidden = torch.sum(masked_hidden, dim=1) / (torch.sum(mask, dim=1).detach() + EPSILON) - return avg_hidden - - def forward(self, x): - if self.cached_mode: - avg_hidden = x.to(self.device) - else: - avg_hidden = self.avg_representation(x.to(self.device)) - - logits = self.classifier_head(avg_hidden) - probs = nn.functional.log_softmax(logits, dim=-1) - - return probs - - -class Dataset(data.Dataset): - def __init__(self, X, y): - """Reads source and target sequences from txt files.""" - self.X = X - self.y = y - - def __len__(self): - return len(self.X) - - def __getitem__(self, index): - """Returns one data pair (source and target).""" - data = {} - data["X"] = self.X[index] - data["y"] = self.y[index] - return data - - -def collate_fn(data): - def pad_sequences(sequences): - lengths = [len(seq) for seq in sequences] - - padded_sequences = torch.zeros(len(sequences), max(lengths)).long() # padding value = 0 - - for i, seq in enumerate(sequences): - end = lengths[i] - padded_sequences[i, :end] = seq[:end] - - return padded_sequences, lengths - - item_info = {} - for key in data[0].keys(): - item_info[key] = [d[key] for d in data] - - x_batch, _ = pad_sequences(item_info["X"]) - y_batch = torch.tensor(item_info["y"], dtype=torch.long) - - return x_batch, y_batch - - -def cached_collate_fn(data): - item_info = {} - for key in data[0].keys(): - item_info[key] = [d[key] for d in data] - - x_batch = torch.cat(item_info["X"], 0) - y_batch = torch.tensor(item_info["y"], dtype=torch.long) - - return x_batch, y_batch - - -def train_epoch(data_loader, discriminator, optimizer, epoch=0, log_interval=10, device="cpu"): - samples_so_far = 0 - discriminator.train_custom() - for batch_idx, (input_t, target_t) in enumerate(data_loader): - input_t, target_t = input_t.to(device), target_t.to(device) - - optimizer.zero_grad() - - output_t = discriminator(input_t) - loss = 
nn.functional.nll_loss(output_t, target_t) - loss.backward(retain_graph=True) - optimizer.step() - - samples_so_far += len(input_t) - - if batch_idx % log_interval == 0: - print( - "Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}".format( - epoch + 1, - samples_so_far, - len(data_loader.dataset), - 100 * samples_so_far / len(data_loader.dataset), - loss.item(), - ) - ) - - -def evaluate_performance(data_loader, discriminator, device="cpu"): - discriminator.eval() - test_loss = 0 - correct = 0 - with torch.no_grad(): - for input_t, target_t in data_loader: - input_t, target_t = input_t.to(device), target_t.to(device) - output_t = discriminator(input_t) - # sum up batch loss - test_loss += nn.functional.nll_loss(output_t, target_t, reduction="sum").item() - # get the index of the max log-probability - pred_t = output_t.argmax(dim=1, keepdim=True) - correct += pred_t.eq(target_t.view_as(pred_t)).sum().item() - - test_loss /= len(data_loader.dataset) - - print( - "Performance on test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)".format( - test_loss, correct, len(data_loader.dataset), 100.0 * correct / len(data_loader.dataset) - ) - ) - - -def predict(input_sentence, model, classes, cached=False, device="cpu"): - input_t = model.tokenizer.encode(input_sentence) - input_t = torch.tensor([input_t], dtype=torch.long, device=device) - if cached: - input_t = model.avg_representation(input_t) - - log_probs = model(input_t).data.cpu().numpy().flatten().tolist() - print("Input sentence:", input_sentence) - print( - "Predictions:", - ", ".join("{}: {:.4f}".format(c, math.exp(log_prob)) for c, log_prob in zip(classes, log_probs)), - ) - - -def get_cached_data_loader(dataset, batch_size, discriminator, shuffle=False, device="cpu"): - data_loader = torch.utils.data.DataLoader(dataset=dataset, batch_size=batch_size, collate_fn=collate_fn) - - xs = [] - ys = [] - for batch_idx, (x, y) in enumerate(tqdm(data_loader, ascii=True)): - with torch.no_grad(): - x = x.to(device) - avg_rep = discriminator.avg_representation(x).cpu().detach() - avg_rep_list = torch.unbind(avg_rep.unsqueeze(1)) - xs += avg_rep_list - ys += y.cpu().numpy().tolist() - - data_loader = torch.utils.data.DataLoader( - dataset=Dataset(xs, ys), batch_size=batch_size, shuffle=shuffle, collate_fn=cached_collate_fn - ) - - return data_loader - - -def train_discriminator( - dataset, - dataset_fp=None, - pretrained_model="openai-community/gpt2-medium", - epochs=10, - batch_size=64, - log_interval=10, - save_model=False, - cached=False, - no_cuda=False, -): - device = "cuda" if torch.cuda.is_available() and not no_cuda else "cpu" - - print("Preprocessing {} dataset...".format(dataset)) - start = time.time() - - if dataset == "SST": - idx2class = ["positive", "negative", "very positive", "very negative", "neutral"] - class2idx = {c: i for i, c in enumerate(idx2class)} - - discriminator = Discriminator( - class_size=len(idx2class), pretrained_model=pretrained_model, cached_mode=cached, device=device - ).to(device) - - text = torchtext_data.Field() - label = torchtext_data.Field(sequential=False) - train_data, val_data, test_data = datasets.SST.splits( - text, - label, - fine_grained=True, - train_subtrees=True, - ) - - x = [] - y = [] - for i in trange(len(train_data), ascii=True): - seq = TreebankWordDetokenizer().detokenize(vars(train_data[i])["text"]) - seq = discriminator.tokenizer.encode(seq) - seq = torch.tensor([50256] + seq, device=device, dtype=torch.long) - x.append(seq) - y.append(class2idx[vars(train_data[i])["label"]]) - 
train_dataset = Dataset(x, y) - - test_x = [] - test_y = [] - for i in trange(len(test_data), ascii=True): - seq = TreebankWordDetokenizer().detokenize(vars(test_data[i])["text"]) - seq = discriminator.tokenizer.encode(seq) - seq = torch.tensor([50256] + seq, device=device, dtype=torch.long) - test_x.append(seq) - test_y.append(class2idx[vars(test_data[i])["label"]]) - test_dataset = Dataset(test_x, test_y) - - discriminator_meta = { - "class_size": len(idx2class), - "embed_size": discriminator.embed_size, - "pretrained_model": pretrained_model, - "class_vocab": class2idx, - "default_class": 2, - } - - elif dataset == "clickbait": - idx2class = ["non_clickbait", "clickbait"] - class2idx = {c: i for i, c in enumerate(idx2class)} - - discriminator = Discriminator( - class_size=len(idx2class), pretrained_model=pretrained_model, cached_mode=cached, device=device - ).to(device) - - with open("datasets/clickbait/clickbait_train_prefix.txt") as f: - data = [] - for i, line in enumerate(f): - try: - data.append(eval(line)) - except Exception: - print("Error evaluating line {}: {}".format(i, line)) - continue - x = [] - y = [] - with open("datasets/clickbait/clickbait_train_prefix.txt") as f: - for i, line in enumerate(tqdm(f, ascii=True)): - try: - d = eval(line) - seq = discriminator.tokenizer.encode(d["text"]) - - if len(seq) < max_length_seq: - seq = torch.tensor([50256] + seq, device=device, dtype=torch.long) - else: - print("Line {} is longer than maximum length {}".format(i, max_length_seq)) - continue - x.append(seq) - y.append(d["label"]) - except Exception: - print("Error evaluating / tokenizing line {}, skipping it".format(i)) - pass - - full_dataset = Dataset(x, y) - train_size = int(0.9 * len(full_dataset)) - test_size = len(full_dataset) - train_size - train_dataset, test_dataset = torch.utils.data.random_split(full_dataset, [train_size, test_size]) - - discriminator_meta = { - "class_size": len(idx2class), - "embed_size": discriminator.embed_size, - "pretrained_model": pretrained_model, - "class_vocab": class2idx, - "default_class": 1, - } - - elif dataset == "toxic": - idx2class = ["non_toxic", "toxic"] - class2idx = {c: i for i, c in enumerate(idx2class)} - - discriminator = Discriminator( - class_size=len(idx2class), pretrained_model=pretrained_model, cached_mode=cached, device=device - ).to(device) - - x = [] - y = [] - with open("datasets/toxic/toxic_train.txt") as f: - for i, line in enumerate(tqdm(f, ascii=True)): - try: - d = eval(line) - seq = discriminator.tokenizer.encode(d["text"]) - - if len(seq) < max_length_seq: - seq = torch.tensor([50256] + seq, device=device, dtype=torch.long) - else: - print("Line {} is longer than maximum length {}".format(i, max_length_seq)) - continue - x.append(seq) - y.append(int(np.sum(d["label"]) > 0)) - except Exception: - print("Error evaluating / tokenizing line {}, skipping it".format(i)) - pass - - full_dataset = Dataset(x, y) - train_size = int(0.9 * len(full_dataset)) - test_size = len(full_dataset) - train_size - train_dataset, test_dataset = torch.utils.data.random_split(full_dataset, [train_size, test_size]) - - discriminator_meta = { - "class_size": len(idx2class), - "embed_size": discriminator.embed_size, - "pretrained_model": pretrained_model, - "class_vocab": class2idx, - "default_class": 0, - } - - else: # if dataset == "generic": - # This assumes the input dataset is a TSV with the following structure: - # class \t text - - if dataset_fp is None: - raise ValueError("When generic dataset is selected, dataset_fp needs to be 
specified aswell.") - - classes = set() - with open(dataset_fp) as f: - csv_reader = csv.reader(f, delimiter="\t") - for row in tqdm(csv_reader, ascii=True): - if row: - classes.add(row[0]) - - idx2class = sorted(classes) - class2idx = {c: i for i, c in enumerate(idx2class)} - - discriminator = Discriminator( - class_size=len(idx2class), pretrained_model=pretrained_model, cached_mode=cached, device=device - ).to(device) - - x = [] - y = [] - with open(dataset_fp) as f: - csv_reader = csv.reader(f, delimiter="\t") - for i, row in enumerate(tqdm(csv_reader, ascii=True)): - if row: - label = row[0] - text = row[1] - - try: - seq = discriminator.tokenizer.encode(text) - if len(seq) < max_length_seq: - seq = torch.tensor([50256] + seq, device=device, dtype=torch.long) - - else: - print("Line {} is longer than maximum length {}".format(i, max_length_seq)) - continue - - x.append(seq) - y.append(class2idx[label]) - - except Exception: - print("Error tokenizing line {}, skipping it".format(i)) - pass - - full_dataset = Dataset(x, y) - train_size = int(0.9 * len(full_dataset)) - test_size = len(full_dataset) - train_size - train_dataset, test_dataset = torch.utils.data.random_split(full_dataset, [train_size, test_size]) - - discriminator_meta = { - "class_size": len(idx2class), - "embed_size": discriminator.embed_size, - "pretrained_model": pretrained_model, - "class_vocab": class2idx, - "default_class": 0, - } - - end = time.time() - print("Preprocessed {} data points".format(len(train_dataset) + len(test_dataset))) - print("Data preprocessing took: {:.3f}s".format(end - start)) - - if cached: - print("Building representation cache...") - - start = time.time() - - train_loader = get_cached_data_loader(train_dataset, batch_size, discriminator, shuffle=True, device=device) - - test_loader = get_cached_data_loader(test_dataset, batch_size, discriminator, device=device) - - end = time.time() - print("Building representation cache took: {:.3f}s".format(end - start)) - - else: - train_loader = torch.utils.data.DataLoader( - dataset=train_dataset, batch_size=batch_size, shuffle=True, collate_fn=collate_fn - ) - test_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=batch_size, collate_fn=collate_fn) - - if save_model: - with open("{}_classifier_head_meta.json".format(dataset), "w") as meta_file: - json.dump(discriminator_meta, meta_file) - - optimizer = optim.Adam(discriminator.parameters(), lr=0.0001) - - for epoch in range(epochs): - start = time.time() - print("\nEpoch", epoch + 1) - - train_epoch( - discriminator=discriminator, - data_loader=train_loader, - optimizer=optimizer, - epoch=epoch, - log_interval=log_interval, - device=device, - ) - evaluate_performance(data_loader=test_loader, discriminator=discriminator, device=device) - - end = time.time() - print("Epoch took: {:.3f}s".format(end - start)) - - print("\nExample prediction") - predict(example_sentence, discriminator, idx2class, cached=cached, device=device) - - if save_model: - # torch.save(discriminator.state_dict(), - # "{}_discriminator_{}.pt".format( - # args.dataset, epoch + 1 - # )) - torch.save( - discriminator.get_classifier().state_dict(), - "{}_classifier_head_epoch_{}.pt".format(dataset, epoch + 1), - ) - - -if __name__ == "__main__": - parser = argparse.ArgumentParser(description="Train a discriminator on top of GPT-2 representations") - parser.add_argument( - "--dataset", - type=str, - default="SST", - choices=("SST", "clickbait", "toxic", "generic"), - help=( - "dataset to train the discriminator on. 
" - "In case of generic, the dataset is expected " - "to be a TSBV file with structure: class \\t text" - ), - ) - parser.add_argument( - "--dataset_fp", - type=str, - default="", - help="File path of the dataset to use. Needed only in case of generic datadset", - ) - parser.add_argument( - "--pretrained_model", - type=str, - default="openai-community/gpt2-medium", - help="Pretrained model to use as encoder", - ) - parser.add_argument("--epochs", type=int, default=10, metavar="N", help="Number of training epochs") - parser.add_argument( - "--batch_size", type=int, default=64, metavar="N", help="input batch size for training (default: 64)" - ) - parser.add_argument( - "--log_interval", - type=int, - default=10, - metavar="N", - help="how many batches to wait before logging training status", - ) - parser.add_argument("--save_model", action="store_true", help="whether to save the model") - parser.add_argument("--cached", action="store_true", help="whether to cache the input representations") - parser.add_argument("--no_cuda", action="store_true", help="use to turn off cuda") - args = parser.parse_args() - - train_discriminator(**(vars(args))) diff --git a/examples/research_projects/quantization-qdqbert/Dockerfile b/examples/research_projects/quantization-qdqbert/Dockerfile deleted file mode 100644 index e64c9f0e021..00000000000 --- a/examples/research_projects/quantization-qdqbert/Dockerfile +++ /dev/null @@ -1,34 +0,0 @@ -# coding=utf-8 -# Copyright 2021 NVIDIA Corporation. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -FROM nvcr.io/nvidia/pytorch:22.02-py3 -LABEL maintainer="Hugging Face" -LABEL repository="transformers" - -RUN apt-get update -RUN apt-get install sudo - -RUN python3 -m pip install --no-cache-dir --upgrade pip -RUN python3 -m pip install --no-cache-dir --ignore-installed pycuda -RUN python3 -m pip install --no-cache-dir \ - pytorch-quantization --extra-index-url https://pypi.ngc.nvidia.com -RUN python3 -m pip install --no-cache-dir onnxruntime-gpu==1.11 - -WORKDIR /workspace -COPY . transformers/ -RUN cd transformers/ && \ - python3 -m pip install --no-cache-dir . - -RUN python3 -m pip install --no-cache-dir datasets \ - accelerate diff --git a/examples/research_projects/quantization-qdqbert/README.md b/examples/research_projects/quantization-qdqbert/README.md deleted file mode 100644 index 2cc2d5e5f98..00000000000 --- a/examples/research_projects/quantization-qdqbert/README.md +++ /dev/null @@ -1,200 +0,0 @@ - - -# Huggingface QDQBERT Quantization Example - -The QDQBERT model adds fake quantization (pair of QuantizeLinear/DequantizeLinear ops) to: - * linear layer inputs and weights - * matmul inputs - * residual add inputs - -In this example, we use QDQBERT model to do quantization on SQuAD task, including Quantization Aware Training (QAT), Post Training Quantization (PTQ) and inferencing using TensorRT. 
- -Required: -- [pytorch-quantization toolkit](https://github.com/NVIDIA/TensorRT/tree/master/tools/pytorch-quantization) -- [TensorRT >= 8.2](https://developer.nvidia.com/tensorrt) -- PyTorch >= 1.10.0 - -## Setup the environment with Dockerfile - -Under the directory of `transformers/`, build the docker image: -```bash -docker build . -f examples/research_projects/quantization-qdqbert/Dockerfile -t bert_quantization:latest -``` - -Run the docker: -```bash -docker run --gpus all --privileged --rm -it --shm-size=1g --ulimit memlock=-1 --ulimit stack=67108864 bert_quantization:latest -``` - -In the container: -```bash -cd transformers/examples/research_projects/quantization-qdqbert/ -``` - -## Quantization Aware Training (QAT) - -Calibrate the pretrained model and finetune with quantization awared: - -```bash -python3 run_quant_qa.py \ - --model_name_or_path google-bert/bert-base-uncased \ - --dataset_name squad \ - --max_seq_length 128 \ - --doc_stride 32 \ - --output_dir calib/google-bert/bert-base-uncased \ - --do_calib \ - --calibrator percentile \ - --percentile 99.99 -``` - -```bash -python3 run_quant_qa.py \ - --model_name_or_path calib/google-bert/bert-base-uncased \ - --dataset_name squad \ - --do_train \ - --do_eval \ - --per_device_train_batch_size 12 \ - --learning_rate 4e-5 \ - --num_train_epochs 2 \ - --max_seq_length 128 \ - --doc_stride 32 \ - --output_dir finetuned_int8/google-bert/bert-base-uncased \ - --tokenizer_name google-bert/bert-base-uncased \ - --save_steps 0 -``` - -### Export QAT model to ONNX - -To export the QAT model finetuned above: - -```bash -python3 run_quant_qa.py \ - --model_name_or_path finetuned_int8/google-bert/bert-base-uncased \ - --output_dir ./ \ - --save_onnx \ - --per_device_eval_batch_size 1 \ - --max_seq_length 128 \ - --doc_stride 32 \ - --dataset_name squad \ - --tokenizer_name google-bert/bert-base-uncased -``` - -Use `--recalibrate-weights` to calibrate the weight ranges according to the quantizer axis. Use `--quant-per-tensor` for per tensor quantization (default is per channel). -Recalibrating will affect the accuracy of the model, but the change should be minimal (< 0.5 F1). 
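As a rough illustration of the per-channel (default) versus per-tensor weight ranges mentioned above, assuming symmetric INT8 scales (a sketch only; the real range collection is handled by the pytorch-quantization calibrators):

```python
import torch

weight = torch.randn(768, 768)  # (out_features, in_features), e.g. one BERT linear layer

# Per-tensor: a single scale shared by the whole weight matrix (what --quant-per-tensor selects)
per_tensor_scale = weight.abs().max() / 127

# Per-channel: one scale per output channel, which usually preserves accuracy better
per_channel_scale = weight.abs().amax(dim=1) / 127  # shape: (out_features,)
```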
- -### Benchmark the INT8 QAT ONNX model inference with TensorRT using dummy input - -```bash -trtexec --onnx=model.onnx --explicitBatch --workspace=16384 --int8 --shapes=input_ids:64x128,attention_mask:64x128,token_type_ids:64x128 --verbose -``` - -### Benchmark the INT8 QAT ONNX model inference with [ONNX Runtime-TRT](https://onnxruntime.ai/docs/execution-providers/TensorRT-ExecutionProvider.html) using dummy input - -```bash -python3 ort-infer-benchmark.py -``` - -### Evaluate the INT8 QAT ONNX model inference with TensorRT - -```bash -python3 evaluate-hf-trt-qa.py \ - --onnx_model_path=./model.onnx \ - --output_dir ./ \ - --per_device_eval_batch_size 64 \ - --max_seq_length 128 \ - --doc_stride 32 \ - --dataset_name squad \ - --tokenizer_name google-bert/bert-base-uncased \ - --int8 \ - --seed 42 -``` - -## Fine-tuning of FP32 model for comparison - -Finetune a fp32 precision model with [transformers/examples/pytorch/question-answering/](../../pytorch/question-answering/): - -```bash -python3 ../../pytorch/question-answering/run_qa.py \ - --model_name_or_path google-bert/bert-base-uncased \ - --dataset_name squad \ - --per_device_train_batch_size 12 \ - --learning_rate 3e-5 \ - --num_train_epochs 2 \ - --max_seq_length 128 \ - --doc_stride 32 \ - --output_dir ./finetuned_fp32/google-bert/bert-base-uncased \ - --save_steps 0 \ - --do_train \ - --do_eval -``` - -## Post Training Quantization (PTQ) - -### PTQ by calibrating and evaluating the finetuned FP32 model above: - -```bash -python3 run_quant_qa.py \ - --model_name_or_path ./finetuned_fp32/google-bert/bert-base-uncased \ - --dataset_name squad \ - --calibrator percentile \ - --percentile 99.99 \ - --max_seq_length 128 \ - --doc_stride 32 \ - --output_dir ./calib/google-bert/bert-base-uncased \ - --save_steps 0 \ - --do_calib \ - --do_eval -``` - -### Export the INT8 PTQ model to ONNX - -```bash -python3 run_quant_qa.py \ - --model_name_or_path ./calib/google-bert/bert-base-uncased \ - --output_dir ./ \ - --save_onnx \ - --per_device_eval_batch_size 1 \ - --max_seq_length 128 \ - --doc_stride 32 \ - --dataset_name squad \ - --tokenizer_name google-bert/bert-base-uncased -``` - -### Evaluate the INT8 PTQ ONNX model inference with TensorRT - -```bash -python3 evaluate-hf-trt-qa.py \ - --onnx_model_path=./model.onnx \ - --output_dir ./ \ - --per_device_eval_batch_size 64 \ - --max_seq_length 128 \ - --doc_stride 32 \ - --dataset_name squad \ - --tokenizer_name google-bert/bert-base-uncased \ - --int8 \ - --seed 42 -``` - -### Quantization options - -Some useful options to support different implementations and optimizations. These should be specified for both calibration and finetuning. - -|argument|description| -|--------|-----------| -|`--quant-per-tensor`| quantize weights with one quantization range per tensor | -|`--fuse-qkv` | use a single range (the max) for quantizing QKV weights and output activations | -|`--clip-gelu N` | clip the output of GELU to a maximum of N when quantizing (e.g. 10) | -|`--disable-dropout` | disable dropout for consistent activation ranges | diff --git a/examples/research_projects/quantization-qdqbert/evaluate-hf-trt-qa.py b/examples/research_projects/quantization-qdqbert/evaluate-hf-trt-qa.py deleted file mode 100755 index 7a8ea2109bc..00000000000 --- a/examples/research_projects/quantization-qdqbert/evaluate-hf-trt-qa.py +++ /dev/null @@ -1,457 +0,0 @@ -# coding=utf-8 -# Copyright 2021 NVIDIA Corporation. All rights reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Finetuning the library models for question-answering on SQuAD (DistilBERT, Bert, XLM, XLNet).""" - -import argparse -import logging -import os -import time -import timeit - -import datasets -import numpy as np -import pycuda.autoinit # noqa: F401 -import pycuda.driver as cuda -import tensorrt as trt -import torch -from absl import logging as absl_logging -from accelerate import Accelerator -from datasets import load_dataset, load_metric -from torch.utils.data import DataLoader -from utils_qa import postprocess_qa_predictions - -import transformers -from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed -from transformers.trainer_pt_utils import nested_concat, nested_truncate - - -TRT_LOGGER = trt.Logger(trt.Logger.WARNING) -absl_logger = absl_logging.get_absl_logger() -absl_logger.setLevel(logging.WARNING) - -logger = logging.getLogger(__name__) - -parser = argparse.ArgumentParser() - -# Required parameters -parser.add_argument( - "--onnx_model_path", - default=None, - type=str, - required=True, - help="Path to ONNX model: ", -) - -parser.add_argument( - "--output_dir", - default=None, - type=str, - required=True, - help="The output directory where the model checkpoints and predictions will be written.", -) - -# Other parameters - -parser.add_argument( - "--tokenizer_name", - default="", - type=str, - required=True, - help="Pretrained tokenizer name or path if not the same as model_name", -) - -parser.add_argument( - "--version_2_with_negative", - action="store_true", - help="If true, the SQuAD examples contain some that do not have an answer.", -) -parser.add_argument( - "--null_score_diff_threshold", - type=float, - default=0.0, - help="If null_score - best_non_null is greater than the threshold predict null.", -) - -parser.add_argument( - "--max_seq_length", - default=384, - type=int, - help=( - "The maximum total input sequence length after WordPiece tokenization. Sequences " - "longer than this will be truncated, and sequences shorter than this will be padded." - ), -) -parser.add_argument( - "--doc_stride", - default=128, - type=int, - help="When splitting up a long document into chunks, how much stride to take between chunks.", -) - -parser.add_argument("--per_device_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.") - -parser.add_argument( - "--n_best_size", - default=20, - type=int, - help="The total number of n-best predictions to generate in the nbest_predictions.json output file.", -) -parser.add_argument( - "--max_answer_length", - default=30, - type=int, - help=( - "The maximum length of an answer that can be generated. This is needed because the start " - "and end predictions are not conditioned on one another." 
- ), -) - -parser.add_argument("--seed", type=int, default=42, help="random seed for initialization") - -parser.add_argument( - "--dataset_name", - type=str, - default=None, - required=True, - help="The name of the dataset to use (via the datasets library).", -) -parser.add_argument( - "--dataset_config_name", - type=str, - default=None, - help="The configuration name of the dataset to use (via the datasets library).", -) -parser.add_argument( - "--preprocessing_num_workers", type=int, default=4, help="A csv or a json file containing the training data." -) -parser.add_argument("--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets") -parser.add_argument( - "--fp16", - action="store_true", - help="Whether to use 16-bit (mixed) precision instead of 32-bit", -) -parser.add_argument( - "--int8", - action="store_true", - help="Whether to use INT8", -) - -args = parser.parse_args() - -if args.tokenizer_name: - tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True) -else: - raise ValueError( - "You are instantiating a new tokenizer from scratch. This is not supported by this script. " - "You can do it from another script, save it, and load it from here, using --tokenizer_name." - ) - -logger.info("Training/evaluation parameters %s", args) - -args.eval_batch_size = args.per_device_eval_batch_size - -INPUT_SHAPE = (args.eval_batch_size, args.max_seq_length) - -# TRT Engine properties -STRICT_TYPES = True - -engine_name = "temp_engine/bert-fp32.engine" -if args.fp16: - engine_name = "temp_engine/bert-fp16.engine" -if args.int8: - engine_name = "temp_engine/bert-int8.engine" - -# import ONNX file -if not os.path.exists("temp_engine"): - os.makedirs("temp_engine") - -EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH) -with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser( - network, TRT_LOGGER -) as parser: - with open(args.onnx_model_path, "rb") as model: - if not parser.parse(model.read()): - for error in range(parser.num_errors): - print(parser.get_error(error)) - - # Query input names and shapes from parsed TensorRT network - network_inputs = [network.get_input(i) for i in range(network.num_inputs)] - input_names = [_input.name for _input in network_inputs] # ex: ["actual_input1"] - - with builder.create_builder_config() as config: - config.max_workspace_size = 1 << 50 - if STRICT_TYPES: - config.set_flag(trt.BuilderFlag.STRICT_TYPES) - if args.fp16: - config.set_flag(trt.BuilderFlag.FP16) - if args.int8: - config.set_flag(trt.BuilderFlag.INT8) - profile = builder.create_optimization_profile() - config.add_optimization_profile(profile) - for i in range(len(input_names)): - profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE) - engine = builder.build_engine(network, config) - - # serialize_engine and store in file (can be directly loaded and deserialized): - with open(engine_name, "wb") as f: - f.write(engine.serialize()) - - -# run inference with TRT -def model_infer(inputs, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream): - input_ids = np.asarray(inputs["input_ids"], dtype=np.int32) - attention_mask = np.asarray(inputs["attention_mask"], dtype=np.int32) - token_type_ids = np.asarray(inputs["token_type_ids"], dtype=np.int32) - - # Copy inputs - cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), stream) - cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), stream) - cuda.memcpy_htod_async(d_inputs[2], 
token_type_ids.ravel(), stream) - # start time - start_time = time.time() - # Run inference - context.execute_async( - bindings=[int(d_inp) for d_inp in d_inputs] + [int(d_output0), int(d_output1)], stream_handle=stream.handle - ) - # Transfer predictions back from GPU - cuda.memcpy_dtoh_async(h_output0, d_output0, stream) - cuda.memcpy_dtoh_async(h_output1, d_output1, stream) - # Synchronize the stream and take time - stream.synchronize() - # end time - end_time = time.time() - infer_time = end_time - start_time - outputs = (h_output0, h_output1) - # print(outputs) - return outputs, infer_time - - -# Initialize the accelerator. We will let the accelerator handle device placement for us in this example. -accelerator = Accelerator() -# Make one log on every process with the configuration for debugging. -logging.basicConfig( - format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", - datefmt="%m/%d/%Y %H:%M:%S", - level=logging.INFO, -) - -# Setup logging, we only want one process per machine to log things on the screen. -# accelerator.is_local_main_process is only True for one process per machine. -logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR) -if accelerator.is_local_main_process: - datasets.utils.logging.set_verbosity_warning() - transformers.utils.logging.set_verbosity_info() -else: - datasets.utils.logging.set_verbosity_error() - transformers.utils.logging.set_verbosity_error() - -# If passed along, set the training seed now. -if args.seed is not None: - set_seed(args.seed) - -# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) -# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ -# (the dataset will be downloaded automatically from the datasets Hub). -# -# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called -# 'text' is found. You can easily tweak this behavior (see below). -if args.dataset_name is not None: - # Downloading and loading a dataset from the hub. - raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name) -else: - raise ValueError("Evaluation requires a dataset name") -# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at -# https://huggingface.co/docs/datasets/loading_datasets. - -# Preprocessing the datasets. -# Preprocessing is slightly different for training and evaluation. - -column_names = raw_datasets["validation"].column_names - -question_column_name = "question" if "question" in column_names else column_names[0] -context_column_name = "context" if "context" in column_names else column_names[1] -answer_column_name = "answers" if "answers" in column_names else column_names[2] - -# Padding side determines if we do (question|context) or (context|question). -pad_on_right = tokenizer.padding_side == "right" - -if args.max_seq_length > tokenizer.model_max_length: - logger.warning( - f"The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the " - f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." 
- ) - -max_seq_length = min(args.max_seq_length, tokenizer.model_max_length) - - -# Validation preprocessing -def prepare_validation_features(examples): - # Some of the questions have lots of whitespace on the left, which is not useful and will make the - # truncation of the context fail (the tokenized question will take a lots of space). So we remove that - # left whitespace - examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]] - - # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results - # in one example possible giving several features when a context is long, each of those features having a - # context that overlaps a bit the context of the previous feature. - tokenized_examples = tokenizer( - examples[question_column_name if pad_on_right else context_column_name], - examples[context_column_name if pad_on_right else question_column_name], - truncation="only_second" if pad_on_right else "only_first", - max_length=max_seq_length, - stride=args.doc_stride, - return_overflowing_tokens=True, - return_offsets_mapping=True, - padding="max_length", - ) - - # Since one example might give us several features if it has a long context, we need a map from a feature to - # its corresponding example. This key gives us just that. - sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping") - - # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the - # corresponding example_id and we will store the offset mappings. - tokenized_examples["example_id"] = [] - - for i in range(len(tokenized_examples["input_ids"])): - # Grab the sequence corresponding to that example (to know what is the context and what is the question). - sequence_ids = tokenized_examples.sequence_ids(i) - context_index = 1 if pad_on_right else 0 - - # One example can give several spans, this is the index of the example containing this span of text. - sample_index = sample_mapping[i] - tokenized_examples["example_id"].append(examples["id"][sample_index]) - - # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token - # position is part of the context or not. - tokenized_examples["offset_mapping"][i] = [ - (o if sequence_ids[k] == context_index else None) - for k, o in enumerate(tokenized_examples["offset_mapping"][i]) - ] - - return tokenized_examples - - -eval_examples = raw_datasets["validation"] -# Validation Feature Creation -eval_dataset = eval_examples.map( - prepare_validation_features, - batched=True, - num_proc=args.preprocessing_num_workers, - remove_columns=column_names, - load_from_cache_file=not args.overwrite_cache, - desc="Running tokenizer on validation dataset", -) - -data_collator = default_data_collator - -eval_dataset_for_model = eval_dataset.remove_columns(["example_id", "offset_mapping"]) -eval_dataloader = DataLoader( - eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size -) - - -# Post-processing: -def post_processing_function(examples, features, predictions, stage="eval"): - # Post-processing: we match the start logits and end logits to answers in the original context. 
- predictions = postprocess_qa_predictions( - examples=examples, - features=features, - predictions=predictions, - version_2_with_negative=args.version_2_with_negative, - n_best_size=args.n_best_size, - max_answer_length=args.max_answer_length, - null_score_diff_threshold=args.null_score_diff_threshold, - output_dir=args.output_dir, - prefix=stage, - ) - # Format the result to the format the metric expects. - if args.version_2_with_negative: - formatted_predictions = [ - {"id": k, "prediction_text": v, "no_answer_probability": 0.0} for k, v in predictions.items() - ] - else: - formatted_predictions = [{"id": k, "prediction_text": v} for k, v in predictions.items()] - - references = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples] - return EvalPrediction(predictions=formatted_predictions, label_ids=references) - - -metric = load_metric("squad_v2" if args.version_2_with_negative else "squad") - -# Evaluation! -logger.info("Loading ONNX model %s for evaluation", args.onnx_model_path) -with open(engine_name, "rb") as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine( - f.read() -) as engine, engine.create_execution_context() as context: - # setup for TRT inferrence - for i in range(len(input_names)): - context.set_binding_shape(i, INPUT_SHAPE) - assert context.all_binding_shapes_specified - - def binding_nbytes(binding): - return trt.volume(engine.get_binding_shape(binding)) * engine.get_binding_dtype(binding).itemsize - - # Allocate device memory for inputs and outputs. - d_inputs = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)] - - # Allocate output buffer - h_output0 = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32) - h_output1 = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32) - d_output0 = cuda.mem_alloc(h_output0.nbytes) - d_output1 = cuda.mem_alloc(h_output1.nbytes) - - # Create a stream in which to copy inputs/outputs and run inference. 
- stream = cuda.Stream() - - # Evaluation - logger.info("***** Running Evaluation *****") - logger.info(f" Num examples = {len(eval_dataset)}") - logger.info(f" Batch size = {args.per_device_eval_batch_size}") - - total_time = 0.0 - niter = 0 - start_time = timeit.default_timer() - - all_preds = None - for step, batch in enumerate(eval_dataloader): - outputs, infer_time = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream) - total_time += infer_time - niter += 1 - - start_logits, end_logits = outputs - start_logits = torch.tensor(start_logits) - end_logits = torch.tensor(end_logits) - - # necessary to pad predictions and labels for being gathered - start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100) - end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100) - - logits = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy()) - all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100) - - if all_preds is not None: - all_preds = nested_truncate(all_preds, len(eval_dataset)) - - evalTime = timeit.default_timer() - start_time - logger.info(" Evaluation done in total %f secs (%f sec per example)", evalTime, evalTime / len(eval_dataset)) - # Inference time from TRT - logger.info("Average Inference Time = {:.3f} ms".format(total_time * 1000 / niter)) - logger.info("Total Inference Time = {:.3f} ms".format(total_time * 1000)) - logger.info("Total Number of Inference = %d", niter) - -prediction = post_processing_function(eval_examples, eval_dataset, all_preds) -eval_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids) -logger.info(f"Evaluation metrics: {eval_metric}") diff --git a/examples/research_projects/quantization-qdqbert/ort-infer-benchmark.py b/examples/research_projects/quantization-qdqbert/ort-infer-benchmark.py deleted file mode 100644 index bb0436c1258..00000000000 --- a/examples/research_projects/quantization-qdqbert/ort-infer-benchmark.py +++ /dev/null @@ -1,50 +0,0 @@ -import os -import time - -import numpy as np -import onnxruntime as ort - - -os.environ["ORT_TENSORRT_INT8_ENABLE"] = "1" -os.environ["ORT_TENSORRT_INT8_USE_NATIVE_CALIBRATION_TABLE"] = "0" -os.environ["ORT_TENSORRT_ENGINE_CACHE_ENABLE"] = "1" - -sess_opt = ort.SessionOptions() -sess_opt.graph_optimization_level = ort.GraphOptimizationLevel.ORT_DISABLE_ALL -print("Create inference session...") -execution_provider = ["TensorrtExecutionProvider", "CUDAExecutionProvider"] -sess = ort.InferenceSession("model.onnx", sess_options=sess_opt, providers=execution_provider) -run_opt = ort.RunOptions() - -sequence = 128 -batch = 1 -input_ids = np.ones((batch, sequence), dtype=np.int64) -attention_mask = np.ones((batch, sequence), dtype=np.int64) -token_type_ids = np.ones((batch, sequence), dtype=np.int64) - -print("Warm up phase...") -sess.run( - None, - { - sess.get_inputs()[0].name: input_ids, - sess.get_inputs()[1].name: attention_mask, - sess.get_inputs()[2].name: token_type_ids, - }, - run_options=run_opt, -) - -print("Start inference...") -start_time = time.time() -max_iters = 2000 -predict = {} -for iter in range(max_iters): - predict = sess.run( - None, - { - sess.get_inputs()[0].name: input_ids, - sess.get_inputs()[1].name: attention_mask, - sess.get_inputs()[2].name: token_type_ids, - }, - run_options=run_opt, - ) -print("Average Inference Time = {:.3f} ms".format((time.time() - start_time) * 1000 / max_iters)) 
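
One caveat worth noting for the ONNX Runtime-TRT benchmark above: ONNX Runtime falls back to the CUDA (or CPU) execution provider silently when the TensorRT provider cannot be loaded, so the reported timings may not reflect TensorRT at all. A minimal sketch, not part of the original script and assuming `model.onnx` is present in the working directory as in `ort-infer-benchmark.py`, to confirm which provider was actually selected:

```python
# Hedged sketch (not from the original patch): verify that the TensorRT execution
# provider was actually registered for the session, since ONNX Runtime silently
# falls back to CUDA/CPU when TensorRT is unavailable.
import onnxruntime as ort

sess = ort.InferenceSession(
    "model.onnx",
    providers=["TensorrtExecutionProvider", "CUDAExecutionProvider"],
)

# The first entry should be TensorrtExecutionProvider if the TRT EP loaded.
print(sess.get_providers())
```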
diff --git a/examples/research_projects/quantization-qdqbert/quant_trainer.py b/examples/research_projects/quantization-qdqbert/quant_trainer.py deleted file mode 100755 index 132aa284905..00000000000 --- a/examples/research_projects/quantization-qdqbert/quant_trainer.py +++ /dev/null @@ -1,305 +0,0 @@ -# coding=utf-8 -# Copyright 2021 NVIDIA Corporation. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Helper functions for training models with pytorch-quantization""" - -import logging -import re - -import pytorch_quantization -import pytorch_quantization.nn as quant_nn -import torch -from pytorch_quantization import calib -from pytorch_quantization.tensor_quant import QuantDescriptor - - -logger = logging.getLogger(__name__) - -name_width = 50 # max width of layer names -qname_width = 70 # max width of quantizer names - -# ========================================== Quant Trainer API ========================================== - - -def add_arguments(parser): - """Add arguments to parser for functions defined in quant_trainer.""" - - group = parser.add_argument_group("quant_trainer arguments") - group.add_argument("--wprec", type=int, default=8, help="weight precision") - group.add_argument("--aprec", type=int, default=8, help="activation precision") - group.add_argument("--quant-per-tensor", action="store_true", help="per tensor weight scaling") - group.add_argument("--quant-disable", action="store_true", help="disable all quantizers") - group.add_argument("--quant-disable-embeddings", action="store_true", help="disable all embeddings quantizers") - group.add_argument("--quant-disable-keyword", type=str, nargs="+", help="disable quantizers by keyword") - group.add_argument("--quant-disable-layer-module", type=str, help="disable quantizers by keyword under layer.") - group.add_argument("--quant-enable-layer-module", type=str, help="enable quantizers by keyword under layer") - group.add_argument("--calibrator", default="max", help="which quantization range calibrator to use") - group.add_argument("--percentile", default=None, type=float, help="percentile for PercentileCalibrator") - group.add_argument("--fuse-qkv", action="store_true", help="use the same scale factor for qkv") - group.add_argument("--clip-gelu", metavar="N", type=float, help="clip gelu output maximum value to N") - group.add_argument( - "--recalibrate-weights", - action="store_true", - help=( - "recalibrate weight amaxes by taking the max of the weights." - " amaxes will be computed with the current quantization granularity (axis)." 
- ), - ) - - -def set_default_quantizers(args): - """Set default quantizers before creating the model.""" - - if args.calibrator == "max": - calib_method = "max" - elif args.calibrator == "percentile": - if args.percentile is None: - raise ValueError("Specify --percentile when using percentile calibrator") - calib_method = "histogram" - elif args.calibrator == "mse": - calib_method = "histogram" - else: - raise ValueError(f"Invalid calibrator {args.calibrator}") - - input_desc = QuantDescriptor(num_bits=args.aprec, calib_method=calib_method) - weight_desc = QuantDescriptor(num_bits=args.wprec, axis=(None if args.quant_per_tensor else (0,))) - quant_nn.QuantLinear.set_default_quant_desc_input(input_desc) - quant_nn.QuantLinear.set_default_quant_desc_weight(weight_desc) - - -def configure_model(model, args, calib=False, eval=False): - """Function called before the training loop.""" - - logger.info("Configuring Model for Quantization") - logger.info(f"using quantization package {pytorch_quantization.__file__}") - - if not calib: - if args.quant_disable_embeddings: - set_quantizer_by_name(model, ["embeddings"], which="weight", _disabled=True) - - if args.quant_disable: - set_quantizer_by_name(model, [""], _disabled=True) - - if args.quant_disable_keyword: - set_quantizer_by_name(model, args.quant_disable_keyword, _disabled=True) - - if args.quant_disable_layer_module: - set_quantizer_by_name(model, [r"layer.\d+." + args.quant_disable_layer_module], _disabled=True) - - if args.quant_enable_layer_module: - set_quantizer_by_name(model, [r"layer.\d+." + args.quant_enable_layer_module], _disabled=False) - - if args.recalibrate_weights: - recalibrate_weights(model) - - if args.fuse_qkv: - fuse_qkv(model, args) - - if args.clip_gelu: - clip_gelu(model, args.clip_gelu) - - # if args.local_rank in [-1, 0] and not calib: - print_quant_summary(model) - - -def enable_calibration(model): - """Enable calibration of all *_input_quantizer modules in model.""" - - logger.info("Enabling Calibration") - for name, module in model.named_modules(): - if name.endswith("_quantizer"): - if module._calibrator is not None: - module.disable_quant() - module.enable_calib() - else: - module.disable() - logger.info(f"{name:80}: {module}") - - -def finish_calibration(model, args): - """Disable calibration and load amax for all "*_input_quantizer modules in model.""" - - logger.info("Loading calibrated amax") - for name, module in model.named_modules(): - if name.endswith("_quantizer"): - if module._calibrator is not None: - if isinstance(module._calibrator, calib.MaxCalibrator): - module.load_calib_amax() - else: - module.load_calib_amax("percentile", percentile=args.percentile) - module.enable_quant() - module.disable_calib() - else: - module.enable() - model.cuda() - print_quant_summary(model) - - -# ========================================== Helper Function ========================================== - - -def fuse_qkv(model, args): - """Adjust quantization ranges to match an implementation where the QKV projections are implemented with a single GEMM. - Force the weight and output scale factors to match by taking the max of (Q,K,V). 
- """ - - def fuse3(qq, qk, qv): - for mod in [qq, qk, qv]: - if not hasattr(mod, "_amax"): - print(" WARNING: NO AMAX BUFFER") - return - q = qq._amax.detach().item() - k = qk._amax.detach().item() - v = qv._amax.detach().item() - - amax = max(q, k, v) - qq._amax.fill_(amax) - qk._amax.fill_(amax) - qv._amax.fill_(amax) - logger.info(f" q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}") - - for name, mod in model.named_modules(): - if name.endswith(".attention.self"): - logger.info(f"FUSE_QKV: {name:{name_width}}") - fuse3(mod.matmul_q_input_quantizer, mod.matmul_k_input_quantizer, mod.matmul_v_input_quantizer) - if args.quant_per_tensor: - fuse3(mod.query._weight_quantizer, mod.key._weight_quantizer, mod.value._weight_quantizer) - - -def clip_gelu(model, maxval): - """Clip activations generated by GELU to maxval when quantized. - Implemented by adjusting the amax of the following input_quantizer. - """ - - for name, mod in model.named_modules(): - if name.endswith(".output.dense") and not name.endswith("attention.output.dense"): - amax_init = mod._input_quantizer._amax.data.detach().item() - mod._input_quantizer._amax.data.detach().clamp_(max=maxval) - amax = mod._input_quantizer._amax.data.detach().item() - logger.info(f"CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}") - - -def expand_amax(model): - """Expand per-tensor amax to be per channel, where each channel is assigned the per-tensor amax.""" - - for name, mod in model.named_modules(): - if hasattr(mod, "_weight_quantizer") and mod._weight_quantizer.axis is not None: - k = mod.weight.shape[0] - amax = mod._weight_quantizer._amax.detach() - mod._weight_quantizer._amax = torch.ones(k, dtype=amax.dtype, device=amax.device) * amax - print(f"expanding {name} {amax} -> {mod._weight_quantizer._amax}") - - -def recalibrate_weights(model): - """Performs max calibration on the weights and updates amax.""" - - for name, mod in model.named_modules(): - if hasattr(mod, "_weight_quantizer"): - if not hasattr(mod.weight_quantizer, "_amax"): - print("RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER") - continue - - # determine which axes to reduce across - # e.g. 
a 4D tensor quantized per axis 0 should reduce over (1,2,3) - axis_set = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis) - reduce_axis = set(range(len(mod.weight.size()))) - axis_set - amax = pytorch_quantization.utils.reduce_amax(mod.weight, axis=reduce_axis, keepdims=True).detach() - logger.info(f"RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}") - mod._weight_quantizer._amax = amax - - -def print_model_summary(model, name_width=25, line_width=180, ignore=None): - """Print model quantization configuration.""" - - if ignore is None: - ignore = [] - elif not isinstance(ignore, list): - ignore = [ignore] - - name_width = 0 - for name, mod in model.named_modules(): - if not hasattr(mod, "weight"): - continue - name_width = max(name_width, len(name)) - - for name, mod in model.named_modules(): - input_q = getattr(mod, "_input_quantizer", None) - weight_q = getattr(mod, "_weight_quantizer", None) - if not hasattr(mod, "weight"): - continue - if type(mod) in ignore: - continue - if [True for s in ignore if isinstance(s, str) and s in name]: - continue - act_str = f"Act:{input_q.extra_repr()}" - wgt_str = f"Wgt:{weight_q.extra_repr()}" - s = f"{name:{name_width}} {act_str} {wgt_str}" - if len(s) <= line_width: - logger.info(s) - else: - logger.info(f"{name:{name_width}} {act_str}") - logger.info(f'{" ":{name_width}} {wgt_str}') - - -def print_quant_summary(model): - """Print summary of all quantizer modules in the model.""" - - count = 0 - for name, mod in model.named_modules(): - if isinstance(mod, pytorch_quantization.nn.TensorQuantizer): - print(f"{name:80} {mod}") - count += 1 - print(f"{count} TensorQuantizers found in model") - - -def set_quantizer(name, mod, quantizer, k, v): - """Set attributes for mod.quantizer.""" - - quantizer_mod = getattr(mod, quantizer, None) - if quantizer_mod is not None: - assert hasattr(quantizer_mod, k) - setattr(quantizer_mod, k, v) - else: - logger.warning(f"{name} has no {quantizer}") - - -def set_quantizers(name, mod, which="both", **kwargs): - """Set quantizer attributes for mod.""" - - s = f"Warning: changing {which} quantizers of {name:{qname_width}}" - for k, v in kwargs.items(): - s += f" {k}={v}" - if which in ["input", "both"]: - set_quantizer(name, mod, "_input_quantizer", k, v) - if which in ["weight", "both"]: - set_quantizer(name, mod, "_weight_quantizer", k, v) - logger.info(s) - - -def set_quantizer_by_name(model, names, **kwargs): - """Set quantizer attributes for layers where name contains a substring in names.""" - - for name, mod in model.named_modules(): - if hasattr(mod, "_input_quantizer") or hasattr(mod, "_weight_quantizer"): - for n in names: - if re.search(n, name): - set_quantizers(name, mod, **kwargs) - elif name.endswith("_quantizer"): - for n in names: - if re.search(n, name): - s = f"Warning: changing {name:{name_width}}" - for k, v in kwargs.items(): - s += f" {k}={v}" - setattr(mod, k, v) - logger.info(s) diff --git a/examples/research_projects/quantization-qdqbert/run_quant_qa.py b/examples/research_projects/quantization-qdqbert/run_quant_qa.py deleted file mode 100755 index 770a36525b5..00000000000 --- a/examples/research_projects/quantization-qdqbert/run_quant_qa.py +++ /dev/null @@ -1,688 +0,0 @@ -#!/usr/bin/env python -# coding=utf-8 -# Copyright 2020 The HuggingFace Team All rights reserved. -# Copyright 2021 NVIDIA Corporation. All rights reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" -Fine-tuning the library models for question answering. -""" -# You can also adapt this script on your own question answering task. Pointers for this are left as comments. - -import logging -import os -import sys -from dataclasses import dataclass, field -from typing import Optional - -import datasets -import quant_trainer -from datasets import load_dataset, load_metric -from trainer_quant_qa import QuestionAnsweringTrainer -from utils_qa import postprocess_qa_predictions - -import transformers -from transformers import ( - AutoTokenizer, - DataCollatorWithPadding, - EvalPrediction, - HfArgumentParser, - PreTrainedTokenizerFast, - QDQBertConfig, - QDQBertForQuestionAnswering, - TrainingArguments, - default_data_collator, - set_seed, -) -from transformers.trainer_utils import SchedulerType, get_last_checkpoint -from transformers.utils import check_min_version -from transformers.utils.versions import require_version - - -# Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.9.0") - -require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/question-answering/requirements.txt") - -logger = logging.getLogger(__name__) - - -@dataclass -class ModelArguments: - """ - Arguments pertaining to which model/config/tokenizer we are going to fine-tune from. - """ - - model_name_or_path: str = field( - metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} - ) - config_name: Optional[str] = field( - default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} - ) - tokenizer_name: Optional[str] = field( - default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} - ) - cache_dir: Optional[str] = field( - default=None, - metadata={"help": "Path to directory to store the pretrained models downloaded from huggingface.co"}, - ) - model_revision: str = field( - default="main", - metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, - ) - use_auth_token: bool = field( - default=False, - metadata={ - "help": ( - "Will use the token generated when running `huggingface-cli login` (necessary to use this script " - "with private models)." - ) - }, - ) - do_calib: bool = field(default=False, metadata={"help": "Whether to run calibration of quantization ranges."}) - num_calib_batch: int = field( - default=4, - metadata={"help": "Number of batches for calibration. 0 will disable calibration "}, - ) - save_onnx: bool = field(default=False, metadata={"help": "Whether to save model to onnx."}) - - -@dataclass -class DataTrainingArguments: - """ - Arguments pertaining to what data we are going to input our model for training and eval. 
- """ - - dataset_name: Optional[str] = field( - default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."} - ) - dataset_config_name: Optional[str] = field( - default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} - ) - train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."}) - validation_file: Optional[str] = field( - default=None, - metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."}, - ) - test_file: Optional[str] = field( - default=None, - metadata={"help": "An optional input test data file to evaluate the perplexity on (a text file)."}, - ) - overwrite_cache: bool = field( - default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} - ) - preprocessing_num_workers: Optional[int] = field( - default=None, - metadata={"help": "The number of processes to use for the preprocessing."}, - ) - max_seq_length: int = field( - default=384, - metadata={ - "help": ( - "The maximum total input sequence length after tokenization. Sequences longer " - "than this will be truncated, sequences shorter will be padded." - ) - }, - ) - pad_to_max_length: bool = field( - default=True, - metadata={ - "help": ( - "Whether to pad all samples to `max_seq_length`. If False, will pad the samples dynamically when" - " batching to the maximum length in the batch (which can be faster on GPU but will be slower on TPU)." - ) - }, - ) - max_train_samples: Optional[int] = field( - default=None, - metadata={ - "help": ( - "For debugging purposes or quicker training, truncate the number of training examples to this " - "value if set." - ) - }, - ) - max_eval_samples: Optional[int] = field( - default=None, - metadata={ - "help": ( - "For debugging purposes or quicker training, truncate the number of evaluation examples to this " - "value if set." - ) - }, - ) - max_predict_samples: Optional[int] = field( - default=None, - metadata={ - "help": ( - "For debugging purposes or quicker training, truncate the number of prediction examples to this " - "value if set." - ) - }, - ) - version_2_with_negative: bool = field( - default=False, metadata={"help": "If true, some of the examples do not have an answer."} - ) - null_score_diff_threshold: float = field( - default=0.0, - metadata={ - "help": ( - "The threshold used to select the null answer: if the best answer has a score that is less than " - "the score of the null answer minus this threshold, the null answer is selected for this example. " - "Only useful when `version_2_with_negative=True`." - ) - }, - ) - doc_stride: int = field( - default=128, - metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."}, - ) - n_best_size: int = field( - default=20, - metadata={"help": "The total number of n-best predictions to generate when looking for an answer."}, - ) - max_answer_length: int = field( - default=30, - metadata={ - "help": ( - "The maximum length of an answer that can be generated. This is needed because the start " - "and end predictions are not conditioned on one another." 
- ) - }, - ) - - def __post_init__(self): - if ( - self.dataset_name is None - and self.train_file is None - and self.validation_file is None - and self.test_file is None - ): - raise ValueError("Need either a dataset name or a training/validation file/test_file.") - else: - if self.train_file is not None: - extension = self.train_file.split(".")[-1] - assert extension in ["csv", "json"], "`train_file` should be a csv or a json file." - if self.validation_file is not None: - extension = self.validation_file.split(".")[-1] - assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file." - if self.test_file is not None: - extension = self.test_file.split(".")[-1] - assert extension in ["csv", "json"], "`test_file` should be a csv or a json file." - - -def main(): - # See all possible arguments in src/transformers/training_args.py - # or by passing the --help flag to this script. - # We now keep distinct sets of args, for a cleaner separation of concerns. - - parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) - # quant_trainer arguments - quant_trainer.add_arguments(parser) - - # if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): - # # If we pass only one argument to the script and it's the path to a json file, - # # let's parse it to get our arguments. - # model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) - # else: - - model_args, data_args, training_args, quant_trainer_args = parser.parse_args_into_dataclasses() - - # setup QAT training args for scheduler (default to use cosine annealing learning rate schedule) - training_args.lr_scheduler_type = SchedulerType.COSINE - - # Setup logging - logging.basicConfig( - format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", - datefmt="%m/%d/%Y %H:%M:%S", - handlers=[logging.StreamHandler(sys.stdout)], - ) - - log_level = training_args.get_process_log_level() - logger.setLevel(log_level) - datasets.utils.logging.set_verbosity(log_level) - transformers.utils.logging.set_verbosity(log_level) - transformers.utils.logging.enable_default_handler() - transformers.utils.logging.enable_explicit_format() - - # Log on each process the small summary: - logger.warning( - f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" - + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}" - ) - logger.info(f"Training/evaluation parameters {training_args}") - - # Detecting last checkpoint. - last_checkpoint = None - if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: - last_checkpoint = get_last_checkpoint(training_args.output_dir) - if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: - raise ValueError( - f"Output directory ({training_args.output_dir}) already exists and is not empty. " - "Use --overwrite_output_dir to overcome." - ) - elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: - logger.info( - f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " - "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." - ) - - # Set seed before initializing model. 
- set_seed(training_args.seed) - - # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) - # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ - # (the dataset will be downloaded automatically from the datasets Hub). - # - # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called - # 'text' is found. You can easily tweak this behavior (see below). - # - # In distributed training, the load_dataset function guarantee that only one local process can concurrently - # download the dataset. - if data_args.dataset_name is not None: - # Downloading and loading a dataset from the hub. - raw_datasets = load_dataset( - data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir - ) - else: - data_files = {} - if data_args.train_file is not None: - data_files["train"] = data_args.train_file - extension = data_args.train_file.split(".")[-1] - - if data_args.validation_file is not None: - data_files["validation"] = data_args.validation_file - extension = data_args.validation_file.split(".")[-1] - if data_args.test_file is not None: - data_files["test"] = data_args.test_file - extension = data_args.test_file.split(".")[-1] - raw_datasets = load_dataset(extension, data_files=data_files, field="data", cache_dir=model_args.cache_dir) - # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at - # https://huggingface.co/docs/datasets/loading_datasets. - - # set default quantization parameters before building model - quant_trainer.set_default_quantizers(quant_trainer_args) - - # Load pretrained model and tokenizer - # - # Distributed training: - # The .from_pretrained methods guarantee that only one local process can concurrently - # download model & vocab. - config = QDQBertConfig.from_pretrained( - model_args.config_name if model_args.config_name else model_args.model_name_or_path, - cache_dir=model_args.cache_dir, - revision=model_args.model_revision, - token=True if model_args.use_auth_token else None, - ) - tokenizer = AutoTokenizer.from_pretrained( - model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, - cache_dir=model_args.cache_dir, - use_fast=True, - revision=model_args.model_revision, - token=True if model_args.use_auth_token else None, - ) - model = QDQBertForQuestionAnswering.from_pretrained( - model_args.model_name_or_path, - from_tf=bool(".ckpt" in model_args.model_name_or_path), - config=config, - cache_dir=model_args.cache_dir, - revision=model_args.model_revision, - token=True if model_args.use_auth_token else None, - ) - - # Tokenizer check: this script requires a fast tokenizer. - if not isinstance(tokenizer, PreTrainedTokenizerFast): - raise ValueError( - "This example script only works for models that have a fast tokenizer. Checkout the big table of models at" - " https://huggingface.co/transformers/index.html#supported-frameworks to find the model types that meet" - " this requirement" - ) - - # Preprocessing the datasets. - # Preprocessing is slightly different for training and evaluation. 
- if training_args.do_train or model_args.do_calib: - column_names = raw_datasets["train"].column_names - elif training_args.do_eval or model_args.save_onnx: - column_names = raw_datasets["validation"].column_names - else: - column_names = raw_datasets["test"].column_names - question_column_name = "question" if "question" in column_names else column_names[0] - context_column_name = "context" if "context" in column_names else column_names[1] - answer_column_name = "answers" if "answers" in column_names else column_names[2] - - # Padding side determines if we do (question|context) or (context|question). - pad_on_right = tokenizer.padding_side == "right" - - if data_args.max_seq_length > tokenizer.model_max_length: - logger.warning( - f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the " - f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." - ) - max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length) - - # Training preprocessing - def prepare_train_features(examples): - # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results - # in one example possible giving several features when a context is long, each of those features having a - # context that overlaps a bit the context of the previous feature. - tokenized_examples = tokenizer( - examples[question_column_name if pad_on_right else context_column_name], - examples[context_column_name if pad_on_right else question_column_name], - truncation="only_second" if pad_on_right else "only_first", - max_length=max_seq_length, - stride=data_args.doc_stride, - return_overflowing_tokens=True, - return_offsets_mapping=True, - padding="max_length" if data_args.pad_to_max_length else False, - ) - - # Since one example might give us several features if it has a long context, we need a map from a feature to - # its corresponding example. This key gives us just that. - sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping") - # The offset mappings will give us a map from token to character position in the original context. This will - # help us compute the start_positions and end_positions. - offset_mapping = tokenized_examples.pop("offset_mapping") - - # Let's label those examples! - tokenized_examples["start_positions"] = [] - tokenized_examples["end_positions"] = [] - - for i, offsets in enumerate(offset_mapping): - # We will label impossible answers with the index of the CLS token. - input_ids = tokenized_examples["input_ids"][i] - cls_index = input_ids.index(tokenizer.cls_token_id) - - # Grab the sequence corresponding to that example (to know what is the context and what is the question). - sequence_ids = tokenized_examples.sequence_ids(i) - - # One example can give several spans, this is the index of the example containing this span of text. - sample_index = sample_mapping[i] - answers = examples[answer_column_name][sample_index] - # If no answers are given, set the cls_index as answer. - if len(answers["answer_start"]) == 0: - tokenized_examples["start_positions"].append(cls_index) - tokenized_examples["end_positions"].append(cls_index) - else: - # Start/end character index of the answer in the text. - start_char = answers["answer_start"][0] - end_char = start_char + len(answers["text"][0]) - - # Start token index of the current span in the text. 
- token_start_index = 0 - while sequence_ids[token_start_index] != (1 if pad_on_right else 0): - token_start_index += 1 - - # End token index of the current span in the text. - token_end_index = len(input_ids) - 1 - while sequence_ids[token_end_index] != (1 if pad_on_right else 0): - token_end_index -= 1 - - # Detect if the answer is out of the span (in which case this feature is labeled with the CLS index). - if not (offsets[token_start_index][0] <= start_char and offsets[token_end_index][1] >= end_char): - tokenized_examples["start_positions"].append(cls_index) - tokenized_examples["end_positions"].append(cls_index) - else: - # Otherwise move the token_start_index and token_end_index to the two ends of the answer. - # Note: we could go after the last offset if the answer is the last word (edge case). - while token_start_index < len(offsets) and offsets[token_start_index][0] <= start_char: - token_start_index += 1 - tokenized_examples["start_positions"].append(token_start_index - 1) - while offsets[token_end_index][1] >= end_char: - token_end_index -= 1 - tokenized_examples["end_positions"].append(token_end_index + 1) - - return tokenized_examples - - if training_args.do_train or model_args.do_calib: - if "train" not in raw_datasets: - raise ValueError("--do_train requires a train dataset") - train_dataset = raw_datasets["train"] - if data_args.max_train_samples is not None: - # We will select sample from whole data if argument is specified - max_train_samples = min(len(train_dataset), data_args.max_train_samples) - train_dataset = train_dataset.select(range(max_train_samples)) - # Create train feature from dataset - with training_args.main_process_first(desc="train dataset map pre-processing"): - train_dataset = train_dataset.map( - prepare_train_features, - batched=True, - num_proc=data_args.preprocessing_num_workers, - remove_columns=column_names, - load_from_cache_file=not data_args.overwrite_cache, - desc="Running tokenizer on train dataset", - ) - if data_args.max_train_samples is not None: - # Number of samples might increase during Feature Creation, We select only specified max samples - max_train_samples = min(len(train_dataset), data_args.max_train_samples) - train_dataset = train_dataset.select(range(max_train_samples)) - - # Validation preprocessing - def prepare_validation_features(examples): - # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results - # in one example possible giving several features when a context is long, each of those features having a - # context that overlaps a bit the context of the previous feature. - tokenized_examples = tokenizer( - examples[question_column_name if pad_on_right else context_column_name], - examples[context_column_name if pad_on_right else question_column_name], - truncation="only_second" if pad_on_right else "only_first", - max_length=max_seq_length, - stride=data_args.doc_stride, - return_overflowing_tokens=True, - return_offsets_mapping=True, - padding="max_length" if data_args.pad_to_max_length else False, - ) - - # Since one example might give us several features if it has a long context, we need a map from a feature to - # its corresponding example. This key gives us just that. - sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping") - - # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the - # corresponding example_id and we will store the offset mappings. 
- tokenized_examples["example_id"] = [] - - for i in range(len(tokenized_examples["input_ids"])): - # Grab the sequence corresponding to that example (to know what is the context and what is the question). - sequence_ids = tokenized_examples.sequence_ids(i) - context_index = 1 if pad_on_right else 0 - - # One example can give several spans, this is the index of the example containing this span of text. - sample_index = sample_mapping[i] - tokenized_examples["example_id"].append(examples["id"][sample_index]) - - # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token - # position is part of the context or not. - tokenized_examples["offset_mapping"][i] = [ - (o if sequence_ids[k] == context_index else None) - for k, o in enumerate(tokenized_examples["offset_mapping"][i]) - ] - - return tokenized_examples - - if training_args.do_eval or model_args.save_onnx: - if "validation" not in raw_datasets: - raise ValueError("--do_eval requires a validation dataset") - eval_examples = raw_datasets["validation"] - if data_args.max_eval_samples is not None: - # We will select sample from whole data - max_eval_samples = min(len(eval_examples), data_args.max_eval_samples) - eval_examples = eval_examples.select(range(max_eval_samples)) - # Validation Feature Creation - with training_args.main_process_first(desc="validation dataset map pre-processing"): - eval_dataset = eval_examples.map( - prepare_validation_features, - batched=True, - num_proc=data_args.preprocessing_num_workers, - remove_columns=column_names, - load_from_cache_file=not data_args.overwrite_cache, - desc="Running tokenizer on validation dataset", - ) - if data_args.max_eval_samples is not None: - # During Feature creation dataset samples might increase, we will select required samples again - max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples) - eval_dataset = eval_dataset.select(range(max_eval_samples)) - - if training_args.do_predict: - if "test" not in raw_datasets: - raise ValueError("--do_predict requires a test dataset") - predict_examples = raw_datasets["test"] - if data_args.max_predict_samples is not None: - # We will select sample from whole data - predict_examples = predict_examples.select(range(data_args.max_predict_samples)) - # Predict Feature Creation - with training_args.main_process_first(desc="prediction dataset map pre-processing"): - predict_dataset = predict_examples.map( - prepare_validation_features, - batched=True, - num_proc=data_args.preprocessing_num_workers, - remove_columns=column_names, - load_from_cache_file=not data_args.overwrite_cache, - desc="Running tokenizer on prediction dataset", - ) - if data_args.max_predict_samples is not None: - # During Feature creation dataset samples might increase, we will select required samples again - max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples) - predict_dataset = predict_dataset.select(range(max_predict_samples)) - - # Data collator - # We have already padded to max length if the corresponding flag is True, otherwise we need to pad in the data - # collator. - data_collator = ( - default_data_collator - if data_args.pad_to_max_length - else DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None) - ) - - # Post-processing: - def post_processing_function(examples, features, predictions, stage="eval"): - # Post-processing: we match the start logits and end logits to answers in the original context. 
- predictions = postprocess_qa_predictions( - examples=examples, - features=features, - predictions=predictions, - version_2_with_negative=data_args.version_2_with_negative, - n_best_size=data_args.n_best_size, - max_answer_length=data_args.max_answer_length, - null_score_diff_threshold=data_args.null_score_diff_threshold, - output_dir=training_args.output_dir, - log_level=log_level, - prefix=stage, - ) - # Format the result to the format the metric expects. - if data_args.version_2_with_negative: - formatted_predictions = [ - {"id": k, "prediction_text": v, "no_answer_probability": 0.0} for k, v in predictions.items() - ] - else: - formatted_predictions = [{"id": k, "prediction_text": v} for k, v in predictions.items()] - - references = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples] - return EvalPrediction(predictions=formatted_predictions, label_ids=references) - - metric = load_metric("squad_v2" if data_args.version_2_with_negative else "squad") - - def compute_metrics(p: EvalPrediction): - return metric.compute(predictions=p.predictions, references=p.label_ids) - - # Initialize our Trainer - trainer = QuestionAnsweringTrainer( - model=model, - args=training_args, - train_dataset=train_dataset if training_args.do_train or model_args.do_calib else None, - eval_dataset=eval_dataset if training_args.do_eval or model_args.save_onnx else None, - eval_examples=eval_examples if training_args.do_eval or model_args.save_onnx else None, - tokenizer=tokenizer, - data_collator=data_collator, - post_process_function=post_processing_function, - compute_metrics=compute_metrics, - quant_trainer_args=quant_trainer_args, - ) - - # Calibration - if model_args.do_calib: - logger.info("*** Calibrate ***") - results = trainer.calibrate() - trainer.save_model() - - # Training - if training_args.do_train: - checkpoint = None - if training_args.resume_from_checkpoint is not None: - checkpoint = training_args.resume_from_checkpoint - elif last_checkpoint is not None: - checkpoint = last_checkpoint - - quant_trainer.configure_model(trainer.model, quant_trainer_args) - - train_result = trainer.train(resume_from_checkpoint=checkpoint) - trainer.save_model() # Saves the tokenizer too for easy upload - - metrics = train_result.metrics - max_train_samples = ( - data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset) - ) - metrics["train_samples"] = min(max_train_samples, len(train_dataset)) - - trainer.log_metrics("train", metrics) - trainer.save_metrics("train", metrics) - trainer.save_state() - - # Evaluation - if training_args.do_eval: - logger.info("*** Evaluate ***") - quant_trainer.configure_model(trainer.model, quant_trainer_args, eval=True) - metrics = trainer.evaluate() - - max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset) - metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset)) - - trainer.log_metrics("eval", metrics) - trainer.save_metrics("eval", metrics) - - # Prediction - if training_args.do_predict: - logger.info("*** Predict ***") - results = trainer.predict(predict_dataset, predict_examples) - metrics = results.metrics - - max_predict_samples = ( - data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset) - ) - metrics["predict_samples"] = min(max_predict_samples, len(predict_dataset)) - - trainer.log_metrics("predict", metrics) - trainer.save_metrics("predict", metrics) - - if training_args.push_to_hub: - kwargs = 
{"finetuned_from": model_args.model_name_or_path, "tasks": "question-answering"} - if data_args.dataset_name is not None: - kwargs["dataset_tags"] = data_args.dataset_name - if data_args.dataset_config_name is not None: - kwargs["dataset_args"] = data_args.dataset_config_name - kwargs["dataset"] = f"{data_args.dataset_name} {data_args.dataset_config_name}" - else: - kwargs["dataset"] = data_args.dataset_name - - trainer.push_to_hub(**kwargs) - - if model_args.save_onnx: - logger.info("Exporting model to onnx") - results = trainer.save_onnx(output_dir=training_args.output_dir) - - -def _mp_fn(index): - # For xla_spawn (TPUs) - main() - - -if __name__ == "__main__": - main() diff --git a/examples/research_projects/quantization-qdqbert/trainer_quant_qa.py b/examples/research_projects/quantization-qdqbert/trainer_quant_qa.py deleted file mode 100644 index a56d875354d..00000000000 --- a/examples/research_projects/quantization-qdqbert/trainer_quant_qa.py +++ /dev/null @@ -1,212 +0,0 @@ -# coding=utf-8 -# Copyright 2020 The HuggingFace Team All rights reserved. -# Copyright 2021 NVIDIA Corporation. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" -A subclass of `Trainer` specific to Question-Answering tasks -""" - -import logging -import os - -import quant_trainer -import torch -from torch.utils.data import DataLoader - -from transformers import Trainer, is_torch_xla_available -from transformers.trainer_utils import PredictionOutput - - -logger = logging.getLogger(__name__) - -if is_torch_xla_available(): - import torch_xla.core.xla_model as xm - import torch_xla.debug.metrics as met - - -class QuestionAnsweringTrainer(Trainer): - def __init__(self, *args, eval_examples=None, post_process_function=None, quant_trainer_args=None, **kwargs): - super().__init__(*args, **kwargs) - self.eval_examples = eval_examples - self.post_process_function = post_process_function - self.quant_trainer_args = quant_trainer_args - self.calib_num = 128 # default number of calibration samples - - def get_calib_dataloader(self, calib_dataset=None): - """ - Returns the calibration dataloader :class:`~torch.utils.data.DataLoader`. 
- - Args: - calib_dataset (:obj:`torch.utils.data.Dataset`, `optional`) - """ - if calib_dataset is None and self.calib_dataset is None: - raise ValueError("Trainer: calibration requires an calib_dataset.") - calib_dataset = calib_dataset if calib_dataset is not None else self.calib_dataset - - calib_dataset = self._remove_unused_columns(calib_dataset, description="Calibration") - - return DataLoader( - calib_dataset, - batch_size=self.args.eval_batch_size, - collate_fn=self.data_collator, - drop_last=self.args.dataloader_drop_last, - num_workers=self.args.dataloader_num_workers, - pin_memory=self.args.dataloader_pin_memory, - shuffle=True, - ) - - def calibrate(self, calib_dataset=None): - calib_dataset = self.train_dataset if calib_dataset is None else calib_dataset - calib_dataloader = self.get_calib_dataloader(calib_dataset) - - model = self.model - quant_trainer.configure_model(model, self.quant_trainer_args, calib=True) - model.eval() - quant_trainer.enable_calibration(model) - - logger.info("***** Running calibration *****") - logger.info(f" Num examples = {self.calib_num}") - logger.info(f" Batch size = {calib_dataloader.batch_size}") - - for step, inputs in enumerate(calib_dataloader): - # Prediction step - loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only=True) - if (step + 1) * calib_dataloader.batch_size >= self.calib_num: - break - - quant_trainer.finish_calibration(model, self.quant_trainer_args) - self.model = model - - def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"): - eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset - eval_dataloader = self.get_eval_dataloader(eval_dataset) - eval_examples = self.eval_examples if eval_examples is None else eval_examples - - # Temporarily disable metric computation, we will do it in the loop here. - compute_metrics = self.compute_metrics - self.compute_metrics = None - eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop - try: - output = eval_loop( - eval_dataloader, - description="Evaluation", - # No point gathering the predictions if there are no metrics, otherwise we defer to - # self.args.prediction_loss_only - prediction_loss_only=True if compute_metrics is None else None, - ignore_keys=ignore_keys, - ) - finally: - self.compute_metrics = compute_metrics - - if self.post_process_function is not None and self.compute_metrics is not None: - eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions) - metrics = self.compute_metrics(eval_preds) - - # Prefix all keys with metric_key_prefix + '_' - for key in list(metrics.keys()): - if not key.startswith(f"{metric_key_prefix}_"): - metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key) - - self.log(metrics) - else: - metrics = {} - - if self.args.tpu_metrics_debug or self.args.debug: - # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) - xm.master_print(met.metrics_report()) - - self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics) - return metrics - - def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"): - predict_dataloader = self.get_test_dataloader(predict_dataset) - - # Temporarily disable metric computation, we will do it in the loop here. 
- compute_metrics = self.compute_metrics - self.compute_metrics = None - eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop - try: - output = eval_loop( - predict_dataloader, - description="Prediction", - # No point gathering the predictions if there are no metrics, otherwise we defer to - # self.args.prediction_loss_only - prediction_loss_only=True if compute_metrics is None else None, - ignore_keys=ignore_keys, - ) - finally: - self.compute_metrics = compute_metrics - - if self.post_process_function is None or self.compute_metrics is None: - return output - - predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict") - metrics = self.compute_metrics(predictions) - - # Prefix all keys with metric_key_prefix + '_' - for key in list(metrics.keys()): - if not key.startswith(f"{metric_key_prefix}_"): - metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key) - - return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics) - - def save_onnx(self, output_dir="./"): - eval_dataset = self.eval_dataset - eval_dataloader = self.get_eval_dataloader(eval_dataset) - - batch = next(iter(eval_dataloader)) - - # saving device - to make it consistent - device = torch.device("cuda" if torch.cuda.is_available() else "cpu") - - # convert to tuple - input_tuple = tuple(v.to(device) for k, v in batch.items()) - - logger.info("Converting model to be onnx compatible") - from pytorch_quantization.nn import TensorQuantizer - - TensorQuantizer.use_fb_fake_quant = True - - model = self.model.to(device) - - model.eval() - model.float() - - model_to_save = model.module if hasattr(model, "module") else model - quant_trainer.configure_model(model_to_save, self.quant_trainer_args) - - output_model_file = os.path.join(output_dir, "model.onnx") - logger.info(f"exporting model to {output_model_file}") - - axes = {0: "batch_size", 1: "seq_len"} - - torch.onnx.export( - model_to_save, - input_tuple, - output_model_file, - export_params=True, - opset_version=13, - do_constant_folding=True, - input_names=["input_ids", "attention_mask", "token_type_ids"], - output_names=["output_start_logits", "output_end_logits"], - dynamic_axes={ - "input_ids": axes, - "attention_mask": axes, - "token_type_ids": axes, - "output_start_logits": axes, - "output_end_logits": axes, - }, - verbose=True, - ) - logger.info("onnx export finished") diff --git a/examples/research_projects/quantization-qdqbert/utils_qa.py b/examples/research_projects/quantization-qdqbert/utils_qa.py deleted file mode 100644 index e90d6c4747c..00000000000 --- a/examples/research_projects/quantization-qdqbert/utils_qa.py +++ /dev/null @@ -1,435 +0,0 @@ -# coding=utf-8 -# Copyright 2020 The HuggingFace Team All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" -Post-processing utilities for question answering. 
-""" - -import collections -import json -import logging -import os -from typing import Optional, Tuple - -import numpy as np -from tqdm.auto import tqdm - - -logger = logging.getLogger(__name__) - - -def postprocess_qa_predictions( - examples, - features, - predictions: Tuple[np.ndarray, np.ndarray], - version_2_with_negative: bool = False, - n_best_size: int = 20, - max_answer_length: int = 30, - null_score_diff_threshold: float = 0.0, - output_dir: Optional[str] = None, - prefix: Optional[str] = None, - log_level: Optional[int] = logging.WARNING, -): - """ - Post-processes the predictions of a question-answering model to convert them to answers that are substrings of the - original contexts. This is the base postprocessing functions for models that only return start and end logits. - - Args: - examples: The non-preprocessed dataset (see the main script for more information). - features: The processed dataset (see the main script for more information). - predictions (:obj:`Tuple[np.ndarray, np.ndarray]`): - The predictions of the model: two arrays containing the start logits and the end logits respectively. Its - first dimension must match the number of elements of :obj:`features`. - version_2_with_negative (:obj:`bool`, `optional`, defaults to :obj:`False`): - Whether or not the underlying dataset contains examples with no answers. - n_best_size (:obj:`int`, `optional`, defaults to 20): - The total number of n-best predictions to generate when looking for an answer. - max_answer_length (:obj:`int`, `optional`, defaults to 30): - The maximum length of an answer that can be generated. This is needed because the start and end predictions - are not conditioned on one another. - null_score_diff_threshold (:obj:`float`, `optional`, defaults to 0): - The threshold used to select the null answer: if the best answer has a score that is less than the score of - the null answer minus this threshold, the null answer is selected for this example (note that the score of - the null answer for an example giving several features is the minimum of the scores for the null answer on - each feature: all features must be aligned on the fact they `want` to predict a null answer). - - Only useful when :obj:`version_2_with_negative` is :obj:`True`. - output_dir (:obj:`str`, `optional`): - If provided, the dictionaries of predictions, n_best predictions (with their scores and logits) and, if - :obj:`version_2_with_negative=True`, the dictionary of the scores differences between best and null - answers, are saved in `output_dir`. - prefix (:obj:`str`, `optional`): - If provided, the dictionaries mentioned above are saved with `prefix` added to their names. - log_level (:obj:`int`, `optional`, defaults to ``logging.WARNING``): - ``logging`` log level (e.g., ``logging.WARNING``) - """ - if len(predictions) != 2: - raise ValueError("`predictions` should be a tuple with two elements (start_logits, end_logits).") - all_start_logits, all_end_logits = predictions - - if len(predictions[0]) != len(features): - raise ValueError(f"Got {len(predictions[0])} predictions and {len(features)} features.") - - # Build a map example to its corresponding features. - example_id_to_index = {k: i for i, k in enumerate(examples["id"])} - features_per_example = collections.defaultdict(list) - for i, feature in enumerate(features): - features_per_example[example_id_to_index[feature["example_id"]]].append(i) - - # The dictionaries we have to fill. 
- all_predictions = collections.OrderedDict() - all_nbest_json = collections.OrderedDict() - if version_2_with_negative: - scores_diff_json = collections.OrderedDict() - - # Logging. - logger.setLevel(log_level) - logger.info(f"Post-processing {len(examples)} example predictions split into {len(features)} features.") - - # Let's loop over all the examples! - for example_index, example in enumerate(tqdm(examples)): - # Those are the indices of the features associated to the current example. - feature_indices = features_per_example[example_index] - - min_null_prediction = None - prelim_predictions = [] - - # Looping through all the features associated to the current example. - for feature_index in feature_indices: - # We grab the predictions of the model for this feature. - start_logits = all_start_logits[feature_index] - end_logits = all_end_logits[feature_index] - # This is what will allow us to map some the positions in our logits to span of texts in the original - # context. - offset_mapping = features[feature_index]["offset_mapping"] - # Optional `token_is_max_context`, if provided we will remove answers that do not have the maximum context - # available in the current feature. - token_is_max_context = features[feature_index].get("token_is_max_context", None) - - # Update minimum null prediction. - feature_null_score = start_logits[0] + end_logits[0] - if min_null_prediction is None or min_null_prediction["score"] > feature_null_score: - min_null_prediction = { - "offsets": (0, 0), - "score": feature_null_score, - "start_logit": start_logits[0], - "end_logit": end_logits[0], - } - - # Go through all possibilities for the `n_best_size` greater start and end logits. - start_indexes = np.argsort(start_logits)[-1 : -n_best_size - 1 : -1].tolist() - end_indexes = np.argsort(end_logits)[-1 : -n_best_size - 1 : -1].tolist() - for start_index in start_indexes: - for end_index in end_indexes: - # Don't consider out-of-scope answers, either because the indices are out of bounds or correspond - # to part of the input_ids that are not in the context. - if ( - start_index >= len(offset_mapping) - or end_index >= len(offset_mapping) - or offset_mapping[start_index] is None - or len(offset_mapping[start_index]) < 2 - or offset_mapping[end_index] is None - or len(offset_mapping[end_index]) < 2 - ): - continue - # Don't consider answers with a length that is either < 0 or > max_answer_length. - if end_index < start_index or end_index - start_index + 1 > max_answer_length: - continue - # Don't consider answer that don't have the maximum context available (if such information is - # provided). - if token_is_max_context is not None and not token_is_max_context.get(str(start_index), False): - continue - - prelim_predictions.append( - { - "offsets": (offset_mapping[start_index][0], offset_mapping[end_index][1]), - "score": start_logits[start_index] + end_logits[end_index], - "start_logit": start_logits[start_index], - "end_logit": end_logits[end_index], - } - ) - if version_2_with_negative: - # Add the minimum null prediction - prelim_predictions.append(min_null_prediction) - null_score = min_null_prediction["score"] - - # Only keep the best `n_best_size` predictions. - predictions = sorted(prelim_predictions, key=lambda x: x["score"], reverse=True)[:n_best_size] - - # Add back the minimum null prediction if it was removed because of its low score. 
- if version_2_with_negative and not any(p["offsets"] == (0, 0) for p in predictions): - predictions.append(min_null_prediction) - - # Use the offsets to gather the answer text in the original context. - context = example["context"] - for pred in predictions: - offsets = pred.pop("offsets") - pred["text"] = context[offsets[0] : offsets[1]] - - # In the very rare edge case we have not a single non-null prediction, we create a fake prediction to avoid - # failure. - if len(predictions) == 0 or (len(predictions) == 1 and predictions[0]["text"] == ""): - predictions.insert(0, {"text": "empty", "start_logit": 0.0, "end_logit": 0.0, "score": 0.0}) - - # Compute the softmax of all scores (we do it with numpy to stay independent from torch/tf in this file, using - # the LogSumExp trick). - scores = np.array([pred.pop("score") for pred in predictions]) - exp_scores = np.exp(scores - np.max(scores)) - probs = exp_scores / exp_scores.sum() - - # Include the probabilities in our predictions. - for prob, pred in zip(probs, predictions): - pred["probability"] = prob - - # Pick the best prediction. If the null answer is not possible, this is easy. - if not version_2_with_negative: - all_predictions[example["id"]] = predictions[0]["text"] - else: - # Otherwise we first need to find the best non-empty prediction. - i = 0 - while predictions[i]["text"] == "": - i += 1 - best_non_null_pred = predictions[i] - - # Then we compare to the null prediction using the threshold. - score_diff = null_score - best_non_null_pred["start_logit"] - best_non_null_pred["end_logit"] - scores_diff_json[example["id"]] = float(score_diff) # To be JSON-serializable. - if score_diff > null_score_diff_threshold: - all_predictions[example["id"]] = "" - else: - all_predictions[example["id"]] = best_non_null_pred["text"] - - # Make `predictions` JSON-serializable by casting np.float back to float. - all_nbest_json[example["id"]] = [ - {k: (float(v) if isinstance(v, (np.float16, np.float32, np.float64)) else v) for k, v in pred.items()} - for pred in predictions - ] - - # If we have an output_dir, let's save all those dicts. 
- if output_dir is not None: - if not os.path.isdir(output_dir): - raise EnvironmentError(f"{output_dir} is not a directory.") - - prediction_file = os.path.join( - output_dir, "predictions.json" if prefix is None else f"{prefix}_predictions.json" - ) - nbest_file = os.path.join( - output_dir, "nbest_predictions.json" if prefix is None else f"{prefix}_nbest_predictions.json" - ) - if version_2_with_negative: - null_odds_file = os.path.join( - output_dir, "null_odds.json" if prefix is None else f"{prefix}_null_odds.json" - ) - - logger.info(f"Saving predictions to {prediction_file}.") - with open(prediction_file, "w") as writer: - writer.write(json.dumps(all_predictions, indent=4) + "\n") - logger.info(f"Saving nbest_preds to {nbest_file}.") - with open(nbest_file, "w") as writer: - writer.write(json.dumps(all_nbest_json, indent=4) + "\n") - if version_2_with_negative: - logger.info(f"Saving null_odds to {null_odds_file}.") - with open(null_odds_file, "w") as writer: - writer.write(json.dumps(scores_diff_json, indent=4) + "\n") - - return all_predictions - - -def postprocess_qa_predictions_with_beam_search( - examples, - features, - predictions: Tuple[np.ndarray, np.ndarray], - version_2_with_negative: bool = False, - n_best_size: int = 20, - max_answer_length: int = 30, - start_n_top: int = 5, - end_n_top: int = 5, - output_dir: Optional[str] = None, - prefix: Optional[str] = None, - log_level: Optional[int] = logging.WARNING, -): - """ - Post-processes the predictions of a question-answering model with beam search to convert them to answers that are substrings of the - original contexts. This is the postprocessing functions for models that return start and end logits, indices, as well as - cls token predictions. - - Args: - examples: The non-preprocessed dataset (see the main script for more information). - features: The processed dataset (see the main script for more information). - predictions (:obj:`Tuple[np.ndarray, np.ndarray]`): - The predictions of the model: two arrays containing the start logits and the end logits respectively. Its - first dimension must match the number of elements of :obj:`features`. - version_2_with_negative (:obj:`bool`, `optional`, defaults to :obj:`False`): - Whether or not the underlying dataset contains examples with no answers. - n_best_size (:obj:`int`, `optional`, defaults to 20): - The total number of n-best predictions to generate when looking for an answer. - max_answer_length (:obj:`int`, `optional`, defaults to 30): - The maximum length of an answer that can be generated. This is needed because the start and end predictions - are not conditioned on one another. - start_n_top (:obj:`int`, `optional`, defaults to 5): - The number of top start logits too keep when searching for the :obj:`n_best_size` predictions. - end_n_top (:obj:`int`, `optional`, defaults to 5): - The number of top end logits too keep when searching for the :obj:`n_best_size` predictions. - output_dir (:obj:`str`, `optional`): - If provided, the dictionaries of predictions, n_best predictions (with their scores and logits) and, if - :obj:`version_2_with_negative=True`, the dictionary of the scores differences between best and null - answers, are saved in `output_dir`. - prefix (:obj:`str`, `optional`): - If provided, the dictionaries mentioned above are saved with `prefix` added to their names. 
- log_level (:obj:`int`, `optional`, defaults to ``logging.WARNING``): - ``logging`` log level (e.g., ``logging.WARNING``) - """ - if len(predictions) != 5: - raise ValueError("`predictions` should be a tuple with five elements.") - start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits = predictions - - if len(predictions[0]) != len(features): - raise ValueError(f"Got {len(predictions[0])} predictions and {len(features)} features.") - - # Build a map example to its corresponding features. - example_id_to_index = {k: i for i, k in enumerate(examples["id"])} - features_per_example = collections.defaultdict(list) - for i, feature in enumerate(features): - features_per_example[example_id_to_index[feature["example_id"]]].append(i) - - # The dictionaries we have to fill. - all_predictions = collections.OrderedDict() - all_nbest_json = collections.OrderedDict() - scores_diff_json = collections.OrderedDict() if version_2_with_negative else None - - # Logging. - logger.setLevel(log_level) - logger.info(f"Post-processing {len(examples)} example predictions split into {len(features)} features.") - - # Let's loop over all the examples! - for example_index, example in enumerate(tqdm(examples)): - # Those are the indices of the features associated to the current example. - feature_indices = features_per_example[example_index] - - min_null_score = None - prelim_predictions = [] - - # Looping through all the features associated to the current example. - for feature_index in feature_indices: - # We grab the predictions of the model for this feature. - start_log_prob = start_top_log_probs[feature_index] - start_indexes = start_top_index[feature_index] - end_log_prob = end_top_log_probs[feature_index] - end_indexes = end_top_index[feature_index] - feature_null_score = cls_logits[feature_index] - # This is what will allow us to map some the positions in our logits to span of texts in the original - # context. - offset_mapping = features[feature_index]["offset_mapping"] - # Optional `token_is_max_context`, if provided we will remove answers that do not have the maximum context - # available in the current feature. - token_is_max_context = features[feature_index].get("token_is_max_context", None) - - # Update minimum null prediction - if min_null_score is None or feature_null_score < min_null_score: - min_null_score = feature_null_score - - # Go through all possibilities for the `n_start_top`/`n_end_top` greater start and end logits. - for i in range(start_n_top): - for j in range(end_n_top): - start_index = int(start_indexes[i]) - j_index = i * end_n_top + j - end_index = int(end_indexes[j_index]) - # Don't consider out-of-scope answers (last part of the test should be unnecessary because of the - # p_mask but let's not take any risk) - if ( - start_index >= len(offset_mapping) - or end_index >= len(offset_mapping) - or offset_mapping[start_index] is None - or offset_mapping[end_index] is None - ): - continue - # Don't consider answers with a length negative or > max_answer_length. - if end_index < start_index or end_index - start_index + 1 > max_answer_length: - continue - # Don't consider answer that don't have the maximum context available (if such information is - # provided). 
- if token_is_max_context is not None and not token_is_max_context.get(str(start_index), False): - continue - prelim_predictions.append( - { - "offsets": (offset_mapping[start_index][0], offset_mapping[end_index][1]), - "score": start_log_prob[i] + end_log_prob[j_index], - "start_log_prob": start_log_prob[i], - "end_log_prob": end_log_prob[j_index], - } - ) - - # Only keep the best `n_best_size` predictions. - predictions = sorted(prelim_predictions, key=lambda x: x["score"], reverse=True)[:n_best_size] - - # Use the offsets to gather the answer text in the original context. - context = example["context"] - for pred in predictions: - offsets = pred.pop("offsets") - pred["text"] = context[offsets[0] : offsets[1]] - - # In the very rare edge case we have not a single non-null prediction, we create a fake prediction to avoid - # failure. - if len(predictions) == 0: - predictions.insert(0, {"text": "", "start_logit": -1e-6, "end_logit": -1e-6, "score": -2e-6}) - - # Compute the softmax of all scores (we do it with numpy to stay independent from torch/tf in this file, using - # the LogSumExp trick). - scores = np.array([pred.pop("score") for pred in predictions]) - exp_scores = np.exp(scores - np.max(scores)) - probs = exp_scores / exp_scores.sum() - - # Include the probabilities in our predictions. - for prob, pred in zip(probs, predictions): - pred["probability"] = prob - - # Pick the best prediction and set the probability for the null answer. - all_predictions[example["id"]] = predictions[0]["text"] - if version_2_with_negative: - scores_diff_json[example["id"]] = float(min_null_score) - - # Make `predictions` JSON-serializable by casting np.float back to float. - all_nbest_json[example["id"]] = [ - {k: (float(v) if isinstance(v, (np.float16, np.float32, np.float64)) else v) for k, v in pred.items()} - for pred in predictions - ] - - # If we have an output_dir, let's save all those dicts. - if output_dir is not None: - if not os.path.isdir(output_dir): - raise EnvironmentError(f"{output_dir} is not a directory.") - - prediction_file = os.path.join( - output_dir, "predictions.json" if prefix is None else f"{prefix}_predictions.json" - ) - nbest_file = os.path.join( - output_dir, "nbest_predictions.json" if prefix is None else f"{prefix}_nbest_predictions.json" - ) - if version_2_with_negative: - null_odds_file = os.path.join( - output_dir, "null_odds.json" if prefix is None else f"{prefix}_null_odds.json" - ) - - logger.info(f"Saving predictions to {prediction_file}.") - with open(prediction_file, "w") as writer: - writer.write(json.dumps(all_predictions, indent=4) + "\n") - logger.info(f"Saving nbest_preds to {nbest_file}.") - with open(nbest_file, "w") as writer: - writer.write(json.dumps(all_nbest_json, indent=4) + "\n") - if version_2_with_negative: - logger.info(f"Saving null_odds to {null_odds_file}.") - with open(null_odds_file, "w") as writer: - writer.write(json.dumps(scores_diff_json, indent=4) + "\n") - - return all_predictions, scores_diff_json diff --git a/examples/research_projects/rag-end2end-retriever/README.md b/examples/research_projects/rag-end2end-retriever/README.md deleted file mode 100644 index 9aa0bc5dbcb..00000000000 --- a/examples/research_projects/rag-end2end-retriever/README.md +++ /dev/null @@ -1,56 +0,0 @@ -# End-to-End finetuning of RAG (including DPR retriever) for Question Answering. - -This finetuning script is actively maintained by [Shamane Siri](https://github.com/shamanez). 
Feel free to ask questions on the [Forum](https://discuss.huggingface.co/) or post an issue on [GitHub](https://github.com/huggingface/transformers/issues/new/choose) and tag @shamanez. - -Others that helped out: Patrick von Platen (@patrickvonplaten), Quentin Lhoest (@lhoestq), and Rivindu Weerasekera (@rivinduw) - -The original RAG implementation is able to train the question encoder and generator end-to-end. -This extension enables complete end-to-end training of RAG including the context encoder in the retriever component. -Please read the [accompanying blog post](https://shamanesiri.medium.com/how-to-finetune-the-entire-rag-architecture-including-dpr-retriever-4b4385322552) for details on this implementation. - -The original RAG code has also been modified to work with the latest versions of pytorch lightning (version 1.2.10) and RAY (version 1.3.0). All other implementation details remain the same as the [original RAG code](https://github.com/huggingface/transformers/tree/main/examples/research_projects/rag). -Read more about RAG at https://arxiv.org/abs/2005.11401. - -This code can be modified to experiment with other research on retrieval augmented models which include training of the retriever (e.g. [REALM](https://arxiv.org/abs/2002.08909) and [MARGE](https://arxiv.org/abs/2006.15020)). - -To start training, use the bash script (finetune_rag_ray_end2end.sh) in this folder. This script also includes descriptions on each command-line argument used. - -# Latest Update - -⚠️ Updated the rag-end2end-retriever to be compatible with PL==1.6.4 and RAY==1.13.0 (latest versions to the date 2022-June-11) - -# Note - -⚠️ This project should be run with pytorch-lightning==1.3.1 which has a potential security vulnerability - -# Testing - -The following two bash scripts can be used to quickly test the implementation. -1. sh ./test_run/test_finetune.sh script - - Tests the full end-to-end fine-tuning ability with a dummy knowlendge-base and dummy training dataset (check test_dir directory). - - Users can replace the dummy dataset and knowledge-base with their own to do their own finetuning. - - Please read the comments in the test_finetune.sh file. -2. sh ./test_run/test_rag_new_features.sh - - Tests the newly added functions (set_context_encoder and set_context_encoder_tokenizer) related to modeling rag. - - This is sufficient to check the model's ability to use the set functions correctly. - - - -# Comparison of end2end RAG (including DPR finetuning) VS original-RAG - -We conducted a simple experiment to investigate the effectiveness of this end2end training extension using the SQuAD dataset. Please execute the following steps to reproduce the results. - -- Create a knowledge-base using all the context passages in the SQuAD dataset with their respective titles. -- Use the question-answer pairs as training data. -- Train the system for 10 epochs. -- Test the Exact Match (EM) score with the SQuAD dataset's validation set. -- Training dataset, the knowledge-base, and hyperparameters used in experiments can be accessed from [here](https://drive.google.com/drive/folders/1qyzV-PaEARWvaU_jjpnU_NUS3U_dSjtG?usp=sharing). - -# Results - -- We train both models for 10 epochs. 
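[Editor's note — illustration only, not part of the deleted README.] The EM numbers below follow the usual SQuAD convention: prediction and reference strings are normalized (lower-cased, punctuation and the articles "a/an/the" stripped, whitespace collapsed) before exact comparison, and the per-example scores are averaged over the validation set. The project's own scorer is the `exact_match_score` imported from `utils_rag.py` in `eval_rag.py`; the sketch here uses hypothetical helper names (`normalize_answer`, `exact_match`) purely to show the metric.

```python
import re
import string


def normalize_answer(text: str) -> str:
    """Lower-case, drop punctuation and articles, collapse whitespace (SQuAD convention)."""
    text = text.lower()
    text = "".join(ch for ch in text if ch not in set(string.punctuation))
    text = re.sub(r"\b(a|an|the)\b", " ", text)
    return " ".join(text.split())


def exact_match(prediction: str, ground_truths: list[str]) -> float:
    """1.0 if the normalized prediction equals any normalized reference answer, else 0.0."""
    return float(any(normalize_answer(prediction) == normalize_answer(gt) for gt in ground_truths))


# Averaging exact_match() over all validation question/answer pairs yields the EM scores in the table below.
```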
- -| Model Type | EM-Score| -| --------------------| --------| -| RAG-original | 28.12 | -| RAG-end2end with DPR| 40.02 | diff --git a/examples/research_projects/rag-end2end-retriever/callbacks_rag.py b/examples/research_projects/rag-end2end-retriever/callbacks_rag.py deleted file mode 100644 index 09a30ff6d5c..00000000000 --- a/examples/research_projects/rag-end2end-retriever/callbacks_rag.py +++ /dev/null @@ -1,119 +0,0 @@ -import logging -from pathlib import Path - -import numpy as np -import pytorch_lightning as pl -import torch -from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint -from pytorch_lightning.utilities import rank_zero_only -from utils_rag import save_json - - -def count_trainable_parameters(model): - model_parameters = filter(lambda p: p.requires_grad, model.parameters()) - params = sum([np.prod(p.size()) for p in model_parameters]) - return params - - -logger = logging.getLogger(__name__) - - -def get_checkpoint_callback(output_dir, metric): - """Saves the best model by validation EM score.""" - if metric == "rouge2": - exp = "{val_avg_rouge2:.4f}-{step_count}" - elif metric == "bleu": - exp = "{val_avg_bleu:.4f}-{step_count}" - elif metric == "em": - exp = "{val_avg_em:.4f}-{step_count}" - elif metric == "loss": - exp = "{val_avg_loss:.4f}-{step_count}" - else: - raise NotImplementedError( - f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this" - " function." - ) - - checkpoint_callback = ModelCheckpoint( - dirpath=output_dir, - filename=exp, - monitor=f"val_{metric}", - mode="max", - save_top_k=1, - every_n_epochs=1, # works only with PL > 1.3 - ) - - return checkpoint_callback - - -def get_early_stopping_callback(metric, patience): - return EarlyStopping( - monitor=f"val_{metric}", # does this need avg? - mode="min" if "loss" in metric else "max", - patience=patience, - verbose=True, - ) - - -class Seq2SeqLoggingCallback(pl.Callback): - def on_batch_end(self, trainer, pl_module): - lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)} - pl_module.logger.log_metrics(lrs) - - @rank_zero_only - def _write_logs( - self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True - ) -> None: - logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****") - metrics = trainer.callback_metrics - trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]}) - # Log results - od = Path(pl_module.hparams.output_dir) - if type_path == "test": - results_file = od / "test_results.txt" - generations_file = od / "test_generations.txt" - else: - # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json - # If people want this it will be easy enough to add back. 
- results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt" - generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt" - results_file.parent.mkdir(exist_ok=True) - generations_file.parent.mkdir(exist_ok=True) - with open(results_file, "a+") as writer: - for key in sorted(metrics): - if key in ["log", "progress_bar", "preds"]: - continue - val = metrics[key] - if isinstance(val, torch.Tensor): - val = val.item() - msg = f"{key}: {val:.6f}\n" - writer.write(msg) - - if not save_generations: - return - - if "preds" in metrics: - content = "\n".join(metrics["preds"]) - generations_file.open("w+").write(content) - - @rank_zero_only - def on_train_start(self, trainer, pl_module): - try: - npars = pl_module.model.model.num_parameters() - except AttributeError: - npars = pl_module.model.num_parameters() - - n_trainable_pars = count_trainable_parameters(pl_module) - # mp stands for million parameters - trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6}) - - @rank_zero_only - def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule): - save_json(pl_module.metrics, pl_module.metrics_save_path) - return self._write_logs(trainer, pl_module, "test") - - @rank_zero_only - def on_validation_end(self, trainer: pl.Trainer, pl_module): - save_json(pl_module.metrics, pl_module.metrics_save_path) - # Uncommenting this will save val generations - # return self._write_logs(trainer, pl_module, "valid") diff --git a/examples/research_projects/rag-end2end-retriever/distributed_ray_retriever.py b/examples/research_projects/rag-end2end-retriever/distributed_ray_retriever.py deleted file mode 100644 index f97467292c2..00000000000 --- a/examples/research_projects/rag-end2end-retriever/distributed_ray_retriever.py +++ /dev/null @@ -1,185 +0,0 @@ -import logging -import random - -import ray - -from transformers import RagConfig, RagRetriever, RagTokenizer -from transformers.models.rag.retrieval_rag import CustomHFIndex - - -logger = logging.getLogger(__name__) - - -class RayRetriever: - def __init__(self): - self.initialized = False - - def create_rag_retriever(self, config, question_encoder_tokenizer, generator_tokenizer, index): - if not self.initialized: - self.retriever = RagRetriever( - config, - question_encoder_tokenizer=question_encoder_tokenizer, - generator_tokenizer=generator_tokenizer, - index=index, - init_retrieval=False, - ) - self.initialized = True - - def init_retrieval(self): - self.retriever.index.init_index() - - def clear_object(self): - # delete the old self.retriever object before assigning the new index - del self.retriever - self.initialized = False - - def retrieve(self, question_hidden_states, n_docs): - doc_ids, retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states, n_docs) - doc_dicts = self.retriever.index.get_doc_dicts(doc_ids) - return doc_ids, retrieved_doc_embeds, doc_dicts - - -class RagRayDistributedRetriever(RagRetriever): - """ - A distributed retriever built on top of the ``Ray`` API, a library - for building distributed applications (https://docs.ray.io/en/master/). - package. During training, all training workers initialize their own - instance of a `RagRayDistributedRetriever`, and each instance of - this distributed retriever shares a common set of Retrieval Ray - Actors (https://docs.ray.io/en/master/walkthrough.html#remote - -classes-actors) that load the index on separate processes. 
Ray - handles the communication between the `RagRayDistributedRetriever` - instances and the remote Ray actors. If training is done in a - non-distributed setup, the index will simply be loaded in the same - process as the training worker and Ray will not be used. - - Args: - config (:class:`~transformers.RagConfig`): - The configuration of the RAG model this Retriever is used with. Contains parameters indicating which ``Index`` to build. - question_encoder_tokenizer (:class:`~transformers.PreTrainedTokenizer`): - The tokenizer that was used to tokenize the question. - It is used to decode the question and then use the generator_tokenizer. - generator_tokenizer (:class:`~transformers.PreTrainedTokenizer`): - The tokenizer used for the generator part of the RagModel. - retrieval_workers (:obj:`List[ray.ActorClass(RayRetriever)]`): A list of already initialized `RayRetriever` actors. - These actor classes run on remote processes and are responsible for performing the index lookup. - index (:class:`~transformers.retrieval_rag.Index`, optional, defaults to the one defined by the configuration): - If specified, use this index instead of the one built using the configuration - """ - - def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, retrieval_workers, index=None): - if index is not None and index.is_initialized() and len(retrieval_workers) > 0: - raise ValueError( - "When using Ray for distributed fine-tuning, " - "you'll need to provide the paths instead, " - "as the dataset and the index are loaded " - "separately. More info in examples/rag/use_own_knowledge_dataset.py " - ) - - super().__init__( - config, - question_encoder_tokenizer=question_encoder_tokenizer, - generator_tokenizer=generator_tokenizer, - index=index, - init_retrieval=False, - ) - - self.retrieval_workers = retrieval_workers - self.question_encoder_tokenizer = question_encoder_tokenizer - self.generator_tokenizer = generator_tokenizer - if len(self.retrieval_workers) > 0: - ray.get( - [ - worker.create_rag_retriever.remote(config, question_encoder_tokenizer, generator_tokenizer, index) - for worker in self.retrieval_workers - ] - ) - - def init_retrieval(self): - """ - Retriever initialization function, needs to be called from the - training process. This function triggers retrieval initialization - for all retrieval actors if using distributed setting, or loads - index into current process if training is not distributed. - """ - logger.info("initializing retrieval") - - if len(self.retrieval_workers) > 0: - ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers]) - else: - # Non-distributed training. Load index into this same process. - self.index.init_index() - - def retrieve(self, question_hidden_states, n_docs): - """ - Retrieves documents for specified ``question_hidden_states``. If - running training with multiple workers, a random retrieval actor is - selected to perform the index lookup and return the result. - - Args: - question_hidden_states (:obj:`np.ndarray` of shape :obj:`(batch_size, vector_size)`): - A batch of query vectors to retrieve with. - n_docs (:obj:`int`): - The number of docs retrieved per query. - - Output: - retrieved_doc_embeds (:obj:`np.ndarray` of shape :obj:`(batch_size, n_docs, dim)` - The retrieval embeddings of the retrieved docs per query. - doc_ids (:obj:`np.ndarray` of shape :obj:`batch_size, n_docs`) - The ids of the documents in the index - doc_dicts (:obj:`List[dict]`): - The retrieved_doc_embeds examples per query. 
- """ - if len(self.retrieval_workers) > 0: - # Select a random retrieval actor. - random_worker = self.retrieval_workers[random.randint(0, len(self.retrieval_workers) - 1)] - doc_ids, retrieved_doc_embeds, doc_dicts = ray.get( - random_worker.retrieve.remote(question_hidden_states, n_docs) - ) - else: - doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs) - doc_dicts = self.index.get_doc_dicts(doc_ids) - return retrieved_doc_embeds, doc_ids, doc_dicts - - @classmethod - def get_tokenizers(cls, retriever_name_or_path, indexed_dataset=None, **kwargs): - return super(RagRayDistributedRetriever, cls).get_tokenizers(retriever_name_or_path, indexed_dataset, **kwargs) - - @classmethod - def from_pretrained(cls, retriever_name_or_path, actor_handles, indexed_dataset=None, **kwargs): - config = kwargs.pop("config", None) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs) - rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config) - question_encoder_tokenizer = rag_tokenizer.question_encoder - generator_tokenizer = rag_tokenizer.generator - - if indexed_dataset is not None: - config.index_name = "custom" - index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset) - else: - index = cls._build_index(config) - - return cls( - config, - question_encoder_tokenizer=question_encoder_tokenizer, - generator_tokenizer=generator_tokenizer, - retrieval_workers=actor_handles, - index=index, - ) - - def re_load(self): - logger.info("re-loading the new dataset with embeddings") - # access from the training loop - - ray.get([worker.clear_object.remote() for worker in self.retrieval_workers]) - - # build the index object again - index = self._build_index(self.config) - - ray.get( - [ - worker.create_rag_retriever.remote( - self.config, self.question_encoder_tokenizer, self.generator_tokenizer, index - ) - for worker in self.retrieval_workers - ] - ) diff --git a/examples/research_projects/rag-end2end-retriever/eval_rag.py b/examples/research_projects/rag-end2end-retriever/eval_rag.py deleted file mode 100644 index 55f4da56571..00000000000 --- a/examples/research_projects/rag-end2end-retriever/eval_rag.py +++ /dev/null @@ -1,320 +0,0 @@ -"""Evaluation script for RAG models.""" - -import argparse -import ast -import logging -import os -import sys - -import pandas as pd -import torch -from tqdm import tqdm - -from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration -from transformers import logging as transformers_logging - - -sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip -from utils_rag import exact_match_score, f1_score # noqa: E402 # isort:skip - - -logger = logging.getLogger(__name__) -logging.basicConfig(level=logging.INFO) - -transformers_logging.set_verbosity_info() - - -def infer_model_type(model_name_or_path): - if "token" in model_name_or_path: - return "rag_token" - if "sequence" in model_name_or_path: - return "rag_sequence" - if "bart" in model_name_or_path: - return "bart" - return None - - -def metric_max_over_ground_truths(metric_fn, prediction, ground_truths): - return max(metric_fn(prediction, gt) for gt in ground_truths) - - -def get_scores(args, preds_path, gold_data_path): - hypos = [line.strip() for line in open(preds_path, "r").readlines()] - answers = [] - - if args.gold_data_mode == "qa": - data = pd.read_csv(gold_data_path, sep="\t", header=None) - for answer_list in data[1]: - ground_truths = ast.literal_eval(answer_list) - 
answers.append(ground_truths) - else: - references = [line.strip() for line in open(gold_data_path, "r").readlines()] - answers = [[reference] for reference in references] - - f1 = em = total = 0 - for prediction, ground_truths in zip(hypos, answers): - total += 1 - em += metric_max_over_ground_truths(exact_match_score, prediction, ground_truths) - f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths) - - em = 100.0 * em / total - f1 = 100.0 * f1 / total - - logger.info(f"F1: {f1:.2f}") - logger.info(f"EM: {em:.2f}") - - -def get_precision_at_k(args, preds_path, gold_data_path): - k = args.k - hypos = [line.strip() for line in open(preds_path, "r").readlines()] - references = [line.strip() for line in open(gold_data_path, "r").readlines()] - - em = total = 0 - for hypo, reference in zip(hypos, references): - hypo_provenance = set(hypo.split("\t")[:k]) - ref_provenance = set(reference.split("\t")) - total += 1 - em += len(hypo_provenance & ref_provenance) / k - - em = 100.0 * em / total - logger.info(f"Precision@{k}: {em: .2f}") - - -def evaluate_batch_retrieval(args, rag_model, questions): - def strip_title(title): - if title.startswith('"'): - title = title[1:] - if title.endswith('"'): - title = title[:-1] - return title - - retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus( - questions, - return_tensors="pt", - padding=True, - truncation=True, - )["input_ids"].to(args.device) - - question_enc_outputs = rag_model.rag.question_encoder(retriever_input_ids) - question_enc_pool_output = question_enc_outputs[0] - - result = rag_model.retriever( - retriever_input_ids, - question_enc_pool_output.cpu().detach().to(torch.float32).numpy(), - prefix=rag_model.rag.generator.config.prefix, - n_docs=rag_model.config.n_docs, - return_tensors="pt", - ) - all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids) - provenance_strings = [] - for docs in all_docs: - provenance = [strip_title(title) for title in docs["title"]] - provenance_strings.append("\t".join(provenance)) - return provenance_strings - - -def evaluate_batch_e2e(args, rag_model, questions): - with torch.no_grad(): - inputs_dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus( - questions, return_tensors="pt", padding=True, truncation=True - ) - - input_ids = inputs_dict.input_ids.to(args.device) - attention_mask = inputs_dict.attention_mask.to(args.device) - outputs = rag_model.generate( # rag_model overwrites generate - input_ids, - attention_mask=attention_mask, - num_beams=args.num_beams, - min_length=args.min_length, - max_length=args.max_length, - early_stopping=False, - num_return_sequences=1, - bad_words_ids=[[0, 0]], # BART likes to repeat BOS tokens, dont allow it to generate more than one - ) - answers = rag_model.retriever.generator_tokenizer.batch_decode(outputs, skip_special_tokens=True) - - if args.print_predictions: - for q, a in zip(questions, answers): - logger.info("Q: {} - A: {}".format(q, a)) - - return answers - - -def get_args(): - parser = argparse.ArgumentParser() - parser.add_argument( - "--model_type", - choices=["rag_sequence", "rag_token", "bart"], - type=str, - help=( - "RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the" - " model_name_or_path" - ), - ) - parser.add_argument( - "--index_name", - default=None, - choices=["exact", "compressed", "legacy"], - type=str, - help="RAG model retriever type", - ) - parser.add_argument( - "--index_path", - default=None, - type=str, - 
help="Path to the retrieval index", - ) - parser.add_argument("--n_docs", default=5, type=int, help="Number of retrieved docs") - parser.add_argument( - "--model_name_or_path", - default=None, - type=str, - required=True, - help="Path to pretrained checkpoints or model identifier from huggingface.co/models", - ) - parser.add_argument( - "--eval_mode", - choices=["e2e", "retrieval"], - default="e2e", - type=str, - help=( - "Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates" - " precision@k." - ), - ) - parser.add_argument("--k", default=1, type=int, help="k for the precision@k calculation") - parser.add_argument( - "--evaluation_set", - default=None, - type=str, - required=True, - help="Path to a file containing evaluation samples", - ) - parser.add_argument( - "--gold_data_path", - default=None, - type=str, - required=True, - help="Path to a tab-separated file with gold samples", - ) - parser.add_argument( - "--gold_data_mode", - default="qa", - type=str, - choices=["qa", "ans"], - help=( - "Format of the gold data file" - "qa - a single line in the following format: question [tab] answer_list" - "ans - a single line of the gold file contains the expected answer string" - ), - ) - parser.add_argument( - "--predictions_path", - type=str, - default="predictions.txt", - help="Name of the predictions file, to be stored in the checkpoints directory", - ) - parser.add_argument( - "--eval_all_checkpoints", - action="store_true", - help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number", - ) - parser.add_argument( - "--eval_batch_size", - default=8, - type=int, - help="Batch size per GPU/CPU for evaluation.", - ) - parser.add_argument( - "--recalculate", - help="Recalculate predictions even if the prediction file exists", - action="store_true", - ) - parser.add_argument( - "--num_beams", - default=4, - type=int, - help="Number of beams to be used when generating answers", - ) - parser.add_argument("--min_length", default=1, type=int, help="Min length of the generated answers") - parser.add_argument("--max_length", default=50, type=int, help="Max length of the generated answers") - - parser.add_argument( - "--print_predictions", - action="store_true", - help="If True, prints predictions while evaluating.", - ) - parser.add_argument( - "--print_docs", - action="store_true", - help="If True, prints docs retried while generating.", - ) - args = parser.parse_args() - args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu") - return args - - -def main(args): - model_kwargs = {} - if args.model_type is None: - args.model_type = infer_model_type(args.model_name_or_path) - assert args.model_type is not None - if args.model_type.startswith("rag"): - model_class = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration - model_kwargs["n_docs"] = args.n_docs - if args.index_name is not None: - model_kwargs["index_name"] = args.index_name - if args.index_path is not None: - model_kwargs["index_path"] = args.index_path - else: - model_class = BartForConditionalGeneration - - checkpoints = ( - [f.path for f in os.scandir(args.model_name_or_path) if f.is_dir()] - if args.eval_all_checkpoints - else [args.model_name_or_path] - ) - - logger.info("Evaluate the following checkpoints: %s", checkpoints) - - score_fn = get_scores if args.eval_mode == "e2e" else get_precision_at_k - evaluate_batch_fn = evaluate_batch_e2e if args.eval_mode == "e2e" else 
evaluate_batch_retrieval - - for checkpoint in checkpoints: - if os.path.exists(args.predictions_path) and (not args.recalculate): - logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path)) - score_fn(args, args.predictions_path, args.gold_data_path) - continue - - logger.info("***** Running evaluation for {} *****".format(checkpoint)) - logger.info(" Batch size = %d", args.eval_batch_size) - logger.info(" Predictions will be stored under {}".format(args.predictions_path)) - - if args.model_type.startswith("rag"): - retriever = RagRetriever.from_pretrained(checkpoint, **model_kwargs) - model = model_class.from_pretrained(checkpoint, retriever=retriever, **model_kwargs) - model.retriever.init_retrieval() - else: - model = model_class.from_pretrained(checkpoint, **model_kwargs) - model.to(args.device) - - with open(args.evaluation_set, "r") as eval_file, open(args.predictions_path, "w") as preds_file: - questions = [] - for line in tqdm(eval_file): - questions.append(line.strip()) - if len(questions) == args.eval_batch_size: - answers = evaluate_batch_fn(args, model, questions) - preds_file.write("\n".join(answers) + "\n") - preds_file.flush() - questions = [] - if len(questions) > 0: - answers = evaluate_batch_fn(args, model, questions) - preds_file.write("\n".join(answers)) - preds_file.flush() - - score_fn(args, args.predictions_path, args.gold_data_path) - - -if __name__ == "__main__": - args = get_args() - main(args) diff --git a/examples/research_projects/rag-end2end-retriever/finetune_rag.py b/examples/research_projects/rag-end2end-retriever/finetune_rag.py deleted file mode 100644 index 9bc2e5db6d5..00000000000 --- a/examples/research_projects/rag-end2end-retriever/finetune_rag.py +++ /dev/null @@ -1,815 +0,0 @@ -"""Finetuning script for RAG models. 
Adapted from examples.seq2seq.finetune.py""" - -import argparse -import copy -import json -import logging -import multiprocessing -import os -import random -import shutil -import sys -import time -from collections import defaultdict -from pathlib import Path -from typing import Any, Dict, List, Tuple - -import numpy as np -import pytorch_lightning as pl -import torch -import torch.distributed as dist -from datasets import concatenate_datasets, load_from_disk -from torch.utils.data import DataLoader - -from transformers import ( - AutoConfig, - AutoTokenizer, - BartForConditionalGeneration, - BatchEncoding, - DPRConfig, - DPRContextEncoder, - DPRContextEncoderTokenizerFast, - RagConfig, - RagSequenceForGeneration, - RagTokenForGeneration, - RagTokenizer, - T5ForConditionalGeneration, -) -from transformers import logging as transformers_logging -from transformers.integrations import is_ray_available - - -if is_ray_available(): - import ray - from distributed_ray_retriever import RagRayDistributedRetriever, RayRetriever - -from glob import glob - -from callbacks_rag import Seq2SeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback -from kb_encode_utils import add_index, embed_update -from lightning_base import BaseTransformer, add_generic_args, generic_train -from pynvml import nvmlDeviceGetCount, nvmlDeviceGetHandleByIndex, nvmlDeviceGetMemoryInfo, nvmlInit -from utils_rag import ( - Seq2SeqDataset, - calculate_exact_match, - get_git_info, - is_rag_model, - lmap, - pickle_save, - save_git_info, - save_json, - set_extra_model_params, -) - - -logging.basicConfig(level=logging.INFO) -logger = logging.getLogger(__name__) - -transformers_logging.set_verbosity_info() - - -sys.path.insert(2, str(Path(__file__).resolve().parents[1])) -isEmUpdateBusy = False -isAddIndexBusy = False -processes = [] -threadHandle_index = None - - -class AttrDict(dict): - def __init__(self, *args, **kwargs): - super(AttrDict, self).__init__(*args, **kwargs) - self.__dict__ = self - - -class GenerativeQAModule(BaseTransformer): - mode = "generative_qa" - loss_names = ["loss"] - metric_names = ["em"] - val_metric = "em" - - def __init__(self, hparams, **kwargs): - # when loading from a pytorch lightning checkpoint, hparams are passed as dict - if isinstance(hparams, dict): - hparams = AttrDict(hparams) - if hparams.model_type == "rag_sequence": - self.model_class = RagSequenceForGeneration - elif hparams.model_type == "rag_token": - self.model_class = RagTokenForGeneration - elif hparams.model_type == "bart": - self.model_class = BartForConditionalGeneration - else: - self.model_class = T5ForConditionalGeneration - self.is_rag_model = is_rag_model(hparams.model_type) - - config_class = RagConfig if self.is_rag_model else AutoConfig - config = config_class.from_pretrained(hparams.model_name_or_path) - - # set retriever parameters - config.index_name = hparams.index_name or config.index_name - config.passages_path = hparams.passages_path or config.passages_path - config.index_path = hparams.index_path or config.index_path - config.use_dummy_dataset = hparams.use_dummy_dataset - - # set extra_model_params for generator configs and load_model - extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "attention_dropout", "dropout") - if self.is_rag_model: - if hparams.prefix is not None: - config.generator.prefix = hparams.prefix - config.label_smoothing = hparams.label_smoothing - hparams, config.generator = set_extra_model_params(extra_model_params, hparams, config.generator) - if 
hparams.distributed_retriever == "ray": - # The Ray retriever needs the handles to the retriever actors. - retriever = RagRayDistributedRetriever.from_pretrained( - hparams.model_name_or_path, hparams.actor_handles, config=config - ) - - if hparams.end2end: - ctx_encoder_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained( - "facebook/dpr-ctx_encoder-multiset-base" - ) - retriever.set_ctx_encoder_tokenizer(ctx_encoder_tokenizer) - else: - logger.info("please use RAY as the distributed retrieval method") - - model = self.model_class.from_pretrained(hparams.model_name_or_path, config=config, retriever=retriever) - if hparams.end2end: - ctx_encoder = DPRContextEncoder.from_pretrained(hparams.context_encoder_name) - model.set_context_encoder_for_training(ctx_encoder) - prefix = config.question_encoder.prefix - else: - if hparams.prefix is not None: - config.prefix = hparams.prefix - hparams, config = set_extra_model_params(extra_model_params, hparams, config) - model = self.model_class.from_pretrained(hparams.model_name_or_path, config=config) - prefix = config.prefix - - tokenizer = ( - RagTokenizer.from_pretrained(hparams.model_name_or_path) - if self.is_rag_model - else AutoTokenizer.from_pretrained(hparams.model_name_or_path) - ) - - self.config_dpr = DPRConfig.from_pretrained(hparams.context_encoder_name) - self.custom_config = hparams - self.context_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(hparams.context_encoder_name) - - super().__init__(hparams, config=config, tokenizer=tokenizer, model=model) - - save_git_info(self.hparams.output_dir) - self.output_dir = Path(self.hparams.output_dir) - self.dpr_ctx_check_dir = str(Path(self.hparams.output_dir)) + "/dpr_ctx_checkpoint" - self.metrics_save_path = Path(self.output_dir) / "metrics.json" - self.hparams_save_path = Path(self.output_dir) / "hparams.pkl" - pickle_save(self.hparams, self.hparams_save_path) - self.step_count = 0 - self.metrics = defaultdict(list) - - self.dataset_kwargs: dict = { - "data_dir": self.hparams.data_dir, - "max_source_length": self.hparams.max_source_length, - "prefix": prefix or "", - } - n_observations_per_split = { - "train": self.hparams.n_train, - "val": self.hparams.n_val, - "test": self.hparams.n_test, - } - self.n_obs = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()} - self.target_lens = { - "train": self.hparams.max_target_length, - "val": self.hparams.val_max_target_length, - "test": self.hparams.test_max_target_length, - } - assert self.target_lens["train"] <= self.target_lens["val"], f"target_lens: {self.target_lens}" - assert self.target_lens["train"] <= self.target_lens["test"], f"target_lens: {self.target_lens}" - - self.hparams.git_sha = get_git_info()["repo_sha"] - self.num_workers = hparams.num_workers - self.distributed_port = self.hparams.distributed_port - - # For single GPU training, init_ddp_connection is not called. - # So we need to initialize the retrievers here. 
- if hparams.gpus <= 1: - if hparams.distributed_retriever == "ray": - self.model.retriever.init_retrieval() - else: - logger.info("please use RAY as the distributed retrieval method") - - self.distributed_retriever = hparams.distributed_retriever - - def forward(self, input_ids, **kwargs): - return self.model(input_ids, **kwargs) - - def ids_to_clean_text(self, generated_ids: List[int]): - gen_text = self.tokenizer.batch_decode( - generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True - ) - return lmap(str.strip, gen_text) - - def _step(self, batch: dict) -> Tuple: - source_ids, source_mask, target_ids = batch["input_ids"], batch["attention_mask"], batch["decoder_input_ids"] - - rag_kwargs = {} - if isinstance(self.model, T5ForConditionalGeneration): - decoder_input_ids = self.model._shift_right(target_ids) - lm_labels = target_ids - elif isinstance(self.model, BartForConditionalGeneration): - decoder_input_ids = target_ids[:, :-1].contiguous() - lm_labels = target_ids[:, 1:].clone() - else: - assert self.is_rag_model - generator = self.model.rag.generator - if isinstance(generator, T5ForConditionalGeneration): - decoder_start_token_id = generator.config.decoder_start_token_id - decoder_input_ids = ( - torch.cat( - [torch.tensor([[decoder_start_token_id]] * target_ids.shape[0]).to(target_ids), target_ids], - dim=1, - ) - if target_ids.shape[0] < self.target_lens["train"] - else generator._shift_right(target_ids) - ) - elif isinstance(generator, BartForConditionalGeneration): - decoder_input_ids = target_ids - lm_labels = decoder_input_ids - rag_kwargs["reduce_loss"] = True - - assert decoder_input_ids is not None - - outputs = self( - source_ids, - attention_mask=source_mask, - decoder_input_ids=decoder_input_ids, - use_cache=False, - labels=lm_labels, - **rag_kwargs, - ) - loss = outputs["loss"] - return (loss,) - - @property - def pad(self) -> int: - raise NotImplementedError("pad not implemented") - - def training_step(self, batch, batch_idx) -> Dict: - global isEmUpdateBusy # use to check whether the entire embedding update process is finished or not - global isAddIndexBusy # use to check whether the entire indexing process is finished or not - global processes # use to keep threads embedding update processes - global threadHandle_index # use to keep thread in embedding indexing processes - - if (self.trainer.global_rank == 0) and (self.custom_config.end2end): - if (not batch_idx == 0) and (batch_idx % self.custom_config.indexing_freq == 0): - free_gpu_list = [] - nvmlInit() - deviceCount = nvmlDeviceGetCount() - - my_list = json.loads(self.custom_config.gpu_order) - - for i in range(deviceCount): - handle = nvmlDeviceGetHandleByIndex(i) - info = nvmlDeviceGetMemoryInfo(handle) - - if info.used / 1e6 < 15: - position = my_list.index(i) - free_gpu_list.append("cuda:" + str(position)) - - if len(free_gpu_list) >= self.custom_config.index_gpus: - has_free_gpus = True - - else: - has_free_gpus = False - - if (not isEmUpdateBusy) and has_free_gpus: - model_copy = type(self.model.rag.ctx_encoder)( - self.config_dpr - ) # get a new instance #this will be load in the CPU - model_copy.load_state_dict(self.model.rag.ctx_encoder.state_dict()) # copy weights - - processes = [] - - if len(free_gpu_list) > self.custom_config.index_gpus: - cuda_devices = random.sample(free_gpu_list, self.custom_config.index_gpus) - else: - cuda_devices = free_gpu_list - - num_processes = len(cuda_devices) - - for rank in range(num_processes): - logger.info("Iniitializing embedding calculation 
process rank{}".format(rank)) - device = cuda_devices[rank] - p = multiprocessing.Process( - target=embed_update, - args=( - copy.deepcopy(model_copy), - num_processes, - device, - rank, - self.custom_config.shard_dir, - self.custom_config.csv_path, - ), - ) - processes.append(p) - - for p in processes: - p.start() - - isEmUpdateBusy = True - - if isEmUpdateBusy and (not isAddIndexBusy): - index_process_list = [processes[k].is_alive() for k in range(self.custom_config.index_gpus)] - if ( - sum(index_process_list) == 0 - ): # If entire list is false, we can say all embedding calculation process has finished - logger.info("Start adding the index") - threadHandle_index = multiprocessing.Process( - target=add_index, - args=( - self.custom_config.shard_dir, - self.config.index_path, - ), - ) - threadHandle_index.start() - isAddIndexBusy = True - - # check when index building has started - if isAddIndexBusy: - # check still the index_building process is happening - if not threadHandle_index.is_alive(): - logger.info("Merging the dataset shards") - saved_dataset_shards = [] - - for address in glob(str(self.custom_config.shard_dir) + "/*/"): - saved_dataset_shards.append(load_from_disk(address)) - - concat = concatenate_datasets(saved_dataset_shards) - concat.save_to_disk(self.config.passages_path) # here we update the main passage file on the disk - logger.info("done updating the dataset") - - # To Do (@Aaron) : Useful in the future dynamic memory implementation. - # if you load the index from the disk make sure to update the index file here, otherwise it is ok to update the index file from the worker. - # logger.info("then updating the index") - # shutil.copy(self.custom_config.temp_index, self.config.idex_path) - - logger.info("Loading new passages and iniitalzing new index") - self.trainer.model.module.module.model.rag.retriever.re_load() - self.trainer.model.module.module.model.rag.retriever.init_retrieval() - - isEmUpdateBusy = False - isAddIndexBusy = False - self.trainer.strategy.barrier("barrier") - - loss_tensors = self._step(batch) - - logs = dict(zip(self.loss_names, loss_tensors)) - # tokens per batch - tgt_pad_token_id = ( - self.tokenizer.generator.pad_token_id - if isinstance(self.tokenizer, RagTokenizer) - else self.tokenizer.pad_token_id - ) - src_pad_token_id = ( - self.tokenizer.question_encoder.pad_token_id - if isinstance(self.tokenizer, RagTokenizer) - else self.tokenizer.pad_token_id - ) - logs["tpb"] = ( - batch["input_ids"].ne(src_pad_token_id).sum() + batch["decoder_input_ids"].ne(tgt_pad_token_id).sum() - ) - self.log("loss", loss_tensors[0]) - return loss_tensors[0] - - def validation_step(self, batch, batch_idx) -> Dict: - return self._generative_step(batch) - - def validation_epoch_end(self, outputs, prefix="val") -> Dict: - self.step_count += 1 - losses = {k: torch.stack([x[k] for x in outputs]).mean() for k in self.loss_names} - loss = losses["loss"] - gen_metrics = { - k: np.array([x[k] for x in outputs]).mean() for k in self.metric_names + ["gen_time", "gen_len"] - } - metrics_tensor: torch.FloatTensor = torch.tensor(gen_metrics[self.val_metric]).type_as(loss) - gen_metrics.update({k: v.item() for k, v in losses.items()}) - - # fix for https://github.com/PyTorchLightning/pytorch-lightning/issues/2424 - if dist.is_initialized(): - dist.all_reduce(metrics_tensor, op=dist.ReduceOp.SUM) - metrics_tensor = metrics_tensor / dist.get_world_size() - gen_metrics.update({self.val_metric: metrics_tensor.item()}) - - losses.update(gen_metrics) - metrics = 
{f"{prefix}_avg_{k}": x for k, x in losses.items()} - metrics["step_count"] = self.step_count - self.save_metrics(metrics, prefix) # writes to self.metrics_save_path - - log_dict = { - f"{prefix}_avg_em": metrics[f"{prefix}_avg_em"], - "step_count": metrics["step_count"], - f"{prefix}_avg_loss": metrics[f"{prefix}_avg_loss"], - f"{prefix}_loss": loss, - f"{prefix}_em": metrics_tensor, - } - self.log_dict(log_dict) - - def save_metrics(self, latest_metrics, type_path) -> None: - self.metrics[type_path].append(latest_metrics) - save_json(self.metrics, self.metrics_save_path) - - def calc_generative_metrics(self, preds, target) -> Dict: - return calculate_exact_match(preds, target) - - def _generative_step(self, batch: dict) -> dict: - start_time = time.time() - batch = BatchEncoding(batch).to(device=self.model.device) - generated_ids = self.model.generate( - batch["input_ids"], - attention_mask=batch["attention_mask"], - do_deduplication=False, # rag specific parameter - use_cache=True, - min_length=1, - max_length=self.target_lens["val"], - ) - gen_time = (time.time() - start_time) / batch["input_ids"].shape[0] - preds: List[str] = self.ids_to_clean_text(generated_ids) - target: List[str] = self.ids_to_clean_text(batch["decoder_input_ids"]) - # print(preds,target) - loss_tensors = self._step(batch) - base_metrics = dict(zip(self.loss_names, loss_tensors)) - gen_metrics: Dict = self.calc_generative_metrics(preds, target) - - summ_len = np.mean(lmap(len, generated_ids)) - base_metrics.update(gen_time=gen_time, gen_len=summ_len, preds=preds, target=target, **gen_metrics) - return base_metrics - - def test_step(self, batch, batch_idx): - return self._generative_step(batch) - - def test_epoch_end(self, outputs): - return self.validation_epoch_end(outputs, prefix="test") - - def get_dataset(self, type_path) -> Seq2SeqDataset: - n_obs = self.n_obs[type_path] - max_target_length = self.target_lens[type_path] - dataset = Seq2SeqDataset( - self.tokenizer, - type_path=type_path, - n_obs=n_obs, - max_target_length=max_target_length, - **self.dataset_kwargs, - ) - return dataset - - def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False) -> DataLoader: - dataset = self.get_dataset(type_path) - - dataloader = DataLoader( - dataset, - batch_size=batch_size, - collate_fn=dataset.collate_fn, - shuffle=shuffle, - num_workers=self.num_workers, - ) - return dataloader - - def train_dataloader(self) -> DataLoader: - dataloader = self.get_dataloader("train", batch_size=self.hparams.train_batch_size, shuffle=True) - return dataloader - - def val_dataloader(self) -> DataLoader: - return self.get_dataloader("val", batch_size=self.hparams.eval_batch_size) - - def test_dataloader(self) -> DataLoader: - return self.get_dataloader("test", batch_size=self.hparams.eval_batch_size) - - @pl.utilities.rank_zero_only - def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None: - save_path = self.output_dir.joinpath("checkpoint{}".format(self.step_count)) - self.model.config.save_step = self.step_count - # self.model.save_pretrained(save_path) - self.tokenizer.save_pretrained(save_path) - - if self.custom_config.end2end: - modified_state_dict = self.model.state_dict() - for key in self.model.state_dict().keys(): - if key.split(".")[1] == "ctx_encoder": - del modified_state_dict[key] - self.model.save_pretrained(save_directory=save_path, state_dict=modified_state_dict) - - save_path_dpr = os.path.join(self.dpr_ctx_check_dir, "checkpoint{}".format(self.step_count)) - 
self.model.rag.ctx_encoder.save_pretrained(save_path_dpr) - self.context_tokenizer.save_pretrained(save_path_dpr) - - @staticmethod - def add_model_specific_args(parser, root_dir): - BaseTransformer.add_model_specific_args(parser, root_dir) - add_generic_args(parser, root_dir) - parser.add_argument( - "--max_source_length", - default=128, - type=int, - help=( - "The maximum total input sequence length after tokenization. Sequences longer " - "than this will be truncated, sequences shorter will be padded." - ), - ) - parser.add_argument( - "--max_target_length", - default=25, - type=int, - help=( - "The maximum total input sequence length after tokenization. Sequences longer " - "than this will be truncated, sequences shorter will be padded." - ), - ) - parser.add_argument( - "--val_max_target_length", - default=25, - type=int, - help=( - "The maximum total input sequence length after tokenization. Sequences longer " - "than this will be truncated, sequences shorter will be padded." - ), - ) - parser.add_argument( - "--test_max_target_length", - default=25, - type=int, - help=( - "The maximum total input sequence length after tokenization. Sequences longer " - "than this will be truncated, sequences shorter will be padded." - ), - ) - parser.add_argument("--logger_name", type=str, choices=["default", "wandb", "wandb_shared"], default="default") - parser.add_argument("--n_train", type=int, default=-1, required=False, help="# examples. -1 means use all.") - parser.add_argument("--n_val", type=int, default=-1, required=False, help="# examples. -1 means use all.") - parser.add_argument("--n_test", type=int, default=-1, required=False, help="# examples. -1 means use all.") - parser.add_argument("--label_smoothing", type=float, default=0.0, required=False) - parser.add_argument( - "--prefix", - type=str, - default=None, - help="Prefix added at the beginning of each text, typically used with T5-based models.", - ) - parser.add_argument( - "--early_stopping_patience", - type=int, - default=-1, - required=False, - help=( - "-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So" - " val_check_interval will effect it." - ), - ) - parser.add_argument( - "--distributed-port", type=int, default=-1, required=False, help="Port number for distributed training." - ) - parser.add_argument( - "--model_type", - choices=["rag_sequence", "rag_token", "bart", "t5"], - type=str, - help=( - "RAG model type: sequence or token, if none specified, the type is inferred from the" - " model_name_or_path" - ), - ) - parser.add_argument( - "--context_encoder_name", - default="facebook/dpr-ctx_encoder-multiset-base", - type=str, - help="Name of the pre-trained context encoder checkpoint from the DPR", - ) - parser.add_argument( - "--csv_path", - default=str(Path(__file__).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv"), - type=str, - help="path of the raw KB csv", - ) - parser.add_argument("--end2end", action="store_true", help="whether to train the system end2end or not") - parser.add_argument("--index_gpus", type=int, help="how many GPUs used in re-encoding process") - parser.add_argument( - "--shard_dir", - type=str, - default=str(Path(__file__).parent / "test_run" / "kb-shards"), - help="directory used to keep temporary shards during the re-encode process", - ) - - parser.add_argument( - "--gpu_order", - type=str, - help=( - "order of the GPU used during the fine-tuning. Used to finding free GPUs during the re-encode" - " process. 
I do not have many GPUs :)" - ), - ) - - parser.add_argument("--indexing_freq", type=int, help="frequency of re-encode process") - return parser - - @staticmethod - def add_retriever_specific_args(parser): - parser.add_argument( - "--index_name", - type=str, - default=None, - help=( - "Name of the index to use: 'hf' for a canonical dataset from the datasets library (default), 'custom'" - " for a local index, or 'legacy' for the orignal one)" - ), - ) - parser.add_argument( - "--passages_path", - type=str, - default=str(Path(__file__).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset"), - help=( - "Path to the dataset of passages for custom index. More info about custom indexes in the RagRetriever" - " documentation as well as in `examples/rag/use_own_knowledge_dataset.py`" - ), - ) - parser.add_argument( - "--index_path", - type=str, - default=str(Path(__file__).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset_hnsw_index.faiss"), - help=( - "Path to the faiss index for custom index. More info about custom indexes in the RagRetriever" - " documentation as well as in `examples/rag/use_own_knowledge_dataset.py`" - ), - ) - parser.add_argument( - "--distributed_retriever", - choices=["ray", "pytorch"], - type=str, - default="ray", - help=( - "What implementation to use for distributed retriever? If " - "pytorch is selected, the index is loaded on training " - "worker 0, and torch.distributed is used to handle " - "communication between training worker 0, and the other " - "training workers. If ray is selected, the Ray library is " - "used to create load the index on separate processes, " - "and Ray handles the communication between the training " - "workers and the retrieval actors." - ), - ) - parser.add_argument( - "--use_dummy_dataset", - type=bool, - default=False, - help=( - "Whether to use the dummy version of the dataset index. More info about custom indexes in the" - " RagRetriever documentation as well as in `examples/rag/use_own_knowledge_dataset.py`" - ), - ) - return parser - - @staticmethod - def add_ray_specific_args(parser): - # Ray cluster address. - parser.add_argument( - "--ray-address", - default="auto", - type=str, - help=( - "The address of the Ray cluster to connect to. If not " - "specified, Ray will attempt to automatically detect the " - "cluster. Has no effect if pytorch is used as the distributed " - "retriever." - ), - ) - parser.add_argument( - "--num_retrieval_workers", - type=int, - default=1, - help=( - "The number of retrieval actors to use when Ray is selected " - "for the distributed retriever. Has no effect when " - "distributed_retriever is set to pytorch." 
- ), - ) - return parser - - -def main(args=None, model=None) -> GenerativeQAModule: - parser = argparse.ArgumentParser() - parser = pl.Trainer.add_argparse_args(parser) - parser = GenerativeQAModule.add_model_specific_args(parser, os.getcwd()) - parser = GenerativeQAModule.add_retriever_specific_args(parser) - args = args or parser.parse_args() - - Path(args.output_dir).mkdir(exist_ok=True) - Path(args.output_dir + "/dpr_ctx_checkpoint").mkdir( - exist_ok=True - ) # save dpr_context encoder seprately for the future use - print(args.shard_dir) - if os.path.exists(args.shard_dir): # we do not need previous kb shards used in dataset re-conding and re-indexing - shutil.rmtree(args.shard_dir) - Path(args.shard_dir).mkdir(exist_ok=True) - - if os.path.exists( - args.cache_dir - ): # we do not need previous cache files used in dataset re-conding and re-indexing - shutil.rmtree(args.cache_dir) - Path(args.cache_dir).mkdir(exist_ok=True) - - named_actors = [] - if args.distributed_retriever == "ray" and args.gpus > 1: - if not is_ray_available(): - raise RuntimeError("Please install Ray to use the Ray distributed retriever.") - # Connect to an existing Ray cluster. - try: - ray.init(address=args.ray_address, namespace="rag") - except (ConnectionError, ValueError): - logger.warning( - "Connection to Ray cluster failed. Make sure a Ray " - "cluster is running by either using Ray's cluster " - "launcher (`ray up`) or by manually starting Ray on " - "each node via `ray start --head` for the head node " - "and `ray start --address=':6379'` for " - "additional nodes. See " - "https://docs.ray.io/en/master/cluster/index.html " - "for more info." - ) - raise - - # Create Ray actors only for rank 0. - if ("LOCAL_RANK" not in os.environ or os.environ["LOCAL_RANK"] == 0) and ( - "NODE_RANK" not in os.environ or os.environ["NODE_RANK"] == 0 - ): - remote_cls = ray.remote(RayRetriever) - named_actors = [ - remote_cls.options(name="retrieval_worker_{}".format(i)).remote() - for i in range(args.num_retrieval_workers) - ] - else: - logger.info( - "Getting named actors for NODE_RANK {}, LOCAL_RANK {}".format( - os.environ["NODE_RANK"], os.environ["LOCAL_RANK"] - ) - ) - named_actors = [ray.get_actor("retrieval_worker_{}".format(i)) for i in range(args.num_retrieval_workers)] - args.actor_handles = named_actors - assert args.actor_handles == named_actors - - if model is None: - model: GenerativeQAModule = GenerativeQAModule(args) - - dataset = Path(args.data_dir).name - if ( - args.logger_name == "default" - or args.fast_dev_run - or str(args.output_dir).startswith("/tmp") - or str(args.output_dir).startswith("/var") - ): - training_logger = True # don't pollute wandb logs unnecessarily - elif args.logger_name == "wandb": - from pytorch_lightning.loggers import WandbLogger - - project = os.environ.get("WANDB_PROJECT", dataset) - training_logger = WandbLogger(name=model.output_dir.name, project=project) - - elif args.logger_name == "wandb_shared": - from pytorch_lightning.loggers import WandbLogger - - training_logger = WandbLogger(name=model.output_dir.name, project=f"hf_{dataset}") - - es_callback = ( - get_early_stopping_callback(model.val_metric, args.early_stopping_patience) - if args.early_stopping_patience >= 0 - else False - ) - - trainer: pl.Trainer = generic_train( - model, - args, - logging_callback=Seq2SeqLoggingCallback(), - checkpoint_callback=get_checkpoint_callback(args.output_dir, model.val_metric), - early_stopping_callback=es_callback, - logger=training_logger, - 
profiler=pl.profiler.AdvancedProfiler() if args.profile else None, - ) - - pickle_save(model.hparams, model.output_dir / "hparams.pkl") - if not args.do_predict: - return model - - # test() without a model tests using the best checkpoint automatically - trainer.test() - return model - - -if __name__ == "__main__": - multiprocessing.set_start_method("spawn") - parser = argparse.ArgumentParser() - parser = pl.Trainer.add_argparse_args(parser) - parser = GenerativeQAModule.add_model_specific_args(parser, os.getcwd()) - parser = GenerativeQAModule.add_retriever_specific_args(parser) - parser = GenerativeQAModule.add_ray_specific_args(parser) - - # Pytorch Lightning Profiler - parser.add_argument( - "--profile", - action="store_true", - help="If True, use pytorch_lightning.profiler.AdvancedProfiler to profile the Trainer.", - ) - - args = parser.parse_args() - main(args) diff --git a/examples/research_projects/rag-end2end-retriever/finetune_rag_ray_end2end.sh b/examples/research_projects/rag-end2end-retriever/finetune_rag_ray_end2end.sh deleted file mode 100755 index cef1a264c93..00000000000 --- a/examples/research_projects/rag-end2end-retriever/finetune_rag_ray_end2end.sh +++ /dev/null @@ -1,68 +0,0 @@ -# Sample script to finetune RAG using Ray for distributed retrieval. - -# Add parent directory to python path to access lightning_base.py -export PYTHONPATH="../":"${PYTHONPATH}" - -#creates the custom knowlegebase -python use_own_knowledge_dataset.py \ - --csv_path /DIR/SQUAD-KB/squad-kb.csv \ - --output_dir /DIR/SQUAD-KB - -# Start a single-node Ray cluster. -ray start --head - -# A sample finetuning run, you need to specify data_dir, output_dir and model_name_or_path -# run ./examples/rag/finetune_rag_ray.sh --help to see all the possible options - - - -python finetune_rag.py \ - --data_dir /DIR/squad-training-data \ - --output_dir /DIR/model_checkpoints \ - --model_name_or_path facebook/rag-token-base \ - --model_type rag_token \ - --fp16 \ - --gpus 2 \ - --profile \ - --do_train \ - --end2end \ - --do_predict \ - --n_val -1 \ - --train_batch_size 4 \ - --eval_batch_size 1 \ - --max_source_length 128 \ - --max_target_length 25 \ - --val_max_target_length 25 \ - --test_max_target_length 25 \ - --label_smoothing 0.1 \ - --dropout 0.1 \ - --attention_dropout 0.1 \ - --weight_decay 0.001 \ - --adam_epsilon 1e-08 \ - --max_grad_norm 0.1 \ - --lr_scheduler polynomial \ - --learning_rate 3e-05 \ - --num_train_epochs 10 \ - --warmup_steps 500 \ - --gradient_accumulation_steps 8 \ - --distributed_retriever ray \ - --num_retrieval_workers 4 \ - --passages_path /DIR/SQUAD-KB/my_knowledge_dataset \ - --index_path /DIR/SQUAD-KB/my_knowledge_dataset_hnsw_index.faiss \ - --index_name custom \ - --context_encoder_name facebook/dpr-ctx_encoder-multiset-base \ - --csv_path /DIR/SQUAD-KB/squad-kb.csv \ - --index_gpus 1 \ - --gpu_order [5,6,7,8,9,0,1,2,3,4] \ - --shard_dir ./test_dir/kb-shards \ - --indexing_freq 500 - - - -# Stop the Ray cluster. -ray stop - - -#this script was used to test the SQuAD data. -#change the dir paramater acording to your prefernece. 
-#please use the same device ordere when running CUDA_VISIBLE_DEVICES=5,6,7,8,9,0,1,2,3,4 sh finetune_rag_ray_end2end.sh \ No newline at end of file diff --git a/examples/research_projects/rag-end2end-retriever/kb_encode_utils.py b/examples/research_projects/rag-end2end-retriever/kb_encode_utils.py deleted file mode 100644 index 444c07b2bab..00000000000 --- a/examples/research_projects/rag-end2end-retriever/kb_encode_utils.py +++ /dev/null @@ -1,80 +0,0 @@ -import os -from functools import partial -from glob import glob - -import faiss -from datasets import Features, Sequence, Value, concatenate_datasets, load_dataset, load_from_disk - -from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast - - -def split_text(text, n=100, character=" "): - """Split the text every ``n``-th occurrence of ``character``""" - text = text.split(character) - return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)] - - -def split_documents(documents): - """Split documents into passages""" - titles, texts = [], [] - for title, text in zip(documents["title"], documents["text"]): - if text is not None: - for passage in split_text(text): - titles.append(title if title is not None else "") - texts.append(passage) - return {"title": titles, "text": texts} - - -def embed_update(ctx_encoder, total_processes, device, process_num, shard_dir, csv_path): - kb_dataset = load_dataset( - "csv", data_files=[csv_path], split="train", delimiter="\t", column_names=["title", "text"] - ) - kb_dataset = kb_dataset.map( - split_documents, batched=True, num_proc=1 - ) # if you want you can load already splitted csv. - kb_list = [kb_dataset.shard(total_processes, i, contiguous=True) for i in range(total_processes)] - data_shrad = kb_list[process_num] - - arrow_folder = "data_" + str(process_num) - passages_path = os.path.join(shard_dir, arrow_folder) - - context_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained("facebook/dpr-ctx_encoder-multiset-base") - ctx_encoder = ctx_encoder.to(device=device) - - def embed( - documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast, device - ) -> dict: - """Compute the DPR embeddings of document passages""" - input_ids = ctx_tokenizer( - documents["title"], documents["text"], truncation=True, padding="longest", return_tensors="pt" - )["input_ids"] - embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output - return {"embeddings": embeddings.detach().cpu().numpy()} - - new_features = Features( - {"text": Value("string"), "title": Value("string"), "embeddings": Sequence(Value("float32"))} - ) # optional, save as float32 instead of float64 to save space - - dataset = data_shrad.map( - partial(embed, ctx_encoder=ctx_encoder, ctx_tokenizer=context_tokenizer, device=device), - batched=True, - batch_size=16, - features=new_features, - ) - dataset.save_to_disk(passages_path) - - -def add_index(shard_dir, index_path): - data_shard_list = [] - - for shard_address in glob(str(shard_dir) + "/*/"): - data_shard_list.append(load_from_disk(shard_address)) - - concat = concatenate_datasets(data_shard_list) - faiss.omp_set_num_threads(96) - - index = faiss.IndexHNSWFlat(768, 128, faiss.METRIC_INNER_PRODUCT) - concat.add_faiss_index("embeddings", custom_index=index) - concat.get_index("embeddings").save( - index_path - ) # since we load the index in to memory,we can directly update the index in the disk diff --git a/examples/research_projects/rag-end2end-retriever/lightning_base.py 
b/examples/research_projects/rag-end2end-retriever/lightning_base.py deleted file mode 100644 index c1a271e88d1..00000000000 --- a/examples/research_projects/rag-end2end-retriever/lightning_base.py +++ /dev/null @@ -1,414 +0,0 @@ -import argparse -import logging -import os -from pathlib import Path -from typing import Any, Dict - -import pytorch_lightning as pl -from pytorch_lightning.utilities import rank_zero_info - -from transformers import ( - AdamW, - AutoConfig, - AutoModel, - AutoModelForPreTraining, - AutoModelForQuestionAnswering, - AutoModelForSeq2SeqLM, - AutoModelForSequenceClassification, - AutoModelForTokenClassification, - AutoModelWithLMHead, - AutoTokenizer, - PretrainedConfig, - PreTrainedTokenizer, -) -from transformers.optimization import ( - Adafactor, - get_cosine_schedule_with_warmup, - get_cosine_with_hard_restarts_schedule_with_warmup, - get_linear_schedule_with_warmup, - get_polynomial_decay_schedule_with_warmup, -) -from transformers.utils.versions import require_version - - -logger = logging.getLogger(__name__) - -require_version("pytorch_lightning>=1.0.4") - -MODEL_MODES = { - "base": AutoModel, - "sequence-classification": AutoModelForSequenceClassification, - "question-answering": AutoModelForQuestionAnswering, - "pretraining": AutoModelForPreTraining, - "token-classification": AutoModelForTokenClassification, - "language-modeling": AutoModelWithLMHead, - "summarization": AutoModelForSeq2SeqLM, - "translation": AutoModelForSeq2SeqLM, -} - - -# update this and the import above to support new schedulers from transformers.optimization -arg_to_scheduler = { - "linear": get_linear_schedule_with_warmup, - "cosine": get_cosine_schedule_with_warmup, - "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup, - "polynomial": get_polynomial_decay_schedule_with_warmup, - # '': get_constant_schedule, # not supported for now - # '': get_constant_schedule_with_warmup, # not supported for now -} -arg_to_scheduler_choices = sorted(arg_to_scheduler.keys()) -arg_to_scheduler_metavar = "{" + ", ".join(arg_to_scheduler_choices) + "}" - - -class BaseTransformer(pl.LightningModule): - def __init__( - self, - hparams: argparse.Namespace, - num_labels=None, - mode="base", - config=None, - tokenizer=None, - model=None, - **config_kwargs, - ): - """Initialize a model, tokenizer and config.""" - super().__init__() - # TODO: move to self.save_hyperparameters() - # self.save_hyperparameters() - # can also expand arguments into trainer signature for easier reading - - self.save_hyperparameters(hparams) - self.step_count = 0 - self.output_dir = Path(self.hparams.output_dir) - cache_dir = self.hparams.cache_dir if self.hparams.cache_dir else None - if config is None: - self.config = AutoConfig.from_pretrained( - self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path, - **({"num_labels": num_labels} if num_labels is not None else {}), - cache_dir=cache_dir, - **config_kwargs, - ) - else: - self.config: PretrainedConfig = config - - extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout") - for p in extra_model_params: - if getattr(self.hparams, p, None): - assert hasattr(self.config, p), f"model config doesn't have a `{p}` attribute" - setattr(self.config, p, getattr(self.hparams, p)) - - if tokenizer is None: - self.tokenizer = AutoTokenizer.from_pretrained( - self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path, - cache_dir=cache_dir, - ) - else: - 
self.tokenizer: PreTrainedTokenizer = tokenizer - self.model_type = MODEL_MODES[mode] - if model is None: - self.model = self.model_type.from_pretrained( - self.hparams.model_name_or_path, - from_tf=bool(".ckpt" in self.hparams.model_name_or_path), - config=self.config, - cache_dir=cache_dir, - ) - else: - self.model = model - - def load_hf_checkpoint(self, *args, **kwargs): - self.model = self.model_type.from_pretrained(*args, **kwargs) - - def get_lr_scheduler(self): - get_schedule_func = arg_to_scheduler[self.hparams.lr_scheduler] - scheduler = get_schedule_func( - self.opt, num_warmup_steps=self.hparams.warmup_steps, num_training_steps=self.total_steps() - ) - scheduler = {"scheduler": scheduler, "interval": "step", "frequency": 1} - return scheduler - - def configure_optimizers(self): - """Prepare optimizer and schedule (linear warmup and decay)""" - model = self.model - no_decay = ["bias", "LayerNorm.weight"] - optimizer_grouped_parameters = [ - { - "params": [ - p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay) - ], # check this named parameters - "weight_decay": self.hparams.weight_decay, - }, - { - "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], - "weight_decay": 0.0, - }, - ] - if self.hparams.adafactor: - optimizer = Adafactor( - optimizer_grouped_parameters, lr=self.hparams.learning_rate, scale_parameter=False, relative_step=False - ) - - else: - optimizer = AdamW( - optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon - ) - self.opt = optimizer - - scheduler = self.get_lr_scheduler() - - return [optimizer], [scheduler] - - def test_step(self, batch, batch_nb): - return self.validation_step(batch, batch_nb) - - def test_epoch_end(self, outputs): - return self.validation_end(outputs) - - def total_steps(self) -> int: - """The number of total training steps that will be run. 
Used for lr scheduler purposes.""" - num_devices = max(1, self.hparams.gpus) # TODO: consider num_tpu_cores - effective_batch_size = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices - return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs - - def setup(self, stage): - if stage == "test": - self.dataset_size = len(self.test_dataloader().dataset) - else: - self.train_loader = self.get_dataloader("train", self.hparams.train_batch_size, shuffle=True) - self.dataset_size = len(self.train_dataloader().dataset) - - def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False): - raise NotImplementedError("You must implement this for your task") - - def train_dataloader(self): - return self.train_loader - - def val_dataloader(self): - return self.get_dataloader("dev", self.hparams.eval_batch_size, shuffle=False) - - def test_dataloader(self): - return self.get_dataloader("test", self.hparams.eval_batch_size, shuffle=False) - - def _feature_file(self, mode): - return os.path.join( - self.hparams.data_dir, - "cached_{}_{}_{}".format( - mode, - list(filter(None, self.hparams.model_name_or_path.split("/"))).pop(), - str(self.hparams.max_seq_length), - ), - ) - - @pl.utilities.rank_zero_only - def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None: - save_path = self.output_dir.joinpath("best_tfmr") - self.model.config.save_step = self.step_count - self.model.save_pretrained(save_path) - self.tokenizer.save_pretrained(save_path) - - @staticmethod - def add_model_specific_args(parser, root_dir): - parser.add_argument( - "--model_name_or_path", - default=None, - type=str, - required=True, - help="Path to pretrained model or model identifier from huggingface.co/models", - ) - parser.add_argument( - "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name" - ) - parser.add_argument( - "--tokenizer_name", - default=None, - type=str, - help="Pretrained tokenizer name or path if not the same as model_name", - ) - parser.add_argument( - "--cache_dir", - default=str(Path(__file__).parent / "test_run" / "cache"), - type=str, - help="Where do you want to store the pre-trained models downloaded from huggingface.co", - ) - parser.add_argument( - "--encoder_layerdrop", - type=float, - help="Encoder layer dropout probability (Optional). Goes into model.config", - ) - parser.add_argument( - "--decoder_layerdrop", - type=float, - help="Decoder layer dropout probability (Optional). Goes into model.config", - ) - parser.add_argument( - "--dropout", - type=float, - help="Dropout probability (Optional). Goes into model.config", - ) - parser.add_argument( - "--attention_dropout", - type=float, - help="Attention dropout probability (Optional). 
Goes into model.config", - ) - parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.") - parser.add_argument( - "--lr_scheduler", - default="linear", - choices=arg_to_scheduler_choices, - metavar=arg_to_scheduler_metavar, - type=str, - help="Learning rate scheduler", - ) - parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.") - parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.") - parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.") - parser.add_argument("--num_workers", default=4, type=int, help="kwarg passed to DataLoader") - parser.add_argument("--num_train_epochs", dest="max_epochs", default=3, type=int) - parser.add_argument("--train_batch_size", default=32, type=int) - parser.add_argument("--eval_batch_size", default=32, type=int) - parser.add_argument("--adafactor", action="store_true") - - -class InitCallback(pl.Callback): - # this process can also be done with PL ddp plugging. - # But still it is experimental (check original RAG, I updated that with pluggin (shamanez)) - def on_sanity_check_start(self, trainer, pl_module): - if ( - trainer.is_global_zero and trainer.global_rank == 0 - ): # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed. - pl_module.model.rag.retriever.init_retrieval() # better to use hook functions. - - -class CheckParamCallback(pl.Callback): - # check whether new added model parameters are differentiable - def on_after_backward(self, trainer, pl_module): - # print(pl_module.model.rag) - for name, param in pl_module.model.rag.named_parameters(): - if param.grad is None: - print(name) - - -class LoggingCallback(pl.Callback): - def on_batch_end(self, trainer, pl_module): - lr_scheduler = trainer.lr_schedulers[0]["scheduler"] - lrs = {f"lr_group_{i}": lr for i, lr in enumerate(lr_scheduler.get_lr())} - pl_module.logger.log_metrics(lrs) - - def on_validation_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule): - rank_zero_info("***** Validation results *****") - metrics = trainer.callback_metrics - # Log results - for key in sorted(metrics): - if key not in ["log", "progress_bar"]: - rank_zero_info("{} = {}\n".format(key, str(metrics[key]))) - - def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule): - rank_zero_info("***** Test results *****") - metrics = trainer.callback_metrics - # Log and save results to file - output_test_results_file = os.path.join(pl_module.hparams.output_dir, "test_results.txt") - with open(output_test_results_file, "w") as writer: - for key in sorted(metrics): - if key not in ["log", "progress_bar"]: - rank_zero_info("{} = {}\n".format(key, str(metrics[key]))) - writer.write("{} = {}\n".format(key, str(metrics[key]))) - - -def add_generic_args(parser, root_dir) -> None: - # To allow all pl args uncomment the following line - # parser = pl.Trainer.add_argparse_args(parser) - parser.add_argument( - "--output_dir", - default=str(Path(__file__).parent / "test_run" / "model_checkpoints"), - type=str, - help="The output directory where the model predictions and checkpoints will be written.", - ) - parser.add_argument( - "--fp16", - action="store_true", - help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit", - ) - - parser.add_argument( - "--fp16_opt_level", - type=str, - default="O2", - help=( - "For fp16: Apex AMP optimization 
level selected in ['O0', 'O1', 'O2', and 'O3']. " - "See details at https://nvidia.github.io/apex/amp.html" - ), - ) - parser.add_argument("--n_tpu_cores", dest="tpu_cores", type=int) - parser.add_argument("--max_grad_norm", dest="gradient_clip_val", default=1.0, type=float, help="Max gradient norm") - parser.add_argument("--do_train", action="store_true", help="Whether to run training.") - parser.add_argument("--do_predict", action="store_true", help="Whether to run predictions on the test set.") - parser.add_argument( - "--gradient_accumulation_steps", - dest="accumulate_grad_batches", - type=int, - default=1, - help="Number of updates steps to accumulate before performing a backward/update pass.", - ) - parser.add_argument("--seed", type=int, default=42, help="random seed for initialization") - parser.add_argument( - "--data_dir", - default=str(Path(__file__).parent / "test_run" / "dummy-train-data"), - type=str, - help="The input data dir. Should contain the training files for the CoNLL-2003 NER task.", - ) - - -def generic_train( - model: BaseTransformer, - args: argparse.Namespace, - early_stopping_callback=None, - logger=True, # can pass WandbLogger() here - extra_callbacks=[], - checkpoint_callback=None, - logging_callback=None, - **extra_train_kwargs, -): - pl.seed_everything(args.seed) - - # init model - odir = Path(model.hparams.output_dir) - odir.mkdir(exist_ok=True) - - # add custom checkpoints - if checkpoint_callback is None: - checkpoint_callback = pl.callbacks.ModelCheckpoint( - filepath=args.output_dir, prefix="checkpoint", monitor="val_loss", mode="min", save_top_k=1 - ) - if early_stopping_callback: - extra_callbacks.append(early_stopping_callback) - if logging_callback is None: - logging_callback = LoggingCallback() - - train_params = {} - - if args.fp16: - train_params["precision"] = 16 - - if args.gpus > 1: - train_params["accelerator"] = "auto" - train_params["strategy"] = "ddp" - - train_params["accumulate_grad_batches"] = args.accumulate_grad_batches - train_params["profiler"] = None - train_params["devices"] = "auto" - - trainer = pl.Trainer.from_argparse_args( - args, - weights_summary=None, - callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback], - logger=logger, - val_check_interval=1, - num_sanity_val_steps=2, - **train_params, - ) - - if args.do_train: - trainer.fit(model) - - else: - print("RAG modeling tests with new set functions successfully executed!") - return trainer diff --git a/examples/research_projects/rag-end2end-retriever/requirements.txt b/examples/research_projects/rag-end2end-retriever/requirements.txt deleted file mode 100644 index 32025229d07..00000000000 --- a/examples/research_projects/rag-end2end-retriever/requirements.txt +++ /dev/null @@ -1,7 +0,0 @@ -faiss-cpu >= 1.7.2 -datasets -psutil >= 5.9.1 -torch >= 1.11.0 -pytorch-lightning == 1.6.4 -nvidia-ml-py3 == 7.352.0 -ray >= 1.13.0 \ No newline at end of file diff --git a/examples/research_projects/rag-end2end-retriever/test_run/dummy-kb/my_knowledge_dataset.csv b/examples/research_projects/rag-end2end-retriever/test_run/dummy-kb/my_knowledge_dataset.csv deleted file mode 100644 index 76da009a2f2..00000000000 --- a/examples/research_projects/rag-end2end-retriever/test_run/dummy-kb/my_knowledge_dataset.csv +++ /dev/null @@ -1,2 +0,0 @@ -Aaron Aaron Aaron ( or ; "Ahärôn") is a prophet, high priest, and the brother of Moses in the Abrahamic religions. 
Knowledge of Aaron, along with his brother Moses, comes exclusively from religious texts, such as the Bible and Quran. The Hebrew Bible relates that, unlike Moses, who grew up in the Egyptian royal court, Aaron and his elder sister Miriam remained with their kinsmen in the eastern border-land of Egypt (Goshen). When Moses first confronted the Egyptian king about the Israelites, Aaron served as his brother's spokesman ("prophet") to the Pharaoh. Part of the Law (Torah) that Moses received from God at Sinai granted Aaron the priesthood for himself and his male descendants, and he became the first High Priest of the Israelites. Aaron died before the Israelites crossed the North Jordan river and he was buried on Mount Hor (Numbers 33:39; Deuteronomy 10:6 says he died and was buried at Moserah). Aaron is also mentioned in the New Testament of the Bible. According to the Book of Exodus, Aaron first functioned as Moses' assistant. Because Moses complained that he could not speak well, God appointed Aaron as Moses' "prophet" (Exodus 4:10-17; 7:1). At the command of Moses, he let his rod turn into a snake. Then he stretched out his rod in order to bring on the first three plagues. After that, Moses tended to act and speak for himself. During the journey in the wilderness, Aaron was not always prominent or active. At the battle with Amalek, he was chosen with Hur to support the hand of Moses that held the "rod of God". When the revelation was given to Moses at biblical Mount Sinai, he headed the elders of Israel who accompanied Moses on the way to the summit. -"Pokémon" Pokémon , also known as in Japan, is a media franchise managed by The Pokémon Company, a Japanese consortium between Nintendo, Game Freak, and Creatures. The franchise copyright is shared by all three companies, but Nintendo is the sole owner of the trademark. The franchise was created by Satoshi Tajiri in 1995, and is centered on fictional creatures called "Pokémon", which humans, known as Pokémon Trainers, catch and train to battle each other for sport. The English slogan for the franchise is "Gotta Catch 'Em All". Works within the franchise are set in the Pokémon universe. The franchise began as "Pokémon Red" and "Green" (released outside of Japan as "Pokémon Red" and "Blue"), a pair of video games for the original Game Boy that were developed by Game Freak and published by Nintendo in February 1996. "Pokémon" has since gone on to become the highest-grossing media franchise of all time, with over in revenue up until March 2017. The original video game series is the second best-selling video game franchise (behind Nintendo's "Mario" franchise) with more than 300million copies sold and over 800million mobile downloads. In addition, the "Pokémon" franchise includes the world's top-selling toy brand, the top-selling trading card game with over 25.7billion cards sold, an anime television series that has become the most successful video game adaptation with over 20 seasons and 1,000 episodes in 124 countries, as well as an anime film series, a , books, manga comics, music, and merchandise. The franchise is also represented in other Nintendo media, such as the "Super Smash Bros." series. In November 2005, 4Kids Entertainment, which had managed the non-game related licensing of "Pokémon", announced that it had agreed not to renew the "Pokémon" representation agreement. The Pokémon Company International oversees all "Pokémon" licensing outside Asia. 
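The dummy knowledge base above is a two-column, tab-separated file (title, text) with one document per line; the removed `kb_encode_utils.py` and `use_own_knowledge_dataset.py` load it with the `datasets` CSV loader and split each document into roughly 100-word passages before embedding. The sketch below restates that loading step for clarity; it is illustrative rather than part of the removed files, and the relative path simply points at the dummy file shown above.

```python
# Illustrative sketch: load the tab-separated knowledge base (title \t text) and
# split each document into ~100-word passages, as the removed helpers do.
from datasets import load_dataset


def split_text(text, n=100, character=" "):
    """Split the text every n-th occurrence of character."""
    words = text.split(character)
    return [character.join(words[i : i + n]).strip() for i in range(0, len(words), n)]


kb = load_dataset(
    "csv",
    data_files=["test_run/dummy-kb/my_knowledge_dataset.csv"],  # the dummy file above
    split="train",
    delimiter="\t",
    column_names=["title", "text"],
)
passages = [p for row in kb for p in split_text(row["text"])]
print(len(passages))
```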
\ No newline at end of file diff --git a/examples/research_projects/rag-end2end-retriever/test_run/dummy-train-data/test.source b/examples/research_projects/rag-end2end-retriever/test_run/dummy-train-data/test.source deleted file mode 100644 index 3d5cbc38039..00000000000 --- a/examples/research_projects/rag-end2end-retriever/test_run/dummy-train-data/test.source +++ /dev/null @@ -1,8 +0,0 @@ -What does Moses' rod turn into ? -Who is Aron? -Where did Moses grow up ? -What happens at the command of the Moses ? -Who manages the Pokémon ? -Who owned the Pokémon trademark ? -What else include in Pokémon franchise ? -How many seasons in Pokémon animme series ? diff --git a/examples/research_projects/rag-end2end-retriever/test_run/dummy-train-data/test.target b/examples/research_projects/rag-end2end-retriever/test_run/dummy-train-data/test.target deleted file mode 100644 index a3a6e04372c..00000000000 --- a/examples/research_projects/rag-end2end-retriever/test_run/dummy-train-data/test.target +++ /dev/null @@ -1,8 +0,0 @@ -to a snake -Moses' assistant -Egyptian royal court -let his rod turn in to a snake -The Pokémon Company -Nintendo -world's top-selling toy brand, the top-selling trading card game -over 20 seasons diff --git a/examples/research_projects/rag-end2end-retriever/test_run/dummy-train-data/train.source b/examples/research_projects/rag-end2end-retriever/test_run/dummy-train-data/train.source deleted file mode 100644 index 9f72c3e03a7..00000000000 --- a/examples/research_projects/rag-end2end-retriever/test_run/dummy-train-data/train.source +++ /dev/null @@ -1,48 +0,0 @@ -What does Moses' rod turn into ? -Who is Aron? -Where did Moses grow up ? -What happens at the command of the Moses ? -Who manages the Pokémon ? -Who owned the Pokémon trademark ? -What else include in Pokémon franchise ? -How many seasons in Pokémon animme series ? -What does Moses' rod turn into ? -Who is Aron? -Where did Moses grow up ? -What happens at the command of the Moses ? -Who manages the Pokémon ? -Who owned the Pokémon trademark ? -What else include in Pokémon franchise ? -How many seasons in Pokémon animme series ? -What does Moses' rod turn into ? -Who is Aron? -Where did Moses grow up ? -What happens at the command of the Moses ? -Who manages the Pokémon ? -Who owned the Pokémon trademark ? -What else include in Pokémon franchise ? -How many seasons in Pokémon animme series ? -What does Moses' rod turn into ? -Who is Aron? -Where did Moses grow up ? -What happens at the command of the Moses ? -Who manages the Pokémon ? -Who owned the Pokémon trademark ? -What else include in Pokémon franchise ? -How many seasons in Pokémon animme series ? -What does Moses' rod turn into ? -Who is Aron? -Where did Moses grow up ? -What happens at the command of the Moses ? -Who manages the Pokémon ? -Who owned the Pokémon trademark ? -What else include in Pokémon franchise ? -How many seasons in Pokémon animme series ? -What does Moses' rod turn into ? -Who is Aron? -Where did Moses grow up ? -What happens at the command of the Moses ? -Who manages the Pokémon ? -Who owned the Pokémon trademark ? -What else include in Pokémon franchise ? -How many seasons in Pokémon animme series ? 
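The dummy training data above consists of parallel line files: line i of `train.source` is the question and line i of `train.target` is its answer, which is how the removed `Seq2SeqDataset` in `utils_rag.py` reads examples (via `linecache`, which is 1-indexed). The following is a minimal illustration of that pairing, not code from the removed project; `read_pair` is a hypothetical helper.

```python
# Illustrative sketch: read the i-th (source, target) pair from the parallel line files,
# mirroring the linecache-based lookup in the removed Seq2SeqDataset.
import linecache


def read_pair(data_dir: str, type_path: str, index: int):
    # linecache is 1-indexed, hence index + 1
    src = linecache.getline(f"{data_dir}/{type_path}.source", index + 1).rstrip("\n")
    tgt = linecache.getline(f"{data_dir}/{type_path}.target", index + 1).rstrip("\n")
    return src, tgt


# e.g. read_pair("test_run/dummy-train-data", "train", 0)
# -> ("What does Moses' rod turn into ?", "to a snake")
```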
\ No newline at end of file diff --git a/examples/research_projects/rag-end2end-retriever/test_run/dummy-train-data/train.target b/examples/research_projects/rag-end2end-retriever/test_run/dummy-train-data/train.target deleted file mode 100644 index 3bda0caf2e3..00000000000 --- a/examples/research_projects/rag-end2end-retriever/test_run/dummy-train-data/train.target +++ /dev/null @@ -1,48 +0,0 @@ -to a snake -Moses' assistant -Egyptian royal court -let his rod turn in to a snake -The Pokémon Company -Nintendo -world's top-selling toy brand, the top-selling trading card game -over 20 seasons -to a snake -Moses' assistant -Egyptian royal court -let his rod turn in to a snake -The Pokémon Company -Nintendo -world's top-selling toy brand, the top-selling trading card game -over 20 seasons -to a snake -Moses' assistant -Egyptian royal court -let his rod turn in to a snake -The Pokémon Company -Nintendo -world's top-selling toy brand, the top-selling trading card game -over 20 seasons -to a snake -Moses' assistant -Egyptian royal court -let his rod turn in to a snake -The Pokémon Company -Nintendo -world's top-selling toy brand, the top-selling trading card game -over 20 seasons -to a snake -Moses' assistant -Egyptian royal court -let his rod turn in to a snake -The Pokémon Company -Nintendo -world's top-selling toy brand, the top-selling trading card game -over 20 seasons -to a snake -Moses' assistant -Egyptian royal court -let his rod turn in to a snake -The Pokémon Company -Nintendo -world's top-selling toy brand, the top-selling trading card game -over 20 seasons \ No newline at end of file diff --git a/examples/research_projects/rag-end2end-retriever/test_run/dummy-train-data/val.source b/examples/research_projects/rag-end2end-retriever/test_run/dummy-train-data/val.source deleted file mode 100644 index a2c628e9ca0..00000000000 --- a/examples/research_projects/rag-end2end-retriever/test_run/dummy-train-data/val.source +++ /dev/null @@ -1,8 +0,0 @@ -What does Moses' rod turn into ? -Who is Aron? -Where did Moses grow up ? -What happens at the command of the Moses ? -Who manages the Pokémon ? -Who owned the Pokémon trademark ? -What else include in Pokémon franchise ? -How many seasons in Pokémon animme series ? \ No newline at end of file diff --git a/examples/research_projects/rag-end2end-retriever/test_run/dummy-train-data/val.target b/examples/research_projects/rag-end2end-retriever/test_run/dummy-train-data/val.target deleted file mode 100644 index 57bfcf5270a..00000000000 --- a/examples/research_projects/rag-end2end-retriever/test_run/dummy-train-data/val.target +++ /dev/null @@ -1,8 +0,0 @@ -to a snake -Moses' assistant -Egyptian royal court -let his rod turn in to a snake -The Pokémon Company -Nintendo -world's top-selling toy brand, the top-selling trading card game -over 20 seasons \ No newline at end of file diff --git a/examples/research_projects/rag-end2end-retriever/test_run/test_finetune.sh b/examples/research_projects/rag-end2end-retriever/test_run/test_finetune.sh deleted file mode 100755 index c44d110d200..00000000000 --- a/examples/research_projects/rag-end2end-retriever/test_run/test_finetune.sh +++ /dev/null @@ -1,57 +0,0 @@ -# Add parent directory to python path to access lightning_base.py -export PYTHONPATH="../":"${PYTHONPATH}" - -#creates the custom knowlegebase -python use_own_knowledge_dataset.py - - -# Start a single-node Ray cluster. 
-ray start --head - -# A sample finetuning run, you need to specify data_dir, output_dir and model_name_or_path -# run ./examples/rag/finetune_rag_ray.sh --help to see all the possible options - - - -python finetune_rag.py \ - --model_name_or_path facebook/rag-token-base \ - --model_type rag_token \ - --fp16 \ - --gpus 2 \ - --profile \ - --do_train \ - --end2end \ - --do_predict \ - --n_val -1 \ - --train_batch_size 1 \ - --eval_batch_size 1 \ - --max_source_length 128 \ - --max_target_length 25 \ - --val_max_target_length 25 \ - --test_max_target_length 25 \ - --label_smoothing 0.1 \ - --dropout 0.1 \ - --attention_dropout 0.1 \ - --weight_decay 0.001 \ - --adam_epsilon 1e-08 \ - --max_grad_norm 0.1 \ - --lr_scheduler polynomial \ - --learning_rate 3e-05 \ - --num_train_epochs 10 \ - --warmup_steps 500 \ - --gradient_accumulation_steps 1 \ - --distributed_retriever ray \ - --num_retrieval_workers 4 \ - --index_name custom \ - --context_encoder_name facebook/dpr-ctx_encoder-multiset-base \ - --index_gpus 2 \ - --gpu_order [2,3,4,5,6,7,8,9,0,1] \ - --indexing_freq 5 - - - -# Stop the Ray cluster. -ray stop - -#CUDA_VISIBLE_DEVICES=2,3,4,5,6,7,8,9,0,1 sh ./test_run/test_finetune.sh -#Make sure --gpu_order is same. \ No newline at end of file diff --git a/examples/research_projects/rag-end2end-retriever/test_run/test_rag_new_features.sh b/examples/research_projects/rag-end2end-retriever/test_run/test_rag_new_features.sh deleted file mode 100755 index 6c667c09403..00000000000 --- a/examples/research_projects/rag-end2end-retriever/test_run/test_rag_new_features.sh +++ /dev/null @@ -1,16 +0,0 @@ -export PYTHONPATH="../":"${PYTHONPATH}" - -python use_own_knowledge_dataset.py - -ray start --head -python finetune_rag.py \ - --model_name_or_path facebook/rag-token-base \ - --model_type rag_token \ - --context_encoder_name facebook/dpr-ctx_encoder-multiset-base \ - --fp16 \ - --gpus 1 \ - --profile \ - --end2end \ - --index_name custom - -ray stop diff --git a/examples/research_projects/rag-end2end-retriever/use_own_knowledge_dataset.py b/examples/research_projects/rag-end2end-retriever/use_own_knowledge_dataset.py deleted file mode 100644 index 20e0ea2d3cc..00000000000 --- a/examples/research_projects/rag-end2end-retriever/use_own_knowledge_dataset.py +++ /dev/null @@ -1,175 +0,0 @@ -import logging -import os -from dataclasses import dataclass, field -from functools import partial -from pathlib import Path -from tempfile import TemporaryDirectory -from typing import List, Optional - -import faiss -import torch -from datasets import Features, Sequence, Value, load_dataset - -from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser - - -logger = logging.getLogger(__name__) -torch.set_grad_enabled(False) -device = "cuda" if torch.cuda.is_available() else "cpu" - - -def split_text(text: str, n=100, character=" ") -> List[str]: - """Split the text every ``n``-th occurrence of ``character``""" - text = text.split(character) - return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)] - - -def split_documents(documents: dict) -> dict: - """Split documents into passages""" - titles, texts = [], [] - for title, text in zip(documents["title"], documents["text"]): - if text is not None: - for passage in split_text(text): - titles.append(title if title is not None else "") - texts.append(passage) - return {"title": titles, "text": texts} - - -def embed(documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast) -> 
dict: - """Compute the DPR embeddings of document passages""" - input_ids = ctx_tokenizer( - documents["title"], documents["text"], truncation=True, padding="longest", return_tensors="pt" - )["input_ids"] - embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output - return {"embeddings": embeddings.detach().cpu().numpy()} - - -def main( - rag_example_args: "RagExampleArguments", - processing_args: "ProcessingArguments", - index_hnsw_args: "IndexHnswArguments", -): - ###################################### - logger.info("Step 1 - Create the dataset") - ###################################### - - # The dataset needed for RAG must have three columns: - # - title (string): title of the document - # - text (string): text of a passage of the document - # - embeddings (array of dimension d): DPR representation of the passage - # Let's say you have documents in tab-separated csv files with columns "title" and "text" - assert os.path.isfile(rag_example_args.csv_path), "Please provide a valid path to a csv file" - - # You can load a Dataset object this way - dataset = load_dataset( - "csv", data_files=[rag_example_args.csv_path], split="train", delimiter="\t", column_names=["title", "text"] - ) - - # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets?highlight=csv#csv-files - - # Then split the documents into passages of 100 words - dataset = dataset.map(split_documents, batched=True, num_proc=processing_args.num_proc) - - # And compute the embeddings - ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=device) - ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name) - new_features = Features( - {"text": Value("string"), "title": Value("string"), "embeddings": Sequence(Value("float32"))} - ) # optional, save as float32 instead of float64 to save space - dataset = dataset.map( - partial(embed, ctx_encoder=ctx_encoder, ctx_tokenizer=ctx_tokenizer), - batched=True, - batch_size=processing_args.batch_size, - features=new_features, - ) - - # And finally save your dataset - passages_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset") - dataset.save_to_disk(passages_path) - # from datasets import load_from_disk - # dataset = load_from_disk(passages_path) # to reload the dataset - - ###################################### - logger.info("Step 2 - Index the dataset") - ###################################### - - # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search - index = faiss.IndexHNSWFlat(index_hnsw_args.d, index_hnsw_args.m, faiss.METRIC_INNER_PRODUCT) - dataset.add_faiss_index("embeddings", custom_index=index) - - # And save the index - index_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset_hnsw_index.faiss") - dataset.get_index("embeddings").save(index_path) - # dataset.load_faiss_index("embeddings", index_path) # to reload the index - - -@dataclass -class RagExampleArguments: - csv_path: str = field( - default=str(Path(__file__).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv"), - metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"}, - ) - question: Optional[str] = field( - default=None, - metadata={"help": "Question that is passed as input to RAG. 
Default is 'What does Moses' rod turn into ?'."}, - ) - rag_model_name: str = field( - default="facebook/rag-sequence-nq", - metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"}, - ) - dpr_ctx_encoder_model_name: str = field( - default="facebook/dpr-ctx_encoder-multiset-base", - metadata={ - "help": ( - "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or" - " 'facebook/dpr-ctx_encoder-multiset-base'" - ) - }, - ) - output_dir: Optional[str] = field( - default=str(Path(__file__).parent / "test_run" / "dummy-kb"), - metadata={"help": "Path to a directory where the dataset passages and the index will be saved"}, - ) - - -@dataclass -class ProcessingArguments: - num_proc: Optional[int] = field( - default=None, - metadata={ - "help": "The number of processes to use to split the documents into passages. Default is single process." - }, - ) - batch_size: int = field( - default=16, - metadata={ - "help": "The batch size to use when computing the passages embeddings using the DPR context encoder." - }, - ) - - -@dataclass -class IndexHnswArguments: - d: int = field( - default=768, - metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."}, - ) - m: int = field( - default=128, - metadata={ - "help": ( - "The number of bi-directional links created for every new element during the HNSW index construction." - ) - }, - ) - - -if __name__ == "__main__": - logging.basicConfig(level=logging.WARNING) - logger.setLevel(logging.INFO) - - parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments)) - rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses() - with TemporaryDirectory() as tmp_dir: - rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir - main(rag_example_args, processing_args, index_hnsw_args) diff --git a/examples/research_projects/rag-end2end-retriever/utils_rag.py b/examples/research_projects/rag-end2end-retriever/utils_rag.py deleted file mode 100644 index ec98c1d782e..00000000000 --- a/examples/research_projects/rag-end2end-retriever/utils_rag.py +++ /dev/null @@ -1,244 +0,0 @@ -import itertools -import json -import linecache -import os -import pickle -import re -import socket -import string -from collections import Counter -from logging import getLogger -from pathlib import Path -from typing import Callable, Dict, Iterable, List - -import git -import torch -from torch.utils.data import Dataset - -from transformers import BartTokenizer, RagTokenizer, T5Tokenizer - - -def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"): - extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {} - tokenizer.padding_side = padding_side - return tokenizer( - [line], - max_length=max_length, - padding="max_length" if pad_to_max_length else None, - truncation=True, - return_tensors=return_tensors, - add_special_tokens=True, - **extra_kw, - ) - - -def trim_batch( - input_ids, - pad_token_id, - attention_mask=None, -): - """Remove columns that are populated exclusively by pad_token_id""" - keep_column_mask = input_ids.ne(pad_token_id).any(dim=0) - if attention_mask is None: - return input_ids[:, keep_column_mask] - else: - return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask]) - - -class Seq2SeqDataset(Dataset): - def __init__( - self, - tokenizer, - data_dir, - max_source_length, - 
max_target_length, - type_path="train", - n_obs=None, - src_lang=None, - tgt_lang=None, - prefix="", - ): - super().__init__() - self.src_file = Path(data_dir).joinpath(type_path + ".source") - self.tgt_file = Path(data_dir).joinpath(type_path + ".target") - self.src_lens = self.get_char_lens(self.src_file) - self.max_source_length = max_source_length - self.max_target_length = max_target_length - assert min(self.src_lens) > 0, f"found empty line in {self.src_file}" - self.tokenizer = tokenizer - self.prefix = prefix - if n_obs is not None: - self.src_lens = self.src_lens[:n_obs] - self.src_lang = src_lang - self.tgt_lang = tgt_lang - - def __len__(self): - return len(self.src_lens) - - def __getitem__(self, index) -> Dict[str, torch.Tensor]: - index = index + 1 # linecache starts at 1 - source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n") - tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n") - assert source_line, f"empty source line for index {index}" - assert tgt_line, f"empty tgt line for index {index}" - - # Need to add eos token manually for T5 - if isinstance(self.tokenizer, T5Tokenizer): - source_line += self.tokenizer.eos_token - tgt_line += self.tokenizer.eos_token - - # Pad source and target to the right - source_tokenizer = ( - self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer - ) - target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer - - source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right") - target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right") - - source_ids = source_inputs["input_ids"].squeeze() - target_ids = target_inputs["input_ids"].squeeze() - src_mask = source_inputs["attention_mask"].squeeze() - return { - "input_ids": source_ids, - "attention_mask": src_mask, - "decoder_input_ids": target_ids, - } - - @staticmethod - def get_char_lens(data_file): - return [len(x) for x in Path(data_file).open().readlines()] - - def collate_fn(self, batch) -> Dict[str, torch.Tensor]: - input_ids = torch.stack([x["input_ids"] for x in batch]) - masks = torch.stack([x["attention_mask"] for x in batch]) - target_ids = torch.stack([x["decoder_input_ids"] for x in batch]) - tgt_pad_token_id = ( - self.tokenizer.generator.pad_token_id - if isinstance(self.tokenizer, RagTokenizer) - else self.tokenizer.pad_token_id - ) - src_pad_token_id = ( - self.tokenizer.question_encoder.pad_token_id - if isinstance(self.tokenizer, RagTokenizer) - else self.tokenizer.pad_token_id - ) - y = trim_batch(target_ids, tgt_pad_token_id) - source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks) - batch = { - "input_ids": source_ids, - "attention_mask": source_mask, - "decoder_input_ids": y, - } - return batch - - -logger = getLogger(__name__) - - -def flatten_list(summary_ids: List[List]): - return list(itertools.chain.from_iterable(summary_ids)) - - -def save_git_info(folder_path: str) -> None: - """Save git information to output_dir/git_log.json""" - repo_infos = get_git_info() - save_json(repo_infos, os.path.join(folder_path, "git_log.json")) - - -def save_json(content, path, indent=4, **json_dump_kwargs): - with open(path, "w") as f: - json.dump(content, f, indent=indent, **json_dump_kwargs) - - -def load_json(path): - with open(path) as f: - return json.load(f) - - -def get_git_info(): - repo = git.Repo(search_parent_directories=True) - repo_infos = { - 
"repo_id": str(repo), - "repo_sha": str(repo.head.object.hexsha), - "repo_branch": str(repo.active_branch), - "hostname": str(socket.gethostname()), - } - return repo_infos - - -def lmap(f: Callable, x: Iterable) -> List: - """list(map(f, x))""" - return list(map(f, x)) - - -def pickle_save(obj, path): - """pickle.dump(obj, path)""" - with open(path, "wb") as f: - return pickle.dump(obj, f) - - -def normalize_answer(s): - """Lower text and remove punctuation, articles and extra whitespace.""" - - def remove_articles(text): - return re.sub(r"\b(a|an|the)\b", " ", text) - - def white_space_fix(text): - return " ".join(text.split()) - - def remove_punc(text): - exclude = set(string.punctuation) - return "".join(ch for ch in text if ch not in exclude) - - def lower(text): - return text.lower() - - return white_space_fix(remove_articles(remove_punc(lower(s)))) - - -def f1_score(prediction, ground_truth): - prediction_tokens = normalize_answer(prediction).split() - ground_truth_tokens = normalize_answer(ground_truth).split() - common = Counter(prediction_tokens) & Counter(ground_truth_tokens) - num_same = sum(common.values()) - if num_same == 0: - return 0 - precision = 1.0 * num_same / len(prediction_tokens) - recall = 1.0 * num_same / len(ground_truth_tokens) - f1 = (2 * precision * recall) / (precision + recall) - return f1 - - -def exact_match_score(prediction, ground_truth): - return normalize_answer(prediction) == normalize_answer(ground_truth) - - -def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict: - assert len(output_lns) == len(reference_lns) - em = 0 - for hypo, pred in zip(output_lns, reference_lns): - em += exact_match_score(hypo, pred) - if len(output_lns) > 0: - em /= len(output_lns) - return {"em": em} - - -def is_rag_model(model_prefix): - return model_prefix.startswith("rag") - - -def set_extra_model_params(extra_params, hparams, config): - equivalent_param = {p: p for p in extra_params} - # T5 models don't have `dropout` param, they have `dropout_rate` instead - equivalent_param["dropout"] = "dropout_rate" - for p in extra_params: - if getattr(hparams, p, None): - if not hasattr(config, p) and not hasattr(config, equivalent_param[p]): - logger.info("config doesn't have a `{}` attribute".format(p)) - delattr(hparams, p) - continue - set_p = p if hasattr(config, p) else equivalent_param[p] - setattr(config, set_p, getattr(hparams, p)) - delattr(hparams, p) - return hparams, config diff --git a/examples/research_projects/rag/README.md b/examples/research_projects/rag/README.md deleted file mode 100644 index 59aa46a8952..00000000000 --- a/examples/research_projects/rag/README.md +++ /dev/null @@ -1,203 +0,0 @@ -# Intro - -Authors: @patrickvonplaten and @lhoestq - -Aimed at tackling the knowledge-intensive NLP tasks (think tasks a human wouldn't be expected to solve without access to external knowledge sources), RAG models are seq2seq models with access to a retrieval mechanism providing relevant context documents at training and evaluation time. - -A RAG model encapsulates two core components: a question encoder and a generator. -During a forward pass, we encode the input with the question encoder and pass it -to the retriever to extract relevant context documents. The documents are then prepended to the input. -Such contextualized inputs are passed to the generator. - -Read more about RAG at https://arxiv.org/abs/2005.11401. 
- -# Note - -⚠️ This project should be run with pytorch-lightning==1.3.1 which has a potential security vulnerability - -# Finetuning - -Our finetuning logic is based on scripts from [`examples/legacy/seq2seq`](https://github.com/huggingface/transformers/tree/main/examples/legacy/seq2seq). We accept training data in the same format as specified there - we expect a directory consisting of 6 text files: -```bash -train.source -train.target -val.source -val.target -test.source -test.target -``` - -A sample finetuning command (run ` ./examples/research_projects/rag/finetune_rag.py --help` to list all available options): - -```bash -python examples/research_projects/rag/finetune_rag.py \ - --data_dir $DATA_DIR \ - --output_dir $OUTPUT_DIR \ - --model_name_or_path $MODEL_NAME_OR_PATH \ - --model_type rag_sequence \ - --fp16 \ - --gpus 8 -``` -We publish two `base` models which can serve as a starting point for finetuning on downstream tasks (use them as `model_name_or_path`): -- [`facebook/rag-sequence-base`](https://huggingface.co/facebook/rag-sequence-base) - a base for finetuning `RagSequenceForGeneration` models, -- [`facebook/rag-token-base`](https://huggingface.co/facebook/rag-token-base) - a base for finetuning `RagTokenForGeneration` models. - -The `base` models initialize the question encoder with [`facebook/dpr-question_encoder-single-nq-base`](https://huggingface.co/facebook/dpr-question_encoder-single-nq-base) and the generator with [`facebook/bart-large`](https://huggingface.co/facebook/bart-large). - -If you would like to initialize finetuning with a base model using different question encoder and generator architectures, you can build it with a consolidation script, e.g.: -```bash -python examples/research_projects/rag/consolidate_rag_checkpoint.py \ - --model_type rag_sequence \ - --generator_name_or_path facebook/bart-large-cnn \ - --question_encoder_name_or_path facebook/dpr-question_encoder-single-nq-base \ - --dest path/to/checkpoint -``` -You will then be able to pass `path/to/checkpoint` as `model_name_or_path` to the `finetune_rag.py` script. - -## Document Retrieval -When running distributed fine-tuning, each training worker needs to retrieve contextual documents -for its input by querying a index loaded into memory. RAG provides two implementations for document retrieval, -one with [`torch.distributed`](https://pytorch.org/docs/stable/distributed.html) communication package and the other -with [`Ray`](https://docs.ray.io/en/master/). - -This option can be configured with the `--distributed_retriever` flag which can either be set to `pytorch` or `ray`. -By default this flag is set to `pytorch`. - -For the Pytorch implementation, only training worker 0 loads the index into CPU memory, and a gather/scatter pattern is used -to collect the inputs from the other training workers and send back the corresponding document embeddings. - -For the Ray implementation, the index is loaded in *separate* process(es). The training workers randomly select which -retriever worker to query. To use Ray for distributed retrieval, you have to set the `--distributed_retriever` arg to `ray`. -To configure the number of retrieval workers (the number of processes that load the index), you can set the `num_retrieval_workers` flag. -Also make sure to start the Ray cluster before running fine-tuning. - -```bash -# Start a single-node Ray cluster. 
-ray start --head - -python examples/research_projects/rag/finetune_rag.py \ - --data_dir $DATA_DIR \ - --output_dir $OUTPUT_DIR \ - --model_name_or_path $MODEL_NAME_OR_PATH \ - --model_type rag_sequence \ - --fp16 \ - --gpus 8 - --distributed_retriever ray \ - --num_retrieval_workers 4 - -# Stop the ray cluster once fine-tuning has finished. -ray stop -``` - -Using Ray can lead to retrieval speedups on multi-GPU settings since multiple processes load the index rather than -just the rank 0 training worker. Using Ray also allows you to load the index on GPU since the index is loaded on a separate -processes than the model, while with pytorch distributed retrieval, both are loaded in the same process potentially leading to GPU OOM. - -# Evaluation -Our evaluation script enables two modes of evaluation (controlled by the `eval_mode` argument): `e2e` - end2end evaluation, returns EM (exact match) and F1 scores calculated for the downstream task and `retrieval` - which returns precision@k of the documents retrieved for provided inputs. - -The evaluation script expects paths to two files: -- `evaluation_set` - a path to a file specifying the evaluation dataset, a single input per line. -- `gold_data_path` - a path to a file containing ground truth answers for datapoints from the `evaluation_set`, a single output per line. Check below for expected formats of the gold data files. - - -## Retrieval evaluation -For `retrieval` evaluation, we expect a gold data file where each line will consist of a tab-separated list of document titles constituting positive contexts for respective datapoints from the `evaluation_set`. E.g. given a question `who sings does he love me with reba` in the `evaluation_set`, a respective ground truth line could look as follows: -``` -Does He Love You Does He Love You Red Sandy Spika dress of Reba McEntire Greatest Hits Volume Two (Reba McEntire album) Shoot for the Moon (album) -``` - -We demonstrate how to evaluate retrieval against DPR evaluation data. You can download respective files from links listed [here](https://github.com/facebookresearch/DPR/blob/master/data/download_data.py#L39-L45). - -1. Download and unzip the gold data file. We use the `biencoder-nq-dev` from https://dl.fbaipublicfiles.com/dpr/data/retriever/biencoder-nq-dev.json.gz. - ```bash - wget https://dl.fbaipublicfiles.com/dpr/data/retriever/biencoder-nq-dev.json.gz && gzip -d biencoder-nq-dev.json.gz - ``` - -2. Parse the unziped file using the `parse_dpr_relevance_data.py` - ```bash - mkdir output # or wherever you want to save this - python examples/research_projects/rag/parse_dpr_relevance_data.py \ - --src_path biencoder-nq-dev.json \ - --evaluation_set output/biencoder-nq-dev.questions \ - --gold_data_path output/biencoder-nq-dev.pages - ``` -3. 
Run evaluation: - ```bash - python examples/research_projects/rag/eval_rag.py \ - --model_name_or_path facebook/rag-sequence-nq \ - --model_type rag_sequence \ - --evaluation_set output/biencoder-nq-dev.questions \ - --gold_data_path output/biencoder-nq-dev.pages \ - --predictions_path output/retrieval_preds.tsv \ - --eval_mode retrieval \ - --k 1 - ``` - ```bash - # EXPLANATION - python examples/research_projects/rag/eval_rag.py \ - --model_name_or_path facebook/rag-sequence-nq \ # model name or path of the model we're evaluating - --model_type rag_sequence \ # RAG model type (rag_token or rag_sequence) - --evaluation_set output/biencoder-nq-dev.questions \ # an input dataset for evaluation - --gold_data_path poutput/biencoder-nq-dev.pages \ # a dataset containing ground truth answers for samples from the evaluation_set - --predictions_path output/retrieval_preds.tsv \ # name of file where predictions will be stored - --eval_mode retrieval \ # indicates whether we're performing retrieval evaluation or e2e evaluation - --k 1 # parameter k for the precision@k metric - - ``` -## End-to-end evaluation - -We support two formats of the gold data file (controlled by the `gold_data_mode` parameter): -- `qa` - where a single line has the following format: `input [tab] output_list`, e.g.: -``` -who is the owner of reading football club ['Xiu Li Dai', 'Dai Yongge', 'Dai Xiuli', 'Yongge Dai'] -``` -- `ans` - where a single line contains a single expected answer, e.g.: -``` -Xiu Li Dai -``` - -Predictions of the model for the samples from the `evaluation_set` will be saved under the path specified by the `predictions_path` parameter. -If this path already exists, the script will use saved predictions to calculate metrics. -Add `--recalculate` parameter to force the script to perform inference from scratch. - -An example e2e evaluation run could look as follows: -```bash -python examples/research_projects/rag/eval_rag.py \ - --model_name_or_path facebook/rag-sequence-nq \ - --model_type rag_sequence \ - --evaluation_set path/to/test.source \ - --gold_data_path path/to/gold_data \ - --predictions_path path/to/e2e_preds.txt \ - --eval_mode e2e \ - --gold_data_mode qa \ - --n_docs 5 \ # You can experiment with retrieving different number of documents at evaluation time - --print_predictions \ - --recalculate \ # adding this parameter will force recalculating predictions even if predictions_path already exists -``` - -# Use your own knowledge source - -By default, RAG uses the English Wikipedia as a knowledge source, known as the 'wiki_dpr' dataset. -With `use_custom_knowledge_dataset.py` you can build your own knowledge source, *e.g.* for RAG. 
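
The commands below show how to build such a knowledge source and then finetune on it. As a complement, here is a hedged sketch of loading the produced passages dataset and FAISS index into a retriever for plain inference; the paths are placeholders matching the `--output_dir` used below, and the snippet relies on the `index_name="custom"` code path documented for `RagRetriever`.

```python
from transformers import RagRetriever, RagSequenceForGeneration, RagTokenizer

# Placeholder paths: use_own_knowledge_dataset.py saves the passages dataset and the HNSW index
# under the directory passed as --output_dir.
passages_path = "path/to/my_knowledge_dataset/my_knowledge_dataset"
index_path = "path/to/my_knowledge_dataset/my_knowledge_dataset_hnsw_index.faiss"

tokenizer = RagTokenizer.from_pretrained("facebook/rag-sequence-nq")
retriever = RagRetriever.from_pretrained(
    "facebook/rag-sequence-nq",
    index_name="custom",
    passages_path=passages_path,
    index_path=index_path,
)
model = RagSequenceForGeneration.from_pretrained("facebook/rag-sequence-nq", retriever=retriever)

inputs = tokenizer("What does Moses' rod turn into ?", return_tensors="pt")
generated = model.generate(input_ids=inputs["input_ids"])
print(tokenizer.batch_decode(generated, skip_special_tokens=True)[0])
```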
- -For instance, if documents are serialized as tab-separated csv files with the columns "title" and "text", one can use `use_own_knowledge_dataset.py` as follows: -```bash -python examples/research_projects/rag/use_own_knowledge_dataset.py \ - --csv_path path/to/my_csv \ - --output_dir path/to/my_knowledge_dataset \ -``` - -The created outputs in `path/to/my_knowledge_dataset` can then be used to finetune RAG as follows: -```bash -python examples/research_projects/rag/finetune_rag.py \ - --data_dir $DATA_DIR \ - --output_dir $OUTPUT_DIR \ - --model_name_or_path $MODEL_NAME_OR_PATH \ - --model_type rag_sequence \ - --fp16 \ - --gpus 8 - --index_name custom - --passages_path path/to/data/my_knowledge_dataset - --index_path path/to/my_knowledge_dataset_hnsw_index.faiss -``` diff --git a/examples/research_projects/rag/__init__.py b/examples/research_projects/rag/__init__.py deleted file mode 100644 index 3cee09bb7f5..00000000000 --- a/examples/research_projects/rag/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -import os -import sys - - -sys.path.insert(1, os.path.dirname(os.path.realpath(__file__))) diff --git a/examples/research_projects/rag/_test_finetune_rag.py b/examples/research_projects/rag/_test_finetune_rag.py deleted file mode 100644 index 0906295b301..00000000000 --- a/examples/research_projects/rag/_test_finetune_rag.py +++ /dev/null @@ -1,111 +0,0 @@ -import json -import logging -import os -import sys -from pathlib import Path - -import finetune_rag - -from transformers.file_utils import is_apex_available -from transformers.testing_utils import ( - TestCasePlus, - execute_subprocess_async, - require_ray, - require_torch_gpu, - require_torch_multi_gpu, -) - - -logging.basicConfig(level=logging.DEBUG) -logger = logging.getLogger() - -stream_handler = logging.StreamHandler(sys.stdout) -logger.addHandler(stream_handler) - - -class RagFinetuneExampleTests(TestCasePlus): - def _create_dummy_data(self, data_dir): - os.makedirs(data_dir, exist_ok=True) - contents = {"source": "What is love ?", "target": "life"} - n_lines = {"train": 12, "val": 2, "test": 2} - for split in ["train", "test", "val"]: - for field in ["source", "target"]: - content = "\n".join([contents[field]] * n_lines[split]) - with open(os.path.join(data_dir, f"{split}.{field}"), "w") as f: - f.write(content) - - def _run_finetune(self, gpus: int, distributed_retriever: str = "pytorch"): - tmp_dir = self.get_auto_remove_tmp_dir() - output_dir = os.path.join(tmp_dir, "output") - data_dir = os.path.join(tmp_dir, "data") - self._create_dummy_data(data_dir=data_dir) - - testargs = f""" - --data_dir {data_dir} \ - --output_dir {output_dir} \ - --model_name_or_path facebook/rag-sequence-base \ - --model_type rag_sequence \ - --do_train \ - --do_predict \ - --n_val -1 \ - --val_check_interval 1.0 \ - --train_batch_size 2 \ - --eval_batch_size 1 \ - --max_source_length 25 \ - --max_target_length 25 \ - --val_max_target_length 25 \ - --test_max_target_length 25 \ - --label_smoothing 0.1 \ - --dropout 0.1 \ - --attention_dropout 0.1 \ - --weight_decay 0.001 \ - --adam_epsilon 1e-08 \ - --max_grad_norm 0.1 \ - --lr_scheduler polynomial \ - --learning_rate 3e-04 \ - --num_train_epochs 1 \ - --warmup_steps 4 \ - --gradient_accumulation_steps 1 \ - --distributed-port 8787 \ - --use_dummy_dataset 1 \ - --distributed_retriever {distributed_retriever} \ - """.split() - - if gpus > 0: - testargs.append(f"--gpus={gpus}") - if is_apex_available(): - testargs.append("--fp16") - else: - testargs.append("--gpus=0") - 
testargs.append("--distributed_backend=ddp_cpu") - testargs.append("--num_processes=2") - - cmd = [sys.executable, str(Path(finetune_rag.__file__).resolve())] + testargs - execute_subprocess_async(cmd, env=self.get_env()) - - metrics_save_path = os.path.join(output_dir, "metrics.json") - with open(metrics_save_path) as f: - result = json.load(f) - return result - - @require_torch_gpu - def test_finetune_gpu(self): - result = self._run_finetune(gpus=1) - self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2) - - @require_torch_multi_gpu - def test_finetune_multigpu(self): - result = self._run_finetune(gpus=2) - self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2) - - @require_torch_gpu - @require_ray - def test_finetune_gpu_ray_retrieval(self): - result = self._run_finetune(gpus=1, distributed_retriever="ray") - self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2) - - @require_torch_multi_gpu - @require_ray - def test_finetune_multigpu_ray_retrieval(self): - result = self._run_finetune(gpus=1, distributed_retriever="ray") - self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2) diff --git a/examples/research_projects/rag/callbacks_rag.py b/examples/research_projects/rag/callbacks_rag.py deleted file mode 100644 index d75f97995bd..00000000000 --- a/examples/research_projects/rag/callbacks_rag.py +++ /dev/null @@ -1,116 +0,0 @@ -import logging -from pathlib import Path - -import numpy as np -import pytorch_lightning as pl -import torch -from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint -from pytorch_lightning.utilities import rank_zero_only -from utils_rag import save_json - - -def count_trainable_parameters(model): - model_parameters = filter(lambda p: p.requires_grad, model.parameters()) - params = sum([np.prod(p.size()) for p in model_parameters]) - return params - - -logger = logging.getLogger(__name__) - - -def get_checkpoint_callback(output_dir, metric): - """Saves the best model by validation EM score.""" - if metric == "rouge2": - exp = "{val_avg_rouge2:.4f}-{step_count}" - elif metric == "bleu": - exp = "{val_avg_bleu:.4f}-{step_count}" - elif metric == "em": - exp = "{val_avg_em:.4f}-{step_count}" - else: - raise NotImplementedError( - f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this" - " function." - ) - - checkpoint_callback = ModelCheckpoint( - dirpath=output_dir, - filename=exp, - monitor=f"val_{metric}", - mode="max", - save_top_k=3, - every_n_epochs=1, # maybe save a checkpoint every time val is run, not just end of epoch. - ) - return checkpoint_callback - - -def get_early_stopping_callback(metric, patience): - return EarlyStopping( - monitor=f"val_{metric}", # does this need avg? 
- mode="min" if "loss" in metric else "max", - patience=patience, - verbose=True, - ) - - -class Seq2SeqLoggingCallback(pl.Callback): - def on_batch_end(self, trainer, pl_module): - lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)} - pl_module.logger.log_metrics(lrs) - - @rank_zero_only - def _write_logs( - self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True - ) -> None: - logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****") - metrics = trainer.callback_metrics - trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]}) - # Log results - od = Path(pl_module.hparams.output_dir) - if type_path == "test": - results_file = od / "test_results.txt" - generations_file = od / "test_generations.txt" - else: - # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json - # If people want this it will be easy enough to add back. - results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt" - generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt" - results_file.parent.mkdir(exist_ok=True) - generations_file.parent.mkdir(exist_ok=True) - with open(results_file, "a+") as writer: - for key in sorted(metrics): - if key in ["log", "progress_bar", "preds"]: - continue - val = metrics[key] - if isinstance(val, torch.Tensor): - val = val.item() - msg = f"{key}: {val:.6f}\n" - writer.write(msg) - - if not save_generations: - return - - if "preds" in metrics: - content = "\n".join(metrics["preds"]) - generations_file.open("w+").write(content) - - @rank_zero_only - def on_train_start(self, trainer, pl_module): - try: - npars = pl_module.model.model.num_parameters() - except AttributeError: - npars = pl_module.model.num_parameters() - - n_trainable_pars = count_trainable_parameters(pl_module) - # mp stands for million parameters - trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6}) - - @rank_zero_only - def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule): - save_json(pl_module.metrics, pl_module.metrics_save_path) - return self._write_logs(trainer, pl_module, "test") - - @rank_zero_only - def on_validation_end(self, trainer: pl.Trainer, pl_module): - save_json(pl_module.metrics, pl_module.metrics_save_path) - # Uncommenting this will save val generations - # return self._write_logs(trainer, pl_module, "valid") diff --git a/examples/research_projects/rag/consolidate_rag_checkpoint.py b/examples/research_projects/rag/consolidate_rag_checkpoint.py deleted file mode 100644 index 6adae75fea9..00000000000 --- a/examples/research_projects/rag/consolidate_rag_checkpoint.py +++ /dev/null @@ -1,101 +0,0 @@ -""" -A script creating a RAG checkpoint from a generator and a question encoder checkpoints. 
-""" - -import argparse -from pathlib import Path - -from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration - - -def consolidate( - model_type, - generator_name_or_path: str, - question_encoder_name_or_path: str, - dest_dir: Path, - config_name_or_path: str = None, - generator_tokenizer_name_or_path: str = None, - question_encoder_tokenizer_name_or_path: str = None, -): - if config_name_or_path is None: - config_name_or_path = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base" - - if generator_tokenizer_name_or_path is None: - generator_tokenizer_name_or_path = generator_name_or_path - - if question_encoder_tokenizer_name_or_path is None: - question_encoder_tokenizer_name_or_path = question_encoder_name_or_path - - model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration - - # Save model. - rag_config = RagConfig.from_pretrained(config_name_or_path) - gen_config = AutoConfig.from_pretrained(generator_name_or_path) - question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path) - - rag_config.generator = gen_config - rag_config.question_encoder = question_encoder_config - - rag_model = model_class.from_pretrained_question_encoder_generator( - question_encoder_name_or_path, generator_name_or_path, config=rag_config - ) - rag_model.save_pretrained(dest_dir) - - # Sanity check. - model_class.from_pretrained(dest_dir) - - # Save tokenizers. - gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path) - gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/") - question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path) - question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/") - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument( - "--model_type", - choices=["rag_sequence", "rag_token"], - required=True, - type=str, - help="RAG model type: rag_sequence, rag_token", - ) - parser.add_argument("--dest", type=str, required=True, help="Path to the output checkpoint directory.") - parser.add_argument("--generator_name_or_path", type=str, required=True, help="Generator model identifier") - parser.add_argument( - "--question_encoder_name_or_path", type=str, required=True, help="Question encoder model identifier" - ) - - parser.add_argument( - "--generator_tokenizer_name_or_path", - type=str, - help="Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``", - ) - parser.add_argument( - "--question_encoder_tokenizer_name_or_path", - type=str, - help="Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``", - ) - parser.add_argument( - "--config_name_or_path", - type=str, - help=( - "Identifier of the model config to use, if not provided, resolves to a base config for a given" - " ``model_type``" - ), - ) - - args = parser.parse_args() - - dest_dir = Path(args.dest) - dest_dir.mkdir(exist_ok=True) - - consolidate( - args.model_type, - args.generator_name_or_path, - args.question_encoder_name_or_path, - dest_dir, - args.config_name_or_path, - args.generator_tokenizer_name_or_path, - args.question_encoder_tokenizer_name_or_path, - ) diff --git a/examples/research_projects/rag/distributed_pytorch_retriever.py b/examples/research_projects/rag/distributed_pytorch_retriever.py deleted file mode 100644 index b8c4b6fc3c5..00000000000 --- 
a/examples/research_projects/rag/distributed_pytorch_retriever.py +++ /dev/null @@ -1,138 +0,0 @@ -import logging -import os -from typing import List, Tuple - -import numpy as np -import psutil -import torch -import torch.distributed as dist - -from transformers import RagRetriever - - -logger = logging.getLogger(__name__) - - -class RagPyTorchDistributedRetriever(RagRetriever): - """ - A distributed retriever built on top of the ``torch.distributed`` communication package. During training all workers - initialize their own instance of the retriever, however, only the main worker loads the index into memory. The index is stored - in cpu memory. The index will also work well in a non-distributed setup. - - Args: - config (:class:`~transformers.RagConfig`): - The configuration of the RAG model this Retriever is used with. Contains parameters indicating which ``Index`` to build. - question_encoder_tokenizer (:class:`~transformers.PreTrainedTokenizer`): - The tokenizer that was used to tokenize the question. - It is used to decode the question and then use the generator_tokenizer. - generator_tokenizer (:class:`~transformers.PreTrainedTokenizer`): - The tokenizer used for the generator part of the RagModel. - index (:class:`~transformers.models.rag.retrieval_rag.Index`, optional, defaults to the one defined by the configuration): - If specified, use this index instead of the one built using the configuration - """ - - def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, index=None): - super().__init__( - config, - question_encoder_tokenizer=question_encoder_tokenizer, - generator_tokenizer=generator_tokenizer, - index=index, - init_retrieval=False, - ) - self.process_group = None - - def init_retrieval(self, distributed_port: int): - """ - Retriever initialization function, needs to be called from the training process. The function sets some common parameters - and environment variables. On top of that, (only) the main process in the process group loads the index into memory. - - Args: - distributed_port (:obj:`int`): - The port on which the main communication of the training run is carried out. We set the port for retrieval-related - communication as ``distributed_port + 1``. 
- """ - - logger.info("initializing retrieval") - - # initializing a separate process group for retrieval as the default - # nccl backend doesn't support gather/scatter operations while gloo - # is too slow to replace nccl for the core gpu communication - if dist.is_initialized(): - logger.info("dist initialized") - # needs to be set manually - os.environ["GLOO_SOCKET_IFNAME"] = self._infer_socket_ifname() - # avoid clash with the NCCL port - os.environ["MASTER_PORT"] = str(distributed_port + 1) - self.process_group = dist.new_group(ranks=None, backend="gloo") - - # initialize retriever only on the main worker - if not dist.is_initialized() or self._is_main(): - logger.info("dist not initialized / main") - self.index.init_index() - - # all processes wait until the retriever is initialized by the main process - if dist.is_initialized(): - torch.distributed.barrier(group=self.process_group) - - def _is_main(self): - return dist.get_rank(group=self.process_group) == 0 - - def _scattered(self, scatter_list, target_shape, target_type=torch.float32): - target_tensor = torch.empty(target_shape, dtype=target_type) - dist.scatter(target_tensor, src=0, scatter_list=scatter_list, group=self.process_group) - return target_tensor - - def _infer_socket_ifname(self): - addrs = psutil.net_if_addrs() - # a hacky way to deal with varying network interface names - ifname = next((addr for addr in addrs if addr.startswith("e")), None) - return ifname - - def retrieve(self, question_hidden_states: np.ndarray, n_docs: int) -> Tuple[np.ndarray, List[dict]]: - """ - Retrieves documents for specified ``question_hidden_states``. The main process, which has the access to the index stored in memory, gathers queries - from all the processes in the main training process group, performs the retrieval and scatters back the results. - - Args: - question_hidden_states (:obj:`np.ndarray` of shape :obj:`(batch_size, vector_size)`): - A batch of query vectors to retrieve with. - n_docs (:obj:`int`): - The number of docs retrieved per query. - - Output: - retrieved_doc_embeds (:obj:`np.ndarray` of shape :obj:`(batch_size, n_docs, dim)` - The retrieval embeddings of the retrieved docs per query. - doc_ids (:obj:`np.ndarray` of shape :obj:`batch_size, n_docs`) - The ids of the documents in the index - doc_dicts (:obj:`List[dict]`): - The retrieved_doc_embeds examples per query. 
- """ - - # single GPU training - if not dist.is_initialized(): - doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs) - return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids) - - # distributed training - world_size = dist.get_world_size(group=self.process_group) - - # gather logic - gather_list = None - if self._is_main(): - gather_list = [torch.empty(question_hidden_states.shape, dtype=torch.float32) for _ in range(world_size)] - dist.gather(torch.tensor(question_hidden_states), dst=0, gather_list=gather_list, group=self.process_group) - - # scatter logic - n_queries = question_hidden_states.shape[0] - scatter_ids = [] - scatter_vectors = [] - if self._is_main(): - assert len(gather_list) == world_size - ids, vectors = self._main_retrieve(torch.cat(gather_list).numpy(), n_docs) - ids, vectors = torch.tensor(ids), torch.tensor(vectors) - scatter_ids = self._chunk_tensor(ids, n_queries) - scatter_vectors = self._chunk_tensor(vectors, n_queries) - doc_ids = self._scattered(scatter_ids, [n_queries, n_docs], target_type=torch.int64) - retrieved_doc_embeds = self._scattered(scatter_vectors, [n_queries, n_docs, question_hidden_states.shape[1]]) - - return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(doc_ids) diff --git a/examples/research_projects/rag/distributed_ray_retriever.py b/examples/research_projects/rag/distributed_ray_retriever.py deleted file mode 100644 index dd5baaf7261..00000000000 --- a/examples/research_projects/rag/distributed_ray_retriever.py +++ /dev/null @@ -1,152 +0,0 @@ -import logging -import random - -import ray - -from transformers import RagConfig, RagRetriever, RagTokenizer -from transformers.models.rag.retrieval_rag import CustomHFIndex - - -logger = logging.getLogger(__name__) - - -class RayRetriever: - def __init__(self): - self.initialized = False - - def create_rag_retriever(self, config, question_encoder_tokenizer, generator_tokenizer, index): - if not self.initialized: - self.retriever = RagRetriever( - config, - question_encoder_tokenizer=question_encoder_tokenizer, - generator_tokenizer=generator_tokenizer, - index=index, - init_retrieval=False, - ) - self.initialized = True - - def init_retrieval(self): - self.retriever.index.init_index() - - def retrieve(self, question_hidden_states, n_docs): - doc_ids, retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states, n_docs) - return doc_ids, retrieved_doc_embeds - - -class RagRayDistributedRetriever(RagRetriever): - """ - A distributed retriever built on top of the ``Ray`` API, a library - for building distributed applications (https://docs.ray.io/en/master/). - package. During training, all training workers initialize their own - instance of a `RagRayDistributedRetriever`, and each instance of - this distributed retriever shares a common set of Retrieval Ray - Actors (https://docs.ray.io/en/master/walkthrough.html#remote - -classes-actors) that load the index on separate processes. Ray - handles the communication between the `RagRayDistributedRetriever` - instances and the remote Ray actors. If training is done in a - non-distributed setup, the index will simply be loaded in the same - process as the training worker and Ray will not be used. - - Args: - config (:class:`~transformers.RagConfig`): - The configuration of the RAG model this Retriever is used with. Contains parameters indicating which ``Index`` to build. 
- question_encoder_tokenizer (:class:`~transformers.PreTrainedTokenizer`): - The tokenizer that was used to tokenize the question. - It is used to decode the question and then use the generator_tokenizer. - generator_tokenizer (:class:`~transformers.PreTrainedTokenizer`): - The tokenizer used for the generator part of the RagModel. - retrieval_workers (:obj:`List[ray.ActorClass(RayRetriever)]`): A list of already initialized `RayRetriever` actors. - These actor classes run on remote processes and are responsible for performing the index lookup. - index (:class:`~transformers.retrieval_rag.Index`, optional, defaults to the one defined by the configuration): - If specified, use this index instead of the one built using the configuration - """ - - def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, retrieval_workers, index=None): - if index is not None and index.is_initialized() and len(retrieval_workers) > 0: - raise ValueError( - "When using Ray for distributed fine-tuning, " - "you'll need to provide the paths instead, " - "as the dataset and the index are loaded " - "separately. More info in examples/rag/use_own_knowledge_dataset.py " - ) - super().__init__( - config, - question_encoder_tokenizer=question_encoder_tokenizer, - generator_tokenizer=generator_tokenizer, - index=index, - init_retrieval=False, - ) - self.retrieval_workers = retrieval_workers - if len(self.retrieval_workers) > 0: - ray.get( - [ - worker.create_rag_retriever.remote(config, question_encoder_tokenizer, generator_tokenizer, index) - for worker in self.retrieval_workers - ] - ) - - def init_retrieval(self): - """ - Retriever initialization function, needs to be called from the - training process. This function triggers retrieval initialization - for all retrieval actors if using distributed setting, or loads - index into current process if training is not distributed. - """ - logger.info("initializing retrieval") - - if len(self.retrieval_workers) > 0: - ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers]) - else: - # Non-distributed training. Load index into this same process. - self.index.init_index() - - def retrieve(self, question_hidden_states, n_docs): - """ - Retrieves documents for specified ``question_hidden_states``. If - running training with multiple workers, a random retrieval actor is - selected to perform the index lookup and return the result. - - Args: - question_hidden_states (:obj:`np.ndarray` of shape :obj:`(batch_size, vector_size)`): - A batch of query vectors to retrieve with. - n_docs (:obj:`int`): - The number of docs retrieved per query. - - Output: - retrieved_doc_embeds (:obj:`np.ndarray` of shape :obj:`(batch_size, n_docs, dim)` - The retrieval embeddings of the retrieved docs per query. - doc_ids (:obj:`np.ndarray` of shape :obj:`batch_size, n_docs`) - The ids of the documents in the index - doc_dicts (:obj:`List[dict]`): - The retrieved_doc_embeds examples per query. - """ - if len(self.retrieval_workers) > 0: - # Select a random retrieval actor. 
- random_worker = self.retrieval_workers[random.randint(0, len(self.retrieval_workers) - 1)] - doc_ids, retrieved_doc_embeds = ray.get(random_worker.retrieve.remote(question_hidden_states, n_docs)) - else: - doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs) - return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids) - - @classmethod - def get_tokenizers(cls, retriever_name_or_path, indexed_dataset=None, **kwargs): - return super(RagRayDistributedRetriever, cls).get_tokenizers(retriever_name_or_path, indexed_dataset, **kwargs) - - @classmethod - def from_pretrained(cls, retriever_name_or_path, actor_handles, indexed_dataset=None, **kwargs): - config = kwargs.pop("config", None) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs) - rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config) - question_encoder_tokenizer = rag_tokenizer.question_encoder - generator_tokenizer = rag_tokenizer.generator - if indexed_dataset is not None: - config.index_name = "custom" - index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset) - else: - index = cls._build_index(config) - return cls( - config, - question_encoder_tokenizer=question_encoder_tokenizer, - generator_tokenizer=generator_tokenizer, - retrieval_workers=actor_handles, - index=index, - ) diff --git a/examples/research_projects/rag/eval_rag.py b/examples/research_projects/rag/eval_rag.py deleted file mode 100644 index 55f4da56571..00000000000 --- a/examples/research_projects/rag/eval_rag.py +++ /dev/null @@ -1,320 +0,0 @@ -"""Evaluation script for RAG models.""" - -import argparse -import ast -import logging -import os -import sys - -import pandas as pd -import torch -from tqdm import tqdm - -from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration -from transformers import logging as transformers_logging - - -sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip -from utils_rag import exact_match_score, f1_score # noqa: E402 # isort:skip - - -logger = logging.getLogger(__name__) -logging.basicConfig(level=logging.INFO) - -transformers_logging.set_verbosity_info() - - -def infer_model_type(model_name_or_path): - if "token" in model_name_or_path: - return "rag_token" - if "sequence" in model_name_or_path: - return "rag_sequence" - if "bart" in model_name_or_path: - return "bart" - return None - - -def metric_max_over_ground_truths(metric_fn, prediction, ground_truths): - return max(metric_fn(prediction, gt) for gt in ground_truths) - - -def get_scores(args, preds_path, gold_data_path): - hypos = [line.strip() for line in open(preds_path, "r").readlines()] - answers = [] - - if args.gold_data_mode == "qa": - data = pd.read_csv(gold_data_path, sep="\t", header=None) - for answer_list in data[1]: - ground_truths = ast.literal_eval(answer_list) - answers.append(ground_truths) - else: - references = [line.strip() for line in open(gold_data_path, "r").readlines()] - answers = [[reference] for reference in references] - - f1 = em = total = 0 - for prediction, ground_truths in zip(hypos, answers): - total += 1 - em += metric_max_over_ground_truths(exact_match_score, prediction, ground_truths) - f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths) - - em = 100.0 * em / total - f1 = 100.0 * f1 / total - - logger.info(f"F1: {f1:.2f}") - logger.info(f"EM: {em:.2f}") - - -def get_precision_at_k(args, preds_path, gold_data_path): - k = args.k - hypos = [line.strip() for 
line in open(preds_path, "r").readlines()] - references = [line.strip() for line in open(gold_data_path, "r").readlines()] - - em = total = 0 - for hypo, reference in zip(hypos, references): - hypo_provenance = set(hypo.split("\t")[:k]) - ref_provenance = set(reference.split("\t")) - total += 1 - em += len(hypo_provenance & ref_provenance) / k - - em = 100.0 * em / total - logger.info(f"Precision@{k}: {em: .2f}") - - -def evaluate_batch_retrieval(args, rag_model, questions): - def strip_title(title): - if title.startswith('"'): - title = title[1:] - if title.endswith('"'): - title = title[:-1] - return title - - retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus( - questions, - return_tensors="pt", - padding=True, - truncation=True, - )["input_ids"].to(args.device) - - question_enc_outputs = rag_model.rag.question_encoder(retriever_input_ids) - question_enc_pool_output = question_enc_outputs[0] - - result = rag_model.retriever( - retriever_input_ids, - question_enc_pool_output.cpu().detach().to(torch.float32).numpy(), - prefix=rag_model.rag.generator.config.prefix, - n_docs=rag_model.config.n_docs, - return_tensors="pt", - ) - all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids) - provenance_strings = [] - for docs in all_docs: - provenance = [strip_title(title) for title in docs["title"]] - provenance_strings.append("\t".join(provenance)) - return provenance_strings - - -def evaluate_batch_e2e(args, rag_model, questions): - with torch.no_grad(): - inputs_dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus( - questions, return_tensors="pt", padding=True, truncation=True - ) - - input_ids = inputs_dict.input_ids.to(args.device) - attention_mask = inputs_dict.attention_mask.to(args.device) - outputs = rag_model.generate( # rag_model overwrites generate - input_ids, - attention_mask=attention_mask, - num_beams=args.num_beams, - min_length=args.min_length, - max_length=args.max_length, - early_stopping=False, - num_return_sequences=1, - bad_words_ids=[[0, 0]], # BART likes to repeat BOS tokens, dont allow it to generate more than one - ) - answers = rag_model.retriever.generator_tokenizer.batch_decode(outputs, skip_special_tokens=True) - - if args.print_predictions: - for q, a in zip(questions, answers): - logger.info("Q: {} - A: {}".format(q, a)) - - return answers - - -def get_args(): - parser = argparse.ArgumentParser() - parser.add_argument( - "--model_type", - choices=["rag_sequence", "rag_token", "bart"], - type=str, - help=( - "RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the" - " model_name_or_path" - ), - ) - parser.add_argument( - "--index_name", - default=None, - choices=["exact", "compressed", "legacy"], - type=str, - help="RAG model retriever type", - ) - parser.add_argument( - "--index_path", - default=None, - type=str, - help="Path to the retrieval index", - ) - parser.add_argument("--n_docs", default=5, type=int, help="Number of retrieved docs") - parser.add_argument( - "--model_name_or_path", - default=None, - type=str, - required=True, - help="Path to pretrained checkpoints or model identifier from huggingface.co/models", - ) - parser.add_argument( - "--eval_mode", - choices=["e2e", "retrieval"], - default="e2e", - type=str, - help=( - "Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates" - " precision@k." 
- ), - ) - parser.add_argument("--k", default=1, type=int, help="k for the precision@k calculation") - parser.add_argument( - "--evaluation_set", - default=None, - type=str, - required=True, - help="Path to a file containing evaluation samples", - ) - parser.add_argument( - "--gold_data_path", - default=None, - type=str, - required=True, - help="Path to a tab-separated file with gold samples", - ) - parser.add_argument( - "--gold_data_mode", - default="qa", - type=str, - choices=["qa", "ans"], - help=( - "Format of the gold data file" - "qa - a single line in the following format: question [tab] answer_list" - "ans - a single line of the gold file contains the expected answer string" - ), - ) - parser.add_argument( - "--predictions_path", - type=str, - default="predictions.txt", - help="Name of the predictions file, to be stored in the checkpoints directory", - ) - parser.add_argument( - "--eval_all_checkpoints", - action="store_true", - help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number", - ) - parser.add_argument( - "--eval_batch_size", - default=8, - type=int, - help="Batch size per GPU/CPU for evaluation.", - ) - parser.add_argument( - "--recalculate", - help="Recalculate predictions even if the prediction file exists", - action="store_true", - ) - parser.add_argument( - "--num_beams", - default=4, - type=int, - help="Number of beams to be used when generating answers", - ) - parser.add_argument("--min_length", default=1, type=int, help="Min length of the generated answers") - parser.add_argument("--max_length", default=50, type=int, help="Max length of the generated answers") - - parser.add_argument( - "--print_predictions", - action="store_true", - help="If True, prints predictions while evaluating.", - ) - parser.add_argument( - "--print_docs", - action="store_true", - help="If True, prints docs retried while generating.", - ) - args = parser.parse_args() - args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu") - return args - - -def main(args): - model_kwargs = {} - if args.model_type is None: - args.model_type = infer_model_type(args.model_name_or_path) - assert args.model_type is not None - if args.model_type.startswith("rag"): - model_class = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration - model_kwargs["n_docs"] = args.n_docs - if args.index_name is not None: - model_kwargs["index_name"] = args.index_name - if args.index_path is not None: - model_kwargs["index_path"] = args.index_path - else: - model_class = BartForConditionalGeneration - - checkpoints = ( - [f.path for f in os.scandir(args.model_name_or_path) if f.is_dir()] - if args.eval_all_checkpoints - else [args.model_name_or_path] - ) - - logger.info("Evaluate the following checkpoints: %s", checkpoints) - - score_fn = get_scores if args.eval_mode == "e2e" else get_precision_at_k - evaluate_batch_fn = evaluate_batch_e2e if args.eval_mode == "e2e" else evaluate_batch_retrieval - - for checkpoint in checkpoints: - if os.path.exists(args.predictions_path) and (not args.recalculate): - logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path)) - score_fn(args, args.predictions_path, args.gold_data_path) - continue - - logger.info("***** Running evaluation for {} *****".format(checkpoint)) - logger.info(" Batch size = %d", args.eval_batch_size) - logger.info(" Predictions will be stored under {}".format(args.predictions_path)) - - if 
args.model_type.startswith("rag"): - retriever = RagRetriever.from_pretrained(checkpoint, **model_kwargs) - model = model_class.from_pretrained(checkpoint, retriever=retriever, **model_kwargs) - model.retriever.init_retrieval() - else: - model = model_class.from_pretrained(checkpoint, **model_kwargs) - model.to(args.device) - - with open(args.evaluation_set, "r") as eval_file, open(args.predictions_path, "w") as preds_file: - questions = [] - for line in tqdm(eval_file): - questions.append(line.strip()) - if len(questions) == args.eval_batch_size: - answers = evaluate_batch_fn(args, model, questions) - preds_file.write("\n".join(answers) + "\n") - preds_file.flush() - questions = [] - if len(questions) > 0: - answers = evaluate_batch_fn(args, model, questions) - preds_file.write("\n".join(answers)) - preds_file.flush() - - score_fn(args, args.predictions_path, args.gold_data_path) - - -if __name__ == "__main__": - args = get_args() - main(args) diff --git a/examples/research_projects/rag/finetune_rag.py b/examples/research_projects/rag/finetune_rag.py deleted file mode 100644 index af3acd4def6..00000000000 --- a/examples/research_projects/rag/finetune_rag.py +++ /dev/null @@ -1,649 +0,0 @@ -"""Finetuning script for RAG models. Adapted from examples.seq2seq.finetune.py""" - -import argparse -import logging -import os -import sys -import time -from collections import defaultdict -from pathlib import Path -from typing import Any, Dict, List, Tuple - -import numpy as np -import pytorch_lightning as pl -import torch -import torch.distributed as dist -import torch.distributed as torch_distrib -from pytorch_lightning.plugins.training_type import DDPPlugin -from torch.utils.data import DataLoader - -from transformers import ( - AutoConfig, - AutoTokenizer, - BartForConditionalGeneration, - BatchEncoding, - RagConfig, - RagSequenceForGeneration, - RagTokenForGeneration, - RagTokenizer, - T5ForConditionalGeneration, -) -from transformers import logging as transformers_logging -from transformers.integrations import is_ray_available - - -if is_ray_available(): - import ray - from distributed_ray_retriever import RagRayDistributedRetriever, RayRetriever - -from callbacks_rag import ( # noqa: E402 # isort:skipq - get_checkpoint_callback, - get_early_stopping_callback, - Seq2SeqLoggingCallback, -) - -from distributed_pytorch_retriever import RagPyTorchDistributedRetriever # noqa: E402 # isort:skip -from utils_rag import ( # noqa: E402 # isort:skip - calculate_exact_match, - flatten_list, - get_git_info, - is_rag_model, - lmap, - pickle_save, - save_git_info, - save_json, - set_extra_model_params, - Seq2SeqDataset, -) - -# need the parent dir module -sys.path.insert(2, str(Path(__file__).resolve().parents[1])) -from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa - - -logging.basicConfig(level=logging.INFO) -logger = logging.getLogger(__name__) - -transformers_logging.set_verbosity_info() - - -class AttrDict(dict): - def __init__(self, *args, **kwargs): - super(AttrDict, self).__init__(*args, **kwargs) - self.__dict__ = self - - -class CustomDDP(DDPPlugin): - def init_ddp_connection(self, global_rank=None, world_size=None) -> None: - module = self.model - global_rank = global_rank if global_rank is not None else self.cluster_environment.global_rank() - world_size = world_size if world_size is not None else self.cluster_environment.world_size() - os.environ["MASTER_ADDR"] = self.cluster_environment.master_address() - os.environ["MASTER_PORT"] = 
str(self.cluster_environment.master_port()) - if not torch.distributed.is_initialized(): - logger.info(f"initializing ddp: GLOBAL_RANK: {global_rank}, MEMBER: {global_rank + 1}/{world_size}") - torch_distrib.init_process_group(self.torch_distributed_backend, rank=global_rank, world_size=world_size) - - if module.is_rag_model: - self.distributed_port = module.hparams.distributed_port - if module.distributed_retriever == "pytorch": - module.model.rag.retriever.init_retrieval(self.distributed_port) - elif module.distributed_retriever == "ray" and global_rank == 0: - # For the Ray retriever, only initialize it once when global - # rank is 0. - module.model.rag.retriever.init_retrieval() - - -class GenerativeQAModule(BaseTransformer): - mode = "generative_qa" - loss_names = ["loss"] - metric_names = ["em"] - val_metric = "em" - - def __init__(self, hparams, **kwargs): - # when loading from a pytorch lightning checkpoint, hparams are passed as dict - if isinstance(hparams, dict): - hparams = AttrDict(hparams) - if hparams.model_type == "rag_sequence": - self.model_class = RagSequenceForGeneration - elif hparams.model_type == "rag_token": - self.model_class = RagTokenForGeneration - elif hparams.model_type == "bart": - self.model_class = BartForConditionalGeneration - else: - self.model_class = T5ForConditionalGeneration - self.is_rag_model = is_rag_model(hparams.model_type) - - config_class = RagConfig if self.is_rag_model else AutoConfig - config = config_class.from_pretrained(hparams.model_name_or_path) - - # set retriever parameters - config.index_name = hparams.index_name or config.index_name - config.passages_path = hparams.passages_path or config.passages_path - config.index_path = hparams.index_path or config.index_path - config.use_dummy_dataset = hparams.use_dummy_dataset - - # set extra_model_params for generator configs and load_model - extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "attention_dropout", "dropout") - if self.is_rag_model: - if hparams.prefix is not None: - config.generator.prefix = hparams.prefix - config.label_smoothing = hparams.label_smoothing - hparams, config.generator = set_extra_model_params(extra_model_params, hparams, config.generator) - if hparams.distributed_retriever == "pytorch": - retriever = RagPyTorchDistributedRetriever.from_pretrained(hparams.model_name_or_path, config=config) - elif hparams.distributed_retriever == "ray": - # The Ray retriever needs the handles to the retriever actors. 
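# Illustrative note, not part of the deleted file: hparams.actor_handles is
# populated by main() further down. Roughly, the rank-0 process creates named
# Ray actors and passes the handles in:
#
#     remote_cls = ray.remote(RayRetriever)
#     actor_handles = [
#         remote_cls.options(name="retrieval_worker_{}".format(i)).remote()
#         for i in range(args.num_retrieval_workers)
#     ]
#     args.actor_handles = actor_handles
#
# so the retriever built just below can fan retrieval requests out to those actors.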
- retriever = RagRayDistributedRetriever.from_pretrained( - hparams.model_name_or_path, hparams.actor_handles, config=config - ) - model = self.model_class.from_pretrained(hparams.model_name_or_path, config=config, retriever=retriever) - prefix = config.question_encoder.prefix - else: - if hparams.prefix is not None: - config.prefix = hparams.prefix - hparams, config = set_extra_model_params(extra_model_params, hparams, config) - model = self.model_class.from_pretrained(hparams.model_name_or_path, config=config) - prefix = config.prefix - - tokenizer = ( - RagTokenizer.from_pretrained(hparams.model_name_or_path) - if self.is_rag_model - else AutoTokenizer.from_pretrained(hparams.model_name_or_path) - ) - - super().__init__(hparams, config=config, tokenizer=tokenizer, model=model) - - save_git_info(self.hparams.output_dir) - self.output_dir = Path(self.hparams.output_dir) - self.metrics_save_path = Path(self.output_dir) / "metrics.json" - self.hparams_save_path = Path(self.output_dir) / "hparams.pkl" - pickle_save(self.hparams, self.hparams_save_path) - self.step_count = 0 - self.metrics = defaultdict(list) - - self.dataset_kwargs: dict = { - "data_dir": self.hparams.data_dir, - "max_source_length": self.hparams.max_source_length, - "prefix": prefix or "", - } - n_observations_per_split = { - "train": self.hparams.n_train, - "val": self.hparams.n_val, - "test": self.hparams.n_test, - } - self.n_obs = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()} - - self.target_lens = { - "train": self.hparams.max_target_length, - "val": self.hparams.val_max_target_length, - "test": self.hparams.test_max_target_length, - } - assert self.target_lens["train"] <= self.target_lens["val"], f"target_lens: {self.target_lens}" - assert self.target_lens["train"] <= self.target_lens["test"], f"target_lens: {self.target_lens}" - - self.hparams.git_sha = get_git_info()["repo_sha"] - self.num_workers = hparams.num_workers - self.distributed_port = self.hparams.distributed_port - - # For single GPU training, init_ddp_connection is not called. - # So we need to initialize the retrievers here. 
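# Clarifying note (added, not in the original file): with a single GPU,
# Lightning never calls the custom DDP plugin above, so the retrieval index
# would otherwise never be loaded; the check below covers that case for both
# the pytorch and ray retriever backends.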
- if hparams.gpus <= 1: - if hparams.distributed_retriever == "ray": - self.model.retriever.init_retrieval() - elif hparams.distributed_retriever == "pytorch": - self.model.retriever.init_retrieval(self.distributed_port) - - self.distributed_retriever = hparams.distributed_retriever - - def forward(self, input_ids, **kwargs): - return self.model(input_ids, **kwargs) - - def ids_to_clean_text(self, generated_ids: List[int]): - gen_text = self.tokenizer.batch_decode( - generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True - ) - return lmap(str.strip, gen_text) - - def _step(self, batch: dict) -> Tuple: - source_ids, source_mask, target_ids = batch["input_ids"], batch["attention_mask"], batch["decoder_input_ids"] - - rag_kwargs = {} - if isinstance(self.model, T5ForConditionalGeneration): - decoder_input_ids = self.model._shift_right(target_ids) - lm_labels = target_ids - elif isinstance(self.model, BartForConditionalGeneration): - decoder_input_ids = target_ids[:, :-1].contiguous() - lm_labels = target_ids[:, 1:].clone() - else: - assert self.is_rag_model - generator = self.model.rag.generator - if isinstance(generator, T5ForConditionalGeneration): - decoder_start_token_id = generator.config.decoder_start_token_id - decoder_input_ids = ( - torch.cat( - [torch.tensor([[decoder_start_token_id]] * target_ids.shape[0]).to(target_ids), target_ids], - dim=1, - ) - if target_ids.shape[0] < self.target_lens["train"] - else generator._shift_right(target_ids) - ) - elif isinstance(generator, BartForConditionalGeneration): - decoder_input_ids = target_ids - lm_labels = decoder_input_ids - rag_kwargs["reduce_loss"] = True - - assert decoder_input_ids is not None - - outputs = self( - source_ids, - attention_mask=source_mask, - decoder_input_ids=decoder_input_ids, - use_cache=False, - labels=lm_labels, - **rag_kwargs, - ) - - loss = outputs["loss"] - return (loss,) - - @property - def pad(self) -> int: - raise NotImplementedError("pad not implemented") - - def training_step(self, batch, batch_idx) -> Dict: - loss_tensors = self._step(batch) - - logs = {name: loss.detach() for name, loss in zip(self.loss_names, loss_tensors)} - # tokens per batch - tgt_pad_token_id = ( - self.tokenizer.generator.pad_token_id - if isinstance(self.tokenizer, RagTokenizer) - else self.tokenizer.pad_token_id - ) - src_pad_token_id = ( - self.tokenizer.question_encoder.pad_token_id - if isinstance(self.tokenizer, RagTokenizer) - else self.tokenizer.pad_token_id - ) - logs["tpb"] = ( - batch["input_ids"].ne(src_pad_token_id).sum() + batch["decoder_input_ids"].ne(tgt_pad_token_id).sum() - ) - - return {"loss": loss_tensors[0], "log": logs} - - def validation_step(self, batch, batch_idx) -> Dict: - return self._generative_step(batch) - - def validation_epoch_end(self, outputs, prefix="val") -> Dict: - self.step_count += 1 - losses = {k: torch.stack([x[k] for x in outputs]).mean() for k in self.loss_names} - loss = losses["loss"] - gen_metrics = { - k: np.array([x[k] for x in outputs]).mean() for k in self.metric_names + ["gen_time", "gen_len"] - } - metrics_tensor: torch.FloatTensor = torch.tensor(gen_metrics[self.val_metric]).type_as(loss) - gen_metrics.update({k: v.item() for k, v in losses.items()}) - - # fix for https://github.com/PyTorchLightning/pytorch-lightning/issues/2424 - if dist.is_initialized(): - dist.all_reduce(metrics_tensor, op=dist.ReduceOp.SUM) - metrics_tensor = metrics_tensor / dist.get_world_size() - gen_metrics.update({self.val_metric: metrics_tensor.item()}) - - 
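# Clarifying note (added, not in the original file): the all_reduce above sums
# the chosen validation metric across workers, and dividing by the world size
# turns that sum into a global mean, so every rank logs the same averaged value.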
losses.update(gen_metrics) - metrics = {f"{prefix}_avg_{k}": x for k, x in losses.items()} - metrics["step_count"] = self.step_count - self.save_metrics(metrics, prefix) # writes to self.metrics_save_path - preds = flatten_list([x["preds"] for x in outputs]) - return {"log": metrics, "preds": preds, f"{prefix}_loss": loss, f"{prefix}_{self.val_metric}": metrics_tensor} - - def save_metrics(self, latest_metrics, type_path) -> None: - self.metrics[type_path].append(latest_metrics) - save_json(self.metrics, self.metrics_save_path) - - def calc_generative_metrics(self, preds, target) -> Dict: - return calculate_exact_match(preds, target) - - def _generative_step(self, batch: dict) -> dict: - start_time = time.time() - batch = BatchEncoding(batch).to(device=self.model.device) - generated_ids = self.model.generate( - batch["input_ids"], - attention_mask=batch["attention_mask"], - do_deduplication=False, # rag specific parameter - use_cache=True, - min_length=1, - max_length=self.target_lens["val"], - ) - - gen_time = (time.time() - start_time) / batch["input_ids"].shape[0] - preds: List[str] = self.ids_to_clean_text(generated_ids) - target: List[str] = self.ids_to_clean_text(batch["decoder_input_ids"]) - loss_tensors = self._step(batch) - base_metrics = dict(zip(self.loss_names, loss_tensors)) - gen_metrics: Dict = self.calc_generative_metrics(preds, target) - - summ_len = np.mean(lmap(len, generated_ids)) - base_metrics.update(gen_time=gen_time, gen_len=summ_len, preds=preds, target=target, **gen_metrics) - return base_metrics - - def test_step(self, batch, batch_idx): - return self._generative_step(batch) - - def test_epoch_end(self, outputs): - return self.validation_epoch_end(outputs, prefix="test") - - def get_dataset(self, type_path) -> Seq2SeqDataset: - n_obs = self.n_obs[type_path] - max_target_length = self.target_lens[type_path] - dataset = Seq2SeqDataset( - self.tokenizer, - type_path=type_path, - n_obs=n_obs, - max_target_length=max_target_length, - **self.dataset_kwargs, - ) - return dataset - - def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False) -> DataLoader: - dataset = self.get_dataset(type_path) - - dataloader = DataLoader( - dataset, - batch_size=batch_size, - collate_fn=dataset.collate_fn, - shuffle=shuffle, - num_workers=self.num_workers, - ) - return dataloader - - def train_dataloader(self) -> DataLoader: - dataloader = self.get_dataloader("train", batch_size=self.hparams.train_batch_size, shuffle=True) - return dataloader - - def val_dataloader(self) -> DataLoader: - return self.get_dataloader("val", batch_size=self.hparams.eval_batch_size) - - def test_dataloader(self) -> DataLoader: - return self.get_dataloader("test", batch_size=self.hparams.eval_batch_size) - - @pl.utilities.rank_zero_only - def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None: - save_path = self.output_dir.joinpath("checkpoint{}".format(self.step_count)) - self.model.config.save_step = self.step_count - self.model.save_pretrained(save_path) - self.tokenizer.save_pretrained(save_path) - - @staticmethod - def add_model_specific_args(parser, root_dir): - BaseTransformer.add_model_specific_args(parser, root_dir) - add_generic_args(parser, root_dir) - parser.add_argument( - "--max_source_length", - default=128, - type=int, - help=( - "The maximum total input sequence length after tokenization. Sequences longer " - "than this will be truncated, sequences shorter will be padded." 
- ), - ) - parser.add_argument( - "--max_target_length", - default=25, - type=int, - help=( - "The maximum total input sequence length after tokenization. Sequences longer " - "than this will be truncated, sequences shorter will be padded." - ), - ) - parser.add_argument( - "--val_max_target_length", - default=25, - type=int, - help=( - "The maximum total input sequence length after tokenization. Sequences longer " - "than this will be truncated, sequences shorter will be padded." - ), - ) - parser.add_argument( - "--test_max_target_length", - default=25, - type=int, - help=( - "The maximum total input sequence length after tokenization. Sequences longer " - "than this will be truncated, sequences shorter will be padded." - ), - ) - parser.add_argument("--logger_name", type=str, choices=["default", "wandb", "wandb_shared"], default="default") - parser.add_argument("--n_train", type=int, default=-1, required=False, help="# examples. -1 means use all.") - parser.add_argument("--n_val", type=int, default=-1, required=False, help="# examples. -1 means use all.") - parser.add_argument("--n_test", type=int, default=-1, required=False, help="# examples. -1 means use all.") - parser.add_argument("--label_smoothing", type=float, default=0.0, required=False) - parser.add_argument( - "--prefix", - type=str, - default=None, - help="Prefix added at the beginning of each text, typically used with T5-based models.", - ) - parser.add_argument( - "--early_stopping_patience", - type=int, - default=-1, - required=False, - help=( - "-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So" - " val_check_interval will effect it." - ), - ) - parser.add_argument( - "--distributed-port", type=int, default=-1, required=False, help="Port number for distributed training." - ) - parser.add_argument( - "--model_type", - choices=["rag_sequence", "rag_token", "bart", "t5"], - type=str, - help=( - "RAG model type: sequence or token, if none specified, the type is inferred from the" - " model_name_or_path" - ), - ) - return parser - - @staticmethod - def add_retriever_specific_args(parser): - parser.add_argument( - "--index_name", - type=str, - default=None, - help=( - "Name of the index to use: 'hf' for a canonical dataset from the datasets library (default), 'custom'" - " for a local index, or 'legacy' for the original one)" - ), - ) - parser.add_argument( - "--passages_path", - type=str, - default=None, - help=( - "Path to the dataset of passages for custom index. More info about custom indexes in the RagRetriever" - " documentation as well as in `examples/rag/use_own_knowledge_dataset.py`" - ), - ) - parser.add_argument( - "--index_path", - type=str, - default=None, - help=( - "Path to the faiss index for custom index. More info about custom indexes in the RagRetriever" - " documentation as well as in `examples/rag/use_own_knowledge_dataset.py`" - ), - ) - parser.add_argument( - "--distributed_retriever", - choices=["ray", "pytorch"], - type=str, - default="pytorch", - help=( - "What implementation to use for distributed retriever? If " - "pytorch is selected, the index is loaded on training " - "worker 0, and torch.distributed is used to handle " - "communication between training worker 0, and the other " - "training workers. If ray is selected, the Ray library is " - "used to create load the index on separate processes, " - "and Ray handles the communication between the training " - "workers and the retrieval actors." 
- ), - ) - parser.add_argument( - "--use_dummy_dataset", - type=bool, - default=False, - help=( - "Whether to use the dummy version of the dataset index. More info about custom indexes in the" - " RagRetriever documentation as well as in `examples/rag/use_own_knowledge_dataset.py`" - ), - ) - return parser - - @staticmethod - def add_ray_specific_args(parser): - # Ray cluster address. - parser.add_argument( - "--ray-address", - default="auto", - type=str, - help=( - "The address of the Ray cluster to connect to. If not " - "specified, Ray will attempt to automatically detect the " - "cluster. Has no effect if pytorch is used as the distributed " - "retriever." - ), - ) - parser.add_argument( - "--num_retrieval_workers", - type=int, - default=1, - help=( - "The number of retrieval actors to use when Ray is selected " - "for the distributed retriever. Has no effect when " - "distributed_retriever is set to pytorch." - ), - ) - return parser - - -def main(args=None, model=None) -> GenerativeQAModule: - parser = argparse.ArgumentParser() - parser = pl.Trainer.add_argparse_args(parser) - parser = GenerativeQAModule.add_model_specific_args(parser, os.getcwd()) - parser = GenerativeQAModule.add_retriever_specific_args(parser) - - args = args or parser.parse_args() - - Path(args.output_dir).mkdir(exist_ok=True) - - named_actors = [] - if args.distributed_retriever == "ray" and args.gpus > 1: - if not is_ray_available(): - raise RuntimeError("Please install Ray to use the Ray distributed retriever.") - # Connect to an existing Ray cluster. - try: - ray.init(address=args.ray_address, namespace="rag") - except (ConnectionError, ValueError): - logger.warning( - "Connection to Ray cluster failed. Make sure a Ray " - "cluster is running by either using Ray's cluster " - "launcher (`ray up`) or by manually starting Ray on " - "each node via `ray start --head` for the head node " - "and `ray start --address=':6379'` for " - "additional nodes. See " - "https://docs.ray.io/en/master/cluster/index.html " - "for more info." - ) - raise - - # Create Ray actors only for rank 0. 
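# Clarifying note (added, not in the original file): only the process whose
# LOCAL_RANK and NODE_RANK are unset or zero creates the named retrieval
# actors; every other process resolves the same actors by name with
# ray.get_actor() in the else-branch below.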
- if ("LOCAL_RANK" not in os.environ or int(os.environ["LOCAL_RANK"]) == 0) and ( - "NODE_RANK" not in os.environ or int(os.environ["NODE_RANK"]) == 0 - ): - remote_cls = ray.remote(RayRetriever) - named_actors = [ - remote_cls.options(name="retrieval_worker_{}".format(i)).remote() - for i in range(args.num_retrieval_workers) - ] - else: - logger.info( - "Getting named actors for NODE_RANK {}, LOCAL_RANK {}".format( - os.environ["NODE_RANK"], os.environ["LOCAL_RANK"] - ) - ) - named_actors = [ray.get_actor("retrieval_worker_{}".format(i)) for i in range(args.num_retrieval_workers)] - args.actor_handles = named_actors - assert args.actor_handles == named_actors - - if model is None: - model: GenerativeQAModule = GenerativeQAModule(args) - - dataset = Path(args.data_dir).name - if ( - args.logger_name == "default" - or args.fast_dev_run - or str(args.output_dir).startswith("/tmp") - or str(args.output_dir).startswith("/var") - ): - training_logger = True # don't pollute wandb logs unnecessarily - elif args.logger_name == "wandb": - from pytorch_lightning.loggers import WandbLogger - - project = os.environ.get("WANDB_PROJECT", dataset) - training_logger = WandbLogger(name=model.output_dir.name, project=project) - - elif args.logger_name == "wandb_shared": - from pytorch_lightning.loggers import WandbLogger - - training_logger = WandbLogger(name=model.output_dir.name, project=f"hf_{dataset}") - - es_callback = ( - get_early_stopping_callback(model.val_metric, args.early_stopping_patience) - if args.early_stopping_patience >= 0 - else False - ) - - trainer: pl.Trainer = generic_train( - model, - args, - logging_callback=Seq2SeqLoggingCallback(), - checkpoint_callback=get_checkpoint_callback(args.output_dir, model.val_metric), - early_stopping_callback=es_callback, - logger=training_logger, - custom_ddp_plugin=CustomDDP() if args.gpus > 1 else None, - profiler=pl.profiler.AdvancedProfiler() if args.profile else None, - ) - pickle_save(model.hparams, model.output_dir / "hparams.pkl") - - if not args.do_predict: - return model - - # test() without a model tests using the best checkpoint automatically - trainer.test() - return model - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser = pl.Trainer.add_argparse_args(parser) - parser = GenerativeQAModule.add_model_specific_args(parser, os.getcwd()) - parser = GenerativeQAModule.add_retriever_specific_args(parser) - parser = GenerativeQAModule.add_ray_specific_args(parser) - - # Pytorch Lightning Profiler - parser.add_argument( - "--profile", - action="store_true", - help="If True, use pytorch_lightning.profiler.AdvancedProfiler to profile the Trainer.", - ) - - args = parser.parse_args() - - main(args) diff --git a/examples/research_projects/rag/finetune_rag.sh b/examples/research_projects/rag/finetune_rag.sh deleted file mode 100755 index 8fd1fea3e54..00000000000 --- a/examples/research_projects/rag/finetune_rag.sh +++ /dev/null @@ -1,34 +0,0 @@ -# Add parent directory to python path to access lightning_base.py -export PYTHONPATH="../":"${PYTHONPATH}" - -# A sample finetuning run, you need to specify data_dir, output_dir and model_name_or_path -# run ./examples/rag/finetune_rag.sh --help to see all the possible options - -python examples/rag/finetune_rag.py \ - --data_dir $DATA_DIR \ - --output_dir $OUTPUT_DIR \ - --model_name_or_path $MODEL_NAME_OR_PATH \ - --model_type rag_sequence \ - --fp16 \ - --gpus 8 \ - --profile \ - --do_train \ - --do_predict \ - --n_val -1 \ - --train_batch_size 8 \ - --eval_batch_size 1 \ - 
--max_source_length 128 \ - --max_target_length 25 \ - --val_max_target_length 25 \ - --test_max_target_length 25 \ - --label_smoothing 0.1 \ - --dropout 0.1 \ - --attention_dropout 0.1 \ - --weight_decay 0.001 \ - --adam_epsilon 1e-08 \ - --max_grad_norm 0.1 \ - --lr_scheduler polynomial \ - --learning_rate 3e-05 \ - --num_train_epochs 100 \ - --warmup_steps 500 \ - --gradient_accumulation_steps 1 \ diff --git a/examples/research_projects/rag/finetune_rag_ray.sh b/examples/research_projects/rag/finetune_rag_ray.sh deleted file mode 100755 index 7c8e7b97e77..00000000000 --- a/examples/research_projects/rag/finetune_rag_ray.sh +++ /dev/null @@ -1,44 +0,0 @@ -# Sample script to finetune RAG using Ray for distributed retrieval. - -# Add parent directory to python path to access lightning_base.py -export PYTHONPATH="../":"${PYTHONPATH}" - -# Start a single-node Ray cluster. -ray start --head - -# A sample finetuning run, you need to specify data_dir, output_dir and model_name_or_path -# run ./examples/rag/finetune_rag_ray.sh --help to see all the possible options - -python examples/rag/finetune_rag.py \ - --data_dir $DATA_DIR \ - --output_dir $OUTPUT_DIR \ - --model_name_or_path $MODEL_NAME_OR_PATH \ - --model_type rag_sequence \ - --fp16 \ - --gpus 8 \ - --profile \ - --do_train \ - --do_predict \ - --n_val -1 \ - --train_batch_size 8 \ - --eval_batch_size 1 \ - --max_source_length 128 \ - --max_target_length 25 \ - --val_max_target_length 25 \ - --test_max_target_length 25 \ - --label_smoothing 0.1 \ - --dropout 0.1 \ - --attention_dropout 0.1 \ - --weight_decay 0.001 \ - --adam_epsilon 1e-08 \ - --max_grad_norm 0.1 \ - --lr_scheduler polynomial \ - --learning_rate 3e-05 \ - --num_train_epochs 100 \ - --warmup_steps 500 \ - --gradient_accumulation_steps 1 \ - --distributed_retriever ray \ - --num_retrieval_workers 4 - -# Stop the Ray cluster. 
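# (Added note: the single-node cluster started with `ray start --head` above
# hosts the retrieval actors for the whole run, so it is only torn down once
# finetune_rag.py has finished training and prediction.)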
-ray stop diff --git a/examples/research_projects/rag/lightning_base.py b/examples/research_projects/rag/lightning_base.py deleted file mode 100644 index 12099bc3aa1..00000000000 --- a/examples/research_projects/rag/lightning_base.py +++ /dev/null @@ -1,404 +0,0 @@ -import argparse -import logging -import os -from pathlib import Path -from typing import Any, Dict - -import pytorch_lightning as pl -from pytorch_lightning.utilities import rank_zero_info - -from transformers import ( - AdamW, - AutoConfig, - AutoModel, - AutoModelForPreTraining, - AutoModelForQuestionAnswering, - AutoModelForSeq2SeqLM, - AutoModelForSequenceClassification, - AutoModelForTokenClassification, - AutoModelWithLMHead, - AutoTokenizer, - PretrainedConfig, - PreTrainedTokenizer, -) -from transformers.optimization import ( - Adafactor, - get_cosine_schedule_with_warmup, - get_cosine_with_hard_restarts_schedule_with_warmup, - get_linear_schedule_with_warmup, - get_polynomial_decay_schedule_with_warmup, -) -from transformers.utils.versions import require_version - - -logger = logging.getLogger(__name__) - -require_version("pytorch_lightning>=1.0.4") - -MODEL_MODES = { - "base": AutoModel, - "sequence-classification": AutoModelForSequenceClassification, - "question-answering": AutoModelForQuestionAnswering, - "pretraining": AutoModelForPreTraining, - "token-classification": AutoModelForTokenClassification, - "language-modeling": AutoModelWithLMHead, - "summarization": AutoModelForSeq2SeqLM, - "translation": AutoModelForSeq2SeqLM, -} - - -# update this and the import above to support new schedulers from transformers.optimization -arg_to_scheduler = { - "linear": get_linear_schedule_with_warmup, - "cosine": get_cosine_schedule_with_warmup, - "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup, - "polynomial": get_polynomial_decay_schedule_with_warmup, - # '': get_constant_schedule, # not supported for now - # '': get_constant_schedule_with_warmup, # not supported for now -} -arg_to_scheduler_choices = sorted(arg_to_scheduler.keys()) -arg_to_scheduler_metavar = "{" + ", ".join(arg_to_scheduler_choices) + "}" - - -class BaseTransformer(pl.LightningModule): - def __init__( - self, - hparams: argparse.Namespace, - num_labels=None, - mode="base", - config=None, - tokenizer=None, - model=None, - **config_kwargs, - ): - """Initialize a model, tokenizer and config.""" - super().__init__() - # TODO: move to self.save_hyperparameters() - # self.save_hyperparameters() - # can also expand arguments into trainer signature for easier reading - - self.save_hyperparameters(hparams) - self.step_count = 0 - self.output_dir = Path(self.hparams.output_dir) - cache_dir = self.hparams.cache_dir if self.hparams.cache_dir else None - if config is None: - self.config = AutoConfig.from_pretrained( - self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path, - **({"num_labels": num_labels} if num_labels is not None else {}), - cache_dir=cache_dir, - **config_kwargs, - ) - else: - self.config: PretrainedConfig = config - - extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout") - for p in extra_model_params: - if getattr(self.hparams, p, None): - assert hasattr(self.config, p), f"model config doesn't have a `{p}` attribute" - setattr(self.config, p, getattr(self.hparams, p)) - - if tokenizer is None: - self.tokenizer = AutoTokenizer.from_pretrained( - self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path, - 
cache_dir=cache_dir, - ) - else: - self.tokenizer: PreTrainedTokenizer = tokenizer - self.model_type = MODEL_MODES[mode] - if model is None: - self.model = self.model_type.from_pretrained( - self.hparams.model_name_or_path, - from_tf=bool(".ckpt" in self.hparams.model_name_or_path), - config=self.config, - cache_dir=cache_dir, - ) - else: - self.model = model - - def load_hf_checkpoint(self, *args, **kwargs): - self.model = self.model_type.from_pretrained(*args, **kwargs) - - def get_lr_scheduler(self): - get_schedule_func = arg_to_scheduler[self.hparams.lr_scheduler] - scheduler = get_schedule_func( - self.opt, num_warmup_steps=self.hparams.warmup_steps, num_training_steps=self.total_steps() - ) - scheduler = {"scheduler": scheduler, "interval": "step", "frequency": 1} - return scheduler - - def configure_optimizers(self): - """Prepare optimizer and schedule (linear warmup and decay)""" - model = self.model - no_decay = ["bias", "LayerNorm.weight"] - optimizer_grouped_parameters = [ - { - "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], - "weight_decay": self.hparams.weight_decay, - }, - { - "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], - "weight_decay": 0.0, - }, - ] - if self.hparams.adafactor: - optimizer = Adafactor( - optimizer_grouped_parameters, lr=self.hparams.learning_rate, scale_parameter=False, relative_step=False - ) - - else: - optimizer = AdamW( - optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon - ) - self.opt = optimizer - - scheduler = self.get_lr_scheduler() - - return [optimizer], [scheduler] - - def test_step(self, batch, batch_nb): - return self.validation_step(batch, batch_nb) - - def test_epoch_end(self, outputs): - return self.validation_end(outputs) - - def total_steps(self) -> int: - """The number of total training steps that will be run. 
Used for lr scheduler purposes.""" - num_devices = max(1, self.hparams.gpus) # TODO: consider num_tpu_cores - effective_batch_size = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices - return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs - - def setup(self, stage): - if stage == "test": - self.dataset_size = len(self.test_dataloader().dataset) - else: - self.train_loader = self.get_dataloader("train", self.hparams.train_batch_size, shuffle=True) - self.dataset_size = len(self.train_dataloader().dataset) - - def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False): - raise NotImplementedError("You must implement this for your task") - - def train_dataloader(self): - return self.train_loader - - def val_dataloader(self): - return self.get_dataloader("dev", self.hparams.eval_batch_size, shuffle=False) - - def test_dataloader(self): - return self.get_dataloader("test", self.hparams.eval_batch_size, shuffle=False) - - def _feature_file(self, mode): - return os.path.join( - self.hparams.data_dir, - "cached_{}_{}_{}".format( - mode, - list(filter(None, self.hparams.model_name_or_path.split("/"))).pop(), - str(self.hparams.max_seq_length), - ), - ) - - @pl.utilities.rank_zero_only - def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None: - save_path = self.output_dir.joinpath("best_tfmr") - self.model.config.save_step = self.step_count - self.model.save_pretrained(save_path) - self.tokenizer.save_pretrained(save_path) - - @staticmethod - def add_model_specific_args(parser, root_dir): - parser.add_argument( - "--model_name_or_path", - default=None, - type=str, - required=True, - help="Path to pretrained model or model identifier from huggingface.co/models", - ) - parser.add_argument( - "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name" - ) - parser.add_argument( - "--tokenizer_name", - default=None, - type=str, - help="Pretrained tokenizer name or path if not the same as model_name", - ) - parser.add_argument( - "--cache_dir", - default="", - type=str, - help="Where do you want to store the pre-trained models downloaded from huggingface.co", - ) - parser.add_argument( - "--encoder_layerdrop", - type=float, - help="Encoder layer dropout probability (Optional). Goes into model.config", - ) - parser.add_argument( - "--decoder_layerdrop", - type=float, - help="Decoder layer dropout probability (Optional). Goes into model.config", - ) - parser.add_argument( - "--dropout", - type=float, - help="Dropout probability (Optional). Goes into model.config", - ) - parser.add_argument( - "--attention_dropout", - type=float, - help="Attention dropout probability (Optional). 
Goes into model.config", - ) - parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.") - parser.add_argument( - "--lr_scheduler", - default="linear", - choices=arg_to_scheduler_choices, - metavar=arg_to_scheduler_metavar, - type=str, - help="Learning rate scheduler", - ) - parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.") - parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.") - parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.") - parser.add_argument("--num_workers", default=4, type=int, help="kwarg passed to DataLoader") - parser.add_argument("--num_train_epochs", dest="max_epochs", default=3, type=int) - parser.add_argument("--train_batch_size", default=32, type=int) - parser.add_argument("--eval_batch_size", default=32, type=int) - parser.add_argument("--adafactor", action="store_true") - - -class InitCallback(pl.Callback): - # This method is better that using a custom DDP plugging with the latest pytorch-lightning (@shamanez) - def on_sanity_check_start(self, trainer, pl_module): - if ( - trainer.is_global_zero and trainer.global_rank == 0 - ): # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed. - pl_module.model.rag.retriever.init_retrieval() # better to use hook functions. - - -class LoggingCallback(pl.Callback): - def on_batch_end(self, trainer, pl_module): - lr_scheduler = trainer.lr_schedulers[0]["scheduler"] - lrs = {f"lr_group_{i}": lr for i, lr in enumerate(lr_scheduler.get_lr())} - pl_module.logger.log_metrics(lrs) - - def on_validation_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule): - rank_zero_info("***** Validation results *****") - metrics = trainer.callback_metrics - # Log results - for key in sorted(metrics): - if key not in ["log", "progress_bar"]: - rank_zero_info("{} = {}\n".format(key, str(metrics[key]))) - - def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule): - rank_zero_info("***** Test results *****") - metrics = trainer.callback_metrics - # Log and save results to file - output_test_results_file = os.path.join(pl_module.hparams.output_dir, "test_results.txt") - with open(output_test_results_file, "w") as writer: - for key in sorted(metrics): - if key not in ["log", "progress_bar"]: - rank_zero_info("{} = {}\n".format(key, str(metrics[key]))) - writer.write("{} = {}\n".format(key, str(metrics[key]))) - - -def add_generic_args(parser, root_dir) -> None: - # To allow all pl args uncomment the following line - # parser = pl.Trainer.add_argparse_args(parser) - parser.add_argument( - "--output_dir", - default=None, - type=str, - required=True, - help="The output directory where the model predictions and checkpoints will be written.", - ) - parser.add_argument( - "--fp16", - action="store_true", - help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit", - ) - - parser.add_argument( - "--fp16_opt_level", - type=str, - default="O2", - help=( - "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. 
" - "See details at https://nvidia.github.io/apex/amp.html" - ), - ) - parser.add_argument("--n_tpu_cores", dest="tpu_cores", type=int) - parser.add_argument("--max_grad_norm", dest="gradient_clip_val", default=1.0, type=float, help="Max gradient norm") - parser.add_argument("--do_train", action="store_true", help="Whether to run training.") - parser.add_argument("--do_predict", action="store_true", help="Whether to run predictions on the test set.") - parser.add_argument( - "--gradient_accumulation_steps", - dest="accumulate_grad_batches", - type=int, - default=1, - help="Number of updates steps to accumulate before performing a backward/update pass.", - ) - parser.add_argument("--seed", type=int, default=42, help="random seed for initialization") - parser.add_argument( - "--data_dir", - default=None, - type=str, - required=True, - help="The input data dir. Should contain the training files for the CoNLL-2003 NER task.", - ) - - -def generic_train( - model: BaseTransformer, - args: argparse.Namespace, - early_stopping_callback=None, - logger=True, # can pass WandbLogger() here - custom_ddp_plugin=None, - extra_callbacks=[], - checkpoint_callback=None, - logging_callback=None, - **extra_train_kwargs, -): - pl.seed_everything(args.seed) - - # init model - odir = Path(model.hparams.output_dir) - odir.mkdir(exist_ok=True) - - # add custom checkpoints - if checkpoint_callback is None: - checkpoint_callback = pl.callbacks.ModelCheckpoint( - filepath=args.output_dir, prefix="checkpoint", monitor="val_loss", mode="min", save_top_k=1 - ) - if early_stopping_callback: - extra_callbacks.append(early_stopping_callback) - if logging_callback is None: - logging_callback = LoggingCallback() - - train_params = {} - - # TODO: remove with PyTorch 1.6 since pl uses native amp - if args.fp16: - train_params["precision"] = 16 - # train_params["amp_level"] = args.fp16_opt_level - - if args.gpus > 1: - train_params["accelerator"] = "auto" # "ddp" - train_params["strategy"] = "ddp" - - train_params["accumulate_grad_batches"] = args.accumulate_grad_batches - train_params["profiler"] = None # extra_train_kwargs.get("profiler", None) #get unwanted logs - train_params["devices"] = "auto" - - trainer = pl.Trainer.from_argparse_args( - args, - weights_summary=None, - callbacks=[logging_callback] + extra_callbacks + [checkpoint_callback] + [InitCallback()], - # plugins=[custom_ddp_plugin], - logger=logger, - **train_params, - ) - - if args.do_train: - trainer.fit(model) - - return trainer diff --git a/examples/research_projects/rag/parse_dpr_relevance_data.py b/examples/research_projects/rag/parse_dpr_relevance_data.py deleted file mode 100644 index 4d8a1e5f467..00000000000 --- a/examples/research_projects/rag/parse_dpr_relevance_data.py +++ /dev/null @@ -1,47 +0,0 @@ -""" -This script reads DPR retriever training data and parses each datapoint. We save a line per datapoint. -Each line consists of the query followed by a tab-separated list of Wikipedia page titles constituting -positive contexts for a given query. 
-""" - -import argparse -import json - -from tqdm import tqdm - - -def main(): - parser = argparse.ArgumentParser() - - # Required parameters - parser.add_argument( - "--src_path", - type=str, - default="biencoder-nq-dev.json", - help="Path to raw DPR training data", - ) - parser.add_argument( - "--evaluation_set", - type=str, - help="where to store parsed evaluation_set file", - ) - parser.add_argument( - "--gold_data_path", - type=str, - help="where to store parsed gold_data_path file", - ) - args = parser.parse_args() - - with open(args.src_path, "r") as src_file, open(args.evaluation_set, "w") as eval_file, open( - args.gold_data_path, "w" - ) as gold_file: - dpr_records = json.load(src_file) - for dpr_record in tqdm(dpr_records): - question = dpr_record["question"] - contexts = [context["title"] for context in dpr_record["positive_ctxs"]] - eval_file.write(question + "\n") - gold_file.write("\t".join(contexts) + "\n") - - -if __name__ == "__main__": - main() diff --git a/examples/research_projects/rag/requirements.txt b/examples/research_projects/rag/requirements.txt deleted file mode 100644 index 5988d38de9e..00000000000 --- a/examples/research_projects/rag/requirements.txt +++ /dev/null @@ -1,8 +0,0 @@ -faiss-cpu >= 1.6.3 -datasets >= 1.0.1 -psutil >= 5.7.0 -torch >= 1.4.0 -ray >= 1.10.0 -pytorch-lightning >= 1.5.10, <=1.6.0 -transformers -GitPython \ No newline at end of file diff --git a/examples/research_projects/rag/test_data/my_knowledge_dataset.csv b/examples/research_projects/rag/test_data/my_knowledge_dataset.csv deleted file mode 100644 index 76da009a2f2..00000000000 --- a/examples/research_projects/rag/test_data/my_knowledge_dataset.csv +++ /dev/null @@ -1,2 +0,0 @@ -Aaron Aaron Aaron ( or ; "Ahärôn") is a prophet, high priest, and the brother of Moses in the Abrahamic religions. Knowledge of Aaron, along with his brother Moses, comes exclusively from religious texts, such as the Bible and Quran. The Hebrew Bible relates that, unlike Moses, who grew up in the Egyptian royal court, Aaron and his elder sister Miriam remained with their kinsmen in the eastern border-land of Egypt (Goshen). When Moses first confronted the Egyptian king about the Israelites, Aaron served as his brother's spokesman ("prophet") to the Pharaoh. Part of the Law (Torah) that Moses received from God at Sinai granted Aaron the priesthood for himself and his male descendants, and he became the first High Priest of the Israelites. Aaron died before the Israelites crossed the North Jordan river and he was buried on Mount Hor (Numbers 33:39; Deuteronomy 10:6 says he died and was buried at Moserah). Aaron is also mentioned in the New Testament of the Bible. According to the Book of Exodus, Aaron first functioned as Moses' assistant. Because Moses complained that he could not speak well, God appointed Aaron as Moses' "prophet" (Exodus 4:10-17; 7:1). At the command of Moses, he let his rod turn into a snake. Then he stretched out his rod in order to bring on the first three plagues. After that, Moses tended to act and speak for himself. During the journey in the wilderness, Aaron was not always prominent or active. At the battle with Amalek, he was chosen with Hur to support the hand of Moses that held the "rod of God". When the revelation was given to Moses at biblical Mount Sinai, he headed the elders of Israel who accompanied Moses on the way to the summit. 
-"Pokémon" Pokémon , also known as in Japan, is a media franchise managed by The Pokémon Company, a Japanese consortium between Nintendo, Game Freak, and Creatures. The franchise copyright is shared by all three companies, but Nintendo is the sole owner of the trademark. The franchise was created by Satoshi Tajiri in 1995, and is centered on fictional creatures called "Pokémon", which humans, known as Pokémon Trainers, catch and train to battle each other for sport. The English slogan for the franchise is "Gotta Catch 'Em All". Works within the franchise are set in the Pokémon universe. The franchise began as "Pokémon Red" and "Green" (released outside of Japan as "Pokémon Red" and "Blue"), a pair of video games for the original Game Boy that were developed by Game Freak and published by Nintendo in February 1996. "Pokémon" has since gone on to become the highest-grossing media franchise of all time, with over in revenue up until March 2017. The original video game series is the second best-selling video game franchise (behind Nintendo's "Mario" franchise) with more than 300million copies sold and over 800million mobile downloads. In addition, the "Pokémon" franchise includes the world's top-selling toy brand, the top-selling trading card game with over 25.7billion cards sold, an anime television series that has become the most successful video game adaptation with over 20 seasons and 1,000 episodes in 124 countries, as well as an anime film series, a , books, manga comics, music, and merchandise. The franchise is also represented in other Nintendo media, such as the "Super Smash Bros." series. In November 2005, 4Kids Entertainment, which had managed the non-game related licensing of "Pokémon", announced that it had agreed not to renew the "Pokémon" representation agreement. The Pokémon Company International oversees all "Pokémon" licensing outside Asia. 
\ No newline at end of file diff --git a/examples/research_projects/rag/test_distributed_retriever.py b/examples/research_projects/rag/test_distributed_retriever.py deleted file mode 100644 index 7e75e0a7a7e..00000000000 --- a/examples/research_projects/rag/test_distributed_retriever.py +++ /dev/null @@ -1,338 +0,0 @@ -import json -import os -import shutil -import sys -import tempfile -import unittest -from unittest import TestCase -from unittest.mock import patch - -import faiss -import numpy as np -from datasets import Dataset - -from transformers import BartConfig, BartTokenizer, DPRConfig, DPRQuestionEncoderTokenizer, RagConfig -from transformers.file_utils import is_datasets_available, is_faiss_available, is_psutil_available, is_torch_available -from transformers.integrations import is_ray_available -from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES -from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever -from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES -from transformers.testing_utils import require_ray - - -sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # noqa: E402 # isort:skip - -if is_torch_available(): - from distributed_pytorch_retriever import RagPyTorchDistributedRetriever # noqa: E402 # isort:skip -else: - RagPyTorchDistributedRetriever = None - -if is_ray_available(): - import ray # noqa: E402 # isort:skip - from distributed_ray_retriever import RagRayDistributedRetriever, RayRetriever # noqa: E402 # isort:skip -else: - ray = None - RagRayDistributedRetriever = None - RayRetriever = None - - -def require_distributed_retrieval(test_case): - """ - Decorator marking a test that requires a set of dependencies necessary for pefrorm retrieval with - :class:`~transformers.RagRetriever`. - - These tests are skipped when respective libraries are not installed. 
- - """ - if not (is_datasets_available() and is_faiss_available() and is_psutil_available()): - test_case = unittest.skip("test requires Datasets, Faiss, psutil")(test_case) - return test_case - - -@require_distributed_retrieval -class RagRetrieverTest(TestCase): - def setUp(self): - self.tmpdirname = tempfile.mkdtemp() - self.retrieval_vector_size = 8 - - # DPR tok - vocab_tokens = [ - "[UNK]", - "[CLS]", - "[SEP]", - "[PAD]", - "[MASK]", - "want", - "##want", - "##ed", - "wa", - "un", - "runn", - "##ing", - ",", - "low", - "lowest", - ] - dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer") - os.makedirs(dpr_tokenizer_path, exist_ok=True) - self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"]) - with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer: - vocab_writer.write("".join([x + "\n" for x in vocab_tokens])) - - # BART tok - vocab = [ - "l", - "o", - "w", - "e", - "r", - "s", - "t", - "i", - "d", - "n", - "\u0120", - "\u0120l", - "\u0120n", - "\u0120lo", - "\u0120low", - "er", - "\u0120lowest", - "\u0120newer", - "\u0120wider", - "", - ] - vocab_tokens = dict(zip(vocab, range(len(vocab)))) - merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] - self.special_tokens_map = {"unk_token": ""} - - bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer") - os.makedirs(bart_tokenizer_path, exist_ok=True) - self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"]) - self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"]) - with open(self.vocab_file, "w", encoding="utf-8") as fp: - fp.write(json.dumps(vocab_tokens) + "\n") - with open(self.merges_file, "w", encoding="utf-8") as fp: - fp.write("\n".join(merges)) - - def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer: - return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer")) - - def get_bart_tokenizer(self) -> BartTokenizer: - return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer")) - - def tearDown(self): - shutil.rmtree(self.tmpdirname) - - def get_dummy_dataset(self): - dataset = Dataset.from_dict( - { - "id": ["0", "1"], - "text": ["foo", "bar"], - "title": ["Foo", "Bar"], - "embeddings": [np.ones(self.retrieval_vector_size), 2 * np.ones(self.retrieval_vector_size)], - } - ) - dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT) - return dataset - - def get_dummy_pytorch_distributed_retriever( - self, init_retrieval: bool, port=12345 - ) -> RagPyTorchDistributedRetriever: - dataset = self.get_dummy_dataset() - config = RagConfig( - retrieval_vector_size=self.retrieval_vector_size, - question_encoder=DPRConfig().to_dict(), - generator=BartConfig().to_dict(), - ) - with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset: - mock_load_dataset.return_value = dataset - retriever = RagPyTorchDistributedRetriever( - config, - question_encoder_tokenizer=self.get_dpr_tokenizer(), - generator_tokenizer=self.get_bart_tokenizer(), - ) - if init_retrieval: - retriever.init_retrieval(port) - return retriever - - def get_dummy_ray_distributed_retriever(self, init_retrieval: bool) -> RagRayDistributedRetriever: - # Have to run in local mode because sys.path modifications at top of - # file are not propogated to remote workers. 
- # https://stackoverflow.com/questions/54338013/parallel-import-a-python-file-from-sibling-folder - ray.init(local_mode=True) - config = RagConfig( - retrieval_vector_size=self.retrieval_vector_size, - question_encoder=DPRConfig().to_dict(), - generator=BartConfig().to_dict(), - ) - remote_cls = ray.remote(RayRetriever) - workers = [remote_cls.remote() for _ in range(1)] - with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset: - mock_load_dataset.return_value = self.get_dummy_dataset() - retriever = RagRayDistributedRetriever( - config, - question_encoder_tokenizer=self.get_dpr_tokenizer(), - generator_tokenizer=self.get_bart_tokenizer(), - retrieval_workers=workers, - ) - if init_retrieval: - retriever.init_retrieval() - return retriever - - def get_dummy_custom_hf_index_pytorch_retriever(self, init_retrieval: bool, from_disk: bool, port=12345): - dataset = self.get_dummy_dataset() - config = RagConfig( - retrieval_vector_size=self.retrieval_vector_size, - question_encoder=DPRConfig().to_dict(), - generator=BartConfig().to_dict(), - index_name="custom", - ) - if from_disk: - config.passages_path = os.path.join(self.tmpdirname, "dataset") - config.index_path = os.path.join(self.tmpdirname, "index.faiss") - dataset.get_index("embeddings").save(os.path.join(self.tmpdirname, "index.faiss")) - dataset.drop_index("embeddings") - dataset.save_to_disk(os.path.join(self.tmpdirname, "dataset")) - del dataset - retriever = RagPyTorchDistributedRetriever( - config, - question_encoder_tokenizer=self.get_dpr_tokenizer(), - generator_tokenizer=self.get_bart_tokenizer(), - ) - else: - retriever = RagPyTorchDistributedRetriever( - config, - question_encoder_tokenizer=self.get_dpr_tokenizer(), - generator_tokenizer=self.get_bart_tokenizer(), - index=CustomHFIndex(config.retrieval_vector_size, dataset), - ) - if init_retrieval: - retriever.init_retrieval(port) - return retriever - - def get_dummy_custom_hf_index_ray_retriever(self, init_retrieval: bool, from_disk: bool): - # Have to run in local mode because sys.path modifications at top of - # file are not propogated to remote workers. 
- # https://stackoverflow.com/questions/54338013/parallel-import-a-python-file-from-sibling-folder - ray.init(local_mode=True) - dataset = self.get_dummy_dataset() - config = RagConfig( - retrieval_vector_size=self.retrieval_vector_size, - question_encoder=DPRConfig().to_dict(), - generator=BartConfig().to_dict(), - index_name="custom", - ) - remote_cls = ray.remote(RayRetriever) - workers = [remote_cls.remote() for _ in range(1)] - if from_disk: - config.passages_path = os.path.join(self.tmpdirname, "dataset") - config.index_path = os.path.join(self.tmpdirname, "index.faiss") - dataset.get_index("embeddings").save(os.path.join(self.tmpdirname, "index.faiss")) - dataset.drop_index("embeddings") - dataset.save_to_disk(os.path.join(self.tmpdirname, "dataset")) - del dataset - retriever = RagRayDistributedRetriever( - config, - question_encoder_tokenizer=self.get_dpr_tokenizer(), - generator_tokenizer=self.get_bart_tokenizer(), - retrieval_workers=workers, - index=CustomHFIndex.load_from_disk( - vector_size=config.retrieval_vector_size, - dataset_path=config.passages_path, - index_path=config.index_path, - ), - ) - else: - retriever = RagRayDistributedRetriever( - config, - question_encoder_tokenizer=self.get_dpr_tokenizer(), - generator_tokenizer=self.get_bart_tokenizer(), - retrieval_workers=workers, - index=CustomHFIndex(config.retrieval_vector_size, dataset), - ) - if init_retrieval: - retriever.init_retrieval() - return retriever - - def distributed_retriever_check(self, retriever: RagRetriever, hidden_states: np.array, n_docs: int) -> None: - retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs) - self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size)) - self.assertEqual(len(doc_dicts), 2) - self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"]) - self.assertEqual(len(doc_dicts[0]["id"]), n_docs) - self.assertEqual(doc_dicts[0]["id"][0], "1") # max inner product is reached with second doc - self.assertEqual(doc_dicts[1]["id"][0], "0") # max inner product is reached with first doc - self.assertListEqual(doc_ids.tolist(), [[1], [0]]) - - def test_pytorch_distributed_retriever_retrieve(self): - n_docs = 1 - hidden_states = np.array( - [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32 - ) - - self.distributed_retriever_check( - self.get_dummy_pytorch_distributed_retriever(init_retrieval=True), hidden_states, n_docs - ) - - def test_custom_hf_index_pytorch_retriever_retrieve(self): - n_docs = 1 - hidden_states = np.array( - [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32 - ) - - self.distributed_retriever_check( - self.get_dummy_custom_hf_index_pytorch_retriever(init_retrieval=True, from_disk=False), - hidden_states, - n_docs, - ) - - def test_custom_pytorch_distributed_retriever_retrieve_from_disk(self): - n_docs = 1 - hidden_states = np.array( - [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32 - ) - - self.distributed_retriever_check( - self.get_dummy_custom_hf_index_pytorch_retriever(init_retrieval=True, from_disk=True), - hidden_states, - n_docs, - ) - - @require_ray - def test_ray_distributed_retriever_retrieve(self): - n_docs = 1 - hidden_states = np.array( - [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32 - ) - - self.distributed_retriever_check( - self.get_dummy_ray_distributed_retriever(init_retrieval=True), 
hidden_states, n_docs - ) - ray.shutdown() - - @require_ray - def test_custom_hf_index_ray_retriever_retrieve(self): - n_docs = 1 - hidden_states = np.array( - [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32 - ) - with self.assertRaises(ValueError): - self.distributed_retriever_check( - self.get_dummy_custom_hf_index_ray_retriever(init_retrieval=True, from_disk=False), - hidden_states, - n_docs, - ) - ray.shutdown() - - @require_ray - def test_custom_ray_distributed_retriever_retrieve_from_disk(self): - n_docs = 1 - hidden_states = np.array( - [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32 - ) - - self.distributed_retriever_check( - self.get_dummy_custom_hf_index_ray_retriever(init_retrieval=True, from_disk=True), hidden_states, n_docs - ) - ray.shutdown() diff --git a/examples/research_projects/rag/use_own_knowledge_dataset.py b/examples/research_projects/rag/use_own_knowledge_dataset.py deleted file mode 100644 index d2ab6d07d5c..00000000000 --- a/examples/research_projects/rag/use_own_knowledge_dataset.py +++ /dev/null @@ -1,208 +0,0 @@ -import logging -import os -from dataclasses import dataclass, field -from functools import partial -from pathlib import Path -from tempfile import TemporaryDirectory -from typing import List, Optional - -import faiss -import torch -from datasets import Features, Sequence, Value, load_dataset - -from transformers import ( - DPRContextEncoder, - DPRContextEncoderTokenizerFast, - HfArgumentParser, - RagRetriever, - RagSequenceForGeneration, - RagTokenizer, -) - - -logger = logging.getLogger(__name__) -torch.set_grad_enabled(False) -device = "cuda" if torch.cuda.is_available() else "cpu" - - -def split_text(text: str, n=100, character=" ") -> List[str]: - """Split the text every ``n``-th occurrence of ``character``""" - text = text.split(character) - return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)] - - -def split_documents(documents: dict) -> dict: - """Split documents into passages""" - titles, texts = [], [] - for title, text in zip(documents["title"], documents["text"]): - if text is not None: - for passage in split_text(text): - titles.append(title if title is not None else "") - texts.append(passage) - return {"title": titles, "text": texts} - - -def embed(documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast) -> dict: - """Compute the DPR embeddings of document passages""" - input_ids = ctx_tokenizer( - documents["title"], documents["text"], truncation=True, padding="longest", return_tensors="pt" - )["input_ids"] - embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output - return {"embeddings": embeddings.detach().cpu().numpy()} - - -def main( - rag_example_args: "RagExampleArguments", - processing_args: "ProcessingArguments", - index_hnsw_args: "IndexHnswArguments", -): - ###################################### - logger.info("Step 1 - Create the dataset") - ###################################### - - # The dataset needed for RAG must have three columns: - # - title (string): title of the document - # - text (string): text of a passage of the document - # - embeddings (array of dimension d): DPR representation of the passage - - # Let's say you have documents in tab-separated csv files with columns "title" and "text" - assert os.path.isfile(rag_example_args.csv_path), "Please provide a valid path to a csv file" - - # You can load a Dataset object this way - 
dataset = load_dataset( - "csv", data_files=[rag_example_args.csv_path], split="train", delimiter="\t", column_names=["title", "text"] - ) - - # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets?highlight=csv#csv-files - - # Then split the documents into passages of 100 words - dataset = dataset.map(split_documents, batched=True, num_proc=processing_args.num_proc) - - # And compute the embeddings - ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=device) - ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name) - new_features = Features( - {"text": Value("string"), "title": Value("string"), "embeddings": Sequence(Value("float32"))} - ) # optional, save as float32 instead of float64 to save space - dataset = dataset.map( - partial(embed, ctx_encoder=ctx_encoder, ctx_tokenizer=ctx_tokenizer), - batched=True, - batch_size=processing_args.batch_size, - features=new_features, - ) - - # And finally save your dataset - passages_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset") - dataset.save_to_disk(passages_path) - # from datasets import load_from_disk - # dataset = load_from_disk(passages_path) # to reload the dataset - - ###################################### - logger.info("Step 2 - Index the dataset") - ###################################### - - # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search - index = faiss.IndexHNSWFlat(index_hnsw_args.d, index_hnsw_args.m, faiss.METRIC_INNER_PRODUCT) - dataset.add_faiss_index("embeddings", custom_index=index) - - # And save the index - index_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset_hnsw_index.faiss") - dataset.get_index("embeddings").save(index_path) - # dataset.load_faiss_index("embeddings", index_path) # to reload the index - - ###################################### - logger.info("Step 3 - Load RAG") - ###################################### - - # Easy way to load the model - retriever = RagRetriever.from_pretrained( - rag_example_args.rag_model_name, index_name="custom", indexed_dataset=dataset - ) - model = RagSequenceForGeneration.from_pretrained(rag_example_args.rag_model_name, retriever=retriever) - tokenizer = RagTokenizer.from_pretrained(rag_example_args.rag_model_name) - - # For distributed fine-tuning you'll need to provide the paths instead, as the dataset and the index are loaded separately. - # retriever = RagRetriever.from_pretrained(rag_model_name, index_name="custom", passages_path=passages_path, index_path=index_path) - - ###################################### - logger.info("Step 4 - Have fun") - ###################################### - - question = rag_example_args.question or "What does Moses' rod turn into ?" - input_ids = tokenizer.question_encoder(question, return_tensors="pt")["input_ids"] - generated = model.generate(input_ids) - generated_string = tokenizer.batch_decode(generated, skip_special_tokens=True)[0] - logger.info("Q: " + question) - logger.info("A: " + generated_string) - - -@dataclass -class RagExampleArguments: - csv_path: str = field( - default=str(Path(__file__).parent / "test_data" / "my_knowledge_dataset.csv"), - metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"}, - ) - question: Optional[str] = field( - default=None, - metadata={"help": "Question that is passed as input to RAG. 
Default is 'What does Moses' rod turn into ?'."}, - ) - rag_model_name: str = field( - default="facebook/rag-sequence-nq", - metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"}, - ) - dpr_ctx_encoder_model_name: str = field( - default="facebook/dpr-ctx_encoder-multiset-base", - metadata={ - "help": ( - "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or" - " 'facebook/dpr-ctx_encoder-multiset-base'" - ) - }, - ) - output_dir: Optional[str] = field( - default=None, - metadata={"help": "Path to a directory where the dataset passages and the index will be saved"}, - ) - - -@dataclass -class ProcessingArguments: - num_proc: Optional[int] = field( - default=None, - metadata={ - "help": "The number of processes to use to split the documents into passages. Default is single process." - }, - ) - batch_size: int = field( - default=16, - metadata={ - "help": "The batch size to use when computing the passages embeddings using the DPR context encoder." - }, - ) - - -@dataclass -class IndexHnswArguments: - d: int = field( - default=768, - metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."}, - ) - m: int = field( - default=128, - metadata={ - "help": ( - "The number of bi-directional links created for every new element during the HNSW index construction." - ) - }, - ) - - -if __name__ == "__main__": - logging.basicConfig(level=logging.WARNING) - logger.setLevel(logging.INFO) - - parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments)) - rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses() - with TemporaryDirectory() as tmp_dir: - rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir - main(rag_example_args, processing_args, index_hnsw_args) diff --git a/examples/research_projects/rag/utils_rag.py b/examples/research_projects/rag/utils_rag.py deleted file mode 100644 index ec98c1d782e..00000000000 --- a/examples/research_projects/rag/utils_rag.py +++ /dev/null @@ -1,244 +0,0 @@ -import itertools -import json -import linecache -import os -import pickle -import re -import socket -import string -from collections import Counter -from logging import getLogger -from pathlib import Path -from typing import Callable, Dict, Iterable, List - -import git -import torch -from torch.utils.data import Dataset - -from transformers import BartTokenizer, RagTokenizer, T5Tokenizer - - -def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"): - extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {} - tokenizer.padding_side = padding_side - return tokenizer( - [line], - max_length=max_length, - padding="max_length" if pad_to_max_length else None, - truncation=True, - return_tensors=return_tensors, - add_special_tokens=True, - **extra_kw, - ) - - -def trim_batch( - input_ids, - pad_token_id, - attention_mask=None, -): - """Remove columns that are populated exclusively by pad_token_id""" - keep_column_mask = input_ids.ne(pad_token_id).any(dim=0) - if attention_mask is None: - return input_ids[:, keep_column_mask] - else: - return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask]) - - -class Seq2SeqDataset(Dataset): - def __init__( - self, - tokenizer, - data_dir, - max_source_length, - max_target_length, - type_path="train", - n_obs=None, - src_lang=None, - tgt_lang=None, - prefix="", - ): - 
super().__init__() - self.src_file = Path(data_dir).joinpath(type_path + ".source") - self.tgt_file = Path(data_dir).joinpath(type_path + ".target") - self.src_lens = self.get_char_lens(self.src_file) - self.max_source_length = max_source_length - self.max_target_length = max_target_length - assert min(self.src_lens) > 0, f"found empty line in {self.src_file}" - self.tokenizer = tokenizer - self.prefix = prefix - if n_obs is not None: - self.src_lens = self.src_lens[:n_obs] - self.src_lang = src_lang - self.tgt_lang = tgt_lang - - def __len__(self): - return len(self.src_lens) - - def __getitem__(self, index) -> Dict[str, torch.Tensor]: - index = index + 1 # linecache starts at 1 - source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n") - tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n") - assert source_line, f"empty source line for index {index}" - assert tgt_line, f"empty tgt line for index {index}" - - # Need to add eos token manually for T5 - if isinstance(self.tokenizer, T5Tokenizer): - source_line += self.tokenizer.eos_token - tgt_line += self.tokenizer.eos_token - - # Pad source and target to the right - source_tokenizer = ( - self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer - ) - target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer - - source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right") - target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right") - - source_ids = source_inputs["input_ids"].squeeze() - target_ids = target_inputs["input_ids"].squeeze() - src_mask = source_inputs["attention_mask"].squeeze() - return { - "input_ids": source_ids, - "attention_mask": src_mask, - "decoder_input_ids": target_ids, - } - - @staticmethod - def get_char_lens(data_file): - return [len(x) for x in Path(data_file).open().readlines()] - - def collate_fn(self, batch) -> Dict[str, torch.Tensor]: - input_ids = torch.stack([x["input_ids"] for x in batch]) - masks = torch.stack([x["attention_mask"] for x in batch]) - target_ids = torch.stack([x["decoder_input_ids"] for x in batch]) - tgt_pad_token_id = ( - self.tokenizer.generator.pad_token_id - if isinstance(self.tokenizer, RagTokenizer) - else self.tokenizer.pad_token_id - ) - src_pad_token_id = ( - self.tokenizer.question_encoder.pad_token_id - if isinstance(self.tokenizer, RagTokenizer) - else self.tokenizer.pad_token_id - ) - y = trim_batch(target_ids, tgt_pad_token_id) - source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks) - batch = { - "input_ids": source_ids, - "attention_mask": source_mask, - "decoder_input_ids": y, - } - return batch - - -logger = getLogger(__name__) - - -def flatten_list(summary_ids: List[List]): - return list(itertools.chain.from_iterable(summary_ids)) - - -def save_git_info(folder_path: str) -> None: - """Save git information to output_dir/git_log.json""" - repo_infos = get_git_info() - save_json(repo_infos, os.path.join(folder_path, "git_log.json")) - - -def save_json(content, path, indent=4, **json_dump_kwargs): - with open(path, "w") as f: - json.dump(content, f, indent=indent, **json_dump_kwargs) - - -def load_json(path): - with open(path) as f: - return json.load(f) - - -def get_git_info(): - repo = git.Repo(search_parent_directories=True) - repo_infos = { - "repo_id": str(repo), - "repo_sha": str(repo.head.object.hexsha), - "repo_branch": str(repo.active_branch), - 
"hostname": str(socket.gethostname()), - } - return repo_infos - - -def lmap(f: Callable, x: Iterable) -> List: - """list(map(f, x))""" - return list(map(f, x)) - - -def pickle_save(obj, path): - """pickle.dump(obj, path)""" - with open(path, "wb") as f: - return pickle.dump(obj, f) - - -def normalize_answer(s): - """Lower text and remove punctuation, articles and extra whitespace.""" - - def remove_articles(text): - return re.sub(r"\b(a|an|the)\b", " ", text) - - def white_space_fix(text): - return " ".join(text.split()) - - def remove_punc(text): - exclude = set(string.punctuation) - return "".join(ch for ch in text if ch not in exclude) - - def lower(text): - return text.lower() - - return white_space_fix(remove_articles(remove_punc(lower(s)))) - - -def f1_score(prediction, ground_truth): - prediction_tokens = normalize_answer(prediction).split() - ground_truth_tokens = normalize_answer(ground_truth).split() - common = Counter(prediction_tokens) & Counter(ground_truth_tokens) - num_same = sum(common.values()) - if num_same == 0: - return 0 - precision = 1.0 * num_same / len(prediction_tokens) - recall = 1.0 * num_same / len(ground_truth_tokens) - f1 = (2 * precision * recall) / (precision + recall) - return f1 - - -def exact_match_score(prediction, ground_truth): - return normalize_answer(prediction) == normalize_answer(ground_truth) - - -def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict: - assert len(output_lns) == len(reference_lns) - em = 0 - for hypo, pred in zip(output_lns, reference_lns): - em += exact_match_score(hypo, pred) - if len(output_lns) > 0: - em /= len(output_lns) - return {"em": em} - - -def is_rag_model(model_prefix): - return model_prefix.startswith("rag") - - -def set_extra_model_params(extra_params, hparams, config): - equivalent_param = {p: p for p in extra_params} - # T5 models don't have `dropout` param, they have `dropout_rate` instead - equivalent_param["dropout"] = "dropout_rate" - for p in extra_params: - if getattr(hparams, p, None): - if not hasattr(config, p) and not hasattr(config, equivalent_param[p]): - logger.info("config doesn't have a `{}` attribute".format(p)) - delattr(hparams, p) - continue - set_p = p if hasattr(config, p) else equivalent_param[p] - setattr(config, set_p, getattr(hparams, p)) - delattr(hparams, p) - return hparams, config diff --git a/examples/research_projects/robust-speech-event/README.md b/examples/research_projects/robust-speech-event/README.md deleted file mode 100644 index ca3c5cdecde..00000000000 --- a/examples/research_projects/robust-speech-event/README.md +++ /dev/null @@ -1,713 +0,0 @@ -# Robust Speech Challenge 🤗 - -Welcome to the robust speech recognition challenge 🎙️ ! - -The goal of this event is to build **robust**, **real-world** speech recognition (ASR) systems in as many languages as possible 🌏🌍🌎. -If necessary and available, free access to a V100S 32 GB GPU will kindly be provided by the [OVHcloud team](https://www.ovhcloud.com/) 🚀. -This document summarizes all the relevant information required for the speech community event 📋. - -To sign-up, please see [this forum post](https://discuss.huggingface.co/t/open-to-the-community-robust-speech-recognition-challenge/13614) 🤗. Please make sure to: -- Read it in detail -- Fill the google form -- Join our Discord server in the #join-sprint channel. 
- -## Table of Contents - -- [TLDR;](#tldr) - -- [Important dates](#important-dates) - -- [How to install pytorch, transformers, datasets](#how-to-install-relevant-libraries) - -- [Data and Preprocessing](#data-and-preprocessing) - -- [How to fine-tune an acoustic model](#how-to-finetune-an-acoustic-model) - -- [How to fine-tune with OVH cloud](#how-to-finetune-with-ovh-cloud) - -- [How to combine n-gram language models with acoustic model](#how-to-combine-n-gram-with-acoustic-model) - -- [Evaluation](#evaluation) - -- [Prizes](#prizes) - -- [Communication and Problems](#communication-and-problems) - -- [Talks](#talks) - -- [General Tips & Tricks](#general-tips-and-tricks) - -## TLDR - -Participants are encouraged to leverage pre-trained speech recognition checkpoints, -preferably [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53), -to train a speech recognition system in a language of their choice. - -Speech recognition systems should be trained using **PyTorch**, **🤗 Transformers**, and **🤗 Datasets**. -For more information on how to install the above libraries, please read through -[How to install pytorch, transformers, datasets](#how-to-install-relevant-libraries). - -Participants can make use of whatever data they think is useful to build a -speech recognition system for **real-world** audio data - -**except** the Common Voice `"test"` split of their chosen language. -The section [Data and preprocessing](#data-and-preprocessing) explains -in more detail what audio data can be used, how to find suitable audio data, and -how the audio data can be processed. - -For training, it is recommended to use the [official training script](https://github.com/huggingface/transformers/blob/main/examples/pytorch/speech-recognition/run_speech_recognition_ctc.py) or a modification thereof. A step-by-step guide on how to fine-tune -an acoustic model for a speech recognition system can be found under [How to fine-tune an acoustic model](#how-to-finetune-an-acoustic-model). -If possible, it is encouraged to fine-tune the acoustic models on local GPU machines, but -if those are not available, the OVH cloud team kindly provides a limited -number of GPUs for the event. Simply fill out [this google form](https://forms.gle/GFZkMkKLiufi75g28) to get access to a GPU. -For more information on how to train an acoustic model on one of OVH's GPUs - see [How to fine-tune a speech recognition model with OVHcloud](#how-to-finetune-with-ovh-cloud). - -The performance of a speech recognition system can often be significantly improved by adding a -language model for decoding. For more information on how to add a language model, please -take a look at [How to combine n-gram language models with speech recognition models](#how-to-combine-n-gram-with-acoustic-model). - -During the event, the speech recognition system will be evaluated on both the Common Voice `"test"` split -of the participants' chosen language as well as the *real-world* `"dev"` data provided by -the Hugging Face team. -At the end of the robust speech recognition challenge, the speech recognition system will also be evaluated on the -*real-world* `"test"` data provided by the Hugging Face team. Each participant should add an -`eval.py` script to her/his model repository in a specific format that lets one easily -evaluate the speech recognition system on both Common Voice's `"test"` data as well as the *real-world* audio -data. Please read through the [Evaluation](#evaluation) section to make sure your evaluation script is in the correct format.
Speech recognition systems -with evaluation scripts in an incorrect format can sadly not be considered for the Challenge. - -At the end of the event, the best performing speech recognition system -will receive a prize 🏆 - more information regarding the prizes can be found under [Prizes](#prizes). - -We believe that framing the event as a competition is more fun, but at the core, the event is about -creating speech recognition systems in as many languages as possible as a community. -This can be achieved by working together, helping each other to solve bugs, share important findings, etc...🤗 - -**Note**: -Please, read through the section on [Communication & Problems](#communication-and-problems) to make sure you -know how to ask for help, etc... -All important announcements will be made on discord. Please make sure that -you've joined [this discord channel](https://discord.gg/SHr5wC7m) - -Also, please make sure that you have been added to the [Speech Event Organization](https://huggingface.co/speech-recognition-community-v2). -You should have received an invite by email. If you didn't receive an invite, please contact the organizers, *e.g.* Anton, Patrick, or Omar directly on discord. - -## Important dates - -![timeline](https://github.com/patrickvonplaten/scientific_images/raw/master/Robush%20Speech%20Challenge.png) - - -## Data and preprocessing - -In this section, we will quickly go over how to find suitable training data and -how to preprocess it. - -To begin with, **all data except Common Voice's `"test"` data can be used as training data.** -The exception includes all Common Voice versions as the test data split of later Common Voice versions often -overlaps with the one of previous versions, *e.g.* the test data of Common Voice 7 in English is -to a big part identical to the test data of Common Voice 6 in English: - -```python -load_dataset("mozilla-foundation/common_voice_7_0", "en", split="test") -``` - -includes more or less the same data as - -```python -load_dataset("mozilla-foundation/common_voice_6_1", "en", split="test") -``` - -However, we strongly encourage participants to make use of Common Voice's other splits, *e.g.* `"train"` and `"validation"`. -For most languages, the Common Voice dataset offers already a decent amount of training data. It is usually -always advantageous to collect additional data. To do so, the participants are in a first step encouraged to search the -Hugging Face Hub for additional audio data, for example by selecting the category -["speech-processing"](https://huggingface.co/datasets?task_categories=task_categories:speech-processing&sort=downloads). -All datasets that are available on the Hub can be downloaded via the 🤗 Datasets library in the same way Common Voice is downloaded. -If one wants to combine multiple datasets for training, it might make sense to take a look at -the [`interleave_datasets`](https://huggingface.co/docs/datasets/package_reference/main_classes?highlight=interleave#datasets.interleave_datasets) function. - -In addition, participants can also make use of their audio data. Here, please make sure that you **are allowed to use the audio data**. E.g., if audio data -is taken from media platforms, such as YouTube, it should be verified that the media platform and the owner of the data have given her/his approval to use the audio -data in the context of machine learning research. If you are not sure whether the data you want to use has the appropriate licensing, please contact the Hugging Face -team on discord. 
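For illustration of the dataset-combination idea mentioned above, here is a minimal, hedged sketch of interleaving Common Voice with a second corpus via `interleave_datasets`. It is not part of the original README; the second dataset name is a placeholder, and both datasets must expose the same columns before they can be interleaved.

```python
from datasets import interleave_datasets, load_dataset

# Common Voice training split for the chosen language (the config name is illustrative).
common_voice = load_dataset(
    "mozilla-foundation/common_voice_7_0", "sv-SE", split="train", use_auth_token=True
)

# Hypothetical additional corpus found on the Hub; it must share the same
# features (e.g. "audio" and "sentence" columns) as Common Voice.
extra_corpus = load_dataset("some-org/some-speech-corpus", split="train")
extra_corpus = extra_corpus.cast(common_voice.features)

# Alternate between the two corpora; `probabilities=[...]` allows weighted sampling.
combined_train = interleave_datasets([common_voice, extra_corpus])
print(combined_train[0]["sentence"])
```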
- -Next, let's talk about preprocessing. Audio data and transcriptions have to be brought into the correct format when -training the acoustic model (example shown in [How to fine-tune an acoustic model](#how-to-finetune-an-acoustic-model)). -It is recommended that this is done by using 🤗 Datasets' `.map()` function as shown -[here](https://github.com/huggingface/transformers/blob/9a2dabae7002258e41419491c73dd43ad61b5de7/examples/pytorch/speech-recognition/run_speech_recognition_ctc.py#L444). As can be -seen, we can pass some characters that will be removed from the transcriptions, *e.g.*: `--chars_to_ignore , ? . ! - \; \: \" “ % ‘ ” � \` -on the official ["Single GPU Example"](https://github.com/huggingface/transformers/tree/main/examples/pytorch/speech-recognition#single-gpu-ctc). -The participants are free to modify this preprocessing by removing more characters or even replacing characters as -it is done in the [official blog post](https://github.com/huggingface/transformers/blob/9a2dabae7002258e41419491c73dd43ad61b5de7/examples/pytorch/speech-recognition/run_speech_recognition_ctc.py#L444). -**However**, there are some rules regarding what characters are allowed to be removed/replaced and which are not. -These rules are not that straightforward and therefore often have to be evaluated case-by-case. -It is allowed (and recommended) to normalize the data to only have lower-case characters. It is also allowed (and recommended) to remove typographical -symbols and punctuation marks. A list of such symbols can *e.g.* be found [here](https://en.wikipedia.org/wiki/List_of_typographical_symbols_and_punctuation_marks) - however, here we already must be careful. We should **not** remove a symbol that would change the meaning of the words, *e.g.* in English, -we should not remove the single quotation mark `'` since it would change the meaning of the word `"it's"` to `"its"` which would then be incorrect. -So the golden rule here is to not remove any characters that could change the meaning of a word into another word. This is not always obvious and should -be given some consideration. As another example, it is fine to remove the "Hyphen-minus" sign "`-`" since it doesn't change the -meaning of a word to another one. *E.g.* "`fine-tuning`" would be changed to "`finetuning`" which still has the same meaning. - -Since those choices are not always obvious, when in doubt feel free to ask on Discord or even better post your question on the forum, as was -done, *e.g.* [here](https://discuss.huggingface.co/t/spanish-asr-fine-tuning-wav2vec2/4586). - -## How to install relevant libraries - -The following libraries are required to fine-tune a speech model with 🤗 Transformers and 🤗 Datasets in PyTorch. - -- [PyTorch](https://pytorch.org/) -- [Transformers](https://github.com/huggingface/transformers) -- [Datasets](https://github.com/huggingface/datasets) - -We recommend installing the above libraries in a [virtual environment](https://docs.python.org/3/library/venv.html). -If you're unfamiliar with Python virtual environments, check out the [user guide](https://packaging.python.org/guides/installing-using-pip-and-virtual-environments/). Create a virtual environment with the version of Python you're going -to use and activate it. - -You should be able to run the command: - -```bash -python3 -m venv <your-venv-name> -``` - -You can activate your venv by running - -```bash -source ~/<your-venv-name>/bin/activate -``` - -To begin with, please make sure you have PyTorch and CUDA correctly installed.
-The following command should return ``True``: - -```bash -python -c "import torch; print(torch.cuda.is_available())" -``` - -If the above command doesn't print ``True``, in the first step, please follow the -instructions [here](https://pytorch.org/) to install PyTorch with CUDA. - -We strongly recommend making use of the provided PyTorch example scripts in [transformers/examples/pytorch/speech-recognition](https://github.com/huggingface/transformers/tree/main/examples/pytorch/speech-recognition) to train your speech recognition -system. -In all likelihood, you will adjust one of the example scripts, so we recommend forking and cloning the 🤗 Transformers repository as follows. - -1. Fork the [repository](https://github.com/huggingface/transformers) by - clicking on the 'Fork' button on the repository's page. This creates a copy of the code - under your GitHub user account. - -2. Clone your fork to your local disk, and add the base repository as a remote: - - ```bash - $ git clone https://github.com/<your-github-handle>/transformers.git - $ cd transformers - $ git remote add upstream https://github.com/huggingface/transformers.git - ``` - -3. Create a new branch to hold your development changes. This is especially useful to share code changes with your team: - - ```bash - $ git checkout -b a-descriptive-name-for-my-project - ``` - -4. Set up a PyTorch environment by running the following command in your virtual environment: - - ```bash - $ pip install -e ".[torch-speech]" - ``` - - (If transformers was already installed in the virtual environment, remove - it with `pip uninstall transformers` before reinstalling it in editable - mode with the `-e` flag.) - - If you have already cloned that repo, you might need to `git pull` to get the most recent changes in the `transformers` - library. - - Running this command will automatically install `torch` and the most relevant - libraries required for fine-tuning a speech recognition system. - -Next, you should also install the 🤗 Datasets library. We strongly recommend installing the -library from source to profit from the most current additions during the community week. - -Simply run the following steps: - -```bash -$ cd ~/ -$ git clone https://github.com/huggingface/datasets.git -$ cd datasets -$ pip install -e ".[streaming]" -``` - -If you plan on contributing a specific dataset during -the community week, please fork the datasets repository and follow the instructions -[here](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-create-a-pull-request). - -To verify that all libraries are correctly installed, you can run the following command in a Python shell. -It verifies that both `transformers` and `datasets` have been correctly installed.
- -```python -from transformers import AutoModelForCTC, AutoProcessor -from datasets import load_dataset - -dummy_dataset = load_dataset("common_voice", "ab", split="test") - -model = AutoModelForCTC.from_pretrained("hf-internal-testing/tiny-random-wav2vec2") -model.to("cuda") - -processor = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2") - -input_values = processor(dummy_dataset[0]["audio"]["array"], return_tensors="pt", sampling_rate=16_000).input_values -input_values = input_values.to("cuda") - -logits = model(input_values).logits - -assert logits.shape[-1] == 32 -``` - -## How to finetune an acoustic model - -In this section, we show you how to fine-tune a pre-trained [XLS-R Model](https://huggingface.co/docs/transformers/model_doc/xls_r) on the [Common Voice 7 dataset](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). - -We recommend fine-tuning one of the following pre-trained XLS-R checkpoints: - -- [300M parameters version](https://huggingface.co/facebook/wav2vec2-xls-r-300m) -- [1B parameters version](https://huggingface.co/facebook/wav2vec2-xls-r-1b) -- [2B parameters version](https://huggingface.co/facebook/wav2vec2-xls-r-2b) - -To begin with, please note that to use the Common Voice dataset, you -have to accept that **your email address** and **username** are shared with the -mozilla-foundation. To get access to the dataset, please click on "*Access repository*" [here](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0). - -Next, we recommend that you get familiar with the XLS-R model and its capabilities. -In collaboration with [Fairseq's Wav2Vec2 team](https://github.com/pytorch/fairseq/tree/main/examples/wav2vec), -we've written ["Fine-tuning XLS-R for Multi-Lingual ASR with 🤗 Transformers"](https://huggingface.co/blog/fine-tune-xlsr-wav2vec2) which gives an in-detail explanation of how XLS-R functions and how it can be fine-tuned. - -The blog post can also be opened as a google colab notebook, in which the model can be fine-tuned directly. -In this section, we will explain how to fine-tune the model on a local machine. - -1. **Log in** - -To begin with, you should check that you are correctly logged in and that you have `git-lfs` installed so that your fine-tuned model can automatically be uploaded. - -Run: - -```bash -huggingface-cli login -``` - -to log in. It is recommended to log in with your access token that can be found under your Hugging Face profile (icon in the top right corner on [hf.co](http://hf.co/), then Settings -> Access Tokens -> User Access Tokens -> New Token (if you haven't generated one already)). - -You can then copy-paste this token to log in locally. - -2. **Create your model repository** - -First, let's make sure that `git-lfs` is correctly installed. To do so, simply run: - -```bash -git-lfs -v -``` - -The output should show something like `git-lfs/2.13.2 (GitHub; linux amd64; go 1.15.4)`. If your console states that the `git-lfs` command was not found, please make -sure to install it [here](https://git-lfs.github.com/) or simply via: - -```bash -sudo apt-get install git-lfs -``` - -Now you can create your model repository which will contain all relevant files to -reproduce your training. You can either directly create the model repository on the -Hub (Settings -> New Model) or via the CLI. Here we choose to use the CLI instead.
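As a side note (not part of the original instructions), the repository could also be created programmatically rather than via the CLI; a small sketch using the `huggingface_hub` library, with the repository name purely illustrative and assuming you are already logged in:

```python
from huggingface_hub import create_repo

# Creates an (illustrative) model repository under your namespace on the Hub.
repo_url = create_repo("xls-r-ab-test", private=False)
print(repo_url)
```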
- -Assuming that we want to call our model repository *xls-r-ab-test*, we can run the -following command: - -```bash -huggingface-cli repo create xls-r-ab-test -``` - -You can now see the model on the Hub, *e.g.* under https://huggingface.co/hf-test/xls-r-ab-test . - -Let's clone the repository so that we can define our training script inside. - -```bash -git lfs install -git clone https://huggingface.co/hf-test/xls-r-ab-test -``` - -3. **Add your training script and `run`-command to the repository** - -We encourage participants to add all relevant files for training directly to the -directory so that everything is fully reproducible. - -Let's first copy-paste the official training script from our clone -of `transformers` to our just created directory: - -```bash -cp ~/transformers/examples/pytorch/speech-recognition/run_speech_recognition_ctc.py ./ -``` - -Next, we'll create a bash file to define the hyper-parameters and configurations -for training. More detailed information on different settings (single-GPU vs. multi-GPU) can be found [here](https://github.com/huggingface/transformers/tree/main/examples/pytorch/speech-recognition#connectionist-temporal-classification). - -For demonstration purposes, we will use a dummy XLS-R model `model_name_or_path="hf-test/xls-r-dummy"` on the very low-resource language of "Abkhaz" of [Common Voice 7](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0): `dataset_config_name="ab"` for just a single epoch. - -Before starting to train, let's make sure we have installed all the required libraries. You might want to run: - -```bash -pip install -r ~/transformers/examples/pytorch/speech-recognition/requirements.txt -``` - -Alright, finally we can define the training script. We'll simply use some -dummy hyper-parameters and configurations for demonstration purposes. - -Note that we add the flag `--use_auth_token` so that datasets requiring access, -such as [Common Voice 7](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0) can be downloaded. In addition, we add the `--push_to_hub` flag to make use of the -[Trainers `push_to-hub` functionality](https://huggingface.co/docs/transformers/main/en/main_classes/trainer#transformers.Trainer.push_to_hub) so that your model will be automatically uploaded to the Hub. - -Let's copy the following code snippet in a file called `run.sh` - -```bash -echo '''python run_speech_recognition_ctc.py \ - --dataset_name="mozilla-foundation/common_voice_7_0" \ - --model_name_or_path="hf-test/xls-r-dummy" \ - --dataset_config_name="ab" \ - --output_dir="./" \ - --overwrite_output_dir \ - --max_steps="10" \ - --per_device_train_batch_size="2" \ - --learning_rate="3e-4" \ - --save_total_limit="1" \ - --eval_strategy="steps" \ - --text_column_name="sentence" \ - --length_column_name="input_length" \ - --save_steps="5" \ - --layerdrop="0.0" \ - --freeze_feature_encoder \ - --gradient_checkpointing \ - --fp16 \ - --group_by_length \ - --push_to_hub \ - --use_auth_token \ - --do_train --do_eval''' > run.sh -``` - -4. **Start training** - -Now all that is left to do is to start training the model by executing the -run file. - -```bash -bash run.sh -``` - -The training should not take more than a couple of minutes. -During the training intermediate saved checkpoints are automatically uploaded to -your model repository as can be seen [on this commit](https://huggingface.co/hf-test/xls-r-ab-test/commit/0eb19a0fca4d7d163997b59663d98cd856022aa6) . 
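To make the effect of such a checkpoint tangible (an editor's sketch, not part of the original guide; the repository name is the dummy one from above, and the transcription will of course be nonsense for a randomly initialized dummy model), greedy CTC decoding with the uploaded model looks roughly like this:

```python
import torch
from datasets import Audio, load_dataset
from transformers import AutoModelForCTC, AutoProcessor

# Illustrative: the dummy repository fine-tuned above.
model_id = "hf-test/xls-r-ab-test"
model = AutoModelForCTC.from_pretrained(model_id)
processor = AutoProcessor.from_pretrained(model_id)

# One Common Voice sample, resampled to the feature extractor's sampling rate.
sample = load_dataset("mozilla-foundation/common_voice_7_0", "ab", split="test", use_auth_token=True)
sample = sample.cast_column("audio", Audio(sampling_rate=16_000))[0]["audio"]

inputs = processor(sample["array"], sampling_rate=16_000, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

# Greedy (argmax) decoding of the CTC logits; this is the step that an
# n-gram-boosted beam search decoder would later replace.
predicted_ids = torch.argmax(logits, dim=-1)
print(processor.batch_decode(predicted_ids)[0])
```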
- -At the end of the training, the [Trainer](https://huggingface.co/docs/transformers/main/en/main_classes/trainer) automatically creates a nice model card and all -relevant files are uploaded. - -5. **Tips for real model training** - -The above steps illustrate how a model can technically be fine-tuned. -However as you can see on the model card [hf-test/xls-r-ab-test](https://huggingface.co/hf-test/xls-r-ab-test), our demonstration has a very poor performance which is -not surprising given that we trained for just 10 steps on a randomly initialized -model. - -For real model training, it is recommended to use one of the actual pre-trained XLS-R models: - -- [300M parameters version](https://huggingface.co/facebook/wav2vec2-xls-r-300m) -- [1B parameters version](https://huggingface.co/facebook/wav2vec2-xls-r-1b) -- [2B parameters version](https://huggingface.co/facebook/wav2vec2-xls-r-2b) - -Also, the hyper-parameters should be carefully chosen depending on the dataset. -As an example, we will fine-tune the 300M parameters model on Swedish on a single -TITAN RTX 24GB GPU. - -The model will be called `"xls-r-300m-sv"`. -Following the above steps we first create the model: - -```bash -huggingface-cli repo create xls-r-300m-sv -``` - -, clone it locally (assuming the `` is `hf-test`) - -```bash -git clone hf-test/xls-r-300m-sv -``` - -, and, define the following hyperparameters for training - -```bash -echo '''python run_speech_recognition_ctc.py \ - --dataset_name="mozilla-foundation/common_voice_7_0" \ - --model_name_or_path="facebook/wav2vec2-xls-r-300m" \ - --dataset_config_name="sv-SE" \ - --output_dir="./" \ - --overwrite_output_dir \ - --num_train_epochs="50" \ - --per_device_train_batch_size="8" \ - --per_device_eval_batch_size="8" \ - --gradient_accumulation_steps="4" \ - --learning_rate="7.5e-5" \ - --warmup_steps="2000" \ - --length_column_name="input_length" \ - --eval_strategy="steps" \ - --text_column_name="sentence" \ - --chars_to_ignore , ? . ! \- \; \: \" “ % ‘ ” � — ’ … – \ - --save_steps="500" \ - --eval_steps="500" \ - --logging_steps="100" \ - --layerdrop="0.0" \ - --activation_dropout="0.1" \ - --save_total_limit="3" \ - --freeze_feature_encoder \ - --feat_proj_dropout="0.0" \ - --mask_time_prob="0.75" \ - --mask_time_length="10" \ - --mask_feature_prob="0.25" \ - --mask_feature_length="64" \ - --gradient_checkpointing \ - --use_auth_token \ - --fp16 \ - --group_by_length \ - --do_train --do_eval \ - --push_to_hub''' > run.sh -``` - -The training takes *ca.* 7 hours and yields a reasonable test word -error rate of 27% as can be seen on the automatically generated [model card](https://huggingface.co/hf-test/xls-r-300m-sv). - -The above-chosen hyperparameters probably work quite well on a range of different -datasets and languages but are by no means optimal. It is up to you to find a good set of -hyperparameters. - - -## How to finetune with OVH cloud - -[![Youtube](https://www.youtube.com/s/desktop/f506bd45/img/favicon_32.png)](https://youtu.be/XkMnYocAEO0) For a more detailed guide on setting up OVHcloud please watch this video: https://youtu.be/XkMnYocAEO0 - -### Creating an OVHCloud account -*TIP*: If you haven't created a project on OVHcloud yet, make sure you've received your GPU voucher code *beforehand*, -so that you can skip entering the credit card information. -1. If you're a US citizen, create an account via [OVHcloud.CA](https://ovhcloud.ca/). -If you're from anywhere else in the world, create an account via [OVHcloud.COM](https://ovhcloud.com/). -2. 
Once logged in, click `Public Cloud` from the top menu and then click `Create your first OVH Public Cloud project`. -Then enter a project name (e.g. "huggingface"), enter your voucher code, and click `Continue` -> `Create my project`. -*Note: if you see a request for credit card details during the last step, and you can't skip it, then your voucher code -is invalid. Please report it to the [#ovh-support](https://discord.gg/p4qqDV3M) channel on Discord.* - -### Setting up an AI notebook -1. Go to the `Public Cloud` page and select `Project Management` -> `Users & Roles` from the menu on the left. -2. Click `+ Add user`. Write a user description (e.g. `AI Trainer`), and select an `AI Training Operator` user role. -Click `Confirm`. -3. Write down the *username* and *password* (at the top of the screen) somewhere. They will be needed during step 7. -4. Select `AI & Machine Learning` -> `AI Training` from the menu on the left. -Click `+ Launch a new job` on the AI Training page. -5. On the `Launch a new job` page: - * In `1. Choose a region` select a region closest to you. - * In `2. Enter the Docker image` select `Custom image` -> `baaastijn/ovh_huggingface`. - * You can skip steps `3.` and `4.` if you will be using the Hugging Face Hub to store the models after training. - * In `5. Configure your job` select **1** `GPU`. - * Validate the info and Create the job. -6. On the `AI Training Jobs` screen wait until the job's status changes from `Pending` to `Running`. -7. Click `HTTP Access` from the Job's details page and log in with the AI training user you've created earlier. -Once logged in, you can close the page and click `HTTP Access` to launch a JupyterLab notebook. -8. Awesome, now you have a free GPU-enabled Jupyter instance! - -**Note**: If you're an experienced Docker user, feel free to create a custom docker image with all of the needed packages -like the one in step 5. The Dockerfile for it is available here: -[baaastijn/Dockerimages](https://github.com/baaastijn/Dockerimages/tree/main/Hugginface_challenge_speech). -Once you've built your image, push it to https://hub.docker.com/ and select it during the OVHcloud job creation. - -For more quick tutorials about OVHcloud AI products, check out the showcase https://vimeo.com/showcase/8903300 - -## How to combine n-gram with acoustic model - -Having trained a speech recognition model with CTC as shown in the section above, -one can further improve the model's performance by adding an **n-gram language model** -to the decoding process of the model. By doing so, we are replacing the naive greedy decoding -with **n-gram-boosted** beam search decoding. - -N-gram language models can be built on CPU in just a few minutes. *N-gram-boosted* beam search decoding noticeably slows down the -inference time, but also yields significant word error rates improvements - usually between 10-40 %. - -You can find an in-detail blog post on how to build an *n-gram* [here](https://huggingface.co/blog/wav2vec2-with-ngram). -The blog post can be opened in a google colab and by adapting three lines of the example for your use case, one can directly -create an *n-gram* in the google colab. -The blog post gives in-detail instructions on how to build an n-gram and how to add it to your trained speech recognition model. 
- -- why one should add an *n-gram* to her/his speech recognition system, -- how to build an *n-gram*, and, -- how to add the built *n-gram* to the speech recognition system for seamless decoding - -Our previously trained model - [xls-r-300m-sv](https://huggingface.co/hf-test/xls-r-300m-sv) - enjoys a 30% word error rate reduction after -having added an n-gram. As shown in the example of the blog post, we strongly advise participants to upload all files required for combining -the *n-gram* with a trained speech recognition model directly into the same model repository. - -## Evaluation - -Finally, we have arrived at the most fun part of the challenge - sitting back and -watching the model transcribe audio. If possible, every participant should evaluate -the speech recognition system on the test set of Common Voice 7 and -ideally also on the real-world audio data (if available). -For languages that have neither a Common Voice evaluation dataset nor a real-world -evaluation dataset, please contact the organizers on Discord so that we can work -together to find some evaluation data. - -As a first step, one should copy the official `eval.py` script to her/his model -repository. Let's use our previously trained [xls-r-300m-sv](https://huggingface.co/hf-test/xls-r-300m-sv) again as an example. - -Assuming that we have a clone of the model's repo under `~/xls-r-300m-sv`, we can -copy the `eval.py` script to the repo. - -```bash -cp ~/transformers/examples/research_projects/robust-speech-event/eval.py ~/xls-r-300m-sv -``` - -Next, we should adapt `eval.py` so that it fits our evaluation data. Here it is -important to keep the `eval.py` file in the following format: - -- 1. The following input arguments should not be changed and should keep their original functionality/meaning (being to load the model and dataset): `"--model_id"`, `"--dataset"`, `"--config"`, `"--split"`. We recommend not changing any of the code written under `if __name__ == "__main__":`. -- 2. The function `def log_results(result: Dataset, args: Dict[str, str])` should also not be changed. The function expects the above names attached to the `args` object as well as a `datasets.Dataset` object called `result`, which includes all predictions and target transcriptions under the names `"predictions"` and `"targets"` respectively. -- 3. All other code can be changed and adapted. Participants are especially invited to change the `def normalize_text(text: str) -> str:` function as this might be a very language and model-training specific function. -- 4. **Important**: It is not allowed to "cheat" in any way when it comes to pre- and postprocessing. In short, "cheating" refers to any of the following: - - a. Somehow giving the model access to the target transcriptions to improve performance. The model is not allowed to use the target transcriptions to generate its predictions. - - b. Pre-processing the target transcriptions in a way that makes the target transcriptions lose their original meaning. This corresponds to what has already been said in [Data and Preprocessing](#data-and-preprocessing) and is somewhat of a grey zone. It means that one should not remove characters that would make a word lose its meaning. E.g., it is not allowed to replace all `e` in English with `i` and simply make the model learn that `e` and `i` are the same letter for a better word error rate. This would destroy the meaning of words such as `fell -> fill`. However, it is totally fine to normalize (*e.g.* lowercase) all letters and remove punctuation.
There can be a lot of language-specific exceptions and in case you are not sure whether your target transcription pre-processing is allowed, please ask on the Discord channel. - -Uff, that was a lot of text describing how to make sure your `eval.py` script -is in the correct format. If you have any questions, please ask openly in Discord. - -Great, now that we have adapted the `eval.py` script, we can lean back and run the -evaluation. -First, one should evaluate the model on Common Voice 7's test data. This might -already have been done for your acoustic model during training but in case you -added an *n-gram* language model after having fine-tuned the acoustic model, you -should now see a nice improvement. - -The command to evaluate our test model [xls-r-300m-sv](https://huggingface.co/hf-test/xls-r-300m-sv) on Common Voice 7's test data is the following: - -```bash -cd xls-r-300m-sv -./eval.py --model_id ./ --dataset mozilla-foundation/common_voice_7_0 --config sv-SE --split test --log_outputs -``` - -To log each of the model's predictions with the target transcriptions, you can just -add the `--log_outputs` flag. - -Running this command should automatically create the file: -`mozilla-foundation_common_voice_7_0_sv-SE_test_eval_results.txt` that contains -both the word- and character error rate. - -In a few days, we will give everybody access to some real-world audio data for as many languages as possible. -If your language has real-world audio data, it will most likely have audio input -of multiple minutes. 🤗 Transformers' [ASR pipeline](https://huggingface.co/docs/transformers/main/en/main_classes/pipelines#transformers.AutomaticSpeechRecognitionPipeline) supports audio chunking out-of-the-box. You only need to specify -how long each audio chunk should be (`chunk_length_s`) and how much audio stride -(`stride_length_s`) each chunk should use. -For more information on how the chunking works, please have a look at [this nice blog post](TODO: ). - -In the case of `xls-r-300m-sv`, the following command can be run: - -```bash -cd xls-r-300m-sv -./eval.py --model_id hf-test/xls-r-300m-sv --dataset <dataset-name> --config sv --split validation --chunk_length_s 5.0 --stride_length_s 1.0 --log_outputs -``` - -Great, now you should have successfully evaluated your model. Finally, there is one -**important** thing you should do so that your model is taken into account -for the final evaluation. You should add two tags to your model, one being `robust-speech-event`, and one being the ISO code of your chosen language, *e.g.* `"sv"` for the -exemplary model we used above. You can find a list of all available languages and -their ISO code [here](https://huggingface.co/languages). - -To add the tags, simply edit the README.md of your model repository and add - -``` -- "sv" -- "robust-speech-event" -``` - -under `tags:` as done [here](https://huggingface.co/hf-test/xls-r-300m-sv/commit/a495fd70c96bb7d019729be9273a265c2557345e). - -To verify that you've added the tags correctly, make sure that your model -appears when clicking on [this link](https://huggingface.co/models?other=robust-speech-event). - -Great, that's it! This should give you all the necessary information to evaluate -your model. For the final evaluation, we will verify each evaluation result to -determine the final score and thereby the winning models for each language.
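Relating to the chunked inference described above, the same `chunk_length_s`/`stride_length_s` settings can also be used directly from Python. This is an illustrative sketch only, not part of the original README; the audio path is hypothetical and the model id is the Swedish example model used throughout this section:

```python
from transformers import pipeline

# Split long recordings into 5-second chunks with 1 second of stride,
# mirroring the flags passed to eval.py above.
asr = pipeline(
    "automatic-speech-recognition",
    model="hf-test/xls-r-300m-sv",
    chunk_length_s=5.0,
    stride_length_s=1.0,
)

# Hypothetical path to a multi-minute audio file.
print(asr("path/to/long_recording.wav")["text"])
```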
- -The final score is calculated as follows: - -```bash -FINAL_SCORE = 1/3 * WER_Common_Voice_7_test + 1/3 * WER_REAL_AUDIO_DEV + 1/3 * WER_REAL_AUDIO_TEST -``` - -The dataset `WER_REAL_AUDIO_TEST` is hidden and will only be published -at the end of the robust speech challenge. - -If there is no real audio data for your language the final score will be -computed solely based on the Common Voice 7 test dataset. If there is also -no Common Voice 7 test dataset for your language, we will see together how to -score your model - if this is the case, please don't be discouraged. We are -especially excited about speech recognition systems of such low-resource -languages and will make sure that we'll decide on a good approach to evaluating -your model. - -## Prizes - -TODO(Patrick, Omar, ...) - -## Communication and Problems - -If you encounter any problems or have any questions, you should use one of the following platforms -depending on your type of problem. Hugging Face is an "open-source-first" organization meaning -that we'll try to solve all problems in the most public and most transparent way possible so that everybody -in the community profits. - -The following table summarizes what platform to use for which problem. - -- Problem/question/bug with the 🤗 Datasets library that you think is a general problem that also impacts other people, please open an [Issues on Datasets](https://github.com/huggingface/datasets/issues/new?assignees=&labels=bug&template=bug-report.md&title=) and ping @anton-l and @patrickvonplaten. -- Problem/question/bug with the 🤗 Transformers library that you think is a general problem that also impacts other people, please open an [Issues on Transformers](https://github.com/huggingface/transformers/issues/new?assignees=&labels=&template=bug-report.md&title=) and ping @anton-l and @patrickvonplaten. -- Problem/question with a modified, customized training script that is less likely to impact other people, please post your problem/question [on the forum](https://discuss.huggingface.co/) and ping @anton-l and @patrickvonplaten. -- Questions regarding access to the OVHcloud GPU, please ask in the Discord channel **#ovh-support**. -- Other questions regarding the event, rules of the event, or if you are not sure where to post your question, please ask in the Discord channel **#sprint-discussions**. - -## Talks - -We are very excited to be hosting 2 days of talks from Kensho-Technologies, Mozilla's Common Voice, Meta AI Research and Hugging Face. 
- -### Thursday, January 20th - - Speaker | Topic | Time | Video | -|-------------|---------------------------------|------------------------|------------------------| -| Patrick von Platen, Hugging Face | Introduction to Robust Speech Challenge | 4h30pm - 5h00pm UTC | [![Youtube](https://www.youtube.com/s/desktop/f506bd45/img/favicon_32.png)](https://www.youtube.com/watch?v=X9e5Tto-Iuk) -| Raymond Grossman and Jeremy Lopez, Kensho-Technologies | Pyctcdecode & Speech2text decoding | 5h30pm - 6h00pm UTC | [![Youtube](https://www.youtube.com/s/desktop/f506bd45/img/favicon_32.png)](https://www.youtube.com/watch?v=mp7fHMTnK9A) - -### Friday, January 21th - - Speaker | Topic | Time | Video | -|-------------|---------------------------------|------------------------|------------------------| -| Gabriel Habayeb, Mozilla Common Voice | Unlocking global speech with Mozilla Common Voice | 4h30pm - 5h00pm UTC | [![Youtube](https://www.youtube.com/s/desktop/f506bd45/img/favicon_32.png)](https://www.youtube.com/watch?v=Vvn984QmAVg) -| Changhan Wang, Meta AI Research | XLS-R: Large-Scale Cross-lingual Speech Representation Learning on 128 Languages | 5h30pm - 6h00pm UTC | [![Youtube](https://www.youtube.com/s/desktop/f506bd45/img/favicon_32.png)](https://www.youtube.com/watch?v=ic_J7ZCROBM) - -### Talks & Speakers - -#### Patrick von Platen, Research Engineer, Hugging Face -- Talk: Introduction to Robust Speech Challenge -- Abstract: In this talk, Patrick outlines the Robust Speech Challenge and gives tips and tricks on how to train and evaluate speech recognition systems with 🤗 Transformers and 🤗 Datasets, and PyTorch. -- Speaker info: Patrick von Platen is a research engineer at Hugging Face and one of the core maintainers of the popular Transformers library. He specializes in speech recognition, encoder-decoder models, and long-range sequence modeling. Before joining Hugging Face, Patrick researched speech recognition at Uber AI, Cambridge University, and RWTH Aachen University. - -#### Raymond Grossman, Jeremy Lopez, Machine Learning Engineer, Kensho Technologies -- Talk: PyCTCDecode & Speech2text decoding -- Abstract: PyCTCDecode is a fast and feature-rich CTC beam search decoder for speech recognition written in Python, providing n-gram (kenlm) language model support similar to PaddlePaddle's decoder, but incorporating many new features such as byte pair encoding and real-time decoding to support models like Nvidia's Conformer-CTC or Facebook's Wav2Vec2. -- Speaker info : - - Raymond works as a machine learning engineer at Kensho Technologies, specializing in speech and natural language domains. Before coming to Kensho, he studied mathematics at Princeton and was an avid Kaggler under the moniker @ToTrainThemIsMyCause. - - Jeremy is a machine learning engineer at Kensho Technologies and has worked on a variety of different topics including search and speech recognition. Before working at Kensho, he earned a PhD in experimental particle physics at MIT and continued doing physics research as a postdoc at the University of Colorado Boulder. - -#### Gabriel Habayeb, Data Engineer, Common Voice @ Mozilla -- Talk: Unlocking global speech with Mozilla Common Voice -- Abstract: Hear from Common Voice Data Engineer Gabriel Habayeb (Mozilla Foundation) as he talks about how Common Voice makes it easy to crowdsource voice data in global languages, as well as getting key insights into the dataset itself, how we maintain quality, use metadata - and our plans for the future! 
-- Speaker info: Gabriel is a software developer with the Common Voice team at the Mozilla Foundation with a focus on data engineering. Before joining the Foundation, he spent the last six years working across different industries, including education, enterprise and not-for-profit organizations. - -#### Changhan Wang, Main author of XLS-R and Research Engineer, Meta AI Research -- Talk: XLS-R: Large-Scale Cross-lingual Speech Representation Learning on 128 Languages -- Abstract: In this talk, Changhan will present XLS-R, a large-scale model for cross-lingual speech representation learning based on wav2vec 2.0. XLS-R has up to 2B parameters and was trained on nearly half a million hours of publicly available speech audio in 128 languages, an order of magnitude more public data than the largest known prior work. On the CoVoST-2 speech translation benchmark, XLS-R improves the previous state of the art by an average of 7.4 BLEU over 21 translation directions into English. For speech recognition, XLS-R improves over the best known prior work on BABEL, MLS, CommonVoice as well as VoxPopuli, lowering error rates by 14-34% relative on average. XLS-R also sets a new state of the art on VoxLingua107 language identification. The XLS-R team hopes to work together with the open-source community to improve speech processing tasks for many more languages of the world. - -## General Tips and Tricks - -- Memory efficient training: - -In case, you are getting out-of-memory errors on your GPU, we recommend to use -[bitsandbytes](https://github.com/TimDettmers/bitsandbytes) to replace the -native memory-intensive Adam optimizer with the one of `bitsandbytes`. You -can simply run the script `./run_speech_recognition_ctc_bnb.py` provided in this -folder that makes use of `bitsandbytes` instead of the official one. - -- Dataset streaming - -TODO(Patrick) diff --git a/examples/research_projects/robust-speech-event/eval.py b/examples/research_projects/robust-speech-event/eval.py deleted file mode 100755 index b6c89a6d49f..00000000000 --- a/examples/research_projects/robust-speech-event/eval.py +++ /dev/null @@ -1,136 +0,0 @@ -#!/usr/bin/env python3 -import argparse -import re -from typing import Dict - -import torch -from datasets import Audio, Dataset, load_dataset, load_metric - -from transformers import AutoFeatureExtractor, pipeline - - -def log_results(result: Dataset, args: Dict[str, str]): - """DO NOT CHANGE. This function computes and logs the result metrics.""" - - log_outputs = args.log_outputs - dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split]) - - # load metric - wer = load_metric("wer") - cer = load_metric("cer") - - # compute metrics - wer_result = wer.compute(references=result["target"], predictions=result["prediction"]) - cer_result = cer.compute(references=result["target"], predictions=result["prediction"]) - - # print & log results - result_str = f"WER: {wer_result}\nCER: {cer_result}" - print(result_str) - - with open(f"{dataset_id}_eval_results.txt", "w") as f: - f.write(result_str) - - # log all results in text file. 
Possibly interesting for analysis - if log_outputs is not None: - pred_file = f"log_{dataset_id}_predictions.txt" - target_file = f"log_{dataset_id}_targets.txt" - - with open(pred_file, "w") as p, open(target_file, "w") as t: - # mapping function to write output - def write_to_file(batch, i): - p.write(f"{i}" + "\n") - p.write(batch["prediction"] + "\n") - t.write(f"{i}" + "\n") - t.write(batch["target"] + "\n") - - result.map(write_to_file, with_indices=True) - - -def normalize_text(text: str) -> str: - """DO ADAPT FOR YOUR USE CASE. this function normalizes the target text.""" - - chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]' # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training - - text = re.sub(chars_to_ignore_regex, "", text.lower()) - - # In addition, we can normalize the target text, e.g. removing new lines characters etc... - # note that order is important here! - token_sequences_to_ignore = ["\n\n", "\n", " ", " "] - - for t in token_sequences_to_ignore: - text = " ".join(text.split(t)) - - return text - - -def main(args): - # load dataset - dataset = load_dataset(args.dataset, args.config, split=args.split, token=True) - - # for testing: only process the first two examples as a test - # dataset = dataset.select(range(10)) - - # load processor - feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id) - sampling_rate = feature_extractor.sampling_rate - - # resample audio - dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate)) - - # load eval pipeline - if args.device is None: - args.device = 0 if torch.cuda.is_available() else -1 - asr = pipeline("automatic-speech-recognition", model=args.model_id, device=args.device) - - # map function to decode audio - def map_to_pred(batch): - prediction = asr( - batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s - ) - - batch["prediction"] = prediction["text"] - batch["target"] = normalize_text(batch["sentence"]) - return batch - - # run inference on all examples - result = dataset.map(map_to_pred, remove_columns=dataset.column_names) - - # compute and log_results - # do not change function below - log_results(result, args) - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - - parser.add_argument( - "--model_id", type=str, required=True, help="Model identifier. Should be loadable with 🤗 Transformers" - ) - parser.add_argument( - "--dataset", - type=str, - required=True, - help="Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets", - ) - parser.add_argument( - "--config", type=str, required=True, help="Config of the dataset. *E.g.* `'en'` for Common Voice" - ) - parser.add_argument("--split", type=str, required=True, help="Split of the dataset. *E.g.* `'test'`") - parser.add_argument( - "--chunk_length_s", type=float, default=None, help="Chunk length in seconds. Defaults to 5 seconds." - ) - parser.add_argument( - "--stride_length_s", type=float, default=None, help="Stride of the audio chunks. Defaults to 1 second." - ) - parser.add_argument( - "--log_outputs", action="store_true", help="If defined, write outputs to log file for analysis." - ) - parser.add_argument( - "--device", - type=int, - default=None, - help="The device to run the pipeline on. 
-1 for CPU (default), 0 for the first GPU and so on.", - ) - args = parser.parse_args() - - main(args) diff --git a/examples/research_projects/robust-speech-event/run_speech_recognition_ctc_bnb.py b/examples/research_projects/robust-speech-event/run_speech_recognition_ctc_bnb.py deleted file mode 100755 index cb489ea28d6..00000000000 --- a/examples/research_projects/robust-speech-event/run_speech_recognition_ctc_bnb.py +++ /dev/null @@ -1,779 +0,0 @@ -#!/usr/bin/env python -# coding=utf-8 -# Copyright 2021 The HuggingFace Inc. team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and - -"""Fine-tuning a 🤗 Transformers CTC model for automatic speech recognition""" - -import functools -import json -import logging -import os -import re -import sys -import warnings -from dataclasses import dataclass, field -from typing import Dict, List, Optional, Union - -import bitsandbytes as bnb -import datasets -import numpy as np -import torch -from datasets import DatasetDict, load_dataset, load_metric - -import transformers -from transformers import ( - AutoConfig, - AutoFeatureExtractor, - AutoModelForCTC, - AutoProcessor, - AutoTokenizer, - HfArgumentParser, - Trainer, - TrainingArguments, - Wav2Vec2Processor, - set_seed, -) -from transformers.trainer_pt_utils import get_parameter_names -from transformers.trainer_utils import get_last_checkpoint, is_main_process -from transformers.utils import check_min_version -from transformers.utils.versions import require_version - - -# Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.16.0.dev0") - -require_version("datasets>=1.13.3", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt") - - -logger = logging.getLogger(__name__) - - -def list_field(default=None, metadata=None): - return field(default_factory=lambda: default, metadata=metadata) - - -@dataclass -class ModelArguments: - """ - Arguments pertaining to which model/config/tokenizer we are going to fine-tune from. 
- """ - - model_name_or_path: str = field( - metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} - ) - tokenizer_name_or_path: Optional[str] = field( - default=None, - metadata={"help": "Path to pretrained tokenizer or tokenizer identifier from huggingface.co/models"}, - ) - cache_dir: Optional[str] = field( - default=None, - metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, - ) - freeze_feature_encoder: bool = field( - default=True, metadata={"help": "Whether to freeze the feature encoder layers of the model."} - ) - attention_dropout: float = field( - default=0.0, metadata={"help": "The dropout ratio for the attention probabilities."} - ) - activation_dropout: float = field( - default=0.0, metadata={"help": "The dropout ratio for activations inside the fully connected layer."} - ) - feat_proj_dropout: float = field(default=0.0, metadata={"help": "The dropout ratio for the projected features."}) - hidden_dropout: float = field( - default=0.0, - metadata={ - "help": "The dropout probability for all fully connected layers in the embeddings, encoder, and pooler." - }, - ) - final_dropout: float = field( - default=0.0, - metadata={"help": "The dropout probability for the final projection layer."}, - ) - mask_time_prob: float = field( - default=0.05, - metadata={ - "help": ( - "Probability of each feature vector along the time axis to be chosen as the start of the vector " - "span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature " - "vectors will be masked along the time axis." - ) - }, - ) - mask_time_length: int = field( - default=10, - metadata={"help": "Length of vector span to mask along the time axis."}, - ) - mask_feature_prob: float = field( - default=0.0, - metadata={ - "help": ( - "Probability of each feature vector along the feature axis to be chosen as the start of the vectorspan" - " to be masked. Approximately ``mask_feature_prob * sequence_length // mask_feature_length`` feature" - " bins will be masked along the time axis." - ) - }, - ) - mask_feature_length: int = field( - default=10, - metadata={"help": "Length of vector span to mask along the feature axis."}, - ) - layerdrop: float = field(default=0.0, metadata={"help": "The LayerDrop probability."}) - ctc_loss_reduction: Optional[str] = field( - default="mean", metadata={"help": "The way the ctc loss should be reduced. Should be one of 'mean' or 'sum'."} - ) - - -@dataclass -class DataTrainingArguments: - """ - Arguments pertaining to what data we are going to input our model for training and eval. - - Using `HfArgumentParser` we can turn this class - into argparse arguments to be able to specify them on - the command line. - """ - - dataset_name: str = field( - metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} - ) - dataset_config_name: str = field( - default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} - ) - train_split_name: str = field( - default="train+validation", - metadata={ - "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'" - }, - ) - eval_split_name: str = field( - default="test", - metadata={ - "help": "The name of the training data set split to use (via the datasets library). 
Defaults to 'train'" - }, - ) - audio_column_name: str = field( - default="audio", - metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"}, - ) - text_column_name: str = field( - default="text", - metadata={"help": "The name of the dataset column containing the text data. Defaults to 'text'"}, - ) - overwrite_cache: bool = field( - default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."} - ) - preprocessing_num_workers: Optional[int] = field( - default=None, - metadata={"help": "The number of processes to use for the preprocessing."}, - ) - max_train_samples: Optional[int] = field( - default=None, - metadata={ - "help": ( - "For debugging purposes or quicker training, truncate the number of training examples to this " - "value if set." - ) - }, - ) - max_eval_samples: Optional[int] = field( - default=None, - metadata={ - "help": ( - "For debugging purposes or quicker training, truncate the number of validation examples to this " - "value if set." - ) - }, - ) - chars_to_ignore: Optional[List[str]] = list_field( - default=None, - metadata={"help": "A list of characters to remove from the transcripts."}, - ) - eval_metrics: List[str] = list_field( - default=["wer"], - metadata={"help": "A list of metrics the model should be evaluated on. E.g. `'wer cer'`"}, - ) - max_duration_in_seconds: float = field( - default=20.0, - metadata={ - "help": ( - "Filter audio files that are longer than `max_duration_in_seconds` seconds to" - " 'max_duration_in_seconds`" - ) - }, - ) - min_duration_in_seconds: float = field( - default=0.0, metadata={"help": "Filter audio files that are shorter than `min_duration_in_seconds` seconds"} - ) - preprocessing_only: bool = field( - default=False, - metadata={ - "help": ( - "Whether to only do data preprocessing and skip training. This is especially useful when data" - " preprocessing errors out in distributed training due to timeout. In this case, one should run the" - " preprocessing in a non-distributed setup with `preprocessing_only=True` so that the cached datasets" - " can consequently be loaded in distributed training" - ) - }, - ) - use_auth_token: bool = field( - default=False, - metadata={ - "help": ( - "If :obj:`True`, will use the token generated when running" - ":obj:`huggingface-cli login` as HTTP bearer authorization for remote files." - ) - }, - ) - unk_token: str = field( - default="[UNK]", - metadata={"help": "The unk token for the tokenizer"}, - ) - pad_token: str = field( - default="[PAD]", - metadata={"help": "The padding token for the tokenizer"}, - ) - word_delimiter_token: str = field( - default="|", - metadata={"help": "The word delimiter token for the tokenizer"}, - ) - phoneme_language: Optional[str] = field( - default=None, - metadata={ - "help": ( - "The target language that should be used be" - " passed to the tokenizer for tokenization. Note that" - " this is only relevant if the model classifies the" - " input audio to a sequence of phoneme sequences." - ) - }, - ) - - -@dataclass -class DataCollatorCTCWithPadding: - """ - Data collator that will dynamically pad the inputs received. - Args: - processor (:class:`~transformers.AutoProcessor`) - The processor used for processing the data. 
- padding (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults to :obj:`True`): - Select a strategy to pad the returned sequences (according to the model's padding side and padding index) - among: - * :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single - sequence if provided). - * :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the - maximum acceptable input length for the model if that argument is not provided. - * :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of - different lengths). - max_length (:obj:`int`, `optional`): - Maximum length of the ``input_values`` of the returned list and optionally padding length (see above). - max_length_labels (:obj:`int`, `optional`): - Maximum length of the ``labels`` returned list and optionally padding length (see above). - pad_to_multiple_of (:obj:`int`, `optional`): - If set will pad the sequence to a multiple of the provided value. - This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >= - 7.5 (Volta). - """ - - processor: AutoProcessor - padding: Union[bool, str] = "longest" - pad_to_multiple_of: Optional[int] = None - pad_to_multiple_of_labels: Optional[int] = None - - def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]: - # split inputs and labels since they have to be of different lengths and need - # different padding methods - input_features = [{"input_values": feature["input_values"]} for feature in features] - label_features = [{"input_ids": feature["labels"]} for feature in features] - - batch = self.processor.pad( - input_features, - padding=self.padding, - pad_to_multiple_of=self.pad_to_multiple_of, - return_tensors="pt", - ) - - labels_batch = self.processor.pad( - labels=label_features, - padding=self.padding, - pad_to_multiple_of=self.pad_to_multiple_of_labels, - return_tensors="pt", - ) - - # replace padding with -100 to ignore loss correctly - labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100) - - batch["labels"] = labels - - return batch - - -def create_vocabulary_from_data( - datasets: DatasetDict, - word_delimiter_token: Optional[str] = None, - unk_token: Optional[str] = None, - pad_token: Optional[str] = None, -): - # Given training and test labels create vocabulary - def extract_all_chars(batch): - all_text = " ".join(batch["target_text"]) - vocab = list(set(all_text)) - return {"vocab": [vocab], "all_text": [all_text]} - - vocabs = datasets.map( - extract_all_chars, - batched=True, - batch_size=-1, - keep_in_memory=True, - remove_columns=datasets["train"].column_names, - ) - - # take union of all unique characters in each dataset - vocab_set = functools.reduce( - lambda vocab_1, vocab_2: set(vocab_1["vocab"][0]) | set(vocab_2["vocab"][0]), vocabs.values() - ) - - vocab_dict = {v: k for k, v in enumerate(sorted(vocab_set))} - - # replace white space with delimiter token - if word_delimiter_token is not None: - vocab_dict[word_delimiter_token] = vocab_dict[" "] - del vocab_dict[" "] - - # add unk and pad token - if unk_token is not None: - vocab_dict[unk_token] = len(vocab_dict) - - if pad_token is not None: - vocab_dict[pad_token] = len(vocab_dict) - - return vocab_dict - - -def main(): - # See all possible arguments in src/transformers/training_args.py - # or by passing the 
--help flag to this script. - # We now keep distinct sets of args, for a cleaner separation of concerns. - - parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) - if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): - # If we pass only one argument to the script and it's the path to a json file, - # let's parse it to get our arguments. - model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) - else: - model_args, data_args, training_args = parser.parse_args_into_dataclasses() - - # Detecting last checkpoint. - last_checkpoint = None - if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: - last_checkpoint = get_last_checkpoint(training_args.output_dir) - if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: - raise ValueError( - f"Output directory ({training_args.output_dir}) already exists and is not empty. " - "Use --overwrite_output_dir to overcome." - ) - elif last_checkpoint is not None: - logger.info( - f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " - "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." - ) - - # Setup logging - logging.basicConfig( - format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", - datefmt="%m/%d/%Y %H:%M:%S", - handlers=[logging.StreamHandler(sys.stdout)], - ) - logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN) - - # Log on each process the small summary: - logger.warning( - f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, " - f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}" - ) - # Set the verbosity to info of the Transformers logger (on main process only): - if is_main_process(training_args.local_rank): - transformers.utils.logging.set_verbosity_info() - logger.info("Training/evaluation parameters %s", training_args) - - # Set seed before initializing model. - set_seed(training_args.seed) - - # 1. First, let's load the dataset - raw_datasets = DatasetDict() - - if training_args.do_train: - raw_datasets["train"] = load_dataset( - data_args.dataset_name, - data_args.dataset_config_name, - split=data_args.train_split_name, - token=data_args.use_auth_token, - ) - - if data_args.audio_column_name not in raw_datasets["train"].column_names: - raise ValueError( - f"--audio_column_name '{data_args.audio_column_name}' not found in dataset '{data_args.dataset_name}'." - " Make sure to set `--audio_column_name` to the correct audio column - one of" - f" {', '.join(raw_datasets['train'].column_names)}." - ) - - if data_args.text_column_name not in raw_datasets["train"].column_names: - raise ValueError( - f"--text_column_name {data_args.text_column_name} not found in dataset '{data_args.dataset_name}'. " - "Make sure to set `--text_column_name` to the correct text column - one of " - f"{', '.join(raw_datasets['train'].column_names)}." 
- ) - - if data_args.max_train_samples is not None: - raw_datasets["train"] = raw_datasets["train"].select(range(data_args.max_train_samples)) - - if training_args.do_eval: - raw_datasets["eval"] = load_dataset( - data_args.dataset_name, - data_args.dataset_config_name, - split=data_args.eval_split_name, - token=data_args.use_auth_token, - ) - - if data_args.max_eval_samples is not None: - raw_datasets["eval"] = raw_datasets["eval"].select(range(data_args.max_eval_samples)) - - # 2. We remove some special characters from the datasets - # that make training complicated and do not help in transcribing the speech - # E.g. characters, such as `,` and `.` do not really have an acoustic characteristic - # that could be easily picked up by the model - chars_to_ignore_regex = ( - f'[{"".join(data_args.chars_to_ignore)}]' if data_args.chars_to_ignore is not None else None - ) - text_column_name = data_args.text_column_name - - def remove_special_characters(batch): - if chars_to_ignore_regex is not None: - batch["target_text"] = re.sub(chars_to_ignore_regex, "", batch[text_column_name]).lower() + " " - else: - batch["target_text"] = batch[text_column_name].lower() + " " - return batch - - with training_args.main_process_first(desc="dataset map special characters removal"): - raw_datasets = raw_datasets.map( - remove_special_characters, - remove_columns=[text_column_name], - desc="remove special characters from datasets", - ) - - # save special tokens for tokenizer - word_delimiter_token = data_args.word_delimiter_token - unk_token = data_args.unk_token - pad_token = data_args.pad_token - - # 3. Next, let's load the config as we might need it to create - # the tokenizer - # load config - config = AutoConfig.from_pretrained( - model_args.model_name_or_path, cache_dir=model_args.cache_dir, token=data_args.use_auth_token - ) - - # 4. Next, if no tokenizer file is defined, - # we create the vocabulary of the model by extracting all unique characters from - # the training and evaluation datasets - # We need to make sure that only first rank saves vocabulary - # make sure all processes wait until vocab is created - tokenizer_name_or_path = model_args.tokenizer_name_or_path - tokenizer_kwargs = {} - if tokenizer_name_or_path is None: - # save vocab in training output dir - tokenizer_name_or_path = training_args.output_dir - - vocab_file = os.path.join(tokenizer_name_or_path, "vocab.json") - - with training_args.main_process_first(): - if training_args.overwrite_output_dir and os.path.isfile(vocab_file): - os.remove(vocab_file) - - with training_args.main_process_first(desc="dataset map vocabulary creation"): - if not os.path.isfile(vocab_file): - os.makedirs(tokenizer_name_or_path, exist_ok=True) - vocab_dict = create_vocabulary_from_data( - raw_datasets, - word_delimiter_token=word_delimiter_token, - unk_token=unk_token, - pad_token=pad_token, - ) - - # save vocab dict to be loaded into tokenizer - with open(vocab_file, "w") as file: - json.dump(vocab_dict, file) - - # if tokenizer has just been created - # it is defined by `tokenizer_class` if present in config else by `model_type` - tokenizer_kwargs = { - "config": config if config.tokenizer_class is not None else None, - "tokenizer_type": config.model_type if config.tokenizer_class is None else None, - "unk_token": unk_token, - "pad_token": pad_token, - "word_delimiter_token": word_delimiter_token, - } - - # 5. 
Now we can instantiate the feature extractor, tokenizer and model - # Note for distributed training, the .from_pretrained methods guarantee that only - # one local process can concurrently download model & vocab. - - # load feature_extractor and tokenizer - tokenizer = AutoTokenizer.from_pretrained( - tokenizer_name_or_path, - token=data_args.use_auth_token, - **tokenizer_kwargs, - ) - feature_extractor = AutoFeatureExtractor.from_pretrained( - model_args.model_name_or_path, cache_dir=model_args.cache_dir, token=data_args.use_auth_token - ) - - # adapt config - config.update( - { - "feat_proj_dropout": model_args.feat_proj_dropout, - "attention_dropout": model_args.attention_dropout, - "hidden_dropout": model_args.hidden_dropout, - "final_dropout": model_args.final_dropout, - "mask_time_prob": model_args.mask_time_prob, - "mask_time_length": model_args.mask_time_length, - "mask_feature_prob": model_args.mask_feature_prob, - "mask_feature_length": model_args.mask_feature_length, - "gradient_checkpointing": training_args.gradient_checkpointing, - "layerdrop": model_args.layerdrop, - "ctc_loss_reduction": model_args.ctc_loss_reduction, - "pad_token_id": tokenizer.pad_token_id, - "vocab_size": len(tokenizer), - "activation_dropout": model_args.activation_dropout, - } - ) - - # create model - model = AutoModelForCTC.from_pretrained( - model_args.model_name_or_path, - cache_dir=model_args.cache_dir, - config=config, - token=data_args.use_auth_token, - ) - - # freeze encoder - if model_args.freeze_feature_encoder: - model.freeze_feature_encoder() - - # 6. Now we preprocess the datasets including loading the audio, resampling and normalization - # Thankfully, `datasets` takes care of automatically loading and resampling the audio, - # so that we just need to set the correct target sampling rate and normalize the input - # via the `feature_extractor` - - # make sure that dataset decodes audio with correct sampling rate - dataset_sampling_rate = next(iter(raw_datasets.values())).features[data_args.audio_column_name].sampling_rate - if dataset_sampling_rate != feature_extractor.sampling_rate: - raw_datasets = raw_datasets.cast_column( - data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate) - ) - - # derive max & min input length for sample rate & max duration - max_input_length = data_args.max_duration_in_seconds * feature_extractor.sampling_rate - min_input_length = data_args.min_duration_in_seconds * feature_extractor.sampling_rate - audio_column_name = data_args.audio_column_name - num_workers = data_args.preprocessing_num_workers - - # `phoneme_language` is only relevant if the model is fine-tuned on phoneme classification - phoneme_language = data_args.phoneme_language - - # Preprocessing the datasets. - # We need to read the audio files as arrays and tokenize the targets. 
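    # [Editor's aside, not part of the original script] The duration bounds
    # derived above are expressed in raw sample counts rather than seconds.
    # With a 16 kHz feature extractor and the default max_duration_in_seconds
    # of 20.0, for example, max_input_length = 20.0 * 16_000 = 320_000 samples.
    # `prepare_dataset` below stores len(batch["input_values"]) as
    # "input_length", which is what `is_audio_in_length_range` later compares
    # against these bounds.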
- def prepare_dataset(batch): - # load audio - sample = batch[audio_column_name] - - inputs = feature_extractor(sample["array"], sampling_rate=sample["sampling_rate"]) - batch["input_values"] = inputs.input_values[0] - batch["input_length"] = len(batch["input_values"]) - - # encode targets - additional_kwargs = {} - if phoneme_language is not None: - additional_kwargs["phonemizer_lang"] = phoneme_language - - batch["labels"] = tokenizer(batch["target_text"], **additional_kwargs).input_ids - return batch - - with training_args.main_process_first(desc="dataset map preprocessing"): - vectorized_datasets = raw_datasets.map( - prepare_dataset, - remove_columns=next(iter(raw_datasets.values())).column_names, - num_proc=num_workers, - desc="preprocess datasets", - ) - - def is_audio_in_length_range(length): - return length > min_input_length and length < max_input_length - - # filter data that is shorter than min_input_length - vectorized_datasets = vectorized_datasets.filter( - is_audio_in_length_range, - num_proc=num_workers, - input_columns=["input_length"], - ) - - # 7. Next, we can prepare the training. - # Let's use word error rate (WER) as our evaluation metric, - # instantiate a data collator and the trainer - - # Define evaluation metrics during training, *i.e.* word error rate, character error rate - eval_metrics = {metric: load_metric(metric) for metric in data_args.eval_metrics} - - # for large datasets it is advised to run the preprocessing on a - # single machine first with ``args.preprocessing_only`` since there will mostly likely - # be a timeout when running the script in distributed mode. - # In a second step ``args.preprocessing_only`` can then be set to `False` to load the - # cached dataset - if data_args.preprocessing_only: - logger.info(f"Data preprocessing finished. Files cached at {vectorized_datasets.cache_files}") - return - - def compute_metrics(pred): - pred_logits = pred.predictions - pred_ids = np.argmax(pred_logits, axis=-1) - - pred.label_ids[pred.label_ids == -100] = tokenizer.pad_token_id - - pred_str = tokenizer.batch_decode(pred_ids) - # we do not want to group tokens when computing the metrics - label_str = tokenizer.batch_decode(pred.label_ids, group_tokens=False) - - metrics = {k: v.compute(predictions=pred_str, references=label_str) for k, v in eval_metrics.items()} - - return metrics - - # Now save everything to be able to create a single processor later - if is_main_process(training_args.local_rank): - # save feature extractor, tokenizer and config - feature_extractor.save_pretrained(training_args.output_dir) - tokenizer.save_pretrained(training_args.output_dir) - config.save_pretrained(training_args.output_dir) - - try: - processor = AutoProcessor.from_pretrained(training_args.output_dir) - except (OSError, KeyError): - warnings.warn( - "Loading a processor from a feature extractor config that does not" - " include a `processor_class` attribute is deprecated and will be removed in v5. 
Please add the following " - " attribute to your `preprocessor_config.json` file to suppress this warning: " - " `'processor_class': 'Wav2Vec2Processor'`", - FutureWarning, - ) - processor = Wav2Vec2Processor.from_pretrained(training_args.output_dir) - - # Instantiate custom data collator - data_collator = DataCollatorCTCWithPadding(processor=processor) - - decay_parameters = get_parameter_names(model, [torch.nn.LayerNorm], ["bias", "layernorm", "rmsnorm"]) - optimizer_grouped_parameters = [ - { - "params": [p for n, p in model.named_parameters() if n in decay_parameters], - "weight_decay": training_args.weight_decay, - }, - { - "params": [p for n, p in model.named_parameters() if n not in decay_parameters], - "weight_decay": 0.0, - }, - ] - optimizer = bnb.optim.Adam8bit( - params=optimizer_grouped_parameters, - lr=training_args.learning_rate, - betas=(training_args.adam_beta1, training_args.adam_beta2), - eps=training_args.adam_epsilon, - ) - - optimizers = (optimizer, None) - - # Initialize Trainer - trainer = Trainer( - model=model, - data_collator=data_collator, - args=training_args, - compute_metrics=compute_metrics, - train_dataset=vectorized_datasets["train"] if training_args.do_train else None, - eval_dataset=vectorized_datasets["eval"] if training_args.do_eval else None, - tokenizer=feature_extractor, - optimizers=optimizers, - ) - - # 8. Finally, we can start training - - # Training - if training_args.do_train: - # use last checkpoint if exist - if last_checkpoint is not None: - checkpoint = last_checkpoint - elif os.path.isdir(model_args.model_name_or_path): - checkpoint = model_args.model_name_or_path - else: - checkpoint = None - - train_result = trainer.train(resume_from_checkpoint=checkpoint) - trainer.save_model() - - metrics = train_result.metrics - max_train_samples = ( - data_args.max_train_samples - if data_args.max_train_samples is not None - else len(vectorized_datasets["train"]) - ) - metrics["train_samples"] = min(max_train_samples, len(vectorized_datasets["train"])) - - trainer.log_metrics("train", metrics) - trainer.save_metrics("train", metrics) - trainer.save_state() - - # Evaluation - results = {} - if training_args.do_eval: - logger.info("*** Evaluate ***") - metrics = trainer.evaluate() - max_eval_samples = ( - data_args.max_eval_samples if data_args.max_eval_samples is not None else len(vectorized_datasets["eval"]) - ) - metrics["eval_samples"] = min(max_eval_samples, len(vectorized_datasets["eval"])) - - trainer.log_metrics("eval", metrics) - trainer.save_metrics("eval", metrics) - - # Write model card and (optionally) push to hub - config_name = data_args.dataset_config_name if data_args.dataset_config_name is not None else "na" - kwargs = { - "finetuned_from": model_args.model_name_or_path, - "tasks": "automatic-speech-recognition", - "tags": ["automatic-speech-recognition", data_args.dataset_name], - "dataset_args": ( - f"Config: {config_name}, Training split: {data_args.train_split_name}, Eval split:" - f" {data_args.eval_split_name}" - ), - "dataset": f"{data_args.dataset_name.upper()} - {config_name.upper()}", - } - if "common_voice" in data_args.dataset_name: - kwargs["language"] = config_name - - if training_args.push_to_hub: - trainer.push_to_hub(**kwargs) - else: - trainer.create_model_card(**kwargs) - - return results - - -if __name__ == "__main__": - main() diff --git a/examples/research_projects/robust-speech-event/run_speech_recognition_ctc_streaming.py b/examples/research_projects/robust-speech-event/run_speech_recognition_ctc_streaming.py 
deleted file mode 100644 index 37f91b9ef61..00000000000 --- a/examples/research_projects/robust-speech-event/run_speech_recognition_ctc_streaming.py +++ /dev/null @@ -1,679 +0,0 @@ -#!/usr/bin/env python -# coding=utf-8 -# Copyright 2022 The HuggingFace Inc. team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and - -"""Fine-tuning a 🤗 Transformers CTC model for automatic speech recognition in streaming mode""" - -import logging -import os -import re -import sys -import warnings -from dataclasses import dataclass, field -from typing import Dict, List, Optional, Union - -import datasets -import numpy as np -import torch -from datasets import IterableDatasetDict, interleave_datasets, load_dataset, load_metric -from torch.utils.data import IterableDataset - -import transformers -from transformers import ( - AutoConfig, - AutoFeatureExtractor, - AutoModelForCTC, - AutoProcessor, - AutoTokenizer, - HfArgumentParser, - Trainer, - TrainerCallback, - TrainingArguments, - Wav2Vec2Processor, - set_seed, -) -from transformers.trainer_pt_utils import IterableDatasetShard -from transformers.trainer_utils import get_last_checkpoint, is_main_process -from transformers.utils import check_min_version -from transformers.utils.versions import require_version - - -# Will error if the minimal version of Transformers is not installed. Remove at your own risk. -check_min_version("4.17.0.dev0") - -require_version("datasets>=1.18.2", "To fix: pip install 'datasets>=1.18.2'") - - -logger = logging.getLogger(__name__) - - -def list_field(default=None, metadata=None): - return field(default_factory=lambda: default, metadata=metadata) - - -@dataclass -class ModelArguments: - """ - Arguments pertaining to which model/config/tokenizer we are going to fine-tune from. - """ - - model_name_or_path: str = field( - metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} - ) - tokenizer_name_or_path: Optional[str] = field( - default=None, - metadata={"help": "Path to pretrained tokenizer or tokenizer identifier from huggingface.co/models"}, - ) - cache_dir: Optional[str] = field( - default=None, - metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, - ) - freeze_feature_encoder: bool = field( - default=True, metadata={"help": "Whether to freeze the feature encoder layers of the model."} - ) - attention_dropout: float = field( - default=0.0, metadata={"help": "The dropout ratio for the attention probabilities."} - ) - activation_dropout: float = field( - default=0.0, metadata={"help": "The dropout ratio for activations inside the fully connected layer."} - ) - feat_proj_dropout: float = field(default=0.0, metadata={"help": "The dropout ratio for the projected features."}) - hidden_dropout: float = field( - default=0.0, - metadata={ - "help": "The dropout probability for all fully connected layers in the embeddings, encoder, and pooler." 
- }, - ) - final_dropout: float = field( - default=0.0, - metadata={"help": "The dropout probability for the final projection layer."}, - ) - mask_time_prob: float = field( - default=0.05, - metadata={ - "help": ( - "Probability of each feature vector along the time axis to be chosen as the start of the vector " - "span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature " - "vectors will be masked along the time axis." - ) - }, - ) - mask_time_length: int = field( - default=10, - metadata={"help": "Length of vector span to mask along the time axis."}, - ) - mask_feature_prob: float = field( - default=0.0, - metadata={ - "help": ( - "Probability of each feature vector along the feature axis to be chosen as the start of the vectorspan" - " to be masked. Approximately ``mask_feature_prob * sequence_length // mask_feature_length`` feature" - " bins will be masked along the time axis." - ) - }, - ) - mask_feature_length: int = field( - default=10, - metadata={"help": "Length of vector span to mask along the feature axis."}, - ) - layerdrop: float = field(default=0.0, metadata={"help": "The LayerDrop probability."}) - ctc_loss_reduction: Optional[str] = field( - default="mean", metadata={"help": "The way the ctc loss should be reduced. Should be one of 'mean' or 'sum'."} - ) - - -@dataclass -class DataTrainingArguments: - """ - Arguments pertaining to what data we are going to input our model for training and eval. - - Using `HfArgumentParser` we can turn this class - into argparse arguments to be able to specify them on - the command line. - """ - - dataset_name: str = field( - metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} - ) - dataset_config_name: str = field( - default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} - ) - train_split_name: str = field( - default="train+validation", - metadata={ - "help": ( - "The name of the training data set split to use (via the datasets library). Defaults to " - "'train+validation'" - ) - }, - ) - eval_split_name: str = field( - default="test", - metadata={ - "help": "The name of the training data set split to use (via the datasets library). Defaults to 'test'" - }, - ) - audio_column_name: str = field( - default="audio", - metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"}, - ) - text_column_name: str = field( - default="text", - metadata={"help": "The name of the dataset column containing the text data. Defaults to 'text'"}, - ) - overwrite_cache: bool = field( - default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."} - ) - preprocessing_num_workers: Optional[int] = field( - default=None, - metadata={"help": "The number of processes to use for the preprocessing."}, - ) - max_train_samples: Optional[int] = field( - default=None, - metadata={ - "help": ( - "For debugging purposes or quicker training, truncate the number of training examples to this " - "value if set." - ) - }, - ) - max_eval_samples: Optional[int] = field( - default=None, - metadata={ - "help": ( - "For debugging purposes or quicker training, truncate the number of validation examples to this " - "value if set." - ) - }, - ) - shuffle_buffer_size: Optional[int] = field( - default=500, - metadata={ - "help": ( - "The number of streamed examples to download before shuffling them. The large the buffer, " - "the closer it is to real offline shuffling." 
- ) - }, - ) - chars_to_ignore: Optional[List[str]] = list_field( - default=None, - metadata={"help": "A list of characters to remove from the transcripts."}, - ) - eval_metrics: List[str] = list_field( - default=["wer"], - metadata={"help": "A list of metrics the model should be evaluated on. E.g. `'wer cer'`"}, - ) - max_duration_in_seconds: float = field( - default=20.0, - metadata={"help": "Filter audio files that are longer than `max_duration_in_seconds` seconds."}, - ) - preprocessing_only: bool = field( - default=False, - metadata={ - "help": ( - "Whether to only do data preprocessing and skip training. This is especially useful when data" - " preprocessing errors out in distributed training due to timeout. In this case, one should run the" - " preprocessing in a non-distributed setup with `preprocessing_only=True` so that the cached datasets" - " can consequently be loaded in distributed training" - ) - }, - ) - use_auth_token: bool = field( - default=False, - metadata={ - "help": ( - "If :obj:`True`, will use the token generated when running" - ":obj:`huggingface-cli login` as HTTP bearer authorization for remote files." - ) - }, - ) - phoneme_language: Optional[str] = field( - default=None, - metadata={ - "help": ( - "The target language that should be used be" - " passed to the tokenizer for tokenization. Note that" - " this is only relevant if the model classifies the" - " input audio to a sequence of phoneme sequences." - ) - }, - ) - - -@dataclass -class DataCollatorCTCWithPadding: - """ - Data collator that will dynamically pad the inputs received. - Args: - processor (:class:`~transformers.AutoProcessor`) - The processor used for processing the data. - padding (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults to :obj:`True`): - Select a strategy to pad the returned sequences (according to the model's padding side and padding index) - among: - * :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single - sequence if provided). - * :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the - maximum acceptable input length for the model if that argument is not provided. - * :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of - different lengths). - max_length (:obj:`int`, `optional`): - Maximum length of the ``input_values`` of the returned list and optionally padding length (see above). - max_length_labels (:obj:`int`, `optional`): - Maximum length of the ``labels`` returned list and optionally padding length (see above). - pad_to_multiple_of (:obj:`int`, `optional`): - If set will pad the sequence to a multiple of the provided value. - This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >= - 7.5 (Volta). 
- """ - - processor: AutoProcessor - padding: Union[bool, str] = "longest" - max_length: Optional[int] = None - pad_to_multiple_of: Optional[int] = None - pad_to_multiple_of_labels: Optional[int] = None - - def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]: - # split inputs and labels since they have to be of different lengths and need - # different padding methods - input_features = [] - label_features = [] - for feature in features: - if self.max_length and feature["input_values"].shape[-1] > self.max_length: - continue - input_features.append({"input_values": feature["input_values"]}) - label_features.append({"input_ids": feature["labels"]}) - - batch = self.processor.pad( - input_features, - padding=self.padding, - pad_to_multiple_of=self.pad_to_multiple_of, - return_tensors="pt", - ) - - labels_batch = self.processor.pad( - labels=label_features, - padding=self.padding, - pad_to_multiple_of=self.pad_to_multiple_of_labels, - return_tensors="pt", - ) - - # replace padding with -100 to ignore loss correctly - labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100) - - batch["labels"] = labels - - return batch - - -def main(): - # See all possible arguments in src/transformers/training_args.py - # or by passing the --help flag to this script. - # We now keep distinct sets of args, for a cleaner separation of concerns. - - parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) - if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): - # If we pass only one argument to the script and it's the path to a json file, - # let's parse it to get our arguments. - model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) - else: - model_args, data_args, training_args = parser.parse_args_into_dataclasses() - - # Detecting last checkpoint. - last_checkpoint = None - if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: - last_checkpoint = get_last_checkpoint(training_args.output_dir) - if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: - raise ValueError( - f"Output directory ({training_args.output_dir}) already exists and is not empty. " - "Use --overwrite_output_dir to overcome." - ) - elif last_checkpoint is not None: - logger.info( - f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " - "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." - ) - - # Setup logging - logging.basicConfig( - format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", - datefmt="%m/%d/%Y %H:%M:%S", - handlers=[logging.StreamHandler(sys.stdout)], - ) - logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN) - - # Log on each process the small summary: - logger.warning( - f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, " - f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}" - ) - # Set the verbosity to info of the Transformers logger (on main process only): - if is_main_process(training_args.local_rank): - transformers.utils.logging.set_verbosity_info() - logger.info("Training/evaluation parameters %s", training_args) - - # Set seed before initializing model. - set_seed(training_args.seed) - - # 1. 
First, let's load the dataset - raw_datasets = IterableDatasetDict() - raw_column_names = {} - - def load_streaming_dataset(split, sampling_rate, **kwargs): - if "+" in split: - dataset_splits = [load_dataset(split=split_name, **kwargs) for split_name in split.split("+")] - # `features` and `cast_column` won't be available after interleaving, so we'll use them here - features = dataset_splits[0].features - # make sure that the dataset decodes audio with a correct sampling rate - dataset_splits = [ - dataset.cast_column(data_args.audio_column_name, datasets.features.Audio(sampling_rate=sampling_rate)) - for dataset in dataset_splits - ] - - interleaved_dataset = interleave_datasets(dataset_splits) - return interleaved_dataset, features - else: - dataset = load_dataset(split=split, **kwargs) - features = dataset.features - # make sure that the dataset decodes audio with a correct sampling rate - dataset = dataset.cast_column( - data_args.audio_column_name, datasets.features.Audio(sampling_rate=sampling_rate) - ) - return dataset, features - - # `datasets` takes care of automatically loading and resampling the audio, - # so we just need to set the correct target sampling rate and normalize the input - # via the `feature_extractor` - feature_extractor = AutoFeatureExtractor.from_pretrained( - model_args.model_name_or_path, cache_dir=model_args.cache_dir, token=data_args.use_auth_token - ) - - if training_args.do_train: - raw_datasets["train"], train_features = load_streaming_dataset( - path=data_args.dataset_name, - name=data_args.dataset_config_name, - split=data_args.train_split_name, - token=data_args.use_auth_token, - streaming=True, - sampling_rate=feature_extractor.sampling_rate, - ) - raw_column_names["train"] = list(train_features.keys()) - - if data_args.audio_column_name not in raw_column_names["train"]: - raise ValueError( - f"--audio_column_name '{data_args.audio_column_name}' not found in dataset '{data_args.dataset_name}'." - " Make sure to set `--audio_column_name` to the correct audio column - one of" - f" {', '.join(raw_column_names['train'])}." - ) - - if data_args.text_column_name not in raw_column_names["train"]: - raise ValueError( - f"--text_column_name {data_args.text_column_name} not found in dataset '{data_args.dataset_name}'. " - "Make sure to set `--text_column_name` to the correct text column - one of " - f"{', '.join(raw_column_names['train'])}." - ) - - if data_args.max_train_samples is not None: - raw_datasets["train"] = raw_datasets["train"].take(range(data_args.max_train_samples)) - - if training_args.do_eval: - raw_datasets["eval"], eval_features = load_streaming_dataset( - path=data_args.dataset_name, - name=data_args.dataset_config_name, - split=data_args.eval_split_name, - token=data_args.use_auth_token, - streaming=True, - sampling_rate=feature_extractor.sampling_rate, - ) - raw_column_names["eval"] = list(eval_features.keys()) - - if data_args.max_eval_samples is not None: - raw_datasets["eval"] = raw_datasets["eval"].take(range(data_args.max_eval_samples)) - - # 2. We remove some special characters from the datasets - # that make training complicated and do not help in transcribing the speech - # E.g. 
characters, such as `,` and `.` do not really have an acoustic characteristic - # that could be easily picked up by the model - chars_to_ignore_regex = ( - f'[{"".join(data_args.chars_to_ignore)}]' if data_args.chars_to_ignore is not None else None - ) - text_column_name = data_args.text_column_name - - def remove_special_characters(batch): - if chars_to_ignore_regex is not None: - batch["target_text"] = re.sub(chars_to_ignore_regex, "", batch[text_column_name]).lower() + " " - else: - batch["target_text"] = batch[text_column_name].lower() + " " - return batch - - with training_args.main_process_first(desc="dataset map special characters removal"): - for split, dataset in raw_datasets.items(): - raw_datasets[split] = dataset.map( - remove_special_characters, - ).remove_columns([text_column_name]) - - # 3. Next, let's load the config as we might need it to create - # the tokenizer - config = AutoConfig.from_pretrained( - model_args.model_name_or_path, cache_dir=model_args.cache_dir, token=data_args.use_auth_token - ) - - # 4. Now we can instantiate the tokenizer and model - # Note for distributed training, the .from_pretrained methods guarantee that only - # one local process can concurrently download model & vocab. - - tokenizer_name_or_path = model_args.tokenizer_name_or_path - if tokenizer_name_or_path is None: - raise ValueError( - "Tokenizer has to be created before training in streaming mode. Please specify --tokenizer_name_or_path" - ) - # load feature_extractor and tokenizer - tokenizer = AutoTokenizer.from_pretrained( - tokenizer_name_or_path, - config=config, - token=data_args.use_auth_token, - ) - - # adapt config - config.update( - { - "feat_proj_dropout": model_args.feat_proj_dropout, - "attention_dropout": model_args.attention_dropout, - "hidden_dropout": model_args.hidden_dropout, - "final_dropout": model_args.final_dropout, - "mask_time_prob": model_args.mask_time_prob, - "mask_time_length": model_args.mask_time_length, - "mask_feature_prob": model_args.mask_feature_prob, - "mask_feature_length": model_args.mask_feature_length, - "gradient_checkpointing": training_args.gradient_checkpointing, - "layerdrop": model_args.layerdrop, - "ctc_loss_reduction": model_args.ctc_loss_reduction, - "pad_token_id": tokenizer.pad_token_id, - "vocab_size": len(tokenizer), - "activation_dropout": model_args.activation_dropout, - } - ) - - # create model - model = AutoModelForCTC.from_pretrained( - model_args.model_name_or_path, - cache_dir=model_args.cache_dir, - config=config, - token=data_args.use_auth_token, - ) - - # freeze encoder - if model_args.freeze_feature_encoder: - model.freeze_feature_encoder() - - # 5. Now we preprocess the datasets including loading the audio, resampling and normalization - audio_column_name = data_args.audio_column_name - - # `phoneme_language` is only relevant if the model is fine-tuned on phoneme classification - phoneme_language = data_args.phoneme_language - - # Preprocessing the datasets. - # We need to read the audio files as arrays and tokenize the targets. 
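    # [Editor's aside, not part of the original script] Because the datasets
    # are streamed (`IterableDatasetDict`), the `.map(prepare_dataset)` call
    # below is applied lazily, example by example, while training runs; there
    # is no `num_proc` parallelism or on-disk caching as in the non-streaming
    # script. Shuffling likewise operates on a finite buffer of
    # `shuffle_buffer_size` examples, and the `ShuffleCallback` defined further
    # down re-seeds that buffer at the start of every epoch.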
- def prepare_dataset(batch): - # load audio - sample = batch[audio_column_name] - - inputs = feature_extractor(sample["array"], sampling_rate=sample["sampling_rate"]) - batch["input_values"] = inputs.input_values[0] - batch["input_length"] = len(batch["input_values"]) - - # encode targets - additional_kwargs = {} - if phoneme_language is not None: - additional_kwargs["phonemizer_lang"] = phoneme_language - - batch["labels"] = tokenizer(batch["target_text"], **additional_kwargs).input_ids - return batch - - vectorized_datasets = IterableDatasetDict() - with training_args.main_process_first(desc="dataset map preprocessing"): - for split, dataset in raw_datasets.items(): - vectorized_datasets[split] = ( - dataset.map(prepare_dataset) - .remove_columns(raw_column_names[split] + ["target_text"]) - .with_format("torch") - ) - if split == "train": - vectorized_datasets[split] = vectorized_datasets[split].shuffle( - buffer_size=data_args.shuffle_buffer_size, - seed=training_args.seed, - ) - - # 6. Next, we can prepare the training. - # Let's use word error rate (WER) as our evaluation metric, - # instantiate a data collator and the trainer - - # Define evaluation metrics during training, *i.e.* word error rate, character error rate - eval_metrics = {metric: load_metric(metric) for metric in data_args.eval_metrics} - - def compute_metrics(pred): - pred_logits = pred.predictions - pred_ids = np.argmax(pred_logits, axis=-1) - - pred.label_ids[pred.label_ids == -100] = tokenizer.pad_token_id - - pred_str = tokenizer.batch_decode(pred_ids) - # we do not want to group tokens when computing the metrics - label_str = tokenizer.batch_decode(pred.label_ids, group_tokens=False) - - metrics = {k: v.compute(predictions=pred_str, references=label_str) for k, v in eval_metrics.items()} - - return metrics - - # Now save everything to be able to create a single processor later - if is_main_process(training_args.local_rank): - # save feature extractor, tokenizer and config - feature_extractor.save_pretrained(training_args.output_dir) - tokenizer.save_pretrained(training_args.output_dir) - config.save_pretrained(training_args.output_dir) - - try: - processor = AutoProcessor.from_pretrained(training_args.output_dir) - except (OSError, KeyError): - warnings.warn( - "Loading a processor from a feature extractor config that does not" - " include a `processor_class` attribute is deprecated and will be removed in v5. 
Please add the following " - " attribute to your `preprocessor_config.json` file to suppress this warning: " - " `'processor_class': 'Wav2Vec2Processor'`", - FutureWarning, - ) - processor = Wav2Vec2Processor.from_pretrained(training_args.output_dir) - - # Instantiate custom data collator - max_input_length = data_args.max_duration_in_seconds * feature_extractor.sampling_rate - data_collator = DataCollatorCTCWithPadding(processor=processor, max_length=max_input_length) - - # trainer callback to reinitialize and reshuffle the streamable datasets at the beginning of each epoch - class ShuffleCallback(TrainerCallback): - def on_epoch_begin(self, args, state, control, train_dataloader, **kwargs): - if isinstance(train_dataloader.dataset, IterableDatasetShard): - pass # set_epoch() is handled by the Trainer - elif isinstance(train_dataloader.dataset, IterableDataset): - train_dataloader.dataset.set_epoch(train_dataloader.dataset._epoch + 1) - - # Initialize Trainer - trainer = Trainer( - model=model, - data_collator=data_collator, - args=training_args, - compute_metrics=compute_metrics, - train_dataset=vectorized_datasets["train"] if training_args.do_train else None, - eval_dataset=vectorized_datasets["eval"] if training_args.do_eval else None, - tokenizer=processor, - callbacks=[ShuffleCallback()], - ) - - # 7. Finally, we can start training - - # Training - if training_args.do_train: - # use last checkpoint if exist - if last_checkpoint is not None: - checkpoint = last_checkpoint - elif os.path.isdir(model_args.model_name_or_path): - checkpoint = model_args.model_name_or_path - else: - checkpoint = None - - train_result = trainer.train(resume_from_checkpoint=checkpoint) - trainer.save_model() - - metrics = train_result.metrics - if data_args.max_train_samples: - metrics["train_samples"] = data_args.max_train_samples - - trainer.log_metrics("train", metrics) - trainer.save_metrics("train", metrics) - trainer.save_state() - - # Evaluation - results = {} - if training_args.do_eval: - logger.info("*** Evaluate ***") - metrics = trainer.evaluate() - if data_args.max_eval_samples: - metrics["eval_samples"] = data_args.max_eval_samples - - trainer.log_metrics("eval", metrics) - trainer.save_metrics("eval", metrics) - - # Write model card and (optionally) push to hub - config_name = data_args.dataset_config_name if data_args.dataset_config_name is not None else "na" - kwargs = { - "finetuned_from": model_args.model_name_or_path, - "tasks": "automatic-speech-recognition", - "tags": ["automatic-speech-recognition", data_args.dataset_name], - "dataset_args": ( - f"Config: {config_name}, Training split: {data_args.train_split_name}, Eval split:" - f" {data_args.eval_split_name}" - ), - "dataset": f"{data_args.dataset_name.upper()} - {config_name.upper()}", - } - if "common_voice" in data_args.dataset_name: - kwargs["language"] = config_name - - if training_args.push_to_hub: - trainer.push_to_hub(**kwargs) - else: - trainer.create_model_card(**kwargs) - - return results - - -if __name__ == "__main__": - main() diff --git a/examples/research_projects/self-training-text-classification/README.md b/examples/research_projects/self-training-text-classification/README.md deleted file mode 100644 index 062d5de7afd..00000000000 --- a/examples/research_projects/self-training-text-classification/README.md +++ /dev/null @@ -1,128 +0,0 @@ -# Self-training - -This is an implementation of the self-training algorithm (without task augmentation) in the [EMNLP 2021](https://2021.emnlp.org/) paper: [STraTA: Self-Training 
with Task Augmentation for Better Few-shot Learning](https://arxiv.org/abs/2109.06270). Please check out https://github.com/google-research/google-research/tree/master/STraTA for the original codebase. - -**Note**: The code can be used as a tool for automatic data labeling. - -## Table of Contents - - * [Installation](#installation) - * [Self-training](#self-training) - * [Running self-training with a base model](#running-self-training-with-a-base-model) - * [Hyperparameters for self-training](#hyperparameters-for-self-training) - * [Distributed training](#distributed-training) - * [Demo](#demo) - * [How to cite](#how-to-cite) - -## Installation -This repository is tested on Python 3.8+, PyTorch 1.10+, and the 🤗 Transformers 4.16+. - -You should install all necessary Python packages in a [virtual environment](https://docs.python.org/3/library/venv.html). If you are unfamiliar with Python virtual environments, please check out the [user guide](https://packaging.python.org/guides/installing-using-pip-and-virtual-environments/). - -Below, we create a virtual environment with the [Anaconda Python distribution](https://www.anaconda.com/products/distribution) and activate it. -```sh -conda create -n strata python=3.9 -conda activate strata -``` -Next, you need to install 🤗 Transformers. Please refer to [🤗 Transformers installation page](https://github.com/huggingface/transformers#installation) for a detailed guide. -```sh -pip install transformers -``` -Finally, install all necessary Python packages for our self-training algorithm. - -```sh -pip install -r STraTA/selftraining/requirements.txt -``` -This will install PyTorch as a backend. - -## Self-training -### Running self-training with a base model -The following example code shows how to run our self-training algorithm with a base model (e.g., `BERT`) on the `SciTail` science entailment dataset, which has two classes `['entails', 'neutral']`. We assume that you have a data directory that includes some training data (e.g., `train.csv`), evaluation data (e.g., `eval.csv`), and unlabeled data (e.g., `infer.csv`). - -```python -import os -from selftraining import selftrain - -data_dir = '/path/to/your/data/dir' -parameters_dict = { - 'max_selftrain_iterations': 100, - 'model_name_or_path': '/path/to/your/base/model', # could be the id of a model hosted by 🤗 Transformers - 'output_dir': '/path/to/your/output/dir', - 'train_file': os.path.join(data_dir, 'train.csv'), - 'infer_file': os.path.join(data_dir, 'infer.csv'), - 'eval_file': os.path.join(data_dir, 'eval.csv'), - 'eval_strategy': 'steps', - 'task_name': 'scitail', - 'label_list': ['entails', 'neutral'], - 'per_device_train_batch_size': 32, - 'per_device_eval_batch_size': 8, - 'max_length': 128, - 'learning_rate': 2e-5, - 'max_steps': 100000, - 'eval_steps': 1, - 'early_stopping_patience': 50, - 'overwrite_output_dir': True, - 'do_filter_by_confidence': False, - # 'confidence_threshold': 0.3, - 'do_filter_by_val_performance': True, - 'finetune_on_labeled_data': False, - 'seed': 42, -} -selftrain(**parameters_dict) -``` - -**Note**: We checkpoint periodically during self-training. In case of preemptions, just re-run the above script and self-training will resume from the latest iteration. - -### Hyperparameters for self-training -If you have development data, you might want to tune some hyperparameters for self-training. -Below are hyperparameters that could provide additional gains for your task. 
- - - `finetune_on_labeled_data`: If set to `True`, the resulting model from each self-training iteration is further fine-tuned on the original labeled data before the next self-training iteration. Intuitively, this would give the model a chance to "correct" itself after being trained on pseudo-labeled data. - - `do_filter_by_confidence`: If set to `True`, the pseudo-labeled data in each self-training iteration is filtered based on the model confidence. For instance, if `confidence_threshold` is set to `0.3`, pseudo-labeled examples with a confidence score less than or equal to `0.3` will be discarded. Note that `confidence_threshold` should be greater than or equal to `1/num_labels`, where `num_labels` is the number of class labels. Filtering out the lowest-confidence pseudo-labeled examples could be helpful in some cases. - - `do_filter_by_val_performance`: If set to `True`, the pseudo-labeled data in each self-training iteration is filtered based on the current validation performance. For instance, if your validation performance is 80% accuracy, you might want to get rid of the 20% of the pseudo-labeled data with the lowest confidence scores. - -### Distributed training -We strongly recommend distributed training with multiple accelerators. To activate distributed training, please try one of the following methods: - -1. Run `accelerate config` and answer the questions asked. This will save a `default_config.yaml` file in your cache folder for 🤗 Accelerate. Now, you can run your script with the following command: - -```sh -accelerate launch your_script.py --args_to_your_script -``` - -2. Run your script with the following command: - -```sh -python -m torch.distributed.launch --nnodes="${NUM_NODES}" --nproc_per_node="${NUM_TRAINERS}" your_script.py --args_to_your_script -``` - -3. Run your script with the following command: - -```sh -torchrun --nnodes="${NUM_NODES}" --nproc_per_node="${NUM_TRAINERS}" your_script.py --args_to_your_script -``` - -## Demo -Please check out `run.sh` to see how to perform our self-training algorithm with a `BERT` Base model on the SciTail science entailment dataset using 8 labeled examples per class. You can configure your training environment by specifying `NUM_NODES` and `NUM_TRAINERS` (number of processes per node). To launch the script, simply run `source run.sh`. - -## How to cite -If you extend or use this code, please cite the [paper](https://arxiv.org/abs/2109.06270) where it was introduced: - -```bibtex -@inproceedings{vu-etal-2021-strata, - title = "{ST}ra{TA}: Self-Training with Task Augmentation for Better Few-shot Learning", - author = "Vu, Tu and - Luong, Minh-Thang and - Le, Quoc and - Simon, Grady and - Iyyer, Mohit", - booktitle = "Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing", - month = nov, - year = "2021", - address = "Online and Punta Cana, Dominican Republic", - publisher = "Association for Computational Linguistics", - url = "https://aclanthology.org/2021.emnlp-main.462", - doi = "10.18653/v1/2021.emnlp-main.462", - pages = "5715--5731", -} -``` diff --git a/examples/research_projects/self-training-text-classification/finetuning.py b/examples/research_projects/self-training-text-classification/finetuning.py deleted file mode 100644 index 4bf9eb28df2..00000000000 --- a/examples/research_projects/self-training-text-classification/finetuning.py +++ /dev/null @@ -1,818 +0,0 @@ -# coding=utf-8 -# Copyright 2022 The Google Research Authors.
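For illustration, the confidence-based filtering options described in the README above amount to roughly the following sketch. It assumes a 🤗 Datasets `Dataset` that already carries a `prediction` and a `probability` column for the unlabeled examples; the column names and the toy values below are hypothetical, and the project's actual filtering lives in `selftraining.py`.

```python
from datasets import Dataset

# Hypothetical pool of pseudo-labeled examples: the model's predictions on the
# unlabeled data together with the confidence (max softmax probability) of each one.
pseudo = Dataset.from_dict(
    {
        "sentence1": ["premise a", "premise b", "premise c"],
        "prediction": ["entails", "neutral", "entails"],
        "probability": [0.92, 0.31, 0.55],
    }
)

# `do_filter_by_confidence`: drop examples at or below the threshold
# (the threshold should be at least 1/num_labels, here 0.5 for two classes).
confidence_threshold = 0.5
filtered = pseudo.filter(lambda example: example["probability"] > confidence_threshold)

# `do_filter_by_val_performance`: keep only the most confident fraction of the
# pool, where the fraction equals the current validation accuracy (e.g. 80%).
val_accuracy = 0.8
num_kept = int(val_accuracy * len(pseudo))
filtered_by_val = pseudo.sort("probability", reverse=True).select(range(num_kept))
```

In the actual script this filtering happens once per self-training iteration, just before the pseudo-labeled training file for the next iteration is written out.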
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Fine-tuning the library models for sequence classification.""" - -import argparse -import dataclasses -import json -import logging -import math -import os -import random -import shutil -from typing import List, Optional - -import datasets -import numpy as np -import pandas as pd -import torch -from datasets import load_dataset, load_metric -from torch.utils.data import DataLoader -from tqdm.auto import tqdm - -from transformers import ( - AdamW, - AutoConfig, - AutoModelForSequenceClassification, - AutoTokenizer, - DataCollatorWithPadding, - default_data_collator, - get_scheduler, - set_seed, -) -from transformers.file_utils import ExplicitEnum -from transformers.trainer_utils import IntervalStrategy - - -logger = logging.getLogger(__name__) - - -class Split(ExplicitEnum): - TRAIN = "train" - EVAL = "eval" - TEST = "test" - INFER = "infer" - - -@dataclasses.dataclass -class FTModelArguments: - """Arguments pertaining to which config/tokenizer/model we are going to fine-tune from.""" - - model_name_or_path: str = dataclasses.field( - metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models."} - ) - use_fast_tokenizer: Optional[bool] = dataclasses.field( - default=True, - metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, - ) - cache_dir: Optional[str] = dataclasses.field( - default=None, - metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co."}, - ) - - -@dataclasses.dataclass -class FTDataArguments: - """Arguments pertaining to what data we are going to input our model for training and evaluation.""" - - train_file: str = dataclasses.field( - default=None, metadata={"help": "A csv or a json file containing the training data."} - ) - eval_file: Optional[str] = dataclasses.field( - default=None, metadata={"help": "A csv or a json file containing the validation data."} - ) - test_file: Optional[str] = dataclasses.field( - default=None, metadata={"help": "A csv or a json file containing the test data."} - ) - infer_file: Optional[str] = dataclasses.field( - default=None, metadata={"help": "A csv or a json file containing the data to predict on."} - ) - task_name: Optional[str] = dataclasses.field( - default=None, - metadata={"help": "The name of the task to train on."}, - ) - label_list: Optional[List[str]] = dataclasses.field( - default=None, metadata={"help": "The list of labels for the task."} - ) - - max_length: Optional[int] = dataclasses.field( - default=128, - metadata={ - "help": ( - "The maximum total input sequence length after tokenization. Sequences longer " - "than this will be truncated, sequences shorter will be padded." - ) - }, - ) - pad_to_max_length: Optional[bool] = dataclasses.field( - default=False, - metadata={ - "help": ( - "Whether to pad all samples to `max_seq_length`. " - "If False, will pad the samples dynamically when batching to the maximum length in the batch." 
- ) - }, - ) - - -@dataclasses.dataclass -class FTTrainingArguments: - """Training arguments pertaining to the training loop itself.""" - - output_dir: str = dataclasses.field( - metadata={"help": "The output directory where the model predictions and checkpoints will be written."} - ) - do_train: Optional[bool] = dataclasses.field( - default=False, - metadata={"help": "Whether to run training or not."}, - ) - do_eval: Optional[bool] = dataclasses.field( - default=False, - metadata={"help": "Whether to run evaluation on the validation set or not."}, - ) - do_predict: Optional[bool] = dataclasses.field( - default=False, - metadata={"help": "Whether to run inference on the inference set or not."}, - ) - seed: Optional[int] = dataclasses.field( - default=42, - metadata={"help": "Random seed that will be set at the beginning of training."}, - ) - per_device_train_batch_size: Optional[int] = dataclasses.field( - default=8, - metadata={"help": "The batch size per GPU/TPU core/CPU for training."}, - ) - per_device_eval_batch_size: Optional[int] = dataclasses.field( - default=8, - metadata={"help": "The batch size per GPU/TPU core/CPU for evaluation."}, - ) - weight_decay: Optional[float] = dataclasses.field( - default=0.0, - metadata={ - "help": ( - "The weight decay to apply (if not zero) to all layers except all bias and LayerNorm weights in" - " [`AdamW`] optimizer." - ) - }, - ) - learning_rate: Optional[float] = dataclasses.field( - default=5e-5, - metadata={"help": "The initial learning rate for [`AdamW`] optimizer."}, - ) - gradient_accumulation_steps: Optional[int] = dataclasses.field( - default=1, - metadata={ - "help": ( - "Number of updates steps to accumulate the gradients for, before performing a backward/update pass." - ) - }, - ) - max_steps: Optional[int] = dataclasses.field( - default=-1, - metadata={ - "help": ( - "If set to a positive number, the total number of training steps to perform. Overrides" - " `num_train_epochs`." - ) - }, - ) - lr_scheduler_type: Optional[str] = dataclasses.field( - default="linear", metadata={"help": "The scheduler type to use."} - ) - warmup_steps: Optional[int] = dataclasses.field( - default=1, - metadata={ - "help": ( - "Number of steps used for a linear warmup from 0 to `learning_rate`. Overrides any effect of" - " `warmup_ratio`." - ) - }, - ) - eval_strategy: Optional[str] = dataclasses.field( - default="no", - metadata={ - "help": 'The evaluation strategy to adopt during training. Possible values are: ["no", "step", "epoch]' - }, - ) - eval_steps: Optional[int] = dataclasses.field( - default=1, - metadata={"help": 'Number of update steps between two evaluations if `eval_strategy="steps"`.'}, - ) - eval_metric: Optional[str] = dataclasses.field( - default="accuracy", metadata={"help": "The evaluation metric used for the task."} - ) - keep_checkpoint_max: Optional[int] = dataclasses.field( - default=1, - metadata={"help": "The maximum number of best checkpoint files to keep."}, - ) - early_stopping_patience: Optional[int] = dataclasses.field( - default=10, - metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."}, - ) - early_stopping_threshold: Optional[float] = dataclasses.field( - default=0.0, - metadata={ - "help": "How much the specified evaluation metric must improve to satisfy early stopping conditions." 
- }, - ) - - -def train(args, accelerator, model, tokenizer, train_dataloader, optimizer, lr_scheduler, eval_dataloader=None): - """Train a model on the given training data.""" - - total_batch_size = args.per_device_train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps - - logger.info("***** Running training *****") - logger.info(" Num examples = %d", args.num_examples[Split.TRAIN.value]) - logger.info(" Instantaneous batch size per device = %d", args.per_device_train_batch_size) - logger.info(" Total train batch size (w. parallel, distributed & accumulation) = %d", total_batch_size) - logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps) - logger.info(" Total optimization steps = %d", args.max_steps) - - # Only show the progress bar once on each machine. - progress_bar = tqdm(range(args.max_steps), disable=not accelerator.is_local_main_process) - - checkpoints = None - eval_results = None - best_checkpoint = None - best_eval_result = None - early_stopping_patience_counter = 0 - should_training_stop = False - epoch = 0 - completed_steps = 0 - train_loss = 0.0 - model.zero_grad() - - for _ in range(args.num_train_epochs): - epoch += 1 - model.train() - for step, batch in enumerate(train_dataloader): - outputs = model(**batch) - loss = outputs.loss - loss = loss / args.gradient_accumulation_steps - accelerator.backward(loss) - train_loss += loss.item() - - if step % args.gradient_accumulation_steps == 0 or step == len(train_dataloader) - 1: - optimizer.step() - lr_scheduler.step() - optimizer.zero_grad() - progress_bar.update(1) - completed_steps += 1 - - # Evaluate during training - if ( - eval_dataloader is not None - and args.eval_strategy == IntervalStrategy.STEPS.value - and args.eval_steps > 0 - and completed_steps % args.eval_steps == 0 - ): - accelerator.wait_for_everyone() - new_checkpoint = f"checkpoint-{IntervalStrategy.STEPS.value}-{completed_steps}" - new_eval_result = evaluate(args, accelerator, eval_dataloader, "eval", model, new_checkpoint)[ - args.eval_metric - ] - logger.info( - "Evaluation result at step %d: %s = %f", completed_steps, args.eval_metric, new_eval_result - ) - if checkpoints is None: - checkpoints = np.array([new_checkpoint]) - eval_results = np.array([new_eval_result]) - best_checkpoint = new_checkpoint - best_eval_result = new_eval_result - else: - if new_eval_result - best_eval_result > args.early_stopping_threshold: - best_checkpoint = new_checkpoint - best_eval_result = new_eval_result - early_stopping_patience_counter = 0 - else: - if new_eval_result == best_eval_result: - best_checkpoint = new_checkpoint - best_eval_result = new_eval_result - early_stopping_patience_counter += 1 - - if early_stopping_patience_counter >= args.early_stopping_patience: - should_training_stop = True - - checkpoints = np.append(checkpoints, [new_checkpoint], axis=0) - eval_results = np.append(eval_results, [new_eval_result], axis=0) - sorted_ids = np.argsort(eval_results) - eval_results = eval_results[sorted_ids] - checkpoints = checkpoints[sorted_ids] - - if len(checkpoints) > args.keep_checkpoint_max: - # Delete the current worst checkpoint - checkpoint_to_remove, *checkpoints = checkpoints - eval_results = eval_results[1:] - if checkpoint_to_remove != new_checkpoint: - if accelerator.is_main_process: - shutil.rmtree(os.path.join(args.output_dir, checkpoint_to_remove), ignore_errors=True) - accelerator.wait_for_everyone() - - if new_checkpoint in checkpoints: - # Save model checkpoint - checkpoint_output_dir = 
os.path.join(args.output_dir, new_checkpoint) - if accelerator.is_main_process: - if not os.path.exists(checkpoint_output_dir): - os.makedirs(checkpoint_output_dir) - accelerator.wait_for_everyone() - unwrapped_model = accelerator.unwrap_model(model) - unwrapped_model.save_pretrained(checkpoint_output_dir, save_function=accelerator.save) - if accelerator.is_main_process: - tokenizer.save_pretrained(checkpoint_output_dir) - logger.info("Saving model checkpoint to %s", checkpoint_output_dir) - - if completed_steps >= args.max_steps: - break - - if should_training_stop: - break - - # Evaluate during training - if eval_dataloader is not None and args.eval_strategy == IntervalStrategy.EPOCH.value: - accelerator.wait_for_everyone() - new_checkpoint = f"checkpoint-{IntervalStrategy.EPOCH.value}-{epoch}" - new_eval_result = evaluate(args, accelerator, eval_dataloader, "eval", model, new_checkpoint)[ - args.eval_metric - ] - logger.info("Evaluation result at epoch %d: %s = %f", epoch, args.eval_metric, new_eval_result) - - if checkpoints is None: - checkpoints = np.array([new_checkpoint]) - eval_results = np.array([new_eval_result]) - best_checkpoint = new_checkpoint - best_eval_result = new_eval_result - else: - if new_eval_result - best_eval_result > args.early_stopping_threshold: - best_checkpoint = new_checkpoint - best_eval_result = new_eval_result - early_stopping_patience_counter = 0 - else: - if new_eval_result == best_eval_result: - best_checkpoint = new_checkpoint - best_eval_result = new_eval_result - early_stopping_patience_counter += 1 - - if early_stopping_patience_counter >= args.early_stopping_patience: - should_training_stop = True - - checkpoints = np.append(checkpoints, [new_checkpoint], axis=0) - eval_results = np.append(eval_results, [new_eval_result], axis=0) - sorted_ids = np.argsort(eval_results) - eval_results = eval_results[sorted_ids] - checkpoints = checkpoints[sorted_ids] - - if len(checkpoints) > args.keep_checkpoint_max: - # Delete the current worst checkpoint - checkpoint_to_remove, *checkpoints = checkpoints - eval_results = eval_results[1:] - if checkpoint_to_remove != new_checkpoint: - if accelerator.is_main_process: - shutil.rmtree(os.path.join(args.output_dir, checkpoint_to_remove), ignore_errors=True) - accelerator.wait_for_everyone() - - if new_checkpoint in checkpoints: - # Save model checkpoint - checkpoint_output_dir = os.path.join(args.output_dir, new_checkpoint) - if accelerator.is_main_process: - if not os.path.exists(checkpoint_output_dir): - os.makedirs(checkpoint_output_dir) - accelerator.wait_for_everyone() - unwrapped_model = accelerator.unwrap_model(model) - unwrapped_model.save_pretrained(checkpoint_output_dir, save_function=accelerator.save) - if accelerator.is_main_process: - tokenizer.save_pretrained(checkpoint_output_dir) - logger.info("Saving model checkpoint to %s", checkpoint_output_dir) - - if completed_steps >= args.max_steps: - break - - if should_training_stop: - break - - if best_checkpoint is not None: - # Save the best checkpoint - logger.info("Best checkpoint: %s", best_checkpoint) - logger.info("Best evaluation result: %s = %f", args.eval_metric, best_eval_result) - best_checkpoint_output_dir = os.path.join(args.output_dir, best_checkpoint) - if accelerator.is_main_process: - shutil.move(best_checkpoint_output_dir, os.path.join(args.output_dir, "best-checkpoint")) - shutil.rmtree(best_checkpoint_output_dir, ignore_errors=True) - accelerator.wait_for_everyone() - - else: - # Assume that the last checkpoint is the best checkpoint and 
save it - checkpoint_output_dir = os.path.join(args.output_dir, "best-checkpoint") - if not os.path.exists(checkpoint_output_dir): - os.makedirs(checkpoint_output_dir) - - accelerator.wait_for_everyone() - unwrapped_model = accelerator.unwrap_model(model) - unwrapped_model.save_pretrained(checkpoint_output_dir, save_function=accelerator.save) - if accelerator.is_main_process: - tokenizer.save_pretrained(checkpoint_output_dir) - logger.info("Saving model checkpoint to %s", checkpoint_output_dir) - return completed_steps, train_loss / completed_steps - - -def evaluate(args, accelerator, dataloader, eval_set, model, checkpoint, has_labels=True, write_to_file=True): - """Evaluate a model checkpoint on the given evaluation data.""" - - num_examples = args.num_examples[eval_set] - eval_metric = None - completed_steps = 0 - eval_loss = 0.0 - all_predictions = None - all_references = None - all_probabilities = None - - if has_labels: - # Get the metric function - eval_metric = load_metric(args.eval_metric) - - eval_results = {} - model.eval() - for _, batch in enumerate(dataloader): - with torch.no_grad(): - outputs = model(**batch) - - eval_loss += outputs.loss.item() - logits = outputs.logits - predictions = logits.argmax(dim=-1) if not args.is_regression else logits.squeeze() - predictions = accelerator.gather(predictions) - - if all_predictions is None: - all_predictions = predictions.detach().cpu().numpy() - else: - all_predictions = np.append(all_predictions, predictions.detach().cpu().numpy(), axis=0) - - if not args.is_regression: - probabilities = logits.softmax(dim=-1).max(dim=-1).values - probabilities = accelerator.gather(probabilities) - if all_probabilities is None: - all_probabilities = probabilities.detach().cpu().numpy() - else: - all_probabilities = np.append(all_probabilities, probabilities.detach().cpu().numpy(), axis=0) - - if has_labels: - references = batch["labels"] - references = accelerator.gather(references) - if all_references is None: - all_references = references.detach().cpu().numpy() - else: - all_references = np.append(all_references, references.detach().cpu().numpy(), axis=0) - - eval_metric.add_batch( - predictions=predictions, - references=references, - ) - completed_steps += 1 - - if has_labels: - eval_results.update(eval_metric.compute()) - eval_results["completed_steps"] = completed_steps - eval_results["avg_eval_loss"] = eval_loss / completed_steps - - if write_to_file: - accelerator.wait_for_everyone() - if accelerator.is_main_process: - results_file = os.path.join(args.output_dir, f"{eval_set}_results_{checkpoint}.json") - with open(results_file, "w") as f: - json.dump(eval_results, f, indent=4, sort_keys=True) - - if write_to_file: - accelerator.wait_for_everyone() - if accelerator.is_main_process: - output_file = os.path.join(args.output_dir, f"{eval_set}_output_{checkpoint}.csv") - if not args.is_regression: - assert len(all_predictions) == len(all_probabilities) - df = pd.DataFrame(list(zip(all_predictions, all_probabilities)), columns=["prediction", "probability"]) - else: - df = pd.DataFrame(all_predictions, columns=["prediction"]) - df = df.head(num_examples) - df.to_csv(output_file, header=True, index=False) - return eval_results - - -def load_from_pretrained(args, pretrained_model_name_or_path): - """Load the pretrained model and tokenizer.""" - - # In distributed training, the .from_pretrained methods guarantee that only - # one local process can concurrently perform this procedure. 
- - config = AutoConfig.from_pretrained( - pretrained_model_name_or_path, - num_labels=args.num_labels if hasattr(args, "num_labels") else None, - finetuning_task=args.task_name.lower(), - cache_dir=args.cache_dir, - ) - tokenizer = AutoTokenizer.from_pretrained( - pretrained_model_name_or_path, use_fast=args.use_fast_tokenizer, cache_dir=args.cache_dir - ) - model = AutoModelForSequenceClassification.from_pretrained( - pretrained_model_name_or_path, - from_tf=bool(".ckpt" in args.model_name_or_path), - config=config, - ignore_mismatched_sizes=True, - cache_dir=args.cache_dir, - ) - return config, tokenizer, model - - -def finetune(accelerator, model_name_or_path, train_file, output_dir, **kwargs): - """Fine-tuning a pre-trained model on a downstream task. - - Args: - accelerator: An instance of an accelerator for distributed training (on - multi-GPU, TPU) or mixed precision training. - model_name_or_path: Path to pretrained model or model identifier from - huggingface.co/models. - train_file: A csv or a json file containing the training data. - output_dir: The output directory where the model predictions and checkpoints - will be written. - **kwargs: Dictionary of key/value pairs with which to update the - configuration object after loading. The values in kwargs of any keys which - are configuration attributes will be used to override the loaded values. - """ - # Make one log on every process with the configuration for debugging. - logging.basicConfig( - format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", - datefmt="%m/%d/%Y %H:%M:%S", - level=logging.INFO, - ) - logger.info(accelerator.state) - - # Setup logging, we only want one process per machine to log things on the - # screen. accelerator.is_local_main_process is only True for one process per - # machine. - logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR) - - model_args = FTModelArguments(model_name_or_path=model_name_or_path) - data_args = FTDataArguments(train_file=train_file) - training_args = FTTrainingArguments(output_dir=output_dir) - args = argparse.Namespace() - - for arg_class in (model_args, data_args, training_args): - for key, value in vars(arg_class).items(): - setattr(args, key, value) - - for key, value in kwargs.items(): - if hasattr(args, key): - setattr(args, key, value) - - # Sanity checks - data_files = {} - args.data_file_extension = None - - # You need to provide the training data as we always run training - args.do_train = True - assert args.train_file is not None - data_files[Split.TRAIN.value] = args.train_file - - if args.do_eval or args.eval_strategy != IntervalStrategy.NO.value: - assert args.eval_file is not None - data_files[Split.EVAL.value] = args.eval_file - - if args.do_eval and args.test_file is not None: - data_files[Split.TEST.value] = args.test_file - - if args.do_predict: - assert args.infer_file is not None - data_files[Split.INFER.value] = args.infer_file - - for key in data_files: - extension = data_files[key].split(".")[-1] - assert extension in ["csv", "json"], f"`{key}_file` should be a csv or a json file." - if args.data_file_extension is None: - args.data_file_extension = extension - else: - assert extension == args.data_file_extension, f"`{key}_file` should be a {args.data_file_extension} file`." - - assert ( - args.eval_metric in datasets.list_metrics() - ), f"{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}." 
- - # Handle the output directory creation - if accelerator.is_main_process: - if args.output_dir is not None: - os.makedirs(args.output_dir, exist_ok=True) - accelerator.wait_for_everyone() - - # If passed along, set the training seed now. - if args.seed is not None: - set_seed(args.seed) - - # You need to provide your CSV/JSON data files. - # - # For CSV/JSON files, this script will use as labels the column called 'label' - # and as pair of sentences the sentences in columns called 'sentence1' and - # 'sentence2' if these columns exist or the first two columns not named - # 'label' if at least two columns are provided. - # - # If the CSVs/JSONs contain only one non-label column, the script does single - # sentence classification on this single column. - # - # In distributed training, the load_dataset function guarantees that only one - # local process can download the dataset. - - # Loading the dataset from local csv or json files. - raw_datasets = load_dataset(args.data_file_extension, data_files=data_files) - - # Labels - is_regression = raw_datasets[Split.TRAIN.value].features["label"].dtype in ["float32", "float64"] - args.is_regression = is_regression - - if args.is_regression: - label_list = None - num_labels = 1 - else: - label_list = args.label_list - assert label_list is not None - label_list.sort() # Let's sort it for determinism - num_labels = len(label_list) - args.num_labels = num_labels - - # Load pre-trained model - config, tokenizer, model = load_from_pretrained(args, args.model_name_or_path) - - # Preprocessing the datasets - non_label_column_names = [name for name in raw_datasets[Split.TRAIN.value].column_names if name != "label"] - if "sentence1" in non_label_column_names and "sentence2" in non_label_column_names: - sentence1_key, sentence2_key = "sentence1", "sentence2" - else: - if len(non_label_column_names) >= 2: - sentence1_key, sentence2_key = non_label_column_names[:2] - else: - sentence1_key, sentence2_key = non_label_column_names[0], None - - label_to_id = {v: i for i, v in enumerate(label_list)} - config.label2id = label_to_id - config.id2label = {id: label for label, id in config.label2id.items()} - padding = "max_length" if args.pad_to_max_length else False - - def preprocess_function(examples): - # Tokenize the texts - texts = ( - (examples[sentence1_key],) if sentence2_key is None else (examples[sentence1_key], examples[sentence2_key]) - ) - result = tokenizer(*texts, padding=padding, max_length=args.max_length, truncation=True) - - if "label" in examples: - if label_to_id is not None: - # Map labels to IDs (not necessary for GLUE tasks) - result["labels"] = [label_to_id[l] for l in examples["label"]] - else: - # In all cases, rename the column to labels because the model will - # expect that. 
- result["labels"] = examples["label"] - return result - - with accelerator.main_process_first(): - processed_datasets = raw_datasets.map( - preprocess_function, - batched=True, - remove_columns=raw_datasets[Split.TRAIN.value].column_names, - desc="Running tokenizer on dataset", - ) - - num_examples = {} - splits = [s.value for s in Split] - for split in splits: - if split in processed_datasets: - num_examples[split] = len(processed_datasets[split]) - args.num_examples = num_examples - - train_dataset = processed_datasets[Split.TRAIN.value] - eval_dataset = processed_datasets[Split.EVAL.value] if Split.EVAL.value in processed_datasets else None - test_dataset = processed_datasets[Split.TEST.value] if Split.TEST.value in processed_datasets else None - infer_dataset = processed_datasets[Split.INFER.value] if Split.INFER.value in processed_datasets else None - - # Log a few random samples from the training set: - for index in random.sample(range(len(train_dataset)), 3): - logger.info("Sample %d of the training set: %s.", index, train_dataset[index]) - - # DataLoaders creation: - if args.pad_to_max_length: - # If padding was already done ot max length, we use the default data - # collator that will just convert everything to tensors. - data_collator = default_data_collator - else: - # Otherwise, `DataCollatorWithPadding` will apply dynamic padding for us (by - # padding to the maximum length of the samples passed). When using mixed - # precision, we add `pad_to_multiple_of=8` to pad all tensors to multiple of - # 8s, which will enable the use of Tensor Cores on NVIDIA hardware with - # compute capability >= 7.5 (Volta). - # For fp8, we pad to multiple of 16. - if accelerator.mixed_precision == "fp8": - pad_to_multiple_of = 16 - elif accelerator.mixed_precision != "no": - pad_to_multiple_of = 8 - else: - pad_to_multiple_of = None - data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=pad_to_multiple_of) - - train_dataloader = DataLoader( - train_dataset, - batch_size=args.per_device_train_batch_size, - shuffle=True, - collate_fn=data_collator, - ) - eval_dataloader, test_dataloader, infer_dataloader = None, None, None - - if eval_dataset is not None: - eval_dataloader = DataLoader( - eval_dataset, batch_size=args.per_device_eval_batch_size, collate_fn=data_collator - ) - - if test_dataset is not None: - test_dataloader = DataLoader( - test_dataset, batch_size=args.per_device_eval_batch_size, collate_fn=data_collator - ) - - if infer_dataset is not None: - infer_dataloader = DataLoader( - infer_dataset, batch_size=args.per_device_eval_batch_size, collate_fn=data_collator - ) - - # Optimizer - # Split weights in two groups, one with weight decay and the other not. - no_decay = ["bias", "LayerNorm.weight"] - optimizer_grouped_parameters = [ - { - "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], - "weight_decay": args.weight_decay, - }, - { - "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], - "weight_decay": 0.0, - }, - ] - optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate) - - # Prepare everything with our `accelerator`. 
- model, optimizer, train_dataloader, eval_dataloader, test_dataloader, infer_dataloader = accelerator.prepare( - model, optimizer, train_dataloader, eval_dataloader, test_dataloader, infer_dataloader - ) - - # Note -> the training dataloader needs to be prepared before we grab its - # length below (cause its length will be shorter in multiprocess) - - # Scheduler and math around the number of training steps. - num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) - if args.max_steps == -1: - args.max_steps = args.num_train_epochs * num_update_steps_per_epoch - else: - args.num_train_epochs = math.ceil(args.max_steps / num_update_steps_per_epoch) - - lr_scheduler = get_scheduler( - name=args.lr_scheduler_type, - optimizer=optimizer, - num_warmup_steps=args.warmup_steps, - num_training_steps=args.max_steps, - ) - - # Train - completed_steps, avg_train_loss = train( - args, accelerator, model, tokenizer, train_dataloader, optimizer, lr_scheduler, eval_dataloader - ) - accelerator.wait_for_everyone() - logger.info("Training job completed: completed_steps = %d, avg_train_loss = %f", completed_steps, avg_train_loss) - - args.model_name_or_path = os.path.join(args.output_dir, "best-checkpoint") - logger.info("Loading the best checkpoint: %s", args.model_name_or_path) - config, tokenizer, model = load_from_pretrained(args, args.model_name_or_path) - model = accelerator.prepare(model) - - if args.do_eval: - # Evaluate - if eval_dataloader is not None: - logger.info("***** Running evaluation on the eval data using the best checkpoint *****") - eval_results = evaluate(args, accelerator, eval_dataloader, Split.EVAL.value, model, "best-checkpoint") - avg_eval_loss = eval_results["avg_eval_loss"] - eval_metric = eval_results[args.eval_metric] - logger.info("Evaluation job completed: avg_eval_loss = %f", avg_eval_loss) - logger.info("Evaluation result for the best checkpoint: %s = %f", args.eval_metric, eval_metric) - - if test_dataloader is not None: - logger.info("***** Running evaluation on the test data using the best checkpoint *****") - eval_results = evaluate(args, accelerator, test_dataloader, Split.TEST.value, model, "best-checkpoint") - avg_eval_loss = eval_results["avg_eval_loss"] - eval_metric = eval_results[args.eval_metric] - logger.info("Test job completed: avg_test_loss = %f", avg_eval_loss) - logger.info("Test result for the best checkpoint: %s = %f", args.eval_metric, eval_metric) - - if args.do_predict: - # Predict - if infer_dataloader is not None: - logger.info("***** Running inference using the best checkpoint *****") - evaluate( - args, accelerator, infer_dataloader, Split.INFER.value, model, "best-checkpoint", has_labels=False - ) - logger.info("Inference job completed.") - - # Release all references to the internal objects stored and call the garbage - # collector. You should call this method between two trainings with different - # models/optimizers. 
- accelerator.free_memory() diff --git a/examples/research_projects/self-training-text-classification/requirements.txt b/examples/research_projects/self-training-text-classification/requirements.txt deleted file mode 100644 index 25d66c8b6a4..00000000000 --- a/examples/research_projects/self-training-text-classification/requirements.txt +++ /dev/null @@ -1,7 +0,0 @@ -accelerate -datasets >= 1.8.0 -protobuf -scikit-learn -scipy -sentencepiece != 0.1.92 -torch >= 1.3 diff --git a/examples/research_projects/self-training-text-classification/run.sh b/examples/research_projects/self-training-text-classification/run.sh deleted file mode 100755 index 34e91d7c127..00000000000 --- a/examples/research_projects/self-training-text-classification/run.sh +++ /dev/null @@ -1,81 +0,0 @@ -# Copyright 2022 The Google Research Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -#!/bin/bash - -# Create a virtual environment -conda deactivate -conda update conda -y -conda update anaconda -y -pip install --upgrade pip -python3 -m pip install --user virtualenv -conda create -n strata python=3.9 -y -conda activate strata -# Install all necessary packages -pip install transformers -pip install -r requirements.txt - -# Download and prepare data -WORK_DIR="/tmp/strata" -rm -rf "${WORK_DIR}" && mkdir -p "${WORK_DIR}" -wget https://storage.googleapis.com/gresearch/strata/demo.zip -P "${WORK_DIR}" -DEMO_ZIP_FILE="${WORK_DIR}/demo.zip" -unzip "${DEMO_ZIP_FILE}" -d "${WORK_DIR}" && rm "${DEMO_ZIP_FILE}" -DATA_DIR="${WORK_DIR}/demo/scitail-8" -OUTPUT_DIR="/tmp/output" -rm -rf "${OUTPUT_DIR}" && mkdir -p "${OUTPUT_DIR}" - -# Specific hyperparameters -MODEL_NAME_OR_PATH="bert-base-uncased" -NUM_NODES=1 -NUM_TRAINERS=4 -LAUNCH_SCRIPT="torchrun --nnodes='${NUM_NODES}' --nproc_per_node='${NUM_TRAINERS}' python -c" -MAX_SELFTRAIN_ITERATIONS=100 -TRAIN_FILE="train.csv" -INFER_FILE="infer.csv" -EVAL_FILE="eval_256.csv" -MAX_STEPS=100000 - -# Start self-training -${LAUNCH_SCRIPT} " -import os -from selftraining import selftrain - -data_dir = '${DATA_DIR}' -parameters_dict = { - 'max_selftrain_iterations': ${MAX_SELFTRAIN_ITERATIONS}, - 'model_name_or_path': '${MODEL_NAME_OR_PATH}', - 'output_dir': '${OUTPUT_DIR}', - 'train_file': os.path.join(data_dir, '${TRAIN_FILE}'), - 'infer_file': os.path.join(data_dir, '${INFER_FILE}'), - 'eval_file': os.path.join(data_dir, '${EVAL_FILE}'), - 'eval_strategy': 'steps', - 'task_name': 'scitail', - 'label_list': ['entails', 'neutral'], - 'per_device_train_batch_size': 32, - 'per_device_eval_batch_size': 8, - 'max_length': 128, - 'learning_rate': 2e-5, - 'max_steps': ${MAX_STEPS}, - 'eval_steps': 1, - 'early_stopping_patience': 50, - 'overwrite_output_dir': True, - 'do_filter_by_confidence': False, - 'do_filter_by_val_performance': True, - 'finetune_on_labeled_data': False, - 'seed': 42, -} - -selftrain(**parameters_dict) -" diff --git a/examples/research_projects/self-training-text-classification/selftraining.py b/examples/research_projects/self-training-text-classification/selftraining.py 
deleted file mode 100644 index d741225b061..00000000000 --- a/examples/research_projects/self-training-text-classification/selftraining.py +++ /dev/null @@ -1,388 +0,0 @@ -# coding=utf-8 -# Copyright 2022 The Google Research Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Self-training for sequence classification.""" - -import argparse -import dataclasses -import json -import logging -import os -import shutil -from typing import List, Optional - -import datasets -from accelerate import Accelerator -from datasets import load_dataset -from finetuning import finetune -from tqdm.auto import tqdm - -import transformers -from transformers import AutoConfig, set_seed -from transformers.trainer_utils import IntervalStrategy - - -logger = logging.getLogger(__name__) - -MODEL_BIN_FILE = "pytorch_model.bin" - - -@dataclasses.dataclass -class STModelArguments: - """Arguments pertaining to which config/tokenizer/model we are going to fine-tune from.""" - - model_name_or_path: str = dataclasses.field( - metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models."} - ) - cache_dir: Optional[str] = dataclasses.field( - default=None, - metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co."}, - ) - - -@dataclasses.dataclass -class STDataArguments: - """Arguments pertaining to what data we are going to input our model for training and evaluation.""" - - train_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the training data."}) - infer_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the data to predict on."}) - eval_file: Optional[str] = dataclasses.field( - default=None, metadata={"help": "A csv or a json file containing the validation data."} - ) - task_name: Optional[str] = dataclasses.field( - default=None, - metadata={"help": "The name of the task to train on."}, - ) - label_list: Optional[List[str]] = dataclasses.field( - default=None, metadata={"help": "The list of labels for the task."} - ) - - -@dataclasses.dataclass -class STTrainingArguments: - """Training arguments pertaining to the training loop itself.""" - - output_dir: str = dataclasses.field( - metadata={"help": "The output directory where the model predictions and checkpoints will be written."} - ) - eval_metric: Optional[str] = dataclasses.field( - default="accuracy", metadata={"help": "The evaluation metric used for the task."} - ) - eval_strategy: Optional[str] = dataclasses.field( - default="no", - metadata={ - "help": 'The evaluation strategy to adopt during training. 
Possible values are: ["no", "step", "epoch]' - }, - ) - early_stopping_patience: Optional[int] = dataclasses.field( - default=10, - metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."}, - ) - early_stopping_threshold: Optional[float] = dataclasses.field( - default=0.0, - metadata={ - "help": "How much the specified evaluation metric must improve to satisfy early stopping conditions." - }, - ) - do_filter_by_confidence: Optional[bool] = dataclasses.field( - default=False, - metadata={"help": "Whether to filter the pseudo-labeled data based on the confidence score."}, - ) - do_filter_by_val_performance: Optional[bool] = dataclasses.field( - default=False, - metadata={"help": "Whether to filter the pseudo-labeled data based on the validation performance."}, - ) - finetune_on_labeled_data: Optional[bool] = dataclasses.field( - default=False, - metadata={"help": "Whether to fine-tune on labeled data after pseudo training."}, - ) - confidence_threshold: Optional[float] = dataclasses.field( - default=0.0, - metadata={"help": "Confidence threshold for pseudo-labeled data filtering."}, - ) - max_selftrain_iterations: Optional[int] = dataclasses.field( - default=100, - metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."}, - ) - seed: Optional[int] = dataclasses.field( - default=None, - metadata={"help": "Random seed for initialization."}, - ) - - -def create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir): - """Create pseudeo labeled data for the next self-training iteration.""" - - dataset = datasets.concatenate_datasets([infer_input, infer_output], axis=1) - - if args.do_filter_by_confidence: - dataset = dataset.filter(lambda example: example["probability"] > args.confidence_threshold) - - if args.do_filter_by_val_performance: - assert eval_result >= 0.0 and eval_result <= 1.0 - num_selected_rows = int(eval_result * len(dataset)) - print(num_selected_rows) - dataset = dataset.sort("probability", reverse=True) - dataset = dataset.select(range(num_selected_rows)) - - dataset = dataset.remove_columns(["label", "probability"]) - dataset = dataset.rename_column("prediction", "label") - dataset = dataset.map(lambda example: {"label": id2label[example["label"]]}) - dataset = dataset.shuffle(seed=args.seed) - - pseudo_labeled_data_file = os.path.join(next_data_dir, f"train_pseudo.{args.data_file_extension}") - if args.data_file_extension == "csv": - dataset.to_csv(pseudo_labeled_data_file, index=False) - else: - dataset.to_json(pseudo_labeled_data_file) - - -def selftrain(model_name_or_path, train_file, infer_file, output_dir, **kwargs): - """Self-training a pre-trained model on a downstream task. - - Args: - model_name_or_path: Path to pretrained model or model identifier from - huggingface.co/models. - train_file: A csv or a json file containing the training data. - infer_file: A csv or a json file containing the data to predict on. - output_dir: The output directory where the model predictions and checkpoints - will be written. - **kwargs: Dictionary of key/value pairs with which to update the - configuration object after loading. The values in kwargs of any keys which - are configuration attributes will be used to override the loaded values. - """ - # Initialize the accelerator. We will let the accelerator handle device - # placement for us. - accelerator = Accelerator() - # Make one log on every process with the configuration for debugging. 
- logging.basicConfig( - format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", - datefmt="%m/%d/%Y %H:%M:%S", - level=logging.INFO, - ) - logger.info(accelerator.state) - - # Setup logging, we only want one process per machine to log things on the - # screen. accelerator.is_local_main_process is only True for one process per - # machine. - logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR) - - if accelerator.is_local_main_process: - datasets.utils.logging.set_verbosity_warning() - transformers.utils.logging.set_verbosity_info() - else: - datasets.utils.logging.set_verbosity_error() - transformers.utils.logging.set_verbosity_error() - - model_args = STModelArguments(model_name_or_path=model_name_or_path) - data_args = STDataArguments(train_file=train_file, infer_file=infer_file) - training_args = STTrainingArguments(output_dir=output_dir) - args = argparse.Namespace() - - for arg_class in (model_args, data_args, training_args): - for key, value in vars(arg_class).items(): - setattr(args, key, value) - - for key, value in kwargs.items(): - if hasattr(args, key): - setattr(args, key, value) - - # Sanity checks - data_files = {} - args.data_file_extension = None - - # You need to provide the training data and the data to predict on - assert args.train_file is not None - assert args.infer_file is not None - data_files["train"] = args.train_file - data_files["infer"] = args.infer_file - - if args.eval_strategy != IntervalStrategy.NO.value: - assert args.eval_file is not None - data_files["eval"] = args.eval_file - - for key in data_files: - extension = data_files[key].split(".")[-1] - assert extension in ["csv", "json"], f"`{key}_file` should be a csv or a json file." - if args.data_file_extension is None: - args.data_file_extension = extension - else: - assert extension == args.data_file_extension, f"`{key}_file` should be a {args.data_file_extension} file`." - - assert ( - args.eval_metric in datasets.list_metrics() - ), f"{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}." - - # If passed along, set the training seed now. 
- if args.seed is not None: - set_seed(args.seed) - - logger.info("Creating the initial data directory for self-training...") - data_dir_format = f"{args.output_dir}/self-train_iter-{{}}".format - initial_data_dir = data_dir_format(0) - - if accelerator.is_main_process: - if args.output_dir is not None: - os.makedirs(args.output_dir, exist_ok=True) - os.makedirs(initial_data_dir, exist_ok=True) - accelerator.wait_for_everyone() - - best_iteration = None - best_eval_result = None - early_stopping_patience_counter = 0 - should_training_stop = False - # Show the progress bar - progress_bar = tqdm(range(args.max_selftrain_iterations), disable=not accelerator.is_local_main_process) - - # Self-train - for iteration in range(0, int(args.max_selftrain_iterations)): - current_data_dir = data_dir_format(iteration) - assert os.path.exists(current_data_dir) - - # Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for - # iteration > 0 - current_output_dir = os.path.join(current_data_dir, "stage-1") - arguments_dict = { - "accelerator": accelerator, - "model_name_or_path": args.model_name_or_path, - "cache_dir": args.cache_dir, - "do_train": True, - "train_file": data_files["train"] if iteration == 0 else data_files["train_pseudo"], - "do_eval": True if args.eval_file is not None else False, - "eval_file": data_files["eval"], - "do_predict": True, - "infer_file": data_files["infer"], - "task_name": args.task_name, - "label_list": args.label_list, - "output_dir": current_output_dir, - "eval_metric": args.eval_metric, - "eval_strategy": args.eval_strategy, - "early_stopping_patience": args.early_stopping_patience, - "early_stopping_threshold": args.early_stopping_threshold, - "seed": args.seed, - } - # Add additional training arguments - for key, value in kwargs.items(): - if key not in arguments_dict and not hasattr(training_args, key): - arguments_dict.update({key: value}) - - model_bin_file_path = os.path.join(current_output_dir, "best-checkpoint", MODEL_BIN_FILE) - if os.path.exists(model_bin_file_path): - logger.info( - "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.", - model_bin_file_path, - iteration, - ) - else: - logger.info("***** Running self-training: iteration: %d, stage: 1 *****", iteration) - finetune(**arguments_dict) - accelerator.wait_for_everyone() - assert os.path.exists(model_bin_file_path) - logger.info("Self-training job completed: iteration: %d, stage: 1.", iteration) - - if iteration > 0 and args.finetune_on_labeled_data: - # Stage 2 (optional): fine-tuning on the original labeled data - model_path = os.path.join(current_output_dir, "best-checkpoint") - current_output_dir = os.path.join(current_data_dir, "stage-2") - # Update arguments_dict - arguments_dict["model_name_or_path"] = model_path - arguments_dict["train_file"] = data_files["train"] - arguments_dict["output_dir"] = current_output_dir - - model_bin_file_path = os.path.join(current_output_dir, "best-checkpoint", MODEL_BIN_FILE) - if os.path.exists(model_bin_file_path): - logger.info( - "Found existing model checkpoint at %s. 
Skipping self-training: iteration: %d, stage: 2.", - model_bin_file_path, - iteration, - ) - else: - logger.info("***** Running self-training: iteration: %d, stage: 2 *****", iteration) - finetune(**arguments_dict) - accelerator.wait_for_everyone() - assert os.path.exists(model_bin_file_path) - logger.info("Self-training job completed: iteration: %d, stage: 2.", iteration) - - new_iteration = iteration - next_data_dir = data_dir_format(iteration + 1) - - config = AutoConfig.from_pretrained(os.path.join(current_output_dir, "best-checkpoint")) - id2label = config.id2label - eval_results_file = os.path.join(current_output_dir, "eval_results_best-checkpoint.json") - test_results_file = os.path.join(current_output_dir, "test_results_best-checkpoint.json") - assert os.path.exists(eval_results_file) - - with open(eval_results_file, "r") as f: - eval_result = float(json.load(f)[args.eval_metric]) - infer_output_file = os.path.join(current_output_dir, "infer_output_best-checkpoint.csv") - assert os.path.exists(infer_output_file) - # Loading the dataset from local csv or json files. - infer_input = load_dataset(args.data_file_extension, data_files={"data": data_files["infer"]})["data"] - infer_output = load_dataset("csv", data_files={"data": infer_output_file})["data"] - - if accelerator.is_main_process: - os.makedirs(next_data_dir, exist_ok=True) - shutil.copy(eval_results_file, os.path.join(output_dir, f"eval_results_iter-{iteration}.json")) - if os.path.exists(test_results_file): - shutil.copy(eval_results_file, os.path.join(output_dir, f"test_results_iter-{iteration}.json")) - create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir) - accelerator.wait_for_everyone() - - data_files["train_pseudo"] = os.path.join(next_data_dir, f"train_pseudo.{args.data_file_extension}") - - if args.eval_strategy != IntervalStrategy.NO.value: - new_eval_result = eval_result - - if best_iteration is None: - best_iteration = new_iteration - best_eval_result = new_eval_result - else: - if new_eval_result - best_eval_result > args.early_stopping_threshold: - best_iteration = new_iteration - best_eval_result = new_eval_result - early_stopping_patience_counter = 0 - else: - if new_eval_result == best_eval_result: - best_iteration = new_iteration - best_eval_result = new_eval_result - early_stopping_patience_counter += 1 - - if early_stopping_patience_counter >= args.early_stopping_patience: - should_training_stop = True - - progress_bar.update(1) - - if should_training_stop: - break - - if best_iteration is not None: - # Save the best iteration - logger.info("Best iteration: %d", best_iteration) - logger.info("Best evaluation result: %s = %f", args.eval_metric, best_eval_result) - accelerator.wait_for_everyone() - if accelerator.is_main_process: - shutil.copy( - os.path.join(output_dir, f"eval_results_iter-{iteration}.json"), - os.path.join(output_dir, "eval_results_best-iteration.json"), - ) - else: - # Assume that the last iteration is the best - logger.info("Best iteration: %d", args.max_selftrain_iterations - 1) - logger.info("Best evaluation result: %s = %f", args.eval_metric, eval_result) - accelerator.wait_for_everyone() - if accelerator.is_main_process: - shutil.copy( - os.path.join(output_dir, f"eval_results_iter-{args.max_selftrain_iterations - 1}.json"), - os.path.join(output_dir, "eval_results_best-iteration.json"), - ) diff --git a/examples/research_projects/seq2seq-distillation/README.md b/examples/research_projects/seq2seq-distillation/README.md deleted file mode 
100644 index ab79a652ed3..00000000000 --- a/examples/research_projects/seq2seq-distillation/README.md +++ /dev/null @@ -1,434 +0,0 @@ -## Sequence to Sequence Training and Evaluation - -This directory contains examples for finetuning and evaluating transformers on summarization and translation tasks. - -Author: Sam Shleifer (https://github.com/sshleifer) - -### Supported Architectures - -- `BartForConditionalGeneration` (and anything that inherits from it) -- `MarianMTModel` -- `PegasusForConditionalGeneration` -- `MBartForConditionalGeneration` -- `FSMTForConditionalGeneration` -- `T5ForConditionalGeneration` - -# Note - -⚠️ This project should be run with pytorch-lightning==1.0.4 which has a potential security vulnerability - -## Datasets - -#### XSUM - -```bash -cd examples/contrib/pytorch-lightning/seq2seq -wget https://cdn-datasets.huggingface.co/summarization/xsum.tar.gz -tar -xzvf xsum.tar.gz -export XSUM_DIR=${PWD}/xsum -``` -this should make a directory called `xsum/` with files like `test.source`. -To use your own data, copy that files format. Each article to be summarized is on its own line. - -#### CNN/DailyMail - -```bash -cd examples/contrib/pytorch-lightning/seq2seq -wget https://cdn-datasets.huggingface.co/summarization/cnn_dm_v2.tgz -tar -xzvf cnn_dm_v2.tgz # empty lines removed -mv cnn_cln cnn_dm -export CNN_DIR=${PWD}/cnn_dm -``` -this should make a directory called `cnn_dm/` with 6 files. - -#### WMT16 English-Romanian Translation Data - -download with this command: -```bash -wget https://cdn-datasets.huggingface.co/translation/wmt_en_ro.tar.gz -tar -xzvf wmt_en_ro.tar.gz -export ENRO_DIR=${PWD}/wmt_en_ro -``` -this should make a directory called `wmt_en_ro/` with 6 files. - -#### WMT English-German - -```bash -wget https://cdn-datasets.huggingface.co/translation/wmt_en_de.tgz -tar -xzvf wmt_en_de.tgz -export DATA_DIR=${PWD}/wmt_en_de -``` - -#### FSMT datasets (wmt) - -Refer to the scripts starting with `eval_` under: -https://github.com/huggingface/transformers/tree/main/scripts/fsmt - -#### Pegasus (multiple datasets) - -Multiple eval datasets are available for download from: -https://github.com/stas00/porting/tree/master/datasets/pegasus - - -#### Your Data - -If you are using your own data, it must be formatted as one directory with 6 files: -``` -train.source -train.target -val.source -val.target -test.source -test.target -``` -The `.source` files are the input, the `.target` files are the desired output. - -### Potential issues - -- native AMP (`--fp16` and no apex) may lead to a huge memory leak and require 10x gpu memory. This has been fixed in pytorch-nightly and the minimal official version to have this fix will be pytorch-1.8. Until then if you have to use mixed precision please use AMP only with pytorch-nightly or NVIDIA's apex. Reference: https://github.com/huggingface/transformers/issues/8403 - - -### Tips and Tricks - -General Tips: -- since you need to run from this folder, and likely need to modify code, the easiest workflow is fork transformers, clone your fork, and run `pip install -e .` before you get started. -- try `--freeze_encoder` or `--freeze_embeds` for faster training/larger batch size. (3hr per epoch with bs=8, see the "xsum_shared_task" command below) -- `fp16_opt_level=O1` (the default works best). -- In addition to the pytorch-lightning .ckpt checkpoint, a transformers checkpoint will be saved. -Load it with `BartForConditionalGeneration.from_pretrained(f'{output_dir}/best_tfmr)`. 
-- At the moment, `--do_predict` does not work in a multi-gpu setting. You need to use `evaluate_checkpoint` or the `run_eval.py` code. -- This warning can be safely ignored: - > "Some weights of BartForConditionalGeneration were not initialized from the model checkpoint at facebook/bart-large-xsum and are newly initialized: ['final_logits_bias']" -- Both finetuning and eval are 30% faster with `--fp16`. For that you need to [install apex](https://github.com/NVIDIA/apex#quick-start). -- Read scripts before you run them! - -Summarization Tips: -- (summ) 1 epoch at batch size 1 for bart-large takes 24 hours and requires 13GB GPU RAM with fp16 on an NVIDIA-V100. -- If you want to run experiments on improving the summarization finetuning process, try the XSUM Shared Task (below). It's faster to train than CNNDM because the summaries are shorter. -- For CNN/DailyMail, the default `val_max_target_length` and `test_max_target_length` will truncate the ground truth labels, resulting in slightly higher rouge scores. To get accurate rouge scores, you should rerun calculate_rouge on the `{output_dir}/test_generations.txt` file saved by `trainer.test()` -- `--max_target_length=60 --val_max_target_length=60 --test_max_target_length=100 ` is a reasonable setting for XSUM. -- `wandb` can be used by specifying `--logger_name wandb`. It is useful for reproducibility. Specify the environment variable `WANDB_PROJECT='hf_xsum'` to do the XSUM shared task. -- If you are finetuning on your own dataset, start from `distilbart-cnn-12-6` if you want long summaries and `distilbart-xsum-12-6` if you want short summaries. -(It rarely makes sense to start from `bart-large` unless you are a researching finetuning methods). - -**Update 2018-07-18** -Datasets: `LegacySeq2SeqDataset` will be used for all tokenizers without a `prepare_seq2seq_batch` method. Otherwise, `Seq2SeqDataset` will be used. -Future work/help wanted: A new dataset to support multilingual tasks. - - -### Finetuning Scripts -All finetuning bash scripts call finetune.py (or distillation.py) with reasonable command line arguments. They usually require extra command line arguments to work. - -To see all the possible command line options, run: - -```bash -./finetune.py --help -``` - -### Finetuning Training Params - -To override the pretrained model's training params, you can pass them to `./finetune.sh`: - -```bash -./finetune.sh \ - [...] - --encoder_layerdrop 0.1 \ - --decoder_layerdrop 0.1 \ - --dropout 0.1 \ - --attention_dropout 0.1 \ -``` - -### Summarization Finetuning -Run/modify `finetune.sh` - -The following command should work on a 16GB GPU: -```bash -./finetune.sh \ - --data_dir $XSUM_DIR \ - --train_batch_size=1 \ - --eval_batch_size=1 \ - --output_dir=xsum_results \ - --num_train_epochs 6 \ - --model_name_or_path facebook/bart-large -``` - -There is a starter finetuning script for pegasus at `finetune_pegasus_xsum.sh`. - -### Translation Finetuning - -First, follow the wmt_en_ro download instructions. -Then you can finetune mbart_cc25 on english-romanian with the following command. -**Recommendation:** Read and potentially modify the fairly opinionated defaults in `train_mbart_cc25_enro.sh` script before running it. 
- -Best performing command: -```bash -# optionally -export ENRO_DIR='wmt_en_ro' # Download instructions above -# export WANDB_PROJECT="MT" # optional -export MAX_LEN=128 -export BS=4 -./train_mbart_cc25_enro.sh --output_dir enro_finetune_baseline --label_smoothing 0.1 --fp16_opt_level=O1 --logger_name wandb --sortish_sampler -``` -This should take < 6h/epoch on a 16GB v100 and achieve test BLEU above 26 -To get results in line with fairseq, you need to do some postprocessing. (see `romanian_postprocessing.md`) - -MultiGPU command -(using 8 GPUS as an example) -```bash -export ENRO_DIR='wmt_en_ro' # Download instructions above - # export WANDB_PROJECT="MT" # optional -export MAX_LEN=128 -export BS=4 -./train_mbart_cc25_enro.sh --output_dir enro_finetune_baseline --gpus 8 --logger_name wandb -``` -### Finetuning Outputs -As you train, `output_dir` will be filled with files, that look kind of like this (comments are mine). -Some of them are metrics, some of them are checkpoints, some of them are metadata. Here is a quick tour: - -```bash -output_dir -├── best_tfmr # this is a huggingface checkpoint generated by save_pretrained. It is the same model as the PL .ckpt file below -│ ├── config.json -│ ├── merges.txt -│ ├── pytorch_model.bin -│ ├── special_tokens_map.json -│ ├── tokenizer_config.json -│ └── vocab.json -├── git_log.json # repo, branch, and commit hash -├── val_avg_rouge2=0.1984-step_count=11.ckpt # this is a pytorch lightning checkpoint associated with the best val score. (it will be called BLEU for MT) -├── metrics.json # new validation metrics will continually be appended to this -├── student # this is a huggingface checkpoint generated by SummarizationDistiller. It is the student before it gets finetuned. -│ ├── config.json -│ └── pytorch_model.bin -├── test_generations.txt -# ^^ are the summaries or translations produced by your best checkpoint on the test data. Populated when training is done -├── test_results.txt # a convenience file with the test set metrics. This data is also in metrics.json['test'] -├── hparams.pkl # the command line args passed after some light preprocessing. Should be saved fairly quickly. -``` -After training, you can recover the best checkpoint by running -```python -from transformers import AutoModelForSeq2SeqLM -model = AutoModelForSeq2SeqLM.from_pretrained(f'{output_dir}/best_tfmr') -``` - -### Converting pytorch-lightning checkpoints -pytorch lightning ``-do_predict`` often fails, after you are done training, the best way to evaluate your model is to convert it. - -This should be done for you, with a file called `{save_dir}/best_tfmr`. - -If that file doesn't exist but you have a lightning `.ckpt` file, you can run -```bash -python convert_pl_checkpoint_to_hf.py PATH_TO_CKPT randomly_initialized_hf_model_path save_dir/best_tfmr -``` -Then either `run_eval` or `run_distributed_eval` with `save_dir/best_tfmr` (see previous sections) - - -# Experimental Features -These features are harder to use and not always useful. - -### Dynamic Batch Size for MT -`finetune.py` has a command line arg `--max_tokens_per_batch` that allows batches to be dynamically sized. 
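The `Finetuning Outputs` tour above notes that `metrics.json` is continually appended to. To locate the best validation step without wandb or tensorboard, a minimal sketch (the `val_avg_<metric>` / `step_count` keys mirror the ones used by this project's tests; adjust to your validation metric):

```python
import json

with open("output_dir/metrics.json") as f:  # path from the output_dir tour above
    metrics = json.load(f)

# metrics["val"] is a list of dicts, one per validation run, appended as training progresses
best = max(metrics["val"], key=lambda d: d["val_avg_rouge2"])  # use val_avg_bleu for MT
print(f"best val ROUGE-2: {best['val_avg_rouge2']:.4f} at step_count={best.get('step_count')}")
```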
-This feature can only be used: -- with fairseq installed -- on 1 GPU -- without sortish sampler -- after calling `./save_len_file.py $tok $data_dir` - -For example, -```bash -./save_len_file.py Helsinki-NLP/opus-mt-en-ro wmt_en_ro -./dynamic_bs_example.sh --max_tokens_per_batch=2000 --output_dir benchmark_dynamic_bs -``` -splits `wmt_en_ro/train` into 11,197 uneven length batches and can finish 1 epoch in 8 minutes on a v100. - -For comparison, -```bash -./dynamic_bs_example.sh --sortish_sampler --train_batch_size 48 -``` -uses 12,723 batches of length 48 and takes slightly more time 9.5 minutes. - -The feature is still experimental, because: -+ we can make it much more robust if we have memory mapped/preprocessed datasets. -+ The speedup over sortish sampler is not that large at the moment. - -# DistilBART - -This section describes all code and artifacts from our [Paper](http://arxiv.org/abs/2010.13002) - -![DBART](https://huggingface.co/front/thumbnails/distilbart_large.png) - -+ For the CNN/DailyMail dataset, (relatively longer, more extractive summaries), we found a simple technique that works, which we call "Shrink and Fine-tune", or SFT. -you just copy alternating layers from `facebook/bart-large-cnn` and fine-tune more on the cnn/dm data. `sshleifer/distill-pegasus-cnn-16-4`, `sshleifer/distilbart-cnn-12-6` and all other checkpoints under `sshleifer` that start with `distilbart-cnn` were trained this way. -+ For the XSUM dataset, training on pseudo-labels worked best for Pegasus (`sshleifer/distill-pegasus-16-4`), while training with KD worked best for `distilbart-xsum-12-6` -+ For `sshleifer/dbart-xsum-12-3` -+ We ran 100s experiments, and didn't want to document 100s of commands. If you want a command to replicate a figure from the paper that is not documented below, feel free to ask on the [forums](https://discuss.huggingface.co/t/seq2seq-distillation-methodology-questions/1270) and tag `@sshleifer`. -+ You can see the performance tradeoffs of model sizes [here](https://docs.google.com/spreadsheets/d/1EkhDMwVO02m8jCD1cG3RoFPLicpcL1GQHTQjfvDYgIM/edit#gid=0). -and more granular timing results [here](https://docs.google.com/spreadsheets/d/1EkhDMwVO02m8jCD1cG3RoFPLicpcL1GQHTQjfvDYgIM/edit#gid=1753259047&range=B2:I23). - -### Evaluation - -use [run_distributed_eval](./run_distributed_eval.py), with the following convenient alias -```bash -deval () { - proc=$1 - m=$2 - dd=$3 - sd=$4 - shift - shift - shift - shift - python -m torch.distributed.launch --nproc_per_node=$proc run_distributed_eval.py \ - --model_name $m --save_dir $sd --data_dir $dd $@ -} -``` -On a 1 GPU system, here are four commands (that assume `xsum`, `cnn_dm` are downloaded, cmd-F for those links in this file). - -`distilBART`: -```bash -deval 1 sshleifer/distilbart-xsum-12-3 xsum dbart_12_3_xsum_eval --fp16 # --help for more choices. -deval 1 sshleifer/distilbart-cnn_dm-12-6 cnn_dm dbart_12_6_cnn_eval --fp16 -``` - -`distill-pegasus`: -```bash -deval 1 sshleifer/distill-pegasus-cnn-16-4 cnn_dm dpx_cnn_eval -deval 1 sshleifer/distill-pegasus-xsum-16-4 xsum dpx_xsum_eval -``` - -### Distillation -+ For all of the following commands, you can get roughly equivalent result and faster run times by passing `--num_beams=4`. That's not what we did for the paper. -+ Besides the KD section, you can also run commands with the built-in transformers trainer. See, for example, [builtin_trainer/train_distilbart_cnn.sh](./builtin_trainer/train_distilbart_cnn.sh). 
-+ Large performance deviations (> 5X slower or more than 0.5 Rouge-2 worse), should be reported. -+ Multi-gpu (controlled with `--gpus` should work, but might require more epochs). - -#### Recommended Workflow -+ Get your dataset in the right format. (see 6 files above). -+ Find a teacher model [Pegasus](https://huggingface.co/models?search=pegasus) (slower, better ROUGE) or `facebook/bart-large-xsum`/`facebook/bart-large-cnn` (faster, slightly lower.). -Choose the checkpoint where the corresponding dataset is most similar (or identical to) your dataset. -+ Follow the sections in order below. You can stop after SFT if you are satisfied, or move on to pseudo-labeling if you want more performance. -+ student size: If you want a close to free 50% speedup, cut the decoder in half. If you want a larger speedup, cut it in 4. -+ If your SFT run starts at a validation ROUGE-2 that is more than 10 pts below the teacher's validation ROUGE-2, you have a bug. Switching to a more expensive technique will not help. Try setting a breakpoint and looking at generation and truncation defaults/hyper-parameters, and share your experience on the forums! - - -#### Initialization -We use [make_student.py](./make_student.py) to copy alternating layers from the teacher, and save the resulting model to disk -```bash -python make_student.py facebook/bart-large-xsum --save_path dbart_xsum_12_3 -e 12 -d 3 -``` -or for `pegasus-xsum` -```bash -python make_student.py google/pegasus-xsum --save_path dpx_xsum_16_4 --e 16 --d 4 -``` -we now have an initialized student saved to `dbart_xsum_12_3`, which we will use for the following commands. -+ Extension: To replicate more complicated initialize experiments in section 6.1, or try your own. Use the `create_student_by_copying_alternating_layers` function. - -#### Pegasus -+ The following commands are written for BART and will require, at minimum, the following modifications -+ reduce batch size, and increase gradient accumulation steps so that the product `gpus * batch size * gradient_accumulation_steps = 256`. We used `--learning-rate` = 1e-4 * gradient accumulation steps. -+ don't use fp16 -+ `--tokenizer_name google/pegasus-large` - -### SFT (No Teacher Distillation) -You don't need `distillation.py`, you can just run: - -```bash -python finetune.py \ - --data_dir xsum \ - --freeze_encoder --freeze_embeds \ - --learning_rate=3e-4 \ - --do_train \ - --do_predict \ - --fp16 --fp16_opt_level=O1 \ - --val_check_interval 0.1 --n_val 1000 --eval_beams 2 --length_penalty=0.5 \ - --max_target_length=60 --val_max_target_length=60 --test_max_target_length=100 \ - --model_name_or_path dbart_xsum_12_3 \ - --train_batch_size=64 --eval_batch_size=64 \ - --sortish_sampler \ - --num_train_epochs=6 \ - --warmup_steps 500 \ - --output_dir distilbart_xsum_sft_12_3 --gpus 1 -``` - -+ Note: The command that produced `sshleifer/distilbart-cnn-12-6` is at [train_distilbart_cnn.sh](./[train_distilbart_cnn.sh) - -```bash -./train_distilbart_cnn.sh -``` - -+ Tip: You can get the same simple distillation logic by using `distillation.py --no_teacher ` followed by identical arguments as the ones in `train_distilbart_cnn.sh`. -If you are using `wandb` and comparing the two distillation methods, using this entry point will make your logs consistent, -because you will have the same hyper-parameters logged in every run. - -### Pseudo-Labeling -+ You don't need `distillation.py`. 
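Picking up the `Extension` bullet under `Initialization` above: `create_student_by_copying_alternating_layers` can also be called directly from Python instead of through `make_student.py`. A short sketch mirroring how `distillation.py` and this project's tests call it (the checkpoint name and layer counts simply repeat the CLI example above):

```python
# run from this directory so make_student is importable
from make_student import create_student_by_copying_alternating_layers

# copy alternating teacher layers into a 12-encoder / 3-decoder student
student, e_layer_ids, d_layer_ids = create_student_by_copying_alternating_layers(
    "facebook/bart-large-xsum",   # teacher checkpoint
    save_path="dbart_xsum_12_3",  # where the initialized student is written
    e=12,                         # student encoder layers to keep
    d=3,                          # student decoder layers to keep
)
print(e_layer_ids, d_layer_ids)   # which teacher layers were copied
```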
-+ Instructions to generate pseudo-labels and use pre-computed pseudo-labels can be found [here](./precomputed_pseudo_labels.md). -Simply run `finetune.py` with one of those pseudo-label datasets as `--data_dir` (`DATA`, below). - -```bash -python finetune.py \ - --teacher facebook/bart-large-xsum --data_dir DATA \ - --freeze_encoder --freeze_embeds \ - --learning_rate=3e-4 \ - --do_train \ - --do_predict \ - --fp16 --fp16_opt_level=O1 \ - --val_check_interval 0.1 --n_val 1000 --eval_beams 2 --length_penalty=0.5 \ - --max_target_length=60 --val_max_target_length=60 --test_max_target_length=100 \ - --model_name_or_path dbart_xsum_12_3 \ - --train_batch_size=32 --eval_batch_size=32 \ - --sortish_sampler \ - --num_train_epochs=5 \ - --warmup_steps 500 \ - --output_dir dbart_xsum_12_3_PL --gpus 1 --logger_name wandb -``` - - - -To combine datasets, as in Section 6.2, try something like: -```bash -curl -S https://cdn-datasets.huggingface.co/pseudo/xsum/bart_xsum_pl.tgz | tar -xvz -C . -curl -S https://cdn-datasets.huggingface.co/pseudo/xsum/pegasus_xsum.tgz | tar -xvz -C . -curl -S https://cdn-datasets.huggingface.co/summarization/xsum.tar.gz | tar -xvz -C . -mkdir all_pl -cat bart_xsum_pl/train.source pegasus_xsum/train.source xsum/train.source > all_pl/train.source -cat bart_xsum_pl/train.target pegasus_xsum/train.target xsum/train.target > all_pl/train.target -cp xsum/val* all_pl -cp xsum/test* all_pl -``` -then use `all_pl` as DATA in the command above. - -#### Direct Knowledge Distillation (KD) -+ In this method, we use try to enforce that the student and teacher produce similar encoder_outputs, logits, and hidden_states using `SummarizationDistiller`. -+ This method was used for `sshleifer/distilbart-xsum-12-6`, `6-6`, and `9-6` checkpoints were produced. -+ You must use [`distillation.py`](./distillation.py). Note that this command initializes the student for you. - -The command that produced `sshleifer/distilbart-xsum-12-6` is at [./train_distilbart_xsum.sh](train_distilbart_xsum.sh) -```bash -./train_distilbart_xsum.sh --logger_name wandb --gpus 1 -``` - -+ Expected ROUGE-2 between 21.3 and 21.6, run time ~13H. -+ direct KD + Pegasus is VERY slow and works best with `--supervise_forward --normalize_hidden`. - - - -### Citation - -```bibtex -@misc{shleifer2020pretrained, - title={Pre-trained Summarization Distillation}, - author={Sam Shleifer and Alexander M. Rush}, - year={2020}, - eprint={2010.13002}, - archivePrefix={arXiv}, - primaryClass={cs.CL} -} -@article{Wolf2019HuggingFacesTS, - title={HuggingFace's Transformers: State-of-the-art Natural Language Processing}, - author={Thomas Wolf and Lysandre Debut and Victor Sanh and Julien Chaumond and Clement Delangue and Anthony Moi and Pierric Cistac and Tim Rault and Rémi Louf and Morgan Funtowicz and Joe Davison and Sam Shleifer and Patrick von Platen and Clara Ma and Yacine Jernite and Julien Plu and Canwen Xu and Teven Le Scao and Sylvain Gugger and Mariama Drame and Quentin Lhoest and Alexander M. 
Rush}, - journal={ArXiv}, - year={2019}, - volume={abs/1910.03771} -} -``` diff --git a/examples/research_projects/seq2seq-distillation/_test_bash_script.py b/examples/research_projects/seq2seq-distillation/_test_bash_script.py deleted file mode 100644 index fa84a60c0c8..00000000000 --- a/examples/research_projects/seq2seq-distillation/_test_bash_script.py +++ /dev/null @@ -1,203 +0,0 @@ -#!/usr/bin/env python - -import argparse -import os -import sys -from unittest.mock import patch - -import pytorch_lightning as pl -import timeout_decorator -import torch -from distillation import SummarizationDistiller, distill_main -from finetune import SummarizationModule, main - -from transformers import MarianMTModel -from transformers.file_utils import cached_path -from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow -from utils import load_json - - -MARIAN_MODEL = "sshleifer/mar_enro_6_3_student" - - -class TestMbartCc25Enro(TestCasePlus): - def setUp(self): - super().setUp() - - data_cached = cached_path( - "https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz", - extract_compressed_file=True, - ) - self.data_dir = f"{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k" - - @slow - @require_torch_gpu - def test_model_download(self): - """This warms up the cache so that we can time the next test without including download time, which varies between machines.""" - MarianMTModel.from_pretrained(MARIAN_MODEL) - - # @timeout_decorator.timeout(1200) - @slow - @require_torch_gpu - def test_train_mbart_cc25_enro_script(self): - env_vars_to_replace = { - "$MAX_LEN": 64, - "$BS": 64, - "$GAS": 1, - "$ENRO_DIR": self.data_dir, - "facebook/mbart-large-cc25": MARIAN_MODEL, - # "val_check_interval=0.25": "val_check_interval=1.0", - "--learning_rate=3e-5": "--learning_rate 3e-4", - "--num_train_epochs 6": "--num_train_epochs 1", - } - - # Clean up bash script - bash_script = (self.test_file_dir / "train_mbart_cc25_enro.sh").open().read().split("finetune.py")[1].strip() - bash_script = bash_script.replace("\\\n", "").strip().replace('"$@"', "") - for k, v in env_vars_to_replace.items(): - bash_script = bash_script.replace(k, str(v)) - output_dir = self.get_auto_remove_tmp_dir() - - # bash_script = bash_script.replace("--fp16 ", "") - args = f""" - --output_dir {output_dir} - --tokenizer_name Helsinki-NLP/opus-mt-en-ro - --sortish_sampler - --do_predict - --gpus 1 - --freeze_encoder - --n_train 40000 - --n_val 500 - --n_test 500 - --fp16_opt_level O1 - --num_sanity_val_steps 0 - --eval_beams 2 - """.split() - # XXX: args.gpus > 1 : handle multi_gpu in the future - - testargs = ["finetune.py"] + bash_script.split() + args - with patch.object(sys, "argv", testargs): - parser = argparse.ArgumentParser() - parser = pl.Trainer.add_argparse_args(parser) - parser = SummarizationModule.add_model_specific_args(parser, os.getcwd()) - args = parser.parse_args() - model = main(args) - - # Check metrics - metrics = load_json(model.metrics_save_path) - first_step_stats = metrics["val"][0] - last_step_stats = metrics["val"][-1] - self.assertEqual(len(metrics["val"]), (args.max_epochs / args.val_check_interval)) - assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"], float) - - self.assertGreater(last_step_stats["val_avg_gen_time"], 0.01) - # model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?) - self.assertLessEqual(last_step_stats["val_avg_gen_time"], 1.0) - - # test learning requirements: - - # 1. 
BLEU improves over the course of training by more than 2 pts - self.assertGreater(last_step_stats["val_avg_bleu"] - first_step_stats["val_avg_bleu"], 2) - - # 2. BLEU finishes above 17 - self.assertGreater(last_step_stats["val_avg_bleu"], 17) - - # 3. test BLEU and val BLEU within ~1.1 pt. - self.assertLess(abs(metrics["val"][-1]["val_avg_bleu"] - metrics["test"][-1]["test_avg_bleu"]), 1.1) - - # check lightning ckpt can be loaded and has a reasonable statedict - contents = os.listdir(output_dir) - ckpt_path = [x for x in contents if x.endswith(".ckpt")][0] - full_path = os.path.join(args.output_dir, ckpt_path) - ckpt = torch.load(full_path, map_location="cpu") - expected_key = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight" - assert expected_key in ckpt["state_dict"] - assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.float32 - - # TODO: turn on args.do_predict when PL bug fixed. - if args.do_predict: - contents = {os.path.basename(p) for p in contents} - assert "test_generations.txt" in contents - assert "test_results.txt" in contents - # assert len(metrics["val"]) == desired_n_evals - assert len(metrics["test"]) == 1 - - -class TestDistilMarianNoTeacher(TestCasePlus): - @timeout_decorator.timeout(600) - @slow - @require_torch_gpu - def test_opus_mt_distill_script(self): - data_dir = f"{self.test_file_dir_str}/test_data/wmt_en_ro" - env_vars_to_replace = { - "--fp16_opt_level=O1": "", - "$MAX_LEN": 128, - "$BS": 16, - "$GAS": 1, - "$ENRO_DIR": data_dir, - "$m": "sshleifer/student_marian_en_ro_6_1", - "val_check_interval=0.25": "val_check_interval=1.0", - } - - # Clean up bash script - bash_script = ( - (self.test_file_dir / "distil_marian_no_teacher.sh").open().read().split("distillation.py")[1].strip() - ) - bash_script = bash_script.replace("\\\n", "").strip().replace('"$@"', "") - bash_script = bash_script.replace("--fp16 ", " ") - - for k, v in env_vars_to_replace.items(): - bash_script = bash_script.replace(k, str(v)) - output_dir = self.get_auto_remove_tmp_dir() - bash_script = bash_script.replace("--fp16", "") - epochs = 6 - testargs = ( - ["distillation.py"] - + bash_script.split() - + [ - f"--output_dir={output_dir}", - "--gpus=1", - "--learning_rate=1e-3", - f"--num_train_epochs={epochs}", - "--warmup_steps=10", - "--val_check_interval=1.0", - "--do_predict", - ] - ) - with patch.object(sys, "argv", testargs): - parser = argparse.ArgumentParser() - parser = pl.Trainer.add_argparse_args(parser) - parser = SummarizationDistiller.add_model_specific_args(parser, os.getcwd()) - args = parser.parse_args() - # assert args.gpus == gpus THIS BREAKS for multi_gpu - - model = distill_main(args) - - # Check metrics - metrics = load_json(model.metrics_save_path) - first_step_stats = metrics["val"][0] - last_step_stats = metrics["val"][-1] - assert len(metrics["val"]) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check - - assert last_step_stats["val_avg_gen_time"] >= 0.01 - - assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # model learned nothing - assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved. 
- assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"], float) - - # check lightning ckpt can be loaded and has a reasonable statedict - contents = os.listdir(output_dir) - ckpt_path = [x for x in contents if x.endswith(".ckpt")][0] - full_path = os.path.join(args.output_dir, ckpt_path) - ckpt = torch.load(full_path, map_location="cpu") - expected_key = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight" - assert expected_key in ckpt["state_dict"] - assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.float32 - - # TODO: turn on args.do_predict when PL bug fixed. - if args.do_predict: - contents = {os.path.basename(p) for p in contents} - assert "test_generations.txt" in contents - assert "test_results.txt" in contents - # assert len(metrics["val"]) == desired_n_evals - assert len(metrics["test"]) == 1 diff --git a/examples/research_projects/seq2seq-distillation/_test_make_student.py b/examples/research_projects/seq2seq-distillation/_test_make_student.py deleted file mode 100644 index 73df66315cb..00000000000 --- a/examples/research_projects/seq2seq-distillation/_test_make_student.py +++ /dev/null @@ -1,40 +0,0 @@ -import tempfile -import unittest - -from make_student import create_student_by_copying_alternating_layers - -from transformers import AutoConfig -from transformers.file_utils import cached_property -from transformers.testing_utils import require_torch - - -TINY_BART = "sshleifer/bart-tiny-random" -TINY_T5 = "patrickvonplaten/t5-tiny-random" - - -@require_torch -class MakeStudentTester(unittest.TestCase): - @cached_property - def teacher_config(self): - return AutoConfig.from_pretrained(TINY_BART) - - def test_valid_t5(self): - student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=1) - self.assertEqual(student.config.num_hidden_layers, 1) - - def test_asymmetric_t5(self): - student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=None) - - def test_same_decoder_small_encoder(self): - student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=None) - self.assertEqual(student.config.encoder_layers, 1) - self.assertEqual(student.config.decoder_layers, self.teacher_config.encoder_layers) - - def test_small_enc_small_dec(self): - student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=1) - self.assertEqual(student.config.encoder_layers, 1) - self.assertEqual(student.config.decoder_layers, 1) - - def test_raises_assert(self): - with self.assertRaises(AssertionError): - create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=None, d=None) diff --git a/examples/research_projects/seq2seq-distillation/_test_seq2seq_examples.py b/examples/research_projects/seq2seq-distillation/_test_seq2seq_examples.py deleted file mode 100644 index 0ee4dd8afe1..00000000000 --- a/examples/research_projects/seq2seq-distillation/_test_seq2seq_examples.py +++ /dev/null @@ -1,444 +0,0 @@ -import argparse -import logging -import os -import sys -import tempfile -from pathlib import Path - -import lightning_base -import pytest -import pytorch_lightning as pl -import torch -from convert_pl_checkpoint_to_hf import convert_pl_to_hf -from distillation import distill_main -from finetune import SummarizationModule, main -from huggingface_hub import list_models -from parameterized import parameterized -from run_eval import generate_summaries_or_translations -from torch 
import nn - -from transformers import AutoConfig, AutoModelForSeq2SeqLM -from transformers.testing_utils import CaptureStderr, CaptureStdout, TestCasePlus, require_torch_gpu, slow -from utils import label_smoothed_nll_loss, lmap, load_json - - -logging.basicConfig(level=logging.DEBUG) - -logger = logging.getLogger() -CUDA_AVAILABLE = torch.cuda.is_available() -CHEAP_ARGS = { - "max_tokens_per_batch": None, - "supervise_forward": True, - "normalize_hidden": True, - "label_smoothing": 0.2, - "eval_max_gen_length": None, - "eval_beams": 1, - "val_metric": "loss", - "save_top_k": 1, - "adafactor": True, - "early_stopping_patience": 2, - "logger_name": "default", - "length_penalty": 0.5, - "cache_dir": "", - "task": "summarization", - "num_workers": 2, - "alpha_hid": 0, - "freeze_embeds": True, - "enc_only": False, - "tgt_suffix": "", - "resume_from_checkpoint": None, - "sortish_sampler": True, - "student_decoder_layers": 1, - "val_check_interval": 1.0, - "output_dir": "", - "fp16": False, # TODO(SS): set this to CUDA_AVAILABLE if ci installs apex or start using native amp - "no_teacher": False, - "fp16_opt_level": "O1", - "gpus": 1 if CUDA_AVAILABLE else 0, - "n_tpu_cores": 0, - "max_grad_norm": 1.0, - "do_train": True, - "do_predict": True, - "accumulate_grad_batches": 1, - "server_ip": "", - "server_port": "", - "seed": 42, - "model_name_or_path": "sshleifer/bart-tiny-random", - "config_name": "", - "tokenizer_name": "facebook/bart-large", - "do_lower_case": False, - "learning_rate": 0.3, - "lr_scheduler": "linear", - "weight_decay": 0.0, - "adam_epsilon": 1e-08, - "warmup_steps": 0, - "max_epochs": 1, - "train_batch_size": 2, - "eval_batch_size": 2, - "max_source_length": 12, - "max_target_length": 12, - "val_max_target_length": 12, - "test_max_target_length": 12, - "fast_dev_run": False, - "no_cache": False, - "n_train": -1, - "n_val": -1, - "n_test": -1, - "student_encoder_layers": 1, - "freeze_encoder": False, - "auto_scale_batch_size": False, - "overwrite_output_dir": False, - "student": None, -} - - -def _dump_articles(path: Path, articles: list): - content = "\n".join(articles) - Path(path).open("w").writelines(content) - - -ARTICLES = [" Sam ate lunch today.", "Sams lunch ingredients."] -SUMMARIES = ["A very interesting story about what I ate for lunch.", "Avocado, celery, turkey, coffee"] -T5_TINY = "patrickvonplaten/t5-tiny-random" -T5_TINIER = "sshleifer/t5-tinier-random" -BART_TINY = "sshleifer/bart-tiny-random" -MBART_TINY = "sshleifer/tiny-mbart" -MARIAN_TINY = "sshleifer/tiny-marian-en-de" -FSMT_TINY = "stas/tiny-wmt19-en-de" - - -stream_handler = logging.StreamHandler(sys.stdout) -logger.addHandler(stream_handler) -logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks - - -def make_test_data_dir(tmp_dir): - for split in ["train", "val", "test"]: - _dump_articles(os.path.join(tmp_dir, f"{split}.source"), ARTICLES) - _dump_articles(os.path.join(tmp_dir, f"{split}.target"), SUMMARIES) - return tmp_dir - - -class TestSummarizationDistiller(TestCasePlus): - @classmethod - def setUpClass(cls): - logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks - return cls - - @slow - @require_torch_gpu - def test_hub_configs(self): - """I put require_torch_gpu cause I only want this to run with self-scheduled.""" - - model_list = list_models() - org = "sshleifer" - model_ids = [x.modelId for x in model_list if x.modelId.startswith(org)] - allowed_to_be_broken = ["sshleifer/blenderbot-3B", "sshleifer/blenderbot-90M"] - failures = [] - for m 
in model_ids: - if m in allowed_to_be_broken: - continue - try: - AutoConfig.from_pretrained(m) - except Exception: - failures.append(m) - assert not failures, f"The following models could not be loaded through AutoConfig: {failures}" - - def test_distill_no_teacher(self): - updates = {"student_encoder_layers": 2, "student_decoder_layers": 1, "no_teacher": True} - self._test_distiller_cli(updates) - - def test_distill_checkpointing_with_teacher(self): - updates = { - "student_encoder_layers": 2, - "student_decoder_layers": 1, - "max_epochs": 4, - "val_check_interval": 0.25, - "alpha_hid": 2.0, - "model_name_or_path": "IGNORE_THIS_IT_DOESNT_GET_USED", - } - model = self._test_distiller_cli(updates, check_contents=False) - - ckpts = list(Path(model.output_dir).glob("*.ckpt")) - self.assertEqual(1, len(ckpts)) - transformer_ckpts = list(Path(model.output_dir).glob("**/*.bin")) - self.assertEqual(len(transformer_ckpts), 2) - examples = lmap(str.strip, Path(model.hparams.data_dir).joinpath("test.source").open().readlines()) - out_path = tempfile.mktemp() # XXX: not being cleaned up - generate_summaries_or_translations(examples, out_path, str(model.output_dir / "best_tfmr")) - self.assertTrue(Path(out_path).exists()) - - out_path_new = self.get_auto_remove_tmp_dir() - convert_pl_to_hf(ckpts[0], transformer_ckpts[0].parent, out_path_new) - assert os.path.exists(os.path.join(out_path_new, "pytorch_model.bin")) - - def test_loss_fn(self): - model = AutoModelForSeq2SeqLM.from_pretrained(BART_TINY) - input_ids, mask = model.dummy_inputs["input_ids"], model.dummy_inputs["attention_mask"] - target_ids = torch.tensor([[0, 4, 8, 2], [0, 8, 2, 1]], dtype=torch.long, device=model.device) - decoder_input_ids = target_ids[:, :-1].contiguous() # Why this line? - lm_labels = target_ids[:, 1:].clone() # why clone? 
- model_computed_loss = model( - input_ids, attention_mask=mask, decoder_input_ids=decoder_input_ids, labels=lm_labels, use_cache=False - ).loss - - logits = model(input_ids, attention_mask=mask, decoder_input_ids=decoder_input_ids, use_cache=False).logits - - lprobs = nn.functional.log_softmax(logits, dim=-1) - smoothed_loss, nll_loss = label_smoothed_nll_loss( - lprobs, lm_labels, 0.1, ignore_index=model.config.pad_token_id - ) - with self.assertRaises(AssertionError): - # TODO: understand why this breaks - self.assertEqual(nll_loss, model_computed_loss) - - def test_distill_mbart(self): - updates = { - "student_encoder_layers": 2, - "student_decoder_layers": 1, - "num_train_epochs": 4, - "val_check_interval": 0.25, - "alpha_hid": 2.0, - "task": "translation", - "model_name_or_path": "IGNORE_THIS_IT_DOESNT_GET_USED", - "tokenizer_name": MBART_TINY, - "teacher": MBART_TINY, - "src_lang": "en_XX", - "tgt_lang": "ro_RO", - } - model = self._test_distiller_cli(updates, check_contents=False) - assert model.model.config.model_type == "mbart" - - ckpts = list(Path(model.output_dir).glob("*.ckpt")) - self.assertEqual(1, len(ckpts)) - transformer_ckpts = list(Path(model.output_dir).glob("**/*.bin")) - all_files = list(Path(model.output_dir).glob("best_tfmr/*")) - assert len(all_files) > 2 - self.assertEqual(len(transformer_ckpts), 2) - - def test_distill_t5(self): - updates = { - "student_encoder_layers": 1, - "student_decoder_layers": 1, - "alpha_hid": 2.0, - "teacher": T5_TINY, - "model_name_or_path": T5_TINY, - "tokenizer_name": T5_TINY, - } - self._test_distiller_cli(updates) - - def test_distill_different_base_models(self): - updates = { - "teacher": T5_TINY, - "student": T5_TINIER, - "model_name_or_path": T5_TINIER, - "tokenizer_name": T5_TINIER, - } - self._test_distiller_cli(updates) - - def _test_distiller_cli(self, updates, check_contents=True): - default_updates = { - "label_smoothing": 0.0, - "early_stopping_patience": -1, - "train_batch_size": 1, - "eval_batch_size": 2, - "max_epochs": 2, - "alpha_mlm": 0.2, - "alpha_ce": 0.8, - "do_predict": True, - "model_name_or_path": "sshleifer/tinier_bart", - "teacher": CHEAP_ARGS["model_name_or_path"], - "val_check_interval": 0.5, - } - default_updates.update(updates) - args_d: dict = CHEAP_ARGS.copy() - tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()) - output_dir = self.get_auto_remove_tmp_dir() - - args_d.update(data_dir=tmp_dir, output_dir=output_dir, **default_updates) - model = distill_main(argparse.Namespace(**args_d)) - if not check_contents: - return model - contents = os.listdir(output_dir) - contents = {os.path.basename(p) for p in contents} - ckpt_files = [p for p in contents if p.endswith("ckpt")] - assert len(ckpt_files) > 0 - - self.assertIn("test_generations.txt", contents) - self.assertIn("test_results.txt", contents) - - metrics = load_json(model.metrics_save_path) - last_step_stats = metrics["val"][-1] - self.assertGreaterEqual(last_step_stats["val_avg_gen_time"], 0.01) - self.assertGreaterEqual(1.0, last_step_stats["val_avg_gen_time"]) - self.assertIsInstance(last_step_stats[f"val_avg_{model.val_metric}"], float) - desired_n_evals = int(args_d["max_epochs"] * (1 / args_d["val_check_interval"]) + 1) - self.assertEqual(len(metrics["val"]), desired_n_evals) - self.assertEqual(len(metrics["test"]), 1) - return model - - -class TestTheRest(TestCasePlus): - @parameterized.expand( - [T5_TINY, BART_TINY, MBART_TINY, MARIAN_TINY, FSMT_TINY], - ) - def test_finetune(self, model): - args_d: dict = CHEAP_ARGS.copy() 
- task = "translation" if model in [MBART_TINY, MARIAN_TINY, FSMT_TINY] else "summarization" - args_d["label_smoothing"] = 0.1 if task == "translation" else 0 - - tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()) - output_dir = self.get_auto_remove_tmp_dir() - args_d.update( - data_dir=tmp_dir, - model_name_or_path=model, - tokenizer_name=None, - train_batch_size=2, - eval_batch_size=2, - output_dir=output_dir, - do_predict=True, - task=task, - src_lang="en_XX", - tgt_lang="ro_RO", - freeze_encoder=True, - freeze_embeds=True, - ) - assert "n_train" in args_d - args = argparse.Namespace(**args_d) - module = main(args) - - input_embeds = module.model.get_input_embeddings() - assert not input_embeds.weight.requires_grad - if model == T5_TINY: - lm_head = module.model.lm_head - assert not lm_head.weight.requires_grad - assert (lm_head.weight == input_embeds.weight).all().item() - elif model == FSMT_TINY: - fsmt = module.model.model - embed_pos = fsmt.decoder.embed_positions - assert not embed_pos.weight.requires_grad - assert not fsmt.decoder.embed_tokens.weight.requires_grad - # check that embeds are not the same - assert fsmt.decoder.embed_tokens != fsmt.encoder.embed_tokens - else: - bart = module.model.model - embed_pos = bart.decoder.embed_positions - assert not embed_pos.weight.requires_grad - assert not bart.shared.weight.requires_grad - # check that embeds are the same - assert bart.decoder.embed_tokens == bart.encoder.embed_tokens - assert bart.decoder.embed_tokens == bart.shared - - example_batch = load_json(module.output_dir / "text_batch.json") - assert isinstance(example_batch, dict) - assert len(example_batch) >= 4 - - def test_finetune_extra_model_args(self): - args_d: dict = CHEAP_ARGS.copy() - - task = "summarization" - tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()) - - args_d.update( - data_dir=tmp_dir, - tokenizer_name=None, - train_batch_size=2, - eval_batch_size=2, - do_predict=False, - task=task, - src_lang="en_XX", - tgt_lang="ro_RO", - freeze_encoder=True, - freeze_embeds=True, - ) - - # test models whose config includes the extra_model_args - model = BART_TINY - output_dir = self.get_auto_remove_tmp_dir() - args_d1 = args_d.copy() - args_d1.update( - model_name_or_path=model, - output_dir=output_dir, - ) - extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout") - for p in extra_model_params: - args_d1[p] = 0.5 - args = argparse.Namespace(**args_d1) - model = main(args) - for p in extra_model_params: - assert getattr(model.config, p) == 0.5, f"failed to override the model config for param {p}" - - # test models whose config doesn't include the extra_model_args - model = T5_TINY - output_dir = self.get_auto_remove_tmp_dir() - args_d2 = args_d.copy() - args_d2.update( - model_name_or_path=model, - output_dir=output_dir, - ) - unsupported_param = "encoder_layerdrop" - args_d2[unsupported_param] = 0.5 - args = argparse.Namespace(**args_d2) - with pytest.raises(Exception) as excinfo: - model = main(args) - assert str(excinfo.value) == f"model config doesn't have a `{unsupported_param}` attribute" - - def test_finetune_lr_schedulers(self): - args_d: dict = CHEAP_ARGS.copy() - - task = "summarization" - tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()) - - model = BART_TINY - output_dir = self.get_auto_remove_tmp_dir() - - args_d.update( - data_dir=tmp_dir, - model_name_or_path=model, - output_dir=output_dir, - tokenizer_name=None, - train_batch_size=2, - eval_batch_size=2, - 
do_predict=False, - task=task, - src_lang="en_XX", - tgt_lang="ro_RO", - freeze_encoder=True, - freeze_embeds=True, - ) - - # emulate finetune.py - parser = argparse.ArgumentParser() - parser = pl.Trainer.add_argparse_args(parser) - parser = SummarizationModule.add_model_specific_args(parser, os.getcwd()) - args = {"--help": True} - - # --help test - with pytest.raises(SystemExit) as excinfo: - with CaptureStdout() as cs: - args = parser.parse_args(args) - assert False, "--help is expected to sys.exit" - assert excinfo.type is SystemExit - expected = lightning_base.arg_to_scheduler_metavar - assert expected in cs.out, "--help is expected to list the supported schedulers" - - # --lr_scheduler=non_existing_scheduler test - unsupported_param = "non_existing_scheduler" - args = {f"--lr_scheduler={unsupported_param}"} - with pytest.raises(SystemExit) as excinfo: - with CaptureStderr() as cs: - args = parser.parse_args(args) - assert False, "invalid argument is expected to sys.exit" - assert excinfo.type is SystemExit - expected = f"invalid choice: '{unsupported_param}'" - assert expected in cs.err, f"should have bailed on invalid choice of scheduler {unsupported_param}" - - # --lr_scheduler=existing_scheduler test - supported_param = "cosine" - args_d1 = args_d.copy() - args_d1["lr_scheduler"] = supported_param - args = argparse.Namespace(**args_d1) - model = main(args) - assert ( - getattr(model.hparams, "lr_scheduler") == supported_param - ), f"lr_scheduler={supported_param} shouldn't fail" diff --git a/examples/research_projects/seq2seq-distillation/_test_seq2seq_examples_multi_gpu.py b/examples/research_projects/seq2seq-distillation/_test_seq2seq_examples_multi_gpu.py deleted file mode 100644 index 9eeb3b30d39..00000000000 --- a/examples/research_projects/seq2seq-distillation/_test_seq2seq_examples_multi_gpu.py +++ /dev/null @@ -1,163 +0,0 @@ -# as due to their complexity multi-gpu tests could impact other tests, and to aid debug we have those in a separate module. 
- -import os -import sys -from pathlib import Path - -import torch - -from transformers.testing_utils import TestCasePlus, execute_subprocess_async, require_torch_multi_gpu -from utils import load_json - - -CUDA_AVAILABLE = torch.cuda.is_available() -ARTICLES = [" Sam ate lunch today.", "Sams lunch ingredients."] -SUMMARIES = ["A very interesting story about what I ate for lunch.", "Avocado, celery, turkey, coffee"] -CHEAP_ARGS = { - "max_tokens_per_batch": None, - "supervise_forward": True, - "normalize_hidden": True, - "label_smoothing": 0.2, - "eval_max_gen_length": None, - "eval_beams": 1, - "val_metric": "loss", - "save_top_k": 1, - "adafactor": True, - "early_stopping_patience": 2, - "logger_name": "default", - "length_penalty": 0.5, - "cache_dir": "", - "task": "summarization", - "num_workers": 2, - "alpha_hid": 0, - "freeze_embeds": True, - "enc_only": False, - "tgt_suffix": "", - "resume_from_checkpoint": None, - "sortish_sampler": True, - "student_decoder_layers": 1, - "val_check_interval": 1.0, - "output_dir": "", - "fp16": False, # TODO(SS): set this to CUDA_AVAILABLE if ci installs apex or start using native amp - "no_teacher": False, - "fp16_opt_level": "O1", - "gpus": 1 if CUDA_AVAILABLE else 0, - "n_tpu_cores": 0, - "max_grad_norm": 1.0, - "do_train": True, - "do_predict": True, - "accumulate_grad_batches": 1, - "server_ip": "", - "server_port": "", - "seed": 42, - "model_name_or_path": "sshleifer/bart-tiny-random", - "config_name": "", - "tokenizer_name": "facebook/bart-large", - "do_lower_case": False, - "learning_rate": 0.3, - "lr_scheduler": "linear", - "weight_decay": 0.0, - "adam_epsilon": 1e-08, - "warmup_steps": 0, - "max_epochs": 1, - "train_batch_size": 2, - "eval_batch_size": 2, - "max_source_length": 12, - "max_target_length": 12, - "val_max_target_length": 12, - "test_max_target_length": 12, - "fast_dev_run": False, - "no_cache": False, - "n_train": -1, - "n_val": -1, - "n_test": -1, - "student_encoder_layers": 1, - "freeze_encoder": False, - "auto_scale_batch_size": False, - "overwrite_output_dir": False, - "student": None, -} - - -def _dump_articles(path: Path, articles: list): - content = "\n".join(articles) - Path(path).open("w").writelines(content) - - -def make_test_data_dir(tmp_dir): - for split in ["train", "val", "test"]: - _dump_articles(os.path.join(tmp_dir, f"{split}.source"), ARTICLES) - _dump_articles(os.path.join(tmp_dir, f"{split}.target"), SUMMARIES) - return tmp_dir - - -class TestSummarizationDistillerMultiGPU(TestCasePlus): - @classmethod - def setUpClass(cls): - return cls - - @require_torch_multi_gpu - def test_multi_gpu(self): - updates = { - "no_teacher": True, - "freeze_encoder": True, - "gpus": 2, - "overwrite_output_dir": True, - "sortish_sampler": True, - } - self._test_distiller_cli_fork(updates, check_contents=False) - - def _test_distiller_cli_fork(self, updates, check_contents=True): - default_updates = { - "label_smoothing": 0.0, - "early_stopping_patience": -1, - "train_batch_size": 1, - "eval_batch_size": 2, - "max_epochs": 2, - "alpha_mlm": 0.2, - "alpha_ce": 0.8, - "do_predict": True, - "model_name_or_path": "sshleifer/tinier_bart", - "teacher": CHEAP_ARGS["model_name_or_path"], - "val_check_interval": 0.5, - } - default_updates.update(updates) - args_d: dict = CHEAP_ARGS.copy() - tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()) - output_dir = self.get_auto_remove_tmp_dir() - args_d.update(data_dir=tmp_dir, output_dir=output_dir, **default_updates) - - def convert(k, v): - if k in ["tgt_suffix", 
"server_ip", "server_port", "out", "n_tpu_cores"]: - return "" - if v is False or v is None: - return "" - if v is True: # or len(str(v))==0: - return f"--{k}" - return f"--{k}={v}" - - cli_args = [x for x in (convert(k, v) for k, v in args_d.items()) if len(x)] - cmd = [sys.executable, f"{self.test_file_dir}/distillation.py"] + cli_args - execute_subprocess_async(cmd, env=self.get_env()) - - contents = os.listdir(output_dir) - contents = {os.path.basename(p) for p in contents} - ckpt_files = [p for p in contents if p.endswith("ckpt")] - assert len(ckpt_files) > 0 - - self.assertIn("test_generations.txt", contents) - self.assertIn("test_results.txt", contents) - - # get the following from the module, (we don't have access to `model` here) - metrics_save_path = os.path.join(output_dir, "metrics.json") - val_metric = "rouge2" - - metrics = load_json(metrics_save_path) - # {'test': [{'test_avg_loss': 10.63731575012207, 'test_avg_rouge1': 0.0, 'test_avg_rouge2': 0.0, 'test_avg_rougeL': 0.0, 'test_avg_gen_time': 0.1822289228439331, 'test_avg_gen_len': 142.0, 'step_count': 1}]} - print(metrics) - last_step_stats = metrics["val"][-1] - self.assertGreaterEqual(last_step_stats["val_avg_gen_time"], 0.01) - self.assertIsInstance(last_step_stats[f"val_avg_{val_metric}"], float) - self.assertEqual(len(metrics["test"]), 1) - desired_n_evals = int(args_d["max_epochs"] * (1 / args_d["val_check_interval"]) / 2 + 1) - self.assertEqual(len(metrics["val"]), desired_n_evals) diff --git a/examples/research_projects/seq2seq-distillation/callbacks.py b/examples/research_projects/seq2seq-distillation/callbacks.py deleted file mode 100644 index 6f6ed5dd58a..00000000000 --- a/examples/research_projects/seq2seq-distillation/callbacks.py +++ /dev/null @@ -1,116 +0,0 @@ -import logging -from pathlib import Path - -import numpy as np -import pytorch_lightning as pl -import torch -from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint -from pytorch_lightning.utilities import rank_zero_only - -from utils import save_json - - -def count_trainable_parameters(model): - model_parameters = filter(lambda p: p.requires_grad, model.parameters()) - params = sum([np.prod(p.size()) for p in model_parameters]) - return params - - -logger = logging.getLogger(__name__) - - -class Seq2SeqLoggingCallback(pl.Callback): - def on_batch_end(self, trainer, pl_module): - lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)} - pl_module.logger.log_metrics(lrs) - - @rank_zero_only - def _write_logs( - self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True - ) -> None: - logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****") - metrics = trainer.callback_metrics - trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]}) - # Log results - od = Path(pl_module.hparams.output_dir) - if type_path == "test": - results_file = od / "test_results.txt" - generations_file = od / "test_generations.txt" - else: - # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json - # If people want this it will be easy enough to add back. 
- results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt" - generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt" - results_file.parent.mkdir(exist_ok=True) - generations_file.parent.mkdir(exist_ok=True) - with open(results_file, "a+") as writer: - for key in sorted(metrics): - if key in ["log", "progress_bar", "preds"]: - continue - val = metrics[key] - if isinstance(val, torch.Tensor): - val = val.item() - msg = f"{key}: {val:.6f}\n" - writer.write(msg) - - if not save_generations: - return - - if "preds" in metrics: - content = "\n".join(metrics["preds"]) - generations_file.open("w+").write(content) - - @rank_zero_only - def on_train_start(self, trainer, pl_module): - try: - npars = pl_module.model.model.num_parameters() - except AttributeError: - npars = pl_module.model.num_parameters() - - n_trainable_pars = count_trainable_parameters(pl_module) - # mp stands for million parameters - trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6}) - - @rank_zero_only - def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule): - save_json(pl_module.metrics, pl_module.metrics_save_path) - return self._write_logs(trainer, pl_module, "test") - - @rank_zero_only - def on_validation_end(self, trainer: pl.Trainer, pl_module): - save_json(pl_module.metrics, pl_module.metrics_save_path) - # Uncommenting this will save val generations - # return self._write_logs(trainer, pl_module, "valid") - - -def get_checkpoint_callback(output_dir, metric, save_top_k=1, lower_is_better=False): - """Saves the best model by validation ROUGE2 score.""" - if metric == "rouge2": - exp = "{val_avg_rouge2:.4f}-{step_count}" - elif metric == "bleu": - exp = "{val_avg_bleu:.4f}-{step_count}" - elif metric == "loss": - exp = "{val_avg_loss:.4f}-{step_count}" - else: - raise NotImplementedError( - f"seq2seq callbacks only support rouge2, bleu and loss, got {metric}, You can make your own by adding to" - " this function." - ) - - checkpoint_callback = ModelCheckpoint( - dirpath=output_dir, - filename=exp, - monitor=f"val_{metric}", - mode="min" if "loss" in metric else "max", - save_top_k=save_top_k, - ) - return checkpoint_callback - - -def get_early_stopping_callback(metric, patience): - return EarlyStopping( - monitor=f"val_{metric}", # does this need avg? 
- mode="min" if "loss" in metric else "max", - patience=patience, - verbose=True, - ) diff --git a/examples/research_projects/seq2seq-distillation/convert_pl_checkpoint_to_hf.py b/examples/research_projects/seq2seq-distillation/convert_pl_checkpoint_to_hf.py deleted file mode 100755 index 5f3c984f372..00000000000 --- a/examples/research_projects/seq2seq-distillation/convert_pl_checkpoint_to_hf.py +++ /dev/null @@ -1,74 +0,0 @@ -#!/usr/bin/env python - -import os -from pathlib import Path -from typing import Dict, List - -import fire -import torch - -from transformers import AutoModelForSeq2SeqLM, AutoTokenizer -from transformers.utils.logging import get_logger - - -logger = get_logger(__name__) - - -def remove_prefix(text: str, prefix: str): - if text.startswith(prefix): - return text[len(prefix) :] - return text # or whatever - - -def sanitize(sd): - return {remove_prefix(k, "model."): v for k, v in sd.items()} - - -def average_state_dicts(state_dicts: List[Dict[str, torch.Tensor]]): - new_sd = {} - for k in state_dicts[0].keys(): - tensors = [sd[k] for sd in state_dicts] - new_t = sum(tensors) / len(tensors) - assert isinstance(new_t, torch.Tensor) - new_sd[k] = new_t - return new_sd - - -def convert_pl_to_hf(pl_ckpt_path: str, hf_src_model_dir: str, save_path: str) -> None: - """Cleanup a pytorch-lightning .ckpt file or experiment dir and save a huggingface model with that state dict. - Silently allows extra pl keys (like teacher.) Puts all ckpt models into CPU RAM at once! - - Args: - pl_ckpt_path (:obj:`str`): Path to a .ckpt file saved by pytorch_lightning or dir containing ckpt files. - If a directory is passed, all .ckpt files inside it will be averaged! - hf_src_model_dir (:obj:`str`): Path to a directory containing a correctly shaped checkpoint - save_path (:obj:`str`): Directory to save the new model - - """ - hf_model = AutoModelForSeq2SeqLM.from_pretrained(hf_src_model_dir) - if os.path.isfile(pl_ckpt_path): - ckpt_files = [pl_ckpt_path] - else: - assert os.path.isdir(pl_ckpt_path) - ckpt_files = list(Path(pl_ckpt_path).glob("*.ckpt")) - assert ckpt_files, f"could not find any ckpt files inside the {pl_ckpt_path} directory" - - if len(ckpt_files) > 1: - logger.info(f"averaging the weights of {ckpt_files}") - - state_dicts = [sanitize(torch.load(x, map_location="cpu")["state_dict"]) for x in ckpt_files] - state_dict = average_state_dicts(state_dicts) - - missing, unexpected = hf_model.load_state_dict(state_dict, strict=False) - assert not missing, f"missing keys: {missing}" - hf_model.save_pretrained(save_path) - try: - tok = AutoTokenizer.from_pretrained(hf_src_model_dir) - tok.save_pretrained(save_path) - except Exception: - pass - # dont copy tokenizer if cant - - -if __name__ == "__main__": - fire.Fire(convert_pl_to_hf) diff --git a/examples/research_projects/seq2seq-distillation/distil_marian_enro_teacher.sh b/examples/research_projects/seq2seq-distillation/distil_marian_enro_teacher.sh deleted file mode 100755 index 5c938a71604..00000000000 --- a/examples/research_projects/seq2seq-distillation/distil_marian_enro_teacher.sh +++ /dev/null @@ -1,20 +0,0 @@ -#!/usr/bin/env bash -export PYTHONPATH="../":"${PYTHONPATH}" -export WANDB_PROJECT=dmar -# export MAX_LEN=128 -python distillation.py \ - --learning_rate=3e-4 \ - --do_train \ - --fp16 \ - --val_check_interval 0.25 \ - --teacher Helsinki-NLP/opus-mt-en-ro \ - --max_source_length $MAX_LEN --max_target_length $MAX_LEN --val_max_target_length $MAX_LEN --test_max_target_length $MAX_LEN \ - --student_decoder_layers 3 
--student_encoder_layers 6 \ - --freeze_encoder --freeze_embeds \ - --model_name_or_path IGNORED \ - --alpha_hid=3. \ - --train_batch_size=$BS --eval_batch_size=$BS \ - --tokenizer_name Helsinki-NLP/opus-mt-en-ro \ - --warmup_steps 500 --logger_name wandb \ - --fp16_opt_level O1 --task translation --normalize_hidden --num_sanity_val_steps=0 \ - "$@" diff --git a/examples/research_projects/seq2seq-distillation/distil_marian_no_teacher.sh b/examples/research_projects/seq2seq-distillation/distil_marian_no_teacher.sh deleted file mode 100755 index 4f0f53d7960..00000000000 --- a/examples/research_projects/seq2seq-distillation/distil_marian_no_teacher.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/usr/bin/env bash -export PYTHONPATH="../":"${PYTHONPATH}" -export WANDB_PROJECT=dmar -export MAX_LEN=128 -python finetune.py \ - --learning_rate=3e-4 \ - --do_train \ - --do_predict \ - --fp16 \ - --val_check_interval 0.25 \ - --data_dir $ENRO_DIR \ - --max_source_length $MAX_LEN --max_target_length $MAX_LEN --val_max_target_length $MAX_LEN --test_max_target_length $MAX_LEN \ - --freeze_encoder --freeze_embeds \ - --train_batch_size=$BS --eval_batch_size=$BS \ - --tokenizer_name $m --model_name_or_path $m \ - --warmup_steps 500 --sortish_sampler --logger_name wandb \ - --gpus 1 --fp16_opt_level=O1 --task translation --num_sanity_val_steps=0 \ - "$@" diff --git a/examples/research_projects/seq2seq-distillation/distillation.py b/examples/research_projects/seq2seq-distillation/distillation.py deleted file mode 100755 index 323f62bf458..00000000000 --- a/examples/research_projects/seq2seq-distillation/distillation.py +++ /dev/null @@ -1,310 +0,0 @@ -#!/usr/bin/env python - -import argparse -import gc -import os -import sys -from pathlib import Path -from typing import List # noqa: F401 - -import pytorch_lightning as pl -import torch -from finetune import SummarizationModule, TranslationModule -from finetune import main as ft_main -from make_student import create_student_by_copying_alternating_layers, get_layers_to_supervise -from torch import nn - -from transformers import AutoModelForSeq2SeqLM, MBartTokenizer, T5ForConditionalGeneration -from transformers.models.bart.modeling_bart import shift_tokens_right -from utils import calculate_bleu, check_output_dir, freeze_params, label_smoothed_nll_loss, use_task_specific_params - - -# need the parent dir module -sys.path.insert(2, str(Path(__file__).resolve().parents[1])) -from lightning_base import generic_train # noqa - - -class SummarizationDistiller(SummarizationModule): - """Supports T5, Bart, Pegasus and other models that inherit from Bart.""" - - loss_names = ["loss", "ce_loss", "mlm_loss", "hid_loss_enc", "hid_loss_dec"] - - def __init__(self, hparams): - assert Path(hparams.data_dir).exists() - self.output_dir = Path(hparams.output_dir) - self.output_dir.mkdir(exist_ok=True) - - save_dir = self.output_dir.joinpath("student") - - hparams.model_name_or_path = str(save_dir) # Tell lightning we are training the student - teacher = AutoModelForSeq2SeqLM.from_pretrained(hparams.teacher).eval() - use_task_specific_params(teacher, hparams.task) # We copy good generation parameters to student by default - if hparams.student is not None: - student = AutoModelForSeq2SeqLM.from_pretrained(hparams.student) - use_task_specific_params(student, hparams.task) - e_layer_ids, d_layer_ids = None, None - else: - student, e_layer_ids, d_layer_ids = create_student_by_copying_alternating_layers( - teacher, e=hparams.student_encoder_layers, d=hparams.student_decoder_layers, 
save_path=save_dir - ) - - if hparams.length_penalty != -1: - student.config.length_penalty = hparams.length_penalty - hparams.tokenizer_name = hparams.teacher # Use teacher's tokenizer - super().__init__(hparams, model=student, config=student.config) - assert student.config.model_type == teacher.config.model_type, ( - f"teacher, student model types should be the same, got {student.config.model_type} !=" - f" {teacher.config.model_type}" - ) - - if student.config.model_type == "t5": - student_encoder_layers = len(student.get_encoder().block) - student_decoder_layers = len(student.get_decoder().block) - teacher_encoder_layers = len(teacher.get_encoder().block) - teacher_decoder_layers = len(teacher.get_decoder().block) - else: - student_encoder_layers = student.config.encoder_layers - student_decoder_layers = student.config.decoder_layers - teacher_encoder_layers = teacher.config.encoder_layers - teacher_decoder_layers = teacher.config.decoder_layers - - self.different_base_models = not (hparams.student is None or hparams.teacher == hparams.student) - self.do_calc_hidden_loss = (not self.different_base_models) and hparams.alpha_hid > 0 - self.different_encoder = self.different_base_models or (student_encoder_layers != teacher_encoder_layers) - # self.different_encoder determines whether we need to run the teacher encoder - self.teacher = teacher - freeze_params(self.teacher) - - if not self.different_encoder: # To save RAM, delete teacher encoder and freeze student encoder. - try: - del self.teacher.model.encoder - except AttributeError: # T5 - del self.teacher.encoder - - if e_layer_ids is None: - e_layer_ids = list(range(student_encoder_layers)) - if d_layer_ids is None: - d_layer_ids = list(range(student_decoder_layers)) - - self.e_layer_ids, self.d_layer_ids = e_layer_ids, d_layer_ids # type: List[int], List[int] - - if self.do_calc_hidden_loss: # Intermediate supervision: Decide which layers to supervise - if hparams.supervise_forward: - self.e_matches = get_layers_to_supervise( - n_student=len(self.e_layer_ids), n_teacher=teacher_encoder_layers - ) - self.d_matches = get_layers_to_supervise( - n_student=len(self.d_layer_ids), n_teacher=teacher_decoder_layers - ) - else: # student layer should emulate hidden states of the teacher layer it was copied from - self.e_matches = self.e_layer_ids - self.d_matches = self.d_layer_ids - else: - self.e_matches = None - self.d_matches = None - - self.ce_loss_fct = nn.KLDivLoss(reduction="batchmean") - self.temperature = 2.0 - self.alpha_mlm = hparams.alpha_mlm - self.alpha_ce = hparams.alpha_ce - self.alpha_hid = hparams.alpha_hid - gc.collect() - torch.cuda.empty_cache() - - def calc_ce_loss(self, mask, s_logits, t_logits): - """Copy pasted from distillbert (transformers/examples/distillation/)""" - # mask has False at padding_idx - sel_mask = mask[:, :, None].expand_as(s_logits) - vocab_size = s_logits.size(-1) - s_logits_slct = torch.masked_select(s_logits, sel_mask) # (bs * seq_length * voc_size) modulo the 1s in mask - t_logits_slct = torch.masked_select(t_logits, sel_mask) # (bs * seq_length * voc_size) modulo the 1s in mask - s_logits_slct = s_logits_slct.view(-1, vocab_size) # (bs * seq_length, voc_size) modulo the 1s in mask - t_logits_slct = t_logits_slct.view(-1, vocab_size) # (bs * seq_length, voc_size) modulo the 1s in mask - assert t_logits_slct.size() == s_logits_slct.size() - loss_ce = ( - self.ce_loss_fct( - nn.functional.log_softmax(s_logits_slct / self.temperature, dim=-1), - nn.functional.softmax(t_logits_slct / 
self.temperature, dim=-1), - ) - * (self.temperature) ** 2 - ) - return loss_ce - - @staticmethod - def add_model_specific_args(parser, root_dir): - SummarizationModule.add_model_specific_args(parser, root_dir) - add_distill_args(parser) - return parser - - def _step(self, batch: dict) -> tuple: - """Compute the loss for a batch""" - pad_token_id = self.tokenizer.pad_token_id - input_ids, src_mask, labels = batch["input_ids"], batch["attention_mask"], batch["labels"] - if isinstance(self.model, T5ForConditionalGeneration): - decoder_input_ids = self.model._shift_right(labels) - else: - decoder_input_ids = shift_tokens_right(labels, pad_token_id) - - # noinspection PyCallingNonCallable - student_outputs = self( - input_ids, - attention_mask=src_mask, - decoder_input_ids=decoder_input_ids, - output_hidden_states=self.do_calc_hidden_loss, - output_attentions=False, - use_cache=False, - ) - lm_logits = student_outputs["logits"] - - # Same cross entropy vs. label smoothing logic as finetune.py - assert lm_logits.shape[-1] == self.model.config.vocab_size - if self.hparams.label_smoothing == 0: - # Same behavior as modeling_bart.py, besides ignoring pad_token_id - loss_fct = nn.CrossEntropyLoss(ignore_index=pad_token_id) - student_lm_loss = loss_fct(lm_logits.view(-1, lm_logits.shape[-1]), labels.view(-1)) - else: - lprobs = nn.functional.log_softmax(lm_logits, dim=-1) - student_lm_loss, _ = label_smoothed_nll_loss( - lprobs, labels, self.hparams.label_smoothing, ignore_index=pad_token_id - ) - - def zero_tensor(): - return torch.tensor(0.0).type_as(student_lm_loss) - - teacher_enc_outputs = student_outputs[ - "encoder_last_hidden_state" - ] # use this unless self.different_base_models - hid_loss_enc, hid_loss_dec = zero_tensor(), zero_tensor() - if self.different_encoder: # compute encoder hidden state loss - all_teacher_encoder_outputs = self.teacher.get_encoder()( - input_ids, - attention_mask=src_mask, - output_hidden_states=self.do_calc_hidden_loss, - ) - if self.different_base_models: - teacher_enc_outputs = all_teacher_encoder_outputs["last_hidden_state"] - elif self.do_calc_hidden_loss: - hid_loss_enc = self.calc_hidden_loss( - src_mask, - student_outputs["encoder_hidden_states"], - all_teacher_encoder_outputs["hidden_states"], - self.e_matches, - normalize_hidden=self.hparams.normalize_hidden, - ) - - teacher_outputs = self.teacher( - input_ids, - attention_mask=src_mask, - encoder_outputs=(teacher_enc_outputs,), - decoder_input_ids=decoder_input_ids, - output_hidden_states=self.do_calc_hidden_loss, - use_cache=False, # since we are not passing labels, never let this default to True - ) - dec_mask = decoder_input_ids.ne(pad_token_id) - loss_ce = self.calc_ce_loss(dec_mask, lm_logits, teacher_outputs["logits"]) - if self.do_calc_hidden_loss: # Intermediate supervision of decoder hidden states - hid_loss_dec = self.calc_hidden_loss( - dec_mask, - student_outputs["decoder_hidden_states"], - teacher_outputs["decoder_hidden_states"], - self.d_matches, - normalize_hidden=self.hparams.normalize_hidden, - ) - - blended_loss = ( - self.alpha_ce * loss_ce - + self.alpha_mlm * student_lm_loss - + self.hparams.alpha_hid * (hid_loss_enc + hid_loss_dec) - ) - return blended_loss, loss_ce, student_lm_loss, hid_loss_enc, hid_loss_dec - - @staticmethod - def calc_hidden_loss(attention_mask, hidden_states, hidden_states_T, matches, normalize_hidden): - """MSE(student_hid, teacher_hid[matches]). Called "Intermediate supervision" in paper. 
Inspired by TinyBERT.""" - msg = "expected list or tuple for hidden_states, got tensor of shape: " - assert not isinstance(hidden_states, torch.Tensor), f"{msg}{hidden_states.shape}" - assert not isinstance(hidden_states_T, torch.Tensor), f"{msg}{hidden_states_T.shape}" - mask = attention_mask.to(hidden_states[0]) - valid_count = mask.sum() * hidden_states[0].size(-1) - student_states = torch.stack([hidden_states[i] for i in range(len(matches))]) - teacher_states = torch.stack([hidden_states_T[j] for j in matches]) - assert student_states.shape == teacher_states.shape, f"{student_states.shape} != {teacher_states.shape}" - if normalize_hidden: - student_states = nn.functional.layer_norm(student_states, student_states.shape[1:]) - teacher_states = nn.functional.layer_norm(teacher_states, teacher_states.shape[1:]) - mse = nn.functional.mse_loss(student_states, teacher_states, reduction="none") - masked_mse = (mse * mask.unsqueeze(0).unsqueeze(-1)).sum() / valid_count - return masked_mse - - -def add_distill_args(parser): - # NOTE: if --student argument was specified and the teacher and student base models - # are different, the models still have to have the same tokenizer, specified by - # --tokenizer_name. So, for example, you can distill from t5_large to t5_small but not - # from bart to t5. This s because if the tokenizers are different, the output space - # for the two models is also different and their logits are not comparable. - parser.add_argument("--teacher", type=str) - parser.add_argument("--alpha_ce", default=0.8, type=float) - parser.add_argument("--alpha_mlm", default=0.2, type=float) - parser.add_argument("--alpha_hid", default=0.0, type=float, required=False) - parser.add_argument("--student", type=str, required=False) - parser.add_argument("--student_decoder_layers", default=12, type=int, required=False) - parser.add_argument("--student_encoder_layers", default=12, type=int, required=False) - parser.add_argument("--no_teacher", action="store_true", default=False) - parser.add_argument("--length_penalty", type=float, default=-1) - parser.add_argument("--supervise_forward", action="store_true", default=False) - parser.add_argument("--normalize_hidden", action="store_true", default=False) - - -class TranslationDistiller(SummarizationDistiller): - """Supports T5, mBART, Marian, other models that inherit from Bart.""" - - mode = "translation" - metric_names = ["bleu"] - default_val_metric = "bleu" - - def __init__(self, hparams, **kwargs): - super().__init__(hparams, **kwargs) - assert hparams.src_lang is not None - assert hparams.tgt_lang is not None - self.dataset_kwargs["src_lang"] = hparams.src_lang - self.dataset_kwargs["tgt_lang"] = hparams.tgt_lang - if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer, MBartTokenizer): - self.decoder_start_token_id = self.tokenizer.lang_code_to_id[hparams.tgt_lang] - - def calc_generative_metrics(self, preds, target) -> dict: - return calculate_bleu(preds, target) - - @staticmethod - def add_model_specific_args(parser, root_dir): - TranslationModule.add_model_specific_args(parser, root_dir) - add_distill_args(parser) - return parser - - -def create_module(args): - if args.no_teacher: - module_cls = TranslationModule if "translation" in args.task else SummarizationModule - else: # DISTILL WITH TEACHER - module_cls = TranslationDistiller if "translation" in args.task else SummarizationDistiller - args.setup_cls: str = module_cls.__name__ - print(f"using module {args.setup_cls}") - model = module_cls(args) - 
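    # --no_teacher falls back to plain fine-tuning (SummarizationModule / TranslationModule);
    # otherwise the distiller variants add the teacher KL loss and optional hidden-state supervision on top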
return model - - -def distill_main(args): - Path(args.output_dir).mkdir(exist_ok=True) - check_output_dir(args, expected_items=3) - - model = create_module(args) - return ft_main(args, model=model) - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser = pl.Trainer.add_argparse_args(parser) - parser = SummarizationDistiller.add_model_specific_args(parser, os.getcwd()) - args = parser.parse_args() - - distill_main(args) diff --git a/examples/research_projects/seq2seq-distillation/dynamic_bs_example.sh b/examples/research_projects/seq2seq-distillation/dynamic_bs_example.sh deleted file mode 100755 index cfe9e21f0f6..00000000000 --- a/examples/research_projects/seq2seq-distillation/dynamic_bs_example.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/usr/bin/env bash -export PYTHONPATH="../":"${PYTHONPATH}" -export WANDB_PROJECT=dmar -export MAX_LEN=128 -export m=sshleifer/student_marian_en_ro_6_1 -python finetune.py \ - --learning_rate=3e-4 \ - --do_train \ - --fp16 \ - --data_dir wmt_en_ro \ - --max_source_length $MAX_LEN --max_target_length $MAX_LEN --val_max_target_length $MAX_LEN --test_max_target_length $MAX_LEN \ - --freeze_encoder --freeze_embeds \ - --train_batch_size=48 --eval_batch_size=64 \ - --tokenizer_name $m --model_name_or_path $m --num_train_epochs=1 \ - --warmup_steps 500 --logger_name wandb --gpus 1 \ - --fp16_opt_level=O1 --task translation \ - "$@" diff --git a/examples/research_projects/seq2seq-distillation/finetune.py b/examples/research_projects/seq2seq-distillation/finetune.py deleted file mode 100755 index ff889af81e3..00000000000 --- a/examples/research_projects/seq2seq-distillation/finetune.py +++ /dev/null @@ -1,454 +0,0 @@ -#!/usr/bin/env python - -import argparse -import glob -import logging -import os -import sys -import time -from collections import defaultdict -from pathlib import Path -from typing import Dict, List, Tuple - -import numpy as np -import pytorch_lightning as pl -import torch -from callbacks import Seq2SeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback -from torch import nn -from torch.utils.data import DataLoader - -from transformers import MBartTokenizer, T5ForConditionalGeneration -from transformers.models.bart.modeling_bart import shift_tokens_right -from utils import ( - ROUGE_KEYS, - LegacySeq2SeqDataset, - Seq2SeqDataset, - assert_all_frozen, - calculate_bleu, - calculate_rouge, - check_output_dir, - flatten_list, - freeze_embeds, - freeze_params, - get_git_info, - label_smoothed_nll_loss, - lmap, - pickle_save, - save_git_info, - save_json, - use_task_specific_params, -) - - -# need the parent dir module -sys.path.insert(2, str(Path(__file__).resolve().parents[1])) -from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa - - -logger = logging.getLogger(__name__) - - -class SummarizationModule(BaseTransformer): - mode = "summarization" - loss_names = ["loss"] - metric_names = ROUGE_KEYS - default_val_metric = "rouge2" - - def __init__(self, hparams, **kwargs): - if hparams.sortish_sampler and hparams.gpus > 1: - hparams.replace_sampler_ddp = False - elif hparams.max_tokens_per_batch is not None: - if hparams.gpus > 1: - raise NotImplementedError("Dynamic Batch size does not work for multi-gpu training") - if hparams.sortish_sampler: - raise ValueError("--sortish_sampler and --max_tokens_per_batch may not be used simultaneously") - - super().__init__(hparams, num_labels=None, mode=self.mode, **kwargs) - use_task_specific_params(self.model, "summarization") - 
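        # the next few lines persist run metadata for reproducibility: git info, pickled hparams and
        # validation metrics are all written under output_dir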
save_git_info(self.hparams.output_dir) - self.metrics_save_path = Path(self.output_dir) / "metrics.json" - self.hparams_save_path = Path(self.output_dir) / "hparams.pkl" - pickle_save(self.hparams, self.hparams_save_path) - self.step_count = 0 - self.metrics = defaultdict(list) - self.model_type = self.config.model_type - self.vocab_size = self.config.tgt_vocab_size if self.model_type == "fsmt" else self.config.vocab_size - - self.dataset_kwargs: dict = { - "data_dir": self.hparams.data_dir, - "max_source_length": self.hparams.max_source_length, - "prefix": self.model.config.prefix or "", - } - n_observations_per_split = { - "train": self.hparams.n_train, - "val": self.hparams.n_val, - "test": self.hparams.n_test, - } - self.n_obs = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()} - - self.target_lens = { - "train": self.hparams.max_target_length, - "val": self.hparams.val_max_target_length, - "test": self.hparams.test_max_target_length, - } - assert self.target_lens["train"] <= self.target_lens["val"], f"target_lens: {self.target_lens}" - assert self.target_lens["train"] <= self.target_lens["test"], f"target_lens: {self.target_lens}" - if self.hparams.freeze_embeds: - freeze_embeds(self.model) - if self.hparams.freeze_encoder: - freeze_params(self.model.get_encoder()) - assert_all_frozen(self.model.get_encoder()) - - self.hparams.git_sha = get_git_info()["repo_sha"] - self.num_workers = hparams.num_workers - self.decoder_start_token_id = None # default to config - if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer, MBartTokenizer): - self.decoder_start_token_id = self.tokenizer.lang_code_to_id[hparams.tgt_lang] - self.model.config.decoder_start_token_id = self.decoder_start_token_id - self.dataset_class = ( - Seq2SeqDataset if hasattr(self.tokenizer, "prepare_seq2seq_batch") else LegacySeq2SeqDataset - ) - self.already_saved_batch = False - self.eval_beams = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams - if self.hparams.eval_max_gen_length is not None: - self.eval_max_length = self.hparams.eval_max_gen_length - else: - self.eval_max_length = self.model.config.max_length - self.val_metric = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric - - def save_readable_batch(self, batch: Dict[str, torch.Tensor]) -> Dict[str, List[str]]: - """A debugging utility""" - readable_batch = { - k: self.tokenizer.batch_decode(v.tolist()) if "mask" not in k else v.shape for k, v in batch.items() - } - save_json(readable_batch, Path(self.output_dir) / "text_batch.json") - save_json({k: v.tolist() for k, v in batch.items()}, Path(self.output_dir) / "tok_batch.json") - - self.already_saved_batch = True - return readable_batch - - def forward(self, input_ids, **kwargs): - return self.model(input_ids, **kwargs) - - def ids_to_clean_text(self, generated_ids: List[int]): - gen_text = self.tokenizer.batch_decode( - generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True - ) - return lmap(str.strip, gen_text) - - def _step(self, batch: dict) -> Tuple: - pad_token_id = self.tokenizer.pad_token_id - src_ids, src_mask = batch["input_ids"], batch["attention_mask"] - tgt_ids = batch["labels"] - if isinstance(self.model, T5ForConditionalGeneration): - decoder_input_ids = self.model._shift_right(tgt_ids) - else: - decoder_input_ids = shift_tokens_right(tgt_ids, pad_token_id) - if not self.already_saved_batch: # This would be slightly better if it only happened on rank 
zero - batch["decoder_input_ids"] = decoder_input_ids - self.save_readable_batch(batch) - - outputs = self(src_ids, attention_mask=src_mask, decoder_input_ids=decoder_input_ids, use_cache=False) - lm_logits = outputs["logits"] - if self.hparams.label_smoothing == 0: - # Same behavior as modeling_bart.py, besides ignoring pad_token_id - ce_loss_fct = nn.CrossEntropyLoss(ignore_index=pad_token_id) - - assert lm_logits.shape[-1] == self.vocab_size - loss = ce_loss_fct(lm_logits.view(-1, lm_logits.shape[-1]), tgt_ids.view(-1)) - else: - lprobs = nn.functional.log_softmax(lm_logits, dim=-1) - loss, nll_loss = label_smoothed_nll_loss( - lprobs, tgt_ids, self.hparams.label_smoothing, ignore_index=pad_token_id - ) - return (loss,) - - @property - def pad(self) -> int: - return self.tokenizer.pad_token_id - - def training_step(self, batch, batch_idx) -> Dict: - loss_tensors = self._step(batch) - - logs = dict(zip(self.loss_names, loss_tensors)) - # tokens per batch - logs["tpb"] = batch["input_ids"].ne(self.pad).sum() + batch["labels"].ne(self.pad).sum() - logs["bs"] = batch["input_ids"].shape[0] - logs["src_pad_tok"] = batch["input_ids"].eq(self.pad).sum() - logs["src_pad_frac"] = batch["input_ids"].eq(self.pad).float().mean() - # TODO(SS): make a wandb summary metric for this - return {"loss": loss_tensors[0], "log": logs} - - def validation_step(self, batch, batch_idx) -> Dict: - return self._generative_step(batch) - - def validation_epoch_end(self, outputs, prefix="val") -> Dict: - self.step_count += 1 - losses = {k: torch.stack([x[k] for x in outputs]).mean() for k in self.loss_names} - loss = losses["loss"] - generative_metrics = { - k: np.array([x[k] for x in outputs]).mean() for k in self.metric_names + ["gen_time", "gen_len"] - } - metric_val = ( - generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric] - ) - metric_tensor: torch.FloatTensor = torch.tensor(metric_val).type_as(loss) - generative_metrics.update({k: v.item() for k, v in losses.items()}) - losses.update(generative_metrics) - all_metrics = {f"{prefix}_avg_{k}": x for k, x in losses.items()} - all_metrics["step_count"] = self.step_count - self.metrics[prefix].append(all_metrics) # callback writes this to self.metrics_save_path - preds = flatten_list([x["preds"] for x in outputs]) - return { - "log": all_metrics, - "preds": preds, - f"{prefix}_loss": loss, - f"{prefix}_{self.val_metric}": metric_tensor, - } - - def calc_generative_metrics(self, preds, target) -> Dict: - return calculate_rouge(preds, target) - - def _generative_step(self, batch: dict) -> dict: - t0 = time.time() - - # parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens') - generated_ids = self.model.generate( - batch["input_ids"], - attention_mask=batch["attention_mask"], - use_cache=True, - decoder_start_token_id=self.decoder_start_token_id, - num_beams=self.eval_beams, - max_length=self.eval_max_length, - ) - gen_time = (time.time() - t0) / batch["input_ids"].shape[0] - preds: List[str] = self.ids_to_clean_text(generated_ids) - target: List[str] = self.ids_to_clean_text(batch["labels"]) - loss_tensors = self._step(batch) - base_metrics = dict(zip(self.loss_names, loss_tensors)) - rouge: Dict = self.calc_generative_metrics(preds, target) - summ_len = np.mean(lmap(len, generated_ids)) - base_metrics.update(gen_time=gen_time, gen_len=summ_len, preds=preds, target=target, **rouge) - return base_metrics - - def test_step(self, batch, batch_idx): - return 
self._generative_step(batch) - - def test_epoch_end(self, outputs): - return self.validation_epoch_end(outputs, prefix="test") - - def get_dataset(self, type_path) -> Seq2SeqDataset: - n_obs = self.n_obs[type_path] - max_target_length = self.target_lens[type_path] - dataset = self.dataset_class( - self.tokenizer, - type_path=type_path, - n_obs=n_obs, - max_target_length=max_target_length, - **self.dataset_kwargs, - ) - return dataset - - def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False) -> DataLoader: - dataset = self.get_dataset(type_path) - - if self.hparams.sortish_sampler and type_path != "test" and type_path != "val": - sampler = dataset.make_sortish_sampler(batch_size, distributed=self.hparams.gpus > 1) - return DataLoader( - dataset, - batch_size=batch_size, - collate_fn=dataset.collate_fn, - shuffle=False, - num_workers=self.num_workers, - sampler=sampler, - ) - - elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val": - batch_sampler = dataset.make_dynamic_sampler( - self.hparams.max_tokens_per_batch, distributed=self.hparams.gpus > 1 - ) - return DataLoader( - dataset, - batch_sampler=batch_sampler, - collate_fn=dataset.collate_fn, - # shuffle=False, - num_workers=self.num_workers, - # batch_size=None, - ) - else: - return DataLoader( - dataset, - batch_size=batch_size, - collate_fn=dataset.collate_fn, - shuffle=shuffle, - num_workers=self.num_workers, - sampler=None, - ) - - def train_dataloader(self) -> DataLoader: - dataloader = self.get_dataloader("train", batch_size=self.hparams.train_batch_size, shuffle=True) - return dataloader - - def val_dataloader(self) -> DataLoader: - return self.get_dataloader("val", batch_size=self.hparams.eval_batch_size) - - def test_dataloader(self) -> DataLoader: - return self.get_dataloader("test", batch_size=self.hparams.eval_batch_size) - - @staticmethod - def add_model_specific_args(parser, root_dir): - BaseTransformer.add_model_specific_args(parser, root_dir) - add_generic_args(parser, root_dir) - parser.add_argument( - "--max_source_length", - default=1024, - type=int, - help=( - "The maximum total input sequence length after tokenization. Sequences longer " - "than this will be truncated, sequences shorter will be padded." - ), - ) - parser.add_argument( - "--max_target_length", - default=56, - type=int, - help=( - "The maximum total input sequence length after tokenization. Sequences longer " - "than this will be truncated, sequences shorter will be padded." - ), - ) - parser.add_argument( - "--val_max_target_length", - default=142, # these defaults are optimized for CNNDM. For xsum, see README.md. - type=int, - help=( - "The maximum total input sequence length after tokenization. Sequences longer " - "than this will be truncated, sequences shorter will be padded." - ), - ) - parser.add_argument( - "--test_max_target_length", - default=142, - type=int, - help=( - "The maximum total input sequence length after tokenization. Sequences longer " - "than this will be truncated, sequences shorter will be padded." 
- ), - ) - parser.add_argument("--freeze_encoder", action="store_true") - parser.add_argument("--freeze_embeds", action="store_true") - parser.add_argument("--sortish_sampler", action="store_true", default=False) - parser.add_argument("--overwrite_output_dir", action="store_true", default=False) - parser.add_argument("--max_tokens_per_batch", type=int, default=None) - parser.add_argument("--logger_name", type=str, choices=["default", "wandb", "wandb_shared"], default="default") - parser.add_argument("--n_train", type=int, default=-1, required=False, help="# examples. -1 means use all.") - parser.add_argument("--n_val", type=int, default=500, required=False, help="# examples. -1 means use all.") - parser.add_argument("--n_test", type=int, default=-1, required=False, help="# examples. -1 means use all.") - parser.add_argument( - "--task", type=str, default="summarization", required=False, help="# examples. -1 means use all." - ) - parser.add_argument("--label_smoothing", type=float, default=0.0, required=False) - parser.add_argument("--src_lang", type=str, default="", required=False) - parser.add_argument("--tgt_lang", type=str, default="", required=False) - parser.add_argument("--eval_beams", type=int, default=None, required=False) - parser.add_argument( - "--val_metric", type=str, default=None, required=False, choices=["bleu", "rouge2", "loss", None] - ) - parser.add_argument("--eval_max_gen_length", type=int, default=None, help="never generate more than n tokens") - parser.add_argument("--save_top_k", type=int, default=1, required=False, help="How many checkpoints to save") - parser.add_argument( - "--early_stopping_patience", - type=int, - default=-1, - required=False, - help=( - "-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So" - " val_check_interval will effect it." 
- ), - ) - return parser - - -class TranslationModule(SummarizationModule): - mode = "translation" - loss_names = ["loss"] - metric_names = ["bleu"] - default_val_metric = "bleu" - - def __init__(self, hparams, **kwargs): - super().__init__(hparams, **kwargs) - self.dataset_kwargs["src_lang"] = hparams.src_lang - self.dataset_kwargs["tgt_lang"] = hparams.tgt_lang - - def calc_generative_metrics(self, preds, target) -> dict: - return calculate_bleu(preds, target) - - -def main(args, model=None) -> SummarizationModule: - Path(args.output_dir).mkdir(exist_ok=True) - check_output_dir(args, expected_items=3) - - if model is None: - if "summarization" in args.task: - model: SummarizationModule = SummarizationModule(args) - else: - model: SummarizationModule = TranslationModule(args) - dataset = Path(args.data_dir).name - if ( - args.logger_name == "default" - or args.fast_dev_run - or str(args.output_dir).startswith("/tmp") - or str(args.output_dir).startswith("/var") - ): - logger = True # don't pollute wandb logs unnecessarily - elif args.logger_name == "wandb": - from pytorch_lightning.loggers import WandbLogger - - project = os.environ.get("WANDB_PROJECT", dataset) - logger = WandbLogger(name=model.output_dir.name, project=project) - - elif args.logger_name == "wandb_shared": - from pytorch_lightning.loggers import WandbLogger - - logger = WandbLogger(name=model.output_dir.name, project=f"hf_{dataset}") - - if args.early_stopping_patience >= 0: - es_callback = get_early_stopping_callback(model.val_metric, args.early_stopping_patience) - else: - es_callback = False - - lower_is_better = args.val_metric == "loss" - trainer: pl.Trainer = generic_train( - model, - args, - logging_callback=Seq2SeqLoggingCallback(), - checkpoint_callback=get_checkpoint_callback( - args.output_dir, model.val_metric, args.save_top_k, lower_is_better - ), - early_stopping_callback=es_callback, - logger=logger, - ) - pickle_save(model.hparams, model.output_dir / "hparams.pkl") - if not args.do_predict: - return model - - model.hparams.test_checkpoint = "" - checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "*.ckpt"), recursive=True)) - if checkpoints: - model.hparams.test_checkpoint = checkpoints[-1] - trainer.resume_from_checkpoint = checkpoints[-1] - trainer.logger.log_hyperparams(model.hparams) - - # test() without a model tests using the best checkpoint automatically - trainer.test() - return model - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser = pl.Trainer.add_argparse_args(parser) - parser = SummarizationModule.add_model_specific_args(parser, os.getcwd()) - - args = parser.parse_args() - - main(args) diff --git a/examples/research_projects/seq2seq-distillation/finetune.sh b/examples/research_projects/seq2seq-distillation/finetune.sh deleted file mode 100755 index 683c2d7752d..00000000000 --- a/examples/research_projects/seq2seq-distillation/finetune.sh +++ /dev/null @@ -1,11 +0,0 @@ -# the proper usage is documented in the README, you need to specify data_dir, output_dir and model_name_or_path -# run ./finetune.sh --help to see all the possible options -python finetune.py \ - --learning_rate=3e-5 \ - --fp16 \ - --gpus 1 \ - --do_train \ - --do_predict \ - --n_val 1000 \ - --val_check_interval 0.1 \ - "$@" diff --git a/examples/research_projects/seq2seq-distillation/finetune_bart_tiny.sh b/examples/research_projects/seq2seq-distillation/finetune_bart_tiny.sh deleted file mode 100755 index f0289b45ab5..00000000000 --- 
a/examples/research_projects/seq2seq-distillation/finetune_bart_tiny.sh +++ /dev/null @@ -1,32 +0,0 @@ -# Script for verifying that run_bart_sum can be invoked from its directory - -# Get tiny dataset with cnn_dm format (4 examples for train, val, test) -wget https://cdn-datasets.huggingface.co/summarization/cnn_tiny.tgz -tar -xzvf cnn_tiny.tgz -rm cnn_tiny.tgz - -export OUTPUT_DIR_NAME=bart_utest_output -export CURRENT_DIR=${PWD} -export OUTPUT_DIR=${CURRENT_DIR}/${OUTPUT_DIR_NAME} - -# Make output directory if it doesn't exist -mkdir -p $OUTPUT_DIR - -# Add parent directory to python path to access lightning_base.py and testing_utils.py -export PYTHONPATH="../":"${PYTHONPATH}" -python finetune.py \ ---data_dir=cnn_tiny/ \ ---model_name_or_path=sshleifer/bart-tiny-random \ ---learning_rate=3e-5 \ ---train_batch_size=2 \ ---eval_batch_size=2 \ ---output_dir=$OUTPUT_DIR \ ---num_train_epochs=1 \ ---gpus=0 \ ---do_train "$@" - -rm -rf cnn_tiny -rm -rf $OUTPUT_DIR - - - diff --git a/examples/research_projects/seq2seq-distillation/finetune_pegasus_xsum.sh b/examples/research_projects/seq2seq-distillation/finetune_pegasus_xsum.sh deleted file mode 100755 index ec7ff98557c..00000000000 --- a/examples/research_projects/seq2seq-distillation/finetune_pegasus_xsum.sh +++ /dev/null @@ -1,14 +0,0 @@ -#!/usr/bin/env bash -export PYTHONPATH="../":"${PYTHONPATH}" - -# From appendix C of paper https://arxiv.org/abs/1912.08777 -# Set --gradient_accumulation_steps so that effective batch size is 256 (2*128, 4*64, 8*32, 16*16) -python finetune.py \ - --learning_rate=1e-4 \ - --do_train \ - --do_predict \ - --n_val 1000 \ - --val_check_interval 0.25 \ - --max_source_length 512 --max_target_length 56 \ - --freeze_embeds --label_smoothing 0.1 --adafactor --task summarization_xsum \ - "$@" diff --git a/examples/research_projects/seq2seq-distillation/finetune_t5.sh b/examples/research_projects/seq2seq-distillation/finetune_t5.sh deleted file mode 100755 index 504e9eb71e3..00000000000 --- a/examples/research_projects/seq2seq-distillation/finetune_t5.sh +++ /dev/null @@ -1,14 +0,0 @@ -# Add parent directory to python path to access lightning_base.py -export PYTHONPATH="../":"${PYTHONPATH}" - -python finetune.py \ ---data_dir=$CNN_DIR \ ---learning_rate=3e-5 \ ---train_batch_size=$BS \ ---eval_batch_size=$BS \ ---output_dir=$OUTPUT_DIR \ ---max_source_length=512 \ ---max_target_length=56 \ ---val_check_interval=0.1 --n_val=200 \ ---do_train --do_predict \ - "$@" diff --git a/examples/research_projects/seq2seq-distillation/lightning_base.py b/examples/research_projects/seq2seq-distillation/lightning_base.py deleted file mode 100644 index 640828bacd3..00000000000 --- a/examples/research_projects/seq2seq-distillation/lightning_base.py +++ /dev/null @@ -1,393 +0,0 @@ -import argparse -import logging -import os -from pathlib import Path -from typing import Any, Dict - -import pytorch_lightning as pl -from pytorch_lightning.utilities import rank_zero_info - -from transformers import ( - AdamW, - AutoConfig, - AutoModel, - AutoModelForPreTraining, - AutoModelForQuestionAnswering, - AutoModelForSeq2SeqLM, - AutoModelForSequenceClassification, - AutoModelForTokenClassification, - AutoModelWithLMHead, - AutoTokenizer, - PretrainedConfig, - PreTrainedTokenizer, -) -from transformers.optimization import ( - Adafactor, - get_cosine_schedule_with_warmup, - get_cosine_with_hard_restarts_schedule_with_warmup, - get_linear_schedule_with_warmup, - get_polynomial_decay_schedule_with_warmup, -) -from transformers.utils.versions import 
require_version - - -logger = logging.getLogger(__name__) - -require_version("pytorch_lightning>=1.0.4") - -MODEL_MODES = { - "base": AutoModel, - "sequence-classification": AutoModelForSequenceClassification, - "question-answering": AutoModelForQuestionAnswering, - "pretraining": AutoModelForPreTraining, - "token-classification": AutoModelForTokenClassification, - "language-modeling": AutoModelWithLMHead, - "summarization": AutoModelForSeq2SeqLM, - "translation": AutoModelForSeq2SeqLM, -} - - -# update this and the import above to support new schedulers from transformers.optimization -arg_to_scheduler = { - "linear": get_linear_schedule_with_warmup, - "cosine": get_cosine_schedule_with_warmup, - "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup, - "polynomial": get_polynomial_decay_schedule_with_warmup, - # '': get_constant_schedule, # not supported for now - # '': get_constant_schedule_with_warmup, # not supported for now -} -arg_to_scheduler_choices = sorted(arg_to_scheduler.keys()) -arg_to_scheduler_metavar = "{" + ", ".join(arg_to_scheduler_choices) + "}" - - -class BaseTransformer(pl.LightningModule): - def __init__( - self, - hparams: argparse.Namespace, - num_labels=None, - mode="base", - config=None, - tokenizer=None, - model=None, - **config_kwargs, - ): - """Initialize a model, tokenizer and config.""" - super().__init__() - # TODO: move to self.save_hyperparameters() - # self.save_hyperparameters() - # can also expand arguments into trainer signature for easier reading - - self.save_hyperparameters(hparams) - self.step_count = 0 - self.output_dir = Path(self.hparams.output_dir) - cache_dir = self.hparams.cache_dir if self.hparams.cache_dir else None - if config is None: - self.config = AutoConfig.from_pretrained( - self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path, - **({"num_labels": num_labels} if num_labels is not None else {}), - cache_dir=cache_dir, - **config_kwargs, - ) - else: - self.config: PretrainedConfig = config - - extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout") - for p in extra_model_params: - if getattr(self.hparams, p, None): - assert hasattr(self.config, p), f"model config doesn't have a `{p}` attribute" - setattr(self.config, p, getattr(self.hparams, p)) - - if tokenizer is None: - self.tokenizer = AutoTokenizer.from_pretrained( - self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path, - cache_dir=cache_dir, - ) - else: - self.tokenizer: PreTrainedTokenizer = tokenizer - self.model_type = MODEL_MODES[mode] - if model is None: - self.model = self.model_type.from_pretrained( - self.hparams.model_name_or_path, - from_tf=bool(".ckpt" in self.hparams.model_name_or_path), - config=self.config, - cache_dir=cache_dir, - ) - else: - self.model = model - - def load_hf_checkpoint(self, *args, **kwargs): - self.model = self.model_type.from_pretrained(*args, **kwargs) - - def get_lr_scheduler(self): - get_schedule_func = arg_to_scheduler[self.hparams.lr_scheduler] - scheduler = get_schedule_func( - self.opt, num_warmup_steps=self.hparams.warmup_steps, num_training_steps=self.total_steps() - ) - scheduler = {"scheduler": scheduler, "interval": "step", "frequency": 1} - return scheduler - - def configure_optimizers(self): - """Prepare optimizer and schedule (linear warmup and decay)""" - model = self.model - no_decay = ["bias", "LayerNorm.weight"] - optimizer_grouped_parameters = [ - { - "params": [p for n, p in 
model.named_parameters() if not any(nd in n for nd in no_decay)], - "weight_decay": self.hparams.weight_decay, - }, - { - "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], - "weight_decay": 0.0, - }, - ] - if self.hparams.adafactor: - optimizer = Adafactor( - optimizer_grouped_parameters, lr=self.hparams.learning_rate, scale_parameter=False, relative_step=False - ) - - else: - optimizer = AdamW( - optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon - ) - self.opt = optimizer - - scheduler = self.get_lr_scheduler() - - return [optimizer], [scheduler] - - def test_step(self, batch, batch_nb): - return self.validation_step(batch, batch_nb) - - def test_epoch_end(self, outputs): - return self.validation_end(outputs) - - def total_steps(self) -> int: - """The number of total training steps that will be run. Used for lr scheduler purposes.""" - num_devices = max(1, self.hparams.gpus) # TODO: consider num_tpu_cores - effective_batch_size = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices - return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs - - def setup(self, mode): - if mode == "test": - self.dataset_size = len(self.test_dataloader().dataset) - else: - self.train_loader = self.get_dataloader("train", self.hparams.train_batch_size, shuffle=True) - self.dataset_size = len(self.train_dataloader().dataset) - - def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False): - raise NotImplementedError("You must implement this for your task") - - def train_dataloader(self): - return self.train_loader - - def val_dataloader(self): - return self.get_dataloader("dev", self.hparams.eval_batch_size, shuffle=False) - - def test_dataloader(self): - return self.get_dataloader("test", self.hparams.eval_batch_size, shuffle=False) - - def _feature_file(self, mode): - return os.path.join( - self.hparams.data_dir, - "cached_{}_{}_{}".format( - mode, - list(filter(None, self.hparams.model_name_or_path.split("/"))).pop(), - str(self.hparams.max_seq_length), - ), - ) - - @pl.utilities.rank_zero_only - def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None: - save_path = self.output_dir.joinpath("best_tfmr") - self.model.config.save_step = self.step_count - self.model.save_pretrained(save_path) - self.tokenizer.save_pretrained(save_path) - - @staticmethod - def add_model_specific_args(parser, root_dir): - parser.add_argument( - "--model_name_or_path", - default=None, - type=str, - required=True, - help="Path to pretrained model or model identifier from huggingface.co/models", - ) - parser.add_argument( - "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name" - ) - parser.add_argument( - "--tokenizer_name", - default=None, - type=str, - help="Pretrained tokenizer name or path if not the same as model_name", - ) - parser.add_argument( - "--cache_dir", - default="", - type=str, - help="Where do you want to store the pre-trained models downloaded from huggingface.co", - ) - parser.add_argument( - "--encoder_layerdrop", - type=float, - help="Encoder layer dropout probability (Optional). Goes into model.config", - ) - parser.add_argument( - "--decoder_layerdrop", - type=float, - help="Decoder layer dropout probability (Optional). Goes into model.config", - ) - parser.add_argument( - "--dropout", - type=float, - help="Dropout probability (Optional). 
Goes into model.config", - ) - parser.add_argument( - "--attention_dropout", - type=float, - help="Attention dropout probability (Optional). Goes into model.config", - ) - parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.") - parser.add_argument( - "--lr_scheduler", - default="linear", - choices=arg_to_scheduler_choices, - metavar=arg_to_scheduler_metavar, - type=str, - help="Learning rate scheduler", - ) - parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.") - parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.") - parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.") - parser.add_argument("--num_workers", default=4, type=int, help="kwarg passed to DataLoader") - parser.add_argument("--num_train_epochs", dest="max_epochs", default=3, type=int) - parser.add_argument("--train_batch_size", default=32, type=int) - parser.add_argument("--eval_batch_size", default=32, type=int) - parser.add_argument("--adafactor", action="store_true") - - -class LoggingCallback(pl.Callback): - def on_batch_end(self, trainer, pl_module): - lr_scheduler = trainer.lr_schedulers[0]["scheduler"] - lrs = {f"lr_group_{i}": lr for i, lr in enumerate(lr_scheduler.get_lr())} - pl_module.logger.log_metrics(lrs) - - def on_validation_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule): - rank_zero_info("***** Validation results *****") - metrics = trainer.callback_metrics - # Log results - for key in sorted(metrics): - if key not in ["log", "progress_bar"]: - rank_zero_info("{} = {}\n".format(key, str(metrics[key]))) - - def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule): - rank_zero_info("***** Test results *****") - metrics = trainer.callback_metrics - # Log and save results to file - output_test_results_file = os.path.join(pl_module.hparams.output_dir, "test_results.txt") - with open(output_test_results_file, "w") as writer: - for key in sorted(metrics): - if key not in ["log", "progress_bar"]: - rank_zero_info("{} = {}\n".format(key, str(metrics[key]))) - writer.write("{} = {}\n".format(key, str(metrics[key]))) - - -def add_generic_args(parser, root_dir) -> None: - # To allow all pl args uncomment the following line - # parser = pl.Trainer.add_argparse_args(parser) - parser.add_argument( - "--output_dir", - default=None, - type=str, - required=True, - help="The output directory where the model predictions and checkpoints will be written.", - ) - parser.add_argument( - "--fp16", - action="store_true", - help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit", - ) - - parser.add_argument( - "--fp16_opt_level", - type=str, - default="O2", - help=( - "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. 
" - "See details at https://nvidia.github.io/apex/amp.html" - ), - ) - parser.add_argument("--n_tpu_cores", dest="tpu_cores", type=int) - parser.add_argument("--max_grad_norm", dest="gradient_clip_val", default=1.0, type=float, help="Max gradient norm") - parser.add_argument("--do_train", action="store_true", help="Whether to run training.") - parser.add_argument("--do_predict", action="store_true", help="Whether to run predictions on the test set.") - parser.add_argument( - "--gradient_accumulation_steps", - dest="accumulate_grad_batches", - type=int, - default=1, - help="Number of updates steps to accumulate before performing a backward/update pass.", - ) - parser.add_argument("--seed", type=int, default=42, help="random seed for initialization") - parser.add_argument( - "--data_dir", - default=None, - type=str, - required=True, - help="The input data dir. Should contain the training files for the CoNLL-2003 NER task.", - ) - - -def generic_train( - model: BaseTransformer, - args: argparse.Namespace, - early_stopping_callback=None, - logger=True, # can pass WandbLogger() here - extra_callbacks=[], - checkpoint_callback=None, - logging_callback=None, - **extra_train_kwargs, -): - pl.seed_everything(args.seed) - - # init model - odir = Path(model.hparams.output_dir) - odir.mkdir(exist_ok=True) - - # add custom checkpoints - if checkpoint_callback is None: - checkpoint_callback = pl.callbacks.ModelCheckpoint( - filepath=args.output_dir, prefix="checkpoint", monitor="val_loss", mode="min", save_top_k=1 - ) - if early_stopping_callback: - extra_callbacks.append(early_stopping_callback) - if logging_callback is None: - logging_callback = LoggingCallback() - - train_params = {} - - # TODO: remove with PyTorch 1.6 since pl uses native amp - if args.fp16: - train_params["precision"] = 16 - train_params["amp_level"] = args.fp16_opt_level - - if args.gpus > 1: - train_params["distributed_backend"] = "ddp" - - train_params["accumulate_grad_batches"] = args.accumulate_grad_batches - train_params["accelerator"] = extra_train_kwargs.get("accelerator", None) - train_params["profiler"] = extra_train_kwargs.get("profiler", None) - - trainer = pl.Trainer.from_argparse_args( - args, - weights_summary=None, - callbacks=[logging_callback] + extra_callbacks, - logger=logger, - checkpoint_callback=checkpoint_callback, - **train_params, - ) - - if args.do_train: - trainer.fit(model) - - return trainer diff --git a/examples/research_projects/seq2seq-distillation/make_student.py b/examples/research_projects/seq2seq-distillation/make_student.py deleted file mode 100644 index 83e014bf481..00000000000 --- a/examples/research_projects/seq2seq-distillation/make_student.py +++ /dev/null @@ -1,186 +0,0 @@ -import warnings -from pathlib import Path -from typing import List, Tuple, Union - -import fire -from torch import nn - -from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, PreTrainedModel -from transformers.utils import logging - - -logger = logging.get_logger(__name__) - - -def copy_layers(src_layers: nn.ModuleList, dest_layers: nn.ModuleList, layers_to_copy: List[int]) -> None: - layers_to_copy = nn.ModuleList([src_layers[i] for i in layers_to_copy]) - assert len(dest_layers) == len(layers_to_copy), f"{len(dest_layers)} != {len(layers_to_copy)}" - dest_layers.load_state_dict(layers_to_copy.state_dict()) - - -LAYERS_TO_COPY = { - # maps num layers in teacher -> num_layers in student -> which teacher layers to copy. 
- # 12: bart, 16: pegasus, 6: marian/Helsinki-NLP - 12: { - 1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher - 2: [0, 6], - 3: [0, 6, 11], - 4: [0, 4, 8, 11], - 6: [0, 2, 4, 7, 9, 11], - 9: [0, 1, 2, 4, 5, 7, 9, 10, 11], - 12: list(range(12)), - }, - 16: { # maps num layers in student -> which teacher layers to copy - 1: [0], - 2: [0, 15], - 3: [0, 8, 15], - 4: [0, 5, 10, 15], - 6: [0, 3, 6, 9, 12, 15], - 8: [0, 2, 4, 6, 8, 10, 12, 15], - 9: [0, 1, 3, 5, 7, 9, 11, 13, 15], - 12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15], - 16: list(range(16)), - }, - 6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))}, -} -LAYERS_TO_SUPERVISE = { - # maps num layers in student -> which teacher layers to copy. - 6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]}, - 12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]}, - 16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]}, -} - - -def pick_layers_to_copy(n_student, n_teacher): - try: - val = LAYERS_TO_COPY[n_teacher][n_student] - return val - except KeyError: - if n_student != n_teacher: - warnings.warn( - f"no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first" - f" {n_student}" - ) - return list(range(n_student)) - - -def get_layers_to_supervise(n_student, n_teacher) -> List[int]: - """Used or the --supervise_forward kwarg""" - if n_student > n_teacher: - raise ValueError(f"Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}") - elif n_teacher == n_student: - return list(range(n_teacher)) - elif n_student == 1: - return [n_teacher - 1] - else: - return LAYERS_TO_SUPERVISE[n_teacher][n_student] - - -def create_student_by_copying_alternating_layers( - teacher: Union[str, PreTrainedModel], - save_path: Union[str, Path] = "student", - e: Union[int, None] = None, - d: Union[int, None] = None, - copy_first_teacher_layers=False, - e_layers_to_copy=None, - d_layers_to_copy=None, - **extra_config_kwargs, -) -> Tuple[PreTrainedModel, List[int], List[int]]: - """Make a student by copying alternating layers from a teacher, save it to save_path. - Args: - teacher: str or PreTrainedModel if str, this will call AutoModelForSeq2SeqLM.from_pretrained(teacher) before - copying layers - save_path: where to save the student, defaults to student directory. - e: how many Encoder layers should the student have, default is fully copy of teacher - d: how many Decoder layers should the student have, default is fully copy of teacher - copy_first_teacher_layers: [bool] dont copy alternating layers, just the first e/d. - **extra_config_kwargs: extra kwargs to pass to the student, by default the teacher config is used. - - Returns: - student: new, smaller model. (Also saves it to save_path) - e_layers_to_copy: list of which teacher encoder layers were used - d_layers_to_copy: list of which teacher decoder layers were used - """ - _msg = "encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher." 
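    # illustrative call: distil a 12-layer BART teacher into a 12-encoder / 3-decoder student, e.g.
    # create_student_by_copying_alternating_layers("facebook/bart-large-cnn", save_path="student_12_3", e=12, d=3)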
- assert (e is not None) or (d is not None), _msg - if isinstance(teacher, str): - AutoTokenizer.from_pretrained(teacher).save_pretrained(save_path) # purely for convenience - teacher = AutoModelForSeq2SeqLM.from_pretrained(teacher).eval() - else: - assert isinstance(teacher, PreTrainedModel), f"teacher must be a model or string got type {type(teacher)}" - init_kwargs = teacher.config.to_diff_dict() - - try: - teacher_e, teacher_d = teacher.config.encoder_layers, teacher.config.decoder_layers - if e is None: - e = teacher_e - if d is None: - d = teacher_d - init_kwargs.update({"encoder_layers": e, "decoder_layers": d}) - except AttributeError: # T5 - if hasattr(teacher.config, "num_encoder_layers"): - teacher_e, teacher_d = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers - else: - teacher_e, teacher_d = teacher.config.num_layers, teacher.config.num_decoder_layers - if e is None: - e = teacher_e - if d is None: - d = teacher_d - if hasattr(teacher.config, "num_encoder_layers"): - init_kwargs.update({"num_encoder_layers": e, "num_decoder_layers": d}) - else: - init_kwargs.update({"num_layers": e, "num_decoder_layers": d}) - - # Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs - init_kwargs.update(extra_config_kwargs) - - # Copy weights - student_cfg = teacher.config_class(**init_kwargs) - student = AutoModelForSeq2SeqLM.from_config(student_cfg) - # Start by copying the full teacher state dict this will copy the first N teacher layers to the student. - info = student.load_state_dict(teacher.state_dict(), strict=False) - assert info.missing_keys == [], info.missing_keys # every student key should have a teacher keys. - - if copy_first_teacher_layers: # Our copying is done. We just log and save - e_layers_to_copy, d_layers_to_copy = list(range(e)), list(range(d)) - logger.info( - f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to" - f" {save_path}" - ) - student.save_pretrained(save_path) - return student, e_layers_to_copy, d_layers_to_copy - - # Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer. - if e_layers_to_copy is None: - e_layers_to_copy: List[int] = pick_layers_to_copy(e, teacher_e) - if d_layers_to_copy is None: - d_layers_to_copy: List[int] = pick_layers_to_copy(d, teacher_d) - - try: - if hasattr( - teacher, "prophetnet" - ): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers - copy_layers(teacher.prophetnet.encoder.layers, student.prophetnet.encoder.layers, e_layers_to_copy) - copy_layers(teacher.prophetnet.decoder.layers, student.prophetnet.decoder.layers, d_layers_to_copy) - else: - copy_layers(teacher.model.encoder.layers, student.model.encoder.layers, e_layers_to_copy) - copy_layers(teacher.model.decoder.layers, student.model.decoder.layers, d_layers_to_copy) - except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block - copy_layers(teacher.encoder.block, student.encoder.block, e_layers_to_copy) - copy_layers(teacher.decoder.block, student.decoder.block, d_layers_to_copy) - logger.info( - f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. 
Saving them to {save_path}" - ) - student.config.init_metadata = { - "teacher_type": teacher.config.model_type, - "copied_encoder_layers": e_layers_to_copy, - "copied_decoder_layers": d_layers_to_copy, - } - student.save_pretrained(save_path) - # Save information about copying for easier reproducibility - - return student, e_layers_to_copy, d_layers_to_copy - - -if __name__ == "__main__": - fire.Fire(create_student_by_copying_alternating_layers) diff --git a/examples/research_projects/seq2seq-distillation/precomputed_pseudo_labels.md b/examples/research_projects/seq2seq-distillation/precomputed_pseudo_labels.md deleted file mode 100644 index fb2713ccde8..00000000000 --- a/examples/research_projects/seq2seq-distillation/precomputed_pseudo_labels.md +++ /dev/null @@ -1,43 +0,0 @@ -### Saved Pseudo-Labels -These are the generations of various large models on various large **training** sets. All in all they took about 200 GPU hours to produce. - -### Available Pseudo-labels -| Dataset | Model | Link | Rouge Scores | Notes -|---------|-----------------------------|----------------------------------------------------------------------------------------|--------------------|------------------------------------------------------------------------------------------------------------- -| XSUM | `facebook/bart-large-xsum` | [download](https://cdn-datasets.huggingface.co/pseudo/xsum/bart_xsum_pl.tgz) | 49.8/28.0/42.5 | -| XSUM | `google/pegasus-xsum` | [download](https://cdn-datasets.huggingface.co/pseudo/xsum/pegasus_xsum.tgz) | 53.3/32.7/46.5 | -| XSUM | `facebook/bart-large-xsum` | [download](https://cdn-datasets.huggingface.co/pseudo/xsum/xsum_pl2_bart.tgz) | | Bart pseudolabels filtered to those with Rouge2 > 10.0 w GT. -| CNN/DM | `sshleifer/pegasus-cnn-ft-v2` | [download](https://cdn-datasets.huggingface.co/pseudo/cnn_dm/pegasus_cnn_cnn_pls.tgz) | 47.316/26.65/44.56 | do not worry about the fact that train.source is one line shorter. -| CNN/DM | `facebook/bart-large-cnn` | [download](https://cdn-datasets.huggingface.co/pseudo/cnn_dm/cnn_bart_pl.tgz) | | 5K (2%) are missing, there should be 282173 -| CNN/DM | `google/pegasus-xsum` | [download](https://cdn-datasets.huggingface.co/pseudo/cnn_dm/pegasus_xsum_on_cnn.tgz) | 21.5/6.76/25 | extra labels for xsum distillation Used max_source_length=512, (and all other pegasus-xsum configuration). -| EN-RO | `Helsinki-NLP/opus-mt-en-ro` | [download](https://cdn-datasets.huggingface.co/pseudo/wmt_en_ro/opus_mt_en_ro.tgz) | | -| EN-RO | `facebook/mbart-large-en-ro` | [download](https://cdn-datasets.huggingface.co/pseudo/wmt_en_ro/mbart_large_en_ro.tgz) | | - - -(EN_RO = WMT 2016 English-Romanian). - -Example Download Command: -```bash -curl -S https://cdn-datasets.huggingface.co/pseudo/xsum/bart_xsum_pl.tgz | tar -xvz -C . -``` -### Generating New Pseudolabels -Here is the command I used to generate the pseudolabels in the second row of the table, after downloading XSUM from [here](https://cdn-datasets.huggingface.co/summarization/xsum.tar.gz). - -```bash -python -m torch.distributed.launch --nproc_per_node=8 run_distributed_eval.py \ - --model_name google/pegasus-xsum \ - --save_dir pegasus_xsum \ - --data_dir xsum \ - --bs 8 --sync_timeout 60000 \ - --max_source_length 512 \ - --type_path train -``` - -+ These commands takes a while to run. For example, `pegasus_cnn_cnn_pls.tgz` took 8 hours on 8 GPUs. -+ Pegasus does not work in fp16 :(, Bart, mBART and Marian do. 
-+ Even if you have 1 GPU, `run_distributed_eval.py` is 10-20% faster than `run_eval.py` because it uses `SortishSampler` to minimize padding computation. - -### Contributions -Feel free to contribute your own pseudolabels via PR. Add a row to this table with a new google drive link (or other command line downloadable link). - - diff --git a/examples/research_projects/seq2seq-distillation/requirements.txt b/examples/research_projects/seq2seq-distillation/requirements.txt deleted file mode 100644 index 533f6339ab0..00000000000 --- a/examples/research_projects/seq2seq-distillation/requirements.txt +++ /dev/null @@ -1,20 +0,0 @@ -tensorboard -scikit-learn -psutil -sacrebleu -rouge-score -tensorflow_datasets -pytorch-lightning -matplotlib -git-python==1.0.3 -faiss-cpu -streamlit -elasticsearch -nltk -pandas -datasets >= 1.1.3 -fire -pytest -conllu -sentencepiece != 0.1.92 -protobuf diff --git a/examples/research_projects/seq2seq-distillation/run_eval.py b/examples/research_projects/seq2seq-distillation/run_eval.py deleted file mode 100755 index 54ad6c6fb6b..00000000000 --- a/examples/research_projects/seq2seq-distillation/run_eval.py +++ /dev/null @@ -1,167 +0,0 @@ -#!/usr/bin/env python - -import argparse -import datetime -import json -import time -import warnings -from logging import getLogger -from pathlib import Path -from typing import Dict, List - -import torch -from tqdm import tqdm - -from transformers import AutoModelForSeq2SeqLM, AutoTokenizer -from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params - - -logger = getLogger(__name__) - - -DEFAULT_DEVICE = "cuda" if torch.cuda.is_available() else "cpu" - - -def generate_summaries_or_translations( - examples: List[str], - out_file: str, - model_name: str, - batch_size: int = 8, - device: str = DEFAULT_DEVICE, - fp16=False, - task="summarization", - prefix=None, - **generate_kwargs, -) -> Dict: - """Save model.generate results to , and return how long it took.""" - fout = Path(out_file).open("w", encoding="utf-8") - model_name = str(model_name) - model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to(device) - if fp16: - model = model.half() - - tokenizer = AutoTokenizer.from_pretrained(model_name) - logger.info(f"Inferred tokenizer type: {tokenizer.__class__}") # if this is wrong, check config.model_type. - - start_time = time.time() - # update config with task specific params - use_task_specific_params(model, task) - if prefix is None: - prefix = prefix or getattr(model.config, "prefix", "") or "" - for examples_chunk in tqdm(list(chunks(examples, batch_size))): - examples_chunk = [prefix + text for text in examples_chunk] - batch = tokenizer(examples_chunk, return_tensors="pt", truncation=True, padding="longest").to(device) - summaries = model.generate( - input_ids=batch.input_ids, - attention_mask=batch.attention_mask, - **generate_kwargs, - ) - dec = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False) - for hypothesis in dec: - fout.write(hypothesis + "\n") - fout.flush() - fout.close() - runtime = int(time.time() - start_time) # seconds - n_obs = len(examples) - return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs, 4)} - - -def datetime_now(): - return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") - - -def run_generate(verbose=True): - """ - - Takes input text, generates output, and then using reference calculates the BLEU scores. 
- - The results are saved to a file and returned to the caller, and printed out unless ``verbose=False`` is passed. - - Args: - verbose (:obj:`bool`, `optional`, defaults to :obj:`True`): print results to stdout - - Returns: - a tuple: ``(scores, params}`` - - ``scores``: a dict of scores data ``{'bleu': 39.6501, 'n_obs': 2000, 'runtime': 186, 'seconds_per_sample': 0.093}`` - - ``params``: a dict of custom params, e.g. ``{'num_beams': 5, 'length_penalty': 0.8}`` - """ - - parser = argparse.ArgumentParser() - parser.add_argument("model_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.") - parser.add_argument("input_path", type=str, help="like cnn_dm/test.source") - parser.add_argument("save_path", type=str, help="where to save summaries") - parser.add_argument("--reference_path", type=str, required=False, help="like cnn_dm/test.target") - parser.add_argument("--score_path", type=str, required=False, default="metrics.json", help="where to save metrics") - parser.add_argument("--device", type=str, required=False, default=DEFAULT_DEVICE, help="cuda, cuda:1, cpu etc.") - parser.add_argument( - "--prefix", type=str, required=False, default=None, help="will be added to the beginning of src examples" - ) - parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics") - parser.add_argument("--bs", type=int, default=8, required=False, help="batch size") - parser.add_argument( - "--n_obs", type=int, default=-1, required=False, help="How many observations. Defaults to all." - ) - parser.add_argument("--fp16", action="store_true") - parser.add_argument("--dump-args", action="store_true", help="print the custom hparams with the results") - parser.add_argument( - "--info", - nargs="?", - type=str, - const=datetime_now(), - help=( - "use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g." - " lang=en-ru. If no value is passed, the current datetime string will be used." 
- ), - ) - # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate - args, rest = parser.parse_known_args() - parsed_args = parse_numeric_n_bool_cl_kwargs(rest) - if parsed_args and verbose: - print(f"parsed the following generate kwargs: {parsed_args}") - with open(args.input_path) as f: - examples = [" " + x.rstrip() if "t5" in args.model_name else x.rstrip() for x in f.readlines()] - if args.n_obs > 0: - examples = examples[: args.n_obs] - Path(args.save_path).parent.mkdir(exist_ok=True) - if args.reference_path is None and Path(args.score_path).exists(): - warnings.warn(f"score_path {args.score_path} will be overwritten unless you type ctrl-c.") - runtime_metrics = generate_summaries_or_translations( - examples, - args.save_path, - args.model_name, - batch_size=args.bs, - device=args.device, - fp16=args.fp16, - task=args.task, - prefix=args.prefix, - **parsed_args, - ) - - if args.reference_path is None: - return {} - - # Compute scores - score_fn = calculate_bleu if "translation" in args.task else calculate_rouge - output_lns = [x.rstrip() for x in open(args.save_path).readlines()] - reference_lns = [x.rstrip() for x in open(args.reference_path).readlines()][: len(output_lns)] - scores: dict = score_fn(output_lns, reference_lns) - scores.update(runtime_metrics) - - if args.dump_args: - scores.update(parsed_args) - if args.info: - scores["info"] = args.info - - if verbose: - print(scores) - - if args.score_path is not None: - json.dump(scores, open(args.score_path, "w")) - - return scores - - -if __name__ == "__main__": - # Usage for MT: - # python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@ - run_generate(verbose=True) diff --git a/examples/research_projects/seq2seq-distillation/sentence_splitter.py b/examples/research_projects/seq2seq-distillation/sentence_splitter.py deleted file mode 100644 index c5acec73928..00000000000 --- a/examples/research_projects/seq2seq-distillation/sentence_splitter.py +++ /dev/null @@ -1,22 +0,0 @@ -import re - -from filelock import FileLock - - -try: - import nltk - - NLTK_AVAILABLE = True -except (ImportError, ModuleNotFoundError): - NLTK_AVAILABLE = False - -if NLTK_AVAILABLE: - with FileLock(".lock") as lock: - nltk.download("punkt", quiet=True) - - -def add_newline_to_end_of_each_sentence(x: str) -> str: - """This was added to get rougeLsum scores matching published rougeL scores for BART and PEGASUS.""" - re.sub("", "", x) # remove pegasus newline char - assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. 
(pip install nltk)" - return "\n".join(nltk.sent_tokenize(x)) diff --git a/examples/research_projects/seq2seq-distillation/train_distilbart_cnn.sh b/examples/research_projects/seq2seq-distillation/train_distilbart_cnn.sh deleted file mode 100755 index 6a1bafbdc9c..00000000000 --- a/examples/research_projects/seq2seq-distillation/train_distilbart_cnn.sh +++ /dev/null @@ -1,24 +0,0 @@ -#!/usr/bin/env bash -export PYTHONPATH="../":"${PYTHONPATH}" - -export BS=32 -export GAS=1 - -python finetune.py \ - --learning_rate=3e-5 \ - --fp16 \ - --gpus 1 \ - --do_train \ - --do_predict \ - --val_check_interval 0.25 \ - --n_val 500 \ - --num_train_epochs 2 \ - --freeze_encoder --freeze_embeds --data_dir cnn_dm \ - --max_target_length 142 --val_max_target_length=142 \ - --train_batch_size=$BS --eval_batch_size=$BS --gradient_accumulation_steps=$GAS \ - --model_name_or_path sshleifer/student_cnn_12_6 \ - --tokenizer_name facebook/bart-large \ - --warmup_steps 500 \ - --output_dir distilbart-cnn-12-6 \ - "$@" - diff --git a/examples/research_projects/seq2seq-distillation/train_distilbart_xsum.sh b/examples/research_projects/seq2seq-distillation/train_distilbart_xsum.sh deleted file mode 100755 index 86a3440fc0c..00000000000 --- a/examples/research_projects/seq2seq-distillation/train_distilbart_xsum.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/usr/bin/env bash -export PYTHONPATH="../":"${PYTHONPATH}" -python distillation.py \ - --teacher facebook/bart-large-xsum --data_dir xsum \ - --tokenizer_name facebook/bart-large-xsum \ - --student_decoder_layers 6 --student_encoder_layers 12 \ - --freeze_encoder --freeze_embeds \ - --learning_rate=3e-4 \ - --do_train \ - --do_predict \ - --fp16 --fp16_opt_level=O1 \ - --val_check_interval 0.1 --n_val 1000 --eval_beams 2 --length_penalty=0.5 \ - --max_target_length=60 --val_max_target_length=60 --test_max_target_length=100 \ - --model_name_or_path IGNORED \ - --alpha_hid=3. 
\ - --train_batch_size=16 --eval_batch_size=16 --gradient_accumulation_steps=2 \ - --sortish_sampler \ - --num_train_epochs=6 \ - --warmup_steps 500 \ - --output_dir distilbart_xsum_12_6 \ - "$@" diff --git a/examples/research_projects/seq2seq-distillation/train_mbart_cc25_enro.sh b/examples/research_projects/seq2seq-distillation/train_mbart_cc25_enro.sh deleted file mode 100755 index 54e7935ff60..00000000000 --- a/examples/research_projects/seq2seq-distillation/train_mbart_cc25_enro.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/usr/bin/env bash -export PYTHONPATH="../":"${PYTHONPATH}" - -python finetune.py \ - --learning_rate=3e-5 \ - --fp16 \ - --do_train \ - --val_check_interval=0.25 \ - --adam_eps 1e-06 \ - --num_train_epochs 6 --src_lang en_XX --tgt_lang ro_RO \ - --data_dir $ENRO_DIR \ - --max_source_length $MAX_LEN --max_target_length $MAX_LEN --val_max_target_length $MAX_LEN --test_max_target_length $MAX_LEN \ - --train_batch_size=$BS --eval_batch_size=$BS \ - --task translation \ - --warmup_steps 500 \ - --freeze_embeds \ - --model_name_or_path=facebook/mbart-large-cc25 \ - "$@" diff --git a/examples/research_projects/seq2seq-distillation/utils.py b/examples/research_projects/seq2seq-distillation/utils.py deleted file mode 100644 index de666e0c249..00000000000 --- a/examples/research_projects/seq2seq-distillation/utils.py +++ /dev/null @@ -1,645 +0,0 @@ -import itertools -import json -import linecache -import math -import os -import pickle -import socket -from logging import getLogger -from pathlib import Path -from typing import Callable, Dict, Iterable, List, Tuple, Union - -import git -import numpy as np -import torch -import torch.distributed as dist -from rouge_score import rouge_scorer, scoring -from sacrebleu import corpus_bleu -from sentence_splitter import add_newline_to_end_of_each_sentence -from torch import nn -from torch.utils.data import Dataset, Sampler - -from transformers import BartTokenizer, EvalPrediction, PreTrainedTokenizer, T5Tokenizer -from transformers.file_utils import cached_property -from transformers.models.bart.modeling_bart import shift_tokens_right - - -try: - from fairseq.data.data_utils import batch_by_size - - FAIRSEQ_AVAILABLE = True -except (ImportError, ModuleNotFoundError): - FAIRSEQ_AVAILABLE = False - - -def label_smoothed_nll_loss(lprobs, target, epsilon, ignore_index=-100): - """From fairseq""" - if target.dim() == lprobs.dim() - 1: - target = target.unsqueeze(-1) - nll_loss = -lprobs.gather(dim=-1, index=target) - smooth_loss = -lprobs.sum(dim=-1, keepdim=True) - if ignore_index is not None: - pad_mask = target.eq(ignore_index) - nll_loss.masked_fill_(pad_mask, 0.0) - smooth_loss.masked_fill_(pad_mask, 0.0) - else: - nll_loss = nll_loss.squeeze(-1) - smooth_loss = smooth_loss.squeeze(-1) - - nll_loss = nll_loss.sum() # mean()? Scared to break other math. 
- smooth_loss = smooth_loss.sum() - eps_i = epsilon / lprobs.size(-1) - loss = (1.0 - epsilon) * nll_loss + eps_i * smooth_loss - return loss, nll_loss - - -def lmap(f: Callable, x: Iterable) -> List: - """list(map(f, x))""" - return list(map(f, x)) - - -def calculate_bleu(output_lns, refs_lns, **kwargs) -> dict: - """Uses sacrebleu's corpus_bleu implementation.""" - return {"bleu": round(corpus_bleu(output_lns, [refs_lns], **kwargs).score, 4)} - - -def build_compute_metrics_fn(task_name: str, tokenizer: PreTrainedTokenizer) -> Callable[[EvalPrediction], Dict]: - def non_pad_len(tokens: np.ndarray) -> int: - return np.count_nonzero(tokens != tokenizer.pad_token_id) - - def decode_pred(pred: EvalPrediction) -> Tuple[List[str], List[str]]: - pred_str = tokenizer.batch_decode(pred.predictions, skip_special_tokens=True) - label_str = tokenizer.batch_decode(pred.label_ids, skip_special_tokens=True) - pred_str = lmap(str.strip, pred_str) - label_str = lmap(str.strip, label_str) - return pred_str, label_str - - def summarization_metrics(pred: EvalPrediction) -> Dict: - pred_str, label_str = decode_pred(pred) - rouge: Dict = calculate_rouge(pred_str, label_str) - summ_len = np.round(np.mean(lmap(non_pad_len, pred.predictions)), 1) - rouge.update({"gen_len": summ_len}) - return rouge - - def translation_metrics(pred: EvalPrediction) -> Dict: - pred_str, label_str = decode_pred(pred) - bleu: Dict = calculate_bleu(pred_str, label_str) - gen_len = np.round(np.mean(lmap(non_pad_len, pred.predictions)), 1) - bleu.update({"gen_len": gen_len}) - return bleu - - compute_metrics_fn = summarization_metrics if "summarization" in task_name else translation_metrics - return compute_metrics_fn - - -def trim_batch( - input_ids, - pad_token_id, - attention_mask=None, -): - """Remove columns that are populated exclusively by pad_token_id""" - keep_column_mask = input_ids.ne(pad_token_id).any(dim=0) - if attention_mask is None: - return input_ids[:, keep_column_mask] - else: - return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask]) - - -class AbstractSeq2SeqDataset(Dataset): - def __init__( - self, - tokenizer, - data_dir, - max_source_length, - max_target_length, - type_path="train", - n_obs=None, - prefix="", - **dataset_kwargs, - ): - super().__init__() - self.src_file = Path(data_dir).joinpath(type_path + ".source") - self.tgt_file = Path(data_dir).joinpath(type_path + ".target") - self.len_file = Path(data_dir).joinpath(type_path + ".len") - if os.path.exists(self.len_file): - self.src_lens = pickle_load(self.len_file) - self.used_char_len = False - else: - self.src_lens = self.get_char_lens(self.src_file) - self.used_char_len = True - self.max_source_length = max_source_length - self.max_target_length = max_target_length - assert min(self.src_lens) > 0, f"found empty line in {self.src_file}" - self.tokenizer = tokenizer - self.prefix = prefix if prefix is not None else "" - - if n_obs is not None: - self.src_lens = self.src_lens[:n_obs] - self.pad_token_id = self.tokenizer.pad_token_id - self.dataset_kwargs = dataset_kwargs - dataset_kwargs.update({"add_prefix_space": True} if isinstance(self.tokenizer, BartTokenizer) else {}) - - def __len__(self): - return len(self.src_lens) - - @staticmethod - def get_char_lens(data_file): - return [len(x) for x in Path(data_file).open().readlines()] - - @cached_property - def tgt_lens(self): - """Length in characters of target documents""" - return self.get_char_lens(self.tgt_file) - - def make_sortish_sampler(self, batch_size, distributed=False, 
shuffle=True, **kwargs): - if distributed: - return DistributedSortishSampler(self, batch_size, shuffle=shuffle, **kwargs) - else: - return SortishSampler(self.src_lens, batch_size, shuffle=shuffle) - - def make_dynamic_sampler(self, max_tokens_per_batch=1024, **kwargs): - assert FAIRSEQ_AVAILABLE, "Dynamic batch size requires `pip install fairseq`" - assert not self.used_char_len, "You must call python make_len_file.py before calling make_dynamic_sampler" - sorted_indices = list(self.make_sortish_sampler(1024, shuffle=False)) - - def num_tokens_in_example(i): - return min(self.src_lens[i], self.max_target_length) - - # call fairseq cython function - batch_sampler: List[List[int]] = batch_by_size( - sorted_indices, - num_tokens_fn=num_tokens_in_example, - max_tokens=max_tokens_per_batch, - required_batch_size_multiple=64, - ) - shuffled_batches = [batch_sampler[i] for i in np.random.permutation(range(len(batch_sampler)))] - # move the largest batch to the front to OOM quickly (uses an approximation for padding) - approximate_toks_per_batch = [max(self.src_lens[i] for i in batch) * len(batch) for batch in shuffled_batches] - largest_batch_idx = np.argmax(approximate_toks_per_batch) - shuffled_batches[0], shuffled_batches[largest_batch_idx] = ( - shuffled_batches[largest_batch_idx], - shuffled_batches[0], - ) - return shuffled_batches - - def __getitem__(self, item): - raise NotImplementedError("You must implement this") - - def collate_fn(self, batch): - raise NotImplementedError("You must implement this") - - -class LegacySeq2SeqDataset(AbstractSeq2SeqDataset): - def __getitem__(self, index) -> Dict[str, torch.Tensor]: - """Call tokenizer on src and tgt_lines""" - index = index + 1 # linecache starts at 1 - source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n") - tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n") - assert source_line, f"empty source line for index {index}" - assert tgt_line, f"empty tgt line for index {index}" - source_inputs = self.encode_line(self.tokenizer, source_line, self.max_source_length) - target_inputs = self.encode_line(self.tokenizer, tgt_line, self.max_target_length) - - source_ids = source_inputs["input_ids"].squeeze() - target_ids = target_inputs["input_ids"].squeeze() - src_mask = source_inputs["attention_mask"].squeeze() - return { - "input_ids": source_ids, - "attention_mask": src_mask, - "labels": target_ids, - } - - def encode_line(self, tokenizer, line, max_length, pad_to_max_length=True, return_tensors="pt"): - """Only used by LegacyDataset""" - return tokenizer( - [line], - max_length=max_length, - padding="max_length" if pad_to_max_length else None, - truncation=True, - return_tensors=return_tensors, - **self.dataset_kwargs, - ) - - def collate_fn(self, batch) -> Dict[str, torch.Tensor]: - input_ids = torch.stack([x["input_ids"] for x in batch]) - masks = torch.stack([x["attention_mask"] for x in batch]) - target_ids = torch.stack([x["labels"] for x in batch]) - pad_token_id = self.pad_token_id - y = trim_batch(target_ids, pad_token_id) - source_ids, source_mask = trim_batch(input_ids, pad_token_id, attention_mask=masks) - batch = { - "input_ids": source_ids, - "attention_mask": source_mask, - "labels": y, - } - return batch - - -class Seq2SeqDataset(AbstractSeq2SeqDataset): - """A dataset that calls prepare_seq2seq_batch.""" - - def __getitem__(self, index) -> Dict[str, str]: - index = index + 1 # linecache starts at 1 - source_line = self.prefix + linecache.getline(str(self.src_file), 
index).rstrip("\n") - tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n") - assert source_line, f"empty source line for index {index}" - assert tgt_line, f"empty tgt line for index {index}" - return {"tgt_texts": tgt_line, "src_texts": source_line, "id": index - 1} - - def collate_fn(self, batch) -> Dict[str, torch.Tensor]: - """Call prepare_seq2seq_batch.""" - batch_encoding: Dict[str, torch.Tensor] = self.tokenizer.prepare_seq2seq_batch( - [x["src_texts"] for x in batch], - tgt_texts=[x["tgt_texts"] for x in batch], - max_length=self.max_source_length, - max_target_length=self.max_target_length, - return_tensors="pt", - **self.dataset_kwargs, - ).data - batch_encoding["ids"] = torch.tensor([x["id"] for x in batch]) - return batch_encoding - - -class Seq2SeqDataCollator: - def __init__(self, tokenizer, data_args, tpu_num_cores=None): - self.tokenizer = tokenizer - self.pad_token_id = tokenizer.pad_token_id - assert ( - self.pad_token_id is not None - ), f"pad_token_id is not defined for ({self.tokenizer.__class__.__name__}), it must be defined." - self.data_args = data_args - self.tpu_num_cores = tpu_num_cores - self.dataset_kwargs = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) else {} - if data_args.src_lang is not None: - self.dataset_kwargs["src_lang"] = data_args.src_lang - if data_args.tgt_lang is not None: - self.dataset_kwargs["tgt_lang"] = data_args.tgt_lang - - def __call__(self, batch) -> Dict[str, torch.Tensor]: - if hasattr(self.tokenizer, "prepare_seq2seq_batch"): - batch = self._encode(batch) - input_ids, attention_mask, labels = ( - batch["input_ids"], - batch["attention_mask"], - batch["labels"], - ) - else: - input_ids = torch.stack([x["input_ids"] for x in batch]) - attention_mask = torch.stack([x["attention_mask"] for x in batch]) - labels = torch.stack([x["labels"] for x in batch]) - - labels = trim_batch(labels, self.pad_token_id) - input_ids, attention_mask = trim_batch(input_ids, self.pad_token_id, attention_mask=attention_mask) - - if isinstance(self.tokenizer, T5Tokenizer): - decoder_input_ids = self._shift_right_t5(labels) - else: - decoder_input_ids = shift_tokens_right(labels, self.pad_token_id) - - batch = { - "input_ids": input_ids, - "attention_mask": attention_mask, - "decoder_input_ids": decoder_input_ids, - "labels": labels, - } - return batch - - def _shift_right_t5(self, input_ids): - # shift inputs to the right - shifted_input_ids = input_ids.new_zeros(input_ids.shape) - shifted_input_ids[..., 1:] = input_ids[..., :-1].clone() - shifted_input_ids[..., 0] = self.pad_token_id - return shifted_input_ids - - def _encode(self, batch) -> Dict[str, torch.Tensor]: - batch_encoding = self.tokenizer.prepare_seq2seq_batch( - [x["src_texts"] for x in batch], - tgt_texts=[x["tgt_texts"] for x in batch], - max_length=self.data_args.max_source_length, - max_target_length=self.data_args.max_target_length, - padding="max_length" if self.tpu_num_cores is not None else "longest", # TPU hack - return_tensors="pt", - **self.dataset_kwargs, - ) - return batch_encoding.data - - -class SortishSampler(Sampler): - "Go through the text data by order of src length with a bit of randomness. From fastai repo." 
- - def __init__(self, data, batch_size, shuffle=True): - self.data, self.bs, self.shuffle = data, batch_size, shuffle - - def __len__(self) -> int: - return len(self.data) - - def __iter__(self): - return iter(sortish_sampler_indices(self.data, self.bs, shuffle=self.shuffle)) - - -def sortish_sampler_indices(data: List, bs: int, shuffle=True) -> np.array: - "Go through the text data by order of src length with a bit of randomness. From fastai repo." - if not shuffle: - return np.argsort(np.array(data) * -1) - - def key_fn(i): - return data[i] - - idxs = np.random.permutation(len(data)) - sz = bs * 50 - ck_idx = [idxs[i : i + sz] for i in range(0, len(idxs), sz)] - sort_idx = np.concatenate([sorted(s, key=key_fn, reverse=True) for s in ck_idx]) - sz = bs - ck_idx = [sort_idx[i : i + sz] for i in range(0, len(sort_idx), sz)] - max_ck = np.argmax([key_fn(ck[0]) for ck in ck_idx]) # find the chunk with the largest key, - ck_idx[0], ck_idx[max_ck] = ck_idx[max_ck], ck_idx[0] # then make sure it goes first. - sort_idx = np.concatenate(np.random.permutation(ck_idx[1:])) if len(ck_idx) > 1 else np.array([], dtype=int) - sort_idx = np.concatenate((ck_idx[0], sort_idx)) - return sort_idx - - -class DistributedSortishSampler(Sampler): - """Copied from torch DistributedSampler""" - - def __init__(self, dataset, batch_size, num_replicas=None, rank=None, add_extra_examples=True, shuffle=True): - if num_replicas is None: - if not dist.is_available(): - raise RuntimeError("Requires distributed package to be available") - num_replicas = dist.get_world_size() - if rank is None: - if not dist.is_available(): - raise RuntimeError("Requires distributed package to be available") - rank = dist.get_rank() - self.dataset = dataset - self.num_replicas = num_replicas - self.rank = rank - self.epoch = 0 - if add_extra_examples: - self.num_samples = int(math.ceil(len(self.dataset) * 1.0 / self.num_replicas)) - self.total_size = self.num_samples * self.num_replicas - else: - self.total_size = len(dataset) - self.num_samples = len(self.available_indices) - self.batch_size = batch_size - self.add_extra_examples = add_extra_examples - self.shuffle = shuffle - - def __iter__(self) -> Iterable: - g = torch.Generator() - g.manual_seed(self.epoch) - - sortish_data = [self.dataset.src_lens[i] for i in self.available_indices] - sortish_indices = sortish_sampler_indices(sortish_data, self.batch_size, shuffle=self.shuffle) - indices = [self.available_indices[i] for i in sortish_indices] - assert len(indices) == self.num_samples - return iter(indices) - - @cached_property - def available_indices(self) -> np.array: - indices = list(range(len(self.dataset))) - # add extra samples to make it evenly divisible - indices += indices[: (self.total_size - len(indices))] - assert len(indices) == self.total_size - # subsample - available_indices = indices[self.rank : self.total_size : self.num_replicas] - return available_indices - - def __len__(self): - return self.num_samples - - def set_epoch(self, epoch): - self.epoch = epoch - - -logger = getLogger(__name__) - - -def use_task_specific_params(model, task): - """Update config with summarization specific params.""" - task_specific_params = model.config.task_specific_params - - if task_specific_params is not None: - pars = task_specific_params.get(task, {}) - logger.info(f"using task specific params for {task}: {pars}") - model.config.update(pars) - - -def pickle_load(path): - """pickle.load(path)""" - with open(path, "rb") as f: - return pickle.load(f) - - -def pickle_save(obj, path): - 
"""pickle.dump(obj, path)""" - with open(path, "wb") as f: - return pickle.dump(obj, f) - - -def flatten_list(summary_ids: List[List]): - return list(itertools.chain.from_iterable(summary_ids)) - - -def save_git_info(folder_path: str) -> None: - """Save git information to output_dir/git_log.json""" - repo_infos = get_git_info() - save_json(repo_infos, os.path.join(folder_path, "git_log.json")) - - -def save_json(content, path, indent=4, **json_dump_kwargs): - with open(path, "w") as f: - json.dump(content, f, indent=indent, **json_dump_kwargs) - - -def load_json(path): - with open(path) as f: - return json.load(f) - - -def get_git_info(): - try: - repo = git.Repo(search_parent_directories=True) - repo_infos = { - "repo_id": str(repo), - "repo_sha": str(repo.head.object.hexsha), - "repo_branch": str(repo.active_branch), - "hostname": str(socket.gethostname()), - } - return repo_infos - except TypeError: - return { - "repo_id": None, - "repo_sha": None, - "repo_branch": None, - "hostname": None, - } - - -ROUGE_KEYS = ["rouge1", "rouge2", "rougeL", "rougeLsum"] - - -def extract_rouge_mid_statistics(dct): - new_dict = {} - for k1, v1 in dct.items(): - mid = v1.mid - new_dict[k1] = {stat: round(getattr(mid, stat), 4) for stat in ["precision", "recall", "fmeasure"]} - return new_dict - - -def calculate_rouge( - pred_lns: List[str], - tgt_lns: List[str], - use_stemmer=True, - rouge_keys=ROUGE_KEYS, - return_precision_and_recall=False, - bootstrap_aggregation=True, - newline_sep=True, -) -> Dict: - """Calculate rouge using rouge_scorer package. - - Args: - pred_lns: list of summaries generated by model - tgt_lns: list of groundtruth summaries (e.g. contents of val.target) - use_stemmer: Bool indicating whether Porter stemmer should be used to - strip word suffixes to improve matching. - rouge_keys: which metrics to compute, defaults to rouge1, rouge2, rougeL, rougeLsum - return_precision_and_recall: (False) whether to also return precision and recall. - bootstrap_aggregation: whether to do the typical bootstrap resampling of scores. Defaults to True, if False - this function returns a collections.defaultdict[metric: list of values for each observation for each subscore]`` - newline_sep:(default=True) whether to add newline between sentences. This is essential for calculation rougeL - on multi sentence summaries (CNN/DM dataset). 
- - Returns: - Dict[score: value] if aggregate else defaultdict(list) keyed by rouge_keys - - """ - scorer = rouge_scorer.RougeScorer(rouge_keys, use_stemmer=use_stemmer) - aggregator = scoring.BootstrapAggregator() - for pred, tgt in zip(tgt_lns, pred_lns): - # rougeLsum expects "\n" separated sentences within a summary - if newline_sep: - pred = add_newline_to_end_of_each_sentence(pred) - tgt = add_newline_to_end_of_each_sentence(tgt) - scores = scorer.score(pred, tgt) - aggregator.add_scores(scores) - - if bootstrap_aggregation: - result = aggregator.aggregate() - if return_precision_and_recall: - return extract_rouge_mid_statistics(result) # here we return dict - else: - return {k: round(v.mid.fmeasure * 100, 4) for k, v in result.items()} - - else: - return aggregator._scores # here we return defaultdict(list) - - -# Utilities for freezing parameters and checking whether they are frozen - - -def freeze_params(model: nn.Module): - """Set requires_grad=False for each of model.parameters()""" - for par in model.parameters(): - par.requires_grad = False - - -def freeze_embeds(model): - """Freeze token embeddings and positional embeddings for bart, just token embeddings for t5.""" - model_type = model.config.model_type - - if model_type == "t5": - freeze_params(model.shared) - for d in [model.encoder, model.decoder]: - freeze_params(d.embed_tokens) - elif model_type == "fsmt": - for d in [model.model.encoder, model.model.decoder]: - freeze_params(d.embed_positions) - freeze_params(d.embed_tokens) - else: - freeze_params(model.model.shared) - for d in [model.model.encoder, model.model.decoder]: - freeze_params(d.embed_positions) - freeze_params(d.embed_tokens) - - -def grad_status(model: nn.Module) -> Iterable: - return (par.requires_grad for par in model.parameters()) - - -def any_requires_grad(model: nn.Module) -> bool: - return any(grad_status(model)) - - -def assert_all_frozen(model): - model_grads: List[bool] = list(grad_status(model)) - n_require_grad = sum(lmap(int, model_grads)) - npars = len(model_grads) - assert not any(model_grads), f"{n_require_grad/npars:.1%} of {npars} weights require grad" - - -def assert_not_all_frozen(model): - model_grads: List[bool] = list(grad_status(model)) - npars = len(model_grads) - assert any(model_grads), f"none of {npars} weights require grad" - - -def parse_numeric_n_bool_cl_kwargs(unparsed_args: List[str]) -> Dict[str, Union[int, float, bool]]: - """ - Parse an argv list of unspecified command line args to a dict. - Assumes all values are either numeric or boolean in the form of true/false. 
- """ - result = {} - assert len(unparsed_args) % 2 == 0, f"got odd number of unparsed args: {unparsed_args}" - num_pairs = len(unparsed_args) // 2 - for pair_num in range(num_pairs): - i = 2 * pair_num - assert unparsed_args[i].startswith("--") - if unparsed_args[i + 1].lower() == "true": - value = True - elif unparsed_args[i + 1].lower() == "false": - value = False - else: - try: - value = int(unparsed_args[i + 1]) - except ValueError: - value = float(unparsed_args[i + 1]) # this can raise another informative ValueError - - result[unparsed_args[i][2:]] = value - return result - - -def write_txt_file(ordered_tgt, path): - f = Path(path).open("w") - for ln in ordered_tgt: - f.write(ln + "\n") - f.flush() - - -def chunks(lst, n): - """Yield successive n-sized chunks from lst.""" - for i in range(0, len(lst), n): - yield lst[i : i + n] - - -def check_output_dir(args, expected_items=0): - """ - Checks whether to bail out if output_dir already exists and has more than expected_items in it - - `args`: needs to have the following attributes of `args`: - - output_dir - - do_train - - overwrite_output_dir - - `expected_items`: normally 0 (default) - i.e. empty dir, but in some cases a few files are expected (e.g. recovery from OOM) - """ - if ( - os.path.exists(args.output_dir) - and len(os.listdir(args.output_dir)) > expected_items - and args.do_train - and not args.overwrite_output_dir - ): - raise ValueError( - f"Output directory ({args.output_dir}) already exists and " - f"has {len(os.listdir(args.output_dir))} items in it (expected {expected_items} items). " - "Use --overwrite_output_dir to overcome." - ) diff --git a/examples/research_projects/synthid_text/README.md b/examples/research_projects/synthid_text/README.md deleted file mode 100644 index 30ab9990373..00000000000 --- a/examples/research_projects/synthid_text/README.md +++ /dev/null @@ -1,34 +0,0 @@ -# SynthID Text - -This project showcases the use of SynthIDText for watermarking LLMs. The code shown in this repo also -demostrates the training of the detector for detecting such watermarked text. This detector can be uploaded onto -a private HF hub repo (private for security reasons) and can be initialized again through pretrained model loading also shown in this script. - -See our blog post: https://huggingface.co/blog/synthid-text - - -## Python version - -User would need python 3.9 to run this example. - -## Installation and running - -Once you install transformers you would need to install requirements for this project through requirements.txt provided in this folder. - -``` -pip install -r requirements.txt -``` - -## To run the detector training - -``` -python detector_training.py --model_name=google/gemma-7b-it -``` - -Check the script for more parameters are are tunable and check out paper at link -https://www.nature.com/articles/s41586-024-08025-4 for more information on these parameters. - -## Caveat - -Make sure to run the training of the detector and the detection on the same hardware -CPU, GPU or TPU to get consistent results (we use detecterministic randomness which is hardware dependent). diff --git a/examples/research_projects/synthid_text/detector_training.py b/examples/research_projects/synthid_text/detector_training.py deleted file mode 100644 index 35d0ea22f42..00000000000 --- a/examples/research_projects/synthid_text/detector_training.py +++ /dev/null @@ -1,502 +0,0 @@ -# coding=utf-8 -# Copyright 2024 Google DeepMind. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import argparse -import dataclasses -import enum -from typing import Any, Dict, List, Optional, Tuple, Union - -import numpy as np -import torch - -from transformers import ( - AutoModelForCausalLM, - AutoTokenizer, - BayesianDetectorConfig, - BayesianDetectorModel, - SynthIDTextWatermarkDetector, - SynthIDTextWatermarkingConfig, - SynthIDTextWatermarkLogitsProcessor, -) -from utils import ( - get_tokenized_uwm_outputs, - get_tokenized_wm_outputs, - process_raw_model_outputs, - update_fn_if_fpr_tpr, - upload_model_to_hf, -) - - -@enum.unique -class ValidationMetric(enum.Enum): - """Direction along the z-axis.""" - - TPR_AT_FPR = "tpr_at_fpr" - CROSS_ENTROPY = "cross_entropy" - - -@dataclasses.dataclass -class TrainingArguments: - """Training arguments pertaining to the training loop itself.""" - - eval_metric: Optional[str] = dataclasses.field( - default=ValidationMetric.TPR_AT_FPR, metadata={"help": "The evaluation metric used."} - ) - - -def train_detector( - detector: torch.nn.Module, - g_values: torch.Tensor, - mask: torch.Tensor, - watermarked: torch.Tensor, - epochs: int = 250, - learning_rate: float = 1e-3, - minibatch_size: int = 64, - seed: int = 0, - l2_weight: float = 0.0, - shuffle: bool = True, - g_values_val: Optional[torch.Tensor] = None, - mask_val: Optional[torch.Tensor] = None, - watermarked_val: Optional[torch.Tensor] = None, - verbose: bool = False, - validation_metric: ValidationMetric = ValidationMetric.TPR_AT_FPR, -) -> Tuple[Dict[str, Any], float]: - """Trains a Bayesian detector model. - - Args: - g_values: g-values of shape [num_train, seq_len, watermarking_depth]. - mask: A binary array shape [num_train, seq_len] indicating which g-values - should be used. g-values with mask value 0 are discarded. - watermarked: A binary array of shape [num_train] indicating whether the - example is watermarked (0: unwatermarked, 1: watermarked). - epochs: Number of epochs to train for. - learning_rate: Learning rate for optimizer. - minibatch_size: Minibatch size for training. Note that a minibatch - requires ~ 32 * minibatch_size * seq_len * watermarked_depth * - watermarked_depth bits of memory. - seed: Seed for parameter initialization. - l2_weight: Weight to apply to L2 regularization for delta parameters. - shuffle: Whether to shuffle before training. - g_values_val: Validation g-values of shape [num_val, seq_len, - watermarking_depth]. - mask_val: Validation mask of shape [num_val, seq_len]. - watermarked_val: Validation watermark labels of shape [num_val]. - verbose: Boolean indicating verbosity of training. If true, the loss will - be printed. Defaulted to False. - use_tpr_fpr_for_val: Whether to use TPR@FPR=1% as metric for validation. - If false, use cross entropy loss. - - Returns: - Tuple of - training_history: Training history keyed by epoch number where the - values are - dictionaries containing the loss, validation loss, and model - parameters, - keyed by - 'loss', 'val_loss', and 'params', respectively. 
- min_val_loss: Minimum validation loss achieved during training. - """ - - # Set the random seed for reproducibility - torch.manual_seed(seed) - - # Shuffle the data if required - if shuffle: - indices = torch.randperm(len(g_values)) - g_values = g_values[indices] - mask = mask[indices] - watermarked = watermarked[indices] - - # Initialize optimizer - optimizer = torch.optim.Adam(detector.parameters(), lr=learning_rate) - history = {} - min_val_loss = float("inf") - - for epoch in range(epochs): - losses = [] - detector.train() - num_batches = len(g_values) // minibatch_size - for i in range(0, len(g_values), minibatch_size): - end = i + minibatch_size - if end > len(g_values): - break - loss_batch_weight = l2_weight / num_batches - - optimizer.zero_grad() - loss = detector( - g_values=g_values[i:end], - mask=mask[i:end], - labels=watermarked[i:end], - loss_batch_weight=loss_batch_weight, - )[1] - loss.backward() - optimizer.step() - losses.append(loss.item()) - train_loss = sum(losses) / len(losses) - - val_losses = [] - if g_values_val is not None: - detector.eval() - if validation_metric == ValidationMetric.TPR_AT_FPR: - val_loss = update_fn_if_fpr_tpr( - detector, - g_values_val, - mask_val, - watermarked_val, - minibatch_size=minibatch_size, - ) - else: - for i in range(0, len(g_values_val), minibatch_size): - end = i + minibatch_size - if end > len(g_values_val): - break - with torch.no_grad(): - v_loss = detector( - g_values=g_values_val[i:end], - mask=mask_val[i:end], - labels=watermarked_val[i:end], - loss_batch_weight=0, - )[1] - val_losses.append(v_loss.item()) - val_loss = sum(val_losses) / len(val_losses) - - # Store training history - history[epoch + 1] = {"loss": train_loss, "val_loss": val_loss} - if verbose: - if val_loss is not None: - print(f"Epoch {epoch}: loss {loss} (train), {val_loss} (val)") - else: - print(f"Epoch {epoch}: loss {loss} (train)") - - if val_loss is not None and val_loss < min_val_loss: - min_val_loss = val_loss - best_val_epoch = epoch - - if verbose: - print(f"Best val Epoch: {best_val_epoch}, min_val_loss: {min_val_loss}") - - return history, min_val_loss - - -def train_best_detector( - tokenized_wm_outputs: Union[List[np.ndarray], np.ndarray], - tokenized_uwm_outputs: Union[List[np.ndarray], np.ndarray], - logits_processor: SynthIDTextWatermarkLogitsProcessor, - tokenizer: Any, - torch_device: torch.device, - test_size: float = 0.3, - pos_truncation_length: Optional[int] = 200, - neg_truncation_length: Optional[int] = 100, - max_padded_length: int = 2300, - n_epochs: int = 50, - learning_rate: float = 2.1e-2, - l2_weights: np.ndarray = np.logspace(-3, -2, num=4), - verbose: bool = False, - validation_metric: ValidationMetric = ValidationMetric.TPR_AT_FPR, -): - """Train and return the best detector given range of hyperparameters. - - In practice, we have found that tuning pos_truncation_length, - neg_truncation_length, n_epochs, learning_rate and l2_weights can help - improve the performance of the detector. We reccommend tuning these - parameters for your data. 
- """ - l2_weights = list(l2_weights) - - ( - train_g_values, - train_masks, - train_labels, - cv_g_values, - cv_masks, - cv_labels, - ) = process_raw_model_outputs( - logits_processor, - tokenizer, - pos_truncation_length, - neg_truncation_length, - max_padded_length, - tokenized_wm_outputs, - test_size, - tokenized_uwm_outputs, - torch_device, - ) - - best_detector = None - lowest_loss = float("inf") - val_losses = [] - for l2_weight in l2_weights: - config = BayesianDetectorConfig(watermarking_depth=len(logits_processor.keys)) - detector = BayesianDetectorModel(config).to(torch_device) - _, min_val_loss = train_detector( - detector=detector, - g_values=train_g_values, - mask=train_masks, - watermarked=train_labels, - g_values_val=cv_g_values, - mask_val=cv_masks, - watermarked_val=cv_labels, - learning_rate=learning_rate, - l2_weight=l2_weight, - epochs=n_epochs, - verbose=verbose, - validation_metric=validation_metric, - ) - val_losses.append(min_val_loss) - if min_val_loss < lowest_loss: - lowest_loss = min_val_loss - best_detector = detector - return best_detector, lowest_loss - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument( - "--model_name", - type=str, - default="google/gemma-2b-it", - help=("LM model to train the detector for."), - ) - parser.add_argument( - "--temperature", - type=float, - default=1.0, - help=("Temperature to sample from the model."), - ) - parser.add_argument( - "--top_k", - type=int, - default=40, - help=("Top K for sampling."), - ) - parser.add_argument( - "--top_p", - type=float, - default=1.0, - help=("Top P for sampling."), - ) - parser.add_argument( - "--num_negatives", - type=int, - default=10000, - help=("Number of negatives for detector training."), - ) - parser.add_argument( - "--pos_batch_size", - type=int, - default=32, - help=("Batch size of watermarked positives while sampling."), - ) - parser.add_argument( - "--num_pos_batch", - type=int, - default=313, - help=("Number of positive batches for training."), - ) - parser.add_argument( - "--generation_length", - type=int, - default=512, - help=("Generation length for sampling."), - ) - parser.add_argument( - "--save_model_to_hf_hub", - action="store_true", - help=("Whether to save the trained model HF hub. By default it will be a private repo."), - ) - parser.add_argument( - "--load_from_hf_hub", - action="store_true", - help=( - "Whether to load trained detector model from HF Hub, make sure its the model trained on the same model " - "we are loading in the script." - ), - ) - parser.add_argument( - "--hf_hub_model_name", - type=str, - default=None, - help=("HF hub model name for loading of saving the model."), - ) - parser.add_argument( - "--eval_detector_on_prompts", - action="store_true", - help=("Evaluate detector on a prompt and print probability of watermark."), - ) - - args = parser.parse_args() - model_name = args.model_name - temperature = args.temperature - top_k = args.top_k - top_p = args.top_p - num_negatives = args.num_negatives - pos_batch_size = args.pos_batch_size - num_pos_batch = args.num_pos_batch - if num_pos_batch < 10: - raise ValueError("--num_pos_batch should be greater than 10.") - generation_length = args.generation_length - save_model_to_hf_hub = args.save_model_to_hf_hub - load_from_hf_hub = args.load_from_hf_hub - repo_name = args.hf_hub_model_name - eval_detector_on_prompts = args.eval_detector_on_prompts - - NEG_BATCH_SIZE = 32 - - # Truncate outputs to this length for training. 
- POS_TRUNCATION_LENGTH = 200 - NEG_TRUNCATION_LENGTH = 100 - # Pad trucated outputs to this length for equal shape across all batches. - MAX_PADDED_LENGTH = 1000 - - DEVICE = torch.device("cuda:0") if torch.cuda.is_available() else torch.device("cpu") - if DEVICE.type not in ("cuda", "tpu"): - raise ValueError("We have found the training stable on GPU and TPU, we are working on" " a fix for CPUs") - - model = None - if not load_from_hf_hub: - # Change this to make your watermark unique. Check documentation in the paper to understand the - # impact of these parameters. - DEFAULT_WATERMARKING_CONFIG = { - "ngram_len": 5, # This corresponds to H=4 context window size in the paper. - "keys": [ - 654, - 400, - 836, - 123, - 340, - 443, - 597, - 160, - 57, - 29, - 590, - 639, - 13, - 715, - 468, - 990, - 966, - 226, - 324, - 585, - 118, - 504, - 421, - 521, - 129, - 669, - 732, - 225, - 90, - 960, - ], - "sampling_table_size": 2**16, - "sampling_table_seed": 0, - "context_history_size": 1024, - } - watermark_config = SynthIDTextWatermarkingConfig(**DEFAULT_WATERMARKING_CONFIG) - - model = AutoModelForCausalLM.from_pretrained(model_name).to(DEVICE) - tokenizer = AutoTokenizer.from_pretrained(model_name) - tokenizer.pad_token = tokenizer.eos_token - - logits_processor = SynthIDTextWatermarkLogitsProcessor(**DEFAULT_WATERMARKING_CONFIG, device=DEVICE) - tokenized_wm_outputs = get_tokenized_wm_outputs( - model, - tokenizer, - watermark_config, - num_pos_batch, - pos_batch_size, - temperature, - generation_length, - top_k, - top_p, - DEVICE, - ) - tokenized_uwm_outputs = get_tokenized_uwm_outputs(num_negatives, NEG_BATCH_SIZE, tokenizer, DEVICE) - - best_detector, lowest_loss = train_best_detector( - tokenized_wm_outputs=tokenized_wm_outputs, - tokenized_uwm_outputs=tokenized_uwm_outputs, - logits_processor=logits_processor, - tokenizer=tokenizer, - torch_device=DEVICE, - test_size=0.3, - pos_truncation_length=POS_TRUNCATION_LENGTH, - neg_truncation_length=NEG_TRUNCATION_LENGTH, - max_padded_length=MAX_PADDED_LENGTH, - n_epochs=100, - learning_rate=3e-3, - l2_weights=[ - 0, - ], - verbose=True, - validation_metric=ValidationMetric.TPR_AT_FPR, - ) - else: - if repo_name is None: - raise ValueError("When loading from pretrained detector model name cannot be None.") - best_detector = BayesianDetectorModel.from_pretrained(repo_name).to(DEVICE) - - best_detector.config.set_detector_information( - model_name=model_name, watermarking_config=DEFAULT_WATERMARKING_CONFIG - ) - if save_model_to_hf_hub: - upload_model_to_hf(best_detector, repo_name) - - # Evaluate model response with the detector - if eval_detector_on_prompts: - model_name = best_detector.config.model_name - watermark_config_dict = best_detector.config.watermarking_config - logits_processor = SynthIDTextWatermarkLogitsProcessor(**watermark_config_dict, device=DEVICE) - tokenizer = AutoTokenizer.from_pretrained(model_name) - tokenizer.pad_token = tokenizer.eos_token - synthid_text_detector = SynthIDTextWatermarkDetector(best_detector, logits_processor, tokenizer) - - if model is None: - model = AutoModelForCausalLM.from_pretrained(model_name).to(DEVICE) - watermarking_config = SynthIDTextWatermarkingConfig(**watermark_config_dict) - - prompts = ["Write a essay on cats."] - inputs = tokenizer( - prompts, - return_tensors="pt", - padding=True, - ).to(DEVICE) - - _, inputs_len = inputs["input_ids"].shape - - outputs = model.generate( - **inputs, - watermarking_config=watermarking_config, - do_sample=True, - max_length=inputs_len + 
generation_length, - temperature=temperature, - top_k=40, - top_p=1.0, - ) - outputs = outputs[:, inputs_len:] - result = synthid_text_detector(outputs) - - # You should set this based on expected fpr (false positive rate) and tpr (true positive rate). - # Check our demo at HF Spaces for more info. - upper_threshold = 0.95 - lower_threshold = 0.12 - if result[0][0] > upper_threshold: - print("The text is watermarked.") - elif lower_threshold < result[0][0] < upper_threshold: - print("It is hard to determine if the text is watermarked or not.") - else: - print("The text is not watermarked.") diff --git a/examples/research_projects/synthid_text/requirements.txt b/examples/research_projects/synthid_text/requirements.txt deleted file mode 100644 index 9e40a93ee08..00000000000 --- a/examples/research_projects/synthid_text/requirements.txt +++ /dev/null @@ -1,5 +0,0 @@ -tensorflow-datasets>=4.9.3 -torch >= 1.3 -datasets -scikit-learn -tensorflow diff --git a/examples/research_projects/synthid_text/utils.py b/examples/research_projects/synthid_text/utils.py deleted file mode 100644 index abcb6ca2f28..00000000000 --- a/examples/research_projects/synthid_text/utils.py +++ /dev/null @@ -1,408 +0,0 @@ -# coding=utf-8 -# Copyright 2024 Google DeepMind. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import gc -from typing import Any, List, Optional, Tuple - -import datasets -import numpy as np -import tensorflow as tf -import tensorflow_datasets as tfds -import torch -import tqdm -from huggingface_hub import HfApi, create_repo -from huggingface_hub.utils import RepositoryNotFoundError -from sklearn import model_selection - -import transformers - - -def pad_to_len( - arr: torch.Tensor, - target_len: int, - left_pad: bool, - eos_token: int, - device: torch.device, -) -> torch.Tensor: - """Pad or truncate array to given length.""" - if arr.shape[1] < target_len: - shape_for_ones = list(arr.shape) - shape_for_ones[1] = target_len - shape_for_ones[1] - padded = ( - torch.ones( - shape_for_ones, - device=device, - dtype=torch.long, - ) - * eos_token - ) - if not left_pad: - arr = torch.concatenate((arr, padded), dim=1) - else: - arr = torch.concatenate((padded, arr), dim=1) - else: - arr = arr[:, :target_len] - return arr - - -def filter_and_truncate( - outputs: torch.Tensor, - truncation_length: Optional[int], - eos_token_mask: torch.Tensor, -) -> torch.Tensor: - """Filter and truncate outputs to given length. - - Args: - outputs: output tensor of shape [batch_size, output_len] - truncation_length: Length to truncate the final output. - eos_token_mask: EOS token mask of shape [batch_size, output_len] - - Returns: - output tensor of shape [batch_size, truncation_length]. 
- """ - if truncation_length: - outputs = outputs[:, :truncation_length] - truncation_mask = torch.sum(eos_token_mask, dim=1) >= truncation_length - return outputs[truncation_mask, :] - return outputs - - -def process_outputs_for_training( - all_outputs: List[torch.Tensor], - logits_processor: transformers.generation.SynthIDTextWatermarkLogitsProcessor, - tokenizer: Any, - pos_truncation_length: Optional[int], - neg_truncation_length: Optional[int], - max_length: int, - is_cv: bool, - is_pos: bool, - torch_device: torch.device, -) -> Tuple[List[torch.Tensor], List[torch.Tensor]]: - """Process raw model outputs into format understandable by the detector. - - Args: - all_outputs: sequence of outputs of shape [batch_size, output_len]. - logits_processor: logits processor used for watermarking. - tokenizer: tokenizer used for the model. - pos_truncation_length: Length to truncate wm outputs. - neg_truncation_length: Length to truncate uwm outputs. - max_length: Length to pad truncated outputs so that all processed entries. - have same shape. - is_cv: Process given outputs for cross validation. - is_pos: Process given outputs for positives. - torch_device: torch device to use. - - Returns: - Tuple of - all_masks: list of masks of shape [batch_size, max_length]. - all_g_values: list of g_values of shape [batch_size, max_length, depth]. - """ - all_masks = [] - all_g_values = [] - for outputs in tqdm.tqdm(all_outputs): - # outputs is of shape [batch_size, output_len]. - # output_len can differ from batch to batch. - eos_token_mask = logits_processor.compute_eos_token_mask( - input_ids=outputs, - eos_token_id=tokenizer.eos_token_id, - ) - if is_pos or is_cv: - # filter with length for positives for both train and CV. - # We also filter for length when CV negatives are processed. - outputs = filter_and_truncate(outputs, pos_truncation_length, eos_token_mask) - elif not is_pos and not is_cv: - outputs = filter_and_truncate(outputs, neg_truncation_length, eos_token_mask) - - # If no filtered outputs skip this batch. - if outputs.shape[0] == 0: - continue - - # All outputs are padded to max-length with eos-tokens. - outputs = pad_to_len(outputs, max_length, False, tokenizer.eos_token_id, torch_device) - # outputs shape [num_filtered_entries, max_length] - - eos_token_mask = logits_processor.compute_eos_token_mask( - input_ids=outputs, - eos_token_id=tokenizer.eos_token_id, - ) - - context_repetition_mask = logits_processor.compute_context_repetition_mask( - input_ids=outputs, - ) - - # context_repetition_mask of shape [num_filtered_entries, max_length - - # (ngram_len - 1)]. - context_repetition_mask = pad_to_len(context_repetition_mask, max_length, True, 0, torch_device) - # We pad on left to get same max_length shape. - # context_repetition_mask of shape [num_filtered_entries, max_length]. - combined_mask = context_repetition_mask * eos_token_mask - - g_values = logits_processor.compute_g_values( - input_ids=outputs, - ) - - # g_values of shape [num_filtered_entries, max_length - (ngram_len - 1), - # depth]. - g_values = pad_to_len(g_values, max_length, True, 0, torch_device) - - # We pad on left to get same max_length shape. - # g_values of shape [num_filtered_entries, max_length, depth]. 
- all_masks.append(combined_mask) - all_g_values.append(g_values) - return all_masks, all_g_values - - -def tpr_at_fpr(detector, detector_inputs, w_true, minibatch_size, target_fpr=0.01) -> torch.Tensor: - """Calculates true positive rate (TPR) at false positive rate (FPR)=target_fpr.""" - positive_idxs = w_true == 1 - negative_idxs = w_true == 0 - num_samples = detector_inputs[0].size(0) - - w_preds = [] - for start in range(0, num_samples, minibatch_size): - end = start + minibatch_size - detector_inputs_ = ( - detector_inputs[0][start:end], - detector_inputs[1][start:end], - ) - with torch.no_grad(): - w_pred = detector(*detector_inputs_)[0] - w_preds.append(w_pred) - - w_pred = torch.cat(w_preds, dim=0) # Concatenate predictions - positive_scores = w_pred[positive_idxs] - negative_scores = w_pred[negative_idxs] - - # Calculate the FPR threshold - # Note: percentile -> quantile - fpr_threshold = torch.quantile(negative_scores, 1 - target_fpr) - # Note: need to switch to FP32 since torch.mean doesn't work with torch.bool - return torch.mean((positive_scores >= fpr_threshold).to(dtype=torch.float32)).item() # TPR - - -def update_fn_if_fpr_tpr(detector, g_values_val, mask_val, watermarked_val, minibatch_size): - """Loss function for negative TPR@FPR=1% as the validation loss.""" - tpr_ = tpr_at_fpr( - detector=detector, - detector_inputs=(g_values_val, mask_val), - w_true=watermarked_val, - minibatch_size=minibatch_size, - ) - return -tpr_ - - -def process_raw_model_outputs( - logits_processor, - tokenizer, - pos_truncation_length, - neg_truncation_length, - max_padded_length, - tokenized_wm_outputs, - test_size, - tokenized_uwm_outputs, - torch_device, -): - # Split data into train and CV - train_wm_outputs, cv_wm_outputs = model_selection.train_test_split(tokenized_wm_outputs, test_size=test_size) - - train_uwm_outputs, cv_uwm_outputs = model_selection.train_test_split(tokenized_uwm_outputs, test_size=test_size) - - process_kwargs = { - "logits_processor": logits_processor, - "tokenizer": tokenizer, - "pos_truncation_length": pos_truncation_length, - "neg_truncation_length": neg_truncation_length, - "max_length": max_padded_length, - "torch_device": torch_device, - } - - # Process both train and CV data for training - wm_masks_train, wm_g_values_train = process_outputs_for_training( - [torch.tensor(outputs, device=torch_device, dtype=torch.long) for outputs in train_wm_outputs], - is_pos=True, - is_cv=False, - **process_kwargs, - ) - wm_masks_cv, wm_g_values_cv = process_outputs_for_training( - [torch.tensor(outputs, device=torch_device, dtype=torch.long) for outputs in cv_wm_outputs], - is_pos=True, - is_cv=True, - **process_kwargs, - ) - uwm_masks_train, uwm_g_values_train = process_outputs_for_training( - [torch.tensor(outputs, device=torch_device, dtype=torch.long) for outputs in train_uwm_outputs], - is_pos=False, - is_cv=False, - **process_kwargs, - ) - uwm_masks_cv, uwm_g_values_cv = process_outputs_for_training( - [torch.tensor(outputs, device=torch_device, dtype=torch.long) for outputs in cv_uwm_outputs], - is_pos=False, - is_cv=True, - **process_kwargs, - ) - - # We get list of data; here we concat all together to be passed to the detector. - def pack(mask, g_values): - mask = torch.cat(mask, dim=0) - g = torch.cat(g_values, dim=0) - return mask, g - - wm_masks_train, wm_g_values_train = pack(wm_masks_train, wm_g_values_train) - # Note: Use float instead of bool. 
Otherwise, the entropy calculation doesn't work - wm_labels_train = torch.ones((wm_masks_train.shape[0],), dtype=torch.float, device=torch_device) - - wm_masks_cv, wm_g_values_cv = pack(wm_masks_cv, wm_g_values_cv) - wm_labels_cv = torch.ones((wm_masks_cv.shape[0],), dtype=torch.float, device=torch_device) - - uwm_masks_train, uwm_g_values_train = pack(uwm_masks_train, uwm_g_values_train) - uwm_labels_train = torch.zeros((uwm_masks_train.shape[0],), dtype=torch.float, device=torch_device) - - uwm_masks_cv, uwm_g_values_cv = pack(uwm_masks_cv, uwm_g_values_cv) - uwm_labels_cv = torch.zeros((uwm_masks_cv.shape[0],), dtype=torch.float, device=torch_device) - - # Concat pos and negatives data together. - train_g_values = torch.cat((wm_g_values_train, uwm_g_values_train), dim=0).squeeze() - train_labels = torch.cat((wm_labels_train, uwm_labels_train), axis=0).squeeze() - train_masks = torch.cat((wm_masks_train, uwm_masks_train), axis=0).squeeze() - - cv_g_values = torch.cat((wm_g_values_cv, uwm_g_values_cv), axis=0).squeeze() - cv_labels = torch.cat((wm_labels_cv, uwm_labels_cv), axis=0).squeeze() - cv_masks = torch.cat((wm_masks_cv, uwm_masks_cv), axis=0).squeeze() - - # Shuffle data. - shuffled_idx = torch.randperm(train_g_values.shape[0]) # Use torch for GPU compatibility - - train_g_values = train_g_values[shuffled_idx] - train_labels = train_labels[shuffled_idx] - train_masks = train_masks[shuffled_idx] - - # Shuffle the cross-validation data - shuffled_idx_cv = torch.randperm(cv_g_values.shape[0]) # Use torch for GPU compatibility - cv_g_values = cv_g_values[shuffled_idx_cv] - cv_labels = cv_labels[shuffled_idx_cv] - cv_masks = cv_masks[shuffled_idx_cv] - - # Del some variables so we free up GPU memory. - del ( - wm_g_values_train, - wm_labels_train, - wm_masks_train, - wm_g_values_cv, - wm_labels_cv, - wm_masks_cv, - ) - gc.collect() - torch.cuda.empty_cache() - - return train_g_values, train_masks, train_labels, cv_g_values, cv_masks, cv_labels - - -def get_tokenized_uwm_outputs(num_negatives, neg_batch_size, tokenizer, device): - dataset, info = tfds.load("wikipedia/20230601.en", split="train", with_info=True) - dataset = dataset.take(num_negatives) - - # Convert the dataset to a DataFrame - df = tfds.as_dataframe(dataset, info) - ds = tf.data.Dataset.from_tensor_slices(dict(df)) - tf.random.set_seed(0) - ds = ds.shuffle(buffer_size=10_000) - ds = ds.batch(batch_size=neg_batch_size) - - tokenized_uwm_outputs = [] - # Pad to this length (on the right) for batching. 
- padded_length = 1000 - for i, batch in tqdm.tqdm(enumerate(ds)): - responses = [val.decode() for val in batch["text"].numpy()] - inputs = tokenizer( - responses, - return_tensors="pt", - padding=True, - ).to(device) - inputs = inputs["input_ids"].cpu().numpy() - if inputs.shape[1] >= padded_length: - inputs = inputs[:, :padded_length] - else: - inputs = np.concatenate( - [inputs, np.ones((neg_batch_size, padded_length - inputs.shape[1])) * tokenizer.eos_token_id], axis=1 - ) - tokenized_uwm_outputs.append(inputs) - if len(tokenized_uwm_outputs) * neg_batch_size > num_negatives: - break - return tokenized_uwm_outputs - - -def get_tokenized_wm_outputs( - model, - tokenizer, - watermark_config, - num_pos_batches, - pos_batch_size, - temperature, - max_output_len, - top_k, - top_p, - device, -): - eli5_prompts = datasets.load_dataset("Pavithree/eli5") - - wm_outputs = [] - - for batch_id in tqdm.tqdm(range(num_pos_batches)): - prompts = eli5_prompts["train"]["title"][batch_id * pos_batch_size : (batch_id + 1) * pos_batch_size] - prompts = [prompt.strip('"') for prompt in prompts] - inputs = tokenizer( - prompts, - return_tensors="pt", - padding=True, - ).to(device) - _, inputs_len = inputs["input_ids"].shape - - outputs = model.generate( - **inputs, - watermarking_config=watermark_config, - do_sample=True, - max_length=inputs_len + max_output_len, - temperature=temperature, - top_k=top_k, - top_p=top_p, - ) - - wm_outputs.append(outputs[:, inputs_len:].cpu().detach()) - - del outputs, inputs, prompts - gc.collect() - - gc.collect() - torch.cuda.empty_cache() - return wm_outputs - - -def upload_model_to_hf(model, hf_repo_name: str, private: bool = True): - api = HfApi() - - # Check if the repository exists - try: - api.repo_info(repo_id=hf_repo_name, use_auth_token=True) - print(f"Repository '{hf_repo_name}' already exists.") - except RepositoryNotFoundError: - # If the repository does not exist, create it - print(f"Repository '{hf_repo_name}' not found. Creating it...") - create_repo(repo_id=hf_repo_name, private=private, use_auth_token=True) - print(f"Repository '{hf_repo_name}' created successfully.") - - # Push the model to the Hugging Face Hub - print(f"Uploading model to Hugging Face repo '{hf_repo_name}'...") - model.push_to_hub(repo_id=hf_repo_name, use_auth_token=True) diff --git a/examples/research_projects/tapex/README.md b/examples/research_projects/tapex/README.md deleted file mode 100644 index b98eb9b428d..00000000000 --- a/examples/research_projects/tapex/README.md +++ /dev/null @@ -1,288 +0,0 @@ - - -# Run Table Tasks with TAPEX - -TAPEX is a table pre-training approach for table-related tasks. By learning a neural SQL executor over a synthetic corpus based on generative language models (e.g., BART), it achieves state-of-the-art performance on several table-based question answering benchmarks and table-based fact verification benchmark. More details can be found in the original paper [TAPEX: Table Pre-training via Learning a Neural SQL Executor](https://arxiv.org/pdf/2107.07653.pdf). - -> If you are also familiar with [fairseq](https://github.com/pytorch/fairseq), you may also find [the official implementation](https://github.com/microsoft/Table-Pretraining) useful, which leverages the framework. - -## Table Question Answering Tasks - -### What is Table Question Answering - -![Example](https://table-pretraining.github.io/assets/tableqa_task.png) - -The task of Table Question Answering (TableQA) is to empower machines to answer users' questions over a given table. 
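-As a quick illustration (a minimal sketch, not part of these research scripts; it assumes the public `microsoft/tapex-base-finetuned-wtq` checkpoint), a fine-tuned TAPEX model can be queried directly through the `transformers` API:
-
-```python
-import pandas as pd
-
-from transformers import BartForConditionalGeneration, TapexTokenizer
-
-tokenizer = TapexTokenizer.from_pretrained("microsoft/tapex-base-finetuned-wtq")
-model = BartForConditionalGeneration.from_pretrained("microsoft/tapex-base-finetuned-wtq")
-
-# TAPEX expects every table cell as a string
-table = pd.DataFrame.from_dict({"year": ["2004", "2008", "2012"], "city": ["Athens", "Beijing", "London"]})
-query = "In which year did Beijing host the games?"
-
-encoding = tokenizer(table=table, query=query, return_tensors="pt")
-outputs = model.generate(**encoding)
-print(tokenizer.batch_decode(outputs, skip_special_tokens=True))  # e.g. [' 2008']
-```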
-The resulting answer(s) can be a region in the table, or a number calculated by applying aggregation operators to a specific region.
-
-### What Questions Can be Answered
-
-Benefiting from the power of generative models, TAPEX can deal with almost all kinds of questions over tables (if there is training data). Below are some typical questions and their answers taken from [WikiTableQuestions](https://nlp.stanford.edu/blog/wikitablequestions-a-complex-real-world-question-understanding-dataset).
-
-| Question | Answer |
-| :---: | :---: |
-| What is the years won for each team? | 2004, 2008, 2012 |
-| How long did Taiki Tsuchiya last? | 4:27 |
-| What is the total amount of matches drawn? | 1 |
-| Besides Tiger Woods, what other player won between 2007 and 2009? | Camilo Villegas |
-| What was the last Baekje Temple? | Uija |
-| What is the difference between White voters and Black voters in 1948? | 0 |
-| What is the average number of sailors for each country during the worlds qualification tournament? | 2 |
-
-### How to Fine-tune TAPEX on TableQA
-
-We provide a fine-tuning script of TAPEX for TableQA on the [WikiSQL](https://github.com/salesforce/WikiSQL) benchmark.
-This script is customized for TAPEX models and can be easily adapted to other benchmarks such as WikiTableQuestions
-(it only requires some tweaks in the function `preprocess_tableqa_function`).
-
-#### TAPEX-Base on WikiSQL
-
-Here is how to run the script on WikiSQL with `tapex-base`:
-> The default hyper-parameters may allow you to reproduce our reported tapex-base results within a memory budget of 16GB on a single GPU card. If you have more GPU cards, you can reduce `gradient_accumulation_steps` accordingly.
-
-```bash
-export EXP_NAME=wikisql_tapex_base
-
-python run_wikisql_with_tapex.py \
-    --do_train \
-    --do_eval \
-    --output_dir $EXP_NAME \
-    --model_name_or_path microsoft/tapex-base \
-    --overwrite_output_dir \
-    --per_device_train_batch_size 4 \
-    --gradient_accumulation_steps 8 \
-    --per_device_eval_batch_size 4 \
-    --learning_rate 3e-5 \
-    --logging_steps 10 \
-    --eval_steps 1000 \
-    --save_steps 1000 \
-    --warmup_steps 1000 \
-    --eval_strategy steps \
-    --predict_with_generate \
-    --num_beams 5 \
-    --weight_decay 1e-2 \
-    --label_smoothing_factor 0.1 \
-    --max_steps 20000
-```
-
-#### TAPEX-Large on WikiSQL
-
-Here is how to run the script on WikiSQL with `tapex-large`:
-> The default hyper-parameters may allow you to reproduce our reported tapex-large results within a memory budget of 16GB on a single GPU card with fp16. If you have more GPU cards, you can reduce `gradient_accumulation_steps` accordingly. If you have not installed apex or another mixed-precision-training library, you can disable the `predict_with_generate` option to save GPU memory and evaluate the model manually once fine-tuning has finished, or simply pick the last checkpoint, which usually performs well enough on the dataset.
-
-```bash
-export EXP_NAME=wikisql_tapex_large
-
-python run_wikisql_with_tapex.py \
-    --do_train \
-    --do_eval \
-    --output_dir $EXP_NAME \
-    --model_name_or_path microsoft/tapex-large \
-    --overwrite_output_dir \
-    --per_device_train_batch_size 1 \
-    --gradient_accumulation_steps 32 \
-    --per_device_eval_batch_size 4 \
-    --learning_rate 3e-5 \
-    --logging_steps 10 \
-    --eval_steps 1000 \
-    --save_steps 1000 \
-    --warmup_steps 1000 \
-    --eval_strategy steps \
-    --predict_with_generate \
-    --num_beams 5 \
-    --weight_decay 1e-2 \
-    --label_smoothing_factor 0.1 \
-    --max_steps 20000 \
-    --fp16
-```
-
-#### TAPEX-Base on WikiTableQuestions
-
-Here is how to run the script on WikiTableQuestions with `tapex-base`:
-> The default hyper-parameters may allow you to reproduce our reported tapex-base results within a memory budget of 16GB on a single GPU card. If you have more GPU cards, you can reduce `gradient_accumulation_steps` accordingly.
-
-```bash
-export EXP_NAME=wikitablequestions_tapex_base
-
-python run_wikitablequestions_with_tapex.py \
-    --do_train \
-    --do_eval \
-    --output_dir $EXP_NAME \
-    --model_name_or_path microsoft/tapex-base \
-    --overwrite_output_dir \
-    --per_device_train_batch_size 4 \
-    --gradient_accumulation_steps 8 \
-    --per_device_eval_batch_size 4 \
-    --learning_rate 3e-5 \
-    --logging_steps 10 \
-    --eval_steps 1000 \
-    --save_steps 1000 \
-    --warmup_steps 1000 \
-    --eval_strategy steps \
-    --predict_with_generate \
-    --num_beams 5 \
-    --weight_decay 1e-2 \
-    --label_smoothing_factor 0.1 \
-    --max_steps 20000
-```
-
-#### TAPEX-Large on WikiTableQuestions
-
-Here is how to run the script on WikiTableQuestions with `tapex-large`:
-> The default hyper-parameters may allow you to reproduce our reported tapex-large results within a memory budget of 16GB on a single GPU card with fp16. If you have more GPU cards, you can reduce `gradient_accumulation_steps` accordingly. If you have not installed apex or another mixed-precision-training library, you can reduce `per_device_train_batch_size` and `per_device_eval_batch_size` and try again, or disable the `predict_with_generate` option to save GPU memory and evaluate the model manually once fine-tuning has finished, or simply pick the last checkpoint, which usually performs well enough on the dataset.
-
-```bash
-export EXP_NAME=wikitablequestions_tapex_large
-
-python run_wikitablequestions_with_tapex.py \
-    --do_train \
-    --do_eval \
-    --output_dir $EXP_NAME \
-    --model_name_or_path microsoft/tapex-large \
-    --overwrite_output_dir \
-    --per_device_train_batch_size 2 \
-    --gradient_accumulation_steps 12 \
-    --per_device_eval_batch_size 4 \
-    --learning_rate 3e-5 \
-    --logging_steps 10 \
-    --eval_steps 1000 \
-    --save_steps 1000 \
-    --warmup_steps 1000 \
-    --eval_strategy steps \
-    --predict_with_generate \
-    --num_beams 5 \
-    --weight_decay 1e-2 \
-    --label_smoothing_factor 0.1 \
-    --max_steps 20000 \
-    --fp16
-```
-
-### How to Evaluate TAPEX Fine-tuned Models on TableQA
-
-We provide fine-tuned model weights to reproduce our results. You can evaluate them using the following command:
-> You can also replace `microsoft/tapex-base-finetuned-wikisql` with your local directory to evaluate your fine-tuned models. Note that if the model is larger, you should reduce `per_device_eval_batch_size` to fit within the memory budget.
-
-```bash
-export EXP_NAME=wikisql_tapex_base_eval
-
-python run_wikisql_with_tapex.py \
-    --do_eval \
-    --model_name_or_path microsoft/tapex-base-finetuned-wikisql \
-    --output_dir $EXP_NAME \
-    --per_device_eval_batch_size 4 \
-    --predict_with_generate \
-    --num_beams 5
-```
-
-## Table Fact Verification Tasks
-
-### What is Table Fact Verification
-
-![Example](https://table-pretraining.github.io/assets/tableft_task.png)
-
-The task of Table Fact Verification (TableFV) is to empower machines to judge whether a statement follows from the facts in a given table. The result is a binary classification: `1` (entailed) or `0` (refused).
-
-### How to Fine-tune TAPEX on TableFV
-
-#### TAPEX-Base on TabFact
-
-We provide a fine-tuning script of TAPEX for TableFV on the [TabFact](https://github.com/wenhuchen/Table-Fact-Checking) benchmark.
-
-Here is how to run the script on TabFact:
-> The default hyper-parameters may allow you to reproduce our reported tapex-base results within a memory budget of 16GB on a single GPU card. If you have more GPU cards, you can reduce `gradient_accumulation_steps` accordingly. Note that `eval_accumulation_steps` is necessary; otherwise GPU memory leaks will occur during evaluation.
-
-```bash
-export EXP_NAME=tabfact_tapex_base
-
-python run_tabfact_with_tapex.py \
-    --do_train \
-    --do_eval \
-    --output_dir $EXP_NAME \
-    --model_name_or_path microsoft/tapex-base \
-    --overwrite_output_dir \
-    --per_device_train_batch_size 3 \
-    --gradient_accumulation_steps 16 \
-    --per_device_eval_batch_size 12 \
-    --eval_accumulation_steps 6 \
-    --warmup_steps 1000 \
-    --logging_steps 10 \
-    --learning_rate 3e-5 \
-    --eval_steps 1000 \
-    --save_steps 1000 \
-    --eval_strategy steps \
-    --weight_decay 1e-2 \
-    --max_steps 30000 \
-    --max_grad_norm 0.1
-```
-
-#### TAPEX-Large on TabFact
-
-Here is how to run the script on TabFact:
-> The default hyper-parameters may allow you to reproduce our reported tapex-large results within a memory budget of 24GB on a single GPU card. Unfortunately, the memory consumption cannot be reduced further, since the model input for TabFact usually contains nearly 1,000 tokens. If you have more GPU cards, you can reduce `gradient_accumulation_steps` accordingly. Note that `eval_accumulation_steps` is necessary; otherwise GPU memory leaks will occur during evaluation.
-
-```bash
-export EXP_NAME=tabfact_tapex_large
-
-python run_tabfact_with_tapex.py \
-    --do_train \
-    --do_eval \
-    --output_dir $EXP_NAME \
-    --model_name_or_path microsoft/tapex-large \
-    --overwrite_output_dir \
-    --per_device_train_batch_size 2 \
-    --gradient_accumulation_steps 18 \
-    --per_device_eval_batch_size 4 \
-    --eval_accumulation_steps 12 \
-    --warmup_steps 1000 \
-    --logging_steps 10 \
-    --learning_rate 3e-5 \
-    --eval_steps 1000 \
-    --save_steps 1000 \
-    --eval_strategy steps \
-    --weight_decay 1e-2 \
-    --max_steps 30000 \
-    --max_grad_norm 0.1
-```
-
-### How to Evaluate TAPEX Fine-tuned Models on TableFV
-
-We provide fine-tuned model weights to reproduce our results. You can evaluate them using the following command:
-> You can also replace `microsoft/tapex-base-finetuned-tabfact` with your local directory to evaluate your fine-tuned models. Note that if the model is larger, you should reduce `per_device_eval_batch_size` to fit within the memory budget.
- -```bash -export EXP_NAME=tabfact_tapex_base_eval - -python run_tabfact_with_tapex.py \ - --do_eval \ - --model_name_or_path microsoft/tapex-base-finetuned-tabfact \ - --output_dir $EXP_NAME \ - --per_device_eval_batch_size 12 \ - --eval_accumulation_steps 6 -``` - -## Reproduced Results - -We get the following results on the dev set of the benchmark with the previous commands: - -| Task | Model Size | Metric | Result | -|:---:|:---:|:---:|:---:| -| WikiSQL (Weak) | Base | Denotation Accuracy | 88.1 | -| WikiSQL (Weak) | Large | Denotation Accuracy | 89.5 | -| WikiTableQuestion | Base | Denotation Accuracy | 47.1 | -| WikiTableQuestion | Large | Denotation Accuracy | 57.2 | -| TabFact | Base | Accuracy | 78.7 | -| TabFact | Large | Accuracy | 83.6 | diff --git a/examples/research_projects/tapex/requirements.txt b/examples/research_projects/tapex/requirements.txt deleted file mode 100644 index 2379012a9b2..00000000000 --- a/examples/research_projects/tapex/requirements.txt +++ /dev/null @@ -1,4 +0,0 @@ -numpy -datasets -pandas -nltk \ No newline at end of file diff --git a/examples/research_projects/tapex/run_tabfact_with_tapex.py b/examples/research_projects/tapex/run_tabfact_with_tapex.py deleted file mode 100644 index 5dcec10a084..00000000000 --- a/examples/research_projects/tapex/run_tabfact_with_tapex.py +++ /dev/null @@ -1,471 +0,0 @@ -#!/usr/bin/env python -# coding=utf-8 -# Copyright 2022 The Microsoft and The HuggingFace Inc. team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -Fine-tuning the library models for tapex on table-based fact verification tasks. -Adapted from script: https://github.com/huggingface/transformers/blob/master/examples/pytorch/text-classification/run_glue.py -""" - -import logging -import os -import random -import sys -from dataclasses import dataclass, field -from typing import Optional - -import datasets -import numpy as np -import pandas as pd -from datasets import load_dataset - -import transformers -from transformers import ( - AutoConfig, - BartForSequenceClassification, - DataCollatorWithPadding, - EvalPrediction, - HfArgumentParser, - TapexTokenizer, - Trainer, - TrainingArguments, - default_data_collator, - set_seed, -) -from transformers.trainer_utils import get_last_checkpoint -from transformers.utils import check_min_version -from transformers.utils.versions import require_version - - -# Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.17.0.dev0") - -require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt") - -logger = logging.getLogger(__name__) - - -@dataclass -class DataTrainingArguments: - """ - Arguments pertaining to what data we are going to input our model for training and eval. - - Using `HfArgumentParser` we can turn this class - into argparse arguments to be able to specify them on - the command line. 
- """ - - dataset_name: Optional[str] = field( - default="tab_fact", metadata={"help": "The name of the dataset to use (via the datasets library)."} - ) - dataset_config_name: Optional[str] = field( - default="tab_fact", - metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}, - ) - max_seq_length: int = field( - default=1024, - metadata={ - "help": ( - "The maximum total input sequence length after tokenization. Sequences longer " - "than this will be truncated, sequences shorter will be padded." - ) - }, - ) - overwrite_cache: bool = field( - default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."} - ) - pad_to_max_length: bool = field( - default=False, - metadata={ - "help": ( - "Whether to pad all samples to `max_seq_length`. " - "If False, will pad the samples dynamically when batching to the maximum length in the batch." - ) - }, - ) - max_train_samples: Optional[int] = field( - default=None, - metadata={ - "help": ( - "For debugging purposes or quicker training, truncate the number of training examples to this " - "value if set." - ) - }, - ) - max_eval_samples: Optional[int] = field( - default=None, - metadata={ - "help": ( - "For debugging purposes or quicker training, truncate the number of evaluation examples to this " - "value if set." - ) - }, - ) - max_predict_samples: Optional[int] = field( - default=None, - metadata={ - "help": ( - "For debugging purposes or quicker training, truncate the number of prediction examples to this " - "value if set." - ) - }, - ) - train_file: Optional[str] = field( - default=None, metadata={"help": "A csv or a json file containing the training data."} - ) - validation_file: Optional[str] = field( - default=None, metadata={"help": "A csv or a json file containing the validation data."} - ) - test_file: Optional[str] = field(default=None, metadata={"help": "A csv or a json file containing the test data."}) - - def __post_init__(self): - if self.dataset_name is not None: - pass - elif self.train_file is None or self.validation_file is None: - raise ValueError("Need either a GLUE task, a training/validation file or a dataset name.") - else: - train_extension = self.train_file.split(".")[-1] - assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file." - validation_extension = self.validation_file.split(".")[-1] - assert ( - validation_extension == train_extension - ), "`validation_file` should have the same extension (csv or json) as `train_file`." - - -@dataclass -class ModelArguments: - """ - Arguments pertaining to which model/config/tokenizer we are going to fine-tune from. 
- """ - - model_name_or_path: str = field( - default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} - ) - config_name: Optional[str] = field( - default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} - ) - tokenizer_name: Optional[str] = field( - default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} - ) - cache_dir: Optional[str] = field( - default=None, - metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, - ) - use_fast_tokenizer: bool = field( - default=True, - metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, - ) - model_revision: str = field( - default="main", - metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, - ) - use_auth_token: bool = field( - default=False, - metadata={ - "help": ( - "Will use the token generated when running `huggingface-cli login` (necessary to use this script " - "with private models)." - ) - }, - ) - - -def main(): - # See all possible arguments in src/transformers/training_args.py - # or by passing the --help flag to this script. - # We now keep distinct sets of args, for a cleaner separation of concerns. - - parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) - if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): - # If we pass only one argument to the script and it's the path to a json file, - # let's parse it to get our arguments. - model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) - else: - model_args, data_args, training_args = parser.parse_args_into_dataclasses() - - # Setup logging - logging.basicConfig( - format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", - datefmt="%m/%d/%Y %H:%M:%S", - handlers=[logging.StreamHandler(sys.stdout)], - ) - - log_level = training_args.get_process_log_level() - logger.setLevel(log_level) - datasets.utils.logging.set_verbosity(log_level) - transformers.utils.logging.set_verbosity(log_level) - transformers.utils.logging.enable_default_handler() - transformers.utils.logging.enable_explicit_format() - - # Log on each process the small summary: - logger.warning( - f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" - + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}" - ) - logger.info(f"Training/evaluation parameters {training_args}") - - # Detecting last checkpoint. - last_checkpoint = None - if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: - last_checkpoint = get_last_checkpoint(training_args.output_dir) - if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: - raise ValueError( - f"Output directory ({training_args.output_dir}) already exists and is not empty. " - "Use --overwrite_output_dir to overcome." - ) - elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: - logger.info( - f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " - "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." - ) - - # Set seed before initializing model. 
- set_seed(training_args.seed) - - # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below) - # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub). - # - # For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table. - # - # If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this - # single column. You can easily tweak this behavior (see below) - # - # In distributed training, the load_dataset function guarantee that only one local process can concurrently - # download the dataset. - if data_args.dataset_name is not None: - # Downloading and loading a dataset from the hub. - raw_datasets = load_dataset( - data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir - ) - else: - # Loading a dataset from your local files. - # CSV/JSON training and evaluation files are needed. - data_files = {"train": data_args.train_file, "validation": data_args.validation_file} - - # Get the test dataset: you can provide your own CSV/JSON test file (see below) - # when you use `do_predict` without specifying a GLUE benchmark task. - if training_args.do_predict: - if data_args.test_file is not None: - train_extension = data_args.train_file.split(".")[-1] - test_extension = data_args.test_file.split(".")[-1] - assert ( - test_extension == train_extension - ), "`test_file` should have the same extension (csv or json) as `train_file`." - data_files["test"] = data_args.test_file - else: - raise ValueError("Need either a GLUE task or a test file for `do_predict`.") - - for key in data_files.keys(): - logger.info(f"load a local file for {key}: {data_files[key]}") - - if data_args.train_file.endswith(".csv"): - # Loading a dataset from local csv files - raw_datasets = load_dataset("csv", data_files=data_files, cache_dir=model_args.cache_dir) - else: - # Loading a dataset from local json files - raw_datasets = load_dataset("json", data_files=data_files, cache_dir=model_args.cache_dir) - # See more about loading any type of standard or custom dataset at - # https://huggingface.co/docs/datasets/loading_datasets. - - # Labels - label_list = raw_datasets["train"].features["label"].names - num_labels = len(label_list) - - # Load pretrained model and tokenizer - # - # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently - # download model & vocab. 
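-    # `num_labels` (2 for TabFact: Refused / Entailed) sizes the classification head of the BART model loaded below.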
- config = AutoConfig.from_pretrained( - model_args.config_name if model_args.config_name else model_args.model_name_or_path, - num_labels=num_labels, - cache_dir=model_args.cache_dir, - revision=model_args.model_revision, - token=True if model_args.use_auth_token else None, - ) - # load tapex tokenizer - tokenizer = TapexTokenizer.from_pretrained( - model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, - cache_dir=model_args.cache_dir, - use_fast=model_args.use_fast_tokenizer, - revision=model_args.model_revision, - token=True if model_args.use_auth_token else None, - add_prefix_space=True, - ) - model = BartForSequenceClassification.from_pretrained( - model_args.model_name_or_path, - from_tf=bool(".ckpt" in model_args.model_name_or_path), - config=config, - cache_dir=model_args.cache_dir, - revision=model_args.model_revision, - token=True if model_args.use_auth_token else None, - ) - - # Padding strategy - if data_args.pad_to_max_length: - padding = "max_length" - else: - # We will pad later, dynamically at batch creation, to the max sequence length in each batch - padding = False - - # Some models have set the order of the labels to use, so let's make sure we do use it. - model.config.label2id = {"Refused": 0, "Entailed": 1} - model.config.id2label = {0: "Refused", 1: "Entailed"} - - if data_args.max_seq_length > tokenizer.model_max_length: - logger.warning( - f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the " - f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." - ) - max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length) - - def preprocess_tabfact_function(examples): - # Tokenize the texts - def _convert_table_text_to_pandas(_table_text): - """Runs the structured pandas table object for _table_text. 
- An example _table_text can be: round#clubs remaining\nfirst round#156\n - """ - _table_content = [_table_row.split("#") for _table_row in _table_text.strip("\n").split("\n")] - _table_pd = pd.DataFrame.from_records(_table_content[1:], columns=_table_content[0]) - return _table_pd - - questions = examples["statement"] - tables = list(map(_convert_table_text_to_pandas, examples["table_text"])) - result = tokenizer(tables, questions, padding=padding, max_length=max_seq_length, truncation=True) - - result["label"] = examples["label"] - return result - - with training_args.main_process_first(desc="dataset map pre-processing"): - raw_datasets = raw_datasets.map( - preprocess_tabfact_function, - batched=True, - load_from_cache_file=not data_args.overwrite_cache, - desc="Running tokenizer on dataset", - ) - if training_args.do_train: - if "train" not in raw_datasets: - raise ValueError("--do_train requires a train dataset") - train_dataset = raw_datasets["train"] - if data_args.max_train_samples is not None: - train_dataset = train_dataset.select(range(data_args.max_train_samples)) - - if training_args.do_eval: - if "validation" not in raw_datasets and "validation_matched" not in raw_datasets: - raise ValueError("--do_eval requires a validation dataset") - eval_dataset = raw_datasets["validation"] - if data_args.max_eval_samples is not None: - eval_dataset = eval_dataset.select(range(data_args.max_eval_samples)) - - if training_args.do_predict or data_args.test_file is not None: - if "test" not in raw_datasets and "test_matched" not in raw_datasets: - raise ValueError("--do_predict requires a test dataset") - predict_dataset = raw_datasets["test"] - if data_args.max_predict_samples is not None: - predict_dataset = predict_dataset.select(range(data_args.max_predict_samples)) - - # Log a few random samples from the training set: - if training_args.do_train: - for index in random.sample(range(len(train_dataset)), 3): - logger.info(f"Sample {index} of the training set: {train_dataset[index]}.") - - # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a - # predictions and label_ids field) and has to return a dictionary string to float. - def compute_metrics(p: EvalPrediction): - preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions - preds = np.argmax(preds, axis=1) - return {"accuracy": (preds == p.label_ids).astype(np.float32).mean().item()} - - # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding. 
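-    # `pad_to_multiple_of=8` keeps padded sequence lengths aligned with tensor-core-friendly shapes under fp16.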
- if data_args.pad_to_max_length: - data_collator = default_data_collator - elif training_args.fp16: - data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) - else: - data_collator = None - - # Initialize our Trainer - trainer = Trainer( - model=model, - args=training_args, - train_dataset=train_dataset if training_args.do_train else None, - eval_dataset=eval_dataset if training_args.do_eval else None, - compute_metrics=compute_metrics, - tokenizer=tokenizer, - data_collator=data_collator, - ) - - # Training - if training_args.do_train: - checkpoint = None - if training_args.resume_from_checkpoint is not None: - checkpoint = training_args.resume_from_checkpoint - elif last_checkpoint is not None: - checkpoint = last_checkpoint - train_result = trainer.train(resume_from_checkpoint=checkpoint) - metrics = train_result.metrics - max_train_samples = ( - data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset) - ) - metrics["train_samples"] = min(max_train_samples, len(train_dataset)) - - trainer.save_model() # Saves the tokenizer too for easy upload - - trainer.log_metrics("train", metrics) - trainer.save_metrics("train", metrics) - trainer.save_state() - - # Evaluation - if training_args.do_eval: - logger.info("*** Evaluate ***") - - metrics = trainer.evaluate(eval_dataset=eval_dataset) - max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset) - metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset)) - - trainer.log_metrics("eval", metrics) - trainer.save_metrics("eval", metrics) - - if training_args.do_predict: - logger.info("*** Predict ***") - - # Removing the `label` columns because it contains -1 and Trainer won't like that. - predict_dataset = predict_dataset.remove_columns("label") - predictions = trainer.predict(predict_dataset, metric_key_prefix="predict").predictions - predictions = np.argmax(predictions, axis=1) - - output_predict_file = os.path.join(training_args.output_dir, "predict_results_tabfact.txt") - if trainer.is_world_process_zero(): - with open(output_predict_file, "w") as writer: - logger.info("***** Predict Results *****") - writer.write("index\tprediction\n") - for index, item in enumerate(predictions): - item = label_list[item] - writer.write(f"{index}\t{item}\n") - - kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"} - - if training_args.push_to_hub: - trainer.push_to_hub(**kwargs) - else: - trainer.create_model_card(**kwargs) - - -def _mp_fn(index): - # For xla_spawn (TPUs) - main() - - -if __name__ == "__main__": - main() diff --git a/examples/research_projects/tapex/run_wikisql_with_tapex.py b/examples/research_projects/tapex/run_wikisql_with_tapex.py deleted file mode 100644 index 81e940a77c8..00000000000 --- a/examples/research_projects/tapex/run_wikisql_with_tapex.py +++ /dev/null @@ -1,649 +0,0 @@ -#!/usr/bin/env python -# coding=utf-8 -# Copyright 2022 The Microsoft and The HuggingFace Inc. team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -""" -Fine-tuning the library models for tapex on table-based question answering tasks. -Adapted from script: https://github.com/huggingface/transformers/blob/master/examples/pytorch/summarization/run_summarization.py -""" - -import logging -import os -import sys -from collections import defaultdict -from copy import deepcopy -from dataclasses import dataclass, field -from functools import partial -from typing import List, Optional - -import nltk # Here to have a nice missing dependency error message early on -import numpy as np -import pandas as pd -from datasets import load_dataset -from filelock import FileLock -from wikisql_utils import _TYPE_CONVERTER, retrieve_wikisql_query_answer_tapas - -import transformers -from transformers import ( - AutoConfig, - BartForConditionalGeneration, - DataCollatorForSeq2Seq, - HfArgumentParser, - Seq2SeqTrainer, - Seq2SeqTrainingArguments, - TapexTokenizer, - set_seed, -) -from transformers.file_utils import is_offline_mode -from transformers.trainer_utils import get_last_checkpoint, is_main_process -from transformers.utils import check_min_version - - -# Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.17.0.dev0") - -logger = logging.getLogger(__name__) - -try: - nltk.data.find("tokenizers/punkt") -except (LookupError, OSError): - if is_offline_mode(): - raise LookupError( - "Offline mode: run this script without TRANSFORMERS_OFFLINE first to download nltk data files" - ) - with FileLock(".lock") as lock: - nltk.download("punkt", quiet=True) - - -@dataclass -class ModelArguments: - """ - Arguments pertaining to which model/config/tokenizer we are going to fine-tune from. - """ - - model_name_or_path: str = field( - metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}, - ) - config_name: Optional[str] = field( - default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} - ) - tokenizer_name: Optional[str] = field( - default=None, - metadata={ - "help": ( - "Pretrained tokenizer name or path if not the same as model_name. " - "By default we use BART-large tokenizer for TAPEX-large." - ) - }, - ) - cache_dir: Optional[str] = field( - default=None, - metadata={"help": "Where to store the pretrained models downloaded from huggingface.co"}, - ) - use_fast_tokenizer: bool = field( - default=True, - metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, - ) - model_revision: str = field( - default="main", - metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, - ) - use_auth_token: bool = field( - default=False, - metadata={ - "help": ( - "Will use the token generated when running `huggingface-cli login` (necessary to use this script " - "with private models)." - ) - }, - ) - - -@dataclass -class DataTrainingArguments: - """ - Arguments pertaining to what data we are going to input our model for training and eval. 
- """ - - dataset_name: Optional[str] = field( - default="wikisql", metadata={"help": "The name of the dataset to use (via the datasets library)."} - ) - dataset_config_name: Optional[str] = field( - default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} - ) - train_file: Optional[str] = field( - default=None, metadata={"help": "The input training data file (a jsonlines or csv file)."} - ) - validation_file: Optional[str] = field( - default=None, - metadata={ - "help": ( - "An optional input evaluation data file to evaluate the metrics (rouge) on (a jsonlines or csv file)." - ) - }, - ) - test_file: Optional[str] = field( - default=None, - metadata={ - "help": "An optional input test data file to evaluate the metrics (rouge) on (a jsonlines or csv file)." - }, - ) - overwrite_cache: bool = field( - default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} - ) - preprocessing_num_workers: Optional[int] = field( - default=None, - metadata={"help": "The number of processes to use for the preprocessing."}, - ) - max_source_length: Optional[int] = field( - default=1024, - metadata={ - "help": ( - "The maximum total input sequence length after tokenization. Sequences longer " - "than this will be truncated, sequences shorter will be padded." - ) - }, - ) - max_target_length: Optional[int] = field( - default=128, - metadata={ - "help": ( - "The maximum total sequence length for target text after tokenization. Sequences longer " - "than this will be truncated, sequences shorter will be padded." - ) - }, - ) - val_max_target_length: Optional[int] = field( - default=None, - metadata={ - "help": ( - "The maximum total sequence length for validation target text after tokenization. Sequences longer " - "than this will be truncated, sequences shorter will be padded. Will default to `max_target_length`. " - "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used " - "during ``evaluate`` and ``predict``." - ) - }, - ) - pad_to_max_length: bool = field( - default=False, - metadata={ - "help": ( - "Whether to pad all samples to model maximum sentence length. " - "If False, will pad the samples dynamically when batching to the maximum length in the batch. More " - "efficient on GPU but very bad for TPU." - ) - }, - ) - max_train_samples: Optional[int] = field( - default=None, - metadata={ - "help": ( - "For debugging purposes or quicker training, truncate the number of training examples to this " - "value if set." - ) - }, - ) - max_eval_samples: Optional[int] = field( - default=None, - metadata={ - "help": ( - "For debugging purposes or quicker training, truncate the number of evaluation examples to this " - "value if set." - ) - }, - ) - max_predict_samples: Optional[int] = field( - default=None, - metadata={ - "help": ( - "For debugging purposes or quicker training, truncate the number of prediction examples to this " - "value if set." - ) - }, - ) - num_beams: Optional[int] = field( - default=None, - metadata={ - "help": ( - "Number of beams to use for evaluation. This argument will be passed to ``model.generate``, " - "which is used during ``evaluate`` and ``predict``." - ) - }, - ) - ignore_pad_token_for_loss: bool = field( - default=True, - metadata={ - "help": "Whether to ignore the tokens corresponding to padded labels in the loss computation or not." 
- }, - ) - - def __post_init__(self): - if self.dataset_name is None and self.train_file is None and self.validation_file is None: - raise ValueError("Need either a dataset name or a training/validation file.") - else: - if self.train_file is not None: - extension = self.train_file.split(".")[-1] - assert extension in ["csv", "json"], "`train_file` should be a csv or a json file." - if self.validation_file is not None: - extension = self.validation_file.split(".")[-1] - assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file." - if self.val_max_target_length is None: - self.val_max_target_length = self.max_target_length - - -def main(): - # See all possible arguments in src/transformers/training_args.py - # or by passing the --help flag to this script. - # We now keep distinct sets of args, for a cleaner separation of concerns. - - parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments)) - if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): - # If we pass only one argument to the script and it's the path to a json file, - # let's parse it to get our arguments. - model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) - else: - model_args, data_args, training_args = parser.parse_args_into_dataclasses() - - # Detecting last checkpoint. - last_checkpoint = None - if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: - last_checkpoint = get_last_checkpoint(training_args.output_dir) - if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: - raise ValueError( - f"Output directory ({training_args.output_dir}) already exists and is not empty. " - "Use --overwrite_output_dir to overcome." - ) - elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: - logger.info( - f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " - "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." - ) - - # Setup logging - logging.basicConfig( - format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", - datefmt="%m/%d/%Y %H:%M:%S", - handlers=[logging.StreamHandler(sys.stdout)], - ) - logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN) - - # Log on each process the small summary: - logger.warning( - f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" - + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}" - ) - # Set the verbosity to info of the Transformers logger (on main process only): - if is_main_process(training_args.local_rank): - transformers.utils.logging.set_verbosity_info() - logger.info(f"Training/evaluation parameters {training_args}") - - # Set seed before initializing model. - set_seed(training_args.seed) - - # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below) - # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ - # (the dataset will be downloaded automatically from the datasets Hub). - # - # For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table. - # - # In distributed training, the load_dataset function guarantee that only one local process can concurrently - # download the dataset. 
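-    # With the default `--dataset_name wikisql`, the dataset is pulled from the Hub; local csv/json files are only used otherwise.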
- if data_args.dataset_name is not None: - # Downloading and loading a dataset from the hub. - datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir) - else: - data_files = {} - if data_args.train_file is not None: - data_files["train"] = data_args.train_file - extension = data_args.train_file.split(".")[-1] - if data_args.validation_file is not None: - data_files["validation"] = data_args.validation_file - extension = data_args.validation_file.split(".")[-1] - if data_args.test_file is not None: - data_files["test"] = data_args.test_file - extension = data_args.test_file.split(".")[-1] - datasets = load_dataset(extension, data_files=data_files, cache_dir=model_args.cache_dir) - - # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at - # https://huggingface.co/docs/datasets/loading_datasets. - - # Load pretrained model and tokenizer - # - # Distributed training: - # The .from_pretrained methods guarantee that only one local process can concurrently - # download model & vocab. - - config = AutoConfig.from_pretrained( - model_args.config_name if model_args.config_name else model_args.model_name_or_path, - cache_dir=model_args.cache_dir, - revision=model_args.model_revision, - token=True if model_args.use_auth_token else None, - ) - - # IMPORTANT: the initial BART model's decoding is penalized by no_repeat_ngram_size, and thus - # we should disable it here to avoid problematic generation - config.no_repeat_ngram_size = 0 - config.max_length = 1024 - config.early_stopping = False - - # load tapex tokenizer - tokenizer = TapexTokenizer.from_pretrained( - model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, - cache_dir=model_args.cache_dir, - use_fast=model_args.use_fast_tokenizer, - revision=model_args.model_revision, - token=True if model_args.use_auth_token else None, - add_prefix_space=True, - ) - - # load Bart based Tapex model (default tapex-large) - model = BartForConditionalGeneration.from_pretrained( - model_args.model_name_or_path, - from_tf=bool(".ckpt" in model_args.model_name_or_path), - config=config, - cache_dir=model_args.cache_dir, - revision=model_args.model_revision, - token=True if model_args.use_auth_token else None, - ) - - if model.config.decoder_start_token_id is None: - raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined") - - # Preprocessing the datasets. - # We need to tokenize inputs and targets. - if training_args.do_train: - column_names = datasets["train"].column_names - elif training_args.do_eval: - column_names = datasets["validation"].column_names - elif training_args.do_predict: - column_names = datasets["test"].column_names - else: - logger.info("There is nothing to do. Please pass `do_train`, `do_eval` and/or `do_predict`.") - return - - # Temporarily set max_target_length for training. - max_target_length = data_args.max_target_length - padding = "max_length" if data_args.pad_to_max_length else False - - if training_args.label_smoothing_factor > 0 and not hasattr(model, "prepare_decoder_input_ids_from_labels"): - logger.warning( - "label_smoothing is enabled but the `prepare_decoder_input_ids_from_labels` method is not defined for " - f"`{model.__class__.__name__}`. 
This will lead to loss being calculated twice and will take up more memory" - ) - - def preprocess_tableqa_function(examples, is_training=False): - """ - The is_training FLAG is used to identify if we could use the supervision - to truncate the table content if it is required. - """ - - # this function is specific for WikiSQL since the util function need the data structure - # to retrieve the WikiSQL answer for each question - def _convert_table_types(_table): - """Runs the type converter over the table cells.""" - ret_table = deepcopy(_table) - types = ret_table["types"] - ret_table["real_rows"] = ret_table["rows"] - typed_rows = [] - for row in ret_table["rows"]: - typed_row = [] - for column, cell_value in enumerate(row): - typed_row.append(_TYPE_CONVERTER[types[column]](cell_value)) - typed_rows.append(typed_row) - ret_table["rows"] = typed_rows - return ret_table - - questions = [question.lower() for question in examples["question"]] - example_tables = examples["table"] - example_sqls = examples["sql"] - tables = [ - pd.DataFrame.from_records(example_table["rows"], columns=example_table["header"]) - for example_table in example_tables - ] - - # using tapas utils to obtain wikisql answer - answers = [] - for example_sql, example_table in zip(example_sqls, example_tables): - tapas_table = _convert_table_types(example_table) - answer_list: List[str] = retrieve_wikisql_query_answer_tapas(tapas_table, example_sql) - # you can choose other delimiters to split each answer - answers.append(answer_list) - - # IMPORTANT: we cannot pass by answers during evaluation, answers passed during training are used to - # truncate large tables in the train set! - if is_training: - model_inputs = tokenizer( - table=tables, - query=questions, - answer=answers, - max_length=data_args.max_source_length, - padding=padding, - truncation=True, - ) - else: - model_inputs = tokenizer( - table=tables, query=questions, max_length=data_args.max_source_length, padding=padding, truncation=True - ) - - labels = tokenizer( - answer=[", ".join(answer) for answer in answers], - max_length=max_target_length, - padding=padding, - truncation=True, - ) - - # If we are padding here, replace all tokenizer.pad_token_id in the labels by -100 when we want to ignore - # padding in the loss. 
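-        # -100 is the ignore index of the cross-entropy loss, so padded label positions do not contribute to the loss.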
- if padding == "max_length" and data_args.ignore_pad_token_for_loss: - labels["input_ids"] = [ - [(l if l != tokenizer.pad_token_id else -100) for l in label] for label in labels["input_ids"] - ] - - model_inputs["labels"] = labels["input_ids"] - - return model_inputs - - # in training, we can use the answer as extra information to truncate large tables - preprocess_tableqa_function_training = partial(preprocess_tableqa_function, is_training=True) - - if training_args.do_train: - if "train" not in datasets: - raise ValueError("--do_train requires a train dataset") - train_dataset = datasets["train"] - if data_args.max_train_samples is not None: - train_dataset = train_dataset.select(range(data_args.max_train_samples)) - train_dataset = train_dataset.map( - preprocess_tableqa_function_training, - batched=True, - num_proc=data_args.preprocessing_num_workers, - remove_columns=column_names, - load_from_cache_file=not data_args.overwrite_cache, - ) - - if training_args.do_eval: - max_target_length = data_args.val_max_target_length - if "validation" not in datasets: - raise ValueError("--do_eval requires a validation dataset") - eval_dataset = datasets["validation"] - if data_args.max_eval_samples is not None: - eval_dataset = eval_dataset.select(range(data_args.max_eval_samples)) - eval_dataset = eval_dataset.map( - preprocess_tableqa_function, - batched=True, - num_proc=data_args.preprocessing_num_workers, - remove_columns=column_names, - load_from_cache_file=not data_args.overwrite_cache, - ) - - if training_args.do_predict: - max_target_length = data_args.val_max_target_length - if "test" not in datasets: - raise ValueError("--do_predict requires a test dataset") - predict_dataset = datasets["test"] - if data_args.max_predict_samples is not None: - predict_dataset = predict_dataset.select(range(data_args.max_predict_samples)) - predict_dataset = predict_dataset.map( - preprocess_tableqa_function, - batched=True, - num_proc=data_args.preprocessing_num_workers, - remove_columns=column_names, - load_from_cache_file=not data_args.overwrite_cache, - ) - - # Data collator - label_pad_token_id = -100 if data_args.ignore_pad_token_for_loss else tokenizer.pad_token_id - data_collator = DataCollatorForSeq2Seq( - tokenizer, - model=model, - label_pad_token_id=label_pad_token_id, - pad_to_multiple_of=8 if training_args.fp16 else None, - ) - - def postprocess_text(preds, labels): - preds = [pred.strip() for pred in preds] - labels = [label.strip() for label in labels] - - return preds, labels - - def compute_metrics(eval_preds): - preds, labels = eval_preds - if isinstance(preds, tuple): - preds = preds[0] - decoded_preds = tokenizer.batch_decode(preds, skip_special_tokens=True) - if data_args.ignore_pad_token_for_loss: - # Replace -100 in the labels as we can't decode them. 
- labels = np.where(labels != -100, labels, tokenizer.pad_token_id) - decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True) - - # Some simple post-processing - decoded_preds, decoded_labels = postprocess_text(decoded_preds, decoded_labels) - - delimiter = ", " - - # define example evaluation - def evaluate_example(predict_str: str, ground_str: str): - predict_spans = predict_str.split(delimiter) - ground_spans = ground_str.split(delimiter) - predict_values = defaultdict(lambda: 0) - ground_values = defaultdict(lambda: 0) - for span in predict_spans: - try: - predict_values[float(span)] += 1 - except ValueError: - predict_values[span.strip()] += 1 - for span in ground_spans: - try: - ground_values[float(span)] += 1 - except ValueError: - ground_values[span.strip()] += 1 - is_correct = predict_values == ground_values - return is_correct - - def get_denotation_accuracy(predictions: List[str], references: List[str]): - assert len(predictions) == len(references) - correct_num = 0 - for predict_str, ground_str in zip(predictions, references): - is_correct = evaluate_example(predict_str.lower(), ground_str.lower()) - if is_correct: - correct_num += 1 - return correct_num / len(predictions) - - accuracy = get_denotation_accuracy(decoded_preds, decoded_labels) - result = {"denotation_accuracy": accuracy} - - return result - - # Initialize our Trainer - trainer = Seq2SeqTrainer( - model=model, - args=training_args, - train_dataset=train_dataset if training_args.do_train else None, - eval_dataset=eval_dataset if training_args.do_eval else None, - tokenizer=tokenizer, - data_collator=data_collator, - compute_metrics=compute_metrics if training_args.predict_with_generate else None, - ) - - if training_args.do_train: - checkpoint = None - if training_args.resume_from_checkpoint is not None: - checkpoint = training_args.resume_from_checkpoint - elif last_checkpoint is not None: - checkpoint = last_checkpoint - train_result = trainer.train(resume_from_checkpoint=checkpoint) - trainer.save_model() # Saves the tokenizer too for easy upload - - metrics = train_result.metrics - max_train_samples = ( - data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset) - ) - metrics["train_samples"] = min(max_train_samples, len(train_dataset)) - - trainer.log_metrics("train", metrics) - trainer.save_metrics("train", metrics) - trainer.save_state() - - # Evaluation - results = {} - if training_args.do_eval: - logger.info("*** Evaluate ***") - - metrics = trainer.evaluate( - max_length=data_args.val_max_target_length, num_beams=data_args.num_beams, metric_key_prefix="eval" - ) - max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset) - metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset)) - - trainer.log_metrics("eval", metrics) - trainer.save_metrics("eval", metrics) - - if training_args.do_predict: - logger.info("*** Predict ***") - - predict_results = trainer.predict( - predict_dataset, - metric_key_prefix="predict", - max_length=data_args.val_max_target_length, - num_beams=data_args.num_beams, - ) - metrics = predict_results.metrics - max_predict_samples = ( - data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset) - ) - metrics["predict_samples"] = min(max_predict_samples, len(predict_dataset)) - - trainer.log_metrics("predict", metrics) - trainer.save_metrics("predict", metrics) - - if trainer.is_world_process_zero(): - if 
training_args.predict_with_generate: - predictions = tokenizer.batch_decode( - predict_results.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True - ) - predictions = [pred.strip() for pred in predictions] - output_prediction_file = os.path.join(training_args.output_dir, "tapex_predictions.txt") - with open(output_prediction_file, "w") as writer: - writer.write("\n".join(predictions)) - - return results - - -def _mp_fn(index): - # For xla_spawn (TPUs) - main() - - -if __name__ == "__main__": - main() diff --git a/examples/research_projects/tapex/run_wikitablequestions_with_tapex.py b/examples/research_projects/tapex/run_wikitablequestions_with_tapex.py deleted file mode 100644 index 55350025cb3..00000000000 --- a/examples/research_projects/tapex/run_wikitablequestions_with_tapex.py +++ /dev/null @@ -1,625 +0,0 @@ -#!/usr/bin/env python -# coding=utf-8 -# Copyright 2022 The Microsoft and The HuggingFace Inc. team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -Fine-tuning the library models for tapex on table-based question answering tasks. -Adapted from script: https://github.com/huggingface/transformers/blob/master/examples/pytorch/summarization/run_summarization.py -""" - -import logging -import os -import sys -from collections import defaultdict -from dataclasses import dataclass, field -from functools import partial -from typing import List, Optional - -import nltk # Here to have a nice missing dependency error message early on -import numpy as np -import pandas as pd -from datasets import load_dataset -from filelock import FileLock - -import transformers -from transformers import ( - AutoConfig, - BartForConditionalGeneration, - DataCollatorForSeq2Seq, - HfArgumentParser, - Seq2SeqTrainer, - Seq2SeqTrainingArguments, - TapexTokenizer, - set_seed, -) -from transformers.file_utils import is_offline_mode -from transformers.trainer_utils import get_last_checkpoint, is_main_process -from transformers.utils import check_min_version - - -# Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.17.0.dev0") - -logger = logging.getLogger(__name__) - -try: - nltk.data.find("tokenizers/punkt") -except (LookupError, OSError): - if is_offline_mode(): - raise LookupError( - "Offline mode: run this script without TRANSFORMERS_OFFLINE first to download nltk data files" - ) - with FileLock(".lock") as lock: - nltk.download("punkt", quiet=True) - - -@dataclass -class ModelArguments: - """ - Arguments pertaining to which model/config/tokenizer we are going to fine-tune from. 
- """ - - model_name_or_path: str = field( - metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}, - ) - config_name: Optional[str] = field( - default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} - ) - tokenizer_name: Optional[str] = field( - default=None, - metadata={ - "help": ( - "Pretrained tokenizer name or path if not the same as model_name. " - "By default we use BART-large tokenizer for TAPEX-large." - ) - }, - ) - cache_dir: Optional[str] = field( - default=None, - metadata={"help": "Where to store the pretrained models downloaded from huggingface.co"}, - ) - use_fast_tokenizer: bool = field( - default=True, - metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, - ) - model_revision: str = field( - default="main", - metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, - ) - use_auth_token: bool = field( - default=False, - metadata={ - "help": ( - "Will use the token generated when running `huggingface-cli login` (necessary to use this script " - "with private models)." - ) - }, - ) - - -@dataclass -class DataTrainingArguments: - """ - Arguments pertaining to what data we are going to input our model for training and eval. - """ - - dataset_name: Optional[str] = field( - default="wikitablequestions", metadata={"help": "The name of the dataset to use (via the datasets library)."} - ) - dataset_config_name: Optional[str] = field( - default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} - ) - train_file: Optional[str] = field( - default=None, metadata={"help": "The input training data file (a jsonlines or csv file)."} - ) - validation_file: Optional[str] = field( - default=None, - metadata={ - "help": ( - "An optional input evaluation data file to evaluate the metrics (rouge) on (a jsonlines or csv file)." - ) - }, - ) - test_file: Optional[str] = field( - default=None, - metadata={ - "help": "An optional input test data file to evaluate the metrics (rouge) on (a jsonlines or csv file)." - }, - ) - overwrite_cache: bool = field( - default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} - ) - preprocessing_num_workers: Optional[int] = field( - default=None, - metadata={"help": "The number of processes to use for the preprocessing."}, - ) - max_source_length: Optional[int] = field( - default=1024, - metadata={ - "help": ( - "The maximum total input sequence length after tokenization. Sequences longer " - "than this will be truncated, sequences shorter will be padded." - ) - }, - ) - max_target_length: Optional[int] = field( - default=128, - metadata={ - "help": ( - "The maximum total sequence length for target text after tokenization. Sequences longer " - "than this will be truncated, sequences shorter will be padded." - ) - }, - ) - val_max_target_length: Optional[int] = field( - default=None, - metadata={ - "help": ( - "The maximum total sequence length for validation target text after tokenization. Sequences longer " - "than this will be truncated, sequences shorter will be padded. Will default to `max_target_length`. " - "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used " - "during ``evaluate`` and ``predict``." - ) - }, - ) - pad_to_max_length: bool = field( - default=False, - metadata={ - "help": ( - "Whether to pad all samples to model maximum sentence length. 
" - "If False, will pad the samples dynamically when batching to the maximum length in the batch. More " - "efficient on GPU but very bad for TPU." - ) - }, - ) - max_train_samples: Optional[int] = field( - default=None, - metadata={ - "help": ( - "For debugging purposes or quicker training, truncate the number of training examples to this " - "value if set." - ) - }, - ) - max_eval_samples: Optional[int] = field( - default=None, - metadata={ - "help": ( - "For debugging purposes or quicker training, truncate the number of evaluation examples to this " - "value if set." - ) - }, - ) - max_predict_samples: Optional[int] = field( - default=None, - metadata={ - "help": ( - "For debugging purposes or quicker training, truncate the number of prediction examples to this " - "value if set." - ) - }, - ) - num_beams: Optional[int] = field( - default=None, - metadata={ - "help": ( - "Number of beams to use for evaluation. This argument will be passed to ``model.generate``, " - "which is used during ``evaluate`` and ``predict``." - ) - }, - ) - ignore_pad_token_for_loss: bool = field( - default=True, - metadata={ - "help": "Whether to ignore the tokens corresponding to padded labels in the loss computation or not." - }, - ) - - def __post_init__(self): - if self.dataset_name is None and self.train_file is None and self.validation_file is None: - raise ValueError("Need either a dataset name or a training/validation file.") - else: - if self.train_file is not None: - extension = self.train_file.split(".")[-1] - assert extension in ["csv", "json"], "`train_file` should be a csv or a json file." - if self.validation_file is not None: - extension = self.validation_file.split(".")[-1] - assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file." - if self.val_max_target_length is None: - self.val_max_target_length = self.max_target_length - - -def main(): - # See all possible arguments in src/transformers/training_args.py - # or by passing the --help flag to this script. - # We now keep distinct sets of args, for a cleaner separation of concerns. - - parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments)) - if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): - # If we pass only one argument to the script and it's the path to a json file, - # let's parse it to get our arguments. - model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) - else: - model_args, data_args, training_args = parser.parse_args_into_dataclasses() - - # Detecting last checkpoint. - last_checkpoint = None - if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: - last_checkpoint = get_last_checkpoint(training_args.output_dir) - if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: - raise ValueError( - f"Output directory ({training_args.output_dir}) already exists and is not empty. " - "Use --overwrite_output_dir to overcome." - ) - elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: - logger.info( - f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " - "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." 
- ) - - # Setup logging - logging.basicConfig( - format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", - datefmt="%m/%d/%Y %H:%M:%S", - handlers=[logging.StreamHandler(sys.stdout)], - ) - logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN) - - # Log on each process the small summary: - logger.warning( - f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" - + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}" - ) - # Set the verbosity to info of the Transformers logger (on main process only): - if is_main_process(training_args.local_rank): - transformers.utils.logging.set_verbosity_info() - logger.info(f"Training/evaluation parameters {training_args}") - - # Set seed before initializing model. - set_seed(training_args.seed) - - # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below) - # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ - # (the dataset will be downloaded automatically from the datasets Hub). - # - # For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table. - # - # In distributed training, the load_dataset function guarantee that only one local process can concurrently - # download the dataset. - if data_args.dataset_name is not None: - # Downloading and loading a dataset from the hub. - datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir) - else: - data_files = {} - if data_args.train_file is not None: - data_files["train"] = data_args.train_file - extension = data_args.train_file.split(".")[-1] - if data_args.validation_file is not None: - data_files["validation"] = data_args.validation_file - extension = data_args.validation_file.split(".")[-1] - if data_args.test_file is not None: - data_files["test"] = data_args.test_file - extension = data_args.test_file.split(".")[-1] - datasets = load_dataset(extension, data_files=data_files, cache_dir=model_args.cache_dir) - - # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at - # https://huggingface.co/docs/datasets/loading_datasets. - - # Load pretrained model and tokenizer - # - # Distributed training: - # The .from_pretrained methods guarantee that only one local process can concurrently - # download model & vocab. 
- - config = AutoConfig.from_pretrained( - model_args.config_name if model_args.config_name else model_args.model_name_or_path, - cache_dir=model_args.cache_dir, - revision=model_args.model_revision, - token=True if model_args.use_auth_token else None, - ) - - # IMPORTANT: the initial BART model's decoding is penalized by no_repeat_ngram_size, and thus - # we should disable it here to avoid problematic generation - config.no_repeat_ngram_size = 0 - config.max_length = 1024 - config.early_stopping = False - - # load tapex tokenizer - tokenizer = TapexTokenizer.from_pretrained( - model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, - cache_dir=model_args.cache_dir, - use_fast=model_args.use_fast_tokenizer, - revision=model_args.model_revision, - token=True if model_args.use_auth_token else None, - add_prefix_space=True, - ) - - # load Bart based Tapex model (default tapex-large) - model = BartForConditionalGeneration.from_pretrained( - model_args.model_name_or_path, - from_tf=bool(".ckpt" in model_args.model_name_or_path), - config=config, - cache_dir=model_args.cache_dir, - revision=model_args.model_revision, - token=True if model_args.use_auth_token else None, - ) - - if model.config.decoder_start_token_id is None: - raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined") - - # Preprocessing the datasets. - # We need to tokenize inputs and targets. - if training_args.do_train: - column_names = datasets["train"].column_names - elif training_args.do_eval: - column_names = datasets["validation"].column_names - elif training_args.do_predict: - column_names = datasets["test"].column_names - else: - logger.info("There is nothing to do. Please pass `do_train`, `do_eval` and/or `do_predict`.") - return - - # Temporarily set max_target_length for training. - max_target_length = data_args.max_target_length - padding = "max_length" if data_args.pad_to_max_length else False - - if training_args.label_smoothing_factor > 0 and not hasattr(model, "prepare_decoder_input_ids_from_labels"): - logger.warning( - "label_smoothing is enabled but the `prepare_decoder_input_ids_from_labels` method is not defined for " - f"`{model.__class__.__name__}`. This will lead to loss being calculated twice and will take up more memory" - ) - - def preprocess_tableqa_function(examples, is_training=False): - """ - The is_training FLAG is used to identify if we could use the supervision - to truncate the table content if it is required. - """ - - questions = [question.lower() for question in examples["question"]] - example_tables = examples["table"] - tables = [ - pd.DataFrame.from_records(example_table["rows"], columns=example_table["header"]) - for example_table in example_tables - ] - - # using wikitablequestion's answer set - answers = examples["answers"] - - # IMPORTANT: we cannot pass by answers during evaluation, answers passed during training are used to - # truncate large tables in the train set! 
- if is_training: - model_inputs = tokenizer( - table=tables, - query=questions, - answer=answers, - max_length=data_args.max_source_length, - padding=padding, - truncation=True, - ) - else: - model_inputs = tokenizer( - table=tables, query=questions, max_length=data_args.max_source_length, padding=padding, truncation=True - ) - - labels = tokenizer( - answer=[", ".join(answer) for answer in answers], - max_length=max_target_length, - padding=padding, - truncation=True, - ) - - # If we are padding here, replace all tokenizer.pad_token_id in the labels by -100 when we want to ignore - # padding in the loss. - if padding == "max_length" and data_args.ignore_pad_token_for_loss: - labels["input_ids"] = [ - [(l if l != tokenizer.pad_token_id else -100) for l in label] for label in labels["input_ids"] - ] - - model_inputs["labels"] = labels["input_ids"] - - return model_inputs - - # in training, we can use the answer as extra information to truncate large tables - preprocess_tableqa_function_training = partial(preprocess_tableqa_function, is_training=True) - - if training_args.do_train: - if "train" not in datasets: - raise ValueError("--do_train requires a train dataset") - train_dataset = datasets["train"] - if data_args.max_train_samples is not None: - train_dataset = train_dataset.select(range(data_args.max_train_samples)) - train_dataset = train_dataset.map( - preprocess_tableqa_function_training, - batched=True, - num_proc=data_args.preprocessing_num_workers, - remove_columns=column_names, - load_from_cache_file=not data_args.overwrite_cache, - ) - - if training_args.do_eval: - max_target_length = data_args.val_max_target_length - if "validation" not in datasets: - raise ValueError("--do_eval requires a validation dataset") - eval_dataset = datasets["validation"] - if data_args.max_eval_samples is not None: - eval_dataset = eval_dataset.select(range(data_args.max_eval_samples)) - eval_dataset = eval_dataset.map( - preprocess_tableqa_function, - batched=True, - num_proc=data_args.preprocessing_num_workers, - remove_columns=column_names, - load_from_cache_file=not data_args.overwrite_cache, - ) - - if training_args.do_predict: - max_target_length = data_args.val_max_target_length - if "test" not in datasets: - raise ValueError("--do_predict requires a test dataset") - predict_dataset = datasets["test"] - if data_args.max_predict_samples is not None: - predict_dataset = predict_dataset.select(range(data_args.max_predict_samples)) - predict_dataset = predict_dataset.map( - preprocess_tableqa_function, - batched=True, - num_proc=data_args.preprocessing_num_workers, - remove_columns=column_names, - load_from_cache_file=not data_args.overwrite_cache, - ) - - # Data collator - label_pad_token_id = -100 if data_args.ignore_pad_token_for_loss else tokenizer.pad_token_id - data_collator = DataCollatorForSeq2Seq( - tokenizer, - model=model, - label_pad_token_id=label_pad_token_id, - pad_to_multiple_of=8 if training_args.fp16 else None, - ) - - def postprocess_text(preds, labels): - preds = [pred.strip() for pred in preds] - labels = [label.strip() for label in labels] - - return preds, labels - - def compute_metrics(eval_preds): - preds, labels = eval_preds - if isinstance(preds, tuple): - preds = preds[0] - decoded_preds = tokenizer.batch_decode(preds, skip_special_tokens=True) - if data_args.ignore_pad_token_for_loss: - # Replace -100 in the labels as we can't decode them. 
- labels = np.where(labels != -100, labels, tokenizer.pad_token_id) - decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True) - - # Some simple post-processing - decoded_preds, decoded_labels = postprocess_text(decoded_preds, decoded_labels) - - delimiter = ", " - - # define example evaluation - def evaluate_example(predict_str: str, ground_str: str): - predict_spans = predict_str.split(delimiter) - ground_spans = ground_str.split(delimiter) - predict_values = defaultdict(lambda: 0) - ground_values = defaultdict(lambda: 0) - for span in predict_spans: - try: - predict_values[float(span)] += 1 - except ValueError: - predict_values[span.strip()] += 1 - for span in ground_spans: - try: - ground_values[float(span)] += 1 - except ValueError: - ground_values[span.strip()] += 1 - _is_correct = predict_values == ground_values - return _is_correct - - def get_denotation_accuracy(predictions: List[str], references: List[str]): - assert len(predictions) == len(references) - correct_num = 0 - for predict_str, ground_str in zip(predictions, references): - is_correct = evaluate_example(predict_str.lower(), ground_str.lower()) - if is_correct: - correct_num += 1 - return correct_num / len(predictions) - - accuracy = get_denotation_accuracy(decoded_preds, decoded_labels) - result = {"denotation_accuracy": accuracy} - - return result - - # Initialize our Trainer - trainer = Seq2SeqTrainer( - model=model, - args=training_args, - train_dataset=train_dataset if training_args.do_train else None, - eval_dataset=eval_dataset if training_args.do_eval else None, - tokenizer=tokenizer, - data_collator=data_collator, - compute_metrics=compute_metrics if training_args.predict_with_generate else None, - ) - - if training_args.do_train: - checkpoint = None - if training_args.resume_from_checkpoint is not None: - checkpoint = training_args.resume_from_checkpoint - elif last_checkpoint is not None: - checkpoint = last_checkpoint - train_result = trainer.train(resume_from_checkpoint=checkpoint) - trainer.save_model() # Saves the tokenizer too for easy upload - - metrics = train_result.metrics - max_train_samples = ( - data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset) - ) - metrics["train_samples"] = min(max_train_samples, len(train_dataset)) - - trainer.log_metrics("train", metrics) - trainer.save_metrics("train", metrics) - trainer.save_state() - - # Evaluation - results = {} - if training_args.do_eval: - logger.info("*** Evaluate ***") - - metrics = trainer.evaluate( - max_length=data_args.val_max_target_length, num_beams=data_args.num_beams, metric_key_prefix="eval" - ) - max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset) - metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset)) - - trainer.log_metrics("eval", metrics) - trainer.save_metrics("eval", metrics) - - if training_args.do_predict: - logger.info("*** Predict ***") - - predict_results = trainer.predict( - predict_dataset, - metric_key_prefix="predict", - max_length=data_args.val_max_target_length, - num_beams=data_args.num_beams, - ) - metrics = predict_results.metrics - max_predict_samples = ( - data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset) - ) - metrics["predict_samples"] = min(max_predict_samples, len(predict_dataset)) - - trainer.log_metrics("predict", metrics) - trainer.save_metrics("predict", metrics) - - if trainer.is_world_process_zero(): - if 
training_args.predict_with_generate: - predictions = tokenizer.batch_decode( - predict_results.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True - ) - predictions = [pred.strip() for pred in predictions] - output_prediction_file = os.path.join(training_args.output_dir, "tapex_predictions.txt") - with open(output_prediction_file, "w") as writer: - writer.write("\n".join(predictions)) - - return results - - -def _mp_fn(index): - # For xla_spawn (TPUs) - main() - - -if __name__ == "__main__": - main() diff --git a/examples/research_projects/tapex/wikisql_utils.py b/examples/research_projects/tapex/wikisql_utils.py deleted file mode 100644 index 13d10e091a1..00000000000 --- a/examples/research_projects/tapex/wikisql_utils.py +++ /dev/null @@ -1,257 +0,0 @@ -# coding=utf-8 -# Copyright 2022 The Microsoft, The Google and The HuggingFace Inc. team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import dataclasses -import enum -import functools -import math -import re - -# The following script is adapted from the script of TaPas. -# Original: https://github.com/google-research/tapas/master/wikisql_utils.py -from typing import Any, List - - -EMPTY_ANSWER = "none" -EMPTY_ANSWER_AGG = "none" - - -def _split_thousands(delimiter, value): - split = value.split(delimiter) - return len(split) > 1 and any((len(x) == 3 for x in split)) - - -def convert_to_float(value): - """Converts value to a float using a series of increasingly complex heuristics. - Args: - value: object that needs to be converted. Allowed types include - float/int/strings. - Returns: - A float interpretation of value. - Raises: - ValueError if the float conversion of value fails. - """ - if isinstance(value, float): - return value - if isinstance(value, int): - return float(value) - if not isinstance(value, str): - raise TypeError("Argument value is not a string. Can't parse it as float") - sanitized = value - - try: - # Example: 1,000.7 - if "." in sanitized and "," in sanitized: - return float(sanitized.replace(",", "")) - # 1,000 - if "," in sanitized and _split_thousands(",", sanitized): - return float(sanitized.replace(",", "")) - # 5,5556 - if "," in sanitized and sanitized.count(",") == 1 and not _split_thousands(",", sanitized): - return float(sanitized.replace(",", ".")) - # 0.0.0.1 - if sanitized.count(".") > 1: - return float(sanitized.replace(".", "")) - # 0,0,0,1 - if sanitized.count(",") > 1: - return float(sanitized.replace(",", "")) - return float(sanitized) - except ValueError: - # Avoid adding the sanitized value in the error message. - raise ValueError("Unable to convert value to float") - - -def _normalize_float(answer): - if answer is None: - return None - try: - value = convert_to_float(answer) - if isinstance(value, float) and math.isnan(value): - return None - return value - except ValueError: - return answer.lower() - - -_TYPE_CONVERTER = { - "text": lambda x: x, - "real": convert_to_float, -} - - -class _Aggregation(enum.Enum): - """Aggregations as defined by WikiSQL. 
Indexes match the data.""" - - NONE = 0 - MAX = 1 - MIN = 2 - COUNT = 3 - SUM = 4 - AVERAGE = 5 - - -class _Operator(enum.Enum): - """The boolean operators used by WikiSQL. Indexes match the data.""" - - EQUALS = 0 - GREATER = 1 - LESSER = 2 - - -@dataclasses.dataclass -class _Condition: - """Represents an SQL where clauses (e.g A = "a" or B > 5).""" - - column: str - operator: _Operator - cmp_value: Any - - -_TOKENIZER = re.compile(r"\w+|[^\w\s]+", re.UNICODE | re.MULTILINE | re.DOTALL) - - -def _normalize_for_match(x): - return list(_TOKENIZER.findall(x.lower())) - - -def _compare(operator, src, tgt): - if operator == _Operator.EQUALS: - return src == tgt - elif operator == _Operator.GREATER: - return src > tgt - elif operator == _Operator.LESSER: - return src < tgt - raise ValueError(f"Unknown operator: {operator}") - - -def _parse_value(table, column, cell_value): - """Convert numeric values to floats and keeps everything else as string.""" - types = table["types"] - return _TYPE_CONVERTER[types[column]](cell_value) - - -def _is_string(x): - return isinstance(x, str) - - -def _respect_conditions(table, row, conditions): - """True if 'row' satisfies all 'conditions'.""" - for cond in conditions: - table_value = row[cond.column] - - cmp_value = _parse_value(table, cond.column, cond.cmp_value) - - if _is_string(table_value) and _is_string(cmp_value): - table_value = _normalize_for_match(table_value) - cmp_value = _normalize_for_match(cmp_value) - - if not isinstance(table_value, type(cmp_value)): - raise TypeError("Type difference {} != {}".format(type(table_value), type(cmp_value))) - - if not _compare(cond.operator, table_value, cmp_value): - return False - return True - - -def _get_float_answer(table, answer_coordinates, aggregation_op): - """Applies operation to produce reference float answer.""" - if not answer_coordinates: - if aggregation_op == _Aggregation.COUNT: - return 0.0 - else: - return EMPTY_ANSWER_AGG - - # Count can support non numeric answers. - if aggregation_op == _Aggregation.COUNT: - return float(len(answer_coordinates)) - - # If we have just one answer, if float returns it or try a conversion. - values = [table["rows"][i][j] for (i, j) in answer_coordinates] - if len(answer_coordinates) == 1: - try: - return convert_to_float(values[0]) - except ValueError as e: - if aggregation_op != _Aggregation.NONE: - raise e - - if aggregation_op == _Aggregation.NONE: - return None - - # Other aggregation only support numeric values. Bail out if we have strings. - if not all((isinstance(v, (int, float)) for v in values)): - return None - - if aggregation_op == _Aggregation.SUM: - return float(sum(values)) - elif aggregation_op == _Aggregation.AVERAGE: - return sum(values) / len(answer_coordinates) - else: - raise ValueError(f"Unknown aggregation: {aggregation_op}") - - -def _get_answer_coordinates(table, sql_query): - """Retrieves references coordinates by executing SQL.""" - # MAX and MIN are automatically supported by the model. 
- aggregation_op_index = sql_query["agg"] - if aggregation_op_index >= 3: - aggregation_op = _Aggregation(aggregation_op_index) - else: - aggregation_op = _Aggregation.NONE - - target_column = sql_query["sel"] - conditions = [ - _Condition(column, _Operator(operator), cmp_value) - for column, operator, cmp_value in zip( - sql_query["conds"]["column_index"], sql_query["conds"]["operator_index"], sql_query["conds"]["condition"] - ) - ] - - indices = [] - for row in range(len(table["rows"])): - if _respect_conditions(table, table["rows"][row], conditions): - indices.append((row, target_column)) - - if not indices: - return [], aggregation_op - - if len(indices) == 1: - return indices, aggregation_op - - # Parsing of MIN/MAX. - if aggregation_op_index in (1, 2): - operators = {2: min, 1: max} - values = [(table["rows"][i][j], index) for index, (i, j) in enumerate(indices)] - reduced = functools.reduce(operators[sql_query["agg"]], values) - - ret = [indices[reduced[1]]] - return ret, _Aggregation.NONE - - return indices, aggregation_op - - -def _get_answer_text(table, answer_coordinates, float_answer): - if float_answer is not None: - return [str(float_answer)] - return [str(table["real_rows"][r][c]) for r, c in answer_coordinates] - - -def retrieve_wikisql_query_answer_tapas(table, example) -> List: - answer_coordinates, aggregation_op = _get_answer_coordinates(table, example) - float_answer = _get_float_answer(table, answer_coordinates, aggregation_op) - answer_text = _get_answer_text(table, answer_coordinates, float_answer) - # keep the original data the same with TaPas - if len(answer_text) == 0: - answer_text = [EMPTY_ANSWER] - return answer_text diff --git a/examples/research_projects/token-healing/README.md b/examples/research_projects/token-healing/README.md deleted file mode 100644 index f3594f32dc7..00000000000 --- a/examples/research_projects/token-healing/README.md +++ /dev/null @@ -1,40 +0,0 @@ - - - - -## What is token healing? - -Token healing rectifies the token boundary bias in greedy tokenization. It does this by trimming and regrowing the prompt to better align with the model's tokenizer, thus enhancing generation quality. The improvement is clearest with completion models. - -Example: given a completion prompt with a partial url ending with `:`, the model might have seen the expected completion `://` as a _single_ token in training. However, the prompt's tail token `:` tells it that the next token is not `//`, and so it looks for wrong completions. Such errors compound in auto-regressive language models. - -Debiasing token boundaries also addresses output sensitivity to prompts ending with whitespace. - -A more thorough explanation can be found on [The Art of Prompt Design: Prompt Boundaries and Token Healing | by Scott Lundberg](https://towardsdatascience.com/the-art-of-prompt-design-prompt-boundaries-and-token-healing-3b2448b0be38). - -## Usage - -```py -prompt = 'The link is (back to top)
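A minimal usage sketch for the `## Usage` snippet above, which is cut short here. The checkpoint name is the default taken from `run_token_healing.py` below, the prompt follows the README's partial-URL example, and `token_healing` is passed through `GenerationConfig` as that script does; treat the names as illustrative rather than canonical.

```py
from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig

# Illustrative checkpoint (the default in run_token_healing.py); any causal-LM checkpoint should behave similarly.
model_name = "TheBloke/deepseek-llm-7B-base-GPTQ"
model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto")
tokenizer = AutoTokenizer.from_pretrained(model_name)

# A completion prompt with a partial URL ending in ":", the boundary case described above.
prompt = "The link is https:"
input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(model.device)

# Enable token healing through the generation config, as in run_token_healing.py.
generation_config = GenerationConfig(
    max_new_tokens=8,
    token_healing=True,
    pad_token_id=model.config.pad_token_id,
)
output = model.generate(inputs=input_ids, generation_config=generation_config)
print(tokenizer.batch_decode(output, skip_special_tokens=True)[0])

# Healing can also be applied on its own before delegating generation elsewhere:
healed_ids = model.heal_tokens(input_ids)
print(tokenizer.batch_decode(healed_ids, skip_special_tokens=True)[0])
```

The same trimming-and-regrowing of the prompt tail can thus be used either inline during `generate` or as a standalone preprocessing step, which is exactly what the `run_token_healing.py` script removed in this patch demonstrates.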

\ No newline at end of file diff --git a/examples/research_projects/token-healing/run_token_healing.py b/examples/research_projects/token-healing/run_token_healing.py deleted file mode 100644 index 2dd9148c1bc..00000000000 --- a/examples/research_projects/token-healing/run_token_healing.py +++ /dev/null @@ -1,62 +0,0 @@ -import argparse - -from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig - - -def generate(inputs, model, tokenizer, token_healing): - input_ids = tokenizer(inputs, return_tensors="pt", padding=True, device_map="auto").input_ids - generation_config = GenerationConfig( - max_new_tokens=8, - token_healing=token_healing, - pad_token_id=model.config.pad_token_id, - repetition_penalty=1.1, - ) - output = model.generate(inputs=input_ids, generation_config=generation_config) - return tokenizer.batch_decode(output, skip_special_tokens=True) - - -def main(): - parser = argparse.ArgumentParser() - parser.add_argument("--prompt", type=str) - parser.add_argument("--model_name_or_path", type=str, default="TheBloke/deepseek-llm-7B-base-GPTQ") - args = parser.parse_args() - - prompts = ( - [args.prompt] - if args.prompt - else [ - 'An example ["like this"] and another example [', - 'The link is https - "I read a book about ", # test trailing whitespace - "I read a book about", # test nothing to heal - ] - ) - - model_name_or_path = args.model_name_or_path - completion_model = AutoModelForCausalLM.from_pretrained( - model_name_or_path, - device_map="auto", - use_cache=True, - ) - tokenizer = AutoTokenizer.from_pretrained(model_name_or_path) - - raw_output = generate(prompts, completion_model, tokenizer, token_healing=False) - healed_output = generate(prompts, completion_model, tokenizer, token_healing=True) - - for p, a, b in zip(prompts, raw_output, healed_output): - print(f"\nPrompt: {p}\nWithout healing:\n{a}\nWith healing:\n{b}") - - # You can also use token healing in isolation - # This can be useful if you have other work to do before the generation - # Or if you want to delegate generation to another process - input_ids = tokenizer(prompts, return_tensors="pt", padding=True).input_ids.cuda() - healed_ids = completion_model.heal_tokens(input_ids) - healed_prompts = tokenizer.batch_decode(healed_ids, skip_special_tokens=True) - print("\nhealed prompts:") - for p in healed_prompts: - print(p) - - -if __name__ == "__main__": - main() diff --git a/examples/research_projects/visual_bert/README.md b/examples/research_projects/visual_bert/README.md deleted file mode 100644 index ec197ce5f35..00000000000 --- a/examples/research_projects/visual_bert/README.md +++ /dev/null @@ -1,6 +0,0 @@ -# VisualBERT Demo - -This demo shows usage of VisualBERT VQA model and is adapted from LXMERT demo present [here](https://github.com/huggingface/transformers/blob/main/examples/research_projects/lxmert/demo.ipynb). -1. make a virtualenv: ``virtualenv venv`` and activate ``source venv/bin/activate`` -2. install reqs: ``pip install -r ./requirements.txt`` -3. 
usage is as shown in demo.ipynb diff --git a/examples/research_projects/visual_bert/demo.ipynb b/examples/research_projects/visual_bert/demo.ipynb deleted file mode 100644 index 9f61beea8e2..00000000000 --- a/examples/research_projects/visual_bert/demo.ipynb +++ /dev/null @@ -1,255 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [], - "source": [ - "# %pip install-r requirements.txt" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "**Note**: This demo is adapted from the LXMERT Demo present here: https://github.com/huggingface/transformers/tree/main/examples/research_projects/lxmert" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2021-08-11 04:32:30.532299: I tensorflow/stream_executor/platform/default/dso_loader.cc:53] Successfully opened dynamic library libcudart.so.11.0\n" - ] - } - ], - "source": [ - "import io\n", - "\n", - "import numpy as np\n", - "import PIL.Image\n", - "import torch\n", - "from IPython.display import Image, display\n", - "from modeling_frcnn import GeneralizedRCNN\n", - "from processing_image import Preprocess\n", - "from visualizing_image import SingleImageViz\n", - "\n", - "import utils\n", - "from transformers import BertTokenizerFast, VisualBertForQuestionAnswering\n", - "from utils import Config\n", - "\n", - "\n", - "# URL = \"https://raw.githubusercontent.com/airsplay/py-bottom-up-attention/master/demo/data/images/input.jpg\"\n", - "URL = \"https://vqa.cloudcv.org/media/test2014/COCO_test2014_000000262567.jpg\"\n", - "OBJ_URL = \"https://raw.githubusercontent.com/airsplay/py-bottom-up-attention/master/demo/data/genome/1600-400-20/objects_vocab.txt\"\n", - "ATTR_URL = \"https://raw.githubusercontent.com/airsplay/py-bottom-up-attention/master/demo/data/genome/1600-400-20/attributes_vocab.txt\"\n", - "VQA_URL = \"https://dl.fbaipublicfiles.com/pythia/data/answers_vqa.txt\"\n", - "\n", - "\n", - "# for visualizing output\n", - "def showarray(a, fmt=\"jpeg\"):\n", - " a = np.uint8(np.clip(a, 0, 255))\n", - " f = io.BytesIO()\n", - " PIL.Image.fromarray(a).save(f, fmt)\n", - " display(Image(data=f.getvalue()))" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [], - "source": [ - "# load object, attribute, and answer labels\n", - "\n", - "objids = utils.get_data(OBJ_URL)\n", - "attrids = utils.get_data(ATTR_URL)\n", - "vqa_answers = utils.get_data(VQA_URL)" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "loading configuration file cache\n", - "loading weights file https://cdn.huggingface.co/unc-nlp/frcnn-vg-finetuned/pytorch_model.bin from cache at /home/crocoder/.cache/torch/transformers/57f6df6abe353be2773f2700159c65615babf39ab5b48114d2b49267672ae10f.77b59256a4cf8343ae0f923246a81489fc8d82f98d082edc2d2037c977c0d9d0\n", - "All model checkpoint weights were used when initializing GeneralizedRCNN.\n", - "\n", - "All the weights of GeneralizedRCNN were initialized from the model checkpoint at unc-nlp/frcnn-vg-finetuned.\n", - "If your task is similar to the task the model of the checkpoint was trained on, you can already use GeneralizedRCNN for predictions without further training.\n" - ] - } - ], - "source": [ - "# load models and model components\n", - "frcnn_cfg = Config.from_pretrained(\"unc-nlp/frcnn-vg-finetuned\")\n", 
- "\n", - "frcnn = GeneralizedRCNN.from_pretrained(\"unc-nlp/frcnn-vg-finetuned\", config=frcnn_cfg)\n", - "\n", - "image_preprocess = Preprocess(frcnn_cfg)\n", - "\n", - "bert_tokenizer = BertTokenizerFast.from_pretrained(\"bert-base-uncased\")\n", - "visualbert_vqa = VisualBertForQuestionAnswering.from_pretrained(\"uclanlp/visualbert-vqa\")" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/home/crocoder/anaconda3/envs/transformers_env/lib/python3.8/site-packages/torch/nn/functional.py:718: UserWarning: Named tensors and all their associated APIs are an experimental feature and subject to change. Please do not use them for anything important until they are released as stable. (Triggered internally at /pytorch/c10/core/TensorImpl.h:1156.)\n", - " return torch.max_pool2d(input, kernel_size, stride, padding, dilation, ceil_mode)\n" - ] - }, - { - "data": { - "image/jpeg": "/9j/4AAQSkZJRgABAQAAAQABAAD/2wBDAAgGBgcGBQgHBwcJCQgKDBQNDAsLDBkSEw8UHRofHh0aHBwgJC4nICIsIxwcKDcpLDAxNDQ0Hyc5PTgyPC4zNDL/2wBDAQkJCQwLDBgNDRgyIRwhMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjL/wAARCAGPAlgDASIAAhEBAxEB/8QAHwAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoL/8QAtRAAAgEDAwIEAwUFBAQAAAF9AQIDAAQRBRIhMUEGE1FhByJxFDKBkaEII0KxwRVS0fAkM2JyggkKFhcYGRolJicoKSo0NTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uHi4+Tl5ufo6erx8vP09fb3+Pn6/8QAHwEAAwEBAQEBAQEBAQAAAAAAAAECAwQFBgcICQoL/8QAtREAAgECBAQDBAcFBAQAAQJ3AAECAxEEBSExBhJBUQdhcRMiMoEIFEKRobHBCSMzUvAVYnLRChYkNOEl8RcYGRomJygpKjU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6goOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4uPk5ebn6Onq8vP09fb3+Pn6/9oADAMBAAIRAxEAPwDI1SytpPEWqXl2ryIjQxLGhAJJjBySQccL6d6kttJsJFt0aI+Zc7jGQFwgBIGRjnke1Wm03UbnxdqBtJoFjkjQsko3A4VQMgqRnrWrB4Z1tYzGt3aBTn1yM9cHbkZ9q65y5XFPsv63JMePSNMKIGibebfzyQFxx1GMeg65p66Tp215jAfKFuJlUBd2d4XBOPrzitxPB+tHB+1WfEflfeb7vp92rtr4R1eOKUG5syxhEUfJIA3hsH5eR1/OoVRP+v8Aggec65olmZpp40wUhgaJcDgOXznjnpS3ek6ZZ6bdp9jZ5BcxrG+9VK5iJ5+UnGc5GRnjpjnrbzwTr8viBPL1KyR54AjBk3qQCSOCuOMcccVM/wANPEkpnMms2MhnwZN8W7JAIBGV4OCeRg81lzptuL/r7/Q0ukldHIT+GrC2uPJEkayrIIX2zxt5mTtbag+Zce+ffFc9r9pZCdra3V1RWWIySEEn5gM8AY47c/WvUW+GfiSQR7tYsCyEMH8n5yR0y23J/E1heJPhTrVvo93eNf2s7gqxVcgtlh6gDvSdr3QnNNWOR1i1tbi31eAWkEI067WKApEEKplwVYjljhQcnng1W8LNaR3sdrcBWt5ZB8n2SORpDwNu9jlO3K+tdi3w88capZ2zy/ZnQ7ZRzGN5xwX/ALxwcc56mpbf4beObTd9njsoyWLhgsO5GPUqeq/gRT5veuVzQOO0mJEguUura2WwhV/PZ4FMjMQdqh/vbs9ADjAJPGaytM0a3v5ri3AYzi3aSFV7soDYPr8ob9K9Hi+G/jqKxWyEVg8C7iFljgkILdTlgTn8e1V7P4VeNLC7iu7SOCG4i+4/moSOMdzipvqirwabMa18M2Gl+ItPuYHkeMajax27MR85EhEhPHQMnH1FUrXQ9Nury1ubJ7yANdSwSNvG/IUMGUgcA5PHOPU11q/DTx2iWiKYQLOXzoP3iHa+Qc8nnkDrTYPhf43ttghW3UJIZlG9DhyME9fSr512JvA5dtD0u/j0GzW3aCaS1aSSXzFAYK0pbIIHzHbwS3AwD0zWTrGj6fYxwTW/lnzMhoRdJOYyO5aPjkH0Heu2ufh/4t0uythc3VhaQwSfuJZZoUKk5O0OTnHLHbnHXis+68H6neFTPqvh8heix3dtGPyUjmh6rRBzRuUDZWm06SLSAW/9mC583yx5nmFBJu39ep24zj2zVPw7aWyxX1yy2cPlqipcXCeaiMW6FSGySAecHGD9a3z4Y1s2P2M67ovlY2/8f1vu25zt3bs7c84zilg8L6vbOzQ6v4fVXVVeM3VsUYDplScE8dcZ6nvRZ3TsNSiZrWyweJLqK2srKO33iSZ5oFdUiwCWBOdqnOQBzyAOan0LTtMuIbqRbUNE905j3k5VMDANZTXPiCO91G3XUNNcGfEvmiCRZGUkAgsDkDtjinWL6vZxsi3enfNIZGCyxgHOOMAjA47YqJtpe6XD2bl7y0/r/h/l5nT/ANk6f2s48bd1K2kadjC2cfBHPPeue/tHWgADf2Rx1Iki5Hp1/wDr0v8AaOtAhvttgQDkjzY+fQdai9Tuap0rp8pvtpOmqSDaRcdfmNOfRdOGALRAScZ5rATVdZzITdaf8x4zKny/Tn/GmLqWuBwWv7JgD0aWIA/kc0+apbdkNU01Zbb/ANev4HRJouncg2qHBxnmkTRdP3Ya1XJzj0rn/wC09b3vi9sVDdAJY8L9Of5099S1
[embedded base64 JPEG notebook output omitted]
uN8m9Srpk/f25IwOQeOmDzU1r4gXzAr69q88k8haXypJAGUKcbiXGTnHGOMdav6HZx31vqNv5t4bOafc8DM21267mXdgnpyc9K1iouEry/HzNI0Jz+FEPhi4Nvp3h6PSopBHf6tJDfKvImA8rEcnqu1mODxyx7VgaLq+naB4hfULS3uJYLd5Qghl2vtKsoZW5xgHOfau1k0f+zbCWOxkubSOchZhFuRZFweGAbn8aLa3gtru3NneyQ/Z93liIlTGW67cNxnjOOtc0nC9ubt1N54KsqcXbq+3+YeH7qEa1HqZvtS83UdKLwTajK081tiRlGW6lTtbDAD73TitHxFGvmWzifzp2tFa4lKlS75b5iDzyu05PJ4J61EjLDqDXyahcLfMMNPk+YR/vbs1A9v+9l865mleVtxkaPJOfXmtbQ5X734mcsHWs/d/Ff5nQataQ3BvQbXypILOCZbnc2WJEYwR93BycYGeKtPpsd14kvWuYrZoZr8wq0jSh855CBAecEcsMVgb7maAW0moXc0anOxwSAfpu9Kt2yaixmaC9vlMhzKV3DefU/NzRFRk9JfiKOFrcik1p6r/MbdWkWn2EYFslzJM0qmZ2YeXtYqAoUgZ43HOeoqXxVapHpTXSqJpJbWBHfJAt8QJgY7luuTxjpznCLZajFHKkd1eIk2fMVQwD+ueeap31jeiyui1xc4eHY+VPzKBwDz0GBj6CtPYvv+Ivq8/wCmv8w8MS3lt4WtJLBiJ5L7y5Ng5ZcfKp9iS3HfFQ35a2167+wuVSK4fyTH1ChjjH4VFokF5a6ahtru5h3qQ3lAruGTwcHmniG7gCCK7uIxG5dNgI2scAkc8Hgc+1J0G+v4jWHm/wDh1/maWqahO8WlajD9rQ4kXzXuN07AEZO/aOMNgHHHPpW1pF40uuWt3tkjE2mEgzSb5XxLjc7YGTxwcdAK46Z9Ua7W7Oo3huVGFmJbeB/vZzT7G7vbPUZL25e4u5ZI9haQnceQepz6VMqF01d/eP6tP+mv8z1cXWbCVt3Rv8K5y3uv+KlumB6xD/2WqEOuSnw9dTNbvlZQMbuv3fasrStYmuNelMdnJI5j5RTzgY9q554Xb3n/AOBB9Xl/TR1Gu+Lbfw1Db+Yks91dNtht4VBdsdTz6cVjav49tE1G00+4tbmSK6AxPtAVWI5Xr1Hem6p58+qR6pJoU08tpCRboSwIckZOcegrnvEN5q3im1jt7fQ5tPa3YPCzpubefvHgCr+pf3n/AOBC+rT/AKa/zOtTUoIt0QV8fX/69c1qd7ELa5UZ5ulIH/fVQX93NZzQLLbPGjEIC5xk/lXP6tqIUyrxzKD1+v8AjVYPDcuYUld/eKeHkov/ADPSJ9WtSp3B8fX/AOvXn/jrUdHkgt/tlpcSgycbWxg4P+0KWXXFPp7fPXMeJZ4r+GAGVVKSZzuz2qfqf95/+BFfVp/01/mRpe+GkUD+z7of8D/+yqWLUPDn22BEsLkOXXB39Dn/AHq5uSJADi5QZ/z61JaQxnUrc/aUz5ifzHvVfU/7z/8AAifq0/6a/wAzotVvPDy6lKkthctJkZIfg8D/AGqzLu78PNbuIrC6WTsSTjr/AL1Qa1bxtrM3+kIDlcD8B71n3EZCyOXJ8vAwO5oeEt73M/8AwIiVGUVzPb1NqO58NrDGJNPui+wEkP3x/vVvaNf6LcTBbe0uFdRjLNnj/vquEDggEjJxXSeCVEms7DwCpOPXkUlho3vzP72FNe+j3rTJoZLWJ0RgCoxmtiI8cVhabGIoFTstbMJ4GKHhY/zS/wDAmZJE8ufKJPSgZ2DnjFDnMLUD7ij2pvCx5Pilv/MzCK/2lr+6vzYhFNNONNNYU48lZxTbVlu79WdS0Zmaj/x+WP8A10/qKvHpiqOoj/TLH/rp/UVeNbQ+KX9dDaXwx/rqRtzTT7089eaYeK0IIyPmNIenFKfvGkNaVN16I5cJ8Mv8UvzEpp9qd07U39azOs5y+/5HOx/69z/7NW+OMc9qwL7/AJHOx/692/8AZq3x90etLqcq/wB5f+FfmxrfdP0oofO059KK0iejR2ZxumnHiCT/AK4/4VL4lMcmmqk2PLM0YbPAxmq1i23XpT/0x/wpviEJc2HkOWCs4zg88Vo/jh6R/wDSTLDaV/mYeMpCLqNY8yt8oUDPyjHHHeq9xHvAX7LMzlSD/oy5HoQmSD3FST6VZoxAlm/76H+FR6i+g2EC2lvNcXV3jc+1l2j9K0o1OeoopHbDFKUkuXt2v+Q6NbO6tzbOsZMLhVUoFXftzyOgPPT14p1rqIeZra1hknlRXUvt2oHAOAT9eKytBFpqbPPqNnczorE+VBIqgL7Ajmuyv5/C6eEbjUNJmvWu48Rx2koVWDHpxt5HuK51iYSk7bozqVm3CSjt/wAHyOc1C3uWhgWS6aFkRmkiiOG25z168c1LNFpj6UZzErFYFCzyfNyMcAnvnOfzrKvtDV9TWRnlYGMEknvk+1TPotulm7bpQRjuPX6VUq8VNozw1S9Lm5dGlv5aLp9/ysdLBfQ2+nxGUmNVRRllIzx29fwqsJNIuJGu1RBLAd5kVNpPbn1HtVW10OzaCMmSYZUfxD0+lW10Gxx/rpv++l/wqViIpp9jRSalGSV7K2v/AA3f/gjX1GayZ4lzelIt6qBiRiW7dj8p+vFWIdatpi6IrmdApeEDLLkentxn0qM+HrBuPOmIPHLL/hWVqngW0jmxp16ySAZdWORu9AQKv6zBlyq9of8AA/Df+rGyk8F14gtYt6oBBeLI5GdgMDcnHbr+VUxpemK9+Z4IVSz8tUF0ZHWUMT+9Pl5OCAMYwPmHJ707fw9I0sCX9xc6dIAytcLysisMNtxgjIyCD1zW1aeFrWO6Q2viXUEMMZWJkXZsTOcA7+OTmuariIrmk9LEzqWSlyaGQukaNqVxJDZophhmWaWXawxbFSXPzAHCFeuATuFT6fb2Op2OiwzaUpS9mnRX3N/o6GT+HnHyg5O7OQPxqiLeKNNVmhnvJXuR9lF1O/LqSC5298lcZJ6Z471bg8PXcds1umt3UNuwKmCNyFIPUEZwfyrWV1CM29He3y3M1K7+Ehi06w8mC2bTVYy6dJdteZPyMoc4x93blQpyM5PXtT5tHto/D93LNZWUN3awwSkRmQv87KPnyNnIfPynIxViPws4sjZjXLlbZjkwhvkJ9SucVJJ4anktRav4gu2twuwRM5KBcg4xuxjIBx7Co51e9wtq3ylK9i0nTNR1uSDw/Gy6deCKOJpHPmAuQXPPQYwMY+8M5q3retW0HipbSfSIlj/0dXLgZQGNMg/QHH4Vz8Gm30Ot6jcWmp3Rvo5gGkViHdWySxOckZAz9auzaObi7uHlvJridZdjbYfNbaAAOrcDt+FTUqKE7N/1ocuIxEVJxa2/r9TY1fxBK1hqRvNLQmzvlt7dZFHAIk3BfQAKvA9R61L4c8QwXDWll5MVoXuCLmHySRPGdvdRjgZzuIAHNYMlpfXNssj6jd7bXcYHlUsEAPAVt3yngcDvSXlve21rPDb6rcGC
RgblAxALED7wzyD61HtIv3SViIuoall4kuItOup5bRV0pd8cMAQYuHOcAL0wMglu3HcisnTLs6/LcaYunQrcTQMbciMD51w36hWH407TxqwsIILPWLyOBZWWSOORlRFODkgNjB+b9aw47LUG1GP+zJp0laRhBLHlDx1IIPGB+lWpx11B1o3s+m/9fkegW5srHWdIntbFFt7vUIYbXKjcqqSshz1zu2n8a5m1i0zV2s7s6aIBPcXEDxLI2CEiV1bk8H5ucYBwOBWVJHfWd1aWrXF/FFb/AOpkKOjJlgWdFJ45HbngV0E3hvV4/EtrpkcjWenfaZPsN0If3c3ytyhB/eFkVe5ySAaIu63Gq0Z200IY0s9Yl8OWM1lFGh01pmmVpdzBDMdhwW4Zl5wucnjsKwtaXTM2Z00QyyyhlljtVmMYYHjb5oDEkHkc8j3rt20S8s5tOsba8vUe5sbm4jsprJYmE0ecAQB2CucnpyfTmuU0qPUtc8QGHU7q5+2tBcRK1wCWimCuBF8x+Qk4AHHJHFWnZhKpFqxo3rSpDdaXNC8emRaLHOsTrhVlKId+OzeaSuevUVU8JRy6Xo2p38sV5Zw4iAu7eAmUBiSNvI+UgcncP4eucVFrulazZaLp+kzS3reVZvdXNptfFuQzEblzwcc8gYBFEMeoaZ4bk1hdTuIbm9mEEEiOQ5gjCAtuBzjJVQP9gjtSuthSqx6m5coujvqV+Le/t57nU0hD6fiNyhTIO4r3JOVAGSMcYqKw1i50C31uC6t2vrZNQa3F7HKqOko3YI6kggE4xjin3ml3+mHWNTGq3cNndJbrZXSAqLss6EKrB/mwm/Ppgg9ag8KLfy+A7w6ZpceoXX9rxDy3tlmITynydjZH1bqAc5HWrpScbruOVRTskNk8Q6w2mJfObo2TzNEsglGC6gEjGcjhhz05qpceJDqUTR3lo90zHOXIY11DadbTtZWOjywGxTXr5ULr5yYEcZCgH7/ooP3uPWi90SBLuxuotPWTU5NNneK0ubBLbzZ0kwu+3Viu4IWwv8RVeOeYajLVrsEqk5QUW9Fsefy6fcTwS6hZ288dtDKsUnzBiruGKjGc8hG9uPpWwniW6n1eERSPj5E+zu2S5z2966TSLOS803V49es4ra7F9ZNHZmEW8XneTcbEkUY2qeD25wCQCSOd8LzXcfxEBvbeKyuPMkTa6CMxTFHEfGAFw5X0Aq1U0sZ2crryNXUNWvrXVDbXVhc21ywBEMpZGIx1AIzU0t3qlvZC9uNKvUs8488hghOcY3bcdazvENrq9tpuhWcsM/8AakMt1MYNhMiQYQjI6gZWZvoSelamjHV10PUbi501LW2nsZGTU5FkxggERrlthLEbRxuBbPas2oXWhtQU40uVPSy7DI9fRrN7ldEuZreNgjzNcvsVj0BKgAE4PFaNxJZSQSA2zxFlI3LcMQPzzSXL2g8C6hBa6rZXFtb/AGZtgWVXaU7y5OUA3E8DnGEGT6517f25t5REWGVOAV56dDVOcV0KUJy+018kPg+yxQRoLsDrgu2AefWn3Fv5JXzZTHvGVJJAb6etcxNcym2QRqzAZ/h96hW+vmtjblZDHnIBB+U+1HNB9F95PLVX2n9y/wAjpmtkPP2n8c1EbFDz9p6+/wD9esGO4vgoK7sehU1Kl3dDkq2PTFTzQ7L7zRKp/O/uX+R2FvaoPCV7F5uVaYEv2H3aqeHdRg8N64t9LKjxrG6EFtudw9frVa21WVfBGoExkuLhcKRyRlO1Yi29tqtnqF07JbPHNAoefcAoZXyMAHOSo7Hp9aidSEbO23mTJSs7zf3L/I7y9+JE1xJ5VlLYRAnALnef51Xn8Y6tp96Yrm5sMgAmMxbTgj6/jXnQ0lv7QksnuLeG5SXylSQt87ZxwQCPxJApt9ZpFotvfG4X7TI8iNAyvu+UgcfLjjOTk+mKPrC5krLX1MHCS+2/uR6Bq2uWHiGCGMvAk4nSTKyZAI4xjtXNapZWst3JvvYo8Nxuxz+tY2mWVtLDZPefaA97cmCIxYAjxtG9gQcjLdBjoearW9jHceIRZ3rFIQXDMJAnIUnG5hgcjGTWlCtCOKjUt8N/6QPn5WuZ/cjYOk2bDi/h/If41TudFsyOdUgTHfA/xqnNZwW2qwxXFleQ2zqCFW4SVnzkZWQLtI/A9DVTVLP7Lql5aRBzHDO8aluSQGIGfypxq029F5/1qP8AeS05n9yGatpo0+6WEzh8qHztx6+/tUNko/tK2O8f61P5itnUfC+oSzBo/KYCaO3ABOWLdGHH3fmXJ/2hTR4cEtrpklvLbrcPHK7KWfM5SR8leMD5VGM7c+5qJ16aloyIN8quynq6j/hIJfnH30/kKgu2/d3CZ/iU/pWv/wAI/Hcy3MtzPDa7LFLmJh5hDEyqmXwrepGB6r71nyaHdC1klEsG8xef9ny3mGMfxDjHTJxnOOcVSrws1fqbqX7tx8/0M5cFR8w6V0vhZxb3TT8DYhwScVjabp8Elrd316lw0FvsURwkKzsxOOSDgYVj0PapbiwGn6xLApkkh2K6MRglGVWGffBFCqLn5bkwupI9b8LeMrjUoJESxa5eM8mNu35V08fiO8Vwv9i3G49tx/8Aia8g0PSreKWyuIWe3S6k8oxvcxzkggYPyAbTzjaea07PVLZZ9O1H7DfR25vfKYEb3XaVORgDrnp2IqVXjJpXM0mesx67fS/I2i3KKerEnA/8dp0mu368DQrpgOAwJ5/8drO0nxlpd/MbRLxWY/cLKy5+uQK6qO7tmiA+0RdP74rfmXJv1MYwl9af+FfmzFi1u+lmVG0W5jB6sSeP/HajfXb5ZGUaHcsASAQTz/47W011bZ/4+Iv++xTftVv/AM/EX/fYriTX1h6/ZX5s6VCXMcxeaxeyXNqzaPcIUfIUk/N09qtNrt//ANAO6/M//E1dvpY5byy8uRHxJztbOORWia0h8Utf6sazTUY6/wBXOeOu35/5gdz+Z/8AiaYddvv+gJc/mf8A4mugYUw1pqRZ9zEbV7wQiT+ybgsxwU5yP0qE65fd9EufzP8A8TW7/EaQ/rWlTdeiOXCJ8stftS/Mwv7bvv8AoC3P5n/4mk/tu+/6Atz+Z/8Aia28g0hOOazsdVn3OKn1O5m8X2RfTZoyIWGDn0b2rYk1u7jDZ0ifamcvk4wO/wB2or//AJHKxP8A07t/7NWtf/8AIMuf+uLf+gml1OVJ/WXr9lfmxtlefbrCO42bd4Py5zjBI/pRVfQv+QFb/Rv/AEI0VrDY9Kh8Jyts4XXJie0X+FVdZugI/vcBsntVa81GLTr67uJX2qkQH1zjgVy09/c6pcF5iUg/giz+prR/HD/DH/0kjCq9f5jr3VZr52jtTti6NL6/SoLa0RHOB1XknqadGoUAAAAdAKWQzgxpbBmlkcIqqMliegArOhNRqxk9rm1FKElJljSmk0+cNG3ynIIx1zV2HPzgnOT1xWf9g1dbtbYCJpSrMdk8TBAvXcwOFx33EVLHYa4000YRQYFV5HaSNUCt91txO0g+ucVj9TX
NzaX/AK8jRStszfRm1PVI7aAxRSNEzAO/GFVmPb0BrMuNTj+yOMox4/j962dNutdttPaSXUBBZQaZLI0KvF8zuxUM6dTlW+ViPTB9cyz1P7T4YnW41CSHSYrZIHjQxuvnNkgiEgfPlWbduzgcHtWksPHmvZHLTlOMFG5f0a4/tGe2sofJEsi4UtJgcDPYH0pF1WIpuKIF/vGTioLzW9StNftEv9Y2m3nwsazK8KIUPzomP3YwRhfes3XpftcFhfpq0kmlb2t441tVRrfGCQE3YY8gk5BPftS+rw7Iv2k+52DyyW2lC+TTIymwSGSS5DMqE4D+WMMFJ4DEYPFUItU1DVMx209lCY8Es88UROfeQjJ+lZmrXrWtqmqW+rNKL/TxbiGW3SNwiBI1+UM3H7s88fd6mq0Gm6xZw6dqOnNayvLaNNILr7PIuQ7htqOCCAqqTgEjPan9Wj2QlOfc247LWNRkubZkLyxP5MvnTJHsc5AUbsAscHCjk4rM0+SawvpBcXDzRKpVoGIUqc+uM1Lqz39/IILbU0Z55U1CSS4kjjBZxgsGIAwD0HXmq0M2vP4xvr/iHbdObh5vLiUCQnIy2FyQW4HXtWU8JFxastRzqTlFK4+/1e1axjSKFAquMASdOD7VZm1mNrlUVAqkdpf/AK1Lq0t3/wAI9bNbXSzNHYPbP/q1VYhMQSDgcEo3OckvgdcU2LV7nUjdXGqarE+lWk8U0CLEknlpuwEVONgIIBXgHGcHFdUqadOMLLS/42J5533HjUk/vf8AkSrU0jQ2FteM6+XcM6oBKc5XGc8f7Qqje6zq9vqtrFfatFe3KymW0nKK2EeMGM8g4XJQhegwcVsXl5qunDTf7T1SWe7i81RPLbLgM2MssrAliMYBI9OmKz9hHXRA5z7nHhbW71O6kkkYEnosg/wqb7JY/wDPaT/v6P8ACuoh1W5tPFztFqLTSXWmruuJAnmS/vCwyR/ENowc5wo9K1NYW0uNQS48uGaSSCF5iACWfYu7J9cg5981VRJyv/WxMqjbOSvfDL6fbLcXSSJGWCn/AEhGKMRkBgOVJGSA2DxTLHw8NSB+yOHbdtEbXcSO59FVsFj9Aa9D1UWhTVZlmtphqF0skIVlLY3M25h/CRnHOOpqvocDWepLdJJp0SxyBJHlaEsoBBJXOSfYpnpUciI52cEvhppdPN8JEjt8uoaa9ijLFQCwCsQSRkdB3qlJ4ftX0Q33244IzsIB/ix1r1LTmTYY7mSyfSvMlYiYx+dz3A+/uOB049a3I1T/AIVE0QUZAORgY5npqKE5M8ftfBUFzDBuv3USqCAEGemcCrF38N7dBF5N9cMJI8gFRndk5/pXvsBWTwpYtydlvGeTyDtA4pbogT6XIc4+UZzz2rRxShfzOec5czV+i/M+fofhkLyyuGS7uHuLdwrARg8HHB96hg+F8txEsqXMxhb/AJa+UNoHrmvp1Nscz/3pSTlT1wMc/lTNqGBrcAZAGR/D+VTZG1z5wm+Et3JO5tzeNH/CTCDkY60aP8MoLxpEe9nYqSNsaqCSO3519KFtkeBnAHqa5JbK1t9UNxFEFklJLNk89fypxinJIyxFRwpSkuiPGE+EOqXE0iL9pZ4yA6+V93PSqFj8MLy+a7aNphFatiR9g9/8DX07G48yZ9oHzdeecVyfh9l8/wAQkqBmbkH3L1tSpxdKo2tkvzNeZ3R4vZ/DOO9gaeDUWkiVghdduAxGcflV+L4TyszI1zcBwGO4Acbc5yPwNes39lZaf4eiS0tIoA9wGbYgAJwatTTbL2KTCgmBznHXO6vPm2n80dFZWowkurf6Hkdl8KrW70prmWS8UhwgmR12591K/TvTp/hJLpBEst+WiLgFgoFd/Yata2ujFJriKJjPnaxA7CuQ8W+LlvruPTYZA8YnU706HntWskrGEptRfo/yZzV54V02O7ZZNZWM9lfbnp9aYfDulKg3awijsSy/N9KyvET3UuoSICzqpGNxzjgetVruOU21p93cE7/QVi4q8f66GuFm3h7vsjTl8P6cchNehx7lf8adLp0TRuR4htiQpO0KvPt96udaKX+Nhj2qaMhUO0rnHpWnJF7oXPLua8GmIYEY69bpnPyFVyOfrTjpkGc/8JHbD2Kr/jWKZdqAsAR7Cq84DruRuD2o5Idg55dzp4tOt88+IrVv+Ar/APFVZi0u0Y8a9asO4wP/AIquMgfa2D39KtENGVlQ7SOpH9aXs4dkNVJ9zuYtCim8NXsVvq8UkrTja6qDt+7xjPP/ANeuFvHu7e31DTyGkdriNjLjH+rDr0x33evGK7Hwq6totwwOB9q/9lWuO1O7c6pe7scTuAcf7Rp+yh2Ic5PdksXjGa2vZ5/sk6NJcicCG48vPAGxyFyy8dOOprLvNb+12Jgkt5FlWaSWKRZBhQ5BIZdvPTggjrVOdvmznk1WJLE1Cw9OLukQlY29J14wC1gntGuXguPNt2Em3ax28MMHcMqDgYPXnmobm6jXUSLu3uJOSZVEoQtkcY+U7cZ75z7VQsG/4mNsP+mqfzFXtVXzPEFwufT/ANBFbYWjCWMjDa43flbHvrkc11aBrSYWVqhVYVmHmHktkuUIzk/3en50zU9UXUNTmureCWATOZGR5BJhiSTghRxz0x+NaWlxWUPyyxK+04Ykcisa7QRanKiDChjjArNUoJ3SHqtbnU3XiS7s5oAbFv3dmYsEkZfjbJ07bE4/2etYNlr80V3pQ8gsbRHixu/1m9mOenGN/wClbOujdexqT/yyH8zWAiRpqdsB185P5iieHpRk0l/X9MzpLmgmXp9faK+NtcWkjRG0W0kRZAjH5xIGBKnHIHGDTbrxTcvp5sna+Vkh8hRHdlIiuMDdHt5OOOCAfSqOsf8AIxS/76fyFUr1c3Mhz6fyqvq9Nq9uptyp0+bz/Qt6dq/2aGe2uYHuLacLuVHCMGU5BDYPqRyD1q1Ffy6pqk00ltJuYrsRG+VI1GNvTJ4CjOR0PXPGREEA5Na+hf8AH24H/PM/zFCpR5uawQ+Jam5aR/arWO2tNPulsxOJZGL73kYAgAMFAAGT2z+Va+r3F2dMkeO3vIhbqZFkmZpGDYHJYjtgVn6FqJt7YJJKscUY6k4AqHXvFCX1hLZWm7bJw8zcAjuAKXsad07EK+5T8G3k0/ivTo5n3RtLhhgDIwa+ioNNsSBmAc/7R/xr538EWw/4SaynbOxWJU+pwa+iLa4DxqQea19nDk2W5zqpP601d/CvzZJLpFkDkQDH+8f8ai/sqy/54D/vo/41o7g8XuKZiuNQj9Yat9lfmzpVSfNuUV020jdXWHDKcg7j1/OrBqRqibiuhRS2RTbe5GTxUbVI3eoiRTAZ/FSGlP3uaQnpV1N16I5cJ8Mv8UvzG/hScdxSmkbmoOo52/8A+Rysf+vc/wDs9a1//wAgy5/64t/6CayL7P8AwmVj/wBe5/8AZq17/wD5Blz/ANcX/wDQTS6nKv8AeZf4V+bKmhf8gK3x6N/6EaKNC/5AUAx2b/0I0VcNj0MP8J4dez6helJvtA
aSZNzeZtUDDY44AHQVWji1XzHUMoKAFiSoAB6HPTFWoLi38lDJGW8pShyoYZLZBwTz171Obi3uYrg7GRBHGpKqAeD6A4xUqV2teyN6NOm3dPXyfl/mUlh1LyJpGnRGiZVKttGcgnP6D659qltL3UtJ1Cy1CZVlSCdJDHkAnvg45XIzg057q3lSWNomVTs2Ecn5VIGefenNcQTOzCDMkzLuDjheQTjnn9KEkne/4A403pF/j6/8D8xYbyw06e5iSe+ktLy3aCV3hjWSE7lZcAOQ2CgzkrkE9Kmuddgm02exgaYI9vDbW7yIuW2OXZnAPGSxwBnHFV7pYTJPDb26l2lJO5OFAz7+/tTYpbWJY0eNZZY2LARRjaenBJPtWnMuXR/gQ4JT5b/1f/LX8DUn1axlW9mYXn22705LRkWNTEhXYNwbOSDs6Y4z3rL054E0+80y+a4FvO0c6zQxKzo6bgBsLDOQ7d+OKvTaFc3F2ip5nllfmZUAAP581Nc+GFS0DyT7RGuDhMliT1PPvVytzfF+DMKdnG7t95lajPb6lq8t5LLLBG8yARBFZlhAx1z94ADjGDzyKta/La3hhTS7m5+zQExxW0lssYiXudwdtzE8kkDP0wBauLCwtWhDq7MYlORGDke/PWp3ks4ZDG8DhlG0jyl6+vWs5TVtHv5M0jGDbUpWMrWS+pagHtsxWsUSQQpLgEIowM4zyep56k1tW99ZQW+lyk3BvbC0kgCBF8uRmaQgk5yAN/oc9OOpriazyvySHAIP7peffr/nFOle1jLRtHIGAA/1a8d89aamk9/wYKML3uOXULPzUM6kMtlFBHK1sk/lup+Y7HO0gjjJ5HpTtR1ex1Oa9jlkuILeSWOaJ4okdlKxhGVlyowcZBBGPTniCQw3EZWOCYqFwxW3U7ffOeKzLb7P5klttkeRiCNsQLZGe2fek6nuu7/BhKMdEnfQtXGt20nh220O4E3kQox82NRuWXzHZTjPzKVfBBxg8j3paReWkVpd2t48/wBnu1UeZFGCyMjAg7SwBHUYyOvtSXtuJbgxpbzrMWGIfI+bGPTOf0qJzAsiQvHJD5ahSGiAbOMkkZ9Sa0ck4xfN+DIdk3+BtwTW2q+J4HTzUSJYUt0KA5SNAAWOeD8oPGeSa7i/TTbqK6ykzG7nFxL5jHCkbunPP3j6cVyPh02CaxbyMJwu3adsCk8IR03V2n2jSnJDG7wDwBap/wDF1UJUmvenZ/4ZfoTUTUmo6r1K3hqz0LVPH1rbItxKLeE27pMnlrwGIIZXJPWvVD4Q0AShfsA+YEnM8n6fNXi+hXdpB4m1OdJbqKRSdhihUMOf94YrsU1qaaKSZNS1ciEAu20fKCcdPM55xTfsL61P/JZf5GVpPWx26+DvD5ZkNgABjgzyZ/nSDwjoDRsx08blJwPOfI/8erhf+EicPuGp6xk9TsH/AMXQPEDDIGp6wEPbYOf/ACJRbD/8/P8AyWX+QuWXb8Ud23hHw+Iw62Izxk+dJx/49UfiS1ttM8HXNpZoIoBjC7i3Vwep571xqatdyWzzLd641uh5cQ5VT7nfVDU9SvNS0ua2trjWrucgbYlh3Z+YZ4Dn+VFsP/z9/wDJZf5Byy7Hp1owHhm1jJAX7JEQv4LTpn3vpwJBKuMD04WvObLUrwW8FqJNcedIlV4BASwwBxjdnj6VfsJbvVLjy4L7Uo2jYBlmTaUJ9PmPp7U5OhyfxOvaX+RhOEuZu3RfmekmUtalw43gEFyOR60skwCpIrAAsNxA5NciuhayxZRq9/s6Yw2P50v9haw2V/ta/wAg+jf41HNhv+fv/ksv8jo9nPt+KOrupTHbSN7VhYBuICWx7fnWXd6JrAi2vrF8Nx7hv/iqqXGi6nCEjfU7vcRkOQcj9aqDw7mrVP8AyWX+RhioS9hO/byO72RRW8itP8zbuMetcro5ijl8Qfvek64yOvLVn6jpOrWtv+91m8jDHGXyP/Zq5O4ikiFwsersTKQXbOMnn3560vrGHp0ppTu3b7Mu9+qOhUptrT8Udp4p1uwt9Jhi+0JvVw23OOxrgdZ8ePeWbGzUosaiInPJ9cfnWHfWoDEyamkpz/FgkfrVT7BA1jKP7SgPzg71AwOnvXk1K8G/muj/AMjrr039Xp+r/NFK6leQ7y7ep3HFQRTq19bgsc+YvT6ip5dPtpX3y6tEgA+6cf41Z0/SrWS6ieLUrVwrr2yev+9XQ60Nv0f+Ry1IS9m/R/kytqkiJqs2TjJHX6Cpbu2SW3tnV+QvHOc9K0NR0SCTUpZP7QiUnGVYD0HvRJpluY4Qb+KPauASOD096ydaF4/5Pt6FYKD+rf8AbqMDywrYLEH1I4qGRIMksoDdiORXSJpVsw41WBvbA/xqq3hu1USu2oR5AJAA4/nWirw/pP8AyKdORjggRLmQYxwMVUkIOcMPqK6u28J2t7ZpL/aSqT1XA45+tQ3Pg23h5/tSMD3A/wAaHXhf/gP/ACF7OVjkF+/1GK04ZBsxlT6CtAeHLNTzqkOPoP8AGnx6HZwtn+1rfPuB/wDFUe3p/wBJ/wCQezkb/hSNBpEyk7d11n2+6tcDrYCapeAsAPPk6f7xr03w9pludCnKajEw+05yAPRfeuJ1DSLFr+6Y6zbAiZ/vY4O4/wC1Ve2glf8AR/5E8krnJSABRgk59RioM8mujl0ewkGDrlt9SB/8VUX/AAj9jjJ1y2A+g/8AiqX1in/Sf+QezkY9h/yErbH/AD1T+YrU1M+Xrdwcc/L1/wB0Voab4ctPtkLpq8EhV1bAA9f96na1pdqNTmk/tSAsSAU4yOB71rgq8Hj6dvyf+Qp05KLMia+YsHi2pIOvvUcsy3RVyhWQDDY6H3rVTQrAj/kNWx/Af/FVIuiWKjB1m2/DH/xVZfWIf0n/AJFezkT6yM6nHwf9SP5mqBtJG1C2cgbfNXp9a6y50W2uNViLanEuYwApA55PvSXOj2lleQo+qxbi6kIQB3+tXWrx9o1+j/yIw1P92mzktWgUa3KQBncv8hWVfw4ndvpXY6nptm2pyyHU4A2QdhxnoPesu/0y1NvJL/aUO/j5MDPX60lVXK/Xs/8AI7XGPsfn+hFPpE9tp6Xz2tsLdkDrvuI1kZd23IjJ3kZ7gYq7caDq+jTP9o01YnSTyJFimSRkc9FYISRnBxnr2zVyS80ybQvsF1dNcTm3CQpLaxj7M2/duWbdvx975MY5ro9aurHRfF2tXVo80t3NqayyRzKojTypd5AIOWywHUDAyOetedGtV5lG13r3117309TnSOFvNM1O0eCK5sYh50hiTFzGyq4xlWIJCMM8hsEVo6d4ble4nTVLVI4xZS3MLQTpKsu0HGGXKkZBBwa2WuNPuL23Ekpu7EXX2iS0Gl20AB2sFJMePMI3fxYB9Oa2odWFpLZTWscjS2lvcRq5sooVZ3wVPlodoA79T9aUqtZxSUdfn8hcpzFlpOrLqIjjg8h4UVmLXCRiIMDtBJICsf7pwfatuH/hIYrae4NzcwwwyPFIXuthDqMlcFgScHoOT2qW1ura3a7RY
pYorqVLljJaxXRjkAYMoEh+ZTuyGyGHTnkmjq2rG5smikMrSm9luDI8aoGVlRRwpwD8p4HFaxqVW0nGyuu/z6mfIudvy8ja0y5vrmze6uNfvreASLEpV2cliCem4cADk+44NNurnWrS8ltZNVvTJE5jIE7nJBxxzWNo+rWP2RrC+eUJ54nR4ArEnBDKQSOoxz2x0NbK60s2qPqyRB5jcNJ5TDKDuOQQcg/y60k2q07ptW0/TX77lWNCaz1eO6s4Dr12TNE8kreY+Itm7ePvc42mqN3dahbJDNDrF7PbzglHd2RgQcEEbjg9O56irR1623WGNP8AKWKKWGYIzcrJvB27mPZs89/asq9vIJIYLS0ErQ2+475VCszMck4BOBwB1PSpozqqcee/X9f+BbyKaNW+i1iC3iuIL28eE20cz5uvmGQMkLnO0E4zj8ajuU1U6g1vaahcMFjibMt4EJLoGwNxGeScAUkmr2PlLKi3P2pbH7JsZV2ZKbS2c56E4GPfNKNY09p55pIX3skKxu1vHLgIgVl2ucDJA+bk8dKqFXEWva9r9P8Ag9O4NIrRf29L5rC7uUETmNzLdeX8w6qNzDJ9hzUt3/accMU0F9eMn2ZJ5Xe4ICliRgHI9OB160ahqun6pJcib7TChu5rmEpGrEiTGVYbhg/KOQT1NMvNYt73SbewlSVRbwjynUD/AFncEZ5UjHPUfpWzqYiTi3G3ddlZeev4W/PGkopO3d/mNsprye2murrWLyC3iZY9yMzsWbOABuHZSTzUF9dapYX01rJqNyzRsV3LM2G9COe9Nsbq1FjPY3vnLFJIkqvCoZlZQwxgkcEMe/YUs+pxyaw2prGDIs6ukEi5TaOgJyDngduatSqqpLTTp26W1++/bQ00sR6tbXlhq+kT3OrXHmTo6y4LEwbeo68n5uRxzkVpM11baw1mdRubmBrUyAyEjcGg3jK5PTPr2rI1fV7PULjSbU2YtniaR5pY97kKx52hn5PfnHIHPWtWS5srvWYprKS4bFoY2E0SpjZBsBGGbOdue2PeopSrc0faX+1fa2+mxk0vaO3ZfqdLof8AyArf6N/6EaKTQv8AkB2/0b/0I0V2w2OvD/CeR29vCbG6JhjyJePlHotMEEP/ADxj/wC+RXX+FPCJ1m1fzroRRSzH7oy3AH+FdXf+CdF0iwjkjheaUyhS8rZ4we1Y4WLc36/oVhpxVSz7nltppkl/J5dnYmd/SKLd/Kuq074YateMpuraCyiPUyAbsfSvYoIIbaMRwRJEg6Ki4FSitFBGTrPojhrf4WaIqol0XljXny0AQMf9ojk/TOK6Oy8K+H7CMJb6LYKB3aBWb8yM1r0tUZOTZQi0bSyvOm2fX/ngv+FZ/iLSNMTQLpk060VgFwRCoP3h7VtLKqIe59BWb4ilVvD90O+F/wDQhVT+JmND+HE5fUNJtPLiey0qznu/sttlGt0fEZD7mwR6hct29qfJpGl/b79o9NillW8KvHFYpcYjwMcEjaCd3zD06jvrnw1Za19knuZbhWWzjQCNgBgDPcH1p48A6V/z8Xv/AH2v/wATXPT95P1f5nRLQ5b+yLKTS7j7PpcdvEnmt5s1nGwcBjgeZ1VscYHU/WjXtL09I3a0sLVhlPtLGFd0bbRtA44U+o6nOewrqx4A0r/n4vf++1/+Jp3/AAr/AEn/AJ+L3/vtf/ia05dSbnD2mkx3UGktaWULeRfM10RGvyL8mGf/AGcBuvHX1rC1Pw8lzqi3NhpokWR3WONYuJADnAx1wPTnmvRtQ8CaXHeWKie8w8mDl19R/s1bufh9pPlY8+8OTj76/wDxNYVZKMJt9CqmkEzyHXNKuJ5NHig0CFbpISX0qGJ9wUOx+YbvM+bPTOQOlUPEduqX1mJLK2tJfsib7OJP+PcjI2Nkli2AG+Y5G4A9K0/FnhGLQr57UvMU3gxsSPmU59vwrAtdKjexnlSWZXjVmULjk7o19P8AaNdkoNUoyfn+g0uZ6HqFiuhZtC9rYQyXFuNT+WJVKxIEEij0H+u4/wBkVFpmo2N3ptvdrFatp0kMz3lyIxiKQF8At/AQAmF4znvmvKm8P3ks7rjLKQrbnQfMf4eep9utNh0S4MkKgMGlzs+72JB+nQ9aw+ZXI+x6Kt3Z2b314bS3GiNYJLFfJCuXuDt3L5nUtvLLszwBnHetu71WytrDVJJLYQ6UpgEFysSgSxNIo3Kw++MEEnnB446V5bJok0TJLDI/ltbrIxJTd7kL1x74pZNLu4oPOaaQJgE/dyAehI6gH3p/MXI10PVG1PTRrWn20mnXQjn1GKG3mexSOCRCTwHDHzQeDnnp71zV54u0ybw/a6iYvLLXUtuPKhVchVjYA4643detckdLvlaJRO7GVxGux0Ybj2JHQ/WlTS75pdnnvgEBsSR8E9v97g8dfahadQ9m30PRNP8AENtPZadqESznTYbScXUoUbImBk3K/PDMCuB3yMVzH9svqEZgtNHvr2R4hOtuYG/ex7gMgKQxXPdT29qwXs5W1eSxivJgBM0YZscAE8nj0FVdTg8qwW4ivpp4ZCU+ZApDDGQRz6g00wdNnqA1eK6e9sjZ3EtwLG2Emm2f+sT7vygncfkwMggnnnpkWLbxvYaT4i1Lzo7mQwGBmEaglcJkrnPLD7p9wa8xtdOaSCAf2hKtxJAZkiCAjaATjd64B7VY0+R4LS8lt9TuUeNA0gaBdpOcAA7vU+lGyIlSb0PW/wDhduioTjTtWcH/AGAP61H/AMLt0hWLDStVOexx/jXkceqaxOC0dzIwDqh4Xq2cfyqxbS6vLfNBPNKuPOT5dufMRC2Pzx+dPnZqqSfQ9Nn+NOkTEFtH1Q46DI4/Wlj+JNrrX76PTruJYhtw7cn9fevJ5brV0ZlldnxCZVKOhG0fxZGQQMHgVeD6xayiCK+Vy0av8jx8AqrHODwBnqeuM1UJ2kjDE0OajJJdDsdX+KdnqUIX7DcxEdCxyf51ysnie3lYkQyHPqo/xrmLu91G3cpcyYfAOPlYEHoQRwR7itCQ3VrFOI78tNalVuI/KAC54+U98Hg8CpbvuaKO9kSzX8V1KcxPkDptqtMxuNInCqY1DjAA+lWLFp7uHzJb54wZViQLCHJY+vTA/P6VbtfNuZXtJNSmR1LbiturIAoyTncPQ9qwnG7v5o2qXnRhHzf6GI+m3F3IoBUrj6H+da+meGjFPDKxdXVwQNowefXNO05bh4GlLMzhyAQAOwq7D4nvtPljtWnnYSOFIZsjk1vc5Kq9x+j/ACY68a3t9RlF0kr5xwuD2HvTLmLz4kaGOQJjOCmePzqteeIJY9YmDP0I6jOOBW5/wmD6XYh1udrSgHb1z/nNZveP9dB4P/dv+3Uc7b3ltG5jNvIXHBBUf41LdahbtbELbzx/KeAo5/Wur0TxFBrYkY2yROozvJ5Jqa7gN/ZyeXvQBGLMe/HatCkzhbHWVt0QIkuRwcDr+tW7vXo5Y8CGTP8AtIK6TTtBWWxhYSMrMDnnrya0I9PWOBhcBTj7uBSY0eXXN3EWD7JM55+U
AUz7VDId5jkPoAK7m/0uIqSq4OeKypIVt4fLTjBwfrQBp+ErmGTw/cKIyo+04Ix/srXnGoyWv9r38cm9X+0PtYDj7x616loTLHoUrdzcgfoteVajdeTrl8+wMwuJME9vmNUSymbaSSdY40ZixwuB1rrNb8L6fpvhO1nJuv7VDHzsrmJgTwBzwQKytL8W3en6lBcAkLGeQp5xXW6r8RL/AF6NrO11CTyCPmhn43+2aBI86tZ3S8tnJbaJFzj2PSrOpTrNq87qGAOOv0Feq6HrV/e6RFpV7otlPbbgSXADZz98EDkii88L6TZzvfsvmPNIq7WGQnH/ANYVeDa+vUwl8LPIoElmbEMMjn0Vc1baCe3YC4t5Ez2ZSK+orGKztF8m3hijC8jYoGRWH400VNaslKKDNGcof6VmB5pFcWkOsRm5heSMxAEBc45PvTPEPkXl3bSWxnwsikB0Axz7Gusjt3tLjDIUJUbWP3SeeKoX/iJpJVtZ4dkokCup6deCKuq7TZOHTdNHmeszNDrcu5T95fr0FZt3dl3cBTg4rr9Zjil1SVhGAcjn8BWNf2i+VI468VHtPd+Z3Kh+5evX9C3ezousIsqsUNuFO0e5rp9M1a1vrhUCzK3/AC0GM8DuOayb5dlwp/uwh+Ovetzw/KtsiXqNuYHEi+3rWWFf7qJglaojvbTxTothAsNrZ3MaD+7EuT9TnmpT4003/nhd/wDfsf41v2l5BfWy3FvIHjYdQensaW4iE8DxEkB1KkjtWpCOUk8faISUH2jf6BF/+KqWPxtpckeFjuXA4I2KR/Osqbw5qFnfIygyQgk70PseoqPTrPVYtR3QxyhvMJLEYGM96t/B8zlT/wBpf+Ffmy/9v0C/vo2SyuIZ8nDpGF7Hrg1R0vxPH4c1WeGaO4kspZW+ZUHytn613qk8Z696R1SVGjcAqwwRXGn/ALQ/8K/NnRa8jIk8a6YeRBd4P/TMf41U/wCErsYWaRorkiU5XCD9efetXSdUR559LkkBmtzhSe4qeI/vZ/8Ae/xqqv8AEh8/yKXU55/GWnl8+Tdf98D/ABph8Yaf/wA8br/vgf410T/fFMJ5opWvL1/yLlsvQw28UWS263BiudjHAGwZ/n7VCfF+n/8APG6/74H+NdBnk0mfWumpuvRHFhPhl/il+Zz58X6f18m6/wC+B/jTT4usP+eN1/3wP8a6HOBwaYcc1B1nJJqMWqeK7SaFJFRYmU71xzhj/Wuiv/8AkGXP/XF//QTVlz8pqtf/APIMuf8Ari3/AKCaXU5V/vMv8K/NlXQ/+QFb49G/9CNFJoX/ACA7f6N/6EaKuGx6GH+Eg+Hv/HhD/wBdZP5V0viX/kGx/wDXZf5Gua+Hv/HhD/11k/lXS+Jf+QbH/wBdl/kajCfE/X9EZYb+N8zbFOpBVO51jTLKbybrUbSCUDOyWdVbH0JrVJvYzLwpcVmf8JHof/QZ07/wKT/Gl/4SPQ/+gzp3/gUn+NHLLsBZEJPK884xVLxFGqeHrsgc4X/0IUsXiPQwvOs6d1/5+k/xqtq2r6NqGlz2sWt6YryYwWukxwQfX2qpxlzPQyofw4mjpH/Htbf9e0f/AKCK1RXAQanPBII4/FOiqiIFXM6dAAB2q2ur3hPHizQ//AiP/CuejCST06s6JvVeh2wpwrjl1G8PXxhoI/7eY/8ACnf2vNHz/wAJfoLH2njP9K25JdjO50t7pz37RFJWjaMkggZ9P8Kgm0W8WMFtSm6+/wDjWEPEF9/D4s0FR7zx/wCFRXGs3siAv4v0NhnoLiMD+Vc2Kw96UnbW3mX7eSjyp/gih8TNHW00WG8kuTdSJKFw46DBPqa8nsNTGn2rSKmXfei8cZ3Rnn8AenNd3478SxNowtH8SaVesZAfKtGVyODySBXn2j3BS1b/AE+1T5zwzgdhXZVpJUYWT3ffyNadabev5IsQ6laxRNCgdI/M81GaCOVgSACPm+g5H5VJHdMNLuriVW3tIwgkIxkvw/6Dt0zU32s/9BOz/wC+1pDdn/oJ2f8A32tcvI+35m6m/wCrFH+07PzQ6rP9pS0+z7So2nKkE5znoemKfNqVs6zyKspmuEWN0OAqgFSSDnJztHYYzRHdY1KU/b7XO0fMXGD0q6JpnXK3tsw7EMP8KFTb6P8AH/IUZyf9IhbW7GN4fLicIl5HPgQom1FzleDljyOT19qzLfUrUQCK5Ew2T+chjAO7gZByRjoOefpWpI05HN3B+Y/wqo5l/wCe8X5im6cv5X+P+RfM3/SKX9rRprb3yxFo2mZ9p4JUk8fXBqO+vrI2cdrF5xgRmkZ3jG4scDG3OMfKO/c1bJkz/r4vzFVb0ubSTMqHp396Xs5fyv8AH/IG3Z/8D/MtaZrKQWccYklaby2RUMS7UBB6PndjnOMAZqv9ujGmy2yIfMMpeUnoQBhR+rfnV3RWf7VajzUxt6Z/2aspu3an+8Q/e79OtV7KXJez38/8jCdSSlbyXbqzG0vV4rCWZpYmYNH8m3HDghlP5irL+ILdpbZhDIBHbSJJ05laMpke3C/rUR3Y/wBbH+dRSuUGfMQmpVKb+y/x/wAjdycVv+QLrdtFZxxGJyy20sXAGMsxI/Dmr9hrNq8jXCLIGkt1gmVolYKVVACuThvu9CBWBK+8nJzXQ+HWZdLuTHIqNuPLdP4a2hh5XV4v8Thr4qUacmraHP6nq0d1cpsHmIiBFLQpFgZJxtXgdavXGuWdwt08MMouL1lM4cAKnO44OctlgOoGKyggLBsU1X2ueoz1pewl/K/x/wAi/bSOm0/VreziaNJbmICXeJIkG6VcfdYbuB+JHJ60RX8ax3zrGY3mO1AOiKTkj9APzqHT55J4lCTDaOMtxiurtUvEUA6haAn+9tP9KynSmn8L6d/8jWpUapQku77GJpd20Ni7CJziQ/MPoKa+oQzXcINrlzIo3Ee9aWoveopLXtpJnqFxn+Vc6gkS7idJkz5gPXpzVvDN/Zf4mU68lBrTZ9F5lnVBCNQmzbxs3HJ6nge1YOpzNKIwECrHkYByO3+FaWpyub6bzJ4ycjJXvxWTNMHXaTnPaiOGkrPlf4mVKtJ0Ypvouxd8PawLS+AlyIzgcGvV4tRhGnyhDuDRONy9vlOK8NfYCcI1dN4RuL66uza205UEHKSHqO9XKEo6tWGmetaJGJtOtZAD91gf++jVm6t90XlkkbsjNXNDs/s+kRQuQXTOcdOSTTruHdKoHY5qBnLRHz5Li2kx5sJ6ewrA1u3MF2uPuyrkfUVp3twlt40kYHAlHI/Co/EaGURlcfu2Bz9aQxNJ/wCQFP7XIP6LXkmrbn1m9UAkm4k4A/2jXtnh+zFxpF0mOfPz+gqt8PvCWg6h4k1Jr5DNeRTOTHNwo+Y8gd6pEs4fw58Ltf12ZGa1NvbjBZ5DjIPTFd/D+z8nkK02tskpOdscHb65617NZadaWMKx28e1R6sT/Oro6UmwPKYfhh/YkAew1S5ZEGWW5w2cemMYrlvE8ptNsUkyZZwdufY17T4jiuH0O7+ycT+UduBk18qa/wDb5vE
k9vIsrS5HBBJ6CrwWuPp/11CXwM9cttZc3AKP/Dj61tw3V06Fi4ZeoBFcFpnh/XNH06KXVIhEpIGCfmH1rduteh0ywLu4bjjnk1i20y0k0S65qdvBaskmNxwpH1zg/mK4XUX83V7VyytJtQSYOcn/APViofEd1Lc6iMscFAf1NVrNT9phyc/Ov86utL32isLTtST8h+pqf7RlPuP5Cs69H+iyfh/MVq6kP+JhKfp/IVl32RZycen86lfC/U7F/Bfr+hf1VjHcbuP9QmPzNGkzs4mSLO1BvA9vT8qr6xKDJ5X8Xlqw9xzUOiTtBdSuBkeXk/mM1GF/hRORL3zrdH1e/wBO8uayuV6fvIm5BHY+46j8K7ew8c2cqhL+J7aTuy/Mh/wryOFrm3voltgTCi70z0KHGQfoeR+NaMmqOHYDBAOAQOtbtGCZ7PHqVndxZt7qKTP91qmQnHLDGPWvGNK1NX1SFSuCSeQMdjRceIHS6mQT3C4kYcSEd6pr3Pmc0X/tT/wr82e1b1B5ZR+NYeq+KbDTFdEk8+4GcRp2Pua850nWTPqsCNLM+SeGkJ7GqV7qkaX9woGMSt/OuRL/AGl/4V+bOm/vGkNTuo9UGoCRhLv3NtHUelemaZfR39ubmM8SYYj0rxk6oW3YzgDJPPSu68IzT/aYTGSLdoPnz/EcDFVV+OHz/IpHaMfnppNITk0n1pUt5+v6I0nsvQO5pD+tGaac9a6am69EcWE+GX+KX5gfWkOaOaac+pzUHUI/Q1XvsnTbn/ri/wDKp3PHpVa+4025548l/wCRqepyr/eZf4V+bK2hf8gO3+jf+hGik0P/AJAdufZv/QjRWkNj0MP8JF8Pf+PCH/rrJ/Kul8Tf8g2P/rsv8jXNfD0f6BD/ANdZP5V0viX/AJBsf/XZf5GownxP1/RGWG/jfM2xXm/im3s5fEeqyzqHlWCFFDQq4AIPTJ4Jx1xx2r0gc9K4DXNJ1bVPGV/b2FoJUeCJnJZVIK/Uj+9Xbh20ptdv1RkznrvQdOa8nkkVYIzN5aBFJGcegIwOR+dRL4Zs96xSgJNI7RxqMkEg45OeOeO9dYPC3iTe7zaZC4Z/M2tKmFb1Hz01dA16MjOnpJIrFlcyoSpPUj5qftbvd/iI5B9AsY7aGTaWklXeEAOAMkcnPtVy28PaYUto5LUM9wGO/e2U5IGOcdu9bJ8MeIpLePGnDEa7FxKmT1P973pE8OeLlt8vp9vAig7JZZk3Jnrj5gPzzSVVqWrJhZxVjmINN0yO8PmwRlMf8tHbA9zg5q9PpWmx3ipBYxSrIq7QXk2kn+7yDj61UvRb6Owe9hjusfJhJA+9s5/hbr249Kz5/E+szXCS2OlxReWNqea2QvpgZHrnr1rGniLRacups6UnsjVvtJ05buURW6pEh2/fbHHU8n1pdSstBTUrtI3RpI7gJJDEpAj3vtGMHHBOMcVxdxaazeHN3LI+f4Q4A/Q1qXF/qc9zJcDT7SKWWdZ5mi48xlOQDljx9MZ75NWsVe92P2DJrtdO/wBIMRMFvDL5BmaEuWk5+6u/7oAzk4PPSobPwtNc35iu70HEssL4U7EZdu09eQxYDtUEMmoxmcSWNvPFNJ5pjlPyhxnBGGB7nvg981d059cma+i8pXa6YTyOWUMGDZ45wATj8hWVTExUG5N29X/mVKm0tjIfw5ZxWbzXF00KxQxzSKsO5hvYgKBuGTjB5x1qtHoyW+p/2eZBIGkQJJgjKuFKnHbhhxV/VrnVLoXkk1vCv2woHCEAKF6BeeOg9arRLqGoyG7MSq42ICjAY2IqjqeuAK2qVf3UWm92NRfNsXdQ0/TntLyS1tPINncrDnzGbzFbfgtk/e+Ttgc9Kj0a0sLqQW9xYI0YBee5Mjhok9Rg7ePcHJOKtXcmpXa7XsLdFaUTTCM485/Vvm9zwMDk0sD3kFi9mdItJYnk8xt0jgk9gSsgyB2zWf1h81+Yfs3bYz9LtbG6+0xTaeixwwO8l55j7lbB2DGdvJwMYyfWtHTLO2bTYWaLJOe59TVaKa7FsdLbSrR1jy+8u4Ysf4jtcAkDgZHH51o2EclvYxxyDa4zkZz3NZzxFRJck3cujBXd0JJY2o6RA/8AAjVWSzgH/LP9TV92461UlJyeay+tYj+d/ezqUIdim1rD2T9TVO+t4ls5CF7DufWr7HnnNUr8k2cnPp/Ol9arvRzf3spwhZ6F3RYI/tVq2znb6n+7VlYIw2pfL13Z/Wq2i/8AHza8/wAP/stWVJDaj7Z/rWjxFb2fxvfuzlqwjzvTov8A0ox5I4Uz8owPc1mTupfAHFW7uXZGe5qjGm7LvnFVTxFfdzf3snE8t7JEW0tz0FdT4ahSWwulkXKbiev+7XNjqWOMDpXUeEZY1WaCVWYuC/HTHFdEMTWcleT+9nm4pJUZdNDlEDzfKicL3p7Wkg5YAj2rqIm0fy9gtZBwe/8A9ekH9lnzCLWXgDPzdf1qfrVf+d/ezo5UUNOls/L8tkbeP73H8q6NG09rcyGNTj0Y8frWWn9lqwdbaXkdc/8A16fHcaf9ilDxyxrvAJJ+nvXPUxNe/wDEe66s3qqPsKenV/oSXT2LKdigfVj/AI1lLbpJLAfLYKZQM9jzU01zo1uC0aTS56jd/wDXqO11vSY7iJfInVPMU5ZuBz161v8AWa/87+9nPUS5Xp0ZW1rS7iO+lZUUxggABuegrEkimBwUIIPHFejM+jXzNP58bc/d3+2PWnW/hmzv7xYFBeQ8jJOQPzpLF1rfG/vZyUq9KNOMZbpLo/8AI8zKPnJBq3ol1Jp2sxXUZIKMD9fUV3+s+GbHS4i15HsA6EkjJ/OsWG38PMEczIjDsZOf/Qq2lWlPDPnbfvLf0ZosRSvdfk/8j13StTjubKK4jx5brk+xqW7uFDRuCCrdTXA6Tqmn2StFBeoInGCPMBGfXrVqfV4FREW+UbTkfMP8a4nOJoq8H3+5/wCRianqMNxqouAcyeYB9BmtK5vY50YZycis25TRri4EzzRK4bcSr4yf++qT/iV4IF2o/wCB/wD2VTzo09tT8/uf+R3PhKSEaVdOxx/pHU/RaxbvUEs/EUur6U8SXcBIljznzFzg5p/h5LZNLmFvPvjM2Sd2ecD3rFa10ldSnk89RL5jbvn75570e0QlWpNu7f3P/INc+JviJb8XdhdPHE6geV1VT3r0v4c/EdPE6pplxFL9uij3PKej+pryu40/R3ODKm3OcB+//fVWtCu7Dw7qH2yxuEjlIw3zdR+Jo9pETqw6X+5/5H0bc4NnN/1zb+VcR4j02zj0G3vEt4xcSTqGk28kDd3qtpfjuLU7doFv7fzShBQlcnip9YkupdAthN/qPOBVscE/N7fWtMJJRxkJdjKeJpqLTv8Ac/8AI6u8s454yHRWQDoRXz38SNKm07xI58tUt5OYwh4/Lsa93aTVj1Q/98//AFq4jxtaWt/at/abIsqnKEnB/pWbtYunjKSe7+5/5Hluqrm+T/rkP5mmWo/0mH/fH866G/tNLNwvmyqGCDHzdufeolttJRgyzI
CpyPn/APr06rXtGa0cXSVJLX7n/kY+pf8AH/L+H8hWRqDYs5Pw/nXS3dnaz3Dyi+iUN2OPT61lapp9uthKwv4mIxwMeo96SkuX5m6xlL2XLd79pdvQz9eYpfQuvURL+PJpmmKHupiPuiEsPzFa+rabbTTxM+owxnyl+Vse/vUWmabbQmYrqUMh27cDHAyPessJL93FGX1mn7RLW/o/8ilJct5cYC4BjFVTubqKuDTLZbGFTrEHzMzZ456e9RjTbX/oMw/p/jXTzI4/rdLz+5/5E+iA/wBsW/ynqe3+yar3wP2+54P+tbt7mr+jWFumrQMurxOQT8oxzwfemXNhA17cH+2IgTKxxxxz9avmXJ8zmWKpfWW9fhXR935CaBu/ty24PVu3+yaqagx/tO66/wCuft7mtjRLGGPWLdhqschBb5Rjn5T71VvrCBtQuSdYiUmVjg445PvXIn/tDf8AdX5s6PrdLm6/c/8AIyWldbebG45UD9a9g8Kp5el2ZIwfJXt7V5tYaLb3VxFD/asUheRQFAHOOfWvW7SNYQFXoowKdWS54er/ACLjiqTu9fuf+RqZoz71ErkD7po3n+7SpPWXr/kaSxlKy327P/Ik7UhPJ60zccfdNG5v7proqSV16I48Li6SjK9/il0ff0HcnvzSE03ef7tJuPoajmR0/XKXn9z/AMgb7pHeq1//AMg25/64t/I1YJJHSq9//wAg254/5Yt/I0XuzOlUVSvKUduVdGur7oq6F/yBLf6N/wChGijQ/wDkCW/0b/0I0VpDY9PD/CO+HEDzafbbR1kk5rq/FFkItKiZ3/5bqP0Ncf8ACy8aex8iNsSQTODgc4K5/rXW+K42XSo5Z2wPOX5pGwOh9aWEXvP1/RGOHdq9vM6IS2sXES7yP7oz+tc5aXEreO9RKAJm3X37JVXVfiJ4X0gmP7a19OP+WVopf/x7p+teeXPxH1WbxBd3mkWcdkZYwmZ8Oyj5ecdM8V24eUVGp/hf5oy9nOVtD21o2KGSZzsHJaRsKP6VzOqfELwvpBaJtQF3Ov8AyxtPnOfQ46V45qV/qesvu1fVLi6/2Gc7B+HSq8ccMK4jRQK4nV7HRHDr7TPSvEvj7WNLnWy0m1t4w8Yk8+YbmXJIwB07elcBqWo6prDl9X1a6uc/8s95RP8AvlcCtrxdLs1eLP8Az7r/ADauYkYFuTU1ZPmY8JCKpRduhpsIYNBt1ijUDzDgAfWqQmJ7VZmb/iQ2/wD10P8AWs4N71x0Xo/VnbIsl89aA3NQB8mpFatSSYN7Vc0vUbW1vn8+XZ+7I+6Tzx6CqAPPWtbSLmG00HxBOFuluiYYhLBcCPCsG4+6TjI5GeRgcUp041ISUtrGNeTUdOpgX13DJAAjZO4Hoaj0e9gjtXDPj94f4T6CtzxJYaeda1C/1N71kn1P7JGlo6qUwqlnOQc/eGF4zzyKq2HhKztby20i9ubp72+v5rSGaBgI4iknlBmUglssDkAjA55rvlyujGPRN/oYqpPmvoN/tC2/56f+On/Cj+0Lb/np/wCOn/CqOqWen6fpGmMhvJL68thcMxlURx/vHXAXbk5C+ox7543NFht/K8Pac9rbyRaskzXU0kQaRT5jxja55TaEDcEZzzmsOWHmX7Wp5GOl7CNSlkL/AClQAdp9qsnULY/x/wDjp/wrndLvjaayJXdQmMMXt0uNoOMkRv8AKx+tdPqd3Z2PiK2uoFENldWiOZm06GXfxgusDHYpLKRgHjnHWly0/MUalRdis19bn/lp/wCOmq73UJ6P+ho8TLaw+JnMMMkNjKsMyquFLRsituAGQu7Jbb2zjtW7N4ItIA/m3d0fIuJpJtrAZtFEpRxx1Jgbnp8y8UuSHmX7aouxzhuI/wC8fyNVb2RZLV1Ukk4wMe9araDYi1a1E95/aS6YNR83evk4KCTy9uM/cP3t33uMd6fe+F7OWS+0uzubtNQsJIEmmmcGKUySJGdqgArhnGMk5AJ4oUIX6jdapboUdJuI4Z7dpHwFXng+lTfaoc33z/6zO3g89a3bDSdPvdKk0jTJb2IPr1rayS3bq2cR3A3jAGM8/Kc4wPmOeII/DOkTX0K/bZI4pBc+dDHqFvdSqI4mkVwY+ACQRtI4x1Oci+WHLbzMJzm5NvsvzOPlRXbnJA/WoZVJwiA49au69aW1tY6bf6c90kF7G5MVxKHZGRyp+YBQQeD0Fbc0UAt5NGFrbiBdAW/Fz5Q87zzCJi3mfexk7NuduO2eacVBdwqym+xyOx2lUbcItb/h+aK1ndpm25jIHBPcVieG9UFlqK/aZwkLjEkjWMV4V+iSkD8cg13dve6Tpuua9ZNEtk1xcQSWby6fFdiOIq5PyufkDb42+XJGMY4rWDipJo48RBzpSjLZnNWVncXEDyRRlgHxnIHvV6HTLxXJaE7SMH5h/jVX7Pe6dd6jp9xMRcW908cvlNhdynBwBjjI9KuNcSrlGmkwcYO48Uv3fZ/ga/vPIYumXYJUwnj/AGhzRdaXef2RMPs+5wwIG4c9Pejz5/PIM8nI4+c09rif+y5h50m4MP4j7VhUdK+z3XY3rKr9Xp6rd9/Iwx4f1KSMMbcLnryP8a3PDnge+vJlWaILDuw5bkgeop/heG+1vUEsYbktMDkh3PSvovQtBg03T44XjRnA5JGefqa3Xs+z/AwftF1X4nn1p8N9EtLq3uFmB2r+8XBwW9elbF5o9rZXf2+1lXIwpUKRkd+a75rW3xxBF/3wKq3VmkkLhIIskf3RRel2f4E2qd0efeMra31LwpdCJVklWMtGpXvj3rwoaLfMRm1A/wCBD/GvbPFusf2Vod1p7222Vo2Ak6HnPSvGYJ7lhk3Ex/4Ga2lKmsK9/iXbszejCo5dPxLFvot2o5gx+I/xqy2l3h/5Y/8Ajw/xpIpbgDHnS/8AfRqQSzn/AJbyf99GvP5qXZ/ejujGr0a+5lc6Ref88P8Ax4f40g0e85/cf+PD/GrBlnxzNL/32aBJPj/XS4/3jSvR7P8AAq1buvuZ1Hhizlt9FmSRNrG4zjPbC1zN3pl3/aVy6xcGZiDkep966zwyXbRJyzMT9o4JOT0WuUv5pxqN1iaQASt/EfU1bdKy0f4GUVV5nqvuYo066PWH/wAeH+NPXSpjyYf1FVBcXG7ieT/vs08XVwvJnf8A77NRej2f4FtVu6/E0bXR3W5jkaEZDDnI9a39Q1rWotOWwhO6BJQ6g444Pr9a5i2vpvtESmV8bhn5j60mtX0qzOomkGGHRj6VtgXS+u0+VO/yMa0ari72+5nf6j458QXdusSqsYxhimATXB6iNVvpiZjJJnuzgn+dVpr+fHE0nP8AtGqwubhjnz5f++zWbnTff8C406kdrfczZ1OwuXukKxkjYO49TWYylGKMMEHBrZmlk/tiBTIxUpyNxx3rKuR/pUv++f51rXjG7lHuY4WUuVRl2/VkDciqGoj/AEKT8P51oHGKo6gB9jk9eP51nH4Pmdr/AIT9f0DXh/pUX/XFf61T00stxJjoYzkflWzf21retHJ9vhTEYXGQf
61HYaZbLcN/xMYWyhGBj29648PWjCMU/wAn/kZJbHN3HyR28Z42xDj6k1BketbsmlWrsCdUg4AHAH+NR/2Raf8AQUh/T/Guj61T8/uf+RyeyZX8Pkf25bc92/8AQTUF44XVLrPTzn/ma3dF0u1i1aB11KF2BPyjHPyn3qtfaVaNf3DHVIVJlYkHHHP1rT61T5Ou/Z/5HKqT+st/3V+bDw9/yH7X6t/6Cap6irNql2Auf3z9P941raFYxRa3bFdVikxuwgxz8p96c9jENVnc6pFkzOSvHHJ461yrEQ+sN/3V0fd+RrytSJvBVqZPEVqrj/VK0rA/gBXrMPUntXK6BaQQyC4V43kK7d4HJFdRCeT71pKpGdSHL59H29C4qxfU/LS5qND8lLVUt5+v6Iuey9B+flFJmg8DrSZxXVU3Xojiwnwy/wAUvzHdcU09aQ57Uhz6VB1BnDYNVr//AJB1z/1yf+RqxuBFV78j+zroEc+S/wDI0CexV0P/AJAlv9G/9CNFJoZ/4kluCOzf+hGiqhsb4f4TzPR/Ed14fkuJLKeWC4MnDooIwQB3qLUPEF1qcxl1G/vbok8ByAo/AVd1NiHvOeki/wAlqh5xZRk1z4WV5fP9DTDx/eadyOPUrOEfu4XH/AR/jUK6rF9tkfbJgr6D2qyZfeq6yf6W5z/D/hXXh/hqW/lf5omV7r1JP7XhJ+7J+Q/xpDq8P92T/vkf407zD60bznk1yNo0SZteK9etrnVInjSYAQKPmUep96wDqkJ/hk/If411Xjlsa1Dz/wAuy/8AoTVzW/j71VU+JmWFT9jG3YtS6xbnRbePZLkSEngY7+9URqkP92T8h/jWxM3/ABTtqc/8tT/7NWcG965qVrP1Z0yv3IhqkI/gk/IVINVt/wC5L+Q/xqVXx/FTw+e9ak69yIatAP4JfyH+NSw38zxXEMLlbe4Ks6FRklM457dT09akDcda6TRD+4T/AHW/nROXLRqS7L9Uc+Ivyr1OObxTrME09yl2pluJVlcvBG48wA4dQykKw7EYNTaLf67BYSRwXqBWld90ih3RmADMrFSyE9ypGadet+4Xn+IVcDAdDmuuppQhJdW/wsUqa53co3FlqF3HbpPPEy28XkxDptTcWxwOeWPX1q3bS63Zae1lb3kKwNuAygZk3DDbGK7kyODtIzUm/wBaN4Jrl5mackTOshqNpqP7k2O6OExgSW0bqyk5+YMhDHJ6tk8AdAKv/adba9e6kuLOWV0EeJreORAo6BUZCqgdsAVXjYf2nLz/AAj+lW94z14o5mTCEXczb601HUbuS7vLlJZ5MbnJPYYHbgAAAAdAKtzah4gl87zNRVhPaLZScD5oVxhfu+w56nnnk09nz3qEuPWi7L5IkTXetf2Z/Z/2yL7P5flfcXf5ec7N+3dtzztzj2qvqmoa3caWbee9jMS7MlUVXfZwm5woZ8dtxOKtM3HWqd+3+hSc+n86FKVxunGxoxat4i1M28UuoRAGZbvKRJGTMqtiQlVBLfMck8njOcCrxg8QTTPcC50+ORFkUNFbxxkh1Ktu2xjcSCRk5NYkDt9ni5H3B29qv2TE2110+5/jVxqK1pK5z1qLtzRdtunmVLjw/q1xaW1rJc2zQ2wYQrnG3ccnnbk8+tSyWPiN9K/ss39r9k2eX90bym7ds8zZv2buduce1QhuelIVJzxR7WK+z+Jbw039r8P+COsdN1yxunkhfSvnREZXtIpFwowDtaMgN/tYyTkkkmtTTIfEkepXNwb2xluJ2815Z4UlYMMYKlkJX8MdvSsUR4OQtXtDH+nysFOfLP8AStqNSEpJNficWOo1KdGU1LZdgg0i+Cyy3E8UkzyGRnLklicZJJHWp59KuXAYSRe/zH/CudQuFdSGw4446HtUmX+zhCrcNR7Sn2/Ev2FZfa/A3F0y5bHzxbhyPmP+FSPpdy2nShXiyzddx9vaufgRwxyrAgd6uwR+ZpMylXUFx29xXPUqU77dV1N61Ct7CneXV9PNeZLp+n6xpOsRX+n3MUUq4BIY8juOle++H/GiX0CQ3FnNHOqgHBDBjjrnivGtF8O2U8iu8mou56iCED9SDXrvhXT9PtQI7TcJR9/fMXf8fSuiM4Pp+JjUpTivi/D/AIJ1I1m3wNySA+mB/jTTq8BJwJf++R/jVmVLcmMSOpfPy5PJNIzqpaMEFh1welJuK6fiZKNT+b8P+CeafEq0Gr2SvAuHRTkvxxXl1toFwka5eL/vo/4V6j8UtTA04WUEqvLgmQJztGOhryuzidrWP92x684961lOH1V6faXXyZ00KVa/xdOxeXR5x/HH9Mn/AAqQaTN/ej/76P8AhVfyn/55n8qPJl4PlNj6Vw89Lt+J2ezr/wA34f8ABJzpE3ZovzP+FJ/ZE/TfF/30f8KgMUv/ADzP5UnkTHpE35Uc9Lt+InSr/wA34f8ABOu0C0e30maNypJnyMH2FczfaRPJe3DB4gGkYjn3+ldL4ailGiT5Q/8AHx6ey1yeoW0/9oXJ8o481v5mrc6Vlp+JnGlX5n734f8ABGnRrj+/F/30f8KYdGuefni/76P+FRG2uD/yyNQtaz94zUc9Lt+Jp7Kv/N+H/BL8OkXIuY23xYDA/ePr9KTVtIuZbpyHiALDqx9PpVS3tpxcxHZxvH86ZrdtN9qd9vG4d/atsFOm8bTstfUyq0q3K7y/D/glr+xbn/npF/30f8Kcui3AP34v++j/AIVRFrPnBUfnUv2SZe3X3rL2lLt+P/ANPZV/5vw/4JsS4/tmDn+D/Gsu5/4+ZsdnP86m0+KRb6PIHU9/Y1DdRyG6lwB989/etJ1FKnzeb/JGdKlKE+Tsl+bISOD61Sv/APjyk/D+Yq2UkA7fnVO/Vvsch4xx/OpjJcnzOtxfsnp1/Qqsv7iP/dH8qk01T9qb/cP9KljtjJbR/MB8o/lUthaFLhjvz8h/pUxkuYiMXoYrLmoiKvGz/wBs/lUbWo7sfyoUkZuLJtBH/E7t/q3/AKCap6iv/Eyuv+uz/wAzWpoduF1q3O49W/8AQTVS/t1/tG6JJ5lb+Zrbm/dr1/Q4VF/W5f4V+bDw4P8AioLT6t/6CasSQltXuMf893/9CNO8PQKNeteT1b/0E1pW9sG1G5Ocnzn/AJmuam74h/4V+bHUjaVzqdDbbGo6YFdXA3y+9cppqlAtdLbt2611shGmh+Q0oPpTFOYyfenA81z0t5+v6I1nsvQk/gpM0nRBSZOK6qm69EcWE+GX+KX5jskj6U08UdBkUmetZnUHSq1//wAg65/65P8AyNWP51Wvz/xLbr/rk38jTEyvoRzolv04Df8AoRopuh/8ga3Ps3/oRoqobG+H+E4a8MIup45ULBmB/QVXZLNQCIW6/wCe9O1EH7fKcen8hVZ2bbg56151Gnea1f3no0aMXJPXXzLPlWZ6QH/P41WjjthqMo8o7dgwPyp6M3YGoUJOoS9fuj+ldGFh7tXV/C+vmjKdGN4779y8I7PPMJ/z+NLstBx5B/z+NRjJ7Uq5yRiuPk8395r7CPd/eb/idIxq
kYuVDv5C4I9MmsYJZ5/1JrovEgsZ9QjeaZ1YQqPlHbJ9qyBDpn/PxL+X/wBat6mHvJtS/wDJjzcPVhClGMozuuyY+WOD+yoMx5j3nA9OtVDHaD/ljWvImnjSYQbiQJvODj6+1U/L0zP/AB9S/l/9auelh20/e6vqdEq9NfZn9zKfl2qn/U04LbdoTVkppeMfaZPy/wDrUuzTNo/0mT8v/rVr9Wf83/kxPt6X8s/uZAotwf8AVVt6YqGFfKXbwev1rN26WMf6TJ+X/wBatjSRZbRsmcrtOCR7/SpqUHGhVfN9nv5owxFam4pKM9+qZzk1tbTIEMRAznP+TQLOMjHmS/8AfR/xq4P7Nx/x8yfl/wDWpynTgf8Aj5l/75/+tW0VWhHljPT1X+Ru6tBu7jP7pGa1rFn78v5n/Gnx2kR/5aS/mf8AGrrrpvU3Mv5f/Wpq/wBnZyLqX8v/AK1O9f8AnX3r/IOeh/JP7pGclpbnUZUBcOFyW3Hnp71bFjF/z0f8z/jRF/ZY1KVvtMpfYOCPp7VfB07acTyfl/8AWqZSr/zr71/kKnOg0/dnu+kjPNlFnBkf9f8AGl+xxN/y1f8AM/41dJ0/HNzJ+X/1qcBp4H/HzJ+X/wBap5q/86+9f5GnPQ/kqfdIofYYs/6x/wAz/jVPVbONdNmIdyQB3PqPetoHT8/8fUn5f/WqnrC2I0qcrcSFuMce49qalXvrNfev8gcqFtIz+6RFZ2KvZQEPJzGvc+n1q5BZBY5QHfkY6n/GpbD7CLG3zcSA+Wv8vpVtfsW19szEY59v0pc1e/xr71/kRVnQ5NIz6dJd0Zg08f8APVx/wI/4002Kg4M7f99H/GtLOn5BFw/5f/WpxGnnnzpP++f/AK1RzV/5196/yNueh/LU+6RmCxXbkTSEexP+NWLLTt0p8u4kU7T3P+NXl+wAcSyfl/8AWqzZ/ZDMdkjfdPb/AOtWlGVf2ivNfev8jmx1SisNNqM726qVuhhDT1I/18n/AH0f8aZ/Zqk/62T/AL6P+NbGLLb/AK1/y/8ArU0NZAYM8n/fP/1qy5sR/OvvX+R1c1D+Wp90jFuLCNE5kkPPqf8AGpjpzeSUS4dMnOTn/Grd79jEK7JpC27uP/rVYY2OOZpP++f/AK1JSr3+Naen+RtJ4dUoS5Z7vpLpbpcpR6ZKE/e6pOF/uLnn9auQPeQQ+VDq9zDH2SLIA/WpF+wsP9fJ+X/1qbixHPnyfl/9aupYzFraUfuj/kZSqYeW8J/+Ay/zFilvoJvPh1S587GPNZju/PPAqvJPqhlZhrV1luSdx5/Wp82P/PeT8v8A61MBsQf9fJ+X/wBam8bjH9uP3R/yBVMKv+Xc/wDwGX+ZjXdpctDMzahMxKknOeePrUNhYztZRkX0i5zwM+v1rbuzYG1m2zPny24x7fSq+m/Yv7OiLTOGAPGPc+1dCxGJeEk+aN+ZdI9n/dF9YoKovcnt2l/mUjYTg838v5H/ABqRrG4CZ/tGXH4/41oMbA8md/y/+tSl9PC4+0P+P/6q5PrGJ/mj90f/AJE0+s0f5J/+Ay/zMoWExB/0+X9f8aQWMx/5fpR+f+NaJfTz/wAvT/h/+qmeZpwz/pT/AJf/AFqpV8T/ADR+6P8A8iT9aofyT+6X+Zf0fSrp9MlZNVnUCXG0ZweB71z15Zzi6nDX0pxIwyc88/Wu10GSyOjzbLlmHn+nsPauWvp9J+23Aa/YN5jZH4/StniMRyrWP3R/+RMViKKk3yy+5/5mV9jl/wCfyT/P41DJbS5x9rk/z+NaZutFUHOoH/P4VCbrQc5Oon/P4UlXxPeP3R/+RG8VQ/kn90v8ylFbSmZB9qf7w5/yakvLFzO6vcu+CDyPb61Yiu9BNxHt1Fi24YHqc/SpLy+0JLyRZdQZXGMjHTj6VP1nFKqrSW3aPf0I+s0L3cZfc/8AMr4GelB4GMUh1Tw6D/yE2H/AT/hTX1bw3/0FH/75P/xNYfVp+X3mrzKl/LL/AMBZPYn/AE2P8f5Gq1yx+0y8/wAZ/nUtjqXh976JYdQleQ5wNh9D7VDc6r4cW5lD6hMHDkEBDwc/Suj6vP2KWm/fyOb+0aXtW7S2/lfcgc8VRvyPsUn4fzq4+qeHWGEv5yx+6PLPX8qp319of2KRPtc3n8fLsOOv09KcaElDpv3N3mFJ0npLe3wvsOgcC2j9do/lVi0Obhv901BDqPhwW0aveXIYIM4jPXH0qxZ32gyTsILq4Z9pyGQ9PypRw8lK+n3mUcxpNqNpf+AszWbioHcCrTXnh0/8vt1/3wf/AImmG58OH/l8uf8Avk//ABNNYeXdfeYPMqf8sv8AwFkuiNnWbfju3/oJqrft/wATC5/66t/M1atNR0CzuUnjupyyZwGU46Y/u1h3WppNfTuqEo0jMDnqCauUHGCXmY0q6qYmU0mlypaprqzb8PNnXbb6t/6Ca1LMk6lc5HHnvj8zWL4akMmu2+AAAW7/AOya6C0jA1K4zx+9f+Zrmpq2If8AhX5surNSlodJZjp71uW/QVj2YG0VsQ/dHuK6mJGkh/ck+9OByKijP7o/WnA9eawpby9f0RrPZehPn9360zPGaU42Aj1poP511VN16I4cJ8Mv8UvzHE8+9NzwBjFBoJqDqE6VXvj/AMS+6/65N/I1Pn2qvf8A/IPuf+uTfyNAnsV9D/5Atv8ARv8A0I0U3Qv+QPb/AI/+hGiqhsb4f4TyLXL9zrFx5VzmP5cbWyPuiqEc7XV1bw3ErtG0qgjPWuttrKxNtpCRhJHuYriWZZbOM7iqSAHzCSwwVGABg9eDxXNS6FNa29vKt20moeULsW6Q5VYwC+S+eu0ZxtxjvU4dqMotraxpJRhPdme8UIhmnF1II0k2KDGMkkE/3unFaWkeHp9Wt754DcvNbWyzpELclpi0sce1cHOP3mc+2Md6r3Wl3bR3FukeyWJkluIlhbCZwvBJPdwDkDk8VuaVdQWFz4jsr6++xS3WntZ+f9nkw0oniJDBdxAKq2cAcZwCcA9ntYNO369/+GMpSjYxrTRtTn1G4sIdL1GS7iQ+bB9mYyIPUryR9aZBp19dNMtnYXlwYF3SiKFnMYyR82BxyD1rrJb7Q57m4huJYpZNPsre2inu1uVhmZThyRFiTI+VUzjheccVdfU7DxJr0sGn3jwf8TpL2GQW82bgFRwu3JEgIYjfgfMfm9c5VFayIc+lzjYbK4u9ai0pUYXzyiDypMKVfOCGz0x3z0q1qukHS4baYXltdW1wXEdxb52lkxuX5lU5GQenQgjNXH1OCx+Keo6nLCz28eoTs8kQJby2dlLAdDw2feqt2bE6bpug2eoQXAjnuruW6WGZYlLxoqqAy7ycR8nbjLDnAzRzoXOu4tnpK6jpz3NtqNnJNFDJcPZ/OJBGmSzZ2bOAM43ZxVg6BKmnG6N3Zectst49pu/fLA2MSEbduMEHGc4OcYo0OWOw0G5SXxBM2n3FpOJNKiSdWklKEKW48vCtsbduJwMYqebU9Ne3m1NLrN/daKmnJYm3cOGWJYS5bGzZtjLDBzk
gY70lUtcfMzHgQ3KBoE81TIsIKIGBkbO1eB944OB1ODV7TNFm1C4nhdHgaKO4IDQZLSRJvaPHHzfdz3G4cU3wVeW2krqRvVZQYY7uxAiZt13EcxdOn32GenNbt14l059TtZrS5kEk+m3rSP5Lg/b54yrqMe4jGenvQqmgXkc8dOmgubm1vLa5guoIvMaE23zryACwOCq89eeo45q7qXhfWdK1WLT57G4NxMoMKpA3735QTtBUE43AHA4NQ2+t2MukRJLO73SeHprWQeW+fN+2NKq5x02EHPQDjOeK0m1zSBql5ePNayw61pkUG2eK5AtpEEQYSeXtbafLYZjZvfjIJzqzuK/c5m8T7NcSw3K+TPG22SJ1CMh9CuOKta94auNLtJrg3lrLLBcLBdQ27EtbSMCQjggAdCPlyARjioPEmpfbtR+2RpZukEUMKS2iTiN9mMD98d5wBjJ7AVteIb7THg8RtY3Qlm1e+S4MTRSqbUeYXbzCRgtuYAbNwwCfam6qZXM2YVroDz6O+p3Os2dhbrP9nH2pJcu+3d8vlxtxjucU7QtIk1iJI7fUrMXspfy7R/MErhRn5SEKdj95hV7wvfvpV4BL4litbGO93XNjHFcML5BjOE2bGDDIw+33xTvCeo2+lzSXf9vNaabNI/2rSohOJJk5wgKjYcg4yzDGScUuaN/IV3fQwNOmhN0DcyJt2tksTjODjOOeuKtuixyS3IMbIkPmLGjnY/zBc8nPU/pWXa2rlYW8lX3kqgZG/eHPfB4544qwFvmbzREvl+SUEXltsK5JK9c9QTnOeKKc6ajaVt+39bGkFp1ND7QqzWrQuYxPsO1XIOCeR69qnhnlB3Pqr7c4z5p4Pp1rGeOaO8jubjKLFKqBFQgDbj5een86WGTFwqJEFRXLPl884xxxx7V0wlh3B3ste3TTy239OhnPmUtG/wCvmdVDOuzP9sSeo/ef/ZU69eNtPYf22ZC38PmgjqP9qsBJI5pI3Ikwm5QfMJPbvVhmjKOE8xCxz8jlecVpfC33/qy8tr3M7yel3/XzN+MpHaRga4PMVAPLE3IOOn3uMU2VpBIAmtPsOPmWXgj865hYijiQBS2TnJJyD61tw6a0lvCF2BSg2gk8ZH0rFVKbpyWl7u2mu6t07XuEmk1eT6E7TOp2jV5T6kSnAH51XknnjBC6pcED+ITHH86bLpcsSnBi3EEZ3HH8qgWwkaNkXywSMElif6VftKfJZqN7duvT7Pyf5lXj/M/6+ZYW4uyONVmP/bU/41Nb6leW16gOozspUk/vT/jWaNJuYVl8uWM71IAJPyn16fWqD/aRqIilCK/klfvcfXpV4Z4flTl8Xa3r5fMzrNOLTbt/Xmdyut7gv+luvfHmf/X6Uq622zL3TBu48wf41wCXErR7cr/q9mS3fdnPT8KsLLKXLo3DDDhZOQfbj/GoSwrVmvz8/L+kW35v+vmdZqOrtLAoW8bJYceZz/Oqk2qTnJ+2PwcH970/WuVnMrTEiXBGPvEk/wAqZPcPICEG3e258NnP044HJ9awSo++rLy+7S2nffyNnL3I6vr/AFudMupTMcfbJfXiakF9cPtAvJiXzjEprk1mnSOUDq4wDnO0Z+lLDeyQIRubf1B34X8Rj+tXQlRXKqiXW+nmtPPS/wB/kZya1s3/AF8z0bTbqN9NiaSdWc5yWkGfvGp2uIf+esf/AH8FebQXNysKqk6/TGe/0pxurwNzMqjuSuP6V5zjLyOlPD21lL7l/md9cXERgkAkQ5U/xj0qrazRC0jDSAEZ4Le9cUbq6IyLhPyH+FHn3xXIkLfRP/rV0JVPq7VlbmX5MX+zX+KX3L/M6PUJnNrKsDtu3gjaeaxXW8fDM1wcerEVJpf264uVjMqgk/xD/wCtW7/Z94V2GaLceQe38q4Ks+SXvWPRw1CnVg/ZuWl+3T5lfQmK2kiyHBD/AMZ5/WtNmT+9H+YqG0051ZxcyKc8qY6nNlAD96Q/lVqpTa3OOVGunpF/18zpvDk0EejTK00Kt9ozgsB2WvLNYs3fXL8xpuU3DkEcjG416LpWjvcafJJE6qgl2neec4HpXL32m3K39wFkhAEjD9fpVSnBJWZEKVRyaaOTOnzf88T+VH9nzZH7quiawuh1li/z+FN+wXeM+bFj/PtU+1Xc1+ry7GNZWEi31uxjwBIpJP1qzq9jJLq07ogZSRzkegrTh0+7aVMSw8sP5/Sn3enXa3Lq0sPGP5fSsnUj7RO/T9SfYSvaxzn9mzZ/1Y/MUv8AZs391f8AvoVtNY3QGTLF+X/1qjNnc9fNj/L/AOtWqqLuDoyXQg0exki1WB22BQTklh6Gq95YSNfXBGzBlYg7h61sabZTvqEQkdGXJyAPY+1QXVnci6m2ugXe2OOgz9K2517Ja9f0MfZP2r06fqZUdhIJFJaPgg/eovLFyZHDJjjvV37LdZ++n5f/AFqR7W4OVLpz/n0pKa5Pmb+zl7Nq3UorprGNW82PoOKuaXZmK7YtIn+rI6+4qysaLGoZMkAZOanso4mnIK/wnipjPUSpNNHPGwbuy0n2E92FaWFpp20cxm6Znmx/2x+VH2MD+P8ASrxxSZXFO5PIW/DSiLxDabmAUFsk8fwmuot5IxfXP7xMGVjnI9TXOeGIludehWQbky3H/ATXTR2EH2uYGPgSN3PTJqI29u/8K/NnI/jZ0Fg0ci/I6sR1wc1swgcZNc3oaBLi7RRgK4wPzro0HSt2Wi9Gf3DH3pVPpTIj/o7fWnKeKwo7z9f0RpPZehPn90tM4pSf3Y+tNrqqbr0Rw4T4Zf4pfmOHBHNJ7UZxjNIc9Kg6gycVXvm/4l9zn/nk38jVjPrVW+40+5/65P8AyNAnsV9D/wCQNbf8C/8AQjRSaGf+JRb/AEP/AKEaKqGxvh/hPGLnVr6wvLNfJhzZwyRxhsnIk3Zzg9fmOPwptvrl61p9mFtbGYWxthdbT5vlf3Ou3pxnGccZq5qWj3F5eGaJ4gpAGGYg/wAqrRaJdW7l3khIIxwx/wAKwwlalJwjJ6nViYcmMlBPS5XuPEl7PC0f2a1Weby1nuEU+ZOEIKhvmx1VScAZIGc1siwu9Stb/UZbaLfNOZZCMABmIYgZOcZNZtn4Yu5L2PdJAVBycMf8K762sC2iXkCFQVdcZPsldOGq0ZRm+bZX/FHA+Z7nJ3kt1dWxhOm2cbMVMssaAPLtGBnnA99oGTyafp2oatpK6gumg2aXybJEglZQq7s4HzfUZOTgn1rXOkz8/PH+f/1qjbSbgdHi/wC+j/hWH1ij/MiLye5DLFLo80kUuh2FwZogCZy2QpPQbHAHTr1rKsY7iwv1uo7G3dlDAJKoZPmBHTPPWuy8S2E8moRlXj/1Kjk+5rJj0u4Dhi8X/fR/wodeitHIfvaDJYbs2CPHpFlDC0bwqiAYG7OWyWJ3e5NZNudSSF7ZNKspHi3xx3DqPMRWJyB82D1OCQSM8EcV2k1rIdHhQMmQ5PXjvWLa2c73FyFeMENjk/WsqWIpNO8urLk5JqxmWk2sWgsQmlWD/YlkCGRFO/
f/AH/m+bHb0wKhtbXV7UWDrpdo32OUyoZNp35IOG+bkcfqa6Y2xhAw0bN7tVeVbtiSDF+LH/CtPb0P5kTzzOOjjv8ATr6OUWluxAKtHKFZXBGCCM+h/wAK6iz8N61rkEdzDoNoIRD5cUcciqsYz1GXyTkn7xPWqi6XdvM88rQmQ9DuJwPbivRPBX2iz0bbOwJ+Ypg5zzSqV6SpTlF6pX/IyqVJxWnU4bVtL1JoYrGTw3p0ckcYRZI5DuAHfAk25Pckd6lGi65e2iM+h2SI0iGeZCgaTbxz8/HvtAz1Oa6Ai5a9e4d0LPnOea3A08OhRZ8suzH2HerVai7+8bxlOzPHdYtJdPvGka2iEkUocK2CCeuMA9KyJbqRZI9trAuN3yKv3iwwSef/AKwrtPF2j3F3rLSxyxqjxq6oxxz0PasKHQriQGRHhD9CzMfl+nFCq0XBy5trDjKaizJguLi0jSPyYneJi0bN1iY+mDjt3zzVmzkvo7eELFEsMEvmbpOASeMHnkcn8zWpHoU8DEg28h7b2OB+GKZPol/cfM00LlSDwxwP04rNV6L3kiY1Ki2Zly3ZVmhWHewn855HPVz7ela2kvqWpX5lECGBN0r4AAXGeRk81HPoFyuZpWh2s/3VYn+ldnoehzR208QlXdPJHGAp4C7gx7egrodWgqUZcy1v+guebe5q+GvBmtSWHmQWKeW7FlMcigEH/ebNbv8AwhPiAnmxJGc486P/AOKrvNKvLayso4Vjkwoxwo/xrQ/tm3/uS/8AfI/xqPb0LfEhe0qXueYN4L18NuNkfp50f+Nd3brJbaPaQSjbJHbojLnOCFAIq7PrEGOFk/75H+NY93qcbZwH/Kj6xRW0kJucrX6GRqsuc81zRuGjmO01r6hL5hOM/jWFJC5fOV/OpeKpfzIFFmpFPuGc15p9rN946vJs5GXRfYKMf0rvRvjhc5HCnv7VwGi6dPH4hBcxlpFkbg+uT6V1YPEUnUspdH+Ry42L9hP0MiOFSmTn8KniiCnncp7c1qxaRKcAmMAdwx/wqcaTKDkmP8T/APWrk+s0v5kdqRnK8oODK7ADocGs653t95nbJrfbTZoyGJj98Hqahm0S4J+/D+Z/wqPrFLmb5ux1VE3h4esv0Ob8sqOBg/WkDzZ+8T+Fbp0O4VcK8P4sf8KRtFugMB4QMep/wqvrFH+ZHI0YLPNgkuRjnoKYskwGfNP4jIrbOhXTJgvD0/vH/CkHh+5C5d4cD/aP+FDxFH+ZCszHmUJDFMsahpMgkDgY9qqsWbPJI+tdB/Y1yqSmRofLYcqGPy46EcVBHoVyybw0Iz0yx/wrqjiKX1ST5vtL8mKz5iPw8calGGzjJrr2K+euM9KwNK0S6hvUbzIevZj/AIVumzn80IXjyR614uKq05TTUj6DKZqMJJ9n+Q9mRXP86hZ0BJPWpP7Mue8sX5mmtpc//PWP/vr/AOtWCqU/5jZ1YnUeG3U6JNgn/j4/otcrfzKNQuf+urA/ma6vw3Yyx6LMGkQn7Rng+y1y1/pczX9ywkj5lbufU10SqU+RXZyxqL2kik8y44qIzjrnrVg6TL/z0i/M/wCFINIl7vH/AN9f/WqVVpdynUXcZbzr9pi4HLjv71JqUo+3SjGOR/IU+HS5I545C0ZCsCcN/wDWqvqTj7fLx6fyFKMoyq+7rp+oua7IXmHv+VMM2On8qaTkdBn1qNic9MV0JIG2XdOmJ1KEY9f5Gqt3K32uf/fb+dS6Z/yEYfx/kaq3h/0uf/ro3866LfuV6/oc137Z+i/MjEjbh6UOx3daaOoxSSffNC/hv1/Q3/5dv1GSOw71PpzE3THP8B/mKqyEgVPpmftLf7h/mKIbmH2iiSaaT2oOfWm80zFgelMc4UmlINMYElVz1NUkZzdkdH4Ojxq9u2OpP/oJrqkX/TZ/+ujfzrA8KR7dWtgPf/0E10iD/TJiP+erfzrNf7y/8K/NnH9ol0YZur3/AK6D+tb8fOAemawtIGLq9/66f1Nbafdya3Zoi7HzAw96cCKZGcwN9aBWNHefr+iLnsvQsZ/cr9aaPrR/yyX603POa6qm69EcWE+GX+KX5js570ZyKb0oyc1B1C7sj3qtfn/QLnH/ADyb+Rqfdg1Bf4+wXPp5TfyNAmVtDP8AxKLcfX/0I0Umh/8AIJtv+Bf+hGiqhsb4f4TxLVV/0z/gIqtZ/wCvb/dNXdUH+lE/7Iqnaf69v900YLeB2Yv/AJGE/wDEzW8E6e19rq4O1UxlvSvSb6zWG11CNBjLqTz7JXM/CSz+06xKD0DCvZbTSrS4169gkVfL2qSCM/wrXdh4/u5f4f1R5KfvfJnjMltJkgIx+gzVaS2mx/qZP++TXpGsaeuk6lLBJbP5LHMUoXhh6exriNX1bUba4zb22FBI2bCeK4xXG+LEddUiIRj+4Xt7msFJXJwBzXS+K9Xn+3xxmzkbMKnKqcHk+1Y0MN/dcjTLnB6EJQM0pGYeHbbc2P3h/wDZq597h90yxDJJ9cZ9q6a40HXL3QbaC2smSQSEkSsFwOef5VQ0/wAGancSTxSXEETRNh+S3PPTj2rChs/V/maVN16IwoZZ1DPdiNBj5UUgn8aqT+ZK3ySPg9Fr0ey+HPmY3STzDu20Io/XNdTpXgzTdJIkEIln9W5C10pNmLaOA0DwPc3CpdapI0EOAwiH3mHv6V3Ng0e+FEXbAFIRfYVd1JJJFWBSQ8jiNQPfr+maYlp5mqNDEp2xpsUD2rPEq2Gqen6oxqu9vU5+W2Vry4ubhAsKfvPr6CpZbh7rRbd24/eEge3NV9Thug/2BUldI2+Z9pO403V55tK8JxyrbSyT7yscaoT8xzyfaqVSGuq+9f5nVB6P0OJ8aSQzXsSxnMttAQ2O2TwP51y8lzPARskCqR09fwq6bDUp7eWaW0umllmyxMTdh9Peq72V4SWSynkPQYiY0KpDklqunVf5jXwsqiTewMuGJPfpVqKcyttkcYU4VDwo/Clj0i+MR3WNwGPP+pbj9K1tP06aa2PnaMWkQj70bAsPyrKNSHdfev8AMziK0aizRlKsu4D5cZFeq/Dm2gm1KeLA3Q7ZkHUdNv8A7NXmmsaNbQ20Ulpa3lvOWCvCcsvPcHA/Ku18D2Wr6F4b1vUisn2sNHGFwc7CRkDjn3rrlUh7GOq3fVeXmNtXZ7vBt8oFSCPUGlkk2isnw7m38P20chwQCRuPODzz+JNXZnUxFtwJ7DNY+1h3X3r/ADIKt3cZBrn76fg81Y1G7aIfLG0n+7WJcSPIu4I/PbBqXVh3X3r/ADKVjPvX3q2Oo/lWJICWyK1pUm35ET/98msjXVurOwla3t5Gc8AhCdoI61n7SHdfev8AMu6KupT+TpF22eRGa4nw0D/bMWc/6puv0roLpL648KTO1vOZGUKQYzknPpWL4etLuPWo1kglUmJsAofSuvB1Ie0eq2fVdvU5cb/AnbsJCY8Bt56+nWnrtZyCx570R6dd7Qfsswx3Mbf4VNHa3SMT9luGb3iOBXP7WHdfev8AM61YbuQk4GT1O7tSOFfnd
j29KsfZZwQ7Wsq9slDg002dyAf9Cnye4jNZqpDneq6dV/mdlRpYeHrL9DKkYqx5wuO9RlnIyTwegrSm0+6UFhaTt6fuz+tQC0v9xAs52OO8R/wqvaQ/mX3r/M5LopuONzZK/wAKg8sf8KhYFwHkIHoo7CtObTLuVjLHazktjKbDlPYcciqj6dej/lzuePWJv8KPaQ7r71/mJtFF8bSEyAfWnoizwhOPNQfJ/tDuKsPp15sLNaTqMZ/1Z/wpsVneBVeO0uD3DCMnmupVIfVXqviXVdn5kfaIdOcC+jPQbq6DzBtJ6Y96y/7PvF1CNls5wGIY/um4OPpWp9kucFTbSbjzjYa8fEyg5LVfee7lU1yyXk/yGGYHim+aM9P1p/2O6xj7JP8A9+zTGtLodLSf/v01ZKUO5o6i7nYeFZAdDnx/z8+vstcjqMg/tO66f65/5mt7wpqOp6ffRWkdkfJkdnYvC2Qdvr+Ap974n1yO/uEXTUKrIwB+zv0z9a35oOK1/r7zm57Tdrfecn5vzdaQuPeukHinXs/8gxP/AAHf/GnHxRr3/QMj/wDAd/8AGlzU/wCb+vvD2r8vvOZVx70oYZ710o8Ua/8A9AyP/wAB3/xpR4n1/wD6Bsf/AIDv/jRzU/5v6+8PaP8ApnMkgnHNNLAds11H/CTa9/0Do/8AwHf/ABpD4n17/oGR/wDgO/8AjRzU/wCb+vvB1H/TMLS2H9ow8ev8jVS7JN5Px/y0b+ddbZ+ItcmvI45dOjVGzkiBxjj60ybxJr6TyImnRFVYgH7O/TP1rfmh7Fe91/T1MOdus/Tv5nI8hhx3pJThjxXWf8JL4gyM6dF1/wCfd/8AGiTxNrwYj+zY/wDwHf8AxqVOHJ8S3/rqb875H69zi2O7gZFW9M3faWH+wf6V0h8Ta+B/yDY//AZ/8adB4h1q4kKTaeiKBkEQMOfzpwnC6s/6+8yUve/4JxBBB6U09O1dkfFHiD/oFx/+Az/400+KfEP/AEC4/wDwGf8Axo54fzL8P8zJyOMPWlhXfcqPSuvPinxF/wBAuP8A8Bn/AMatw+IvEAkOdMjxgf8ALu/+NaRnDuvvX+ZhVlpYh8MJjVID9f5GugVf9Ml9PMb+dWNE8Qau99EJrONE5yTCw7H3rUHiK8+0SjyrfhyPuH1+tZxcXXbT6L82YL4jG0ri7vh6yD+ZrZU96raRrVzHf6iwSLLy5OVPqfetxdeusD93Dzz90/410M0RBGcW7fWlBGc1fTW7kwE7Ic5/un/Gga3c4+5D/wB8n/GsaVry9f8AIufT0Kv/ACxX600H8q0v7ZuPKU7Iuv8AdP8AjTP7auf+ecX/AHyf8a6p7r0Rx4W3LK380vzKFJnjg1ojWbk5+SH/AL5P+NJ/bVxn7kX/AHyf8azOkzzyDVe9/wCQfc/9cm/ka2P7ZuP+ecX/AHyf8agu9cuUs528uHiNj90+n1oEzF0P/kE2/wCP/oRoq/p+u3M2iozJD8yMDhT6n3oqobG+H+E8K1Qf6R/wEVStP9e3+7Whqi5n/wCAis+1H+kMP9k0YLeB2Yv/AJGE/wDEz0/4Hwh7+8kPRWH8hXpGk3yP4w1KR22q7bVP935VxXPfC3ww+k+HXnIJvLoF2UdhjgflSs7QahqJwVZJF+o4SvRoOykn/L+qPHWsn6M9O8pHjCTBXB9QCDWddaFpdxnz7QjPR14qhoniGC5iMNzKqkD+I8N/9euiidSA0bHaR3rnlBMz2MKXwvaTziUSkEDaAQCKtRaOkOFfy2x6Ej+taYLKPl457Ck3EHmpUUNtmJPplnLO6OrhQM4DEVi+H4beLU9YWOJcCfAzz3auhuGL3khI7dq53RSRqesEf89/6tWFDZ+r/M0qdPRG8RvYIeAemKrywNH74p5lyMEVci/0mAFlIdeD7+9dBkczIVGvW5I+WKN5D9cGq81y9jDNcHiaUcewNbNxppXU/PZf3YTB9/aud1eTz7ltxG0HH6VSScZJrp/kTJXcfU5+bUruOQvJPtTBdjtHT8q5zxL4kv5/D9tcQzmMNMQvyqePm9R7U3xTqQeU2kR4AHmY/QVi64hfwdp2MZ+0N1/4HXIsPSSdor7jqglZ+hmDxNrbW5xeYAfk7F/wpYta1aM7Ybsqp6/Ip5/EVkxyeWjxvH8rDjHY+tX4YBJKq+Y+O+xc/wA6I4el7OS5V06AkuVmlDruqMT5t84UDP3E/wAK27C81u9VJPtqW8D/APLZkU8egGOTWLBp5muWjWFUjYDLTOMgZHv/AJzXUyWzW6IuxVjVeowB/wDqqI4Wjb4F9yM4pEes3cq2MaxTsQrgb3A3OeeTxx9BivU/h9cQXPh++m1CZHT7RtPm4UYGfSvIdU4sk2MrKXGPbg1ayVXa7M5HUZyAcV0zoUlRgnFbvovIpQTbPW9e8e6NoqbY4muHx8qoeK4TUvi1qrEfZbC2gQ8DcSxH61yUtyxLqflHGc81kXL7t3zHruJ9q53Qo/yL7kV7OKOhuPiV4jZzi4gXnoIRVM/ETxJnm5iP/bIVzbEM2M8fyqMEDOMmp+r0f5F9yDlR1sXxL1lWxNFA4/3cVpwfEaO5QR3cckRPcYYfyrz0rnPHtShGLn5eaX1aj/IvuQJLseh6x4hkk0SQ2N4rNkABVGR+GKxtC1PUZdcieaYlhG2CVA9fauehQ55OK2dEU/26mBwYmIz9K6sHh6PtH7q2fTyObGpKhNrsWTruoYA+1nPbCLj+VA1vUg4/0zKnr8i8fpWZjYp3kbfQVFGw2sM4A5zXN9Wo/wAi+5HTZdjaXXL2bMbzbl6jIH+FPj1+/cMTOeB2Vf8ACsiBV3/f4xk8c0xWjBYqG6Y61CoUuZrlXToddRL6vT06y/Q1pdevihxcMreyrz+lUz4h1QDAuiT3+Rfy6VRyxPO7GeCRUUgw3HfoKv6tR/kX3I5Womp/wkGqMOLk5HfYv+FMPiDWkP8Ax/cejIh/pWW7ngbWAHqMU0ox28bfrxS+rUf5V9wrJ9DVl8Rak8ZVpVIIwcKBUcOv6lFGkcU6qgzzsBI/Os3AAf5wR6CmgKQA3TrmupUKX1VrlXxLp5MXKuY0U8Q6sLpVN6Wy39xf8K0W1m/K+b9oO8cA7R/hXMrtN2m3ONwrVDYt3+teVXoUk1aK+49fLYxtLTo/yLzeINTA5uj/AN8L/hTD4i1MD/j6P/fC/wCFZjfWonPWpWHpfyr7iZRiuh0ugeINSl161je5JUlsjYv90+1UtR8SammqXaLdEATOB8i/3j7VW8Nn/iorT6t/6Caz9VP/ABN7z/ru/wD6Ea1WHpctuVfcYNR5tjRHiXVcf8fZ/wC+F/wpw8S6mcf6Uf8Avhf8KwckUoaj6tS/lX3B7vY3x4k1P/n7P/fC/wCFPHiPU8f8fR/74X/CueEhpwkNL6rS/lX3FJx7HQDxHqWP+Pon/gC/4UHxHqX/AD9H/vhf8KwhLR5lL6tS/lX3Fe52Om03XtRl1GJHuSVJPG1fQ+1RXXiDUkuplW6ICuw+4vr9KzdGkzqsPrk/yNQXj/6bcf8AXRv5
1q8PT9ilyrd9PIxSj7V6dP1NVfEOpsw/0ojn+4v+FJL4h1MOf9KOP9xf8Kx0b51xnrRO37xqlYalyP3Vv2Onlh7N6dTUPiPVev2o4/3F/wAKsWOuajPOyyXJZdhIGxf8K58tV3Sj/pTf7h/pRDD0k0+VfcZRUeZaEp8Rat/z9n/vhf8ACmnxHqv/AD9n/vhf8KzCaYTQsPS/lX3GbjHsan/CR6rkZvCOf7i/4VrprmpkKftJ6c/Iv+FcknzTIPfJroLRQV59a1jhqVvhX3I5qijzbHT2OrX7JzcE8/3R/hWrAzMS55LZJrBsE6dhmt6Djt2xVxpwh8KSJSSItLJ+13vr5n9TW2hz19Kw9L/4/L3/AK6dfxNbUbc5zVspF2M/6M3+9Qp4psR/0Z/rQvOMVhR3n6/oi57L0LJ/1Ax603PpQT/o6/U00nmuqpuvRHFhPhl/il+YoIFLTc0eg/KoOoUNkY71Wvv+PG5/65N/I1YzVa+P+gXH/XNv5GgT2Kuk/wDIDi/3W/maKTSj/wASOL/db+ZoqobG+H+E8m1Ejzj/ALtVdMiWW/JJ4UdPXmrGpDMx+gqvYEQXQkJ7c08D8UDrxf8Av8/8TPpPwpdohiQHDKR8p4ptzZW1/wCItTinGFJXJXqPlSqWi6l9usIZ4NKkmBUFJEJx+YFU01G7/t++R9PuzIwDlQM8YX2FenSg7TTXT9UeZGjNPW33r/Mk1Xw3Lp0263m82PqARhq6DQdSP2JI7nqvAccj8fSseTWZZ1W3l068Eij5cjkiqkOsfZLgt/Z90pP3gV4P4VgoNaXH9Vn3X3o9EQgx9cj1pjkbCa519da3kAWzuGBXPyjipF16eUYXTbk+23/61JQdtA+qzet195cP/H0/0rnNLcRajrJIz/pGMfi1aB1S4W4b/iVXecf3aw7LU1N/qOzTLveZsycd8msKVJpPVbsueHldarZdTrbODz8Ng4Na4SG1TLYJx0rmoNbuYIQq6XdAf7n/ANalbW7luX0u7Pttrf2ZH1WfdfejRv5mkjYjC8HFeY63dvDAxQZlZiFHp711eoeImt4y02n3QB4GR1rlLy/sZrYSyWU53Mcj8/eqjTevoRLDSTWq37nnOoRFGJY5YnLH1NTanEJfBdgGXOLhv/Z63b6TQ2P7ywuB/wAC/wDsqx9YvbG50mOys0kiWKTeN/Pr7n1rH2bVzpjQkk9vvRyiQxqfugGui0axWRt7sAOw9apWenNNJvyXVeoArfsRst9vkFhuIHFOMHyP5AqE+Vr9V/mJCj/2tJGwK5QA8fStOKNkbAB2dCr/ANKz4pSl/JlWY7QME8jpipvtMoHzIwbJJGamMJELDz/pr/ML2BZ1WN1wFbIwetZtxaQ52q8g7nn/AOtVqSc4AHBBLHJzVSS4G1skA7s59q0U6sVaL/IpYeXVL71/mUJrRARgv+NV5LRVXkt19e1W3/eAkyZz3qEdcb8t6Cj2tf8Am/FB9Xl2X3r/ADKbW6Ko5OSM8Ui265Oc8VcMJ6B0Bx0IpwVVGN4P8qXtq/8AN+KF9Wl2X3r/ADKJhXvmpFt42zgt781YVY1GcqcnvSkLwN6jvxS9tX/m/FD+rS7L71/mRLbJjqxI6Ctfw7Cv9pltzZWNsZ/Af1rOIXAAcetPTGMqcnHHtVKtW6v8UZ18HKpTcFZX81/majeHryT+OH8XOP5U0eGrzH+tt+vQOf8ACsraTnD4HelEe7+In6Cs3FdvxMlhMX/z8X3L/wCSNU6FcQDc7Q88cOf8KYvh67DDdJAR6Bj/AIVSjT96XJA4wM96g8ts/wCtHvWSiud6dup11MLi/q9NKa3l0Xl/eNUaBe7ifOg5P94/4UHw9ctwXgJHcMf8KyjsX7xQkcUEgqcOgyOwrXlXb8Tk+qYz+dfcv/kjR/4R68B/10BA7Fj/AIVCfDV85JM1uf8AgZ/wrN8lADiRen5U1Yk4/eA47etHKu34i+qYz+dfcv8A5I05fDd4sZbzIMKCT8x5/SmxeH7uaBXWW3CnsWPr9KzWjySxlHToaRIQwB3j6V0qK+rPT7S6+TF9Vxd7c6v6L/5I018MXyz7hLbcc/eP+FWDot6sbRb7fcxyDuOP5ViJGqXAbzVzjoau5U2zY29a8yvCN17vbqepl+GxcVK9RbS6Lt/iL/8Awjd6UH7y3zj++f8ACo38N35/5aW3/fR/wqSytRJZRNhWwME5xzT2ggX7xjH1cVqqUP5fxOJ0Mf8A8/F9y/8AkiTQvD1/Dr1pIz2+0Fs4Y5+6fas7UvDmovqt24ltcGZyMuf7x9q0tMS2OtWeyWLdvbADAn7prnNThT+17398n+vf/wBCPvRyR/l/ESw2MejqK/ov/kix/wAI1qX/AD2tf++z/hSf8IzqX/Pa1/77P+FZvkp/z3j/AC/+vR5C/wDPdPy/+vRyL+X8R/VcZ/z8X3L/AOSNL/hGdS/57Wv/AH0f8KP+EZ1P/nva/wDfR/wrN8hP+e6flR5Cf890/KjlX8v4h9Vxn/Pxfcv/AJI0v+EZ1P8A5723/fZ/wpP+EZ1P/n4tv++z/hWd5Ef/AD3SjyE/57p+VHLH+X8Q+qYz/n4vuX/yRvaT4d1GHU4ZHngKgnIDH0PtUN34a1F7ydluIAGkYj5z6/Sq2jQKNXgP2hTyePwNQXsC/brg/aAP3rfzNaOMfZrTr38jNYXGe0a9or27Lv8A4i5H4Z1LzEzcQYyP4z/hS3HhjUWnYi4gA/3z6fSsxYUDhvtAODmorghp2IbIqHyRjqvxNZUMXGnZ1Fv2X/yRp/8ACLaj3uIP++z/AIVc03w5fW9yzvNCQUI4c+3tXO4960NHAF4//XM/zFQnC+34kU6eI51eov8AwH/gk3/CKX//AD2t/wDvs/4Uf8Ipf/8APa3/AO+z/hWTgUhAANLmh2/EzdLE/wDPxf8AgP8AwTftfC96kmTLb8f7Z/wrdtPD92oBMkOM/wB4/wCFcTaA7xXSWQOwDNaXh2/Ew9niP+fi/wDAf+CddaaROi8vF+BP+FacenygffT86wLLiMHua042wOKV4dvxKVPE/wDPxf8AgP8AwR+lWshvL8ArxJjr7mthLWQd1/Oue0tsXl5/v/1Na8bfKaV4dvxH7PEdJr/wH/gmqkDi2YZXOfWkWBx3X86ij4tH+v8AhTV6GsqTheXu9e/oXOnibL94tv5f+CW3GyFQT3qPoaYTkg0ua0lLmY6FJ0otN3bbf3j80hOaT3pT9Kk1Aniq18f9BuB28pv5GrGflxVa+P8AoFz/ANcm/kaAZV0nH9ixeu1v5mio9KONFiI9G/maKqGx0Yf4Tyy6u3WQABenpUAunbghfyqO8b98P90VEjc0sLTjeLsdeKxdf6zKPM7XNiw8R6jpjZtpdq9052n8K6zw9410eXWXm1tZ7YSxhN8R3KDx14zjivOi1RnpXbheVRqJq/u/qjhni68rXm/6+R9N2uhaFrlotxp181ynUNFKrY/IZFVbrw1HbN+885/fcAf5c18522o3unSeZZ3UsD+sbEVuW3xO8WWQ2/2q86/3Zxu
Fc/JQeysH1vFfzv8Ar5Hut1pVjPcDe8ocIOMgf0qAaJZq37uSfPoGH+FYEPxH0+PWIbDWlFvI0atHcqPlySRg+nSu3W5D26zwmOWIjKyRkEGh0Y25rCWNxEdOd/18ira+HUnuSGE4GOSXAwPyrI0/wvBNqmrR+dIAk+Mhh6t7e1b8d5cPIX80xqRyTycVhaXqTR6rqypkK8/Lt16tWVClBp6dWOrjMRdPney/rY1H0DTraPMlxMAByzOOf0rFum0yMlYJJnP94uMfyqxcSPI5Z3Ln1NZM1qsrMU4bP4Vv7Gn2M/r+J/nf9fIq3Fok7szTO4/hGelU7mzQWSrluG/xqzNDJCQCcZ6EVOMzWimVeQfzq4UoWlZdP8jOeNxDcbze/wDn5HG6lAIkLDP41zrzSOMIoyzYHH511muESllXhRxVHSbKJSZZAMBSFGefrXO6ML7HTHHYj+d/18iaytYVtFYS8kc8cVFZIDGEViSSxIxwAAK0pUUPuf5Y0X8aq6cALdyUfJJC4HJ9a0VKCg1bsarGV2m+d/18ivGgOpyKeEVNxz6cUXDJvIBJHrmi6tYZpC5Lozjlcjp2rNltIlZsyMR0HvWHso9ifruJX2n/AF8ieR/m4IZR69qpyXJ3EKR+Peq7xRjcodj71CYkHckdhnqaTprsH17E/wAz+/8A4BO92/GAv5Un2mTrgD8DVfZFn7pJ74NXrDTWuZ49kMskRPz7OuPWl7OPYPr2J/mf9fIZGbmRZHWMBIxlmI6Vo6bp4v7a4uXuEit4QBuYfeY9AKu3WkWmm/bUuHl8huIxkbhg9TVSGyt7qBpLVZktYQTywOffpR7OPYX17E/zP+vkQXdpKl5HbWq+dJ5YaTaM7T159OKpPK0LbXT5h/eGK1rDQ7q5g3kBY3J2/vOvsTjGasx+GLaYYkEkbno6kPk/pRyR7C+vYn+Z/wBfI577R8v3VB7DFBuXjwAVJPoOlWNR0GexnAaK48sk7W2g5/Ks42vP3vx/xo9nHsP69if5n/XyLLXj4OCue4pPtj8cjFVvLj4CgsB601EUseMNR7OPYf17E/zP+vkW2uZWGQmc+oxUIUB8Ek+yjAFQopOck/TNNKqO5qox5dkZVcRVq253cfKw3jn8aUFAhL5Yk/dHSmGBtoOANx4GaQoi/KTubuR2p6mV32HPIrf8swB7dqZhRzk49KUxxKMnJOeMGo9iHoxBo1Bt9iQsdmS2PrULORxn9aTBUndz6UE8HPFdSv8AVX/iX5Mm75hY2Au1NaBf/RHPv6/Ss6MBp1BBAPrV/wAtRZvjON3+FeXX3R6uXylaWnSX5FJyrZ+UH61Xbbn7q/lVhkAGc1AwA6VaOaUp9jT8L4/4SS0+UDlu3+yap6oR/a97/wBd5P8A0I1f8L/8jHafVv8A0E1S1T/kL3v/AF3f/wBCNX0M7yvsU8ik3U7NFILy7DSaM07NIGzQF5dhM0U7NGeaAvLsX9EP/E4t/wAf/QTVa+P+n3H/AF1b+Zq1oh/4m9v+P/oJqtfH/T7n/rq38zWr/hL1/QzvL2j06fqVuaKdmkzWRpeXYMGtDRh/pb/9cz/MVQzxWho5/wBLf/rmf5iqje5UHLmWhnc01umKdupN2TihXuZScrbFq0X5veulsV6GuetogxHWt20sUYDJf8DVXZlr2OhteAPSr8bZFYUOnQN1aT8x/hVtNKt8Z3y/99D/AApaju+xd0w/6Ve/74/ma2UIzj1rlrHTIJLi5UvJhXwMEe/tWpHotscfPN/30P8ACnqF32OkjP8Aojn/AGqaprOh0i3/ALNlj3y7WYE/MM9vaoV0G0x/rJ/++h/hWFK95+v6I0m3ZadDaHX+dPzxWKNAtf8AnpP/AN9D/Cnf8I/adPMn/wC+h/hW2pF32NntRnpWOvh+0PHmT5/3h/hQfD9p/wA9J/8Avof4U9Qu+xrk1Wvj/oNx/wBcm/lVA+H7T/npP/30P8KY2g2vTzJ/++h/hS1Fr2JdKI/saIf7LfzNFTRQJa2ohQkqoOM9aKuOx1UNInjF6f36/wC6KjjbJ/CpbuGSSUMi5G0dxUcdvKpyU/UUYWSvFXN8TRqPEykou1+wzNGeKd9nm/ufqKd9nlx9z9RXZhY354vS8eunVHFKhV091/cV26VWkFXmtpSPun8xURspD/CfzFL6lP8Amj/4Eh+yqfyP7ma/jZS2uQAdTbL/ADau/wDg/wCJhGZvDt6/7uT95bMx6N3X8eK4/U7H+1r1Lp5ljZYxGAFJ6En+tLYaS1jcpcw3mJYzuVtp4rWnhZxeso2f95EujUa+B/cz364j2TMF7dvSuV05SdQ1Rl7Tf1aqFv4yvJIVWVrcyAYJ2Nz+tZ9n4mktby9dzB+9kzyjHufT61lTws6d02t31Qp0Kr2i9l0Z1z/vUynXuKrTnyISEGXPHSsBvFOWDLJACD2jeo38Sb9372BSe4jfj6Vt7CXdfejP6tW/lf3GjBM5nLzEFcHqOgplx5n2fCOWBP3u2KyDrFuGDSSxSEDABR8flT59btrq2WM3SQ88hImqoUJK+q27omeGraPle/Zla4hWU7Q3Q4/3j6U026tcsUXAVSfaka5smAH9oAYx0ieg3NoeP7TI57Qt/hWf1afdfejVU5r7L+5/5EszBzHG+1Qo3Nu7+lZ1oXWBnWQHLHqavG8stzsL5MvjcfJfmqNra29xaMsl2FG49Izmh0JKDu106o2jGfK/df3MglJZ2wOVHPPSov7OurjCxR4GMhm4BrYhstPhYsJkc5z80bVNMUm4N+qp/dWJqxdF/wA0f/AkQ4VP5H9z/wAjFXw1KRunmCgc5Wp49I05JYw0m92OApPU1dFtabdpvS31V/8AGhYbKNo2SaFNhyCIWzU+wf8ANH/wJC9nU/kf3P8AyJbTw1BZ3HnPG4kRsbXHGPp6Ulrd29vfNHpy/ZXVs55KuT1FTXl2bsENqrDIABEZyMUy1+zQKFW8UkHLP5R3Mfc9afsH/NH/AMCQezqfyP7n/kYustcXmqXXlqGMJ6N90e59q6VfDs1r4etrV5DIb2Tc0qDG0kdB7cVSRLGPYjyxSqrb9rxMQzerDufrW1a+I5oYXSO8hwZd4/ct8oxjA9qPq/8Aej/4Ehezq/yP7n/kVtUgh0vR00u3Ym8jhAhiH/LVmH3vzrl9N1X+y5zaX2551z5jhxtT6cc1saisV9qsd+b/AMqSPGwJG2B9Kxbrwxp0s5mOoyDJyRsJoeH/AL0f/AkHs6v8j+5/5Gj/AGzBqLiAyzmLPMm8At7dKp3/AIajG6S3eRUb5iXbdj+VXbCz0qz6eRKezSROSv05q2Tb7Cgu1IbqGjY/zpewf80f/AkV7Op/I/uf+Rx76O6LmNxJg8gHn8Kglt54k5hKp1zjmuznjsZ0CtJArgcOkDA1V+xQJjGo8D1iJo9g/wCaP/gSF7Op/I/uf+RxRDo3KsPrTCwD8fdHt1ruGsrOQYku42U9jAaqtoOk7TtuQrdmEbZFHsH/ADR/8CQ/Z1P5H9z/AMjkHZ2bcwKjtSDOeFy1dZ/wj9gG3D
UnzjHMRNMfw9YsuDqTAZzxEeaPYP8Amj/4Eh8lT+R/c/8AI5YqC3LnPsOKaUOeoOPwzXT/APCNad/0E3/79Gkbw1pxx/xM3/79Gj2D/mj/AOBIXs6n8j+5/wCRy7A9wfqOlISAM4z9a6n/AIRnT+2pyY9PKNWE8K6G8Q83V5lfuBCf8K0naGHcbpu6ejT6MSo1G/hf3P8AyONWTE6M3A9avGZTZyEHgH/CumPhPw/kf8Tmfj/pif8ACkPhrRgfIXVJjE3LP5RyD9Me1eZVi5NWX5f5noYPmpqXMmtJdH29DjHlHY1Azgmu5Pg7QT/zGZ/+/J/wpv8Awhmg/wDQauP+/P8A9atVSn/L+X+ZxPEQfX8znvC7j/hI7Tr1b/0E1T1Vx/a97/13k/8AQjXcaZ4a0LTdRiu11edzHn5TERnII9Pesy90LQp764lOp3ALyM2PL6ZOf7tN05pfD/X3k+2hfc4/d7Ubq6n/AIR7Qf8AoKXH/fv/AOxo/wCEd0H/AKClx/37/wDsaXJP+V/18x+1h3OVzRkDiuq/4R7Qf+gpcf8Afv8A+xo/4R3Qf+gpcf8Afv8A+xo5J/yv+vmL2sO5yu7NJu9xXV/8I7oP/QUuP+/f/wBjR/wjug/9BS4/79//AGNHJP8Alf8AXzD2sO5iaG//ABOLf6t/6CarX7f8TC56f61v5murs9H0Kyu47hdSnYpngx9eMf3azbqz0B7uZzfXOWdicL7/AO7RUlyU0mnv28hQfNUbT6fqc8WpN5rc+w+H/wDn/uv++f8A7Gj7B4f/AOf+5/75/wDsaw9suz+5m3K+6+8ww5rS0Zibx8/88z/MVa+w+Hv+f65/75/+xq3p9toUVwzRXk5YoRyv0/2aqNVN7P7hwjaS1X3nMbjTo+Wrof7H0X/n7ufyH/xNSR6RooP/AB93P5f/AGNCrL+V/czOVOTKVkgytdHaDEeajt7HRkI/0uf8v/rVqxro6pj7XL/3yf8ACn7Zfyv7mL2UhIT/APXq1GwHH401H0cf8vcv/fJ/wqZZtIDZ+1Sf98n/AAo9sv5X9zH7ORBprf6TdZ/v/wCNbMZ9KzNKWya4uz5z7S+VOOo59q2IxZAD98/5f/Wpusl0f3MFSky3Cc2T/wC9/hTVPFJ51ulu0cchOTnkGo1lUH71TRu+Z23f+Q5xlorFkHpT+pquJk/vfpT/AD0z979K3syOWXYlzgZ96UnqPWofPjx979KQzJ/e/SiwckuxLnBFRs1M85P736UwzJu60WYcsuwO3yke1FRPIpU80VUTeimk7n//2Q==", - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "# image viz\n", - "frcnn_visualizer = SingleImageViz(URL, id2obj=objids, id2attr=attrids)\n", - "# run frcnn\n", - "images, sizes, scales_yx = image_preprocess(URL)\n", - "output_dict = frcnn(\n", - " images,\n", - " sizes,\n", - " scales_yx=scales_yx,\n", - " padding=\"max_detections\",\n", - " max_detections=frcnn_cfg.max_detections,\n", - " return_tensors=\"pt\",\n", - ")\n", - "# add boxes and labels to the image\n", - "\n", - "frcnn_visualizer.draw_boxes(\n", - " output_dict.get(\"boxes\"),\n", - " output_dict.pop(\"obj_ids\"),\n", - " output_dict.pop(\"obj_probs\"),\n", - " output_dict.pop(\"attr_ids\"),\n", - " output_dict.pop(\"attr_probs\"),\n", - ")\n", - "showarray(frcnn_visualizer._get_buffer())" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [], - "source": [ - "# test_questions_for_url1 = [\n", - "# \"Where is this scene?\",\n", - "# \"what is the man riding?\",\n", - "# \"What is the man wearing?\",\n", - "# \"What is the color of the horse?\"\n", - "# ]\n", - "test_questions_for_url2 = [\n", - " \"Where is the cat?\",\n", - " \"What is near the disk?\",\n", - " \"What is the color of the table?\",\n", - " \"What is the color of the cat?\",\n", - " \"What is the shape of the monitor?\",\n", - "]\n", - "\n", - "# Very important that the boxes are normalized\n", - "# normalized_boxes = output_dict.get(\"normalized_boxes\")\n", - "features = output_dict.get(\"roi_features\")" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Question: ['Where is the cat?']\n", - "prediction from VisualBert VQA: outside\n", - "Question: ['What is near the disk?']\n", - "prediction from VisualBert VQA: nothing\n", - "Question: ['What is the color of the table?']\n", - "prediction from VisualBert VQA: brown\n", - "Question: ['What is the color of the cat?']\n", - "prediction from VisualBert VQA: gray\n", - "Question: ['What is the shape of the monitor?']\n", - "prediction from VisualBert VQA: square\n" - ] - } - ], - "source": [ - "for test_question in 
test_questions_for_url2:\n", - " test_question = [test_question]\n", - "\n", - " inputs = bert_tokenizer(\n", - " test_question,\n", - " padding=\"max_length\",\n", - " max_length=20,\n", - " truncation=True,\n", - " return_token_type_ids=True,\n", - " return_attention_mask=True,\n", - " add_special_tokens=True,\n", - " return_tensors=\"pt\",\n", - " )\n", - "\n", - " output_vqa = visualbert_vqa(\n", - " input_ids=inputs.input_ids,\n", - " attention_mask=inputs.attention_mask,\n", - " visual_embeds=features,\n", - " visual_attention_mask=torch.ones(features.shape[:-1]),\n", - " token_type_ids=inputs.token_type_ids,\n", - " output_attentions=False,\n", - " )\n", - " # get prediction\n", - " pred_vqa = output_vqa[\"logits\"].argmax(-1)\n", - " print(\"Question:\", test_question)\n", - " print(\"prediction from VisualBert VQA:\", vqa_answers[pred_vqa])" - ] - } - ], - "metadata": { - "interpreter": { - "hash": "f237d186bbb22b392353378fb98a8d08e33f23f14150c8880e3780871939e71d" - }, - "kernelspec": { - "display_name": "Python 3.8.0 64-bit ('transformers_env': conda)", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.0" - } - }, - "nbformat": 4, - "nbformat_minor": 4 -} \ No newline at end of file diff --git a/examples/research_projects/visual_bert/extracting_data.py b/examples/research_projects/visual_bert/extracting_data.py deleted file mode 100644 index 6b1342c9b11..00000000000 --- a/examples/research_projects/visual_bert/extracting_data.py +++ /dev/null @@ -1,149 +0,0 @@ -import getopt -import json -import os - -# import numpy as np -import sys -from collections import OrderedDict - -import datasets -import numpy as np -import torch -from modeling_frcnn import GeneralizedRCNN -from processing_image import Preprocess - -from utils import Config - - -""" -USAGE: -``python extracting_data.py -i -o .datasets `` -""" - - -TEST = False -CONFIG = Config.from_pretrained("unc-nlp/frcnn-vg-finetuned") -DEFAULT_SCHEMA = datasets.Features( - OrderedDict( - { - "attr_ids": datasets.Sequence(length=CONFIG.MAX_DETECTIONS, feature=datasets.Value("float32")), - "attr_probs": datasets.Sequence(length=CONFIG.MAX_DETECTIONS, feature=datasets.Value("float32")), - "boxes": datasets.Array2D((CONFIG.MAX_DETECTIONS, 4), dtype="float32"), - "img_id": datasets.Value("int32"), - "obj_ids": datasets.Sequence(length=CONFIG.MAX_DETECTIONS, feature=datasets.Value("float32")), - "obj_probs": datasets.Sequence(length=CONFIG.MAX_DETECTIONS, feature=datasets.Value("float32")), - "roi_features": datasets.Array2D((CONFIG.MAX_DETECTIONS, 2048), dtype="float32"), - "sizes": datasets.Sequence(length=2, feature=datasets.Value("float32")), - "preds_per_image": datasets.Value(dtype="int32"), - } - ) -) - - -class Extract: - def __init__(self, argv=sys.argv[1:]): - inputdir = None - outputfile = None - subset_list = None - batch_size = 1 - opts, args = getopt.getopt(argv, "i:o:b:s", ["inputdir=", "outfile=", "batch_size=", "subset_list="]) - for opt, arg in opts: - if opt in ("-i", "--inputdir"): - inputdir = arg - elif opt in ("-o", "--outfile"): - outputfile = arg - elif opt in ("-b", "--batch_size"): - batch_size = int(arg) - elif opt in ("-s", "--subset_list"): - subset_list = arg - - assert inputdir is not None # and os.path.isdir(inputdir), f"{inputdir}" - assert outputfile is not None and not 
os.path.isfile(outputfile), f"{outputfile}" - if subset_list is not None: - with open(os.path.realpath(subset_list)) as f: - self.subset_list = {self._vqa_file_split()[0] for x in tryload(f)} - else: - self.subset_list = None - - self.config = CONFIG - if torch.cuda.is_available(): - self.config.model.device = "cuda" - self.inputdir = os.path.realpath(inputdir) - self.outputfile = os.path.realpath(outputfile) - self.preprocess = Preprocess(self.config) - self.model = GeneralizedRCNN.from_pretrained("unc-nlp/frcnn-vg-finetuned", config=self.config) - self.batch = batch_size if batch_size != 0 else 1 - self.schema = DEFAULT_SCHEMA - - def _vqa_file_split(self, file): - img_id = int(file.split(".")[0].split("_")[-1]) - filepath = os.path.join(self.inputdir, file) - return (img_id, filepath) - - @property - def file_generator(self): - batch = [] - for i, file in enumerate(os.listdir(self.inputdir)): - if self.subset_list is not None and i not in self.subset_list: - continue - batch.append(self._vqa_file_split(file)) - if len(batch) == self.batch: - temp = batch - batch = [] - yield list(map(list, zip(*temp))) - - for i in range(1): - yield list(map(list, zip(*batch))) - - def __call__(self): - # make writer - if not TEST: - writer = datasets.ArrowWriter(features=self.schema, path=self.outputfile) - # do file generator - for i, (img_ids, filepaths) in enumerate(self.file_generator): - images, sizes, scales_yx = self.preprocess(filepaths) - output_dict = self.model( - images, - sizes, - scales_yx=scales_yx, - padding="max_detections", - max_detections=self.config.MAX_DETECTIONS, - pad_value=0, - return_tensors="np", - location="cpu", - ) - output_dict["boxes"] = output_dict.pop("normalized_boxes") - if not TEST: - output_dict["img_id"] = np.array(img_ids) - batch = self.schema.encode_batch(output_dict) - writer.write_batch(batch) - if TEST: - break - # finalizer the writer - if not TEST: - num_examples, num_bytes = writer.finalize() - print(f"Success! You wrote {num_examples} entry(s) and {num_bytes >> 20} mb") - - -def tryload(stream): - try: - data = json.load(stream) - try: - data = list(data.keys()) - except Exception: - data = [d["img_id"] for d in data] - except Exception: - try: - data = eval(stream.read()) - except Exception: - data = stream.read().split("\n") - return data - - -if __name__ == "__main__": - extract = Extract(sys.argv[1:]) - extract() - if not TEST: - dataset = datasets.Dataset.from_file(extract.outputfile) - # wala! - # print(np.array(dataset[0:2]["roi_features"]).shape) diff --git a/examples/research_projects/visual_bert/modeling_frcnn.py b/examples/research_projects/visual_bert/modeling_frcnn.py deleted file mode 100644 index c7c3bf376ce..00000000000 --- a/examples/research_projects/visual_bert/modeling_frcnn.py +++ /dev/null @@ -1,1920 +0,0 @@ -""" -coding=utf-8 -Copyright 2018, Antonio Mendoza Hao Tan, Mohit Bansal -Adapted From Facebook Inc, Detectron2 && Huggingface Co. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License.import copy -""" - -import itertools -import math -import os -from abc import ABCMeta, abstractmethod -from collections import OrderedDict, namedtuple -from typing import Dict, List, Tuple - -import numpy as np -import torch -from torch import nn -from torch.nn.modules.batchnorm import BatchNorm2d -from torchvision.ops import RoIPool -from torchvision.ops.boxes import batched_nms, nms - -from utils import WEIGHTS_NAME, Config, cached_path, hf_bucket_url, is_remote_url, load_checkpoint - - -# other: -def norm_box(boxes, raw_sizes): - if not isinstance(boxes, torch.Tensor): - normalized_boxes = boxes.copy() - else: - normalized_boxes = boxes.clone() - normalized_boxes[:, :, (0, 2)] /= raw_sizes[:, 1] - normalized_boxes[:, :, (1, 3)] /= raw_sizes[:, 0] - return normalized_boxes - - -def pad_list_tensors( - list_tensors, - preds_per_image, - max_detections=None, - return_tensors=None, - padding=None, - pad_value=0, - location=None, -): - """ - location will always be cpu for np tensors - """ - if location is None: - location = "cpu" - assert return_tensors in {"pt", "np", None} - assert padding in {"max_detections", "max_batch", None} - new = [] - if padding is None: - if return_tensors is None: - return list_tensors - elif return_tensors == "pt": - if not isinstance(list_tensors, torch.Tensor): - return torch.stack(list_tensors).to(location) - else: - return list_tensors.to(location) - else: - if not isinstance(list_tensors, list): - return np.array(list_tensors.to(location)) - else: - return list_tensors.to(location) - if padding == "max_detections": - assert max_detections is not None, "specify max number of detections per batch" - elif padding == "max_batch": - max_detections = max(preds_per_image) - for i in range(len(list_tensors)): - too_small = False - tensor_i = list_tensors.pop(0) - if tensor_i.ndim < 2: - too_small = True - tensor_i = tensor_i.unsqueeze(-1) - assert isinstance(tensor_i, torch.Tensor) - tensor_i = nn.functional.pad( - input=tensor_i, - pad=(0, 0, 0, max_detections - preds_per_image[i]), - mode="constant", - value=pad_value, - ) - if too_small: - tensor_i = tensor_i.squeeze(-1) - if return_tensors is None: - if location == "cpu": - tensor_i = tensor_i.cpu() - tensor_i = tensor_i.tolist() - if return_tensors == "np": - if location == "cpu": - tensor_i = tensor_i.cpu() - tensor_i = tensor_i.numpy() - else: - if location == "cpu": - tensor_i = tensor_i.cpu() - new.append(tensor_i) - if return_tensors == "np": - return np.stack(new, axis=0) - elif return_tensors == "pt" and not isinstance(new, torch.Tensor): - return torch.stack(new, dim=0) - else: - return list_tensors - - -def do_nms(boxes, scores, image_shape, score_thresh, nms_thresh, mind, maxd): - scores = scores[:, :-1] - num_bbox_reg_classes = boxes.shape[1] // 4 - # Convert to Boxes to use the `clip` function ... - boxes = boxes.reshape(-1, 4) - _clip_box(boxes, image_shape) - boxes = boxes.view(-1, num_bbox_reg_classes, 4) # R x C x 4 - - # Select max scores - max_scores, max_classes = scores.max(1) # R x C --> R - num_objs = boxes.size(0) - boxes = boxes.view(-1, 4) - idxs = torch.arange(num_objs).to(boxes.device) * num_bbox_reg_classes + max_classes - max_boxes = boxes[idxs] # Select max boxes according to the max scores. 
- - # Apply NMS - keep = nms(max_boxes, max_scores, nms_thresh) - keep = keep[:maxd] - if keep.shape[-1] >= mind and keep.shape[-1] <= maxd: - max_boxes, max_scores = max_boxes[keep], max_scores[keep] - classes = max_classes[keep] - return max_boxes, max_scores, classes, keep - else: - return None - - -# Helper Functions -def _clip_box(tensor, box_size: Tuple[int, int]): - assert torch.isfinite(tensor).all(), "Box tensor contains infinite or NaN!" - h, w = box_size - tensor[:, 0].clamp_(min=0, max=w) - tensor[:, 1].clamp_(min=0, max=h) - tensor[:, 2].clamp_(min=0, max=w) - tensor[:, 3].clamp_(min=0, max=h) - - -def _nonempty_boxes(box, threshold: float = 0.0) -> torch.Tensor: - widths = box[:, 2] - box[:, 0] - heights = box[:, 3] - box[:, 1] - keep = (widths > threshold) & (heights > threshold) - return keep - - -def get_norm(norm, out_channels): - if isinstance(norm, str): - if len(norm) == 0: - return None - norm = { - "BN": BatchNorm2d, - "GN": lambda channels: nn.GroupNorm(32, channels), - "nnSyncBN": nn.SyncBatchNorm, # keep for debugging - "": lambda x: x, - }[norm] - return norm(out_channels) - - -def _create_grid_offsets(size: List[int], stride: int, offset: float, device): - grid_height, grid_width = size - shifts_x = torch.arange( - offset * stride, - grid_width * stride, - step=stride, - dtype=torch.float32, - device=device, - ) - shifts_y = torch.arange( - offset * stride, - grid_height * stride, - step=stride, - dtype=torch.float32, - device=device, - ) - - shift_y, shift_x = torch.meshgrid(shifts_y, shifts_x) - shift_x = shift_x.reshape(-1) - shift_y = shift_y.reshape(-1) - return shift_x, shift_y - - -def build_backbone(cfg): - input_shape = ShapeSpec(channels=len(cfg.MODEL.PIXEL_MEAN)) - norm = cfg.RESNETS.NORM - stem = BasicStem( - in_channels=input_shape.channels, - out_channels=cfg.RESNETS.STEM_OUT_CHANNELS, - norm=norm, - caffe_maxpool=cfg.MODEL.MAX_POOL, - ) - freeze_at = cfg.BACKBONE.FREEZE_AT - - if freeze_at >= 1: - for p in stem.parameters(): - p.requires_grad = False - - out_features = cfg.RESNETS.OUT_FEATURES - depth = cfg.RESNETS.DEPTH - num_groups = cfg.RESNETS.NUM_GROUPS - width_per_group = cfg.RESNETS.WIDTH_PER_GROUP - bottleneck_channels = num_groups * width_per_group - in_channels = cfg.RESNETS.STEM_OUT_CHANNELS - out_channels = cfg.RESNETS.RES2_OUT_CHANNELS - stride_in_1x1 = cfg.RESNETS.STRIDE_IN_1X1 - res5_dilation = cfg.RESNETS.RES5_DILATION - assert res5_dilation in {1, 2}, "res5_dilation cannot be {}.".format(res5_dilation) - - num_blocks_per_stage = {50: [3, 4, 6, 3], 101: [3, 4, 23, 3], 152: [3, 8, 36, 3]}[depth] - - stages = [] - out_stage_idx = [{"res2": 2, "res3": 3, "res4": 4, "res5": 5}[f] for f in out_features] - max_stage_idx = max(out_stage_idx) - for idx, stage_idx in enumerate(range(2, max_stage_idx + 1)): - dilation = res5_dilation if stage_idx == 5 else 1 - first_stride = 1 if idx == 0 or (stage_idx == 5 and dilation == 2) else 2 - stage_kargs = { - "num_blocks": num_blocks_per_stage[idx], - "first_stride": first_stride, - "in_channels": in_channels, - "bottleneck_channels": bottleneck_channels, - "out_channels": out_channels, - "num_groups": num_groups, - "norm": norm, - "stride_in_1x1": stride_in_1x1, - "dilation": dilation, - } - - stage_kargs["block_class"] = BottleneckBlock - blocks = ResNet.make_stage(**stage_kargs) - in_channels = out_channels - out_channels *= 2 - bottleneck_channels *= 2 - - if freeze_at >= stage_idx: - for block in blocks: - block.freeze() - stages.append(blocks) - - return ResNet(stem, stages, 
out_features=out_features) - - -def find_top_rpn_proposals( - proposals, - pred_objectness_logits, - images, - image_sizes, - nms_thresh, - pre_nms_topk, - post_nms_topk, - min_box_side_len, - training, -): - """Args: - proposals (list[Tensor]): (L, N, Hi*Wi*A, 4). - pred_objectness_logits: tensors of length L. - nms_thresh (float): IoU threshold to use for NMS - pre_nms_topk (int): before nms - post_nms_topk (int): after nms - min_box_side_len (float): minimum proposal box side - training (bool): True if proposals are to be used in training, - Returns: - results (List[Dict]): stores post_nms_topk object proposals for image i. - """ - num_images = len(images) - device = proposals[0].device - - # 1. Select top-k anchor for every level and every image - topk_scores = [] # #lvl Tensor, each of shape N x topk - topk_proposals = [] - level_ids = [] # #lvl Tensor, each of shape (topk,) - batch_idx = torch.arange(num_images, device=device) - for level_id, proposals_i, logits_i in zip(itertools.count(), proposals, pred_objectness_logits): - Hi_Wi_A = logits_i.shape[1] - num_proposals_i = min(pre_nms_topk, Hi_Wi_A) - - # sort is faster than topk (https://github.com/pytorch/pytorch/issues/22812) - # topk_scores_i, topk_idx = logits_i.topk(num_proposals_i, dim=1) - logits_i, idx = logits_i.sort(descending=True, dim=1) - topk_scores_i = logits_i[batch_idx, :num_proposals_i] - topk_idx = idx[batch_idx, :num_proposals_i] - - # each is N x topk - topk_proposals_i = proposals_i[batch_idx[:, None], topk_idx] # N x topk x 4 - - topk_proposals.append(topk_proposals_i) - topk_scores.append(topk_scores_i) - level_ids.append(torch.full((num_proposals_i,), level_id, dtype=torch.int64, device=device)) - - # 2. Concat all levels together - topk_scores = torch.cat(topk_scores, dim=1) - topk_proposals = torch.cat(topk_proposals, dim=1) - level_ids = torch.cat(level_ids, dim=0) - - # if I change to batched_nms, I wonder if this will make a difference - # 3. For each image, run a per-level NMS, and choose topk results. - results = [] - for n, image_size in enumerate(image_sizes): - boxes = topk_proposals[n] - scores_per_img = topk_scores[n] - # I will have to take a look at the boxes clip method - _clip_box(boxes, image_size) - # filter empty boxes - keep = _nonempty_boxes(boxes, threshold=min_box_side_len) - lvl = level_ids - if keep.sum().item() != len(boxes): - boxes, scores_per_img, lvl = ( - boxes[keep], - scores_per_img[keep], - level_ids[keep], - ) - - keep = batched_nms(boxes, scores_per_img, lvl, nms_thresh) - keep = keep[:post_nms_topk] - - res = (boxes[keep], scores_per_img[keep]) - results.append(res) - - # I wonder if it would be possible for me to pad all these things. - return results - - -def subsample_labels(labels, num_samples, positive_fraction, bg_label): - """ - Returns: - pos_idx, neg_idx (Tensor): - 1D vector of indices. The total length of both is `num_samples` or fewer. 
- """ - positive = torch.nonzero((labels != -1) & (labels != bg_label)).squeeze(1) - negative = torch.nonzero(labels == bg_label).squeeze(1) - - num_pos = int(num_samples * positive_fraction) - # protect against not enough positive examples - num_pos = min(positive.numel(), num_pos) - num_neg = num_samples - num_pos - # protect against not enough negative examples - num_neg = min(negative.numel(), num_neg) - - # randomly select positive and negative examples - perm1 = torch.randperm(positive.numel(), device=positive.device)[:num_pos] - perm2 = torch.randperm(negative.numel(), device=negative.device)[:num_neg] - - pos_idx = positive[perm1] - neg_idx = negative[perm2] - return pos_idx, neg_idx - - -def add_ground_truth_to_proposals(gt_boxes, proposals): - raise NotImplementedError() - - -def add_ground_truth_to_proposals_single_image(gt_boxes, proposals): - raise NotImplementedError() - - -def _fmt_box_list(box_tensor, batch_index: int): - repeated_index = torch.full( - (len(box_tensor), 1), - batch_index, - dtype=box_tensor.dtype, - device=box_tensor.device, - ) - return torch.cat((repeated_index, box_tensor), dim=1) - - -def convert_boxes_to_pooler_format(box_lists: List[torch.Tensor]): - pooler_fmt_boxes = torch.cat( - [_fmt_box_list(box_list, i) for i, box_list in enumerate(box_lists)], - dim=0, - ) - return pooler_fmt_boxes - - -def assign_boxes_to_levels( - box_lists: List[torch.Tensor], - min_level: int, - max_level: int, - canonical_box_size: int, - canonical_level: int, -): - box_sizes = torch.sqrt(torch.cat([boxes.area() for boxes in box_lists])) - # Eqn.(1) in FPN paper - level_assignments = torch.floor(canonical_level + torch.log2(box_sizes / canonical_box_size + 1e-8)) - # clamp level to (min, max), in case the box size is too large or too small - # for the available feature maps - level_assignments = torch.clamp(level_assignments, min=min_level, max=max_level) - return level_assignments.to(torch.int64) - min_level - - -# Helper Classes -class _NewEmptyTensorOp(torch.autograd.Function): - @staticmethod - def forward(ctx, x, new_shape): - ctx.shape = x.shape - return x.new_empty(new_shape) - - @staticmethod - def backward(ctx, grad): - shape = ctx.shape - return _NewEmptyTensorOp.apply(grad, shape), None - - -class ShapeSpec(namedtuple("_ShapeSpec", ["channels", "height", "width", "stride"])): - def __new__(cls, *, channels=None, height=None, width=None, stride=None): - return super().__new__(cls, channels, height, width, stride) - - -class Box2BoxTransform: - """ - This R-CNN transformation scales the box's width and height - by exp(dw), exp(dh) and shifts a box's center by the offset - (dx * width, dy * height). - """ - - def __init__(self, weights: Tuple[float, float, float, float], scale_clamp: float = None): - """ - Args: - weights (4-element tuple): Scaling factors that are applied to the - (dx, dy, dw, dh) deltas. In Fast R-CNN, these were originally set - such that the deltas have unit variance; now they are treated as - hyperparameters of the system. - scale_clamp (float): When predicting deltas, the predicted box scaling - factors (dw and dh) are clamped such that they are <= scale_clamp. - """ - self.weights = weights - if scale_clamp is not None: - self.scale_clamp = scale_clamp - else: - """ - Value for clamping large dw and dh predictions. - The heuristic is that we clamp such that dw and dh are no larger - than what would transform a 16px box into a 1000px box - (based on a small anchor, 16px, and a typical image size, 1000px). 
- """ - self.scale_clamp = math.log(1000.0 / 16) - - def get_deltas(self, src_boxes, target_boxes): - """ - Get box regression transformation deltas (dx, dy, dw, dh) that can be used - to transform the `src_boxes` into the `target_boxes`. That is, the relation - ``target_boxes == self.apply_deltas(deltas, src_boxes)`` is true (unless - any delta is too large and is clamped). - Args: - src_boxes (Tensor): source boxes, e.g., object proposals - target_boxes (Tensor): target of the transformation, e.g., ground-truth - boxes. - """ - assert isinstance(src_boxes, torch.Tensor), type(src_boxes) - assert isinstance(target_boxes, torch.Tensor), type(target_boxes) - - src_widths = src_boxes[:, 2] - src_boxes[:, 0] - src_heights = src_boxes[:, 3] - src_boxes[:, 1] - src_ctr_x = src_boxes[:, 0] + 0.5 * src_widths - src_ctr_y = src_boxes[:, 1] + 0.5 * src_heights - - target_widths = target_boxes[:, 2] - target_boxes[:, 0] - target_heights = target_boxes[:, 3] - target_boxes[:, 1] - target_ctr_x = target_boxes[:, 0] + 0.5 * target_widths - target_ctr_y = target_boxes[:, 1] + 0.5 * target_heights - - wx, wy, ww, wh = self.weights - dx = wx * (target_ctr_x - src_ctr_x) / src_widths - dy = wy * (target_ctr_y - src_ctr_y) / src_heights - dw = ww * torch.log(target_widths / src_widths) - dh = wh * torch.log(target_heights / src_heights) - - deltas = torch.stack((dx, dy, dw, dh), dim=1) - assert (src_widths > 0).all().item(), "Input boxes to Box2BoxTransform are not valid!" - return deltas - - def apply_deltas(self, deltas, boxes): - """ - Apply transformation `deltas` (dx, dy, dw, dh) to `boxes`. - Args: - deltas (Tensor): transformation deltas of shape (N, k*4), where k >= 1. - deltas[i] represents k potentially different class-specific - box transformations for the single box boxes[i]. - boxes (Tensor): boxes to transform, of shape (N, 4) - """ - boxes = boxes.to(deltas.dtype) - - widths = boxes[:, 2] - boxes[:, 0] - heights = boxes[:, 3] - boxes[:, 1] - ctr_x = boxes[:, 0] + 0.5 * widths - ctr_y = boxes[:, 1] + 0.5 * heights - - wx, wy, ww, wh = self.weights - dx = deltas[:, 0::4] / wx - dy = deltas[:, 1::4] / wy - dw = deltas[:, 2::4] / ww - dh = deltas[:, 3::4] / wh - - # Prevent sending too large values into torch.exp() - dw = torch.clamp(dw, max=self.scale_clamp) - dh = torch.clamp(dh, max=self.scale_clamp) - - pred_ctr_x = dx * widths[:, None] + ctr_x[:, None] - pred_ctr_y = dy * heights[:, None] + ctr_y[:, None] - pred_w = torch.exp(dw) * widths[:, None] - pred_h = torch.exp(dh) * heights[:, None] - - pred_boxes = torch.zeros_like(deltas) - pred_boxes[:, 0::4] = pred_ctr_x - 0.5 * pred_w # x1 - pred_boxes[:, 1::4] = pred_ctr_y - 0.5 * pred_h # y1 - pred_boxes[:, 2::4] = pred_ctr_x + 0.5 * pred_w # x2 - pred_boxes[:, 3::4] = pred_ctr_y + 0.5 * pred_h # y2 - return pred_boxes - - -class Matcher: - """ - This class assigns to each predicted "element" (e.g., a box) a ground-truth - element. Each predicted element will have exactly zero or one matches; each - ground-truth element may be matched to zero or more predicted elements. - The matching is determined by the MxN match_quality_matrix, that characterizes - how well each (ground-truth, prediction)-pair match each other. For example, - if the elements are boxes, this matrix may contain box intersection-over-union - overlap values. - The matcher returns (a) a vector of length N containing the index of the - ground-truth element m in [0, M) that matches to prediction n in [0, N). - (b) a vector of length N containing the labels for each prediction. 
- """ - - def __init__( - self, - thresholds: List[float], - labels: List[int], - allow_low_quality_matches: bool = False, - ): - """ - Args: - thresholds (list): a list of thresholds used to stratify predictions - into levels. - labels (list): a list of values to label predictions belonging at - each level. A label can be one of {-1, 0, 1} signifying - {ignore, negative class, positive class}, respectively. - allow_low_quality_matches (bool): if True, produce additional matches or predictions with maximum match quality lower than high_threshold. - For example, thresholds = [0.3, 0.5] labels = [0, -1, 1] All predictions with iou < 0.3 will be marked with 0 and - thus will be considered as false positives while training. All predictions with 0.3 <= iou < 0.5 will be marked with -1 and - thus will be ignored. All predictions with 0.5 <= iou will be marked with 1 and thus will be considered as true positives. - """ - thresholds = thresholds[:] - assert thresholds[0] > 0 - thresholds.insert(0, -float("inf")) - thresholds.append(float("inf")) - assert all(low <= high for (low, high) in zip(thresholds[:-1], thresholds[1:])) - assert all(label_i in [-1, 0, 1] for label_i in labels) - assert len(labels) == len(thresholds) - 1 - self.thresholds = thresholds - self.labels = labels - self.allow_low_quality_matches = allow_low_quality_matches - - def __call__(self, match_quality_matrix): - """ - Args: - match_quality_matrix (Tensor[float]): an MxN tensor, containing the pairwise quality between M ground-truth elements and N predicted - elements. All elements must be >= 0 (due to the us of `torch.nonzero` for selecting indices in :meth:`set_low_quality_matches_`). - Returns: - matches (Tensor[int64]): a vector of length N, where matches[i] is a matched ground-truth index in [0, M) - match_labels (Tensor[int8]): a vector of length N, where pred_labels[i] indicates true or false positive or ignored - """ - assert match_quality_matrix.dim() == 2 - if match_quality_matrix.numel() == 0: - default_matches = match_quality_matrix.new_full((match_quality_matrix.size(1),), 0, dtype=torch.int64) - # When no gt boxes exist, we define IOU = 0 and therefore set labels - # to `self.labels[0]`, which usually defaults to background class 0 - # To choose to ignore instead, - # can make labels=[-1,0,-1,1] + set appropriate thresholds - default_match_labels = match_quality_matrix.new_full( - (match_quality_matrix.size(1),), self.labels[0], dtype=torch.int8 - ) - return default_matches, default_match_labels - - assert torch.all(match_quality_matrix >= 0) - - # match_quality_matrix is M (gt) x N (predicted) - # Max over gt elements (dim 0) to find best gt candidate for each prediction - matched_vals, matches = match_quality_matrix.max(dim=0) - - match_labels = matches.new_full(matches.size(), 1, dtype=torch.int8) - - for l, low, high in zip(self.labels, self.thresholds[:-1], self.thresholds[1:]): - low_high = (matched_vals >= low) & (matched_vals < high) - match_labels[low_high] = l - - if self.allow_low_quality_matches: - self.set_low_quality_matches_(match_labels, match_quality_matrix) - - return matches, match_labels - - def set_low_quality_matches_(self, match_labels, match_quality_matrix): - """ - Produce additional matches for predictions that have only low-quality matches. - Specifically, for each ground-truth G find the set of predictions that have - maximum overlap with it (including ties); for each prediction in that set, if - it is unmatched, then match it to the ground-truth G. 
- This function implements the RPN assignment case (i) - in Sec. 3.1.2 of Faster R-CNN. - """ - # For each gt, find the prediction with which it has highest quality - highest_quality_foreach_gt, _ = match_quality_matrix.max(dim=1) - # Find the highest quality match available, even if it is low, including ties. - # Note that the matches qualities must be positive due to the use of - # `torch.nonzero`. - of_quality_inds = match_quality_matrix == highest_quality_foreach_gt[:, None] - if of_quality_inds.dim() == 0: - (_, pred_inds_with_highest_quality) = of_quality_inds.unsqueeze(0).nonzero().unbind(1) - else: - (_, pred_inds_with_highest_quality) = of_quality_inds.nonzero().unbind(1) - match_labels[pred_inds_with_highest_quality] = 1 - - -class RPNOutputs: - def __init__( - self, - box2box_transform, - anchor_matcher, - batch_size_per_image, - positive_fraction, - images, - pred_objectness_logits, - pred_anchor_deltas, - anchors, - boundary_threshold=0, - gt_boxes=None, - smooth_l1_beta=0.0, - ): - """ - Args: - box2box_transform (Box2BoxTransform): :class:`Box2BoxTransform` instance for anchor-proposal transformations. - anchor_matcher (Matcher): :class:`Matcher` instance for matching anchors to ground-truth boxes; used to determine training labels. - batch_size_per_image (int): number of proposals to sample when training - positive_fraction (float): target fraction of sampled proposals that should be positive - images (ImageList): :class:`ImageList` instance representing N input images - pred_objectness_logits (list[Tensor]): A list of L elements. Element i is a tensor of shape (N, A, Hi, W) - pred_anchor_deltas (list[Tensor]): A list of L elements. Element i is a tensor of shape (N, A*4, Hi, Wi) - anchors (list[torch.Tensor]): nested list of boxes. anchors[i][j] at (n, l) stores anchor array for feature map l - boundary_threshold (int): if >= 0, then anchors that extend beyond the image boundary by more than boundary_thresh are not used in training. - gt_boxes (list[Boxes], optional): A list of N elements. - smooth_l1_beta (float): The transition point between L1 and L2 lossn. When set to 0, the loss becomes L1. When +inf, it is ignored - """ - self.box2box_transform = box2box_transform - self.anchor_matcher = anchor_matcher - self.batch_size_per_image = batch_size_per_image - self.positive_fraction = positive_fraction - self.pred_objectness_logits = pred_objectness_logits - self.pred_anchor_deltas = pred_anchor_deltas - - self.anchors = anchors - self.gt_boxes = gt_boxes - self.num_feature_maps = len(pred_objectness_logits) - self.num_images = len(images) - self.boundary_threshold = boundary_threshold - self.smooth_l1_beta = smooth_l1_beta - - def _get_ground_truth(self): - raise NotImplementedError() - - def predict_proposals(self): - # pred_anchor_deltas: (L, N, ? 
Hi, Wi) - # anchors:(N, L, -1, B) - # here we loop over specific feature map, NOT images - proposals = [] - anchors = self.anchors.transpose(0, 1) - for anchors_i, pred_anchor_deltas_i in zip(anchors, self.pred_anchor_deltas): - B = anchors_i.size(-1) - N, _, Hi, Wi = pred_anchor_deltas_i.shape - anchors_i = anchors_i.flatten(start_dim=0, end_dim=1) - pred_anchor_deltas_i = pred_anchor_deltas_i.view(N, -1, B, Hi, Wi).permute(0, 3, 4, 1, 2).reshape(-1, B) - proposals_i = self.box2box_transform.apply_deltas(pred_anchor_deltas_i, anchors_i) - # Append feature map proposals with shape (N, Hi*Wi*A, B) - proposals.append(proposals_i.view(N, -1, B)) - proposals = torch.stack(proposals) - return proposals - - def predict_objectness_logits(self): - """ - Returns: - pred_objectness_logits (list[Tensor]) -> (N, Hi*Wi*A). - """ - pred_objectness_logits = [ - # Reshape: (N, A, Hi, Wi) -> (N, Hi, Wi, A) -> (N, Hi*Wi*A) - score.permute(0, 2, 3, 1).reshape(self.num_images, -1) - for score in self.pred_objectness_logits - ] - return pred_objectness_logits - - -# Main Classes -class Conv2d(nn.Conv2d): - def __init__(self, *args, **kwargs): - norm = kwargs.pop("norm", None) - activation = kwargs.pop("activation", None) - super().__init__(*args, **kwargs) - - self.norm = norm - self.activation = activation - - def forward(self, x): - if x.numel() == 0 and self.training: - assert not isinstance(self.norm, nn.SyncBatchNorm) - if x.numel() == 0: - assert not isinstance(self.norm, nn.GroupNorm) - output_shape = [ - (i + 2 * p - (di * (k - 1) + 1)) // s + 1 - for i, p, di, k, s in zip( - x.shape[-2:], - self.padding, - self.dilation, - self.kernel_size, - self.stride, - ) - ] - output_shape = [x.shape[0], self.weight.shape[0]] + output_shape - empty = _NewEmptyTensorOp.apply(x, output_shape) - if self.training: - _dummy = sum(x.view(-1)[0] for x in self.parameters()) * 0.0 - return empty + _dummy - else: - return empty - - x = super().forward(x) - if self.norm is not None: - x = self.norm(x) - if self.activation is not None: - x = self.activation(x) - return x - - -class LastLevelMaxPool(nn.Module): - """ - This module is used in the original FPN to generate a downsampled P6 feature from P5. - """ - - def __init__(self): - super().__init__() - self.num_levels = 1 - self.in_feature = "p5" - - def forward(self, x): - return [nn.functional.max_pool2d(x, kernel_size=1, stride=2, padding=0)] - - -class LastLevelP6P7(nn.Module): - """ - This module is used in RetinaNet to generate extra layers, P6 and P7 from C5 feature. 
- """ - - def __init__(self, in_channels, out_channels): - super().__init__() - self.num_levels = 2 - self.in_feature = "res5" - self.p6 = nn.Conv2d(in_channels, out_channels, 3, 2, 1) - self.p7 = nn.Conv2d(out_channels, out_channels, 3, 2, 1) - - def forward(self, c5): - p6 = self.p6(c5) - p7 = self.p7(nn.functional.relu(p6)) - return [p6, p7] - - -class BasicStem(nn.Module): - def __init__(self, in_channels=3, out_channels=64, norm="BN", caffe_maxpool=False): - super().__init__() - self.conv1 = Conv2d( - in_channels, - out_channels, - kernel_size=7, - stride=2, - padding=3, - bias=False, - norm=get_norm(norm, out_channels), - ) - self.caffe_maxpool = caffe_maxpool - # use pad 1 instead of pad zero - - def forward(self, x): - x = self.conv1(x) - x = nn.functional.relu_(x) - if self.caffe_maxpool: - x = nn.functional.max_pool2d(x, kernel_size=3, stride=2, padding=0, ceil_mode=True) - else: - x = nn.functional.max_pool2d(x, kernel_size=3, stride=2, padding=1) - return x - - @property - def out_channels(self): - return self.conv1.out_channels - - @property - def stride(self): - return 4 # = stride 2 conv -> stride 2 max pool - - -class ResNetBlockBase(nn.Module): - def __init__(self, in_channels, out_channels, stride): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.stride = stride - - def freeze(self): - for p in self.parameters(): - p.requires_grad = False - return self - - -class BottleneckBlock(ResNetBlockBase): - def __init__( - self, - in_channels, - out_channels, - bottleneck_channels, - stride=1, - num_groups=1, - norm="BN", - stride_in_1x1=False, - dilation=1, - ): - super().__init__(in_channels, out_channels, stride) - - if in_channels != out_channels: - self.shortcut = Conv2d( - in_channels, - out_channels, - kernel_size=1, - stride=stride, - bias=False, - norm=get_norm(norm, out_channels), - ) - else: - self.shortcut = None - - # The original MSRA ResNet models have stride in the first 1x1 conv - # The subsequent fb.torch.resnet and Caffe2 ResNe[X]t implementations have - # stride in the 3x3 conv - stride_1x1, stride_3x3 = (stride, 1) if stride_in_1x1 else (1, stride) - - self.conv1 = Conv2d( - in_channels, - bottleneck_channels, - kernel_size=1, - stride=stride_1x1, - bias=False, - norm=get_norm(norm, bottleneck_channels), - ) - - self.conv2 = Conv2d( - bottleneck_channels, - bottleneck_channels, - kernel_size=3, - stride=stride_3x3, - padding=1 * dilation, - bias=False, - groups=num_groups, - dilation=dilation, - norm=get_norm(norm, bottleneck_channels), - ) - - self.conv3 = Conv2d( - bottleneck_channels, - out_channels, - kernel_size=1, - bias=False, - norm=get_norm(norm, out_channels), - ) - - def forward(self, x): - out = self.conv1(x) - out = nn.functional.relu_(out) - - out = self.conv2(out) - out = nn.functional.relu_(out) - - out = self.conv3(out) - - if self.shortcut is not None: - shortcut = self.shortcut(x) - else: - shortcut = x - - out += shortcut - out = nn.functional.relu_(out) - return out - - -class Backbone(nn.Module, metaclass=ABCMeta): - def __init__(self): - super().__init__() - - @abstractmethod - def forward(self): - pass - - @property - def size_divisibility(self): - """ - Some backbones require the input height and width to be divisible by a specific integer. This is - typically true for encoder / decoder type networks with lateral connection (e.g., FPN) for which feature maps need to match - dimension in the "bottom up" and "top down" paths. Set to 0 if no specific input size divisibility is required. 
- """ - return 0 - - def output_shape(self): - return { - name: ShapeSpec( - channels=self._out_feature_channels[name], - stride=self._out_feature_strides[name], - ) - for name in self._out_features - } - - @property - def out_features(self): - """deprecated""" - return self._out_features - - @property - def out_feature_strides(self): - """deprecated""" - return {f: self._out_feature_strides[f] for f in self._out_features} - - @property - def out_feature_channels(self): - """deprecated""" - return {f: self._out_feature_channels[f] for f in self._out_features} - - -class ResNet(Backbone): - def __init__(self, stem, stages, num_classes=None, out_features=None): - """ - Args: - stem (nn.Module): a stem module - stages (list[list[ResNetBlock]]): several (typically 4) stages, each contains multiple :class:`ResNetBlockBase`. - num_classes (None or int): if None, will not perform classification. - out_features (list[str]): name of the layers whose outputs should be returned in forward. Can be anything in: - "stem", "linear", or "res2" ... If None, will return the output of the last layer. - """ - super(ResNet, self).__init__() - self.stem = stem - self.num_classes = num_classes - - current_stride = self.stem.stride - self._out_feature_strides = {"stem": current_stride} - self._out_feature_channels = {"stem": self.stem.out_channels} - - self.stages_and_names = [] - for i, blocks in enumerate(stages): - for block in blocks: - assert isinstance(block, ResNetBlockBase), block - curr_channels = block.out_channels - stage = nn.Sequential(*blocks) - name = "res" + str(i + 2) - self.add_module(name, stage) - self.stages_and_names.append((stage, name)) - self._out_feature_strides[name] = current_stride = int( - current_stride * np.prod([k.stride for k in blocks]) - ) - self._out_feature_channels[name] = blocks[-1].out_channels - - if num_classes is not None: - self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) - self.linear = nn.Linear(curr_channels, num_classes) - - # Sec 5.1 in "Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour": - # "The 1000-way fully-connected layer is initialized by - # drawing weights from a zero-mean Gaussian with std of 0.01." - nn.init.normal_(self.linear.weight, stddev=0.01) - name = "linear" - - if out_features is None: - out_features = [name] - self._out_features = out_features - assert len(self._out_features) - children = [x[0] for x in self.named_children()] - for out_feature in self._out_features: - assert out_feature in children, "Available children: {}".format(", ".join(children)) - - def forward(self, x): - outputs = {} - x = self.stem(x) - if "stem" in self._out_features: - outputs["stem"] = x - for stage, name in self.stages_and_names: - x = stage(x) - if name in self._out_features: - outputs[name] = x - if self.num_classes is not None: - x = self.avgpool(x) - x = self.linear(x) - if "linear" in self._out_features: - outputs["linear"] = x - return outputs - - def output_shape(self): - return { - name: ShapeSpec( - channels=self._out_feature_channels[name], - stride=self._out_feature_strides[name], - ) - for name in self._out_features - } - - @staticmethod - def make_stage( - block_class, - num_blocks, - first_stride=None, - *, - in_channels, - out_channels, - **kwargs, - ): - """ - Usually, layers that produce the same feature map spatial size - are defined as one "stage". - Under such definition, stride_per_block[1:] should all be 1. 
- """ - if first_stride is not None: - assert "stride" not in kwargs and "stride_per_block" not in kwargs - kwargs["stride_per_block"] = [first_stride] + [1] * (num_blocks - 1) - blocks = [] - for i in range(num_blocks): - curr_kwargs = {} - for k, v in kwargs.items(): - if k.endswith("_per_block"): - assert ( - len(v) == num_blocks - ), f"Argument '{k}' of make_stage should have the same length as num_blocks={num_blocks}." - newk = k[: -len("_per_block")] - assert newk not in kwargs, f"Cannot call make_stage with both {k} and {newk}!" - curr_kwargs[newk] = v[i] - else: - curr_kwargs[k] = v - - blocks.append(block_class(in_channels=in_channels, out_channels=out_channels, **curr_kwargs)) - in_channels = out_channels - - return blocks - - -class ROIPooler(nn.Module): - """ - Region of interest feature map pooler that supports pooling from one or more - feature maps. - """ - - def __init__( - self, - output_size, - scales, - sampling_ratio, - canonical_box_size=224, - canonical_level=4, - ): - super().__init__() - # assumption that stride is a power of 2. - min_level = -math.log2(scales[0]) - max_level = -math.log2(scales[-1]) - - # a bunch of testing - assert math.isclose(min_level, int(min_level)) and math.isclose(max_level, int(max_level)) - assert len(scales) == max_level - min_level + 1, "not pyramid" - assert 0 < min_level and min_level <= max_level - if isinstance(output_size, int): - output_size = (output_size, output_size) - assert len(output_size) == 2 and isinstance(output_size[0], int) and isinstance(output_size[1], int) - if len(scales) > 1: - assert min_level <= canonical_level and canonical_level <= max_level - assert canonical_box_size > 0 - - self.output_size = output_size - self.min_level = int(min_level) - self.max_level = int(max_level) - self.level_poolers = nn.ModuleList(RoIPool(output_size, spatial_scale=scale) for scale in scales) - self.canonical_level = canonical_level - self.canonical_box_size = canonical_box_size - - def forward(self, feature_maps, boxes): - """ - Args: - feature_maps: List[torch.Tensor(N,C,W,H)] - box_lists: list[torch.Tensor]) - Returns: - A tensor of shape(N*B, Channels, output_size, output_size) - """ - x = list(feature_maps.values()) - num_level_assignments = len(self.level_poolers) - assert len(x) == num_level_assignments and len(boxes) == x[0].size(0) - - pooler_fmt_boxes = convert_boxes_to_pooler_format(boxes) - - if num_level_assignments == 1: - return self.level_poolers[0](x[0], pooler_fmt_boxes) - - level_assignments = assign_boxes_to_levels( - boxes, - self.min_level, - self.max_level, - self.canonical_box_size, - self.canonical_level, - ) - - num_boxes = len(pooler_fmt_boxes) - num_channels = x[0].shape[1] - output_size = self.output_size[0] - - dtype, device = x[0].dtype, x[0].device - output = torch.zeros( - (num_boxes, num_channels, output_size, output_size), - dtype=dtype, - device=device, - ) - - for level, (x_level, pooler) in enumerate(zip(x, self.level_poolers)): - inds = torch.nonzero(level_assignments == level).squeeze(1) - pooler_fmt_boxes_level = pooler_fmt_boxes[inds] - output[inds] = pooler(x_level, pooler_fmt_boxes_level) - - return output - - -class ROIOutputs: - def __init__(self, cfg, training=False): - self.smooth_l1_beta = cfg.ROI_BOX_HEAD.SMOOTH_L1_BETA - self.box2box_transform = Box2BoxTransform(weights=cfg.ROI_BOX_HEAD.BBOX_REG_WEIGHTS) - self.training = training - self.score_thresh = cfg.ROI_HEADS.SCORE_THRESH_TEST - self.min_detections = cfg.MIN_DETECTIONS - self.max_detections = cfg.MAX_DETECTIONS - - 
nms_thresh = cfg.ROI_HEADS.NMS_THRESH_TEST - if not isinstance(nms_thresh, list): - nms_thresh = [nms_thresh] - self.nms_thresh = nms_thresh - - def _predict_boxes(self, proposals, box_deltas, preds_per_image): - num_pred = box_deltas.size(0) - B = proposals[0].size(-1) - K = box_deltas.size(-1) // B - box_deltas = box_deltas.view(num_pred * K, B) - proposals = torch.cat(proposals, dim=0).unsqueeze(-2).expand(num_pred, K, B) - proposals = proposals.reshape(-1, B) - boxes = self.box2box_transform.apply_deltas(box_deltas, proposals) - return boxes.view(num_pred, K * B).split(preds_per_image, dim=0) - - def _predict_objs(self, obj_logits, preds_per_image): - probs = nn.functional.softmax(obj_logits, dim=-1) - probs = probs.split(preds_per_image, dim=0) - return probs - - def _predict_attrs(self, attr_logits, preds_per_image): - attr_logits = attr_logits[..., :-1].softmax(-1) - attr_probs, attrs = attr_logits.max(-1) - return attr_probs.split(preds_per_image, dim=0), attrs.split(preds_per_image, dim=0) - - @torch.no_grad() - def inference( - self, - obj_logits, - attr_logits, - box_deltas, - pred_boxes, - features, - sizes, - scales=None, - ): - # only the pred boxes is the - preds_per_image = [p.size(0) for p in pred_boxes] - boxes_all = self._predict_boxes(pred_boxes, box_deltas, preds_per_image) - obj_scores_all = self._predict_objs(obj_logits, preds_per_image) # list of length N - attr_probs_all, attrs_all = self._predict_attrs(attr_logits, preds_per_image) - features = features.split(preds_per_image, dim=0) - - # fun for each image too, also I can experiment and do multiple images - final_results = [] - zipped = zip(boxes_all, obj_scores_all, attr_probs_all, attrs_all, sizes) - for i, (boxes, obj_scores, attr_probs, attrs, size) in enumerate(zipped): - for nms_t in self.nms_thresh: - outputs = do_nms( - boxes, - obj_scores, - size, - self.score_thresh, - nms_t, - self.min_detections, - self.max_detections, - ) - if outputs is not None: - max_boxes, max_scores, classes, ids = outputs - break - - if scales is not None: - scale_yx = scales[i] - max_boxes[:, 0::2] *= scale_yx[1] - max_boxes[:, 1::2] *= scale_yx[0] - - final_results.append( - ( - max_boxes, - classes, - max_scores, - attrs[ids], - attr_probs[ids], - features[i][ids], - ) - ) - boxes, classes, class_probs, attrs, attr_probs, roi_features = map(list, zip(*final_results)) - return boxes, classes, class_probs, attrs, attr_probs, roi_features - - def training(self, obj_logits, attr_logits, box_deltas, pred_boxes, features, sizes): - pass - - def __call__( - self, - obj_logits, - attr_logits, - box_deltas, - pred_boxes, - features, - sizes, - scales=None, - ): - if self.training: - raise NotImplementedError() - return self.inference( - obj_logits, - attr_logits, - box_deltas, - pred_boxes, - features, - sizes, - scales=scales, - ) - - -class Res5ROIHeads(nn.Module): - """ - ROIHeads perform all per-region computation in an R-CNN. - It contains logic of cropping the regions, extract per-region features - (by the res-5 block in this case), and make per-region predictions. 
- """ - - def __init__(self, cfg, input_shape): - super().__init__() - self.batch_size_per_image = cfg.RPN.BATCH_SIZE_PER_IMAGE - self.positive_sample_fraction = cfg.ROI_HEADS.POSITIVE_FRACTION - self.in_features = cfg.ROI_HEADS.IN_FEATURES - self.num_classes = cfg.ROI_HEADS.NUM_CLASSES - self.proposal_append_gt = cfg.ROI_HEADS.PROPOSAL_APPEND_GT - self.feature_strides = {k: v.stride for k, v in input_shape.items()} - self.feature_channels = {k: v.channels for k, v in input_shape.items()} - self.cls_agnostic_bbox_reg = cfg.ROI_BOX_HEAD.CLS_AGNOSTIC_BBOX_REG - self.stage_channel_factor = 2**3 # res5 is 8x res2 - self.out_channels = cfg.RESNETS.RES2_OUT_CHANNELS * self.stage_channel_factor - - # self.proposal_matcher = Matcher( - # cfg.ROI_HEADS.IOU_THRESHOLDS, - # cfg.ROI_HEADS.IOU_LABELS, - # allow_low_quality_matches=False, - # ) - - pooler_resolution = cfg.ROI_BOX_HEAD.POOLER_RESOLUTION - pooler_scales = (1.0 / self.feature_strides[self.in_features[0]],) - sampling_ratio = cfg.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO - res5_halve = cfg.ROI_BOX_HEAD.RES5HALVE - use_attr = cfg.ROI_BOX_HEAD.ATTR - num_attrs = cfg.ROI_BOX_HEAD.NUM_ATTRS - - self.pooler = ROIPooler( - output_size=pooler_resolution, - scales=pooler_scales, - sampling_ratio=sampling_ratio, - ) - - self.res5 = self._build_res5_block(cfg) - if not res5_halve: - """ - Modifications for VG in RoI heads: - 1. Change the stride of conv1 and shortcut in Res5.Block1 from 2 to 1 - 2. Modifying all conv2 with (padding: 1 --> 2) and (dilation: 1 --> 2) - """ - self.res5[0].conv1.stride = (1, 1) - self.res5[0].shortcut.stride = (1, 1) - for i in range(3): - self.res5[i].conv2.padding = (2, 2) - self.res5[i].conv2.dilation = (2, 2) - - self.box_predictor = FastRCNNOutputLayers( - self.out_channels, - self.num_classes, - self.cls_agnostic_bbox_reg, - use_attr=use_attr, - num_attrs=num_attrs, - ) - - def _build_res5_block(self, cfg): - stage_channel_factor = self.stage_channel_factor # res5 is 8x res2 - num_groups = cfg.RESNETS.NUM_GROUPS - width_per_group = cfg.RESNETS.WIDTH_PER_GROUP - bottleneck_channels = num_groups * width_per_group * stage_channel_factor - out_channels = self.out_channels - stride_in_1x1 = cfg.RESNETS.STRIDE_IN_1X1 - norm = cfg.RESNETS.NORM - - blocks = ResNet.make_stage( - BottleneckBlock, - 3, - first_stride=2, - in_channels=out_channels // 2, - bottleneck_channels=bottleneck_channels, - out_channels=out_channels, - num_groups=num_groups, - norm=norm, - stride_in_1x1=stride_in_1x1, - ) - return nn.Sequential(*blocks) - - def _shared_roi_transform(self, features, boxes): - x = self.pooler(features, boxes) - return self.res5(x) - - def forward(self, features, proposal_boxes, gt_boxes=None): - if self.training: - """ - see https://github.com/airsplay/py-bottom-up-attention/\ - blob/master/detectron2/modeling/roi_heads/roi_heads.py - """ - raise NotImplementedError() - - assert not proposal_boxes[0].requires_grad - box_features = self._shared_roi_transform(features, proposal_boxes) - feature_pooled = box_features.mean(dim=[2, 3]) # pooled to 1x1 - obj_logits, attr_logits, pred_proposal_deltas = self.box_predictor(feature_pooled) - return obj_logits, attr_logits, pred_proposal_deltas, feature_pooled - - -class AnchorGenerator(nn.Module): - """ - For a set of image sizes and feature maps, computes a set of anchors. 
- """ - - def __init__(self, cfg, input_shape: List[ShapeSpec]): - super().__init__() - sizes = cfg.ANCHOR_GENERATOR.SIZES - aspect_ratios = cfg.ANCHOR_GENERATOR.ASPECT_RATIOS - self.strides = [x.stride for x in input_shape] - self.offset = cfg.ANCHOR_GENERATOR.OFFSET - assert 0.0 <= self.offset < 1.0, self.offset - - """ - sizes (list[list[int]]): sizes[i] is the list of anchor sizes for feat map i - 1. given in absolute lengths in units of the input image; - 2. they do not dynamically scale if the input image size changes. - aspect_ratios (list[list[float]]) - strides (list[int]): stride of each input feature. - """ - - self.num_features = len(self.strides) - self.cell_anchors = nn.ParameterList(self._calculate_anchors(sizes, aspect_ratios)) - self._spacial_feat_dim = 4 - - def _calculate_anchors(self, sizes, aspect_ratios): - # If one size (or aspect ratio) is specified and there are multiple feature - # maps, then we "broadcast" anchors of that single size (or aspect ratio) - if len(sizes) == 1: - sizes *= self.num_features - if len(aspect_ratios) == 1: - aspect_ratios *= self.num_features - assert self.num_features == len(sizes) - assert self.num_features == len(aspect_ratios) - - cell_anchors = [self.generate_cell_anchors(s, a).float() for s, a in zip(sizes, aspect_ratios)] - - return cell_anchors - - @property - def box_dim(self): - return self._spacial_feat_dim - - @property - def num_cell_anchors(self): - """ - Returns: - list[int]: Each int is the number of anchors at every pixel location, on that feature map. - """ - return [len(cell_anchors) for cell_anchors in self.cell_anchors] - - def grid_anchors(self, grid_sizes): - anchors = [] - for size, stride, base_anchors in zip(grid_sizes, self.strides, self.cell_anchors): - shift_x, shift_y = _create_grid_offsets(size, stride, self.offset, base_anchors.device) - shifts = torch.stack((shift_x, shift_y, shift_x, shift_y), dim=1) - - anchors.append((shifts.view(-1, 1, 4) + base_anchors.view(1, -1, 4)).reshape(-1, 4)) - - return anchors - - def generate_cell_anchors(self, sizes=(32, 64, 128, 256, 512), aspect_ratios=(0.5, 1, 2)): - """ - anchors are continuous geometric rectangles - centered on one feature map point sample. - We can later build the set of anchors - for the entire feature map by tiling these tensors - """ - - anchors = [] - for size in sizes: - area = size**2.0 - for aspect_ratio in aspect_ratios: - w = math.sqrt(area / aspect_ratio) - h = aspect_ratio * w - x0, y0, x1, y1 = -w / 2.0, -h / 2.0, w / 2.0, h / 2.0 - anchors.append([x0, y0, x1, y1]) - return nn.Parameter(torch.tensor(anchors)) - - def forward(self, features): - """ - Args: - features List[torch.Tensor]: list of feature maps on which to generate anchors. - Returns: - torch.Tensor: a list of #image elements. - """ - num_images = features[0].size(0) - grid_sizes = [feature_map.shape[-2:] for feature_map in features] - anchors_over_all_feature_maps = self.grid_anchors(grid_sizes) - anchors_over_all_feature_maps = torch.stack(anchors_over_all_feature_maps) - return anchors_over_all_feature_maps.unsqueeze(0).repeat_interleave(num_images, dim=0) - - -class RPNHead(nn.Module): - """ - RPN classification and regression heads. Uses a 3x3 conv to produce a shared - hidden state from which one 1x1 conv predicts objectness logits for each anchor - and a second 1x1 conv predicts bounding-box deltas specifying how to deform - each anchor into an object proposal. 
- """ - - def __init__(self, cfg, input_shape: List[ShapeSpec]): - super().__init__() - - # Standard RPN is shared across levels: - in_channels = [s.channels for s in input_shape] - assert len(set(in_channels)) == 1, "Each level must have the same channel!" - in_channels = in_channels[0] - - anchor_generator = AnchorGenerator(cfg, input_shape) - num_cell_anchors = anchor_generator.num_cell_anchors - box_dim = anchor_generator.box_dim - assert len(set(num_cell_anchors)) == 1, "Each level must have the same number of cell anchors" - num_cell_anchors = num_cell_anchors[0] - - if cfg.PROPOSAL_GENERATOR.HIDDEN_CHANNELS == -1: - hid_channels = in_channels - else: - hid_channels = cfg.PROPOSAL_GENERATOR.HIDDEN_CHANNELS - # Modifications for VG in RPN (modeling/proposal_generator/rpn.py) - # Use hidden dim instead fo the same dim as Res4 (in_channels) - - # 3x3 conv for the hidden representation - self.conv = nn.Conv2d(in_channels, hid_channels, kernel_size=3, stride=1, padding=1) - # 1x1 conv for predicting objectness logits - self.objectness_logits = nn.Conv2d(hid_channels, num_cell_anchors, kernel_size=1, stride=1) - # 1x1 conv for predicting box2box transform deltas - self.anchor_deltas = nn.Conv2d(hid_channels, num_cell_anchors * box_dim, kernel_size=1, stride=1) - - for layer in [self.conv, self.objectness_logits, self.anchor_deltas]: - nn.init.normal_(layer.weight, std=0.01) - nn.init.constant_(layer.bias, 0) - - def forward(self, features): - """ - Args: - features (list[Tensor]): list of feature maps - """ - pred_objectness_logits = [] - pred_anchor_deltas = [] - for x in features: - t = nn.functional.relu(self.conv(x)) - pred_objectness_logits.append(self.objectness_logits(t)) - pred_anchor_deltas.append(self.anchor_deltas(t)) - return pred_objectness_logits, pred_anchor_deltas - - -class RPN(nn.Module): - """ - Region Proposal Network, introduced by the Faster R-CNN paper. 
- """ - - def __init__(self, cfg, input_shape: Dict[str, ShapeSpec]): - super().__init__() - - self.min_box_side_len = cfg.PROPOSAL_GENERATOR.MIN_SIZE - self.in_features = cfg.RPN.IN_FEATURES - self.nms_thresh = cfg.RPN.NMS_THRESH - self.batch_size_per_image = cfg.RPN.BATCH_SIZE_PER_IMAGE - self.positive_fraction = cfg.RPN.POSITIVE_FRACTION - self.smooth_l1_beta = cfg.RPN.SMOOTH_L1_BETA - self.loss_weight = cfg.RPN.LOSS_WEIGHT - - self.pre_nms_topk = { - True: cfg.RPN.PRE_NMS_TOPK_TRAIN, - False: cfg.RPN.PRE_NMS_TOPK_TEST, - } - self.post_nms_topk = { - True: cfg.RPN.POST_NMS_TOPK_TRAIN, - False: cfg.RPN.POST_NMS_TOPK_TEST, - } - self.boundary_threshold = cfg.RPN.BOUNDARY_THRESH - - self.anchor_generator = AnchorGenerator(cfg, [input_shape[f] for f in self.in_features]) - self.box2box_transform = Box2BoxTransform(weights=cfg.RPN.BBOX_REG_WEIGHTS) - self.anchor_matcher = Matcher( - cfg.RPN.IOU_THRESHOLDS, - cfg.RPN.IOU_LABELS, - allow_low_quality_matches=True, - ) - self.rpn_head = RPNHead(cfg, [input_shape[f] for f in self.in_features]) - - def training(self, images, image_shapes, features, gt_boxes): - pass - - def inference(self, outputs, images, image_shapes, features, gt_boxes=None): - outputs = find_top_rpn_proposals( - outputs.predict_proposals(), - outputs.predict_objectness_logits(), - images, - image_shapes, - self.nms_thresh, - self.pre_nms_topk[self.training], - self.post_nms_topk[self.training], - self.min_box_side_len, - self.training, - ) - - results = [] - for img in outputs: - im_boxes, img_box_logits = img - img_box_logits, inds = img_box_logits.sort(descending=True) - im_boxes = im_boxes[inds] - results.append((im_boxes, img_box_logits)) - - (proposal_boxes, logits) = tuple(map(list, zip(*results))) - return proposal_boxes, logits - - def forward(self, images, image_shapes, features, gt_boxes=None): - """ - Args: - images (torch.Tensor): input images of length `N` - features (dict[str: Tensor]) - gt_instances - """ - # features is dict, key = block level, v = feature_map - features = [features[f] for f in self.in_features] - pred_objectness_logits, pred_anchor_deltas = self.rpn_head(features) - anchors = self.anchor_generator(features) - outputs = RPNOutputs( - self.box2box_transform, - self.anchor_matcher, - self.batch_size_per_image, - self.positive_fraction, - images, - pred_objectness_logits, - pred_anchor_deltas, - anchors, - self.boundary_threshold, - gt_boxes, - self.smooth_l1_beta, - ) - # For RPN-only models, the proposals are the final output - - if self.training: - raise NotImplementedError() - return self.training(outputs, images, image_shapes, features, gt_boxes) - else: - return self.inference(outputs, images, image_shapes, features, gt_boxes) - - -class FastRCNNOutputLayers(nn.Module): - """ - Two linear layers for predicting Fast R-CNN outputs: - (1) proposal-to-detection box regression deltas - (2) classification scores - """ - - def __init__( - self, - input_size, - num_classes, - cls_agnostic_bbox_reg, - box_dim=4, - use_attr=False, - num_attrs=-1, - ): - """ - Args: - input_size (int): channels, or (channels, height, width) - num_classes (int) - cls_agnostic_bbox_reg (bool) - box_dim (int) - """ - super().__init__() - - if not isinstance(input_size, int): - input_size = np.prod(input_size) - - # (do + 1 for background class) - self.cls_score = nn.Linear(input_size, num_classes + 1) - num_bbox_reg_classes = 1 if cls_agnostic_bbox_reg else num_classes - self.bbox_pred = nn.Linear(input_size, num_bbox_reg_classes * box_dim) - - self.use_attr = use_attr - 
if use_attr: - """ - Modifications for VG in RoI heads - Embedding: {num_classes + 1} --> {input_size // 8} - Linear: {input_size + input_size // 8} --> {input_size // 4} - Linear: {input_size // 4} --> {num_attrs + 1} - """ - self.cls_embedding = nn.Embedding(num_classes + 1, input_size // 8) - self.fc_attr = nn.Linear(input_size + input_size // 8, input_size // 4) - self.attr_score = nn.Linear(input_size // 4, num_attrs + 1) - - nn.init.normal_(self.cls_score.weight, std=0.01) - nn.init.normal_(self.bbox_pred.weight, std=0.001) - for item in [self.cls_score, self.bbox_pred]: - nn.init.constant_(item.bias, 0) - - def forward(self, roi_features): - if roi_features.dim() > 2: - roi_features = torch.flatten(roi_features, start_dim=1) - scores = self.cls_score(roi_features) - proposal_deltas = self.bbox_pred(roi_features) - if self.use_attr: - _, max_class = scores.max(-1) # [b, c] --> [b] - cls_emb = self.cls_embedding(max_class) # [b] --> [b, 256] - roi_features = torch.cat([roi_features, cls_emb], -1) # [b, 2048] + [b, 256] --> [b, 2304] - roi_features = self.fc_attr(roi_features) - roi_features = nn.functional.relu(roi_features) - attr_scores = self.attr_score(roi_features) - return scores, attr_scores, proposal_deltas - else: - return scores, proposal_deltas - - -class GeneralizedRCNN(nn.Module): - def __init__(self, cfg): - super().__init__() - - self.device = torch.device(cfg.MODEL.DEVICE) - self.backbone = build_backbone(cfg) - self.proposal_generator = RPN(cfg, self.backbone.output_shape()) - self.roi_heads = Res5ROIHeads(cfg, self.backbone.output_shape()) - self.roi_outputs = ROIOutputs(cfg) - self.to(self.device) - - @classmethod - def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): - config = kwargs.pop("config", None) - state_dict = kwargs.pop("state_dict", None) - cache_dir = kwargs.pop("cache_dir", None) - from_tf = kwargs.pop("from_tf", False) - force_download = kwargs.pop("force_download", False) - resume_download = kwargs.pop("resume_download", False) - proxies = kwargs.pop("proxies", None) - local_files_only = kwargs.pop("local_files_only", False) - use_cdn = kwargs.pop("use_cdn", True) - - # Load config if we don't provide a configuration - if not isinstance(config, Config): - config_path = config if config is not None else pretrained_model_name_or_path - # try: - config = Config.from_pretrained( - config_path, - cache_dir=cache_dir, - force_download=force_download, - resume_download=resume_download, - proxies=proxies, - local_files_only=local_files_only, - ) - - # Load model - if pretrained_model_name_or_path is not None: - if os.path.isdir(pretrained_model_name_or_path): - if os.path.isfile(os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)): - # Load from a PyTorch checkpoint - archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME) - else: - raise EnvironmentError( - "Error no file named {} found in directory {} ".format( - WEIGHTS_NAME, - pretrained_model_name_or_path, - ) - ) - elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path): - archive_file = pretrained_model_name_or_path - elif os.path.isfile(pretrained_model_name_or_path + ".index"): - assert from_tf, "We found a TensorFlow checkpoint at {}, please set from_tf to True to load from this checkpoint".format( - pretrained_model_name_or_path + ".index" - ) - archive_file = pretrained_model_name_or_path + ".index" - else: - archive_file = hf_bucket_url( - pretrained_model_name_or_path, - filename=WEIGHTS_NAME, - 
use_cdn=use_cdn, - ) - - try: - # Load from URL or cache if already cached - resolved_archive_file = cached_path( - archive_file, - cache_dir=cache_dir, - force_download=force_download, - proxies=proxies, - resume_download=resume_download, - local_files_only=local_files_only, - ) - if resolved_archive_file is None: - raise EnvironmentError - except EnvironmentError: - msg = f"Can't load weights for '{pretrained_model_name_or_path}'." - raise EnvironmentError(msg) - - if resolved_archive_file == archive_file: - print("loading weights file {}".format(archive_file)) - else: - print("loading weights file {} from cache at {}".format(archive_file, resolved_archive_file)) - else: - resolved_archive_file = None - - # Instantiate model. - model = cls(config) - - if state_dict is None: - try: - try: - state_dict = torch.load(resolved_archive_file, map_location="cpu") - except Exception: - state_dict = load_checkpoint(resolved_archive_file) - - except Exception: - raise OSError( - "Unable to load weights from pytorch checkpoint file. " - "If you tried to load a PyTorch model from a TF 2.0 checkpoint, please set from_tf=True. " - ) - - missing_keys = [] - unexpected_keys = [] - error_msgs = [] - - # Convert old format to new format if needed from a PyTorch state_dict - old_keys = [] - new_keys = [] - for key in state_dict.keys(): - new_key = None - if "gamma" in key: - new_key = key.replace("gamma", "weight") - if "beta" in key: - new_key = key.replace("beta", "bias") - if new_key: - old_keys.append(key) - new_keys.append(new_key) - for old_key, new_key in zip(old_keys, new_keys): - state_dict[new_key] = state_dict.pop(old_key) - - # copy state_dict so _load_from_state_dict can modify it - metadata = getattr(state_dict, "_metadata", None) - state_dict = state_dict.copy() - if metadata is not None: - state_dict._metadata = metadata - - model_to_load = model - model_to_load.load_state_dict(state_dict) - - if model.__class__.__name__ != model_to_load.__class__.__name__: - base_model_state_dict = model_to_load.state_dict().keys() - head_model_state_dict_without_base_prefix = [ - key.split(cls.base_model_prefix + ".")[-1] for key in model.state_dict().keys() - ] - missing_keys.extend(head_model_state_dict_without_base_prefix - base_model_state_dict) - - if len(unexpected_keys) > 0: - print( - f"Some weights of the model checkpoint at {pretrained_model_name_or_path} were not used when" - f" initializing {model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are" - f" initializing {model.__class__.__name__} from the checkpoint of a model trained on another task or" - " with another architecture (e.g. initializing a BertForSequenceClassification model from a" - " BertForPreTraining model).\n- This IS NOT expected if you are initializing" - f" {model.__class__.__name__} from the checkpoint of a model that you expect to be exactly identical" - " (initializing a BertForSequenceClassification model from a BertForSequenceClassification model)." - ) - else: - print(f"All model checkpoint weights were used when initializing {model.__class__.__name__}.\n") - if len(missing_keys) > 0: - print( - f"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at" - f" {pretrained_model_name_or_path} and are newly initialized: {missing_keys}\nYou should probably" - " TRAIN this model on a down-stream task to be able to use it for predictions and inference." 
- ) - else: - print( - f"All the weights of {model.__class__.__name__} were initialized from the model checkpoint at" - f" {pretrained_model_name_or_path}.\nIf your task is similar to the task the model of the checkpoint" - f" was trained on, you can already use {model.__class__.__name__} for predictions without further" - " training." - ) - if len(error_msgs) > 0: - raise RuntimeError( - "Error(s) in loading state_dict for {}:\n\t{}".format( - model.__class__.__name__, "\n\t".join(error_msgs) - ) - ) - # Set model in evaluation mode to deactivate DropOut modules by default - model.eval() - - return model - - def forward( - self, - images, - image_shapes, - gt_boxes=None, - proposals=None, - scales_yx=None, - **kwargs, - ): - """ - kwargs: - max_detections (int), return_tensors {"np", "pt", None}, padding {None, - "max_detections"}, pad_value (int), location = {"cuda", "cpu"} - """ - if self.training: - raise NotImplementedError() - return self.inference( - images=images, - image_shapes=image_shapes, - gt_boxes=gt_boxes, - proposals=proposals, - scales_yx=scales_yx, - **kwargs, - ) - - @torch.no_grad() - def inference( - self, - images, - image_shapes, - gt_boxes=None, - proposals=None, - scales_yx=None, - **kwargs, - ): - # run images through backbone - original_sizes = image_shapes * scales_yx - features = self.backbone(images) - - # generate proposals if none are available - if proposals is None: - proposal_boxes, _ = self.proposal_generator(images, image_shapes, features, gt_boxes) - else: - assert proposals is not None - - # pool object features from either gt_boxes, or from proposals - obj_logits, attr_logits, box_deltas, feature_pooled = self.roi_heads(features, proposal_boxes, gt_boxes) - - # prepare FRCNN Outputs and select top proposals - boxes, classes, class_probs, attrs, attr_probs, roi_features = self.roi_outputs( - obj_logits=obj_logits, - attr_logits=attr_logits, - box_deltas=box_deltas, - pred_boxes=proposal_boxes, - features=feature_pooled, - sizes=image_shapes, - scales=scales_yx, - ) - - # will we pad??? 
- subset_kwargs = { - "max_detections": kwargs.get("max_detections", None), - "return_tensors": kwargs.get("return_tensors", None), - "pad_value": kwargs.get("pad_value", 0), - "padding": kwargs.get("padding", None), - } - preds_per_image = torch.tensor([p.size(0) for p in boxes]) - boxes = pad_list_tensors(boxes, preds_per_image, **subset_kwargs) - classes = pad_list_tensors(classes, preds_per_image, **subset_kwargs) - class_probs = pad_list_tensors(class_probs, preds_per_image, **subset_kwargs) - attrs = pad_list_tensors(attrs, preds_per_image, **subset_kwargs) - attr_probs = pad_list_tensors(attr_probs, preds_per_image, **subset_kwargs) - roi_features = pad_list_tensors(roi_features, preds_per_image, **subset_kwargs) - subset_kwargs["padding"] = None - preds_per_image = pad_list_tensors(preds_per_image, None, **subset_kwargs) - sizes = pad_list_tensors(image_shapes, None, **subset_kwargs) - normalized_boxes = norm_box(boxes, original_sizes) - return OrderedDict( - { - "obj_ids": classes, - "obj_probs": class_probs, - "attr_ids": attrs, - "attr_probs": attr_probs, - "boxes": boxes, - "sizes": sizes, - "preds_per_image": preds_per_image, - "roi_features": roi_features, - "normalized_boxes": normalized_boxes, - } - ) diff --git a/examples/research_projects/visual_bert/processing_image.py b/examples/research_projects/visual_bert/processing_image.py deleted file mode 100644 index 65f8f6cd377..00000000000 --- a/examples/research_projects/visual_bert/processing_image.py +++ /dev/null @@ -1,151 +0,0 @@ -""" -coding=utf-8 -Copyright 2018, Antonio Mendoza Hao Tan, Mohit Bansal -Adapted From Facebook Inc, Detectron2 - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License.import copy -""" - -import sys -from typing import Tuple - -import numpy as np -import torch -from PIL import Image -from torch import nn - -from transformers.image_utils import PILImageResampling -from utils import img_tensorize - - -class ResizeShortestEdge: - def __init__(self, short_edge_length, max_size=sys.maxsize): - """ - Args: - short_edge_length (list[min, max]) - max_size (int): maximum allowed longest edge length. 
- """ - self.interp_method = "bilinear" - self.max_size = max_size - self.short_edge_length = short_edge_length - - def __call__(self, imgs): - img_augs = [] - for img in imgs: - h, w = img.shape[:2] - # later: provide list and randomly choose index for resize - size = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1) - if size == 0: - return img - scale = size * 1.0 / min(h, w) - if h < w: - newh, neww = size, scale * w - else: - newh, neww = scale * h, size - if max(newh, neww) > self.max_size: - scale = self.max_size * 1.0 / max(newh, neww) - newh = newh * scale - neww = neww * scale - neww = int(neww + 0.5) - newh = int(newh + 0.5) - - if img.dtype == np.uint8: - pil_image = Image.fromarray(img) - pil_image = pil_image.resize((neww, newh), PILImageResampling.BILINEAR) - img = np.asarray(pil_image) - else: - img = img.permute(2, 0, 1).unsqueeze(0) # 3, 0, 1) # hw(c) -> nchw - img = nn.functional.interpolate( - img, (newh, neww), mode=self.interp_method, align_corners=False - ).squeeze(0) - img_augs.append(img) - - return img_augs - - -class Preprocess: - def __init__(self, cfg): - self.aug = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST) - self.input_format = cfg.INPUT.FORMAT - self.size_divisibility = cfg.SIZE_DIVISIBILITY - self.pad_value = cfg.PAD_VALUE - self.max_image_size = cfg.INPUT.MAX_SIZE_TEST - self.device = cfg.MODEL.DEVICE - self.pixel_std = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1) - self.pixel_mean = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1) - self.normalizer = lambda x: (x - self.pixel_mean) / self.pixel_std - - def pad(self, images): - max_size = tuple(max(s) for s in zip(*[img.shape for img in images])) - image_sizes = [im.shape[-2:] for im in images] - images = [ - nn.functional.pad( - im, - [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]], - value=self.pad_value, - ) - for size, im in zip(image_sizes, images) - ] - - return torch.stack(images), torch.tensor(image_sizes) - - def __call__(self, images, single_image=False): - with torch.no_grad(): - if not isinstance(images, list): - images = [images] - if single_image: - assert len(images) == 1 - for i in range(len(images)): - if isinstance(images[i], torch.Tensor): - images.insert(i, images.pop(i).to(self.device).float()) - elif not isinstance(images[i], torch.Tensor): - images.insert( - i, - torch.as_tensor(img_tensorize(images.pop(i), input_format=self.input_format)) - .to(self.device) - .float(), - ) - # resize smallest edge - raw_sizes = torch.tensor([im.shape[:2] for im in images]) - images = self.aug(images) - # transpose images and convert to torch tensors - # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images] - # now normalize before pad to avoid useless arithmetic - images = [self.normalizer(x) for x in images] - # now pad them to do the following operations - images, sizes = self.pad(images) - # Normalize - - if self.size_divisibility > 0: - raise NotImplementedError() - # pad - scales_yx = torch.true_divide(raw_sizes, sizes) - if single_image: - return images[0], sizes[0], scales_yx[0] - else: - return images, sizes, scales_yx - - -def _scale_box(boxes, scale_yx): - boxes[:, 0::2] *= scale_yx[:, 1] - boxes[:, 1::2] *= scale_yx[:, 0] - return boxes - - -def _clip_box(tensor, box_size: Tuple[int, int]): - assert torch.isfinite(tensor).all(), "Box tensor contains infinite or NaN!" 
- h, w = box_size - tensor[:, 0].clamp_(min=0, max=w) - tensor[:, 1].clamp_(min=0, max=h) - tensor[:, 2].clamp_(min=0, max=w) - tensor[:, 3].clamp_(min=0, max=h) diff --git a/examples/research_projects/visual_bert/requirements.txt b/examples/research_projects/visual_bert/requirements.txt deleted file mode 100644 index e2778663a53..00000000000 --- a/examples/research_projects/visual_bert/requirements.txt +++ /dev/null @@ -1,98 +0,0 @@ -appdirs==1.4.3 -argon2-cffi==20.1.0 -async-generator==1.10 -attrs==20.2.0 -backcall==0.2.0 -CacheControl==0.12.6 -certifi==2024.7.4 -cffi==1.14.2 -chardet==3.0.4 -click==7.1.2 -colorama==0.4.3 -contextlib2==0.6.0 -cycler==0.10.0 -datasets==1.0.0 -decorator==4.4.2 -defusedxml==0.6.0 -dill==0.3.2 -distlib==0.3.0 -distro==1.4.0 -entrypoints==0.3 -filelock==3.0.12 -future==0.18.3 -html5lib==1.0.1 -idna==3.7 -ipaddr==2.2.0 -ipykernel==5.3.4 -ipython -ipython-genutils==0.2.0 -ipywidgets==7.5.1 -jedi==0.17.2 -Jinja2>=2.11.3 -joblib==1.2.0 -jsonschema==3.2.0 -jupyter==1.0.0 -jupyter-client==6.1.7 -jupyter-console==6.2.0 -jupyter-core==4.11.2 -jupyterlab-pygments==0.1.1 -kiwisolver==1.2.0 -lockfile==0.12.2 -MarkupSafe==1.1.1 -matplotlib==3.3.1 -mistune==2.0.3 -msgpack==0.6.2 -nbclient==0.5.0 -nbconvert==6.5.1 -nbformat==5.0.7 -nest-asyncio==1.4.0 -notebook==6.4.12 -numpy==1.22.0 -opencv-python==4.8.1.78 -packaging==20.3 -pandas==1.1.2 -pandocfilters==1.4.2 -parso==0.7.1 -pep517==0.8.2 -pexpect==4.8.0 -pickleshare==0.7.5 -Pillow>=8.1.1 -progress==1.5 -prometheus-client==0.8.0 -prompt-toolkit==3.0.7 -ptyprocess==0.6.0 -pyaml==20.4.0 -pyarrow==15.0.0 -pycparser==2.20 -Pygments>=2.7.4 -pyparsing==2.4.6 -pyrsistent==0.16.0 -python-dateutil==2.8.1 -pytoml==0.1.21 -pytz==2020.1 -PyYAML>=5.4 -pyzmq==19.0.2 -qtconsole==4.7.7 -QtPy==1.9.0 -regex==2020.7.14 -requests==2.32.2 -retrying==1.3.3 -sacremoses==0.0.43 -Send2Trash==1.5.0 -sentencepiece==0.1.91 -six==1.14.0 -terminado==0.8.3 -testpath==0.4.4 -tokenizers==0.8.1rc2 -torch==2.2.0 -torchvision==0.7.0 -tornado==6.4.2 -tqdm==4.66.3 -traitlets -git+https://github.com/huggingface/transformers.git -urllib3==1.26.19 -wcwidth==0.2.5 -webencodings==0.5.1 -wget==3.2 -widgetsnbextension==3.5.1 -xxhash==2.0.0 diff --git a/examples/research_projects/visual_bert/utils.py b/examples/research_projects/visual_bert/utils.py deleted file mode 100644 index 995fbd2c19a..00000000000 --- a/examples/research_projects/visual_bert/utils.py +++ /dev/null @@ -1,554 +0,0 @@ -""" -coding=utf-8 -Copyright 2018, Antonio Mendoza Hao Tan, Mohit Bansal, Huggingface team :) -Adapted From Facebook Inc, Detectron2 - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License.import copy -""" - -import copy -import fnmatch -import json -import os -import pickle as pkl -import shutil -import sys -import tarfile -import tempfile -from collections import OrderedDict -from contextlib import contextmanager -from functools import partial -from io import BytesIO -from pathlib import Path -from urllib.parse import urlparse -from zipfile import ZipFile, is_zipfile - -import cv2 -import numpy as np -import requests -import wget -from filelock import FileLock -from huggingface_hub.utils import insecure_hashlib -from PIL import Image -from tqdm.auto import tqdm -from yaml import Loader, dump, load - - -try: - import torch - - _torch_available = True -except ImportError: - _torch_available = False - - -try: - from torch.hub import _get_torch_home - - torch_cache_home = _get_torch_home() -except ImportError: - torch_cache_home = os.path.expanduser( - os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch")) - ) - -default_cache_path = os.path.join(torch_cache_home, "transformers") - -CLOUDFRONT_DISTRIB_PREFIX = "https://cdn.huggingface.co" -S3_BUCKET_PREFIX = "https://s3.amazonaws.com/models.huggingface.co/bert" -PATH = "/".join(str(Path(__file__).resolve()).split("/")[:-1]) -CONFIG = os.path.join(PATH, "config.yaml") -ATTRIBUTES = os.path.join(PATH, "attributes.txt") -OBJECTS = os.path.join(PATH, "objects.txt") -PYTORCH_PRETRAINED_BERT_CACHE = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path) -PYTORCH_TRANSFORMERS_CACHE = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE) -TRANSFORMERS_CACHE = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE) -WEIGHTS_NAME = "pytorch_model.bin" -CONFIG_NAME = "config.yaml" - - -def load_labels(objs=OBJECTS, attrs=ATTRIBUTES): - vg_classes = [] - with open(objs) as f: - for object in f.readlines(): - vg_classes.append(object.split(",")[0].lower().strip()) - - vg_attrs = [] - with open(attrs) as f: - for object in f.readlines(): - vg_attrs.append(object.split(",")[0].lower().strip()) - return vg_classes, vg_attrs - - -def load_checkpoint(ckp): - r = OrderedDict() - with open(ckp, "rb") as f: - ckp = pkl.load(f)["model"] - for k in copy.deepcopy(list(ckp.keys())): - v = ckp.pop(k) - if isinstance(v, np.ndarray): - v = torch.tensor(v) - else: - assert isinstance(v, torch.tensor), type(v) - r[k] = v - return r - - -class Config: - _pointer = {} - - def __init__(self, dictionary: dict, name: str = "root", level=0): - self._name = name - self._level = level - d = {} - for k, v in dictionary.items(): - if v is None: - raise ValueError() - k = copy.deepcopy(k) - v = copy.deepcopy(v) - if isinstance(v, dict): - v = Config(v, name=k, level=level + 1) - d[k] = v - setattr(self, k, v) - - self._pointer = d - - def __repr__(self): - return str(list((self._pointer.keys()))) - - def __setattr__(self, key, val): - self.__dict__[key] = val - self.__dict__[key.upper()] = val - levels = key.split(".") - last_level = len(levels) - 1 - pointer = self._pointer - if len(levels) > 1: - for i, l in enumerate(levels): - if hasattr(self, l) and isinstance(getattr(self, l), Config): - setattr(getattr(self, l), ".".join(levels[i:]), val) - if l == last_level: - pointer[l] = val - else: - pointer = pointer[l] - - def to_dict(self): - return self._pointer - - def dump_yaml(self, data, file_name): - with open(f"{file_name}", "w") as stream: - dump(data, stream) - - def dump_json(self, data, file_name): 
- with open(f"{file_name}", "w") as stream: - json.dump(data, stream) - - @staticmethod - def load_yaml(config): - with open(config) as stream: - data = load(stream, Loader=Loader) - return data - - def __str__(self): - t = " " - if self._name != "root": - r = f"{t * (self._level-1)}{self._name}:\n" - else: - r = "" - level = self._level - for i, (k, v) in enumerate(self._pointer.items()): - if isinstance(v, Config): - r += f"{t * (self._level)}{v}\n" - self._level += 1 - else: - r += f"{t * (self._level)}{k}: {v} ({type(v).__name__})\n" - self._level = level - return r[:-1] - - @classmethod - def from_pretrained(cls, pretrained_model_name_or_path: str, **kwargs): - config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs) - return cls(config_dict) - - @classmethod - def get_config_dict(cls, pretrained_model_name_or_path: str, **kwargs): - cache_dir = kwargs.pop("cache_dir", None) - force_download = kwargs.pop("force_download", False) - resume_download = kwargs.pop("resume_download", False) - proxies = kwargs.pop("proxies", None) - local_files_only = kwargs.pop("local_files_only", False) - - if os.path.isdir(pretrained_model_name_or_path): - config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME) - elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path): - config_file = pretrained_model_name_or_path - else: - config_file = hf_bucket_url(pretrained_model_name_or_path, filename=CONFIG_NAME, use_cdn=False) - - try: - # Load from URL or cache if already cached - resolved_config_file = cached_path( - config_file, - cache_dir=cache_dir, - force_download=force_download, - proxies=proxies, - resume_download=resume_download, - local_files_only=local_files_only, - ) - # Load config dict - if resolved_config_file is None: - raise EnvironmentError - - config_file = Config.load_yaml(resolved_config_file) - - except EnvironmentError: - msg = "Can't load config for" - raise EnvironmentError(msg) - - if resolved_config_file == config_file: - print("loading configuration file from path") - else: - print("loading configuration file cache") - - return Config.load_yaml(resolved_config_file), kwargs - - -# quick compare tensors -def compare(in_tensor): - out_tensor = torch.load("dump.pt", map_location=in_tensor.device) - n1 = in_tensor.numpy() - n2 = out_tensor.numpy()[0] - print(n1.shape, n1[0, 0, :5]) - print(n2.shape, n2[0, 0, :5]) - assert np.allclose(n1, n2, rtol=0.01, atol=0.1), ( - f"{sum([1 for x in np.isclose(n1, n2, rtol=0.01, atol=0.1).flatten() if x is False])/len(n1.flatten())*100:.4f} %" - " element-wise mismatch" - ) - raise Exception("tensors are all good") - - # Hugging face functions below - - -def is_remote_url(url_or_filename): - parsed = urlparse(url_or_filename) - return parsed.scheme in ("http", "https") - - -def hf_bucket_url(model_id: str, filename: str, use_cdn=True) -> str: - endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX - legacy_format = "/" not in model_id - if legacy_format: - return f"{endpoint}/{model_id}-{filename}" - else: - return f"{endpoint}/{model_id}/{filename}" - - -def http_get( - url, - temp_file, - proxies=None, - resume_size=0, - user_agent=None, -): - ua = "python/{}".format(sys.version.split()[0]) - if _torch_available: - ua += "; torch/{}".format(torch.__version__) - if isinstance(user_agent, dict): - ua += "; " + "; ".join("{}/{}".format(k, v) for k, v in user_agent.items()) - elif isinstance(user_agent, str): - ua += "; " + user_agent - headers = 
{"user-agent": ua} - if resume_size > 0: - headers["Range"] = "bytes=%d-" % (resume_size,) - response = requests.get(url, stream=True, proxies=proxies, headers=headers) - if response.status_code == 416: # Range not satisfiable - return - content_length = response.headers.get("Content-Length") - total = resume_size + int(content_length) if content_length is not None else None - progress = tqdm( - unit="B", - unit_scale=True, - total=total, - initial=resume_size, - desc="Downloading", - ) - for chunk in response.iter_content(chunk_size=1024): - if chunk: # filter out keep-alive new chunks - progress.update(len(chunk)) - temp_file.write(chunk) - progress.close() - - -def get_from_cache( - url, - cache_dir=None, - force_download=False, - proxies=None, - etag_timeout=10, - resume_download=False, - user_agent=None, - local_files_only=False, -): - if cache_dir is None: - cache_dir = TRANSFORMERS_CACHE - if isinstance(cache_dir, Path): - cache_dir = str(cache_dir) - - os.makedirs(cache_dir, exist_ok=True) - - etag = None - if not local_files_only: - try: - response = requests.head(url, allow_redirects=True, proxies=proxies, timeout=etag_timeout) - if response.status_code == 200: - etag = response.headers.get("ETag") - except (EnvironmentError, requests.exceptions.Timeout): - # etag is already None - pass - - filename = url_to_filename(url, etag) - - # get cache path to put the file - cache_path = os.path.join(cache_dir, filename) - - # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible. - # try to get the last downloaded one - if etag is None: - if os.path.exists(cache_path): - return cache_path - else: - matching_files = [ - file - for file in fnmatch.filter(os.listdir(cache_dir), filename + ".*") - if not file.endswith(".json") and not file.endswith(".lock") - ] - if len(matching_files) > 0: - return os.path.join(cache_dir, matching_files[-1]) - else: - # If files cannot be found and local_files_only=True, - # the models might've been found if local_files_only=False - # Notify the user about that - if local_files_only: - raise ValueError( - "Cannot find the requested files in the cached path and outgoing traffic has been" - " disabled. To enable model look-ups and downloads online, set 'local_files_only'" - " to False." - ) - return None - - # From now on, etag is not None. - if os.path.exists(cache_path) and not force_download: - return cache_path - - # Prevent parallel downloads of the same file with a lock. - lock_path = cache_path + ".lock" - with FileLock(lock_path): - # If the download just completed while the lock was activated. - if os.path.exists(cache_path) and not force_download: - # Even if returning early like here, the lock will be released. - return cache_path - - if resume_download: - incomplete_path = cache_path + ".incomplete" - - @contextmanager - def _resumable_file_manager(): - with open(incomplete_path, "a+b") as f: - yield f - - temp_file_manager = _resumable_file_manager - if os.path.exists(incomplete_path): - resume_size = os.stat(incomplete_path).st_size - else: - resume_size = 0 - else: - temp_file_manager = partial(tempfile.NamedTemporaryFile, dir=cache_dir, delete=False) - resume_size = 0 - - # Download to temporary file, then copy to cache dir once finished. - # Otherwise you get corrupt cache entries if the download gets interrupted. 
- with temp_file_manager() as temp_file: - print( - "%s not found in cache or force_download set to True, downloading to %s", - url, - temp_file.name, - ) - - http_get( - url, - temp_file, - proxies=proxies, - resume_size=resume_size, - user_agent=user_agent, - ) - - os.replace(temp_file.name, cache_path) - - meta = {"url": url, "etag": etag} - meta_path = cache_path + ".json" - with open(meta_path, "w") as meta_file: - json.dump(meta, meta_file) - - return cache_path - - -def url_to_filename(url, etag=None): - url_bytes = url.encode("utf-8") - url_hash = insecure_hashlib.sha256(url_bytes) - filename = url_hash.hexdigest() - - if etag: - etag_bytes = etag.encode("utf-8") - etag_hash = insecure_hashlib.sha256(etag_bytes) - filename += "." + etag_hash.hexdigest() - - if url.endswith(".h5"): - filename += ".h5" - - return filename - - -def cached_path( - url_or_filename, - cache_dir=None, - force_download=False, - proxies=None, - resume_download=False, - user_agent=None, - extract_compressed_file=False, - force_extract=False, - local_files_only=False, -): - if cache_dir is None: - cache_dir = TRANSFORMERS_CACHE - if isinstance(url_or_filename, Path): - url_or_filename = str(url_or_filename) - if isinstance(cache_dir, Path): - cache_dir = str(cache_dir) - - if is_remote_url(url_or_filename): - # URL, so get it from the cache (downloading if necessary) - output_path = get_from_cache( - url_or_filename, - cache_dir=cache_dir, - force_download=force_download, - proxies=proxies, - resume_download=resume_download, - user_agent=user_agent, - local_files_only=local_files_only, - ) - elif os.path.exists(url_or_filename): - # File, and it exists. - output_path = url_or_filename - elif urlparse(url_or_filename).scheme == "": - # File, but it doesn't exist. - raise EnvironmentError("file {} not found".format(url_or_filename)) - else: - # Something unknown - raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename)) - - if extract_compressed_file: - if not is_zipfile(output_path) and not tarfile.is_tarfile(output_path): - return output_path - - # Path where we extract compressed archives - # We avoid '.' 
in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/" - output_dir, output_file = os.path.split(output_path) - output_extract_dir_name = output_file.replace(".", "-") + "-extracted" - output_path_extracted = os.path.join(output_dir, output_extract_dir_name) - - if os.path.isdir(output_path_extracted) and os.listdir(output_path_extracted) and not force_extract: - return output_path_extracted - - # Prevent parallel extractions - lock_path = output_path + ".lock" - with FileLock(lock_path): - shutil.rmtree(output_path_extracted, ignore_errors=True) - os.makedirs(output_path_extracted) - if is_zipfile(output_path): - with ZipFile(output_path, "r") as zip_file: - zip_file.extractall(output_path_extracted) - zip_file.close() - elif tarfile.is_tarfile(output_path): - tar_file = tarfile.open(output_path) - tar_file.extractall(output_path_extracted) - tar_file.close() - else: - raise EnvironmentError("Archive format of {} could not be identified".format(output_path)) - - return output_path_extracted - - return output_path - - -def get_data(query, delim=","): - assert isinstance(query, str) - if os.path.isfile(query): - with open(query) as f: - data = eval(f.read()) - else: - req = requests.get(query) - try: - data = requests.json() - except Exception: - data = req.content.decode() - assert data is not None, "could not connect" - try: - data = eval(data) - except Exception: - data = data.split("\n") - req.close() - return data - - -def get_image_from_url(url): - response = requests.get(url) - img = np.array(Image.open(BytesIO(response.content))) - return img - - -# to load legacy frcnn checkpoint from detectron -def load_frcnn_pkl_from_url(url): - fn = url.split("/")[-1] - if fn not in os.listdir(os.getcwd()): - wget.download(url) - with open(fn, "rb") as stream: - weights = pkl.load(stream) - model = weights.pop("model") - new = {} - for k, v in model.items(): - new[k] = torch.from_numpy(v) - if "running_var" in k: - zero = torch.tensor([0]) - k2 = k.replace("running_var", "num_batches_tracked") - new[k2] = zero - return new - - -def get_demo_path(): - print(f"{os.path.abspath(os.path.join(PATH, os.pardir))}/demo.ipynb") - - -def img_tensorize(im, input_format="RGB"): - assert isinstance(im, str) - if os.path.isfile(im): - img = cv2.imread(im) - else: - img = get_image_from_url(im) - assert img is not None, f"could not connect to: {im}" - img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) - if input_format == "RGB": - img = img[:, :, ::-1] - return img - - -def chunk(images, batch=1): - return (images[i : i + batch] for i in range(0, len(images), batch)) diff --git a/examples/research_projects/visual_bert/visualizing_image.py b/examples/research_projects/visual_bert/visualizing_image.py deleted file mode 100644 index dcfd8426ff4..00000000000 --- a/examples/research_projects/visual_bert/visualizing_image.py +++ /dev/null @@ -1,500 +0,0 @@ -""" -coding=utf-8 -Copyright 2018, Antonio Mendoza Hao Tan, Mohit Bansal -Adapted From Facebook Inc, Detectron2 - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License.import copy -""" - -import colorsys -import io - -import cv2 -import matplotlib as mpl -import matplotlib.colors as mplc -import matplotlib.figure as mplfigure -import numpy as np -import torch -from matplotlib.backends.backend_agg import FigureCanvasAgg - -from utils import img_tensorize - - -_SMALL_OBJ = 1000 - - -class SingleImageViz: - def __init__( - self, - img, - scale=1.2, - edgecolor="g", - alpha=0.5, - linestyle="-", - saveas="test_out.jpg", - rgb=True, - pynb=False, - id2obj=None, - id2attr=None, - pad=0.7, - ): - """ - img: an RGB image of shape (H, W, 3). - """ - if isinstance(img, torch.Tensor): - img = img.numpy().astype("np.uint8") - if isinstance(img, str): - img = img_tensorize(img) - assert isinstance(img, np.ndarray) - - width, height = img.shape[1], img.shape[0] - fig = mplfigure.Figure(frameon=False) - dpi = fig.get_dpi() - width_in = (width * scale + 1e-2) / dpi - height_in = (height * scale + 1e-2) / dpi - fig.set_size_inches(width_in, height_in) - ax = fig.add_axes([0.0, 0.0, 1.0, 1.0]) - ax.axis("off") - ax.set_xlim(0.0, width) - ax.set_ylim(height) - - self.saveas = saveas - self.rgb = rgb - self.pynb = pynb - self.img = img - self.edgecolor = edgecolor - self.alpha = 0.5 - self.linestyle = linestyle - self.font_size = int(np.sqrt(min(height, width)) * scale // 3) - self.width = width - self.height = height - self.scale = scale - self.fig = fig - self.ax = ax - self.pad = pad - self.id2obj = id2obj - self.id2attr = id2attr - self.canvas = FigureCanvasAgg(fig) - - def add_box(self, box, color=None): - if color is None: - color = self.edgecolor - (x0, y0, x1, y1) = box - width = x1 - x0 - height = y1 - y0 - self.ax.add_patch( - mpl.patches.Rectangle( - (x0, y0), - width, - height, - fill=False, - edgecolor=color, - linewidth=self.font_size // 3, - alpha=self.alpha, - linestyle=self.linestyle, - ) - ) - - def draw_boxes(self, boxes, obj_ids=None, obj_scores=None, attr_ids=None, attr_scores=None): - if len(boxes.shape) > 2: - boxes = boxes[0] - if len(obj_ids.shape) > 1: - obj_ids = obj_ids[0] - if len(obj_scores.shape) > 1: - obj_scores = obj_scores[0] - if len(attr_ids.shape) > 1: - attr_ids = attr_ids[0] - if len(attr_scores.shape) > 1: - attr_scores = attr_scores[0] - if isinstance(boxes, torch.Tensor): - boxes = boxes.numpy() - if isinstance(boxes, list): - boxes = np.array(boxes) - assert isinstance(boxes, np.ndarray) - areas = np.prod(boxes[:, 2:] - boxes[:, :2], axis=1) - sorted_idxs = np.argsort(-areas).tolist() - boxes = boxes[sorted_idxs] if boxes is not None else None - obj_ids = obj_ids[sorted_idxs] if obj_ids is not None else None - obj_scores = obj_scores[sorted_idxs] if obj_scores is not None else None - attr_ids = attr_ids[sorted_idxs] if attr_ids is not None else None - attr_scores = attr_scores[sorted_idxs] if attr_scores is not None else None - - assigned_colors = [self._random_color(maximum=1) for _ in range(len(boxes))] - assigned_colors = [assigned_colors[idx] for idx in sorted_idxs] - if obj_ids is not None: - labels = self._create_text_labels_attr(obj_ids, obj_scores, attr_ids, attr_scores) - for i in range(len(boxes)): - color = assigned_colors[i] - self.add_box(boxes[i], color) - self.draw_labels(labels[i], boxes[i], color) - - def draw_labels(self, label, box, color): - x0, y0, x1, y1 = box - text_pos = (x0, y0) - instance_area = (y1 - y0) * (x1 - x0) - small = _SMALL_OBJ * self.scale - if instance_area < small or y1 - y0 < 40 * self.scale: 
- if y1 >= self.height - 5: - text_pos = (x1, y0) - else: - text_pos = (x0, y1) - - height_ratio = (y1 - y0) / np.sqrt(self.height * self.width) - lighter_color = self._change_color_brightness(color, brightness_factor=0.7) - font_size = np.clip((height_ratio - 0.02) / 0.08 + 1, 1.2, 2) - font_size *= 0.75 * self.font_size - - self.draw_text( - text=label, - position=text_pos, - color=lighter_color, - ) - - def draw_text( - self, - text, - position, - color="g", - ha="left", - ): - rotation = 0 - font_size = self.font_size - color = np.maximum(list(mplc.to_rgb(color)), 0.2) - color[np.argmax(color)] = max(0.8, np.max(color)) - bbox = { - "facecolor": "black", - "alpha": self.alpha, - "pad": self.pad, - "edgecolor": "none", - } - x, y = position - self.ax.text( - x, - y, - text, - size=font_size * self.scale, - family="sans-serif", - bbox=bbox, - verticalalignment="top", - horizontalalignment=ha, - color=color, - zorder=10, - rotation=rotation, - ) - - def save(self, saveas=None): - if saveas is None: - saveas = self.saveas - if saveas.lower().endswith(".jpg") or saveas.lower().endswith(".png"): - cv2.imwrite( - saveas, - self._get_buffer()[:, :, ::-1], - ) - else: - self.fig.savefig(saveas) - - def _create_text_labels_attr(self, classes, scores, attr_classes, attr_scores): - labels = [self.id2obj[i] for i in classes] - attr_labels = [self.id2attr[i] for i in attr_classes] - labels = [ - f"{label} {score:.2f} {attr} {attr_score:.2f}" - for label, score, attr, attr_score in zip(labels, scores, attr_labels, attr_scores) - ] - return labels - - def _create_text_labels(self, classes, scores): - labels = [self.id2obj[i] for i in classes] - if scores is not None: - if labels is None: - labels = ["{:.0f}%".format(s * 100) for s in scores] - else: - labels = ["{} {:.0f}%".format(li, s * 100) for li, s in zip(labels, scores)] - return labels - - def _random_color(self, maximum=255): - idx = np.random.randint(0, len(_COLORS)) - ret = _COLORS[idx] * maximum - if not self.rgb: - ret = ret[::-1] - return ret - - def _get_buffer(self): - if not self.pynb: - s, (width, height) = self.canvas.print_to_buffer() - if (width, height) != (self.width, self.height): - img = cv2.resize(self.img, (width, height)) - else: - img = self.img - else: - buf = io.BytesIO() # works for cairo backend - self.canvas.print_rgba(buf) - width, height = self.width, self.height - s = buf.getvalue() - img = self.img - - buffer = np.frombuffer(s, dtype="uint8") - img_rgba = buffer.reshape(height, width, 4) - rgb, alpha = np.split(img_rgba, [3], axis=2) - - try: - import numexpr as ne # fuse them with numexpr - - visualized_image = ne.evaluate("img * (1 - alpha / 255.0) + rgb * (alpha / 255.0)") - except ImportError: - alpha = alpha.astype("float32") / 255.0 - visualized_image = img * (1 - alpha) + rgb * alpha - - return visualized_image.astype("uint8") - - def _change_color_brightness(self, color, brightness_factor): - assert brightness_factor >= -1.0 and brightness_factor <= 1.0 - color = mplc.to_rgb(color) - polygon_color = colorsys.rgb_to_hls(*mplc.to_rgb(color)) - modified_lightness = polygon_color[1] + (brightness_factor * polygon_color[1]) - modified_lightness = 0.0 if modified_lightness < 0.0 else modified_lightness - modified_lightness = 1.0 if modified_lightness > 1.0 else modified_lightness - modified_color = colorsys.hls_to_rgb(polygon_color[0], modified_lightness, polygon_color[2]) - return modified_color - - -# Color map -_COLORS = ( - np.array( - [ - 0.000, - 0.447, - 0.741, - 0.850, - 0.325, - 0.098, - 0.929, - 0.694, 
- 0.125, - 0.494, - 0.184, - 0.556, - 0.466, - 0.674, - 0.188, - 0.301, - 0.745, - 0.933, - 0.635, - 0.078, - 0.184, - 0.300, - 0.300, - 0.300, - 0.600, - 0.600, - 0.600, - 1.000, - 0.000, - 0.000, - 1.000, - 0.500, - 0.000, - 0.749, - 0.749, - 0.000, - 0.000, - 1.000, - 0.000, - 0.000, - 0.000, - 1.000, - 0.667, - 0.000, - 1.000, - 0.333, - 0.333, - 0.000, - 0.333, - 0.667, - 0.000, - 0.333, - 1.000, - 0.000, - 0.667, - 0.333, - 0.000, - 0.667, - 0.667, - 0.000, - 0.667, - 1.000, - 0.000, - 1.000, - 0.333, - 0.000, - 1.000, - 0.667, - 0.000, - 1.000, - 1.000, - 0.000, - 0.000, - 0.333, - 0.500, - 0.000, - 0.667, - 0.500, - 0.000, - 1.000, - 0.500, - 0.333, - 0.000, - 0.500, - 0.333, - 0.333, - 0.500, - 0.333, - 0.667, - 0.500, - 0.333, - 1.000, - 0.500, - 0.667, - 0.000, - 0.500, - 0.667, - 0.333, - 0.500, - 0.667, - 0.667, - 0.500, - 0.667, - 1.000, - 0.500, - 1.000, - 0.000, - 0.500, - 1.000, - 0.333, - 0.500, - 1.000, - 0.667, - 0.500, - 1.000, - 1.000, - 0.500, - 0.000, - 0.333, - 1.000, - 0.000, - 0.667, - 1.000, - 0.000, - 1.000, - 1.000, - 0.333, - 0.000, - 1.000, - 0.333, - 0.333, - 1.000, - 0.333, - 0.667, - 1.000, - 0.333, - 1.000, - 1.000, - 0.667, - 0.000, - 1.000, - 0.667, - 0.333, - 1.000, - 0.667, - 0.667, - 1.000, - 0.667, - 1.000, - 1.000, - 1.000, - 0.000, - 1.000, - 1.000, - 0.333, - 1.000, - 1.000, - 0.667, - 1.000, - 0.333, - 0.000, - 0.000, - 0.500, - 0.000, - 0.000, - 0.667, - 0.000, - 0.000, - 0.833, - 0.000, - 0.000, - 1.000, - 0.000, - 0.000, - 0.000, - 0.167, - 0.000, - 0.000, - 0.333, - 0.000, - 0.000, - 0.500, - 0.000, - 0.000, - 0.667, - 0.000, - 0.000, - 0.833, - 0.000, - 0.000, - 1.000, - 0.000, - 0.000, - 0.000, - 0.167, - 0.000, - 0.000, - 0.333, - 0.000, - 0.000, - 0.500, - 0.000, - 0.000, - 0.667, - 0.000, - 0.000, - 0.833, - 0.000, - 0.000, - 1.000, - 0.000, - 0.000, - 0.000, - 0.143, - 0.143, - 0.143, - 0.857, - 0.857, - 0.857, - 1.000, - 1.000, - 1.000, - ] - ) - .astype(np.float32) - .reshape(-1, 3) -) diff --git a/examples/research_projects/vqgan-clip/README.md b/examples/research_projects/vqgan-clip/README.md deleted file mode 100644 index a74bf9209b0..00000000000 --- a/examples/research_projects/vqgan-clip/README.md +++ /dev/null @@ -1,70 +0,0 @@ -# Simple VQGAN CLIP - -Author: @ErwannMillon - -This is a very simple VQGAN-CLIP implementation that was built as a part of the Face Editor project . This simplified version allows you to generate or edit images using text with just three lines of code. For a more full featured implementation with masking, more advanced losses, and a full GUI, check out the Face Editor project. - -By default this uses a CelebA checkpoint (for generating/editing faces), but also has an imagenet checkpoint that can be loaded by specifying vqgan_config and vqgan_checkpoint when instantiating VQGAN_CLIP. - -Learning rate and iterations can be set by modifying vqgan_clip.lr and vqgan_clip.iterations . - -You can edit images by passing `image_path` to the generate function. -See the generate function's docstring to learn more about how to format prompts. 
- -## Usage -The easiest way to test this out is by using the Colab demo - -To install locally: -- Clone this repo -- Install git-lfs (ubuntu: sudo apt-get install git-lfs , MacOS: brew install git-lfs) - -In the root of the repo run: - -```bash -conda create -n vqganclip python=3.8 -conda activate vqganclip -git-lfs install -git clone https://huggingface.co/datasets/erwann/face_editor_model_ckpt model_checkpoints -pip install -r requirements.txt -``` - -### Generate new images -```python -from VQGAN_CLIP import VQGAN_CLIP -vqgan_clip = VQGAN_CLIP() -vqgan_clip.generate("a picture of a smiling woman") -``` - -### Edit an image -To get a test image, run -`git clone https://huggingface.co/datasets/erwann/vqgan-clip-pic test_images` - -To edit: -```python -from VQGAN_CLIP import VQGAN_CLIP -vqgan_clip = VQGAN_CLIP() - -vqgan_clip.lr = .07 -vqgan_clip.iterations = 15 -vqgan_clip.generate( - pos_prompts= ["a picture of a beautiful asian woman", "a picture of a woman from Japan"], - neg_prompts=["a picture of an Indian person", "a picture of a white person"], - image_path="./test_images/face.jpeg", - show_intermediate=True, - save_intermediate=True, -) -``` - -### Make an animation from the most recent generation -`vqgan_clip.make_animation()` - -## Features: -- Positive and negative prompts -- Multiple prompts -- Prompt Weights -- Creating GIF animations of the transformations -- Wandb logging - - - diff --git a/examples/research_projects/vqgan-clip/VQGAN_CLIP.py b/examples/research_projects/vqgan-clip/VQGAN_CLIP.py deleted file mode 100644 index 1bfbc4cd5c3..00000000000 --- a/examples/research_projects/vqgan-clip/VQGAN_CLIP.py +++ /dev/null @@ -1,268 +0,0 @@ -import os -from glob import glob - -import imageio -import torch -import torchvision -import wandb -from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan -from loaders import load_vqgan -from PIL import Image -from torch import nn - -from transformers import CLIPModel, CLIPTokenizerFast -from utils import get_device, get_timestamp, show_pil - - -class ProcessorGradientFlow: - """ - This wraps the huggingface CLIP processor to allow backprop through the image processing step. - The original processor forces conversion to PIL images, which is faster for image processing but breaks gradient flow. - We call the original processor to get the text embeddings, but use our own image processing to keep images as torch tensors. 
- """ - - def __init__(self, device: str = "cpu", clip_model: str = "openai/clip-vit-large-patch14") -> None: - self.device = device - self.tokenizer = CLIPTokenizerFast.from_pretrained(clip_model) - self.image_mean = [0.48145466, 0.4578275, 0.40821073] - self.image_std = [0.26862954, 0.26130258, 0.27577711] - self.normalize = torchvision.transforms.Normalize(self.image_mean, self.image_std) - self.resize = torchvision.transforms.Resize(224) - self.center_crop = torchvision.transforms.CenterCrop(224) - - def preprocess_img(self, images): - images = self.resize(images) - images = self.center_crop(images) - images = self.normalize(images) - return images - - def __call__(self, text=None, images=None, **kwargs): - encoding = self.tokenizer(text=text, **kwargs) - encoding["pixel_values"] = self.preprocess_img(images) - encoding = {key: value.to(self.device) for (key, value) in encoding.items()} - return encoding - - -class VQGAN_CLIP(nn.Module): - def __init__( - self, - iterations=10, - lr=0.01, - vqgan=None, - vqgan_config=None, - vqgan_checkpoint=None, - clip=None, - clip_preprocessor=None, - device=None, - log=False, - save_vector=True, - return_val="image", - quantize=True, - save_intermediate=False, - show_intermediate=False, - make_grid=False, - ) -> None: - """ - Instantiate a VQGAN_CLIP model. If you want to use a custom VQGAN model, pass it as vqgan. - """ - super().__init__() - self.latent = None - self.device = device if device else get_device() - if vqgan: - self.vqgan = vqgan - else: - self.vqgan = load_vqgan(self.device, conf_path=vqgan_config, ckpt_path=vqgan_checkpoint) - self.vqgan.eval() - if clip: - self.clip = clip - else: - self.clip = CLIPModel.from_pretrained("openai/clip-vit-base-patch32") - self.clip.to(self.device) - self.clip_preprocessor = ProcessorGradientFlow(device=self.device) - - self.iterations = iterations - self.lr = lr - self.log = log - self.make_grid = make_grid - self.return_val = return_val - self.quantize = quantize - self.latent_dim = self.vqgan.decoder.z_shape - - def make_animation(self, input_path=None, output_path=None, total_duration=5, extend_frames=True): - """ - Make an animation from the intermediate images saved during generation. - By default, uses the images from the most recent generation created by the generate function. - If you want to use images from a different generation, pass the path to the folder containing the images as input_path. 
- """ - images = [] - if output_path is None: - output_path = "./animation.gif" - if input_path is None: - input_path = self.save_path - paths = sorted(glob(input_path + "/*")) - if not len(paths): - raise ValueError( - "No images found in save path, aborting (did you pass save_intermediate=True to the generate" - " function?)" - ) - if len(paths) == 1: - print("Only one image found in save path, (did you pass save_intermediate=True to the generate function?)") - frame_duration = total_duration / len(paths) - durations = [frame_duration] * len(paths) - if extend_frames: - durations[0] = 1.5 - durations[-1] = 3 - for file_name in paths: - if file_name.endswith(".png"): - images.append(imageio.imread(file_name)) - imageio.mimsave(output_path, images, duration=durations) - print(f"gif saved to {output_path}") - - def _get_latent(self, path=None, img=None): - if not (path or img): - raise ValueError("Input either path or tensor") - if img is not None: - raise NotImplementedError - x = preprocess(Image.open(path), target_image_size=256).to(self.device) - x_processed = preprocess_vqgan(x) - z, *_ = self.vqgan.encode(x_processed) - return z - - def _add_vector(self, transform_vector): - """Add a vector transform to the base latent and returns the resulting image.""" - base_latent = self.latent.detach().requires_grad_() - trans_latent = base_latent + transform_vector - if self.quantize: - z_q, *_ = self.vqgan.quantize(trans_latent) - else: - z_q = trans_latent - return self.vqgan.decode(z_q) - - def _get_clip_similarity(self, prompts, image, weights=None): - clip_inputs = self.clip_preprocessor(text=prompts, images=image, return_tensors="pt", padding=True) - clip_outputs = self.clip(**clip_inputs) - similarity_logits = clip_outputs.logits_per_image - if weights is not None: - similarity_logits = similarity_logits * weights - return similarity_logits.sum() - - def _get_clip_loss(self, pos_prompts, neg_prompts, image): - pos_logits = self._get_clip_similarity(pos_prompts["prompts"], image, weights=(1 / pos_prompts["weights"])) - if neg_prompts: - neg_logits = self._get_clip_similarity(neg_prompts["prompts"], image, weights=neg_prompts["weights"]) - else: - neg_logits = torch.tensor([1], device=self.device) - loss = -torch.log(pos_logits) + torch.log(neg_logits) - return loss - - def _optimize_CLIP(self, original_img, pos_prompts, neg_prompts): - vector = torch.randn_like(self.latent, requires_grad=True, device=self.device) - optim = torch.optim.Adam([vector], lr=self.lr) - - for i in range(self.iterations): - optim.zero_grad() - transformed_img = self._add_vector(vector) - processed_img = loop_post_process(transformed_img) - clip_loss = self._get_CLIP_loss(pos_prompts, neg_prompts, processed_img) - print("CLIP loss", clip_loss) - if self.log: - wandb.log({"CLIP Loss": clip_loss}) - clip_loss.backward(retain_graph=True) - optim.step() - if self.return_val == "image": - yield custom_to_pil(transformed_img[0]) - else: - yield vector - - def _init_logging(self, positive_prompts, negative_prompts, image_path): - wandb.init(reinit=True, project="face-editor") - wandb.config.update({"Positive Prompts": positive_prompts}) - wandb.config.update({"Negative Prompts": negative_prompts}) - wandb.config.update({"lr": self.lr, "iterations": self.iterations}) - if image_path: - image = Image.open(image_path) - image = image.resize((256, 256)) - wandb.log("Original Image", wandb.Image(image)) - - def process_prompts(self, prompts): - if not prompts: - return [] - processed_prompts = [] - weights = [] - if 
isinstance(prompts, str): - prompts = [prompt.strip() for prompt in prompts.split("|")] - for prompt in prompts: - if isinstance(prompt, (tuple, list)): - processed_prompt = prompt[0] - weight = float(prompt[1]) - elif ":" in prompt: - processed_prompt, weight = prompt.split(":") - weight = float(weight) - else: - processed_prompt = prompt - weight = 1.0 - processed_prompts.append(processed_prompt) - weights.append(weight) - return { - "prompts": processed_prompts, - "weights": torch.tensor(weights, device=self.device), - } - - def generate( - self, - pos_prompts, - neg_prompts=None, - image_path=None, - show_intermediate=True, - save_intermediate=False, - show_final=True, - save_final=True, - save_path=None, - ): - """Generate an image from the given prompts. - If image_path is provided, the image is used as a starting point for the optimization. - If image_path is not provided, a random latent vector is used as a starting point. - You must provide at least one positive prompt, and optionally provide negative prompts. - Prompts must be formatted in one of the following ways: - - A single prompt as a string, e.g "A smiling woman" - - A set of prompts separated by pipes: "A smiling woman | a woman with brown hair" - - A set of prompts and their weights separated by colons: "A smiling woman:1 | a woman with brown hair: 3" (default weight is 1) - - A list of prompts, e.g ["A smiling woman", "a woman with brown hair"] - - A list of prompts and weights, e.g [("A smiling woman", 1), ("a woman with brown hair", 3)] - """ - if image_path: - self.latent = self._get_latent(image_path) - else: - self.latent = torch.randn(self.latent_dim, device=self.device) - if self.log: - self._init_logging(pos_prompts, neg_prompts, image_path) - - assert pos_prompts, "You must provide at least one positive prompt." 
- pos_prompts = self.process_prompts(pos_prompts) - neg_prompts = self.process_prompts(neg_prompts) - if save_final and save_path is None: - save_path = os.path.join("./outputs/", "_".join(pos_prompts["prompts"])) - if not os.path.exists(save_path): - os.makedirs(save_path) - else: - save_path = save_path + "_" + get_timestamp() - os.makedirs(save_path) - self.save_path = save_path - - original_img = self.vqgan.decode(self.latent)[0] - if show_intermediate: - print("Original Image") - show_pil(custom_to_pil(original_img)) - - original_img = loop_post_process(original_img) - for iter, transformed_img in enumerate(self._optimize_CLIP(original_img, pos_prompts, neg_prompts)): - if show_intermediate: - show_pil(transformed_img) - if save_intermediate: - transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}.png")) - if self.log: - wandb.log({"Image": wandb.Image(transformed_img)}) - if show_final: - show_pil(transformed_img) - if save_final: - transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}_final.png")) diff --git a/examples/research_projects/vqgan-clip/img_processing.py b/examples/research_projects/vqgan-clip/img_processing.py deleted file mode 100644 index 221ebd86dae..00000000000 --- a/examples/research_projects/vqgan-clip/img_processing.py +++ /dev/null @@ -1,50 +0,0 @@ -import numpy as np -import PIL -import torch -import torchvision.transforms as T -import torchvision.transforms.functional as TF -from PIL import Image - - -def preprocess(img, target_image_size=256): - s = min(img.size) - - if s < target_image_size: - raise ValueError(f"min dim for image {s} < {target_image_size}") - - r = target_image_size / s - s = (round(r * img.size[1]), round(r * img.size[0])) - img = TF.resize(img, s, interpolation=PIL.Image.LANCZOS) - img = TF.center_crop(img, output_size=2 * [target_image_size]) - img = torch.unsqueeze(T.ToTensor()(img), 0) - return img - - -def preprocess_vqgan(x): - x = 2.0 * x - 1.0 - return x - - -def custom_to_pil(x, process=True, mode="RGB"): - x = x.detach().cpu() - if process: - x = post_process_tensor(x) - x = x.numpy() - if process: - x = (255 * x).astype(np.uint8) - x = Image.fromarray(x) - if not x.mode == mode: - x = x.convert(mode) - return x - - -def post_process_tensor(x): - x = torch.clamp(x, -1.0, 1.0) - x = (x + 1.0) / 2.0 - x = x.permute(1, 2, 0) - return x - - -def loop_post_process(x): - x = post_process_tensor(x.squeeze()) - return x.permute(2, 0, 1).unsqueeze(0) diff --git a/examples/research_projects/vqgan-clip/loaders.py b/examples/research_projects/vqgan-clip/loaders.py deleted file mode 100644 index 88513bcb691..00000000000 --- a/examples/research_projects/vqgan-clip/loaders.py +++ /dev/null @@ -1,74 +0,0 @@ -import importlib - -import torch -import yaml -from omegaconf import OmegaConf -from taming.models.vqgan import VQModel - - -def load_config(config_path, display=False): - config = OmegaConf.load(config_path) - if display: - print(yaml.dump(OmegaConf.to_container(config))) - return config - - -def load_vqgan(device, conf_path=None, ckpt_path=None): - if conf_path is None: - conf_path = "./model_checkpoints/vqgan_only.yaml" - config = load_config(conf_path, display=False) - model = VQModel(**config.model.params) - if ckpt_path is None: - ckpt_path = "./model_checkpoints/vqgan_only.pt" - sd = torch.load(ckpt_path, map_location=device) - if ".ckpt" in ckpt_path: - sd = sd["state_dict"] - model.load_state_dict(sd, strict=True) - model.to(device) - del sd - return model - - -def reconstruct_with_vqgan(x, model): - z, _, 
[_, _, indices] = model.encode(x) - print(f"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}") - xrec = model.decode(z) - return xrec - - -def get_obj_from_str(string, reload=False): - module, cls = string.rsplit(".", 1) - if reload: - module_imp = importlib.import_module(module) - importlib.reload(module_imp) - return getattr(importlib.import_module(module, package=None), cls) - - -def instantiate_from_config(config): - if "target" not in config: - raise KeyError("Expected key `target` to instantiate.") - return get_obj_from_str(config["target"])(**config.get("params", {})) - - -def load_model_from_config(config, sd, gpu=True, eval_mode=True): - model = instantiate_from_config(config) - if sd is not None: - model.load_state_dict(sd) - if gpu: - model.cuda() - if eval_mode: - model.eval() - return {"model": model} - - -def load_model(config, ckpt, gpu, eval_mode): - # load the specified checkpoint - if ckpt: - pl_sd = torch.load(ckpt, map_location="cpu") - global_step = pl_sd["global_step"] - print(f"loaded model from global step {global_step}.") - else: - pl_sd = {"state_dict": None} - global_step = None - model = load_model_from_config(config.model, pl_sd["state_dict"], gpu=gpu, eval_mode=eval_mode)["model"] - return model, global_step diff --git a/examples/research_projects/vqgan-clip/requirements.txt b/examples/research_projects/vqgan-clip/requirements.txt deleted file mode 100644 index 19761632422..00000000000 --- a/examples/research_projects/vqgan-clip/requirements.txt +++ /dev/null @@ -1,27 +0,0 @@ -einops -gradio -icecream -imageio -lpips -matplotlib -more_itertools -numpy -omegaconf -opencv_python_headless -Pillow -pudb -pytorch_lightning -PyYAML -requests -scikit_image -scipy -setuptools -streamlit -taming-transformers -torch -torchvision -tqdm -transformers==4.48.0 -tokenizers==0.13.2 -typing_extensions -wandb diff --git a/examples/research_projects/vqgan-clip/utils.py b/examples/research_projects/vqgan-clip/utils.py deleted file mode 100644 index 7db45fcbb52..00000000000 --- a/examples/research_projects/vqgan-clip/utils.py +++ /dev/null @@ -1,35 +0,0 @@ -from datetime import datetime - -import matplotlib.pyplot as plt -import torch - - -def freeze_module(module): - for param in module.parameters(): - param.requires_grad = False - - -def get_device(): - device = "cuda" if torch.cuda.is_available() else "cpu" - if torch.backends.mps.is_available() and torch.backends.mps.is_built(): - device = "mps" - if device == "mps": - print( - "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch" - " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues" - " with generations." - ) - return device - - -def show_pil(img): - fig = plt.imshow(img) - fig.axes.get_xaxis().set_visible(False) - fig.axes.get_yaxis().set_visible(False) - plt.show() - - -def get_timestamp(): - current_time = datetime.now() - timestamp = current_time.strftime("%H:%M:%S") - return timestamp diff --git a/examples/research_projects/wav2vec2/FINE_TUNE_XLSR_WAV2VEC2.md b/examples/research_projects/wav2vec2/FINE_TUNE_XLSR_WAV2VEC2.md deleted file mode 100644 index 7a580a36132..00000000000 --- a/examples/research_projects/wav2vec2/FINE_TUNE_XLSR_WAV2VEC2.md +++ /dev/null @@ -1,516 +0,0 @@ -# Fine-Tuning week of XLSR-Wav2Vec2 on 60 languages 🌍 - -Welcome to the fine-tuning week! The goal of this week is to have state-of-the-art automatic speech recognition (ASR) models in as many languages as possible. 
The fine-tuning week ends on Friday, the 26th March at midnight PST time. - -Participants are encouraged to fine-tune the pretrained [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) checkpoint on one or more of the 60 languages of [Common Voice dataset](https://commonvoice.mozilla.org/en/datasets). -Furthermore, it is very much appreciated if participants fine-tune XLSR-Wav2Vec2 on a language that is not included in the Common Voice dataset. - -All fine-tuned models uploaded until Friday, the 26th March midnight PST, will be taken into account for competition, and the best model per language will be awarded a prize if the best model performs reasonably well. -The testing data to evaluate the models will be the official [Common Voice dataset](https://commonvoice.mozilla.org/en/datasets) *`test data`* of version 6.1. Again, participants are very much encouraged to fine-tune XLSR-Wav2Vec2 on languages that are not found in the Common Voice dataset since those languages are even more likely to be underrepresented in the speech community. -Each model fine-tuned on a language not found in Common Voice, will be evaluated by the Hugging Face team after Friday, the 26th March at midnight PST, and if the model performs reasonably well, the model receives a prize as well. -For more information on which data can be used for training, how the models are evaluated exactly, and what type of data preprocessing can be used, please see ["Training and Evaluation Rules"](#training-and-evaluation-rules). - -**Please keep in mind:** -The spirit of the fine-tuning week is to provide state-of-the-art speech recognition in as many languages as possible to the community! -So while we encourage healthy competition between people/groups of the same language so that better results are obtained, it is extremely important that we help each other and share our insights with the whole team/community. -What matters in the end is what has been achieved by the team as a whole during the fine-tuning week. -That being said, we strongly encourage people to share tips & tricks on the forum or Slack, help each other when team members encounter bugs, and work in groups. -To make it easier to share and help, forum threads have been created under the name {language} ASR: Fine-Tuning Wav2Vec2, e.g. here. -It is very much possible that prizes will be given to groups of people instead of individuals. 
Also, don't hesitate to ask questions, propose improvements to the organization, to the material given to participants, etc...🤗 - -## Table of Contents - -- [Organization of the fine tuning week](#organization-of-the-fine-tuning-week) -- [How to fine tune XLSR Wav2Vec2](#how-to-fine-tune-xlsr-wav2vec2) - - [Google colab setup](#google-colab-setup) - - [Local machine](#local-machine) -- [How to upload my trained checkpoint](#how-to-upload-my-trained-checkpoint) - - [How to create the README](#how-to-create-the-readme) -- [How to evaluate my trained checkpoint](#how-to-evaluate-my-trained-checkpoint) -- [Rules of training and evaluation](#rules-of-training-and-evaluation) -- [Tips and tricks](#tips-and-tricks) - - [How to combine multiple datasests into one](#how-to-combine-multiple-datasets-into-one) - - [How to effectively preprocess the data](#how-to-effectively-preprocess-the-data) - - [How to efficiently preproces the data](#how-to-do-efficiently-load-datasets-with-limited-ram-and-hard-drive-space) - - [How to do hyperparameter tuning](#how-to-do-hyperparameter-tuning) - - [How to preprocess and evaluate character based languages](#how-to-preprocess-and-evaluate-character-based-languages) -- [Further reading material](#further-reading-material) -- [FAQ](#faq) - -## Organization of the fine tuning week - -The week officially starts on 22.03.2021 and ends on 29.03.2021, but you are more than welcome to start fine-tuning models before the start date. -General questions you might have, general problems you encounter, and general tips can be shared directly on the Slack channel (see [this post](https://discuss.huggingface.co/t/open-to-the-community-xlsr-wav2vec2-fine-tuning-week-for-low-resource-languages/4467) on how to be added to Slack). -More language-specific questions or specific bugs should be posted on the [forum](https://discuss.huggingface.co/) (feel free to use already existing language-specific threads, *e.g.* [this one](https://discuss.huggingface.co/t/arabic-asr-fine-tuning-wav2vec2/4608) or open a new one if there is no thread for your language yet) or directly on [github](https://github.com/huggingface/transformers) if you think some code or document needs correction/improvement. -Starting on Monday, the 22.03.2021, the Hugging Face team will try to provide an overview of currently trained models along with their evaluation results. -All the necessary information on: - -- How to fine-tune the XLSR model -- How to upload the model -- How to share your evaluation results & training/eval script -- What are the training/evaluation rules - -can be found in the sections below. If something is still unclear, feel free to drop a message in the Slack channel. - -## How to fine tune XLSR Wav2Vec2 - -This chapter gives an in-detail explanation of how to fine-tune [Facebook's multi-lingual Wav2vec2](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) on any language of the [Common Voice dataset](https://commonvoice.mozilla.org/en/datasets). - -Two possible setups can be used to fine-tune Wav2Vec2. The easiest setup is to simply use [google colab](https://colab.research.google.com/). It is possible to train the full model in a *free* google colab, but it is recommended to use google colab pro since it is more stable. - -The other option is to run a script locally. While this can be more difficult to set up, it also means that you have more control over the training run and probably access to better GPUs than you would have in a google colab. 
-For small datasets, it is usually sufficient to train your model
-in a google colab. For larger and thus more memory-intensive datasets, it is probably
-better to fine-tune the model locally.
-
-For each option, we explain in detail how to fine-tune XLSR-Wav2Vec2 in the following.
-
-### Google colab setup
-
-**Note**: Instead of reading the following section, you can simply watch [this](https://www.youtube.com/watch?v=UynYn2C3tI0&ab_channel=PatrickvonPlaten) video, where Patrick explains how to adapt the google colab for your specific language.
-
-**1.**: If you plan on training XLSR-Wav2Vec2 in a google colab, you should first make sure to have a valid gmail account. You can sign up for a gmail account [here](https://accounts.google.com/signup/v2/webcreateaccount?hl=en&flowName=GlifWebSignIn&flowEntry=SignUp).
-Having successfully signed up for gmail, you can now sign in to your account to make sure you are logged in when opening new tabs in your browser.
-
-**2.**: Next, head over to the official [Fine-Tune XLSR-Wav2Vec2 with 🤗 Transformers](https://colab.research.google.com/github/patrickvonplaten/notebooks/blob/master/Fine_Tune_XLSR_Wav2Vec2_on_Turkish_ASR_with_%F0%9F%A4%97_Transformers.ipynb) google colab. The first thing you should do is to make a copy of it - click `->File->Save a copy in Drive`. This should save a copy of the google colab in your google drive.
-
-**3.**: Now it is highly recommended to carefully read through the google colab without running the cells yet.
-You should get an understanding of how the model is trained and what you will have to change when training the model in a different language.
-Having done so, you can again head over to [Common Voice](https://commonvoice.mozilla.org/en/datasets) and pick a language you want to fine-tune [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) on. Make sure you remember the language code (for each language, you can find it under the field "*Version*"; it corresponds to **all characters before the first underscore**, *e.g.* *el* for Greek and *ga-IE* for Irish).
-
-**4.**: Now, in the **second** cell of the google colab, replace the language code used for the demo (*tr* for Turkish) with the language code corresponding to the language you just chose. This will load the correct data for your language.
-
-**5.**: It is time to start running the google colab! Make sure that you have selected "GPU" as your runtime environment, and then you can start running the cells one-by-one. Make sure you attentively read the text between the cells to understand what is happening and to correct the cells, where necessary, to improve the fine-tuning script for your language. Things you might want to improve/change:
-
- - Data loading. It is very much recommended to use more than just the official training data of the Common Voice dataset. If you find more data on the internet, feel free to use it! Check out the section ["How to combine multiple datasets into one"](#how-to-combine-multiple-datasets-into-one)
-
-- Data Processing. You should adapt the data processing to your specific language. In data processing, you should make the data more uniform so that it will be easier for the model to learn how to classify speech in your data. Here it can be really helpful to be proficient in the language, so that you know what can be done to simplify it without changing the meaning.
-Data processing methods include, but are not limited to:
- - Normalizing your data.
Make sure all characters are lower-cased. - - Remove typographical symbols and punctuation marks. See a list [here](https://en.wikipedia.org/wiki/List_of_typographical_symbols_and_punctuation_marks). Be careful to not remove punctuation marks that can change the meaning of the sentence. *E.g.* you should not remove the single quotation mark `'` in English, as it would change the words `"it's"` to `"its"` which is a different word and has thus a different meaning. For more tips on data processing see ["How to effectively preprocess the data"](#how-to-effectively-preprocess-the-data") - -- Hyperparameter Tuning. Depending on the size of the data you should probably change the hyperparameters of the google colab. You can change any parameter you like. For more tips and tricks see ["How to do hyperparameter tuning for my language"](#how-to-do-hyperparameter-tuning-for-my-language) - -When running the google colab make sure that you uncomment the cell corresponding to mounting your google drive to the colab. This cell looks as follows: - -```python -# from google.colab import drive -# drive.mount('/content/gdrive/') -``` - -Uncomment it, run it, and follow the instructions to mount your google drive. This way you can be sure that the model parameters and created tokenizer & feature extractor files are saved in **your** google drive. - -Also, make sure that you uncomment the cells corresponding to save the preprocessing files and trained model weights to your drive. Otherwise, you might lose a trained model if you google crashes. You should change the name of your model from `wav2vec2-large-xlsr-turkish-demo` to `wav2vec2-large-xlsr-{your_favorite_name}`. - -Those cells correspond to: - -```python -# processor.save_pretrained("/content/gdrive/MyDrive/wav2vec2-large-xlsr-turkish-demo") -``` - -and the line: - -```python - output_dir="/content/gdrive/MyDrive/wav2vec2-large-xlsr-turkish-demo", -``` - -further below (which should already be uncommented). - -Having finished the training you should find the following files/folders under the folder `wav2vec2-large-xlsr-{your_favorite_name}` in your google drive: - -- `preprocessor_config.json` - the parameters of the feature extractor -- `special_tokens_map.json` - the special token map of the tokenizer -- `tokenizer_config.json` - the parameters of the tokenizer -- `vocab.json` - the vocabulary of the tokenizer -- `checkpoint-{...}/` - the saved checkpoints saved during training. Each checkpoint should contain the files: `config.json`, `optimizer.pt`, `pytorch_model.bin`, `scheduler.pt`, `training_args.bin`. The files `config.json` and `pytorch_model.bin` define your model. - -If you are happy with your training results it is time to upload your model! -Download the following files to your local computer: **`preprocessor_config.json`, `special_tokens_map.json`, `tokenizer_config.json`, `vocab.json`, `config.json`, `pytorch_model.bin`**. Those files fully define a XLSR-Wav2Vec2 model checkpoint. - -Awesome you have successfully trained a XLSR-Wav2Vec2 model 😎. Now you can jump to the section ["How to upload my trained checkpoint"](#how-to-upload-my-trained-checkpoint) - -### Local machine - -We have provided `run_common_voice.py` script to run fine-tuning on local machine. The script is similar to the colab but allows you to launch training using command line, save and continue training from previous checkpoints and launch training on multiple GPUs. -For bigger datasets, we recommend to train Wav2Vec2 locally instead of in a google colab. - -1. 
To begin with, we should clone transformers locally and install all the required packages.
-
-First, you need to clone the `transformers` repo with:
-
-```bash
-$ git clone https://github.com/huggingface/transformers.git
-```
-
-Second, head over to the `examples/research_projects/wav2vec2` directory, where the `run_common_voice.py` script is located.
-
-```bash
-$ cd transformers/examples/research_projects/wav2vec2
-```
-
-Third, install the required packages. The
-packages are listed in the `requirements.txt` file and can be installed with
-
-```bash
-$ pip install -r requirements.txt
-```
-
- **Note**: Installing the latest version of `torchaudio` will also upgrade `torch` to its latest stable version. If you are using a specific version of `torch`, then make sure
- to use the correct `torchaudio` version compatible with your version of `torch`. By default, the `requirements.txt` will install the latest version of `torchaudio`.
-
-2. Next, take a look at the `run_common_voice.py` script to get an understanding of how it works. In short, the script does the following:
-
- - Load the given common voice dataset
- - Create vocab for the language
- - Load the model with given hyperparameters
- - Pre-process the dataset to input into the model
- - Run training
- - Run evaluation
-
-3. The following examples show how you can launch fine-tuning for the common voice dataset.
-Here we will run the script on the *Turkish* Common Voice dataset for demonstration purposes; the language is selected with the `--dataset_config_name` argument (the language code, `tr` for Turkish).
-
- **To launch fine-tuning on a single GPU:**
-
- ```bash
- python run_common_voice.py \
- --model_name_or_path="facebook/wav2vec2-large-xlsr-53" \
- --dataset_config_name="tr" \
- --output_dir=./wav2vec2-large-xlsr-turkish-demo \
- --overwrite_output_dir \
- --num_train_epochs="5" \
- --per_device_train_batch_size="16" \
- --learning_rate="3e-4" \
- --warmup_steps="500" \
- --eval_strategy="steps" \
- --save_steps="400" \
- --eval_steps="400" \
- --logging_steps="400" \
- --save_total_limit="3" \
- --freeze_feature_extractor \
- --feat_proj_dropout="0.0" \
- --layerdrop="0.1" \
- --gradient_checkpointing \
- --fp16 \
- --group_by_length \
- --do_train --do_eval
- ```
-
- **To launch fine-tuning on multiple GPUs:**
-
- ```bash
- python -m torch.distributed.launch \
- --nproc_per_node 4 run_common_voice.py \
- --model_name_or_path="facebook/wav2vec2-large-xlsr-53" \
- --dataset_config_name="tr" \
- --output_dir=./wav2vec2-large-xlsr-turkish-demo \
- --overwrite_output_dir \
- --num_train_epochs="5" \
- --per_device_train_batch_size="16" \
- --learning_rate="3e-4" \
- --warmup_steps="500" \
- --eval_strategy="steps" \
- --save_steps="400" \
- --eval_steps="400" \
- --logging_steps="400" \
- --save_total_limit="3" \
- --freeze_feature_extractor \
- --feat_proj_dropout="0.0" \
- --layerdrop="0.1" \
- --gradient_checkpointing \
- --fp16 \
- --group_by_length \
- --do_train --do_eval
- ```
-
- The above command will launch the training on 4 GPUs. Use the `--nproc_per_node` option to specify the number of GPUs.
-
- Once the training is finished, the model and checkpoints will be saved under the directory specified by the `--output_dir` argument.
-
-4. The script also allows you to resume training from the last saved checkpoint. To resume training from the last saved checkpoint, remove the `--overwrite_output_dir` option and run the same command again.
And to continue training from a specific checkpoint, keep the `--overwrite_output_dir` -option and pass the path of the checkpoint as `--model_name_or_path`. - -As the script is based on the `Trainer` API, refer to the [Trainer docs](https://huggingface.co/transformers/main_classes/trainer.html) for more information about ``Trainer`` and ``TrainingArguments``. - -[OVH cloud](https://www.ovh.com/world/) has generously offered free compute for this sprint. Please refer to [this video](https://www.youtube.com/watch?v=2hlkWAESMk8&ab_channel=Databuzzword) to get started with OVH. - - -## How to upload my trained checkpoint - -To upload your trained checkpoint, you have to create a new model repository on the 🤗 model hub, from this page: https://huggingface.co/new - -> You can also follow the more in-depth instructions [here](https://huggingface.co/transformers/model_sharing.html) if needed. - -Having created your model repository on the hub, you should clone it locally: - -```bash -git lfs install - -git clone https://huggingface.co/username/your-model-name -``` - -Then and add the following files that fully define a XLSR-Wav2Vec2 checkpoint into the repository. You should have added the following files. - -- `preprocessor_config.json` -- `special_tokens_map.json` -- `tokenizer_config.json` -- `vocab.json` -- `config.json` -- `pytorch_model.bin` - -Having added the above files, you should run the following to push files to your model repository. -```bash -git add . && git commit -m "Add model files" && git push -``` - -The next **very important** step is to create the model card. For people to use your fine-tuned -model it is important to understand: - -- What kind of model is it? -- What is your model useful for? -- What data was your model trained on? -- How well does your model perform? - -All these questions should be answered in a model card which is the first thing people see when -visiting your model on the hub under `https://huggingface.co/{your_username}/{your_modelname}`. - -**Note**: -It is extremely important that you add this model card or else we cannot find your model and thus cannot take the model into -account for the final evaluation. - -### How to create the readme - -The model card is written in markdown (`.md`) and should be added by simply clicking on the "Add model card" button which is found on the top right corner. -You are encouraged to copy-paste the following template into your model card. - -**Make sure that** instead of copying the output of the markdown file you copy the **raw** version of the following part. - -To get the raw version of this file, simply click on the "`raw`" button on the top right corner of this file next to "`blame`" and copy everything below the marker. -Make sure that you read and consequently remove all #TODO: statements from the model card. - -<======================Copy **raw** version from here========================= ---- -language: {lang_id} #TODO: replace {lang_id} in your language code here. Make sure the code is one of the *ISO codes* of [this](https://huggingface.co/languages) site. -datasets: -- common_voice #TODO: remove if you did not use the common voice dataset -- TODO: add more datasets if you have used additional datasets. Make sure to use the exact same -dataset name as the one found [here](https://huggingface.co/datasets). 
If the dataset can not be found in the official datasets, just give it a new name -metrics: -- wer -tags: -- audio -- automatic-speech-recognition -- speech -- xlsr-fine-tuning-week -license: apache-2.0 -model-index: -- name: {human_readable_name} #TODO: replace {human_readable_name} with a name of your model as it should appear on the leaderboard. It could be something like `Elgeish XLSR Wav2Vec2 Large 53` - results: - - task: - name: Speech Recognition - type: automatic-speech-recognition - dataset: - name: Common Voice {lang_id} #TODO: replace {lang_id} in your language code here. Make sure the code is one of the *ISO codes* of [this](https://huggingface.co/languages) site. - type: common_voice - args: {lang_id} #TODO: replace {lang_id} in your language code here. Make sure the code is one of the *ISO codes* of [this](https://huggingface.co/languages) site. - metrics: - - name: Test WER - type: wer - value: {wer_result_on_test} #TODO (IMPORTANT): replace {wer_result_on_test} with the WER error rate you achieved on the common_voice test set. It should be in the format XX.XX (don't add the % sign here). **Please** remember to fill out this value after you evaluated your model, so that your model appears on the leaderboard. If you fill out this model card before evaluating your model, please remember to edit the model card afterward to fill in your value ---- - -# Wav2Vec2-Large-XLSR-53-{language} #TODO: replace language with your {language}, *e.g.* French - -Fine-tuned [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) on {language} using the [Common Voice](https://huggingface.co/datasets/common_voice), ... and ... dataset{s}. #TODO: replace {language} with your language, *e.g.* French and eventually add more datasets that were used and eventually remove common voice if model was not trained on common voice -When using this model, make sure that your speech input is sampled at 16kHz. - -## Usage - -The model can be used directly (without a language model) as follows: - -```python -import torch -import torchaudio -from datasets import load_dataset -from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor - -test_dataset = load_dataset("common_voice", "{lang_id}", split="test[:2%]") #TODO: replace {lang_id} in your language code here. Make sure the code is one of the *ISO codes* of [this](https://huggingface.co/languages) site. - -processor = Wav2Vec2Processor.from_pretrained("{model_id}") #TODO: replace {model_id} with your model id. The model id consists of {your_username}/{your_modelname}, *e.g.* `elgeish/wav2vec2-large-xlsr-53-arabic` -model = Wav2Vec2ForCTC.from_pretrained("{model_id}") #TODO: replace {model_id} with your model id. The model id consists of {your_username}/{your_modelname}, *e.g.* `elgeish/wav2vec2-large-xlsr-53-arabic` - -resampler = torchaudio.transforms.Resample(48_000, 16_000) - -# Preprocessing the datasets. 
-# We need to read the aduio files as arrays -def speech_file_to_array_fn(batch): - speech_array, sampling_rate = torchaudio.load(batch["path"]) - batch["speech"] = resampler(speech_array).squeeze().numpy() - return batch - -test_dataset = test_dataset.map(speech_file_to_array_fn) -inputs = processor(test_dataset[:2]["speech"], sampling_rate=16_000, return_tensors="pt", padding=True) - -with torch.no_grad(): - logits = model(inputs.input_values, attention_mask=inputs.attention_mask).logits - -predicted_ids = torch.argmax(logits, dim=-1) - -print("Prediction:", processor.batch_decode(predicted_ids)) -print("Reference:", test_dataset[:2]["sentence"]) -``` - - -## Evaluation - -The model can be evaluated as follows on the {language} test data of Common Voice. # TODO: replace #TODO: replace language with your {language}, *e.g.* French - - -```python -import torch -import torchaudio -from datasets import load_dataset, load_metric -from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor -import re - -test_dataset = load_dataset("common_voice", "{lang_id}", split="test") #TODO: replace {lang_id} in your language code here. Make sure the code is one of the *ISO codes* of [this](https://huggingface.co/languages) site. -wer = load_metric("wer") - -processor = Wav2Vec2Processor.from_pretrained("{model_id}") #TODO: replace {model_id} with your model id. The model id consists of {your_username}/{your_modelname}, *e.g.* `elgeish/wav2vec2-large-xlsr-53-arabic` -model = Wav2Vec2ForCTC.from_pretrained("{model_id}") #TODO: replace {model_id} with your model id. The model id consists of {your_username}/{your_modelname}, *e.g.* `elgeish/wav2vec2-large-xlsr-53-arabic` -model.to("cuda") - -chars_to_ignore_regex = '[\,\?\.\!\-\;\:\"\“]' # TODO: adapt this list to include all special characters you removed from the data -resampler = torchaudio.transforms.Resample(48_000, 16_000) - -# Preprocessing the datasets. -# We need to read the aduio files as arrays -def speech_file_to_array_fn(batch): - batch["sentence"] = re.sub(chars_to_ignore_regex, '', batch["sentence"]).lower() - speech_array, sampling_rate = torchaudio.load(batch["path"]) - batch["speech"] = resampler(speech_array).squeeze().numpy() - return batch - -test_dataset = test_dataset.map(speech_file_to_array_fn) - -# Preprocessing the datasets. -# We need to read the aduio files as arrays -def evaluate(batch): - inputs = processor(batch["speech"], sampling_rate=16_000, return_tensors="pt", padding=True) - - with torch.no_grad(): - logits = model(inputs.input_values.to("cuda"), attention_mask=inputs.attention_mask.to("cuda")).logits - - pred_ids = torch.argmax(logits, dim=-1) - batch["pred_strings"] = processor.batch_decode(pred_ids) - return batch - -result = test_dataset.map(evaluate, batched=True, batch_size=8) - -print("WER: {:2f}".format(100 * wer.compute(predictions=result["pred_strings"], references=result["sentence"]))) -``` - -**Test Result**: XX.XX % # TODO: write output of print here. IMPORTANT: Please remember to also replace {wer_result_on_test} at the top of with this value here. tags. - - -## Training - -The Common Voice `train`, `validation`, and ... datasets were used for training as well as ... and ... # TODO: adapt to state all the datasets that were used for training. - -The script used for training can be found [here](...) # TODO: fill in a link to your training script here. If you trained your model in a colab, simply fill in the link here. 
If you trained the model locally, it would be great if you could upload the training script on GitHub and paste the link here.
-
-=======================To here===============================>
-
-Your model is then available under *huggingface.co/{your_username}/{your_chosen_xlsr-large_model_name}* for everybody to use 🎉.
-
-## How to evaluate my trained checkpoint
-
-Having uploaded your model, you should now evaluate it in a final step. This should be as simple as
-copying the evaluation code of your model card into a python script and running it. Make sure to note
-the final result on the model card **both** under the YAML tags at the very top **and** below your evaluation code under "Test Results".
-
-## Rules of training and evaluation
-
-In this section, we will quickly go over what data is allowed to be used as training
-data, what kind of data preprocessing is allowed to be used, and how the model should be evaluated.
-
-To make it very simple regarding the first point: **All data except the official common voice `test` data set can be used as training data**. For models trained in a language that is not included in Common Voice, the author of the model is responsible for
-leaving a reasonable amount of data for evaluation.
-
-Second, the rules regarding preprocessing are not as straightforward. It is allowed (and recommended) to
-normalize the data to only have lower-case characters. It is also allowed (and recommended) to remove typographical
-symbols and punctuation marks. A list of such symbols can *e.g.* be found [here](https://en.wikipedia.org/wiki/List_of_typographical_symbols_and_punctuation_marks) - however, here we must already be careful. We should **not** remove a symbol that
-would change the meaning of the words, *e.g.* in English, we should not remove the single quotation mark `'` since it
-would change the word `"it's"` to `"its"`, which would then be incorrect. So the golden rule here is to
-not remove any characters that could change the meaning of a word into another word. This is not always obvious and should
-be given some consideration. As another example, it is fine to remove the "Hyphen-minus" sign "`-`" since it doesn't change the
-meaning of a word to another one. *E.g.* "`fine-tuning`" would be changed to "`finetuning`", which still has the same meaning. A minimal sketch of such a cleaning function is shown under ["How to effectively preprocess the data"](#how-to-effectively-preprocess-the-data) below.
-
-Since those choices are not always obvious, when in doubt feel free to ask on Slack or, even better, post on the forum, as was
-done, *e.g.* [here](https://discuss.huggingface.co/t/spanish-asr-fine-tuning-wav2vec2/4586).
-
-## Tips and tricks
-
-This section summarizes a couple of tips and tricks across various topics. It will continuously be updated during the week.
-
-### How to combine multiple datasets into one
-
-Check out [this](https://discuss.huggingface.co/t/how-to-combine-local-data-files-with-an-official-dataset/4685) post.
-
-### How to effectively preprocess the data
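As a rough illustration of the normalization rules described above, here is a minimal sketch of a cleaning function. The character set and the helper name below are only an example and must be adapted to your language; keep any symbol whose removal would change the meaning of a word (such as `'` in English).

```python
import re

# Example set of symbols to strip - adapt this to your language! Do not include
# characters whose removal would turn a word into a different word.
chars_to_ignore_regex = r'[\,\?\.\!\-\;\:\"\“]'


def normalize_sentence(sentence: str) -> str:
    # lower-case the text and strip the (meaning-preserving) symbols above
    sentence = re.sub(chars_to_ignore_regex, "", sentence).lower()
    # collapse consecutive whitespace into a single space
    return re.sub(r"\s+", " ", sentence).strip()


# normalize_sentence("Fine-tuning XLSR-Wav2Vec2, isn't it fun?")
# -> "finetuning xlsrwav2vec2 isn't it fun"
```

When in doubt about a particular symbol, follow the golden rule above and keep it.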
-
-### How to do efficiently load datasets with limited ram and hard drive space
-
-Check out [this](https://discuss.huggingface.co/t/german-asr-fine-tuning-wav2vec2/4558/8?u=patrickvonplaten) post.
-
-
-### How to do hyperparameter tuning
-
-
-### How to preprocess and evaluate character based languages
-
-
-## Further reading material
-
-It is recommended that you take some time to read up on how Wav2vec2 works in theory.
-Getting a better understanding of the theory and the inner mechanisms of the model often helps when fine-tuning the model.
-
-**However**, if you don't like reading blog posts/papers, don't worry - it is by no means necessary to go through the theory to fine-tune Wav2Vec2 on your language of choice.
-
-If you are interested in learning more about the model though, here are a couple of resources that are important to better understand Wav2Vec2:
-
-- [Facebook's Wav2Vec2 blog post](https://ai.facebook.com/blog/wav2vec-state-of-the-art-speech-recognition-through-self-supervision/)
-- [Official Wav2Vec2 paper](https://arxiv.org/abs/2006.11477)
-- [Official XLSR Wav2vec2 paper](https://arxiv.org/pdf/2006.13979.pdf)
-- [Hugging Face Blog](https://huggingface.co/blog/fine-tune-xlsr-wav2vec2)
-- [How does CTC (Connectionist Temporal Classification) work](https://distill.pub/2017/ctc/)
-
-It helps to have a good understanding of the following points:
-
-- How was XLSR-Wav2Vec2 pretrained? -> Feature vectors were masked and had to be predicted by the model; very similar in spirit to the masked language modeling objective of BERT.
-
-- What parts of XLSR-Wav2Vec2 are responsible for what? What is the feature extractor part used for? -> It extracts feature vectors from the 1D raw audio waveform. What is the transformer part doing? -> It maps feature vectors to contextualized feature vectors; ...
-
-- What part of the model needs to be fine-tuned? -> The pretrained model **does not** include a language head to classify the contextualized features to letters. This head is randomly initialized when loading the pretrained checkpoint and has to be fine-tuned. Also, note that the authors recommend **not** further fine-tuning the feature extractor.
-
-- What data was used to pretrain XLSR-Wav2Vec2? The checkpoint we will use for further fine-tuning was pretrained on **53** languages.
-
-- What languages are considered to be similar by XLSR-Wav2Vec2? In the official [XLSR Wav2Vec2 paper](https://arxiv.org/pdf/2006.13979.pdf), the authors show nicely which languages share a common contextualized latent space. It might be useful for you to extend your training data with data of other languages that are considered to be very similar by the model (or by you).
-
-
-## FAQ
-
-- Can a participant fine-tune models for more than one language?
-Yes! A participant can fine-tune models in as many languages as she/he likes.
-- Can a participant use extra data (apart from the common voice data)?
-Yes! All data except the official common voice `test data` can be used for training.
-If a participant wants to train a model on a language that is not part of Common Voice (which
-is very much encouraged!), the participant should make sure that some test data is held out to
-make sure the model is not overfitting.
-- Can we fine-tune for high-resource languages?
-Yes! We do not really recommend fine-tuning models in English, since there are
-already so many fine-tuned speech recognition models in English. However, it is very much
-appreciated if participants want to fine-tune models in other "high-resource" languages, such
-as French, Spanish, or German. For such cases, one probably needs to train locally and
-might have to apply tricks such as lazy data loading (check the ["Lazy data loading"](#how-to-do-lazy-data-loading) section for more details).
diff --git a/examples/research_projects/wav2vec2/README.md b/examples/research_projects/wav2vec2/README.md
deleted file mode 100644
index 88f62778a3a..00000000000
--- a/examples/research_projects/wav2vec2/README.md
+++ /dev/null
@@ -1,249 +0,0 @@
-**NOTE**: This example is outdated and is no longer actively maintained.
Please -follow the new instructions of fine-tuning Wav2Vec2 [here](https://github.com/huggingface/transformers/blob/main/examples/pytorch/speech-recognition/README.md) - -## Fine-tuning Wav2Vec2 - -The `run_asr.py` script allows one to fine-tune pretrained Wav2Vec2 models that can be found [here](https://huggingface.co/models?search=facebook/wav2vec2). - -This finetuning script can also be run as a google colab [TODO: here]( ). - -### Fine-Tuning with TIMIT -Let's take a look at the [script](./finetune_base_timit_asr.sh) used to fine-tune [wav2vec2-base](https://huggingface.co/facebook/wav2vec2-base) -with the [TIMIT dataset](https://huggingface.co/datasets/timit_asr): - -```bash -#!/usr/bin/env bash -python run_asr.py \ ---output_dir="./wav2vec2-base-timit-asr" \ ---num_train_epochs="30" \ ---per_device_train_batch_size="20" \ ---per_device_eval_batch_size="20" \ ---eval_strategy="steps" \ ---save_steps="500" \ ---eval_steps="100" \ ---logging_steps="50" \ ---learning_rate="5e-4" \ ---warmup_steps="3000" \ ---model_name_or_path="facebook/wav2vec2-base" \ ---fp16 \ ---dataset_name="timit_asr" \ ---train_split_name="train" \ ---validation_split_name="test" \ ---orthography="timit" \ ---preprocessing_num_workers="$(nproc)" \ ---group_by_length \ ---freeze_feature_extractor \ ---verbose_logging \ -``` - -The resulting model and inference examples can be found [here](https://huggingface.co/elgeish/wav2vec2-base-timit-asr). -Some of the arguments above may look unfamiliar, let's break down what's going on: - -`--orthography="timit"` applies certain text preprocessing rules, for tokenization and normalization, to clean up the dataset. -In this case, we use the following instance of `Orthography`: - -```python -Orthography( - do_lower_case=True, - # break compounds like "quarter-century-old" and replace pauses "--" - translation_table=str.maketrans({"-": " "}), -) -``` - -The instance above is used as follows: -* creates a tokenizer with `do_lower_case=True` (ignores casing for input and lowercases output when decoding) -* replaces `"-"` with `" "` to break compounds like `"quarter-century-old"` and to clean up suspended hyphens -* cleans up consecutive whitespaces (replaces them with a single space: `" "`) -* removes characters not in vocabulary (lacking respective sound units) - -`--verbose_logging` logs text preprocessing updates and when evaluating, using the validation split every `eval_steps`, -logs references and predictions. - -### Fine-Tuning with Arabic Speech Corpus - -Other datasets, like the [Arabic Speech Corpus dataset](https://huggingface.co/datasets/arabic_speech_corpus), -require more work! 
Let's take a look at the [script](./finetune_large_xlsr_53_arabic_speech_corpus.sh) -used to fine-tune [wav2vec2-large-xlsr-53](https://huggingface.co/elgeish/wav2vec2-large-xlsr-53-arabic): - -```bash -#!/usr/bin/env bash -python run_asr.py \ ---output_dir="./wav2vec2-large-xlsr-53-arabic-speech-corpus" \ ---num_train_epochs="50" \ ---per_device_train_batch_size="1" \ ---per_device_eval_batch_size="1" \ ---gradient_accumulation_steps="8" \ ---eval_strategy="steps" \ ---save_steps="500" \ ---eval_steps="100" \ ---logging_steps="50" \ ---learning_rate="5e-4" \ ---warmup_steps="3000" \ ---model_name_or_path="elgeish/wav2vec2-large-xlsr-53-arabic" \ ---fp16 \ ---dataset_name="arabic_speech_corpus" \ ---train_split_name="train" \ ---validation_split_name="test" \ ---max_duration_in_seconds="15" \ ---orthography="buckwalter" \ ---preprocessing_num_workers="$(nproc)" \ ---group_by_length \ ---freeze_feature_extractor \ ---target_feature_extractor_sampling_rate \ ---verbose_logging \ -``` - -First, let's understand how this dataset represents Arabic text; it uses a format called -[Buckwalter transliteration](https://en.wikipedia.org/wiki/Buckwalter_transliteration). -We use the [lang-trans](https://github.com/kariminf/lang-trans) package to convert back to Arabic when logging. -The Buckwalter format only includes ASCII characters, some of which are non-alpha (e.g., `">"` maps to `"أ"`). - -`--orthography="buckwalter"` applies certain text preprocessing rules, for tokenization and normalization, to clean up the dataset. In this case, we use the following instance of `Orthography`: - -```python -Orthography( - vocab_file=pathlib.Path(__file__).parent.joinpath("vocab/buckwalter.json"), - word_delimiter_token="/", # "|" is Arabic letter alef with madda above - words_to_remove={"sil"}, # fixing "sil" in arabic_speech_corpus dataset - untransliterator=arabic.buckwalter.untransliterate, - translation_table=str.maketrans(translation_table = { - "-": " ", # sometimes used to represent pauses - "^": "v", # fixing "tha" in arabic_speech_corpus dataset - }), -) -``` - -The instance above is used as follows: -* creates a tokenizer with Buckwalter vocabulary and `word_delimiter_token="/"` -* replaces `"-"` with `" "` to clean up hyphens and fixes the orthography for `"ث"` -* removes words used as indicators (in this case, `"sil"` is used for silence) -* cleans up consecutive whitespaces (replaces them with a single space: `" "`) -* removes characters not in vocabulary (lacking respective sound units) - -`--verbose_logging` logs text preprocessing updates and when evaluating, using the validation split every `eval_steps`, -logs references and predictions. Using the Buckwalter format, text is also logged in Arabic abjad. - -`--target_feature_extractor_sampling_rate` resamples audio to target feature extractor's sampling rate (16kHz). - -`--max_duration_in_seconds="15"` filters out examples whose audio is longer than the specified limit, -which helps with capping GPU memory usage. - - -### DeepSpeed Integration - -To learn how to deploy Deepspeed Integration please refer to [this guide](https://huggingface.co/transformers/main/main_classes/deepspeed.html#deepspeed-trainer-integration). 
- -But to get started quickly all you need is to install: -```bash -pip install deepspeed -``` -and then use the default configuration files in this directory: - -* `ds_config_wav2vec2_zero2.json` -* `ds_config_wav2vec2_zero3.json` - -Here are examples of how you can use DeepSpeed: - -(edit the value for `--num_gpus` to match the number of GPUs you have) - -ZeRO-2: - -```bash -PYTHONPATH=../../../src deepspeed --num_gpus 2 \ -run_asr.py \ ---output_dir=output_dir --num_train_epochs=2 --per_device_train_batch_size=2 \ ---per_device_eval_batch_size=2 --eval_strategy=steps --save_steps=500 --eval_steps=100 \ ---logging_steps=5 --learning_rate=5e-4 --warmup_steps=3000 \ ---model_name_or_path=patrickvonplaten/wav2vec2_tiny_random_robust \ ---dataset_name=hf-internal-testing/librispeech_asr_dummy --dataset_config_name=clean \ ---train_split_name=validation --validation_split_name=validation --orthography=timit \ ---preprocessing_num_workers=1 --group_by_length --freeze_feature_extractor --verbose_logging \ ---deepspeed ds_config_wav2vec2_zero2.json -``` - -For ZeRO-2 with more than 1 gpu you need to use (which is already in the example configuration file): -```json - "zero_optimization": { - ... - "find_unused_parameters": true, - ... - } -``` - -ZeRO-3: - -```bash -PYTHONPATH=../../../src deepspeed --num_gpus 2 \ -run_asr.py \ ---output_dir=output_dir --num_train_epochs=2 --per_device_train_batch_size=2 \ ---per_device_eval_batch_size=2 --eval_strategy=steps --save_steps=500 --eval_steps=100 \ ---logging_steps=5 --learning_rate=5e-4 --warmup_steps=3000 \ ---model_name_or_path=patrickvonplaten/wav2vec2_tiny_random_robust \ ---dataset_name=hf-internal-testing/librispeech_asr_dummy --dataset_config_name=clean \ ---train_split_name=validation --validation_split_name=validation --orthography=timit \ ---preprocessing_num_workers=1 --group_by_length --freeze_feature_extractor --verbose_logging \ ---deepspeed ds_config_wav2vec2_zero3.json -``` - -### Pretraining Wav2Vec2 - -The `run_pretrain.py` script allows one to pretrain a Wav2Vec2 model from scratch using Wav2Vec2's contrastive loss objective (see official [paper](https://arxiv.org/abs/2006.11477) for more information). -It is recommended to pre-train Wav2Vec2 with Trainer + Deepspeed (please refer to [this guide](https://huggingface.co/transformers/main/main_classes/deepspeed.html#deepspeed-trainer-integration) for more information). - -Here is an example of how you can use DeepSpeed ZeRO-2 to pretrain a small Wav2Vec2 model: - -```bash -PYTHONPATH=../../../src deepspeed --num_gpus 4 run_pretrain.py \ ---output_dir="./wav2vec2-base-libri-100h" \ ---num_train_epochs="3" \ ---per_device_train_batch_size="32" \ ---per_device_eval_batch_size="32" \ ---gradient_accumulation_steps="2" \ ---save_total_limit="3" \ ---save_steps="500" \ ---logging_steps="10" \ ---learning_rate="5e-4" \ ---weight_decay="0.01" \ ---warmup_steps="3000" \ ---model_name_or_path="patrickvonplaten/wav2vec2-base-libri-100h" \ ---dataset_name="librispeech_asr" \ ---dataset_config_name="clean" \ ---train_split_name="train.100" \ ---preprocessing_num_workers="4" \ ---max_duration_in_seconds="10.0" \ ---group_by_length \ ---verbose_logging \ ---fp16 \ ---deepspeed ds_config_wav2vec2_zero2.json \ -``` - - -### Forced Alignment - -Character level forced alignment for audio and text pairs with wav2vec2 models finetuned on ASR task for a specific language. -Inspired by [this](https://pytorch.org/tutorials/intermediate/forced_alignment_with_torchaudio_tutorial.html) Pytorch tutorial. 
- -#### Input Formats - - Input format in script.txt Input format in wavs directroy - 0000 sentence1 0000.wav - 0001 sentence2 0001.wav - -#### Output Format - -Output directory will contain 0000.txt and 0001.txt. Each file will have format like below - - char score start_ms end_ms - h 0.25 1440 1520 - -#### Run command - -```bash -python alignment.py \ ---model_name="arijitx/wav2vec2-xls-r-300m-bengali" \ ---wav_dir="./wavs" ---text_file="script.txt" \ ---input_wavs_sr=48000 \ ---output_dir="./out_alignment" \ ---cuda -``` diff --git a/examples/research_projects/wav2vec2/alignment.py b/examples/research_projects/wav2vec2/alignment.py deleted file mode 100644 index 55b477f5ee9..00000000000 --- a/examples/research_projects/wav2vec2/alignment.py +++ /dev/null @@ -1,223 +0,0 @@ -# Parts of the code are adapted from the snippets provided in the TorchAudio Wav2Vec forced alignment tutorial. -# The full tutorial can be found here: https://pytorch.org/audio/stable/tutorials/forced_alignment_tutorial.html - -import argparse -import os -from dataclasses import dataclass - -import torch -import torchaudio -from tqdm import tqdm - -from transformers import AutoConfig, AutoModelForCTC, AutoProcessor - - -class Wav2Vec2Aligner: - def __init__(self, model_name, input_wavs_sr, cuda): - self.cuda = cuda - self.config = AutoConfig.from_pretrained(model_name) - self.model = AutoModelForCTC.from_pretrained(model_name) - self.model.eval() - if self.cuda: - self.model.to(device="cuda") - self.processor = AutoProcessor.from_pretrained(model_name) - self.resampler = torchaudio.transforms.Resample(input_wavs_sr, 16_000) - blank_id = 0 - vocab = list(self.processor.tokenizer.get_vocab().keys()) - for i in range(len(vocab)): - if vocab[i] == "[PAD]" or vocab[i] == "": - blank_id = i - print("Blank Token id [PAD]/", blank_id) - self.blank_id = blank_id - - def speech_file_to_array_fn(self, wav_path): - speech_array, sampling_rate = torchaudio.load(wav_path) - speech = self.resampler(speech_array).squeeze().numpy() - return speech - - def align_single_sample(self, item): - blank_id = self.blank_id - transcript = "|".join(item["sent"].split(" ")) - if not os.path.isfile(item["wav_path"]): - print(item["wav_path"], "not found in wavs directory") - - speech_array = self.speech_file_to_array_fn(item["wav_path"]) - inputs = self.processor(speech_array, sampling_rate=16_000, return_tensors="pt", padding=True) - if self.cuda: - inputs = inputs.to(device="cuda") - - with torch.no_grad(): - logits = self.model(inputs.input_values).logits - - # get the emission probability at frame level - emissions = torch.log_softmax(logits, dim=-1) - emission = emissions[0].cpu().detach() - - # get labels from vocab - labels = ([""] + list(self.processor.tokenizer.get_vocab().keys()))[ - :-1 - ] # logits don't align with the tokenizer's vocab - - dictionary = {c: i for i, c in enumerate(labels)} - tokens = [] - for c in transcript: - if c in dictionary: - tokens.append(dictionary[c]) - - def get_trellis(emission, tokens, blank_id=0): - """ - Build a trellis matrix of shape (num_frames + 1, num_tokens + 1) - that represents the probabilities of each source token being at a certain time step - """ - num_frames = emission.size(0) - num_tokens = len(tokens) - - # Trellis has extra diemsions for both time axis and tokens. - # The extra dim for tokens represents (start-of-sentence) - # The extra dim for time axis is for simplification of the code. 
- trellis = torch.full((num_frames + 1, num_tokens + 1), -float("inf")) - trellis[:, 0] = 0 - for t in range(num_frames): - trellis[t + 1, 1:] = torch.maximum( - # Score for staying at the same token - trellis[t, 1:] + emission[t, blank_id], - # Score for changing to the next token - trellis[t, :-1] + emission[t, tokens], - ) - return trellis - - trellis = get_trellis(emission, tokens, blank_id) - - @dataclass - class Point: - token_index: int - time_index: int - score: float - - def backtrack(trellis, emission, tokens, blank_id=0): - """ - Walk backwards from the last (sentence_token, time_step) pair to build the optimal sequence alignment path - """ - # Note: - # j and t are indices for trellis, which has extra dimensions - # for time and tokens at the beginning. - # When referring to time frame index `T` in trellis, - # the corresponding index in emission is `T-1`. - # Similarly, when referring to token index `J` in trellis, - # the corresponding index in transcript is `J-1`. - j = trellis.size(1) - 1 - t_start = torch.argmax(trellis[:, j]).item() - - path = [] - for t in range(t_start, 0, -1): - # 1. Figure out if the current position was stay or change - # Note (again): - # `emission[J-1]` is the emission at time frame `J` of trellis dimension. - # Score for token staying the same from time frame J-1 to T. - stayed = trellis[t - 1, j] + emission[t - 1, blank_id] - # Score for token changing from C-1 at T-1 to J at T. - changed = trellis[t - 1, j - 1] + emission[t - 1, tokens[j - 1]] - - # 2. Store the path with frame-wise probability. - prob = emission[t - 1, tokens[j - 1] if changed > stayed else 0].exp().item() - # Return token index and time index in non-trellis coordinate. - path.append(Point(j - 1, t - 1, prob)) - - # 3. Update the token - if changed > stayed: - j -= 1 - if j == 0: - break - else: - raise ValueError("Failed to align") - return path[::-1] - - path = backtrack(trellis, emission, tokens, blank_id) - - @dataclass - class Segment: - label: str - start: int - end: int - score: float - - def __repr__(self): - return f"{self.label}\t{self.score:4.2f}\t{self.start*20:5d}\t{self.end*20:5d}" - - @property - def length(self): - return self.end - self.start - - def merge_repeats(path): - """ - Merge repeated tokens into a single segment. Note: this shouldn't affect repeated characters from the - original sentences (e.g. 
`ll` in `hello`) - """ - i1, i2 = 0, 0 - segments = [] - while i1 < len(path): - while i2 < len(path) and path[i1].token_index == path[i2].token_index: - i2 += 1 - score = sum(path[k].score for k in range(i1, i2)) / (i2 - i1) - segments.append( - Segment( - transcript[path[i1].token_index], - path[i1].time_index, - path[i2 - 1].time_index + 1, - score, - ) - ) - i1 = i2 - return segments - - segments = merge_repeats(path) - with open(item["out_path"], "w") as out_align: - for seg in segments: - out_align.write(str(seg) + "\n") - - def align_data(self, wav_dir, text_file, output_dir): - if not os.path.exists(output_dir): - os.makedirs(output_dir) - - # load text file - lines = open(text_file, encoding="utf8").readlines() - - items = [] - for line in lines: - if len(line.strip().split("\t")) != 2: - print("Script must be in format: 00001 this is my sentence") - exit() - - wav_name, sentence = line.strip().split("\t") - wav_path = os.path.join(wav_dir, wav_name + ".wav") - out_path = os.path.join(output_dir, wav_name + ".txt") - - items.append({"sent": sentence, "wav_path": wav_path, "out_path": out_path}) - print("Number of samples found in script file", len(items)) - - for item in tqdm(items): - self.align_single_sample(item) - - -def main(): - parser = argparse.ArgumentParser() - - parser.add_argument( - "--model_name", type=str, default="arijitx/wav2vec2-xls-r-300m-bengali", help="wav2vec model name" - ) - parser.add_argument("--wav_dir", type=str, default="./wavs", help="directory containing wavs") - parser.add_argument("--text_file", type=str, default="script.txt", help="file containing text") - parser.add_argument("--input_wavs_sr", type=int, default=16000, help="sampling rate of input audios") - parser.add_argument( - "--output_dir", type=str, default="./out_alignment", help="output directory containing the alignment files" - ) - parser.add_argument("--cuda", action="store_true") - - args = parser.parse_args() - - aligner = Wav2Vec2Aligner(args.model_name, args.input_wavs_sr, args.cuda) - aligner.align_data(args.wav_dir, args.text_file, args.output_dir) - - -if __name__ == "__main__": - main() diff --git a/examples/research_projects/wav2vec2/ds_config_wav2vec2_zero2.json b/examples/research_projects/wav2vec2/ds_config_wav2vec2_zero2.json deleted file mode 100644 index 6745e9917a3..00000000000 --- a/examples/research_projects/wav2vec2/ds_config_wav2vec2_zero2.json +++ /dev/null @@ -1,51 +0,0 @@ -{ - "fp16": { - "enabled": "auto", - "loss_scale": 0, - "loss_scale_window": 1000, - "initial_scale_power": 16, - "hysteresis": 2, - "min_loss_scale": 1 - }, - - "optimizer": { - "type": "AdamW", - "params": { - "lr": "auto", - "betas": "auto", - "eps": "auto", - "weight_decay": "auto" - } - }, - - "scheduler": { - "type": "WarmupLR", - "params": { - "warmup_min_lr": "auto", - "warmup_max_lr": "auto", - "warmup_num_steps": "auto" - } - }, - - "zero_optimization": { - "stage": 2, - "offload_optimizer": { - "device": "cpu", - "pin_memory": true - }, - "find_unused_parameters": true, - "allgather_partitions": true, - "allgather_bucket_size": 2e8, - "overlap_comm": true, - "reduce_scatter": true, - "reduce_bucket_size": 2e8, - "contiguous_gradients": true - }, - - "gradient_accumulation_steps": "auto", - "gradient_clipping": "auto", - "steps_per_print": 2000, - "train_batch_size": "auto", - "train_micro_batch_size_per_gpu": "auto", - "wall_clock_breakdown": false -} diff --git a/examples/research_projects/wav2vec2/ds_config_wav2vec2_zero3.json 
b/examples/research_projects/wav2vec2/ds_config_wav2vec2_zero3.json deleted file mode 100644 index 1beb972ba89..00000000000 --- a/examples/research_projects/wav2vec2/ds_config_wav2vec2_zero3.json +++ /dev/null @@ -1,57 +0,0 @@ -{ - "fp16": { - "enabled": "auto", - "loss_scale": 0, - "loss_scale_window": 1000, - "initial_scale_power": 16, - "hysteresis": 2, - "min_loss_scale": 1 - }, - - "optimizer": { - "type": "AdamW", - "params": { - "lr": "auto", - "betas": "auto", - "eps": "auto", - "weight_decay": "auto" - } - }, - - "scheduler": { - "type": "WarmupLR", - "params": { - "warmup_min_lr": "auto", - "warmup_max_lr": "auto", - "warmup_num_steps": "auto" - } - }, - - "zero_optimization": { - "stage": 3, - "offload_optimizer": { - "device": "cpu", - "pin_memory": true - }, - "offload_param": { - "device": "cpu", - "pin_memory": true - }, - "overlap_comm": true, - "contiguous_gradients": true, - "sub_group_size": 1e9, - "reduce_bucket_size": "auto", - "stage3_prefetch_bucket_size": "auto", - "stage3_param_persistence_threshold": "auto", - "stage3_max_live_parameters": 1e9, - "stage3_max_reuse_distance": 1e9, - "stage3_gather_16bit_weights_on_model_save": true - }, - - "gradient_accumulation_steps": "auto", - "gradient_clipping": "auto", - "steps_per_print": 2000, - "train_batch_size": "auto", - "train_micro_batch_size_per_gpu": "auto", - "wall_clock_breakdown": false -} diff --git a/examples/research_projects/wav2vec2/finetune_base_100.sh b/examples/research_projects/wav2vec2/finetune_base_100.sh deleted file mode 100755 index 254b0afef3d..00000000000 --- a/examples/research_projects/wav2vec2/finetune_base_100.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/usr/bin/env bash -python run_asr.py \ ---output_dir="./wav2vec2-base-100h" \ ---num_train_epochs="30" \ ---per_device_train_batch_size="32" \ ---per_device_eval_batch_size="32" \ ---eval_strategy="steps" \ ---save_total_limit="3" \ ---save_steps="500" \ ---eval_steps="100" \ ---logging_steps="50" \ ---learning_rate="5e-4" \ ---warmup_steps="3000" \ ---model_name_or_path="facebook/wav2vec2-base" \ ---fp16 \ ---dataset_name="librispeech_asr" \ ---dataset_config_name="clean" \ ---train_split_name="train.100" \ ---preprocessing_num_workers="32" \ ---group_by_length \ ---freeze_feature_extractor diff --git a/examples/research_projects/wav2vec2/finetune_base_timit_asr.sh b/examples/research_projects/wav2vec2/finetune_base_timit_asr.sh deleted file mode 100755 index 508cb532b0f..00000000000 --- a/examples/research_projects/wav2vec2/finetune_base_timit_asr.sh +++ /dev/null @@ -1,22 +0,0 @@ -#!/usr/bin/env bash -python run_asr.py \ ---output_dir="./wav2vec2-base-timit-asr" \ ---num_train_epochs="30" \ ---per_device_train_batch_size="20" \ ---per_device_eval_batch_size="20" \ ---eval_strategy="steps" \ ---save_steps="500" \ ---eval_steps="100" \ ---logging_steps="50" \ ---learning_rate="5e-4" \ ---warmup_steps="3000" \ ---model_name_or_path="facebook/wav2vec2-base" \ ---fp16 \ ---dataset_name="timit_asr" \ ---train_split_name="train" \ ---validation_split_name="test" \ ---orthography="timit" \ ---preprocessing_num_workers="$(nproc)" \ ---group_by_length \ ---freeze_feature_extractor \ ---verbose_logging \ diff --git a/examples/research_projects/wav2vec2/finetune_large_lv60_100.sh b/examples/research_projects/wav2vec2/finetune_large_lv60_100.sh deleted file mode 100755 index 6956b093e72..00000000000 --- a/examples/research_projects/wav2vec2/finetune_large_lv60_100.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/usr/bin/env bash -python run_asr.py \ 
---output_dir="./wav2vec2-large-lv60-100h" \ ---num_train_epochs="30" \ ---per_device_train_batch_size="16" \ ---per_device_eval_batch_size="16" \ ---eval_strategy="steps" \ ---save_total_limit="3" \ ---save_steps="500" \ ---eval_steps="100" \ ---logging_steps="50" \ ---learning_rate="5e-4" \ ---warmup_steps="3000" \ ---model_name_or_path="facebook/wav2vec2-large-lv60" \ ---fp16 \ ---dataset_name="librispeech_asr" \ ---dataset_config_name="clean" \ ---train_split_name="train.100" \ ---preprocessing_num_workers="32" \ ---group_by_length \ ---freeze_feature_extractor diff --git a/examples/research_projects/wav2vec2/finetune_large_lv60_timit_asr.sh b/examples/research_projects/wav2vec2/finetune_large_lv60_timit_asr.sh deleted file mode 100755 index fa02e71ea82..00000000000 --- a/examples/research_projects/wav2vec2/finetune_large_lv60_timit_asr.sh +++ /dev/null @@ -1,23 +0,0 @@ -#!/usr/bin/env bash -python run_asr.py \ ---output_dir="./wav2vec2-large-lv60-timit-asr" \ ---num_train_epochs="30" \ ---per_device_train_batch_size="2" \ ---per_device_eval_batch_size="2" \ ---gradient_accumulation_steps="4" \ ---eval_strategy="steps" \ ---save_steps="500" \ ---eval_steps="100" \ ---logging_steps="50" \ ---learning_rate="5e-4" \ ---warmup_steps="3000" \ ---model_name_or_path="facebook/wav2vec2-large-lv60" \ ---fp16 \ ---dataset_name="timit_asr" \ ---train_split_name="train" \ ---validation_split_name="test" \ ---orthography="timit" \ ---preprocessing_num_workers="$(nproc)" \ ---group_by_length \ ---freeze_feature_extractor \ ---verbose_logging \ diff --git a/examples/research_projects/wav2vec2/finetune_large_xlsr_53_arabic_speech_corpus.sh b/examples/research_projects/wav2vec2/finetune_large_xlsr_53_arabic_speech_corpus.sh deleted file mode 100755 index e90bc8caa6c..00000000000 --- a/examples/research_projects/wav2vec2/finetune_large_xlsr_53_arabic_speech_corpus.sh +++ /dev/null @@ -1,25 +0,0 @@ -#!/usr/bin/env bash -python run_asr.py \ ---output_dir="./wav2vec2-large-xlsr-53-arabic-speech-corpus" \ ---num_train_epochs="50" \ ---per_device_train_batch_size="1" \ ---per_device_eval_batch_size="1" \ ---gradient_accumulation_steps="8" \ ---eval_strategy="steps" \ ---save_steps="500" \ ---eval_steps="100" \ ---logging_steps="50" \ ---learning_rate="5e-4" \ ---warmup_steps="3000" \ ---model_name_or_path="elgeish/wav2vec2-large-xlsr-53-arabic" \ ---fp16 \ ---dataset_name="arabic_speech_corpus" \ ---train_split_name="train" \ ---validation_split_name="test" \ ---max_duration_in_seconds="15" \ ---orthography="buckwalter" \ ---preprocessing_num_workers="$(nproc)" \ ---group_by_length \ ---freeze_feature_extractor \ ---target_feature_extractor_sampling_rate \ ---verbose_logging \ diff --git a/examples/research_projects/wav2vec2/finetune_wav2vec2_xlsr_turkish.sh b/examples/research_projects/wav2vec2/finetune_wav2vec2_xlsr_turkish.sh deleted file mode 100644 index 70da0e0a0d1..00000000000 --- a/examples/research_projects/wav2vec2/finetune_wav2vec2_xlsr_turkish.sh +++ /dev/null @@ -1,22 +0,0 @@ -#!/usr/bin/env bash -python run_common_voice.py \ - --model_name_or_path="facebook/wav2vec2-large-xlsr-53" \ - --dataset_config_name="tr" \ - --output_dir=./wav2vec2-large-xlsr-turkish-demo \ - --overwrite_output_dir \ - --num_train_epochs="5" \ - --per_device_train_batch_size="16" \ - --eval_strategy="steps" \ - --learning_rate="3e-4" \ - --warmup_steps="500" \ - --fp16 \ - --freeze_feature_extractor \ - --save_steps="400" \ - --eval_steps="400" \ - --save_total_limit="3" \ - --logging_steps="400" \ - --group_by_length \ 
- --feat_proj_dropout="0.0" \ - --layerdrop="0.1" \ - --gradient_checkpointing \ - --do_train --do_eval diff --git a/examples/research_projects/wav2vec2/requirements.txt b/examples/research_projects/wav2vec2/requirements.txt deleted file mode 100644 index 26b553c1392..00000000000 --- a/examples/research_projects/wav2vec2/requirements.txt +++ /dev/null @@ -1,7 +0,0 @@ -transformers -datasets -torch>=1.5.0 -torchaudio -jiwer==2.2.0 -lang-trans==0.6.0 -librosa==0.8.0 diff --git a/examples/research_projects/wav2vec2/run_alignment.sh b/examples/research_projects/wav2vec2/run_alignment.sh deleted file mode 100644 index 95bfe02cf03..00000000000 --- a/examples/research_projects/wav2vec2/run_alignment.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/usr/bin/env bash -python alignment.py \ ---model_name="arijitx/wav2vec2-xls-r-300m-bengali" \ ---wav_dir="./wavs" \ ---text_file="script.txt" \ ---input_wavs_sr=48000 \ ---output_dir="./out_alignment" \ ---cuda diff --git a/examples/research_projects/wav2vec2/run_asr.py b/examples/research_projects/wav2vec2/run_asr.py deleted file mode 100755 index 796d271583b..00000000000 --- a/examples/research_projects/wav2vec2/run_asr.py +++ /dev/null @@ -1,480 +0,0 @@ -#!/usr/bin/env python3 -import logging -import pathlib -import re -import sys -from dataclasses import dataclass, field -from typing import Any, Callable, Dict, List, Optional, Set, Union - -import datasets -import librosa -import numpy as np -import torch -from lang_trans import arabic -from packaging import version -from torch import nn - -from transformers import ( - HfArgumentParser, - Trainer, - TrainingArguments, - Wav2Vec2CTCTokenizer, - Wav2Vec2FeatureExtractor, - Wav2Vec2ForCTC, - Wav2Vec2Processor, - is_apex_available, - trainer_utils, -) - - -if is_apex_available(): - from apex import amp - -if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"): - _is_native_amp_available = True - from torch.cuda.amp import autocast - - -logger = logging.getLogger(__name__) - - -@dataclass -class ModelArguments: - """ - Arguments pertaining to which model/config/tokenizer we are going to fine-tune from. - """ - - model_name_or_path: str = field( - metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} - ) - cache_dir: Optional[str] = field( - default=None, - metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, - ) - freeze_feature_extractor: Optional[bool] = field( - default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."} - ) - verbose_logging: Optional[bool] = field( - default=False, - metadata={"help": "Whether to log verbose messages or not."}, - ) - - -def configure_logger(model_args: ModelArguments, training_args: TrainingArguments): - logging.basicConfig( - format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", - datefmt="%m/%d/%Y %H:%M:%S", - handlers=[logging.StreamHandler(sys.stdout)], - ) - logging_level = logging.WARNING - if model_args.verbose_logging: - logging_level = logging.DEBUG - elif trainer_utils.is_main_process(training_args.local_rank): - logging_level = logging.INFO - logger.setLevel(logging_level) - - -@dataclass -class DataTrainingArguments: - """ - Arguments pertaining to what data we are going to input our model for training and eval. - - Using `HfArgumentParser` we can turn this class - into argparse arguments to be able to specify them on - the command line. 
- """ - - dataset_name: str = field( - default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."} - ) - dataset_config_name: Optional[str] = field( - default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} - ) - train_split_name: Optional[str] = field( - default="train", - metadata={ - "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'" - }, - ) - validation_split_name: Optional[str] = field( - default="validation", - metadata={ - "help": ( - "The name of the validation data set split to use (via the datasets library). Defaults to 'validation'" - ) - }, - ) - target_text_column: Optional[str] = field( - default="text", - metadata={"help": "Column in the dataset that contains label (target text). Defaults to 'text'"}, - ) - speech_file_column: Optional[str] = field( - default="file", - metadata={"help": "Column in the dataset that contains speech file path. Defaults to 'file'"}, - ) - target_feature_extractor_sampling_rate: Optional[bool] = field( - default=False, - metadata={"help": "Resample loaded audio to target feature extractor's sampling rate or not."}, - ) - max_duration_in_seconds: Optional[float] = field( - default=None, - metadata={"help": "Filters out examples longer than specified. Defaults to no filtering."}, - ) - orthography: Optional[str] = field( - default="librispeech", - metadata={ - "help": ( - "Orthography used for normalization and tokenization: 'librispeech' (default), 'timit', or" - " 'buckwalter'." - ) - }, - ) - overwrite_cache: bool = field( - default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."} - ) - preprocessing_num_workers: Optional[int] = field( - default=None, - metadata={"help": "The number of processes to use for the preprocessing."}, - ) - - -@dataclass -class Orthography: - """ - Orthography scheme used for text normalization and tokenization. - - Args: - do_lower_case (:obj:`bool`, `optional`, defaults to :obj:`False`): - Whether or not to accept lowercase input and lowercase the output when decoding. - vocab_file (:obj:`str`, `optional`): - File containing the vocabulary. - word_delimiter_token (:obj:`str`, `optional`, defaults to :obj:`"|"`): - The token used for delimiting words; it needs to be in the vocabulary. - translation_table (:obj:`Dict[str, str]`, `optional`, defaults to :obj:`{}`): - Table to use with `str.translate()` when preprocessing text (e.g., "-" -> " "). - words_to_remove (:obj:`Set[str]`, `optional`, defaults to :obj:`set()`): - Words to remove when preprocessing text (e.g., "sil"). - untransliterator (:obj:`Callable[[str], str]`, `optional`): - Function that untransliterates text back into native writing system. 
- """ - - do_lower_case: bool = False - vocab_file: Optional[str] = None - word_delimiter_token: Optional[str] = "|" - translation_table: Optional[Dict[str, str]] = field(default_factory=dict) - words_to_remove: Optional[Set[str]] = field(default_factory=set) - untransliterator: Optional[Callable[[str], str]] = None - - @classmethod - def from_name(cls, name: str): - if name == "librispeech": - return cls() - if name == "timit": - return cls( - do_lower_case=True, - # break compounds like "quarter-century-old" and replace pauses "--" - translation_table=str.maketrans({"-": " "}), - ) - if name == "buckwalter": - translation_table = { - "-": " ", # sometimes used to represent pauses - "^": "v", # fixing "tha" in arabic_speech_corpus dataset - } - return cls( - vocab_file=pathlib.Path(__file__).parent.joinpath("vocab/buckwalter.json"), - word_delimiter_token="/", # "|" is Arabic letter alef with madda above - translation_table=str.maketrans(translation_table), - words_to_remove={"sil"}, # fixing "sil" in arabic_speech_corpus dataset - untransliterator=arabic.buckwalter.untransliterate, - ) - raise ValueError(f"Unsupported orthography: '{name}'.") - - def preprocess_for_training(self, text: str) -> str: - # TODO(elgeish) return a pipeline (e.g., from jiwer) instead? Or rely on branch predictor as is - if len(self.translation_table) > 0: - text = text.translate(self.translation_table) - if len(self.words_to_remove) == 0: - text = " ".join(text.split()) # clean up whitespaces - else: - text = " ".join(w for w in text.split() if w not in self.words_to_remove) # and clean up whilespaces - return text - - def create_processor(self, model_args: ModelArguments) -> Wav2Vec2Processor: - feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained( - model_args.model_name_or_path, cache_dir=model_args.cache_dir - ) - if self.vocab_file: - tokenizer = Wav2Vec2CTCTokenizer( - self.vocab_file, - cache_dir=model_args.cache_dir, - do_lower_case=self.do_lower_case, - word_delimiter_token=self.word_delimiter_token, - ) - else: - tokenizer = Wav2Vec2CTCTokenizer.from_pretrained( - model_args.model_name_or_path, - cache_dir=model_args.cache_dir, - do_lower_case=self.do_lower_case, - word_delimiter_token=self.word_delimiter_token, - ) - return Wav2Vec2Processor(feature_extractor, tokenizer) - - -@dataclass -class DataCollatorCTCWithPadding: - """ - Data collator that will dynamically pad the inputs received. - Args: - processor (:class:`~transformers.Wav2Vec2Processor`) - The processor used for processing the data. - padding (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults to :obj:`True`): - Select a strategy to pad the returned sequences (according to the model's padding side and padding index) - among: - * :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single - sequence if provided). - * :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the - maximum acceptable input length for the model if that argument is not provided. - * :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of - different lengths). - max_length (:obj:`int`, `optional`): - Maximum length of the ``input_values`` of the returned list and optionally padding length (see above). - max_length_labels (:obj:`int`, `optional`): - Maximum length of the ``labels`` returned list and optionally padding length (see above). 
- pad_to_multiple_of (:obj:`int`, `optional`): - If set will pad the sequence to a multiple of the provided value. - This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >= - 7.5 (Volta). - """ - - processor: Wav2Vec2Processor - padding: Union[bool, str] = True - max_length: Optional[int] = None - max_length_labels: Optional[int] = None - pad_to_multiple_of: Optional[int] = None - pad_to_multiple_of_labels: Optional[int] = None - - def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]: - # split inputs and labels since they have to be of different lengths and need - # different padding methods - input_features = [{"input_values": feature["input_values"]} for feature in features] - label_features = [{"input_ids": feature["labels"]} for feature in features] - - batch = self.processor.pad( - input_features, - padding=self.padding, - max_length=self.max_length, - pad_to_multiple_of=self.pad_to_multiple_of, - return_tensors="pt", - ) - labels_batch = self.processor.pad( - labels=label_features, - padding=self.padding, - max_length=self.max_length_labels, - pad_to_multiple_of=self.pad_to_multiple_of_labels, - return_tensors="pt", - ) - - # replace padding with -100 to ignore loss correctly - labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100) - - batch["labels"] = labels - - return batch - - -class CTCTrainer(Trainer): - def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor: - """ - Perform a training step on a batch of inputs. - - Subclass and override to inject custom behavior. - - Args: - model (:obj:`nn.Module`): - The model to train. - inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`): - The inputs and targets of the model. - - The dictionary will be unpacked before being fed to the model. Most models expect the targets under the - argument :obj:`labels`. Check your model's documentation for all accepted arguments. - - Return: - :obj:`torch.Tensor`: The tensor with training loss on this batch. - """ - - model.train() - inputs = self._prepare_inputs(inputs) - - if self.use_amp: - with autocast(): - loss = self.compute_loss(model, inputs) - else: - loss = self.compute_loss(model, inputs) - - if self.args.n_gpu > 1: - if model.module.config.ctc_loss_reduction == "mean": - loss = loss.mean() - elif model.module.config.ctc_loss_reduction == "sum": - loss = loss.sum() / (inputs["labels"] >= 0).sum() - else: - raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']") - - if self.args.gradient_accumulation_steps > 1: - loss = loss / self.args.gradient_accumulation_steps - - if self.use_amp: - self.scaler.scale(loss).backward() - elif self.use_apex: - with amp.scale_loss(loss, self.optimizer) as scaled_loss: - scaled_loss.backward() - elif self.deepspeed: - self.deepspeed.backward(loss) - else: - loss.backward() - - return loss.detach() - - -def main(): - # See all possible arguments in src/transformers/training_args.py - # or by passing the --help flag to this script. - # We now keep distinct sets of args, for a cleaner separation of concerns. 
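As a quick illustration of the label-padding convention used by DataCollatorCTCWithPadding above, here is a minimal standalone sketch with made-up toy values (the tensors below are hypothetical, not taken from the script): padded label positions are overwritten with -100 so the CTC loss ignores them.

import torch

# Toy padded labels and their attention mask; 0 stands in for pad_token_id.
padded_labels = torch.tensor([[5, 8, 2, 0, 0],
                              [7, 1, 4, 9, 3]])
attention_mask = torch.tensor([[1, 1, 1, 0, 0],
                               [1, 1, 1, 1, 1]])

# Same operation as in the collator: replace padding with -100 so the loss skips it.
labels = padded_labels.masked_fill(attention_mask.ne(1), -100)
print(labels)
# tensor([[   5,    8,    2, -100, -100],
#         [   7,    1,    4,    9,    3]])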
- - parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) - - model_args, data_args, training_args = parser.parse_args_into_dataclasses() - configure_logger(model_args, training_args) - - orthography = Orthography.from_name(data_args.orthography.lower()) - processor = orthography.create_processor(model_args) - model = Wav2Vec2ForCTC.from_pretrained( - model_args.model_name_or_path, - cache_dir=model_args.cache_dir, - gradient_checkpointing=training_args.gradient_checkpointing, - vocab_size=len(processor.tokenizer), - ) - - train_dataset = datasets.load_dataset( - data_args.dataset_name, data_args.dataset_config_name, split=data_args.train_split_name - ) - val_dataset = datasets.load_dataset( - data_args.dataset_name, data_args.dataset_config_name, split=data_args.validation_split_name - ) - - wer_metric = datasets.load_metric("wer") - target_sr = processor.feature_extractor.sampling_rate if data_args.target_feature_extractor_sampling_rate else None - vocabulary_chars_str = "".join(t for t in processor.tokenizer.get_vocab().keys() if len(t) == 1) - vocabulary_text_cleaner = re.compile( # remove characters not in vocabulary - rf"[^\s{re.escape(vocabulary_chars_str)}]", # allow space in addition to chars in vocabulary - flags=re.IGNORECASE if processor.tokenizer.do_lower_case else 0, - ) - text_updates = [] - - def prepare_example(example): # TODO(elgeish) make use of multiprocessing? - example["speech"], example["sampling_rate"] = librosa.load(example[data_args.speech_file_column], sr=target_sr) - if data_args.max_duration_in_seconds is not None: - example["duration_in_seconds"] = len(example["speech"]) / example["sampling_rate"] - # Normalize and clean up text; order matters! - updated_text = orthography.preprocess_for_training(example[data_args.target_text_column]) - updated_text = vocabulary_text_cleaner.sub("", updated_text) - if updated_text != example[data_args.target_text_column]: - text_updates.append((example[data_args.target_text_column], updated_text)) - example[data_args.target_text_column] = updated_text - return example - - train_dataset = train_dataset.map(prepare_example, remove_columns=[data_args.speech_file_column]) - val_dataset = val_dataset.map(prepare_example, remove_columns=[data_args.speech_file_column]) - - if data_args.max_duration_in_seconds is not None: - - def filter_by_max_duration(example): - return example["duration_in_seconds"] <= data_args.max_duration_in_seconds - - old_train_size = len(train_dataset) - old_val_size = len(val_dataset) - train_dataset = train_dataset.filter(filter_by_max_duration, remove_columns=["duration_in_seconds"]) - val_dataset = val_dataset.filter(filter_by_max_duration, remove_columns=["duration_in_seconds"]) - if len(train_dataset) > old_train_size: - logger.warning( - f"Filtered out {len(train_dataset) - old_train_size} train example(s) longer than" - f" {data_args.max_duration_in_seconds} second(s)." - ) - if len(val_dataset) > old_val_size: - logger.warning( - f"Filtered out {len(val_dataset) - old_val_size} validation example(s) longer than" - f" {data_args.max_duration_in_seconds} second(s)." 
- ) - logger.info(f"Split sizes: {len(train_dataset)} train and {len(val_dataset)} validation.") - - logger.warning(f"Updated {len(text_updates)} transcript(s) using '{data_args.orthography}' orthography rules.") - if logger.isEnabledFor(logging.DEBUG): - for original_text, updated_text in text_updates: - logger.debug(f'Updated text: "{original_text}" -> "{updated_text}"') - text_updates = None - - def prepare_dataset(batch): - # check that all files have the correct sampling rate - assert ( - len(set(batch["sampling_rate"])) == 1 - ), f"Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}." - - processed_batch = processor( - audio=batch["speech"], text=batch[data_args.target_text_column], sampling_rate=batch["sampling_rate"][0] - ) - batch.update(processed_batch) - return batch - - train_dataset = train_dataset.map( - prepare_dataset, - batch_size=training_args.per_device_train_batch_size, - batched=True, - num_proc=data_args.preprocessing_num_workers, - ) - val_dataset = val_dataset.map( - prepare_dataset, - batch_size=training_args.per_device_train_batch_size, - batched=True, - num_proc=data_args.preprocessing_num_workers, - ) - - data_collator = DataCollatorCTCWithPadding(processor=processor, padding=True) - - def compute_metrics(pred): - pred_logits = pred.predictions - pred_ids = np.argmax(pred_logits, axis=-1) - - pred.label_ids[pred.label_ids == -100] = processor.tokenizer.pad_token_id - - pred_str = processor.batch_decode(pred_ids) - # we do not want to group tokens when computing the metrics - label_str = processor.batch_decode(pred.label_ids, group_tokens=False) - if logger.isEnabledFor(logging.DEBUG): - for reference, predicted in zip(label_str, pred_str): - logger.debug(f'reference: "{reference}"') - logger.debug(f'predicted: "{predicted}"') - if orthography.untransliterator is not None: - logger.debug(f'reference (untransliterated): "{orthography.untransliterator(reference)}"') - logger.debug(f'predicted (untransliterated): "{orthography.untransliterator(predicted)}"') - - wer = wer_metric.compute(predictions=pred_str, references=label_str) - - return {"wer": wer} - - if model_args.freeze_feature_extractor: - model.freeze_feature_extractor() - - trainer = CTCTrainer( - model=model, - data_collator=data_collator, - args=training_args, - compute_metrics=compute_metrics, - train_dataset=train_dataset, - eval_dataset=val_dataset, - tokenizer=processor.feature_extractor, - ) - - trainer.train() - - -if __name__ == "__main__": - main() diff --git a/examples/research_projects/wav2vec2/run_common_voice.py b/examples/research_projects/wav2vec2/run_common_voice.py deleted file mode 100644 index 09a8458ca2a..00000000000 --- a/examples/research_projects/wav2vec2/run_common_voice.py +++ /dev/null @@ -1,513 +0,0 @@ -#!/usr/bin/env python3 -import json -import logging -import os -import re -import sys -from dataclasses import dataclass, field -from typing import Any, Dict, List, Optional, Union - -import datasets -import numpy as np -import torch -import torchaudio -from packaging import version -from torch import nn - -import transformers -from transformers import ( - HfArgumentParser, - Trainer, - TrainingArguments, - Wav2Vec2CTCTokenizer, - Wav2Vec2FeatureExtractor, - Wav2Vec2ForCTC, - Wav2Vec2Processor, - is_apex_available, - set_seed, -) -from transformers.trainer_utils import get_last_checkpoint, is_main_process - - -if is_apex_available(): - from apex import amp - - -if version.parse(version.parse(torch.__version__).base_version) >= 
version.parse("1.6"): - _is_native_amp_available = True - from torch.cuda.amp import autocast - -logger = logging.getLogger(__name__) - - -def list_field(default=None, metadata=None): - return field(default_factory=lambda: default, metadata=metadata) - - -@dataclass -class ModelArguments: - """ - Arguments pertaining to which model/config/tokenizer we are going to fine-tune from. - """ - - model_name_or_path: str = field( - metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} - ) - cache_dir: Optional[str] = field( - default=None, - metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, - ) - freeze_feature_extractor: Optional[bool] = field( - default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."} - ) - attention_dropout: Optional[float] = field( - default=0.1, metadata={"help": "The dropout ratio for the attention probabilities."} - ) - activation_dropout: Optional[float] = field( - default=0.1, metadata={"help": "The dropout ratio for activations inside the fully connected layer."} - ) - hidden_dropout: Optional[float] = field( - default=0.1, - metadata={ - "help": "The dropout probability for all fully connected layers in the embeddings, encoder, and pooler." - }, - ) - feat_proj_dropout: Optional[float] = field( - default=0.1, - metadata={"help": "The dropout probability for all 1D convolutional layers in feature extractor."}, - ) - mask_time_prob: Optional[float] = field( - default=0.05, - metadata={ - "help": ( - "Propability of each feature vector along the time axis to be chosen as the start of the vector " - "span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature " - "vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``." - ) - }, - ) - layerdrop: Optional[float] = field(default=0.0, metadata={"help": "The LayerDrop probability."}) - - -@dataclass -class DataTrainingArguments: - """ - Arguments pertaining to what data we are going to input our model for training and eval. - - Using `HfArgumentParser` we can turn this class - into argparse arguments to be able to specify them on - the command line. - """ - - dataset_config_name: Optional[str] = field( - default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} - ) - train_split_name: Optional[str] = field( - default="train+validation", - metadata={ - "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'" - }, - ) - overwrite_cache: bool = field( - default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."} - ) - preprocessing_num_workers: Optional[int] = field( - default=None, - metadata={"help": "The number of processes to use for the preprocessing."}, - ) - max_train_samples: Optional[int] = field( - default=None, - metadata={ - "help": ( - "For debugging purposes or quicker training, truncate the number of training examples to this " - "value if set." - ) - }, - ) - max_val_samples: Optional[int] = field( - default=None, - metadata={ - "help": ( - "For debugging purposes or quicker training, truncate the number of validation examples to this " - "value if set." 
- ) - }, - ) - chars_to_ignore: List[str] = list_field( - default=[",", "?", ".", "!", "-", ";", ":", '""', "%", "'", '"', "�"], - metadata={"help": "A list of characters to remove from the transcripts."}, - ) - - -@dataclass -class DataCollatorCTCWithPadding: - """ - Data collator that will dynamically pad the inputs received. - Args: - processor (:class:`~transformers.Wav2Vec2Processor`) - The processor used for processing the data. - padding (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults to :obj:`True`): - Select a strategy to pad the returned sequences (according to the model's padding side and padding index) - among: - * :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single - sequence if provided). - * :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the - maximum acceptable input length for the model if that argument is not provided. - * :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of - different lengths). - max_length (:obj:`int`, `optional`): - Maximum length of the ``input_values`` of the returned list and optionally padding length (see above). - max_length_labels (:obj:`int`, `optional`): - Maximum length of the ``labels`` returned list and optionally padding length (see above). - pad_to_multiple_of (:obj:`int`, `optional`): - If set will pad the sequence to a multiple of the provided value. - This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >= - 7.5 (Volta). - """ - - processor: Wav2Vec2Processor - padding: Union[bool, str] = True - max_length: Optional[int] = None - max_length_labels: Optional[int] = None - pad_to_multiple_of: Optional[int] = None - pad_to_multiple_of_labels: Optional[int] = None - - def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]: - # split inputs and labels since they have to be of different lengths and need - # different padding methods - input_features = [{"input_values": feature["input_values"]} for feature in features] - label_features = [{"input_ids": feature["labels"]} for feature in features] - - batch = self.processor.pad( - input_features, - padding=self.padding, - max_length=self.max_length, - pad_to_multiple_of=self.pad_to_multiple_of, - return_tensors="pt", - ) - labels_batch = self.processor.pad( - labels=label_features, - padding=self.padding, - max_length=self.max_length_labels, - pad_to_multiple_of=self.pad_to_multiple_of_labels, - return_tensors="pt", - ) - - # replace padding with -100 to ignore loss correctly - labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100) - - batch["labels"] = labels - - return batch - - -class CTCTrainer(Trainer): - def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor: - """ - Perform a training step on a batch of inputs. - - Subclass and override to inject custom behavior. - - Args: - model (:obj:`nn.Module`): - The model to train. - inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`): - The inputs and targets of the model. - - The dictionary will be unpacked before being fed to the model. Most models expect the targets under the - argument :obj:`labels`. Check your model's documentation for all accepted arguments. - - Return: - :obj:`torch.Tensor`: The tensor with training loss on this batch. 
- """ - - model.train() - inputs = self._prepare_inputs(inputs) - - if self.use_amp: - with autocast(): - loss = self.compute_loss(model, inputs) - else: - loss = self.compute_loss(model, inputs) - - if self.args.n_gpu > 1: - if model.module.config.ctc_loss_reduction == "mean": - loss = loss.mean() - elif model.module.config.ctc_loss_reduction == "sum": - loss = loss.sum() / (inputs["labels"] >= 0).sum() - else: - raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']") - - if self.args.gradient_accumulation_steps > 1: - loss = loss / self.args.gradient_accumulation_steps - - if self.use_amp: - self.scaler.scale(loss).backward() - elif self.use_apex: - with amp.scale_loss(loss, self.optimizer) as scaled_loss: - scaled_loss.backward() - elif self.deepspeed: - self.deepspeed.backward(loss) - else: - loss.backward() - - return loss.detach() - - -def main(): - # See all possible arguments in src/transformers/training_args.py - # or by passing the --help flag to this script. - # We now keep distinct sets of args, for a cleaner separation of concerns. - - parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) - if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): - # If we pass only one argument to the script and it's the path to a json file, - # let's parse it to get our arguments. - model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) - else: - model_args, data_args, training_args = parser.parse_args_into_dataclasses() - - # Detecting last checkpoint. - last_checkpoint = None - if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: - last_checkpoint = get_last_checkpoint(training_args.output_dir) - if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: - raise ValueError( - f"Output directory ({training_args.output_dir}) already exists and is not empty. " - "Use --overwrite_output_dir to overcome." - ) - elif last_checkpoint is not None: - logger.info( - f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " - "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." - ) - - # Setup logging - logging.basicConfig( - format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", - datefmt="%m/%d/%Y %H:%M:%S", - handlers=[logging.StreamHandler(sys.stdout)], - ) - logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN) - - # Log on each process the small summary: - logger.warning( - f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" - + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}" - ) - # Set the verbosity to info of the Transformers logger (on main process only): - if is_main_process(training_args.local_rank): - transformers.utils.logging.set_verbosity_info() - logger.info("Training/evaluation parameters %s", training_args) - - # Set seed before initializing model. 
- set_seed(training_args.seed) - - # Get the datasets: - train_dataset = datasets.load_dataset( - "common_voice", data_args.dataset_config_name, split=data_args.train_split_name - ) - eval_dataset = datasets.load_dataset("common_voice", data_args.dataset_config_name, split="test") - - # Create and save tokenizer - chars_to_ignore_regex = f'[{"".join(data_args.chars_to_ignore)}]' - - def remove_special_characters(batch): - batch["text"] = re.sub(chars_to_ignore_regex, "", batch["sentence"]).lower() + " " - return batch - - train_dataset = train_dataset.map(remove_special_characters, remove_columns=["sentence"]) - eval_dataset = eval_dataset.map(remove_special_characters, remove_columns=["sentence"]) - - def extract_all_chars(batch): - all_text = " ".join(batch["text"]) - vocab = list(set(all_text)) - return {"vocab": [vocab], "all_text": [all_text]} - - vocab_train = train_dataset.map( - extract_all_chars, - batched=True, - batch_size=-1, - keep_in_memory=True, - remove_columns=train_dataset.column_names, - ) - vocab_test = train_dataset.map( - extract_all_chars, - batched=True, - batch_size=-1, - keep_in_memory=True, - remove_columns=eval_dataset.column_names, - ) - - vocab_list = list(set(vocab_train["vocab"][0]) | set(vocab_test["vocab"][0])) - vocab_dict = {v: k for k, v in enumerate(vocab_list)} - vocab_dict["|"] = vocab_dict[" "] - del vocab_dict[" "] - vocab_dict["[UNK]"] = len(vocab_dict) - vocab_dict["[PAD]"] = len(vocab_dict) - - with open("vocab.json", "w") as vocab_file: - json.dump(vocab_dict, vocab_file) - - # Load pretrained model and tokenizer - # - # Distributed training: - # The .from_pretrained methods guarantee that only one local process can concurrently - # download model & vocab. - tokenizer = Wav2Vec2CTCTokenizer( - "vocab.json", - unk_token="[UNK]", - pad_token="[PAD]", - word_delimiter_token="|", - ) - feature_extractor = Wav2Vec2FeatureExtractor( - feature_size=1, sampling_rate=16_000, padding_value=0.0, do_normalize=True, return_attention_mask=True - ) - processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer) - model = Wav2Vec2ForCTC.from_pretrained( - model_args.model_name_or_path, - cache_dir=model_args.cache_dir, - activation_dropout=model_args.activation_dropout, - attention_dropout=model_args.attention_dropout, - hidden_dropout=model_args.hidden_dropout, - feat_proj_dropout=model_args.feat_proj_dropout, - mask_time_prob=model_args.mask_time_prob, - gradient_checkpointing=training_args.gradient_checkpointing, - layerdrop=model_args.layerdrop, - ctc_loss_reduction="mean", - pad_token_id=processor.tokenizer.pad_token_id, - vocab_size=len(processor.tokenizer), - ) - - if data_args.max_train_samples is not None: - max_train_samples = min(len(train_dataset), data_args.max_train_samples) - train_dataset = train_dataset.select(range(max_train_samples)) - - if data_args.max_val_samples is not None: - eval_dataset = eval_dataset.select(range(data_args.max_val_samples)) - - resampler = torchaudio.transforms.Resample(48_000, 16_000) - - # Preprocessing the datasets. - # We need to read the aduio files as arrays and tokenize the targets. 
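For reference, a minimal standalone sketch of the character-level vocabulary construction performed a few lines above (the transcripts here are made up): every character seen in the cleaned text becomes a token, the space character is remapped to the word-delimiter token "|", and [UNK]/[PAD] are appended at the end.

import json

# Hypothetical cleaned transcripts standing in for the Common Voice "text" column.
texts = ["merhaba dunya ", "bu bir deneme "]

vocab_chars = sorted(set("".join(texts)))            # unique characters across all transcripts
vocab_dict = {ch: idx for idx, ch in enumerate(vocab_chars)}
vocab_dict["|"] = vocab_dict[" "]                    # word delimiter replaces the space token
del vocab_dict[" "]
vocab_dict["[UNK]"] = len(vocab_dict)
vocab_dict["[PAD]"] = len(vocab_dict)

with open("vocab.json", "w") as vocab_file:
    json.dump(vocab_dict, vocab_file)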
- def speech_file_to_array_fn(batch): - speech_array, sampling_rate = torchaudio.load(batch["path"]) - batch["speech"] = resampler(speech_array).squeeze().numpy() - batch["sampling_rate"] = 16_000 - batch["target_text"] = batch["text"] - return batch - - train_dataset = train_dataset.map( - speech_file_to_array_fn, - remove_columns=train_dataset.column_names, - num_proc=data_args.preprocessing_num_workers, - ) - eval_dataset = eval_dataset.map( - speech_file_to_array_fn, - remove_columns=eval_dataset.column_names, - num_proc=data_args.preprocessing_num_workers, - ) - - def prepare_dataset(batch): - # check that all files have the correct sampling rate - assert ( - len(set(batch["sampling_rate"])) == 1 - ), f"Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}." - - processed_batch = processor( - audio=batch["speech"], text=batch["target_text"], sampling_rate=batch["sampling_rate"][0] - ) - batch.update(processed_batch) - return batch - - train_dataset = train_dataset.map( - prepare_dataset, - remove_columns=train_dataset.column_names, - batch_size=training_args.per_device_train_batch_size, - batched=True, - num_proc=data_args.preprocessing_num_workers, - ) - eval_dataset = eval_dataset.map( - prepare_dataset, - remove_columns=eval_dataset.column_names, - batch_size=training_args.per_device_train_batch_size, - batched=True, - num_proc=data_args.preprocessing_num_workers, - ) - - # Metric - wer_metric = datasets.load_metric("wer") - - def compute_metrics(pred): - pred_logits = pred.predictions - pred_ids = np.argmax(pred_logits, axis=-1) - - pred.label_ids[pred.label_ids == -100] = processor.tokenizer.pad_token_id - - pred_str = processor.batch_decode(pred_ids) - # we do not want to group tokens when computing the metrics - label_str = processor.batch_decode(pred.label_ids, group_tokens=False) - - wer = wer_metric.compute(predictions=pred_str, references=label_str) - - return {"wer": wer} - - if model_args.freeze_feature_extractor: - model.freeze_feature_extractor() - - # Data collator - data_collator = DataCollatorCTCWithPadding(processor=processor, padding=True) - - # Initialize our Trainer - trainer = CTCTrainer( - model=model, - data_collator=data_collator, - args=training_args, - compute_metrics=compute_metrics, - train_dataset=train_dataset if training_args.do_train else None, - eval_dataset=eval_dataset if training_args.do_eval else None, - tokenizer=processor.feature_extractor, - ) - - # Training - if training_args.do_train: - if last_checkpoint is not None: - checkpoint = last_checkpoint - elif os.path.isdir(model_args.model_name_or_path): - checkpoint = model_args.model_name_or_path - else: - checkpoint = None - - # Save the feature_extractor and the tokenizer - if is_main_process(training_args.local_rank): - processor.save_pretrained(training_args.output_dir) - - train_result = trainer.train(resume_from_checkpoint=checkpoint) - trainer.save_model() - - metrics = train_result.metrics - max_train_samples = ( - data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset) - ) - metrics["train_samples"] = min(max_train_samples, len(train_dataset)) - - trainer.log_metrics("train", metrics) - trainer.save_metrics("train", metrics) - trainer.save_state() - - # Evaluation - results = {} - if training_args.do_eval: - logger.info("*** Evaluate ***") - metrics = trainer.evaluate() - max_val_samples = data_args.max_val_samples if data_args.max_val_samples is not None else len(eval_dataset) - metrics["eval_samples"] = 
min(max_val_samples, len(eval_dataset)) - - trainer.log_metrics("eval", metrics) - trainer.save_metrics("eval", metrics) - - return results - - -if __name__ == "__main__": - main() diff --git a/examples/research_projects/wav2vec2/run_pretrain.py b/examples/research_projects/wav2vec2/run_pretrain.py deleted file mode 100755 index 00ef4edb37e..00000000000 --- a/examples/research_projects/wav2vec2/run_pretrain.py +++ /dev/null @@ -1,396 +0,0 @@ -#!/usr/bin/env python3 -import logging -import sys -from dataclasses import dataclass, field -from typing import Any, Dict, List, Optional, Union - -import librosa -import torch -from datasets import DatasetDict, load_dataset -from packaging import version -from torch import nn - -from transformers import ( - HfArgumentParser, - Trainer, - TrainingArguments, - Wav2Vec2Config, - Wav2Vec2FeatureExtractor, - Wav2Vec2ForPreTraining, - is_apex_available, - trainer_utils, -) -from transformers.models.wav2vec2.modeling_wav2vec2 import _compute_mask_indices - - -if is_apex_available(): - from apex import amp - -if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"): - _is_native_amp_available = True - from torch.cuda.amp import autocast - - -logger = logging.getLogger(__name__) - - -@dataclass -class ModelArguments: - """ - Arguments pertaining to which model/config/tokenizer we are going to fine-tune from. - """ - - model_name_or_path: str = field( - metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} - ) - cache_dir: Optional[str] = field( - default=None, - metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, - ) - freeze_feature_extractor: Optional[bool] = field( - default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."} - ) - verbose_logging: Optional[bool] = field( - default=False, - metadata={"help": "Whether to log verbose messages or not."}, - ) - max_gumbel_temperature: Optional[float] = field( - default=2.0, metadata={"help": "Maximum temperature for gumbel softmax."} - ) - min_gumbel_temperature: Optional[float] = field( - default=0.5, metadata={"help": "Minimum temperature for gumbel softmax."} - ) - gumbel_temperature_decay: Optional[float] = field( - default=0.999995, metadata={"help": "Decay of gumbel temperature during training."} - ) - - -def configure_logger(model_args: ModelArguments, training_args: TrainingArguments): - logging.basicConfig( - format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", - datefmt="%m/%d/%Y %H:%M:%S", - handlers=[logging.StreamHandler(sys.stdout)], - ) - logging_level = logging.WARNING - if model_args.verbose_logging: - logging_level = logging.DEBUG - elif trainer_utils.is_main_process(training_args.local_rank): - logging_level = logging.INFO - logger.setLevel(logging_level) - - -@dataclass -class DataTrainingArguments: - """ - Arguments pertaining to what data we are going to input our model for training and eval. - - Using `HfArgumentParser` we can turn this class - into argparse arguments to be able to specify them on - the command line. 
- """ - - dataset_name: str = field( - default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."} - ) - dataset_config_name: Optional[str] = field( - default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} - ) - train_split_name: Optional[str] = field( - default="train", - metadata={ - "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'" - }, - ) - validation_split_name: Optional[str] = field( - default="validation", - metadata={ - "help": ( - "The name of the validation data set split to use (via the datasets library). Defaults to 'validation'" - ) - }, - ) - speech_file_column: Optional[str] = field( - default="file", - metadata={"help": "Column in the dataset that contains speech file path. Defaults to 'file'"}, - ) - overwrite_cache: bool = field( - default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."} - ) - validation_split_percentage: Optional[int] = field( - default=1, - metadata={ - "help": "The percentage of the train set used as validation set in case there's no validation split" - }, - ) - preprocessing_num_workers: Optional[int] = field( - default=None, - metadata={"help": "The number of processes to use for the preprocessing."}, - ) - max_duration_in_seconds: Optional[float] = field( - default=20.0, metadata={"help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"} - ) - - -@dataclass -class DataCollatorForWav2Vec2Pretraining: - """ - Data collator that will dynamically pad the inputs received and prepare masked indices - for self-supervised pretraining. - - Args: - model (:class:`~transformers.Wav2Vec2ForPreTraining`): - The Wav2Vec2 model used for pretraining. The data collator needs to have access - to config and ``_get_feat_extract_output_lengths`` function for correct padding. - feature_extractor (:class:`~transformers.Wav2Vec2FeatureExtractor`): - The processor used for processing the data. - padding (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults to :obj:`True`): - Select a strategy to pad the returned sequences (according to the model's padding side and padding index) - among: - * :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single - sequence if provided). - * :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the - maximum acceptable input length for the model if that argument is not provided. - * :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of - different lengths). - max_length (:obj:`int`, `optional`): - Maximum length of the ``input_values`` of the returned list and optionally padding length (see above). - pad_to_multiple_of (:obj:`int`, `optional`): - If set will pad the sequence to a multiple of the provided value. - This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >= - 7.5 (Volta). 
- """ - - model: Wav2Vec2ForPreTraining - feature_extractor: Wav2Vec2FeatureExtractor - padding: Union[bool, str] = "longest" - pad_to_multiple_of: Optional[int] = None - max_length: Optional[int] = None - - def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]: - # reformat list to dict and set to pytorch format - batch = self.feature_extractor.pad( - features, - max_length=self.max_length, - padding=self.padding, - pad_to_multiple_of=self.pad_to_multiple_of, - return_tensors="pt", - ) - mask_indices_seq_length = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1]) - - batch_size = batch["input_values"].shape[0] - - # make sure that no loss is computed on padded inputs - if batch["attention_mask"] is not None: - # compute real output lengths according to convolution formula - output_lengths = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1)).to( - torch.long - ) - - attention_mask = torch.zeros( - (batch_size, mask_indices_seq_length), dtype=torch.long, device=batch["input_values"].device - ) - - # these two operations makes sure that all values - # before the output lengths indices are attended to - attention_mask[ - (torch.arange(attention_mask.shape[0], device=batch["input_values"].device), output_lengths - 1) - ] = 1 - attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool() - - # sample randomly masked indices - batch["mask_time_indices"] = _compute_mask_indices( - (batch_size, mask_indices_seq_length), - self.model.config.mask_time_prob, - self.model.config.mask_time_length, - attention_mask=attention_mask, - min_masks=2, - ) - - return batch - - -class Wav2Vec2PreTrainer(Trainer): - """ - Subclassed :class:`~transformers.Trainer` for Wav2Vec2-like pretraining. Trainer can decay gumbel softmax temperature during training. - """ - - def __init__(self, *args, max_gumbel_temp=1, min_gumbel_temp=0, gumbel_temp_decay=1.0, **kwargs): - super().__init__(*args, **kwargs) - self.num_update_step = 0 - self.max_gumbel_temp = max_gumbel_temp - self.min_gumbel_temp = min_gumbel_temp - self.gumbel_temp_decay = gumbel_temp_decay - - def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor: - """ - Perform a training step on a batch of inputs. - - Subclass and override to inject custom behavior. - - Args: - model (:obj:`nn.Module`): - The model to train. - inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`): - The inputs and targets of the model. - - The dictionary will be unpacked before being fed to the model. Most models expect the targets under the - argument :obj:`labels`. Check your model's documentation for all accepted arguments. - - Return: - :obj:`torch.Tensor`: The tensor with training loss on this batch. - """ - - model.train() - inputs = self._prepare_inputs(inputs) - - if self.use_amp: - with autocast(): - loss = self.compute_loss(model, inputs) - else: - loss = self.compute_loss(model, inputs) - - if self.args.n_gpu > 1 or self.deepspeed: - if model.module.config.ctc_loss_reduction == "mean": - loss = loss.mean() - elif model.module.config.ctc_loss_reduction == "sum": - loss = loss.sum() / (inputs["mask_time_indices"]).sum() - else: - raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. 
Choose one of ['mean', 'sum']") - - if self.args.gradient_accumulation_steps > 1: - loss = loss / self.args.gradient_accumulation_steps - - if self.use_amp: - self.scaler.scale(loss).backward() - elif self.use_apex: - with amp.scale_loss(loss, self.optimizer) as scaled_loss: - scaled_loss.backward() - elif self.deepspeed: - self.deepspeed.backward(loss) - else: - loss.backward() - - self.num_update_step += 1 - # make sure gumbel softmax temperature is decayed - if self.args.n_gpu > 1 or self.deepspeed: - model.module.set_gumbel_temperature( - max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp) - ) - else: - model.set_gumbel_temperature( - max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp) - ) - - return loss.detach() - - -def main(): - # See all possible arguments in src/transformers/training_args.py - # or by passing the --help flag to this script. - # We now keep distinct sets of args, for a cleaner separation of concerns. - - parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) - - model_args, data_args, training_args = parser.parse_args_into_dataclasses() - configure_logger(model_args, training_args) - - # Downloading and loading a dataset from the hub. - datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir) - - if "validation" not in datasets.keys(): - # make sure only "validation" and "train" keys remain" - datasets = DatasetDict() - datasets["validation"] = load_dataset( - data_args.dataset_name, - data_args.dataset_config_name, - split=f"{data_args.train_split_name}[:{data_args.validation_split_percentage}%]", - cache_dir=model_args.cache_dir, - ) - datasets["train"] = load_dataset( - data_args.dataset_name, - data_args.dataset_config_name, - split=f"{data_args.train_split_name}[{data_args.validation_split_percentage}%:]", - cache_dir=model_args.cache_dir, - ) - else: - # make sure only "validation" and "train" keys remain" - datasets = DatasetDict() - datasets["validation"] = load_dataset( - data_args.dataset_name, - data_args.dataset_config_name, - split="validation", - cache_dir=model_args.cache_dir, - ) - datasets["train"] = load_dataset( - data_args.dataset_name, - data_args.dataset_config_name, - split=f"{data_args.train_split_name}", - cache_dir=model_args.cache_dir, - ) - - # only normalized-inputs-training is supported - feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained( - model_args.model_name_or_path, cache_dir=model_args.cache_dir, do_normalize=True - ) - - def prepare_dataset(batch): - # check that all files have the correct sampling rate - batch["speech"], _ = librosa.load(batch[data_args.speech_file_column], sr=feature_extractor.sampling_rate) - return batch - - # load audio files into numpy arrays - vectorized_datasets = datasets.map( - prepare_dataset, num_proc=data_args.preprocessing_num_workers, remove_columns=datasets["train"].column_names - ) - - # filter audio files that are too long - vectorized_datasets = vectorized_datasets.filter( - lambda data: len(data["speech"]) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate) - ) - - def normalize(batch): - return feature_extractor(batch["speech"], sampling_rate=feature_extractor.sampling_rate) - - # normalize and transform to `BatchFeatures` - vectorized_datasets = vectorized_datasets.map( - normalize, - batched=True, - num_proc=data_args.preprocessing_num_workers, - load_from_cache_file=not 
data_args.overwrite_cache, - remove_columns=vectorized_datasets["train"].column_names, - ) - - # pretraining is only supported for "newer" stable layer norm architecture - # apply_spec_augment has to be True, mask_feature_prob has to be 0.0 - config = Wav2Vec2Config.from_pretrained( - model_args.model_name_or_path, - cache_dir=model_args.cache_dir, - gradient_checkpointing=training_args.gradient_checkpointing, - ) - - if not config.do_stable_layer_norm or config.feat_extract_norm != "layer": - raise ValueError( - "PreTraining is only supported for ``config.do_stable_layer_norm=True`` and" - " ``config.feat_extract_norm='layer'" - ) - - model = Wav2Vec2ForPreTraining(config) - - data_collator = DataCollatorForWav2Vec2Pretraining(model=model, feature_extractor=feature_extractor) - - trainer = Wav2Vec2PreTrainer( - model=model, - data_collator=data_collator, - args=training_args, - train_dataset=vectorized_datasets["train"], - eval_dataset=vectorized_datasets["validation"], - tokenizer=feature_extractor, - max_gumbel_temp=model_args.max_gumbel_temperature, - min_gumbel_temp=model_args.min_gumbel_temperature, - gumbel_temp_decay=model_args.gumbel_temperature_decay, - ) - trainer.train() - - -if __name__ == "__main__": - main() diff --git a/examples/research_projects/wav2vec2/test_wav2vec2_deepspeed.py b/examples/research_projects/wav2vec2/test_wav2vec2_deepspeed.py deleted file mode 100644 index 8fb2df71112..00000000000 --- a/examples/research_projects/wav2vec2/test_wav2vec2_deepspeed.py +++ /dev/null @@ -1,199 +0,0 @@ -# Copyright 2020 The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
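The attention-mask construction in DataCollatorForWav2Vec2Pretraining above (a single 1 at each example's last valid frame, then flip -> cumsum -> flip) is compact but non-obvious; a minimal standalone sketch with hypothetical lengths shows that it marks every frame up to and including the last valid one as attended.

import torch

batch_size, mask_indices_seq_length = 3, 6
output_lengths = torch.tensor([6, 4, 2])  # hypothetical per-example frame counts after the conv stack

attention_mask = torch.zeros((batch_size, mask_indices_seq_length), dtype=torch.long)
attention_mask[(torch.arange(batch_size), output_lengths - 1)] = 1
attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()

print(attention_mask)
# tensor([[ True,  True,  True,  True,  True,  True],
#         [ True,  True,  True,  True, False, False],
#         [ True,  True, False, False, False, False]])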
- - -# XXX: we want transformers master here - in the absense of conftest manipulating sys.path: -# hack it in for now: -import sys -from pathlib import Path - - -git_repo_path = Path(__file__).resolve().parents[3] / "src" -sys.path.insert(1, str(git_repo_path)) - -import dataclasses # noqa -import io # noqa -import itertools # noqa -import json # noqa -import os # noqa -import unittest # noqa -from copy import deepcopy # noqa - -from parameterized import parameterized # noqa -from transformers import TrainingArguments, is_torch_available # noqa -from transformers.integrations.deepspeed import is_deepspeed_available # noqa -from transformers.file_utils import WEIGHTS_NAME # noqa -from transformers.testing_utils import ( # noqa - CaptureLogger, - ExtendSysPath, - TestCasePlus, - execute_subprocess_async, - get_gpu_count, - mockenv_context, - require_deepspeed, - require_torch_gpu, - require_torch_multi_gpu, - slow, -) -from transformers.trainer_utils import set_seed # noqa - - -set_seed(42) - -models = {"base": "patrickvonplaten/wav2vec2_tiny_random", "robust": "patrickvonplaten/wav2vec2_tiny_random_robust"} - -ZERO2 = "zero2" -ZERO3 = "zero3" -stages = [ZERO2, ZERO3] - - -def custom_name_func(func, param_num, param): - # customize the test name generator function as we want both params to appear in the sub-test - # name, as by default it shows only the first param - param_based_name = parameterized.to_safe_name("_".join(str(x) for x in param.args)) - return f"{func.__name__}_{param_based_name}" - - -# Cartesian-product of zero stages with models to test -params = list(itertools.product(stages, models.keys())) - - -@slow -@require_deepspeed -@require_torch_gpu -class TestDeepSpeedWav2Vec2(TestCasePlus): - @parameterized.expand(params, name_func=custom_name_func) - def test_fp32_non_distributed(self, stage, model): - self.run_and_check( - stage=stage, - model=model, - distributed=False, - fp16=False, - ) - - @require_torch_multi_gpu - @parameterized.expand(params, name_func=custom_name_func) - def test_fp32_distributed(self, stage, model): - self.run_and_check( - stage=stage, - model=model, - distributed=True, - fp16=False, - ) - - @parameterized.expand(params, name_func=custom_name_func) - def test_fp16_non_distributed(self, stage, model): - self.run_and_check( - stage=stage, - model=model, - distributed=False, - fp16=True, - ) - - @require_torch_multi_gpu - @parameterized.expand(params, name_func=custom_name_func) - def test_fp16_distributed(self, stage, model): - self.run_and_check( - stage=stage, - model=model, - distributed=True, - fp16=True, - ) - - def do_checks(self, output_dir): - # XXX: run_asr is premature and doesn't save any results - # so all we check for now is that the process didn't fail - pass - - # XXX: need to do better validation beyond just that the run was successful - def run_and_check( - self, - stage: str, - model: str, - eval_steps: int = 10, - distributed: bool = True, - quality_checks: bool = True, - fp16: bool = True, - ): - model_name = models[model] - - output_dir = self.run_trainer( - stage=stage, - model_name=model_name, - eval_steps=eval_steps, - num_train_epochs=1, - distributed=distributed, - fp16=fp16, - ) - - self.do_checks(output_dir) - - return output_dir - - def run_trainer( - self, - stage: str, - model_name: str, - eval_steps: int = 10, - num_train_epochs: int = 1, - distributed: bool = True, - fp16: bool = True, - ): - output_dir = self.get_auto_remove_tmp_dir("./xxx", after=False) - args = f""" - --model_name_or_path {model_name} - --dataset_name 
hf-internal-testing/librispeech_asr_dummy - --dataset_config_name clean - --train_split_name validation - --validation_split_name validation - --output_dir {output_dir} - --num_train_epochs {str(num_train_epochs)} - --per_device_train_batch_size 2 - --per_device_eval_batch_size 2 - --eval_strategy steps - --learning_rate 5e-4 - --warmup_steps 8 - --orthography timit - --preprocessing_num_workers 1 - --group_by_length - --freeze_feature_extractor - --report_to none - --save_steps 0 - --eval_steps {eval_steps} - --report_to none - """.split() - - if fp16: - args.extend(["--fp16"]) - - # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true, - # hence the separate config files - ds_args = f"--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json".split() - script = [f"{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"] - launcher = self.get_launcher(distributed) - - cmd = launcher + script + args + ds_args - # keep for quick debug - # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die - execute_subprocess_async(cmd, env=self.get_env()) - - return output_dir - - def get_launcher(self, distributed=False): - # 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup - # - it won't be able to handle that - # 2. for now testing with just 2 gpus max (since some quality tests may give different - # results with mode gpus because we use very little data) - num_gpus = min(2, get_gpu_count()) if distributed else 1 - return f"deepspeed --num_nodes 1 --num_gpus {num_gpus}".split() diff --git a/examples/research_projects/wav2vec2/vocab/buckwalter.json b/examples/research_projects/wav2vec2/vocab/buckwalter.json deleted file mode 100644 index 3f98fc2d521..00000000000 --- a/examples/research_projects/wav2vec2/vocab/buckwalter.json +++ /dev/null @@ -1,58 +0,0 @@ -{ - "": 0, - "": 1, - "": 2, - "": 3, - "/": 4, - "'": 5, - "|": 6, - ">": 7, - "&": 8, - "<": 9, - "}": 10, - "A": 11, - "b": 12, - "p": 13, - "t": 14, - "v": 15, - "j": 16, - "H": 17, - "x": 18, - "d": 19, - "*": 20, - "r": 21, - "z": 22, - "s": 23, - "$": 24, - "S": 25, - "D": 26, - "T": 27, - "Z": 28, - "E": 29, - "g": 30, - "_": 31, - "f": 32, - "q": 33, - "k": 34, - "l": 35, - "m": 36, - "n": 37, - "h": 38, - "w": 39, - "Y": 40, - "y": 41, - "F": 42, - "N": 43, - "K": 44, - "a": 45, - "u": 46, - "i": 47, - "~": 48, - "o": 49, - "`": 50, - "{": 51, - "P": 52, - "J": 53, - "V": 54, - "G": 55 -} \ No newline at end of file diff --git a/examples/research_projects/xtreme-s/README.md b/examples/research_projects/xtreme-s/README.md deleted file mode 100644 index 5314ba9880a..00000000000 --- a/examples/research_projects/xtreme-s/README.md +++ /dev/null @@ -1,160 +0,0 @@ - - -# XTREME-S benchmark examples - -*Maintainers: [Anton Lozhkov](https://github.com/anton-l) and [Patrick von Platen](https://github.com/patrickvonplaten)* - -The Cross-lingual TRansfer Evaluation of Multilingual Encoders for Speech (XTREME-S) benchmark is a benchmark designed to evaluate speech representations across languages, tasks, domains and data regimes. It covers XX typologically diverse languages and seven downstream tasks grouped in four families: speech recognition, translation, classification and retrieval. 
- -XTREME-S covers speech recognition with Fleurs, Multilingual LibriSpeech (MLS) and VoxPopuli, speech translation with CoVoST-2, speech classification with LangID (Fleurs) and intent classification (MInds-14) and finally speech(-text) retrieval with Fleurs. Each of the tasks covers a subset of the 102 languages included in XTREME-S (shown here with their ISO 3166-1 codes): afr, amh, ara, asm, ast, azj, bel, ben, bos, cat, ceb, ces, cmn, cym, dan, deu, ell, eng, spa, est, fas, ful, fin, tgl, fra, gle, glg, guj, hau, heb, hin, hrv, hun, hye, ind, ibo, isl, ita, jpn, jav, kat, kam, kea, kaz, khm, kan, kor, ckb, kir, ltz, lug, lin, lao, lit, luo, lav, mri, mkd, mal, mon, mar, msa, mlt, mya, nob, npi, nld, nso, nya, oci, orm, ory, pan, pol, pus, por, ron, rus, bul, snd, slk, slv, sna, som, srp, swe, swh, tam, tel, tgk, tha, tur, ukr, umb, urd, uzb, vie, wol, xho, yor, yue and zul. - -Paper: [XTREME-S: Evaluating Cross-lingual Speech Representations](https://arxiv.org/abs/2203.10752) - -Dataset: [https://huggingface.co/datasets/google/xtreme_s](https://huggingface.co/datasets/google/xtreme_s) - -## Fine-tuning for the XTREME-S tasks - -Based on the [`run_xtreme_s.py`](https://github.com/huggingface/transformers/blob/main/examples/research_projects/xtreme-s/run_xtreme_s.py) script. - -This script can fine-tune any of the pretrained speech models on the [hub](https://huggingface.co/models?pipeline_tag=automatic-speech-recognition) on the [XTREME-S dataset](https://huggingface.co/datasets/google/xtreme_s) tasks. - -XTREME-S is made up of 7 different tasks. Here is how to run the script on each of them: - -```bash -export TASK_NAME=mls.all - -python run_xtreme_s.py \ - --model_name_or_path="facebook/wav2vec2-xls-r-300m" \ - --task="${TASK_NAME}" \ - --output_dir="xtreme_s_xlsr_${TASK_NAME}" \ - --num_train_epochs=100 \ - --per_device_train_batch_size=32 \ - --learning_rate="3e-4" \ - --target_column_name="transcription" \ - --save_steps=500 \ - --eval_steps=500 \ - --gradient_checkpointing \ - --fp16 \ - --group_by_length \ - --do_train \ - --do_eval \ - --do_predict \ - --push_to_hub -``` - -where `TASK_NAME` can be one of: `mls, voxpopuli, covost2, fleurs-asr, fleurs-lang_id, minds14`. - -We get the following results on the test set of the benchmark's datasets. -The corresponding training commands for each dataset are given in the sections below: - -| Task | Dataset | Result | Fine-tuned model & logs | Training time | GPUs | -|-----------------------|-----------|-----------------------|--------------------------------------------------------------------|---------------|--------| -| Speech Recognition | MLS | 30.33 WER | [here](https://huggingface.co/anton-l/xtreme_s_xlsr_300m_mls/) | 18:47:25 | 8xV100 | -| Speech Recognition | VoxPopuli | - | - | - | - | -| Speech Recognition | FLEURS | - | - | - | - | -| Speech Translation | CoVoST-2 | - | - | - | - | -| Speech Classification | Minds-14 | 90.15 F1 / 90.33 Acc. | [here](https://huggingface.co/anton-l/xtreme_s_xlsr_300m_minds14/) | 2:54:21 | 2xA100 | -| Speech Classification | FLEURS | - | - | - | - | -| Speech Retrieval | FLEURS | - | - | - | - | - -### Speech Recognition with MLS - -The following command shows how to fine-tune the [XLS-R](https://huggingface.co/docs/transformers/main/model_doc/xls_r) model on [XTREME-S MLS](https://huggingface.co/datasets/google/xtreme_s#multilingual-librispeech-mls) using 8 GPUs in half-precision. 
- -```bash -python -m torch.distributed.launch \ - --nproc_per_node=8 \ - run_xtreme_s.py \ - --task="mls" \ - --language="all" \ - --model_name_or_path="facebook/wav2vec2-xls-r-300m" \ - --output_dir="xtreme_s_xlsr_300m_mls" \ - --overwrite_output_dir \ - --num_train_epochs=100 \ - --per_device_train_batch_size=4 \ - --per_device_eval_batch_size=1 \ - --gradient_accumulation_steps=2 \ - --learning_rate="3e-4" \ - --warmup_steps=3000 \ - --eval_strategy="steps" \ - --max_duration_in_seconds=20 \ - --save_steps=500 \ - --eval_steps=500 \ - --logging_steps=1 \ - --layerdrop=0.0 \ - --mask_time_prob=0.3 \ - --mask_time_length=10 \ - --mask_feature_prob=0.1 \ - --mask_feature_length=64 \ - --freeze_feature_encoder \ - --gradient_checkpointing \ - --fp16 \ - --group_by_length \ - --do_train \ - --do_eval \ - --do_predict \ - --metric_for_best_model="wer" \ - --greater_is_better=False \ - --load_best_model_at_end \ - --push_to_hub -``` - -On 8 V100 GPUs, this script should run in ~19 hours and yield a cross-entropy loss of **0.6215** and word error rate of **30.33** - -### Speech Classification with Minds-14 - -The following command shows how to fine-tune the [XLS-R](https://huggingface.co/docs/transformers/main/model_doc/xls_r) model on [XTREME-S MLS](https://huggingface.co/datasets/google/xtreme_s#intent-classification---minds-14) using 2 GPUs in half-precision. - -```bash -python -m torch.distributed.launch \ - --nproc_per_node=2 \ - run_xtreme_s.py \ - --task="minds14" \ - --language="all" \ - --model_name_or_path="facebook/wav2vec2-xls-r-300m" \ - --output_dir="xtreme_s_xlsr_300m_minds14" \ - --overwrite_output_dir \ - --num_train_epochs=50 \ - --per_device_train_batch_size=32 \ - --per_device_eval_batch_size=8 \ - --gradient_accumulation_steps=1 \ - --learning_rate="3e-4" \ - --warmup_steps=1500 \ - --eval_strategy="steps" \ - --max_duration_in_seconds=30 \ - --save_steps=200 \ - --eval_steps=200 \ - --logging_steps=1 \ - --layerdrop=0.0 \ - --mask_time_prob=0.3 \ - --mask_time_length=10 \ - --mask_feature_prob=0.1 \ - --mask_feature_length=64 \ - --freeze_feature_encoder \ - --gradient_checkpointing \ - --fp16 \ - --group_by_length \ - --do_train \ - --do_eval \ - --do_predict \ - --metric_for_best_model="f1" \ - --greater_is_better=True \ - --load_best_model_at_end \ - --push_to_hub -``` - -On 2 A100 GPUs, this script should run in ~5 hours and yield a cross-entropy loss of **0.4119** and F1 score of **90.15** diff --git a/examples/research_projects/xtreme-s/requirements.txt b/examples/research_projects/xtreme-s/requirements.txt deleted file mode 100644 index 219959a4b26..00000000000 --- a/examples/research_projects/xtreme-s/requirements.txt +++ /dev/null @@ -1,5 +0,0 @@ -datasets >= 1.18.0 -torch >= 1.5 -torchaudio -librosa -jiwer diff --git a/examples/research_projects/xtreme-s/run_xtreme_s.py b/examples/research_projects/xtreme-s/run_xtreme_s.py deleted file mode 100644 index a467b3c6eb8..00000000000 --- a/examples/research_projects/xtreme-s/run_xtreme_s.py +++ /dev/null @@ -1,949 +0,0 @@ -#!/usr/bin/env python -# coding=utf-8 -# Copyright 2022 The HuggingFace Inc. team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and - -"""Fine-tuning a 🤗 Transformers pretrained speech model on the XTREME-S benchmark tasks""" - -import json -import logging -import os -import re -import sys -from collections import OrderedDict, defaultdict -from dataclasses import dataclass, field -from typing import Dict, List, Optional, Union - -import datasets -import numpy as np -import torch -from datasets import DatasetDict, load_dataset, load_metric - -import transformers -from transformers import ( - AutoConfig, - AutoFeatureExtractor, - AutoModelForAudioClassification, - AutoModelForCTC, - AutoModelForSpeechSeq2Seq, - AutoProcessor, - AutoTokenizer, - HfArgumentParser, - Seq2SeqTrainer, - Seq2SeqTrainingArguments, - Trainer, - set_seed, -) -from transformers.trainer_utils import get_last_checkpoint, is_main_process -from transformers.utils import check_min_version -from transformers.utils.versions import require_version - - -# Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.18.0.dev0") - -require_version("datasets>=1.18.0", "To fix: pip install -r examples/pytorch/speech-recognition/requirements.txt") - - -logger = logging.getLogger(__name__) - - -def list_field(default=None, metadata=None): - return field(default_factory=lambda: default, metadata=metadata) - - -TASK_TO_TARGET_COLUMN_NAME = { - "fleurs-asr": "transcription", - "fleurs-lang_id": "lang_id", - "mls": "transcription", - "voxpopuli": "transcription", - "covost2": "translation", - "minds14": "intent_class", - "babel": "transcription", -} - - -@dataclass -class ModelArguments: - """ - Arguments pertaining to which model/config/tokenizer we are going to fine-tune from. - """ - - model_name_or_path: str = field( - metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} - ) - tokenizer_name_or_path: Optional[str] = field( - default=None, - metadata={"help": "Path to pretrained tokenizer or tokenizer identifier from huggingface.co/models"}, - ) - cache_dir: Optional[str] = field( - default=None, - metadata={ - "help": "Where do you want to store the pretrained models and datasets downloaded from huggingface.co" - }, - ) - freeze_feature_encoder: bool = field( - default=True, metadata={"help": "Whether to freeze the feature encoder layers of the model."} - ) - attention_dropout: float = field( - default=0.0, metadata={"help": "The dropout ratio for the attention probabilities."} - ) - activation_dropout: float = field( - default=0.0, metadata={"help": "The dropout ratio for activations inside the fully connected layer."} - ) - feat_proj_dropout: float = field(default=0.0, metadata={"help": "The dropout ratio for the projected features."}) - hidden_dropout: float = field( - default=0.0, - metadata={ - "help": "The dropout probability for all fully connected layers in the embeddings, encoder, and pooler." 
- }, - ) - final_dropout: float = field( - default=0.0, - metadata={"help": "The dropout probability for the final projection layer."}, - ) - mask_time_prob: float = field( - default=0.05, - metadata={ - "help": ( - "Probability of each feature vector along the time axis to be chosen as the start of the vector " - "span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature " - "vectors will be masked along the time axis." - ) - }, - ) - mask_time_length: int = field( - default=10, - metadata={"help": "Length of vector span to mask along the time axis."}, - ) - mask_feature_prob: float = field( - default=0.0, - metadata={ - "help": ( - "Probability of each feature vector along the feature axis to be chosen as the start of the vectorspan" - " to be masked. Approximately ``mask_feature_prob * sequence_length // mask_feature_length`` feature" - " bins will be masked along the time axis." - ) - }, - ) - mask_feature_length: int = field( - default=10, - metadata={"help": "Length of vector span to mask along the feature axis."}, - ) - layerdrop: float = field(default=0.0, metadata={"help": "The LayerDrop probability."}) - ctc_zero_infinity: bool = field( - default=False, - metadata={"help": "Whether to zero infinite losses and the associated gradients of `torch.nn.CTCLoss`."}, - ) - ctc_loss_reduction: Optional[str] = field( - default="mean", metadata={"help": "The way the ctc loss should be reduced. Should be one of 'mean' or 'sum'."} - ) - - -@dataclass -class DataTrainingArguments: - """ - Arguments pertaining to what data we are going to input our model for training and eval. - - Using `HfArgumentParser` we can turn this class - into argparse arguments to be able to specify them on - the command line. - """ - - dataset_name: str = field( - default="google/xtreme_s", - metadata={"help": "The name of the dataset to use (via the datasets library). Defaults to 'google/xtreme_s'"}, - ) - task: str = field( - default=None, - metadata={ - "help": ( - "The task name of the benchmark to use (via the datasets library). Should be on of: " - "'fleurs-asr', 'mls', 'voxpopuli', 'covost2', 'minds14', 'fleurs-lang_id', 'babel'." - ) - }, - ) - language: str = field( - default="all", - metadata={"help": "The language id as defined in the datasets config name or `all` for all languages."}, - ) - language_group: str = field( - default=None, - metadata={ - "help": ( - "The language group to select a subset of languages to train on. " - "This option is only used the 'fleurs-asr' task. Should be one of: " - "'western_european_we', 'eastern_european_ee', 'central_asia_middle_north_african_cmn', " - "'sub_saharan_african_ssa', 'south_asian_sa', 'south_east_asian_sea', 'chinese_japanase_korean_cjk'." - ) - }, - ) - train_split_name: str = field( - default="train", - metadata={ - "help": "The name of the training dataset split to use (via the datasets library). Defaults to 'train'" - }, - ) - eval_split_name: str = field( - default="validation", - metadata={ - "help": ( - "The name of the evaluation dataset split to use (via the datasets library). Defaults to 'validation'" - ) - }, - ) - predict_split_name: str = field( - default="test", - metadata={ - "help": "The name of the prediction dataset split to use (via the datasets library). Defaults to 'test'" - }, - ) - audio_column_name: str = field( - default="audio", - metadata={"help": "The name of the dataset column containing the audio data. 
Defaults to 'audio'"}, - ) - target_column_name: str = field( - default=None, - metadata={ - "help": ( - "The name of the dataset column containing the target data (transcription/translation/label). If None," - " the name will be inferred from the task. Defaults to None." - ) - }, - ) - overwrite_cache: bool = field( - default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."} - ) - preprocessing_num_workers: Optional[int] = field( - default=None, - metadata={"help": "The number of processes to use for the preprocessing."}, - ) - max_train_samples: Optional[int] = field( - default=None, - metadata={ - "help": ( - "For debugging purposes or quicker training, truncate the number of training examples to this " - "value if set." - ) - }, - ) - max_eval_samples: Optional[int] = field( - default=None, - metadata={ - "help": ( - "For debugging purposes or quicker training, truncate the number of validation examples to this " - "value if set." - ) - }, - ) - max_predict_samples: Optional[int] = field( - default=None, - metadata={ - "help": ( - "For debugging purposes or quicker training, truncate the number of prediction examples to this " - "value if set." - ) - }, - ) - chars_to_ignore: Optional[List[str]] = list_field( - default=', ? . ! - ; : " “ % ‘ ” �'.split(" "), - metadata={"help": "A list of characters to remove from the transcripts."}, - ) - max_duration_in_seconds: float = field( - default=30.0, - metadata={ - "help": ( - "Filter audio files that are longer than `max_duration_in_seconds` seconds to" - " 'max_duration_in_seconds`" - ) - }, - ) - min_duration_in_seconds: float = field( - default=0.0, metadata={"help": "Filter audio files that are shorter than `min_duration_in_seconds` seconds"} - ) - preprocessing_only: bool = field( - default=False, - metadata={ - "help": ( - "Whether to only do data preprocessing and skip training. This is especially useful when data" - " preprocessing errors out in distributed training due to timeout. In this case, one should run the" - " preprocessing in a non-distributed setup with `preprocessing_only=True` so that the cached datasets" - " can consequently be loaded in distributed training" - ) - }, - ) - use_auth_token: bool = field( - default=False, - metadata={ - "help": ( - "If :obj:`True`, will use the token generated when running" - ":obj:`huggingface-cli login` as HTTP bearer authorization for remote files." - ) - }, - ) - unk_token: str = field( - default="[UNK]", - metadata={"help": "The unk token for the tokenizer"}, - ) - pad_token: str = field( - default="[PAD]", - metadata={"help": "The padding token for the tokenizer"}, - ) - word_delimiter_token: str = field( - default="|", - metadata={"help": "The word delimiter token for the tokenizer"}, - ) - phoneme_language: Optional[str] = field( - default=None, - metadata={ - "help": ( - "The target language that should be used be" - " passed to the tokenizer for tokenization. Note that" - " this is only relevant if the model classifies the" - " input audio to a sequence of phoneme sequences." - ) - }, - ) - per_lang_metrics: bool = field( - default=True, - metadata={ - "help": ( - "If `True`, compute the test metrics separately for each language, and average the results. " - "If `False` compute the average test metrics in a single pass for all languages at once." 
- ) - }, - ) - - -@dataclass -class SpeechDataCollatorWithPadding: - processor: AutoProcessor - decoder_start_token_id: Optional[int] = None - padding: Union[bool, str] = "longest" - pad_labels: Optional[int] = True - pad_to_multiple_of: Optional[int] = None - pad_to_multiple_of_labels: Optional[int] = None - - def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]: - # split inputs and labels since they have to be of different lengths and need - # different padding methods - input_features = [{"input_values": feature["input_values"]} for feature in features] - - batch = self.processor.pad( - input_features, - padding=self.padding, - pad_to_multiple_of=self.pad_to_multiple_of, - return_tensors="pt", - ) - - if self.pad_labels: - label_features = [{"input_ids": feature["labels"]} for feature in features] - labels_batch = self.processor.pad( - labels=label_features, - padding=self.padding, - pad_to_multiple_of=self.pad_to_multiple_of_labels, - return_tensors="pt", - ) - - # replace padding with -100 to ignore loss correctly - labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100) - - # if bos token is appended in previous tokenization step, - # cut bos token here as it's append later anyways - if ( - self.decoder_start_token_id is not None - and (labels[:, 0] == self.decoder_start_token_id).all().cpu().item() - ): - labels = labels[:, 1:] - - batch["labels"] = labels - else: - batch["labels"] = torch.tensor([feature["labels"] for feature in features]) - - return batch - - -def create_vocabulary_from_data( - datasets: DatasetDict, - word_delimiter_token: Optional[str] = None, - unk_token: Optional[str] = None, - pad_token: Optional[str] = None, -): - # Given training and test labels create vocabulary - def extract_all_chars(batch): - all_text = " ".join(batch["target_text"]) - vocab = list(set(all_text)) - return {"vocab": [vocab], "all_text": [all_text]} - - vocabs = datasets.map( - extract_all_chars, - batched=True, - batch_size=-1, - keep_in_memory=True, - remove_columns=datasets["train"].column_names, - ) - - # take union of all unique characters in each dataset - vocab_set = ( - (set(vocabs["train"]["vocab"][0]) if "train" in vocabs else set()) - | (set(vocabs["eval"]["vocab"][0]) if "eval" in vocabs else set()) - | (set(vocabs["predict"]["vocab"][0]) if "predict" in vocabs else set()) - ) - - vocab_dict = {v: k for k, v in enumerate(sorted(vocab_set))} - - # replace white space with delimiter token - if word_delimiter_token is not None: - vocab_dict[word_delimiter_token] = vocab_dict[" "] - del vocab_dict[" "] - - # add unk and pad token - if unk_token is not None: - vocab_dict[unk_token] = len(vocab_dict) - - if pad_token is not None: - vocab_dict[pad_token] = len(vocab_dict) - - return vocab_dict - - -def main(): - # See all possible arguments in src/transformers/training_args.py - # or by passing the --help flag to this script. - # We now keep distinct sets of args, for a cleaner separation of concerns. - - parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments)) - if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): - # If we pass only one argument to the script and it's the path to a json file, - # let's parse it to get our arguments. - model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) - else: - model_args, data_args, training_args = parser.parse_args_into_dataclasses() - - # Detecting last checkpoint. 
- last_checkpoint = None - if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: - last_checkpoint = get_last_checkpoint(training_args.output_dir) - if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: - raise ValueError( - f"Output directory ({training_args.output_dir}) already exists and is not empty. " - "Use --overwrite_output_dir to overcome." - ) - elif last_checkpoint is not None: - logger.info( - f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " - "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." - ) - - # Setup logging - logging.basicConfig( - format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", - datefmt="%m/%d/%Y %H:%M:%S", - handlers=[logging.StreamHandler(sys.stdout)], - ) - logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN) - - # Log on each process the small summary: - logger.warning( - f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, " - f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}" - ) - # Set the verbosity to info of the Transformers logger (on main process only): - if is_main_process(training_args.local_rank): - transformers.utils.logging.set_verbosity_info() - logger.info("Training/evaluation parameters %s", training_args) - - # Set seed before initializing model. - set_seed(training_args.seed) - - # 1. First, let's load the dataset - raw_datasets = DatasetDict() - task_name = data_args.task - lang_id = data_args.language - - if task_name is None: - raise ValueError( - "Set --task should be set to '' (e.g. 'fleurs-asr', 'mls', 'covost2', 'minds14') " - ) - if lang_id is None: - raise ValueError( - "Set --language should be set to the language id of the sub dataset " - "config to be used (e.g. 'pl', 'en.tr', 'fr-FR') or 'all'" - " for multi-lingual fine-tuning." - ) - if data_args.language_group is not None: - if data_args.task != "fleurs-asr": - raise ValueError("--language_group should only be used with --task=fleurs-asr") - if data_args.language != "all": - raise ValueError("--language_group should only be used with --language=all") - - if data_args.target_column_name is None: - target_column_name = TASK_TO_TARGET_COLUMN_NAME[task_name] - else: - target_column_name = data_args.target_column_name - - # here we differentiate between tasks with text as the target and classification tasks - is_text_target = target_column_name in ("transcription", "translation") - - config_name = ".".join([task_name.split("-")[0], lang_id]) - - if training_args.do_train: - raw_datasets["train"] = load_dataset( - data_args.dataset_name, - config_name, - split=data_args.train_split_name, - token=data_args.use_auth_token, - cache_dir=model_args.cache_dir, - ) - - if data_args.audio_column_name not in raw_datasets["train"].column_names: - raise ValueError( - f"--audio_column_name '{data_args.audio_column_name}' not found in dataset '{data_args.dataset_name}'." - " Make sure to set `--audio_column_name` to the correct audio column - one of" - f" {', '.join(raw_datasets['train'].column_names)}." - ) - - if target_column_name not in raw_datasets["train"].column_names: - raise ValueError( - f"--target_column_name {target_column_name} not found in dataset '{data_args.dataset_name}'. 
" - "Make sure to set `--target_column_name` to the correct text column - one of " - f"{', '.join(raw_datasets['train'].column_names)}." - ) - - if data_args.max_train_samples is not None: - raw_datasets["train"] = raw_datasets["train"].select(range(data_args.max_train_samples)) - - if training_args.do_eval: - raw_datasets["eval"] = load_dataset( - data_args.dataset_name, - config_name, - split=data_args.eval_split_name, - token=data_args.use_auth_token, - cache_dir=model_args.cache_dir, - ) - - if data_args.max_eval_samples is not None: - raw_datasets["eval"] = raw_datasets["eval"].select(range(data_args.max_eval_samples)) - - if training_args.do_predict: - raw_datasets["predict"] = load_dataset( - data_args.dataset_name, - config_name, - split=data_args.predict_split_name, - token=data_args.use_auth_token, - cache_dir=model_args.cache_dir, - ) - - if data_args.max_predict_samples is not None: - raw_datasets["predict"] = raw_datasets["predict"].select(range(data_args.max_predict_samples)) - - lang_list = next(iter(raw_datasets.values())).features["lang_id"].names - if not is_text_target: - label_list = next(iter(raw_datasets.values())).features[target_column_name].names - num_labels = len(label_list) - - num_workers = data_args.preprocessing_num_workers - - lang_group = data_args.language_group - if lang_group is not None: - with training_args.main_process_first(desc="language group filter"): - lang_group_id = next(iter(raw_datasets.values())).features["lang_group_id"].str2int(lang_group) - raw_datasets = raw_datasets.filter( - lambda lang_group: lang_group == lang_group_id, - num_proc=num_workers, - input_columns=["lang_group_id"], - ) - - # 2. We remove some special characters from the datasets - # that make training complicated and do not help in transcribing the speech - # E.g. characters, such as `,` and `.` do not really have an acoustic characteristic - # that could be easily picked up by the model - chars_to_ignore_regex = ( - f'[{"".join(data_args.chars_to_ignore)}]' if data_args.chars_to_ignore is not None else None - ) - - def remove_special_characters(batch): - if chars_to_ignore_regex is not None: - batch["target_text"] = re.sub(chars_to_ignore_regex, "", batch[target_column_name]).lower() + " " - else: - batch["target_text"] = batch[target_column_name].lower() + " " - return batch - - if is_text_target: - with training_args.main_process_first(desc="dataset map special characters removal"): - raw_datasets = raw_datasets.map( - remove_special_characters, - remove_columns=[target_column_name], - desc="remove special characters from datasets", - ) - - # save special tokens for tokenizer - word_delimiter_token = data_args.word_delimiter_token - unk_token = data_args.unk_token - pad_token = data_args.pad_token - - # 3. Next, let's load the config as we might need it to create - # the tokenizer - config = AutoConfig.from_pretrained( - model_args.model_name_or_path, cache_dir=model_args.cache_dir, token=data_args.use_auth_token - ) - - if is_text_target: - # 4. 
(Optional, for ASR and translation) If no tokenizer file is defined, - # we create the vocabulary of the model by extracting all unique characters from - # the training and evaluation datasets - # We need to make sure that only first rank saves vocabulary - # make sure all processes wait until vocab is created - tokenizer_name_or_path = model_args.tokenizer_name_or_path - tokenizer_kwargs = {} - if tokenizer_name_or_path is None: - # save vocab in training output dir - tokenizer_name_or_path = training_args.output_dir - - vocab_file = os.path.join(tokenizer_name_or_path, "vocab.json") - - with training_args.main_process_first(): - if training_args.overwrite_output_dir and os.path.isfile(vocab_file): - os.remove(vocab_file) - - with training_args.main_process_first(desc="dataset map vocabulary creation"): - if not os.path.isfile(vocab_file): - os.makedirs(tokenizer_name_or_path, exist_ok=True) - vocab_dict = create_vocabulary_from_data( - raw_datasets, - word_delimiter_token=word_delimiter_token, - unk_token=unk_token, - pad_token=pad_token, - ) - - # save vocab dict to be loaded into tokenizer - with open(vocab_file, "w") as file: - json.dump(vocab_dict, file) - - # if tokenizer has just been created - # it is defined by `tokenizer_class` if present in config else by `model_type` - if not config.is_encoder_decoder: - tokenizer_kwargs = { - "config": config if config.tokenizer_class is not None else None, - "tokenizer_type": config.model_type if config.tokenizer_class is None else None, - "unk_token": unk_token, - "pad_token": pad_token, - "word_delimiter_token": word_delimiter_token, - } - else: - tokenizer_kwargs = {} - - # 5. Now we can instantiate the feature extractor, tokenizer and model - # Note for distributed training, the .from_pretrained methods guarantee that only - # one local process can concurrently download model & vocab. 
- - # load feature_extractor and tokenizer - if is_text_target: - tokenizer = AutoTokenizer.from_pretrained( - tokenizer_name_or_path, - token=data_args.use_auth_token, - **tokenizer_kwargs, - ) - feature_extractor = AutoFeatureExtractor.from_pretrained( - model_args.model_name_or_path, cache_dir=model_args.cache_dir, token=data_args.use_auth_token - ) - - # adapt config - # (speech translation requires pre-configured seq2seq models) - if task_name != "covost2": - config.update( - { - "feat_proj_dropout": model_args.feat_proj_dropout, - "attention_dropout": model_args.attention_dropout, - "hidden_dropout": model_args.hidden_dropout, - "final_dropout": model_args.final_dropout, - "mask_time_prob": model_args.mask_time_prob, - "mask_time_length": model_args.mask_time_length, - "mask_feature_prob": model_args.mask_feature_prob, - "mask_feature_length": model_args.mask_feature_length, - "gradient_checkpointing": training_args.gradient_checkpointing, - "layerdrop": model_args.layerdrop, - "ctc_zero_infinity": model_args.ctc_zero_infinity, - "ctc_loss_reduction": model_args.ctc_loss_reduction, - "activation_dropout": model_args.activation_dropout, - } - ) - if training_args.do_train: - if is_text_target: - config.pad_token_id = tokenizer.pad_token_id - config.vocab_size = len(tokenizer) - else: - label_to_id = {v: i for i, v in enumerate(label_list)} - config.label2id = label_to_id - config.id2label = {id: label for label, id in label_to_id.items()} - config.num_labels = num_labels - - # create model - if target_column_name == "transcription": - model = AutoModelForCTC.from_pretrained( - model_args.model_name_or_path, - cache_dir=model_args.cache_dir, - config=config, - token=data_args.use_auth_token, - ) - elif config.is_encoder_decoder: - model = AutoModelForSpeechSeq2Seq.from_pretrained( - model_args.model_name_or_path, - cache_dir=model_args.cache_dir, - config=config, - token=data_args.use_auth_token, - ) - if model.config.decoder_start_token_id is None: - raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined") - else: - model = AutoModelForAudioClassification.from_pretrained( - model_args.model_name_or_path, - cache_dir=model_args.cache_dir, - config=config, - token=data_args.use_auth_token, - ) - - # freeze encoder - if model_args.freeze_feature_encoder: - model.freeze_feature_encoder() - - # 6. Now we preprocess the datasets including loading the audio, resampling and normalization - # Thankfully, `datasets` takes care of automatically loading and resampling the audio, - # so that we just need to set the correct target sampling rate and normalize the input - # via the `feature_extractor` - - # make sure that dataset decodes audio with correct sampling rate - dataset_sampling_rate = next(iter(raw_datasets.values())).features[data_args.audio_column_name].sampling_rate - if dataset_sampling_rate != feature_extractor.sampling_rate: - raw_datasets = raw_datasets.cast_column( - data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate) - ) - - # derive max & min input length for sample rate & max duration - max_input_length = data_args.max_duration_in_seconds * feature_extractor.sampling_rate - min_input_length = data_args.min_duration_in_seconds * feature_extractor.sampling_rate - audio_column_name = data_args.audio_column_name - - # `phoneme_language` is only relevant if the model is fine-tuned on phoneme classification - phoneme_language = data_args.phoneme_language - - # Preprocessing the datasets. 
- # We need to read the audio files as arrays and tokenize the targets. - def prepare_dataset(batch): - # load audio - sample = batch[audio_column_name] - - inputs = feature_extractor(sample["array"], sampling_rate=sample["sampling_rate"]) - batch["input_values"] = inputs.input_values[0] - batch["length"] = len(batch["input_values"]) - - # encode targets - additional_kwargs = {} - if phoneme_language is not None: - additional_kwargs["phonemizer_lang"] = phoneme_language - - if is_text_target: - batch["labels"] = tokenizer(batch["target_text"], **additional_kwargs).input_ids - else: - batch["labels"] = batch[target_column_name] - - batch["lang"] = batch["lang_id"] - - return batch - - with training_args.main_process_first(desc="dataset map preprocessing"): - vectorized_datasets = raw_datasets.map( - prepare_dataset, - remove_columns=next(iter(raw_datasets.values())).column_names, - num_proc=num_workers, - desc="preprocess datasets", - ) - - if training_args.do_train: - - def is_audio_in_length_range(length): - return length > min_input_length and length < max_input_length - - # filter data that is shorter than min_input_length - vectorized_datasets["train"] = vectorized_datasets["train"].filter( - is_audio_in_length_range, - num_proc=num_workers, - input_columns=["length"], - ) - - # 7. Next, we can prepare for the training step. - # Let's use the appropriate XTREME-S evaluation metric, - # instantiate a data collator and the trainer - - # Define evaluation metrics during training, *i.e.* word error rate, character error rate - eval_metric = load_metric("xtreme_s", task_name) - - # for large datasets it is advised to run the preprocessing on a - # single machine first with ``args.preprocessing_only`` since there will mostly likely - # be a timeout when running the script in distributed mode. - # In a second step ``args.preprocessing_only`` can then be set to `False` to load the - # cached dataset - if data_args.preprocessing_only: - logger.info(f"Data preprocessing finished. 
Files cached at {vectorized_datasets.cache_files}") - return - - def asr_logits_argmax(logits, labels): - return logits.argmax(dim=-1) - - def compute_asr_metric(pred): - pred.label_ids[pred.label_ids == -100] = tokenizer.pad_token_id - - pred_str = tokenizer.batch_decode(pred.predictions) - # we do not want to group tokens when computing the metrics - label_str = tokenizer.batch_decode(pred.label_ids, group_tokens=False) - - metric = eval_metric.compute(predictions=pred_str, references=label_str) - return metric - - def compute_classification_metric(pred): - pred_ids = np.argmax(pred.predictions, axis=1) - metric = eval_metric.compute(predictions=pred_ids, references=pred.label_ids) - return metric - - # Now save everything to be able to create a single processor later - if is_main_process(training_args.local_rank): - # save feature extractor, tokenizer and config - feature_extractor.save_pretrained(training_args.output_dir) - if is_text_target: - tokenizer.save_pretrained(training_args.output_dir) - config.save_pretrained(training_args.output_dir) - # wait until configs are saved in the main process before loading the processor - if training_args.local_rank != -1: - torch.distributed.barrier() - - if is_text_target: - processor = AutoProcessor.from_pretrained(training_args.output_dir) - else: - processor = AutoFeatureExtractor.from_pretrained(training_args.output_dir) - - # Instantiate custom data collator - data_collator = SpeechDataCollatorWithPadding(processor=processor, pad_labels=is_text_target) - - # Initialize Trainer - if target_column_name == "translation": - trainer = Seq2SeqTrainer( - model=model, - data_collator=data_collator, - args=training_args, - preprocess_logits_for_metrics=asr_logits_argmax if training_args.predict_with_generate else None, - compute_metrics=compute_asr_metric if training_args.predict_with_generate else None, - train_dataset=vectorized_datasets["train"] if training_args.do_train else None, - eval_dataset=vectorized_datasets["eval"] if training_args.do_eval else None, - tokenizer=feature_extractor, - ) - else: - trainer = Trainer( - model=model, - data_collator=data_collator, - args=training_args, - preprocess_logits_for_metrics=asr_logits_argmax if is_text_target else None, - compute_metrics=compute_asr_metric if is_text_target else compute_classification_metric, - train_dataset=vectorized_datasets["train"] if training_args.do_train else None, - eval_dataset=vectorized_datasets["eval"] if training_args.do_eval else None, - tokenizer=feature_extractor, - ) - - # 8. 
Finally, we can start training - - # Training - if training_args.do_train: - # use last checkpoint if exist - if last_checkpoint is not None: - checkpoint = last_checkpoint - elif os.path.isdir(model_args.model_name_or_path): - checkpoint = model_args.model_name_or_path - else: - checkpoint = None - - train_result = trainer.train(resume_from_checkpoint=checkpoint) - trainer.save_model() - - metrics = train_result.metrics - max_train_samples = ( - data_args.max_train_samples - if data_args.max_train_samples is not None - else len(vectorized_datasets["train"]) - ) - metrics["train_samples"] = min(max_train_samples, len(vectorized_datasets["train"])) - - trainer.log_metrics("train", metrics) - trainer.save_metrics("train", metrics) - trainer.save_state() - - # Evaluation on the test set - results = {} - if training_args.do_predict: - logger.info(f"*** Evaluating on the `{data_args.predict_split_name}` set ***") - if data_args.per_lang_metrics: - # separate the `test` dataset into language-specific subsets and compute metrics for each of them - metrics = {} - average_metrics = defaultdict(list) - for lang_id in range(len(lang_list)): - lang_name = lang_list[lang_id] - with training_args.main_process_first(desc="per-language dataset filter"): - lang_dataset = vectorized_datasets["predict"].filter( - lambda lang: lang == lang_id, - num_proc=num_workers, - input_columns=["lang"], - ) - lang_metrics = trainer.evaluate(lang_dataset) - redundant_metrics = ["eval_runtime", "eval_samples_per_second", "eval_steps_per_second", "eval_epoch"] - for metric_name, value in lang_metrics.items(): - average_metrics[metric_name].append(value) - if metric_name not in redundant_metrics: - metrics[f"{metric_name}_{lang_name}"] = value - for metric_name, value in average_metrics.items(): - metrics[metric_name] = np.mean(value) - else: - metrics = trainer.evaluate(vectorized_datasets["predict"]) - max_predict_samples = ( - data_args.max_predict_samples - if data_args.max_predict_samples is not None - else len(vectorized_datasets["predict"]) - ) - metrics["predict_samples"] = min(max_predict_samples, len(vectorized_datasets["predict"])) - - # make sure that the `predict` metrics end up in the log history for the model card - trainer.log(OrderedDict(sorted(metrics.items()))) - - trainer.log_metrics("predict", metrics) - trainer.save_metrics("predict", metrics) - - # Write model card and (optionally) push to hub - kwargs = { - "finetuned_from": model_args.model_name_or_path, - "tasks": task_name, - "tags": [task_name, data_args.dataset_name], - "dataset_args": ( - f"Config: {config_name}, Training split: {data_args.train_split_name}, Eval split:" - f" {data_args.eval_split_name}, Predict split: {data_args.predict_split_name}" - ), - "dataset": f"{data_args.dataset_name.upper()} - {config_name.upper()}", - "language": data_args.language, - } - - if training_args.push_to_hub: - trainer.push_to_hub(**kwargs) - else: - trainer.create_model_card(**kwargs) - - return results - - -if __name__ == "__main__": - main() diff --git a/examples/research_projects/zero-shot-distillation/README.md b/examples/research_projects/zero-shot-distillation/README.md deleted file mode 100644 index 14b6a8ea07f..00000000000 --- a/examples/research_projects/zero-shot-distillation/README.md +++ /dev/null @@ -1,155 +0,0 @@ -# Zero-shot classifier distillation - -Author: @joeddav - -This script provides a way to improve the speed and memory performance of a zero-shot classifier by training a more -efficient student model from the zero-shot teacher's 
predictions over an unlabeled dataset. - -The zero-shot classification pipeline uses a model pre-trained on natural language inference (NLI) to determine the -compatibility of a set of candidate class names with a given sequence. This serves as a convenient out-of-the-box -classifier without the need for labeled training data. However, for a given sequence, the method requires each -possible label to be fed through the large NLI model separately. Thus for `N` sequences and `K` classes, a total of -`N*K` forward passes through the model are required. This requirement slows inference considerably, particularly as -`K` grows. - -Given (1) an unlabeled corpus and (2) a set of candidate class names, the provided script trains a student model -with a standard classification head with `K` output dimensions. The resulting student model can then be used for -classifying novel text instances with a significant boost in speed and memory performance while retaining similar -classification performance to the original zero-shot model. - -### Usage - -A teacher NLI model can be distilled to a more efficient student model by running [`distill_classifier.py`](https://github.com/huggingface/transformers/blob/main/examples/research_projects/zero-shot-distillation/distill_classifier.py): - -```bash -python distill_classifier.py \ ---data_file <unlabeled data file> \ ---class_names_file <class names file> \ ---output_dir <output dir> -``` - -`<unlabeled data file>` should be a text file with a single unlabeled example per line. `<class names file>` is a text file with one class name per line. - -Other optional arguments include: - -- `--teacher_name_or_path` (default: `roberta-large-mnli`): The name or path of the NLI teacher model. -- `--student_name_or_path` (default: `distilbert-base-uncased`): The name or path of the student model which will -be fine-tuned to copy the teacher predictions. -- `--hypothesis_template` (default `"This example is {}."`): The template used to turn each label into an NLI-style -hypothesis when generating teacher predictions. This template must include a `{}` or similar syntax for the -candidate label to be inserted into the template. For example, the default template is `"This example is {}."` With -the candidate label `sports`, this would be fed into the model like `[CLS] sequence to classify [SEP] This example -is sports . [SEP]`. -- `--multi_label`: Whether or not multiple candidate labels can be true. By default, the scores are normalized such -that the sum of the label likelihoods for each sequence is 1. If `--multi_label` is passed, the labels are -considered independent and probabilities are normalized for each candidate by doing a softmax of the entailment -score vs. the contradiction score. This is sometimes called "multi-class multi-label" classification. -- `--temperature` (default: `1.0`): The temperature applied to the softmax of the teacher model predictions. A -higher temperature results in a student with smoother (lower confidence) predictions than the teacher while a value -`<1` results in a higher-confidence, peaked distribution. The default `1.0` is equivalent to no smoothing. -- `--teacher_batch_size` (default: `32`): The batch size used for generating a single set of teacher predictions. -Does not affect training. Use `--per_device_train_batch_size` to change the training batch size. - -Any of the arguments in the 🤗 Trainer's -[`TrainingArguments`](https://huggingface.co/transformers/main_classes/trainer.html?#trainingarguments) can also be -modified, such as `--learning_rate`, `--fp16`, `--no_cuda`, `--warmup_steps`, etc.
Run `python distill_classifier.py --help` for a full list of available arguments or consult the [Trainer -documentation](https://huggingface.co/transformers/main_classes/trainer.html#trainingarguments). - -> **Note**: Distributed and TPU training are not currently supported. Single-node multi-GPU is supported, however, -and will run automatically if multiple GPUs are available. - -### Example: Topic classification - -> A full colab demo notebook of this example can be found [here](https://colab.research.google.com/drive/1mjBjd0cR8G57ZpsnFCS3ngGyo5nCa9ya?usp=sharing). - -Let's say we're interested in classifying news articles into one of four topic categories: "the world", "sports", -"business", or "science/tech". We have an unlabeled dataset, [AG's News](https://huggingface.co/datasets/ag_news), -which corresponds to this problem (in reality AG's News is annotated, but we will pretend it is not for the sake of -example). - -We can use an NLI model like `roberta-large-mnli` for zero-shot classification like so: - -```python ->>> class_names = ["the world", "sports", "business", "science/tech"] ->>> hypothesis_template = "This text is about {}." ->>> sequence = "A new moon has been discovered in Jupiter's orbit" - ->>> zero_shot_classifier = pipeline("zero-shot-classification", model="roberta-large-mnli") ->>> zero_shot_classifier(sequence, class_names, hypothesis_template=hypothesis_template) -{'sequence': "A new moon has been discovered in Jupiter's orbit", - 'labels': ['science/tech', 'the world', 'business', 'sports'], - 'scores': [0.7035840153694153, 0.18744826316833496, 0.06027870625257492, 0.04868902638554573]} -``` - -Unfortunately, inference is slow since each of our 4 class names must be fed through the large model for every -sequence to be classified. But with our unlabeled data we can distill the model to a small distilbert classifier to -make future inference much faster. - -To run the script, we will need to put each training example (text only) from AG's News on its own line in -`agnews/unlabeled.txt`, and each of the four class names in the newline-separated `agnews/class_names.txt`. -Then we can run distillation with the following command: - -```bash -python distill_classifier.py \ ---data_file ./agnews/unlabeled.txt \ ---class_names_file ./agnews/class_names.txt \ ---teacher_name_or_path roberta-large-mnli \ ---hypothesis_template "This text is about {}." \ ---output_dir ./agnews/distilled -``` - -The script will generate a set of soft zero-shot predictions from `roberta-large-mnli` for each example in -`agnews/unlabeled.txt`. It will then train a student distilbert classifier on the teacher predictions and -save the resulting model in `./agnews/distilled`.
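Under the hood, the student is optimized with a soft cross-entropy against the teacher's label distribution (this is what `DistillationTrainer.compute_loss` in the script below does). The following is a rough, self-contained sketch of that objective; the `soft_cross_entropy` helper and the toy tensor values are illustrative only and are not part of the script:

```python
import torch


def soft_cross_entropy(student_logits: torch.Tensor, teacher_probs: torch.Tensor) -> torch.Tensor:
    # Mean soft cross-entropy between the teacher's class probabilities and the
    # student's log-probabilities, i.e. the distillation loss used during training.
    return -(teacher_probs * student_logits.log_softmax(dim=-1)).sum(dim=-1).mean()


# Toy example with K=4 candidate classes (values are made up for illustration).
teacher_probs = torch.tensor([[0.70, 0.10, 0.10, 0.10]])    # soft labels from the teacher
student_logits = torch.randn(1, 4, requires_grad=True)      # raw scores from the student head
loss = soft_cross_entropy(student_logits, teacher_probs)
loss.backward()  # gradients flow only into the student; the teacher is not involved here
```

Dividing the teacher's entailment logits by `--temperature` before the softmax, as the script does, simply smooths or sharpens `teacher_probs` before this loss is computed.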
- -The resulting model can then be loaded and used like any other pre-trained classifier: - -```python -from transformers import AutoModelForSequenceClassification, AutoTokenizer -model = AutoModelForSequenceClassification.from_pretrained("./agnews/distilled") -tokenizer = AutoTokenizer.from_pretrained("./agnews/distilled") -``` - -and even used trivially with a `TextClassificationPipeline`: - -```python ->>> distilled_classifier = TextClassificationPipeline(model=model, tokenizer=tokenizer, return_all_scores=True) ->>> distilled_classifier(sequence) -[[{'label': 'the world', 'score': 0.14899294078350067}, - {'label': 'sports', 'score': 0.03205857425928116}, - {'label': 'business', 'score': 0.05943061783909798}, - {'label': 'science/tech', 'score': 0.7595179080963135}]] -``` - -> Tip: pass `device=0` when constructing a pipeline to run on a GPU. - -As we can see, the results of the student closely resemble those of the teacher despite never having seen this -example during training. Now let's do a quick & dirty speed comparison simulating 16K examples with a batch size of -16: - -```python -for _ in range(1000): - zero_shot_classifier([sequence] * 16, class_names) -# runs in 1m 23s on a single V100 GPU -``` - -```python -%%time -for _ in range(1000): - distilled_classifier([sequence] * 16) -# runs in 10.3s on a single V100 GPU -``` - -As we can see, the distilled student model runs an order of magnitude faster than its teacher NLI model. This is -also a setting where we only have `K=4` possible labels. The higher the number of classes for a given task, the more -drastic the speedup will be, since the zero-shot teacher's complexity scales linearly with the number of classes. - -Since we secretly have access to ground truth labels for AG's News, we can evaluate the accuracy of each model. The -original zero-shot model `roberta-large-mnli` gets an accuracy of 69.3% on the held-out test set. After training a -student on the unlabeled training set, the distilled model gets a similar score of 70.4%. - -Lastly, you can share the distilled model with the community and/or use it with our inference API by [uploading it -to the 🤗 Hub](https://huggingface.co/transformers/model_sharing.html). We've uploaded the distilled model from this -example at -[joeddav/distilbert-base-uncased-agnews-student](https://huggingface.co/joeddav/distilbert-base-uncased-agnews-student). diff --git a/examples/research_projects/zero-shot-distillation/distill_classifier.py b/examples/research_projects/zero-shot-distillation/distill_classifier.py deleted file mode 100644 index 56181208477..00000000000 --- a/examples/research_projects/zero-shot-distillation/distill_classifier.py +++ /dev/null @@ -1,338 +0,0 @@ -import logging -import os -import sys -from dataclasses import dataclass, field -from typing import List, Optional - -import torch -from datasets import Dataset -from torch import nn -from tqdm.auto import tqdm - -from transformers import ( - AutoModelForSequenceClassification, - AutoTokenizer, - HfArgumentParser, - Trainer, - TrainingArguments, - set_seed, - utils, -) -from transformers.trainer_utils import get_last_checkpoint, is_main_process - - -DESCRIPTION = """ -Distills an NLI-based zero-shot classifier to a smaller, more efficient model with a fixed set of candidate class -names. Useful for speeding up zero-shot classification in cases where labeled training data is not available, but -when only a single fixed set of classes is needed.
-Takes a teacher NLI model, student classifier model, unlabeled
-dataset, and set of K possible class names. Yields a single classifier with K outputs corresponding to the provided
-class names.
-"""
-
-logger = logging.getLogger(__name__)
-
-
-@dataclass
-class TeacherModelArguments:
-    teacher_name_or_path: Optional[str] = field(
-        default="roberta-large-mnli", metadata={"help": "The NLI/zero-shot teacher model to be distilled."}
-    )
-    hypothesis_template: Optional[str] = field(
-        default="This example is {}.",
-        metadata={
-            "help": (
-                "Template used to turn class names into mock hypotheses for teacher NLI model. Must include {{}} "
-                "where class name is inserted."
-            )
-        },
-    )
-    teacher_batch_size: Optional[int] = field(
-        default=32, metadata={"help": "Batch size for generating teacher predictions."}
-    )
-    multi_label: Optional[bool] = field(
-        default=False,
-        metadata={
-            "help": (
-                "Allow multiple classes to be true rather than forcing them to sum to 1 (sometimes called "
-                "multi-class multi-label classification)."
-            )
-        },
-    )
-    temperature: Optional[float] = field(
-        default=1.0, metadata={"help": "Temperature applied to teacher softmax for distillation."}
-    )
-
-
-@dataclass
-class StudentModelArguments:
-    student_name_or_path: Optional[str] = field(
-        default="distilbert-base-uncased", metadata={"help": "The student model into which the teacher is distilled."}
-    )
-
-
-@dataclass
-class DataTrainingArguments:
-    data_file: str = field(metadata={"help": "Text file with one unlabeled instance per line."})
-    class_names_file: str = field(metadata={"help": "Text file with one class name per line."})
-    use_fast_tokenizer: bool = field(
-        default=True,
-        metadata={"help": "Whether to use one of the fast tokenizers (backed by the Rust tokenizers library) or not."},
-    )
-
-
-@dataclass
-class DistillTrainingArguments(TrainingArguments):
-    output_dir: Optional[str] = field(
-        default=None,
-        metadata={"help": "The output directory where the model predictions and checkpoints will be written."},
-    )
-    per_device_train_batch_size: int = field(
-        default=32, metadata={"help": "Batch size per GPU/TPU core/CPU for training."}
-    )
-    per_device_eval_batch_size: int = field(
-        default=128, metadata={"help": "Batch size per GPU/TPU core/CPU for evaluation."}
-    )
-    num_train_epochs: float = field(default=1.0, metadata={"help": "Total number of training epochs to perform."})
-    do_train: bool = field(default=True, metadata={"help": "Whether to run training of student model."})
-    do_eval: bool = field(
-        default=True,
-        metadata={
-            "help": (
-                "Whether to evaluate the agreement of the final student predictions and the teacher predictions "
-                "after training."
-            )
-        },
-    )
-    save_total_limit: Optional[int] = field(
-        default=0,
-        metadata={
-            "help": (
-                "Limit the total amount of checkpoints. "
-                "Deletes the older checkpoints in the output_dir. Default is 0 (no checkpoints)."
- ) - }, - ) - - -class DistillationTrainer(Trainer): - def compute_loss(self, model, inputs, return_outputs=False): - target_p = inputs["labels"] - outputs = model(inputs["input_ids"], attention_mask=inputs["attention_mask"]) - logits = outputs[0] - - loss = -torch.sum(target_p * logits.log_softmax(dim=-1), axis=-1).mean() - - if return_outputs: - return loss, outputs - - return loss - - -def read_lines(path): - lines = [] - with open(path, "r") as f: - for line in f: - line = line.strip() - if len(line) > 0: - lines.append(line) - return lines - - -def get_premise_hypothesis_pairs(examples, class_names, hypothesis_template): - premises = [] - hypotheses = [] - for example in examples: - for name in class_names: - premises.append(example) - hypotheses.append(hypothesis_template.format(name)) - return premises, hypotheses - - -def get_entailment_id(config): - for label, ind in config.label2id.items(): - if label.lower().startswith("entail"): - return ind - logger.warning("Could not identify entailment dimension from teacher config label2id. Setting to -1.") - return -1 - - -def get_teacher_predictions( - model_path: str, - examples: List[str], - class_names: List[str], - hypothesis_template: str, - batch_size: int, - temperature: float, - multi_label: bool, - use_fast_tokenizer: bool, - no_cuda: bool, - fp16: bool, -): - """ - Gets predictions by the same method as the zero-shot pipeline but with DataParallel & more efficient batching - """ - model = AutoModelForSequenceClassification.from_pretrained(model_path) - model_config = model.config - if not no_cuda and torch.cuda.is_available(): - model = nn.DataParallel(model.cuda()) - batch_size *= len(model.device_ids) - tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=use_fast_tokenizer) - - premises, hypotheses = get_premise_hypothesis_pairs(examples, class_names, hypothesis_template) - logits = [] - - for i in tqdm(range(0, len(premises), batch_size)): - batch_premises = premises[i : i + batch_size] - batch_hypotheses = hypotheses[i : i + batch_size] - - encodings = tokenizer( - batch_premises, - batch_hypotheses, - padding=True, - truncation="only_first", - return_tensors="pt", - ) - - with torch.cuda.amp.autocast(enabled=fp16): - with torch.no_grad(): - outputs = model(**encodings) - logits.append(outputs.logits.detach().cpu().float()) - - entail_id = get_entailment_id(model_config) - contr_id = -1 if entail_id == 0 else 0 - logits = torch.cat(logits, dim=0) # N*K x 3 - nli_logits = logits.reshape(len(examples), len(class_names), -1)[..., [contr_id, entail_id]] # N x K x 2 - - if multi_label: - # softmax over (contr, entail) logits for each class independently - nli_prob = (nli_logits / temperature).softmax(-1) - else: - # softmax over entail logits across classes s.t. class probabilities sum to 1. - nli_prob = (nli_logits / temperature).softmax(1) - - return nli_prob[..., 1] # N x K - - -def main(): - parser = HfArgumentParser( - (DataTrainingArguments, TeacherModelArguments, StudentModelArguments, DistillTrainingArguments), - description=DESCRIPTION, - ) - - if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): - # If we pass only one argument to the script and it's the path to a json file, - # let's parse it to get our arguments. - data_args, teacher_args, student_args, training_args = parser.parse_json_file( - json_file=os.path.abspath(sys.argv[1]) - ) - else: - data_args, teacher_args, student_args, training_args = parser.parse_args_into_dataclasses() - - # Detecting last checkpoint. 
- last_checkpoint = None - if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: - last_checkpoint = get_last_checkpoint(training_args.output_dir) - if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: - raise ValueError( - f"Output directory ({training_args.output_dir}) already exists and is not empty. " - "Use --overwrite_output_dir to overcome." - ) - elif last_checkpoint is not None: - logger.info( - f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " - "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." - ) - - # Setup logging - logging.basicConfig( - format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", - datefmt="%m/%d/%Y %H:%M:%S", - handlers=[logging.StreamHandler(sys.stdout)], - ) - logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN) - - # Log on each process the small summary: - logger.warning( - f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" - + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}" - ) - # Set the verbosity to info of the Transformers logger (on main process only): - if is_main_process(training_args.local_rank): - utils.logging.set_verbosity_info() - utils.logging.enable_default_handler() - utils.logging.enable_explicit_format() - - if training_args.local_rank != -1: - raise ValueError("Distributed training is not currently supported.") - if training_args.tpu_num_cores is not None: - raise ValueError("TPU acceleration is not currently supported.") - - logger.info(f"Training/evaluation parameters {training_args}") - - # Set seed before initializing model. - set_seed(training_args.seed) - - # 1. read in data - examples = read_lines(data_args.data_file) - class_names = read_lines(data_args.class_names_file) - - # 2. get teacher predictions and load into dataset - logger.info("Generating predictions from zero-shot teacher model") - teacher_soft_preds = get_teacher_predictions( - teacher_args.teacher_name_or_path, - examples, - class_names, - teacher_args.hypothesis_template, - teacher_args.teacher_batch_size, - teacher_args.temperature, - teacher_args.multi_label, - data_args.use_fast_tokenizer, - training_args.no_cuda, - training_args.fp16, - ) - dataset = Dataset.from_dict( - { - "text": examples, - "labels": teacher_soft_preds, - } - ) - - # 3. create student - logger.info("Initializing student model") - model = AutoModelForSequenceClassification.from_pretrained( - student_args.student_name_or_path, num_labels=len(class_names) - ) - tokenizer = AutoTokenizer.from_pretrained(student_args.student_name_or_path, use_fast=data_args.use_fast_tokenizer) - model.config.id2label = dict(enumerate(class_names)) - model.config.label2id = {label: i for i, label in enumerate(class_names)} - - # 4. 
train student on teacher predictions - dataset = dataset.map(tokenizer, input_columns="text") - dataset.set_format("torch") - - def compute_metrics(p, return_outputs=False): - preds = p.predictions.argmax(-1) - proxy_labels = p.label_ids.argmax(-1) # "label_ids" are actually distributions - return {"agreement": (preds == proxy_labels).mean().item()} - - trainer = DistillationTrainer( - model=model, - tokenizer=tokenizer, - args=training_args, - train_dataset=dataset, - compute_metrics=compute_metrics, - ) - - if training_args.do_train: - logger.info("Training student model on teacher predictions") - trainer.train() - - if training_args.do_eval: - agreement = trainer.evaluate(eval_dataset=dataset)["eval_agreement"] - logger.info(f"Agreement of student and teacher predictions: {agreement * 100:0.2f}%") - - trainer.save_model() - - -if __name__ == "__main__": - main() diff --git a/src/transformers/generation/watermarking.py b/src/transformers/generation/watermarking.py index da90c03dd0d..1c728a287b8 100644 --- a/src/transformers/generation/watermarking.py +++ b/src/transformers/generation/watermarking.py @@ -490,7 +490,7 @@ class SynthIDTextWatermarkDetector: Parameters: detector_module ([`BayesianDetectorModel`]): Bayesian detector module object initialized with parameters. - Check examples/research_projects/synthid_text/detector_training.py for usage. + Check https://github.com/huggingface/transformers-research-projects/tree/main/synthid_text for usage. logits_processor (`SynthIDTextWatermarkLogitsProcessor`): The logits processor used for watermarking. tokenizer (`Any`): @@ -502,7 +502,7 @@ class SynthIDTextWatermarkDetector: ... AutoTokenizer, BayesianDetectorModel, SynthIDTextWatermarkLogitsProcessor, SynthIDTextWatermarkDetector ... ) - >>> # Load the detector. See examples/research_projects/synthid_text for training a detector. + >>> # Load the detector. See https://github.com/huggingface/transformers-research-projects/tree/main/synthid_text for training a detector. >>> detector_model = BayesianDetectorModel.from_pretrained("joaogante/dummy_synthid_detector") >>> logits_processor = SynthIDTextWatermarkLogitsProcessor( ... **detector_model.config.watermarking_config, device="cpu" diff --git a/utils/release.py b/utils/release.py index d5b74602e68..e4e79cec158 100644 --- a/utils/release.py +++ b/utils/release.py @@ -95,8 +95,6 @@ def update_version_in_examples(version: str): """ for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES): # Removing some of the folders with non-actively maintained examples from the walk - if "research_projects" in directories: - directories.remove("research_projects") if "legacy" in directories: directories.remove("legacy") for fname in fnames: