# coding=utf-8
# Copyright 2018 The Open AI Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes for OpenAI GPT."""
from __future__ import (absolute_import, division, print_function,
                        unicode_literals)

import json
import logging
import os
import re
import sys
from io import open

from tqdm import tqdm

from .file_utils import cached_path
from .tokenization_utils import PreTrainedTokenizer, clean_up_tokenization
from .tokenization_bert import BasicTokenizer

logger = logging.getLogger(__name__)

VOCAB_FILES_NAMES = {
    'vocab_file': 'vocab.json',
    'merges_file': 'merges.txt',
    'special_tokens_file': 'special_tokens.txt'
}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'openai-gpt': "https://s3.amazonaws.com/models.huggingface.co/bert/openai-gpt-vocab.json",
    },
    'merges_file': {
        'openai-gpt': "https://s3.amazonaws.com/models.huggingface.co/bert/openai-gpt-merges.txt",
    },
    'special_tokens_file': {
        'openai-gpt': None,
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'openai-gpt': 512,
}


def get_pairs(word):
    """
    Return the set of symbol pairs in a word.
    word is represented as a tuple of symbols (symbols being variable-length strings).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


def text_standardize(text):
    """
    Fixes some issues the spacy tokenizer had on books corpus,
    and also does some whitespace standardization.
    """
    text = text.replace('—', '-')
    text = text.replace('–', '-')
    text = text.replace('―', '-')
    text = text.replace('…', '...')
    text = text.replace('´', "'")
    text = re.sub(r'''(-+|~+|!+|"+|;+|\?+|\++|,+|\)+|\(+|\\+|\/+|\*+|\[+|\]+|}+|{+|\|+|_+)''', r' \1 ', text)
    text = re.sub(r'\s*\n\s*', ' \n ', text)
    text = re.sub(r'[^\S\n]+', ' ', text)
    return text.strip()


class OpenAIGPTTokenizer(PreTrainedTokenizer):
    """
    BPE tokenizer. Peculiarities:
        - lower-cases all inputs,
        - uses SpaCy tokenizer and ftfy for pre-BPE tokenization if they are installed,
          falls back to BERT's BasicTokenizer if not,
        - argument special_tokens and function set_special_tokens:
            can be used to add additional symbols (ex: "__classify__") to a vocabulary.
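
    Example (a minimal sketch; the file names below are placeholders for a locally
    downloaded GPT vocabulary and merges file)::

        tokenizer = OpenAIGPTTokenizer("vocab.json", "merges.txt")
        tokens = tokenizer.tokenize("Hello world!")    # inputs are lower-cased
        ids = tokenizer.convert_tokens_to_ids(tokens)  # same as tokenizer.encode("Hello world!")
        text = tokenizer.decode(ids)                   # lower-cased, detokenized string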
""" vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__(self, vocab_file, merges_file, special_tokens_file=None, special_tokens=None, max_len=None): try: import ftfy import spacy self.nlp = spacy.load('en', disable=['parser', 'tagger', 'ner', 'textcat']) self.fix_text = ftfy.fix_text except ImportError: logger.warning("ftfy or spacy is not installed using BERT BasicTokenizer instead of SpaCy & ftfy.") self.nlp = BasicTokenizer(do_lower_case=True, never_split=special_tokens if special_tokens is not None else []) self.fix_text = None self.max_len = max_len if max_len is not None else int(1e12) self.encoder = json.load(open(vocab_file, encoding="utf-8")) self.decoder = {v:k for k,v in self.encoder.items()} merges = open(merges_file, encoding='utf-8').read().split('\n')[1:-1] merges = [tuple(merge.split()) for merge in merges] self.bpe_ranks = dict(zip(merges, range(len(merges)))) self.cache = {} all_special_tokens = [] if special_tokens_file is not None: special_tokens_to_add = open(special_tokens_file, encoding='utf-8').read().split('\n')[:-1] all_special_tokens.extend(special_tokens_to_add) if special_tokens is not None and special_tokens: all_special_tokens.extend(special_tokens) self.special_tokens = {} self.special_tokens_decoder = {} self.set_special_tokens(all_special_tokens) def __len__(self): return len(self.encoder) + len(self.special_tokens) def set_special_tokens(self, special_tokens): """ Add a list of additional tokens to the encoder. The additional tokens are indexed starting from the last index of the current vocabulary in the order of the `special_tokens` list. """ if not special_tokens: self.special_tokens = {} self.special_tokens_decoder = {} return self.special_tokens = dict((tok, len(self.encoder) + i) for i, tok in enumerate(special_tokens)) self.special_tokens_decoder = {v:k for k, v in self.special_tokens.items()} if self.fix_text is None: # Using BERT's BasicTokenizer: we can update the tokenizer self.nlp.never_split = special_tokens logger.info("Special tokens {}".format(self.special_tokens)) def bpe(self, token): word = tuple(token[:-1]) + (token[-1] + '',) if token in self.cache: return self.cache[token] pairs = get_pairs(word) if not pairs: return token+'' while True: bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf'))) if bigram not in self.bpe_ranks: break first, second = bigram new_word = [] i = 0 while i < len(word): try: j = word.index(first, i) new_word.extend(word[i:j]) i = j except: new_word.extend(word[i:]) break if word[i] == first and i < len(word)-1 and word[i+1] == second: new_word.append(first+second) i += 2 else: new_word.append(word[i]) i += 1 new_word = tuple(new_word) word = new_word if len(word) == 1: break else: pairs = get_pairs(word) word = ' '.join(word) if word == '\n ': word = '\n' self.cache[token] = word return word def tokenize(self, text): """ Tokenize a string. """ split_tokens = [] if self.fix_text is None: # Using BERT's BasicTokenizer text = self.nlp.tokenize(text) for token in text: split_tokens.extend([t for t in self.bpe(token).split(' ')]) else: # Using SpaCy & ftfy (original tokenization process of OpenAI GPT) text = self.nlp(text_standardize(self.fix_text(text))) for token in text: split_tokens.extend([t for t in self.bpe(token.text.lower()).split(' ')]) return split_tokens def convert_tokens_to_ids(self, tokens): """ Converts a sequence of tokens into ids using the vocab. 
""" ids = [] if isinstance(tokens, str) or (sys.version_info[0] == 2 and isinstance(tokens, unicode)): if tokens in self.special_tokens: return self.special_tokens[tokens] else: return self.encoder.get(tokens, 0) for token in tokens: if token in self.special_tokens: ids.append(self.special_tokens[token]) else: ids.append(self.encoder.get(token, 0)) if len(ids) > self.max_len: logger.warning( "Token indices sequence length is longer than the specified maximum " " sequence length for this OpenAI GPT model ({} > {}). Running this" " sequence through the model will result in indexing errors".format(len(ids), self.max_len) ) return ids def convert_ids_to_tokens(self, ids, skip_special_tokens=False): """Converts a sequence of ids in BPE tokens using the vocab.""" tokens = [] for i in ids: if i in self.special_tokens_decoder: if not skip_special_tokens: tokens.append(self.special_tokens_decoder[i]) else: tokens.append(self.decoder[i]) return tokens def encode(self, text): return self.convert_tokens_to_ids(self.tokenize(text)) def decode(self, ids, skip_special_tokens=False, clean_up_tokenization_spaces=True): """Converts a sequence of ids in a string.""" tokens = self.convert_ids_to_tokens(ids, skip_special_tokens=skip_special_tokens) out_string = ''.join(tokens).replace('', ' ').strip() if clean_up_tokenization_spaces: out_string = out_string.replace('', '') out_string = clean_up_tokenization(out_string) return out_string def save_vocabulary(self, vocab_path): """Save the tokenizer vocabulary and merge files to a directory.""" if not os.path.isdir(vocab_path): logger.error("Vocabulary path ({}) should be a directory".format(vocab_path)) return vocab_file = os.path.join(vocab_path, VOCAB_FILES_NAMES['vocab_file']) merge_file = os.path.join(vocab_path, VOCAB_FILES_NAMES['merges_file']) special_tokens_file = os.path.join(vocab_path, VOCAB_FILES_NAMES['special_tokens_file']) with open(vocab_file, 'w', encoding='utf-8') as f: f.write(json.dumps(self.encoder, ensure_ascii=False)) index = 0 with open(merge_file, "w", encoding="utf-8") as writer: writer.write(u'#version: 0.2\n') for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]): if index != token_index: logger.warning("Saving vocabulary to {}: BPE merge indices are not consecutive." " Please check that the tokenizer is not corrupted!".format(merge_file)) index = token_index writer.write(' '.join(bpe_tokens) + u'\n') index += 1 index = len(self.encoder) with open(special_tokens_file, 'w', encoding='utf-8') as writer: for token, token_index in sorted(self.special_tokens.items(), key=lambda kv: kv[1]): if index != token_index: logger.warning("Saving special tokens vocabulary to {}: BPE indices are not consecutive." " Please check that the tokenizer is not corrupted!".format(special_tokens_file)) index = token_index writer.write(token + u'\n') index += 1 return vocab_file, merge_file, special_tokens_file