mirror of
https://github.com/huggingface/transformers.git
synced 2025-07-31 02:02:21 +06:00
Bug fix: token classification pipeline when passing offset_mapping (#22034)
Fix slow tokenizers when passing offset_mapping
This commit is contained in:
parent
1cbac6867b
commit
3ec8171bed
@ -304,7 +304,9 @@ class TokenClassificationPipeline(Pipeline):
|
||||
start_ind = start_ind.item()
|
||||
end_ind = end_ind.item()
|
||||
word_ref = sentence[start_ind:end_ind]
|
||||
if getattr(self.tokenizer._tokenizer.model, "continuing_subword_prefix", None):
|
||||
if getattr(self.tokenizer, "_tokenizer", None) and getattr(
|
||||
self.tokenizer._tokenizer.model, "continuing_subword_prefix", None
|
||||
):
|
||||
# This is a BPE, word aware tokenizer, there is a correct way
|
||||
# to fuse tokens
|
||||
is_subword = len(word) != len(word_ref)
|
||||
|
Loading…
Reference in New Issue
Block a user