tokenizers is now a real dependency

Anthony MOI 2019-12-24 13:32:41 -05:00
parent 2818e50569
commit 734d29b03d
3 changed files with 54 additions and 66 deletions

setup.py

@@ -86,6 +86,7 @@ setup(
     packages=find_packages("src"),
     install_requires=[
         "numpy",
+        "tokenizers",
         # accessing files from S3 directly
         "boto3",
         # filesystem locks e.g. to prevent parallel downloads
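The net effect of this hunk is that `tokenizers` is installed automatically with the package instead of being an optional extra that each module has to import defensively. A trimmed sketch of the pattern (the `name` and `package_dir` values below are illustrative, not copied from the full setup.py):

# Sketch: declare tokenizers as a regular install-time dependency,
# so modules can rely on a plain `import tokenizers` with no guard.
from setuptools import find_packages, setup

setup(
    name="transformers",              # illustrative; real metadata omitted
    package_dir={"": "src"},          # assumed, to match find_packages("src")
    packages=find_packages("src"),
    install_requires=[
        "numpy",
        "tokenizers",                 # now a hard requirement
        "boto3",
    ],
)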

src/transformers/tokenization_bert.py

@@ -20,6 +20,8 @@ import logging
 import os
 import unicodedata
+import tokenizers as tk
 from .tokenization_utils import FastPreTrainedTokenizer, PreTrainedTokenizer
@@ -552,10 +554,6 @@ class BertTokenizerFast(FastPreTrainedTokenizer):
         add_special_tokens=True,
         **kwargs
     ):
-        try:
-            from tokenizers import Tokenizer, models, pre_tokenizers, decoders, processors
         super(BertTokenizerFast, self).__init__(
             unk_token=unk_token,
             sep_token=sep_token,
@@ -565,21 +563,21 @@ class BertTokenizerFast(FastPreTrainedTokenizer):
             **kwargs
         )
-        self._tokenizer = Tokenizer(models.WordPiece.from_files(vocab_file, unk_token=unk_token))
+        self._tokenizer = tk.Tokenizer(tk.models.WordPiece.from_files(vocab_file, unk_token=unk_token))
         self._update_special_tokens()
         self._tokenizer.with_pre_tokenizer(
-            pre_tokenizers.BertPreTokenizer.new(
+            tk.pre_tokenizers.BertPreTokenizer.new(
                 do_basic_tokenize=do_basic_tokenize,
                 do_lower_case=do_lower_case,
                 tokenize_chinese_chars=tokenize_chinese_chars,
                 never_split=never_split if never_split is not None else [],
             )
         )
-        self._tokenizer.with_decoder(decoders.WordPiece.new())
+        self._tokenizer.with_decoder(tk.decoders.WordPiece.new())
         if add_special_tokens:
             self._tokenizer.with_post_processor(
-                processors.BertProcessing.new(
+                tk.processors.BertProcessing.new(
                     (sep_token, self._tokenizer.token_to_id(sep_token)),
                     (cls_token, self._tokenizer.token_to_id(cls_token)),
                 )
@@ -593,8 +591,4 @@ class BertTokenizerFast(FastPreTrainedTokenizer):
             self.pad_token_type_id,
             self.pad_token,
         )
-        self._decoder = decoders.WordPiece.new()
-        except (AttributeError, ImportError) as e:
-            logger.error("Make sure you installed `tokenizers` with `pip install tokenizers==0.0.8`")
-            raise e
+        self._decoder = tk.decoders.WordPiece.new()
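The hunks above drop the guarded, function-local import in favor of the module-level `import tokenizers as tk`. As a standalone illustration, the backend that `BertTokenizerFast.__init__` assembles can be sketched with only the pre-1.0 `tokenizers` calls visible in this diff; the vocab path and the `[UNK]`/`[SEP]`/`[CLS]` strings are assumed BERT defaults, not taken from the hunks:

# Sketch of the WordPiece backend wiring shown above (pre-1.0 tokenizers API).
import tokenizers as tk

vocab_file = "vocab.txt"  # placeholder path to a WordPiece vocab

tokenizer = tk.Tokenizer(tk.models.WordPiece.from_files(vocab_file, unk_token="[UNK]"))
tokenizer.with_pre_tokenizer(
    tk.pre_tokenizers.BertPreTokenizer.new(
        do_basic_tokenize=True,
        do_lower_case=True,
        tokenize_chinese_chars=True,
        never_split=[],
    )
)
tokenizer.with_decoder(tk.decoders.WordPiece.new())
tokenizer.with_post_processor(
    tk.processors.BertProcessing.new(
        ("[SEP]", tokenizer.token_to_id("[SEP]")),
        ("[CLS]", tokenizer.token_to_id("[CLS]")),
    )
)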

src/transformers/tokenization_gpt2.py

@@ -21,6 +21,7 @@ import os
 from functools import lru_cache
 import regex as re
+import tokenizers as tk
 from .tokenization_utils import FastPreTrainedTokenizer, PreTrainedTokenizer
@@ -267,18 +268,14 @@ class GPT2TokenizerFast(FastPreTrainedTokenizer):
         truncation_strategy="longest_first",
         **kwargs
     ):
-        try:
-            from tokenizers import Tokenizer, models, pre_tokenizers, decoders
         super(GPT2TokenizerFast, self).__init__(
             bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, **kwargs
         )
-        self._tokenizer = Tokenizer(models.BPE.from_files(vocab_file, merges_file))
+        self._tokenizer = tk.Tokenizer(tk.models.BPE.from_files(vocab_file, merges_file))
         self._update_special_tokens()
-        self._tokenizer.with_pre_tokenizer(pre_tokenizers.ByteLevel.new(add_prefix_space))
-        self._tokenizer.with_decoder(decoders.ByteLevel.new())
+        self._tokenizer.with_pre_tokenizer(tk.pre_tokenizers.ByteLevel.new(add_prefix_space))
+        self._tokenizer.with_decoder(tk.decoders.ByteLevel.new())
         if max_length:
             self._tokenizer.with_truncation(max_length, stride, truncation_strategy)
         self._tokenizer.with_padding(
@@ -288,8 +285,4 @@ class GPT2TokenizerFast(FastPreTrainedTokenizer):
             self.pad_token_type_id,
             self.pad_token if self.pad_token is not None else "",
         )
-        self._decoder = decoders.ByteLevel.new()
-        except (AttributeError, ImportError) as e:
-            logger.error("Make sure you installed `tokenizers` with `pip install tokenizers==0.0.8`")
-            raise e
+        self._decoder = tk.decoders.ByteLevel.new()
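Likewise for GPT-2: a sketch of the byte-level BPE backend wired up above, restricted to the calls that appear in this diff; the file paths, the `add_prefix_space` value, and the `max_length`/`stride` numbers are placeholder assumptions:

# Sketch of the byte-level BPE backend wiring shown above (pre-1.0 tokenizers API).
import tokenizers as tk

vocab_file = "vocab.json"   # placeholder BPE vocab
merges_file = "merges.txt"  # placeholder BPE merges

tokenizer = tk.Tokenizer(tk.models.BPE.from_files(vocab_file, merges_file))
tokenizer.with_pre_tokenizer(tk.pre_tokenizers.ByteLevel.new(False))  # add_prefix_space value assumed
tokenizer.with_decoder(tk.decoders.ByteLevel.new())

# Optional length handling, mirroring the `if max_length:` branch above.
max_length, stride = 1024, 0  # placeholder values
tokenizer.with_truncation(max_length, stride, "longest_first")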