BertJapaneseTokenizer accepts options for MeCab (#3566)

* BertJapaneseTokenizer accepts options for MeCab

* black

* fix mecab_option to Optional[str]
Yohei Tamura 2020-04-04 00:12:19 +09:00 committed by GitHub
parent 216e167ce6
commit 8594dd80dd
2 changed files with 22 additions and 3 deletions
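
For context, here is a minimal usage sketch of the new option. The checkpoint name and dictionary path are illustrative assumptions, not part of this diff; extra kwargs given to `from_pretrained` are forwarded to the tokenizer constructor:

    from transformers import BertJapaneseTokenizer

    # Hypothetical checkpoint; any BERT-Japanese vocab works the same way.
    # The -d path must match wherever jumandic is installed locally.
    tokenizer = BertJapaneseTokenizer.from_pretrained(
        "bert-base-japanese",
        word_tokenizer_type="mecab",
        mecab_kwargs={"mecab_option": "-d /usr/local/lib/mecab/dic/jumandic"},
    )
    print(tokenizer.tokenize("アップルストアでiPhone８が発売された。"))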

@@ -89,6 +89,7 @@ class BertJapaneseTokenizer(BertTokenizer):
         pad_token="[PAD]",
         cls_token="[CLS]",
         mask_token="[MASK]",
+        mecab_kwargs=None,
         **kwargs
     ):
         """Constructs a MecabBertTokenizer.
@@ -106,6 +107,7 @@ class BertJapaneseTokenizer(BertTokenizer):
                 Type of word tokenizer.
             **subword_tokenizer_type**: (`optional`) string (default "wordpiece")
                 Type of subword tokenizer.
+            **mecab_kwargs**: (`optional`) dict passed to `MecabTokenizer` constructor (default None)
         """
         super(BertTokenizer, self).__init__(
             unk_token=unk_token,
@@ -134,7 +136,9 @@ class BertJapaneseTokenizer(BertTokenizer):
                 do_lower_case=do_lower_case, never_split=never_split, tokenize_chinese_chars=False
             )
         elif word_tokenizer_type == "mecab":
-            self.word_tokenizer = MecabTokenizer(do_lower_case=do_lower_case, never_split=never_split)
+            self.word_tokenizer = MecabTokenizer(
+                do_lower_case=do_lower_case, never_split=never_split, **(mecab_kwargs or {})
+            )
         else:
             raise ValueError("Invalid word_tokenizer_type '{}' is specified.".format(word_tokenizer_type))
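
The `**(mecab_kwargs or {})` guard expands the None default to zero extra keyword arguments, so existing callers are unaffected. A small sketch of the forwarding (module path as of this commit; the dictionary path is an example value):

    from transformers.tokenization_bert_japanese import MecabTokenizer

    # No kwargs: `None or {}` evaluates to {}, which unpacks to nothing.
    MecabTokenizer(do_lower_case=False, never_split=None, **(None or {}))

    # A populated dict: each entry becomes a keyword argument.
    opts = {"mecab_option": "-d /usr/local/lib/mecab/dic/jumandic"}
    MecabTokenizer(do_lower_case=False, never_split=None, **(opts or {}))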
@@ -164,7 +168,7 @@ class BertJapaneseTokenizer(BertTokenizer):
 class MecabTokenizer(object):
     """Runs basic tokenization with MeCab morphological parser."""

-    def __init__(self, do_lower_case=False, never_split=None, normalize_text=True):
+    def __init__(self, do_lower_case=False, never_split=None, normalize_text=True, mecab_option=None):
         """Constructs a MecabTokenizer.

         Args:
@@ -176,6 +180,7 @@ class MecabTokenizer(object):
                 List of token not to split.
             **normalize_text**: (`optional`) boolean (default True)
                 Whether to apply unicode normalization to text before tokenization.
+            **mecab_option**: (`optional`) string passed to `MeCab.Tagger` constructor (default "")
         """
         self.do_lower_case = do_lower_case
         self.never_split = never_split if never_split is not None else []
@@ -183,7 +188,7 @@
         import MeCab

-        self.mecab = MeCab.Tagger()
+        self.mecab = MeCab.Tagger(mecab_option) if mecab_option is not None else MeCab.Tagger()

     def tokenize(self, text, never_split=None, **kwargs):
         """Tokenizes a piece of text."""

@@ -91,6 +91,20 @@ class BertJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
             ["アップルストア", "で", "iphone", "8", "が", "発売", "さ", "れ", "た", "。"],
         )

+    def test_mecab_tokenizer_with_option(self):
+        try:
+            tokenizer = MecabTokenizer(
+                do_lower_case=True, normalize_text=False, mecab_option="-d /usr/local/lib/mecab/dic/jumandic"
+            )
+        except RuntimeError:
+            # if the dictionary doesn't exist on the system, the constructor raises this error.
+            return
+
+        self.assertListEqual(
+            tokenizer.tokenize(" \tアップルストアでiPhone８ が  \n 発売された　。  "),
+            ["アップルストア", "で", "iPhone", "８", "が", "発売", "さ", "れた", "\u3000", "。"],
+        )
+
     def test_mecab_tokenizer_no_normalize(self):
         tokenizer = MecabTokenizer(normalize_text=False)