# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
2019-12-22 23:20:32 +08:00
|
|
|
|
2019-06-21 17:09:51 +08:00
|
|
|
|
|
|
|
import os
|
2019-12-22 22:34:15 +08:00
|
|
|
import unittest
|
2019-06-21 17:09:51 +08:00
|
|
|
|
2020-07-01 22:31:17 +08:00
|
|
|
from transformers.testing_utils import slow
|
2019-12-21 22:57:32 +08:00
|
|
|
from transformers.tokenization_xlnet import SPIECE_UNDERLINE, XLNetTokenizer
|
2019-06-21 17:09:51 +08:00
|
|
|
|
2019-12-22 22:34:15 +08:00
|
|
|
from .test_tokenization_common import TokenizerTesterMixin
|
2019-07-05 17:20:27 +08:00
|
|
|
|
2019-12-21 22:57:32 +08:00
|
|
|
|
2019-12-21 22:46:46 +08:00
|
|
|
SAMPLE_VOCAB = os.path.join(os.path.dirname(os.path.abspath(__file__)), "fixtures/test_sentencepiece.model")
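# Note: SPIECE_UNDERLINE ("▁") is the marker SentencePiece uses for a preceding space,
# so an expected piece such as "▁This" below stands for the word "This" at a word boundary.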


class XLNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):

    tokenizer_class = XLNetTokenizer

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_full_tokenizer(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4])
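
        # "9" and "é" are not in the sample vocab, so they map to the unknown id (0)
        # above and decode back to "<unk>" below.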
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )

    def test_tokenizer_lower(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=True)
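        # do_lower_case=True lowercases the input, and with the default keep_accents=False
        # accents are stripped too, so "I" comes back as "i" and "falsé" ends in "se" below.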
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "",
                "i",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "se",
                ".",
            ],
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["▁he", "ll", "o"])

    def test_tokenizer_no_lower(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=False)
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "se",
                ".",
            ],
        )

    @slow
    def test_sequence_builders(self):
        tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
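
        # XLNet appends its special tokens at the end of the sequence: <sep> (id 4) after
        # each segment and <cls> (id 3) last, which is what the two asserts below check.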
        assert encoded_sentence == text + [4, 3]
        assert encoded_pair == text + [4] + text_2 + [4, 3]