Compare commits

...

4 Commits

Author SHA1 Message Date
Arthur Zucker a7500bc2de let's see 2024-05-17 16:31:55 +02:00
Arthur Zucker d1042545af fix 2024-05-17 16:24:58 +02:00
Arthur Zucker fe890353c3 don't use fixtures 2024-05-17 16:18:29 +02:00
Arthur Zucker 6d1c71d71f better tests 2024-05-17 15:42:13 +02:00
2 changed files with 80 additions and 111 deletions

src/transformers/convert_slow_tokenizer.py (View File)

@@ -23,6 +23,7 @@ import warnings
 from typing import Dict, List, Tuple
 from packaging import version
 from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, processors
 from tokenizers.models import BPE, Unigram, WordPiece
@@ -1337,7 +1338,10 @@ class GemmaConvert(SpmConverter):
 class LlamaConverter(SpmConverter):
     handle_byte_fallback = True
+    def __init__(self, original_tokenizer, legacy=True, **kwargs):
+        super().__init__(original_tokenizer, **kwargs)
+        self.legacy = legacy
     def vocab(self, proto):
         vocab = [
             (self.original_tokenizer.convert_ids_to_tokens(0), 0.0),
@@ -1352,14 +1356,17 @@ class LlamaConverter(SpmConverter):
         return unk_id
     def decoder(self, replacement, add_prefix_space):
-        sequence = [
-            decoders.Replace("▁", " "),
-            decoders.ByteFallback(),
-            decoders.Fuse(),
-        ]
-        if add_prefix_space:
-            sequence += [decoders.Strip(content=" ", left=1)]
-        return decoders.Sequence(sequence)
+        if getattr(self.original_tokenizer, "legacy", self.legacy):
+            sequence = [
+                decoders.Replace("▁", " "),
+                decoders.ByteFallback(),
+                decoders.Fuse(),
+            ]
+            if add_prefix_space:
+                sequence += [decoders.Strip(content=" ", left=1)]
+            return decoders.Sequence(sequence)
+        else:
+            return super().decoder(replacement, add_prefix_space)
     def tokenizer(self, proto):
         model_type = proto.trainer_spec.model_type
@@ -1393,16 +1400,16 @@ class LlamaConverter(SpmConverter):
         return tokenizer
     def normalizer(self, proto):
-        if getattr(self.original_tokenizer, "legacy", True):
+        if getattr(self.original_tokenizer, "legacy", self.legacy):
             sequence = []
             if getattr(self.original_tokenizer, "add_prefix_space", True):
                 sequence += [normalizers.Prepend(prepend="▁")]
             sequence += [normalizers.Replace(pattern=" ", content="▁")]
             return normalizers.Sequence(sequence)
-        return None  # non-legacy, no normalizer
+        return None
     def pre_tokenizer(self, replacement, add_prefix_space):
-        if not getattr(self.original_tokenizer, "legacy", True):  # non-legacy, we need a replace
+        if not getattr(self.original_tokenizer, "legacy", self.legacy):
             prepend_scheme = _get_prepend_scheme(add_prefix_space, self.original_tokenizer)
             return pre_tokenizers.Metaspace(replacement=replacement, prepend_scheme=prepend_scheme, split=False)
         return None
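
The net effect of the converter changes is that the `legacy` and `add_prefix_space` switches now reach the decoder, normalizer and pre-tokenizer that `LlamaConverter` builds. A minimal sketch of how this path gets exercised (mirroring the updated tests; the checkpoint name and the printed output are illustrative, not taken from the diff):

```python
from transformers import LlamaTokenizerFast

# from_slow=True forces conversion through LlamaConverter, so the flags below
# flow into the components assembled in the hunks above.
tok = LlamaTokenizerFast.from_pretrained(
    "huggyllama/llama-7b", legacy=False, from_slow=True, add_prefix_space=False
)
print(tok.tokenize("Hey <s>I"))  # no "▁" is prepended when add_prefix_space=False
```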

tests/models/llama/test_tokenization_llama.py (View File)

@@ -20,6 +20,7 @@ import tempfile
 import unittest
 from datasets import load_dataset
+from parameterized import parameterized
 from transformers import (
     SPIECE_UNDERLINE,
@@ -601,11 +602,15 @@ class LlamaIntegrationTest(unittest.TestCase):
         decoded_tokens = tokenizer.decode(input_ids)
         self.assertEqual(decoded_tokens, "hello")
-    def test_no_prefix_space(self):
-        tokenizer = LlamaTokenizerFast.from_pretrained(
-            "huggyllama/llama-7b", legacy=False, from_slow=True, add_prefix_space=False
+    @parameterized.expand([LlamaTokenizerFast, LlamaTokenizer])
+    def test_no_prefix_space(self, tokenizer_class):
+        tokenizer = tokenizer_class.from_pretrained(
+            "huggyllama/llama-7b",
+            legacy=False,
+            from_slow=tokenizer_class == LlamaTokenizerFast,
+            add_prefix_space=False,
         )
-        tokenizer.add_tokens([AddedToken("<REPR_END>", rstrip=True, lstrip=True)], special_tokens=False)
+        tokenizer.add_tokens([AddedToken("<REPR_END>", rstrip=False, lstrip=True)], special_tokens=False)
         example_inputs = tokenizer.tokenize("<REPR_END>inform<s>. Hey. .")
         self.assertEqual(example_inputs, ["<REPR_END>", "in", "form", "<s>", ".", "▁Hey", ".", "▁▁▁▁▁▁", "▁."])
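
Switching `<REPR_END>` to `rstrip=False` means the added token no longer swallows the whitespace that follows it; the next hunk pins this down with the `"Me <REPR_END> inform"` assertions. A rough sketch of that behaviour (checkpoint and expected tokens as in the tests, not re-verified here):

```python
from tokenizers import AddedToken
from transformers import LlamaTokenizerFast

tok = LlamaTokenizerFast.from_pretrained(
    "huggyllama/llama-7b", legacy=False, from_slow=True, add_prefix_space=False
)
# lstrip=True eats the space *before* <REPR_END>; rstrip=False leaves the space
# *after* it alone, so " inform" keeps its leading "▁" piece.
tok.add_tokens([AddedToken("<REPR_END>", rstrip=False, lstrip=True)], special_tokens=False)
ids = tok.encode("Me <REPR_END> inform", add_special_tokens=False)
print(tok.convert_ids_to_tokens(ids))  # the tests expect ["Me", "<REPR_END>", "▁inform"]
```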
@@ -617,13 +622,16 @@ class LlamaIntegrationTest(unittest.TestCase):
             tokenizer.encode("<REPR_END>inform", add_special_tokens=False), spaces_between_special_tokens=False
         )
         self.assertEqual(out1, "<REPR_END>inform")
+        outs = tokenizer.encode("Me <REPR_END> inform", add_special_tokens=False)
+        tokens = tokenizer.convert_ids_to_tokens(outs)
+        self.assertEqual(tokens, ["Me", "<REPR_END>", "▁inform"])
         out2 = tokenizer.decode(
-            tokenizer.encode("<REPR_END>inform", add_special_tokens=False), spaces_between_special_tokens=True
+            tokenizer.encode("Me <REPR_END> inform", add_special_tokens=False), spaces_between_special_tokens=False
         )
-        # decoding strips the added prefix space.
-        self.assertEqual(out2, "<REPR_END>inform")
-        input_ids = tokenizer.encode("<REPR_END>inform", add_special_tokens=False)
-        self.assertEqual(input_ids, [32000, 262, 689])  # 29871 is the spiece underline, '▁' added as it should
+        self.assertEqual(out2, "Me<REPR_END> inform")
+        input_ids = tokenizer.encode("Me<REPR_END> inform", add_special_tokens=False)
+        self.assertEqual(input_ids, [6816, 32000, 1871])
         out2 = tokenizer.decode(
             tokenizer.encode(" <REPR_END>inform", add_special_tokens=False), spaces_between_special_tokens=False
@@ -658,8 +666,17 @@ class LlamaIntegrationTest(unittest.TestCase):
         decoded_tokens = tokenizer.decode(input_ids)
         self.assertEqual(decoded_tokens, "hello")
-    def test_some_edge_cases(self):
-        tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", legacy=False)
+        input_ids = tokenizer.encode("<s>hello<s>", add_special_tokens=False)
+        self.assertEqual(input_ids, [1, 12199, 1])
+        decoded_tokens = tokenizer.decode(input_ids)
+        self.assertEqual(decoded_tokens, "<s>hello<s>")
+        decoded_tokens = tokenizer.decode(input_ids, skip_special_tokens=True)
+        self.assertEqual(decoded_tokens, "hello")
+    @parameterized.expand([LlamaTokenizerFast, LlamaTokenizer])
+    def test_some_edge_cases(self, tokenizer_class):
+        tokenizer = tokenizer_class.from_pretrained("huggyllama/llama-7b", legacy=False)
         sp_tokens = tokenizer.sp_model.encode("<s>>", out_type=str)
         self.assertEqual(sp_tokens, ["<", "s", ">>"])
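
The `sp_model.encode("<s>>", out_type=str)` context lines show why these edge cases exist: the raw sentencepiece model has no notion of `<s>` being special and splits it into ordinary pieces, while the tokenizer layer maps it to a single special token. A small sketch (slow tokenizer only, since only it exposes `sp_model`; the expected pieces are the ones asserted above):

```python
from transformers import LlamaTokenizer

tok = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", legacy=False)
# Raw sentencepiece view: "<s>>" is plain text and gets split into pieces.
print(tok.sp_model.encode("<s>>", out_type=str))  # ["<", "s", ">>"] per the test above
# Tokenizer view: "<s>" is recognised as the BOS special token and kept whole.
print(tok.tokenize("<s>hello<s>"))
```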
@@ -700,8 +717,9 @@ class LlamaIntegrationTest(unittest.TestCase):
         tokenizer = LlamaTokenizerFast(SAMPLE_VOCAB, eos_token=None, add_bos_token=True, add_eos_token=True)
     @require_jinja
-    def test_tokenization_for_chat(self):
-        tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", legacy=False)
+    @parameterized.expand([LlamaTokenizerFast, LlamaTokenizer])
+    def test_tokenization_for_chat(self, tokenizer_class):
+        tokenizer = tokenizer_class.from_pretrained("huggyllama/llama-7b", legacy=False)
         test_chats = [
             [{"role": "system", "content": "You are a helpful chatbot."}, {"role": "user", "content": "Hello!"}],
@@ -730,96 +748,40 @@ class LlamaIntegrationTest(unittest.TestCase):
 @require_tokenizers
 class CommonSpmIntegrationTests(unittest.TestCase):
     """
-    A class that regroups important test to make sure that we properly handle the special tokens.
+    A class that regroups important tests to make sure that we properly handle the special tokens.
     """
     @classmethod
     def setUpClass(cls):
-        tokenizer = LlamaTokenizer(SAMPLE_VOCAB, extra_ids=0, add_bos_token=False, legacy=False)
-        tokenizer.add_special_tokens({"additional_special_tokens": [AddedToken("<s>", rstrip=False, lstrip=False)]})
-        cls.tokenizer = tokenizer
-        return cls
+        cls.tokenizers = []
+        for tokenizer_class in [LlamaTokenizer, LlamaTokenizerFast]:
+            tokenizer = tokenizer_class.from_pretrained("huggyllama/llama-7b", add_bos_token=False, legacy=False, from_slow=tokenizer_class == LlamaTokenizerFast)
+            tokenizer.add_special_tokens(
+                {"additional_special_tokens": [AddedToken("<s>", rstrip=False, lstrip=False)]}
+            )
+            cls.tokenizers.append(tokenizer)
-    def test_add_dummy_prefix(self):
-        # make sure `'▁'` is prepended, and outputs match sp_model's
-        # `sentencepiece.NormalizerSpec.add_dummy_prefix` attribute
-        input_ids = self.tokenizer.encode(". Hello")
-        self.assertEqual(input_ids, [7, 4, 156, 86, 20])
-        sp_encode = self.tokenizer.sp_model.encode(". Hello")
-        self.assertEqual(input_ids, [7] + sp_encode)
-        tokens = self.tokenizer.tokenize(". Hello")
-        self.assertEqual(tokens, ["▁", ".", "▁He", "ll", "o"])
+    # fmt: off
+    @parameterized.expand([
+        ('. Hello', [869, 15043], ["▁", ".", "▁Hello"]),
+        ('', [], []),
+        (' ', [259], ["▁▁"]),
+        ('▁', [259], ["▁▁"]),
+        (" . Hello", [539, 869, 15043], ['▁▁▁▁▁▁', '▁.', '▁Hello']),
+        ("▁He is not", [940, 338, 451], ['▁He', '▁is', '▁not']),
+        ("▁He is not<s> ▁He", [940, 338, 451, 1, 795, 940], ['▁He', '▁is', '▁not', '<s>', '▁▁▁▁▁▁▁▁▁▁▁▁▁', '▁He']),
+        ("▁He is not ▁He", [940, 338, 451, 795, 940], ['▁He', '▁is', '▁not', '▁▁▁▁▁▁▁▁▁▁▁▁▁', '▁He']),
+        ("Hey <s>I", [18637, 1, 29902], ['▁Hey', '<s>', 'I']),
+        ("Hello, <s>,", [15043, 29892, 1, 29892], ['▁Hello', ',', '<s>', ',']),
+        (" <s> ,", [1, 1919], ['<s>', '▁,']),
+        ("No <s> ▁He", [1939, 1, 29871, 940], ['▁No', '<s>', '▁', '▁He']),
+    ])
+    # fmt: on
+    def test_tokenization(self, text, expected_input_ids, expected_tokens):
+        for tokenizer in self.tokenizers:
+            input_ids = tokenizer.encode(text)
+            with self.subTest(tokenizer=type(tokenizer).__name__, mode="encode "):
+                self.assertEqual(input_ids, expected_input_ids)
-        tokens = self.tokenizer.tokenize("")
-        self.assertEqual(tokens, [])
-        self.assertEqual(tokens, self.tokenizer.sp_model.encode("", out_type=str))
-        tokens = self.tokenizer.tokenize(" ")
-        self.assertEqual(tokens, [])
-        self.assertEqual(tokens, self.tokenizer.sp_model.encode(" ", out_type=str))
-        tokens = self.tokenizer.tokenize("▁")
-        self.assertEqual(tokens, [])
-        self.assertEqual(tokens, self.tokenizer.sp_model.encode("▁", out_type=str))
-    def test_remove_extra_whitespaces(self):
-        # make sure the extra spaces are eaten. Since the sample vocab does not have
-        # `______`. sentencepiece.NormalizerSpec.remove_extra_whitespaces attribute is set to False
-        input_ids = self.tokenizer.encode(" . Hello")
-        self.assertEqual(input_ids, [7, 4, 156, 86, 20])
-        sp_encode = self.tokenizer.sp_model.encode(" . Hello")
-        self.assertEqual(input_ids, [7] + sp_encode)
-        tokens = self.tokenizer.tokenize(" . Hello")
-        self.assertEqual(tokens, ["▁", ".", "▁He", "ll", "o"])
-        # `'▁'` is also a whitespace
-        input_ids = self.tokenizer.encode("▁He is not")
-        self.assertEqual(input_ids, [156, 46, 44])
-        tokens = self.tokenizer.tokenize("▁He is not")
-        sp_encode = [
-            self.tokenizer.sp_model.piece_to_id("▁He"),
-            self.tokenizer.sp_model.piece_to_id("▁is"),
-            self.tokenizer.sp_model.piece_to_id("▁not"),
-        ]
-        self.assertEqual(input_ids, sp_encode)
-        self.assertEqual(tokens, ["▁He", "▁is", "▁not"])  # no extra space added
-        input_ids = self.tokenizer.encode("▁He is not<s> ▁He")
-        self.assertEqual(input_ids, [156, 46, 44, 1, 156])
-        tokens = self.tokenizer.tokenize("▁He is not<s> ▁He")
-        self.assertEqual(tokens, ["▁He", "▁is", "▁not", "<s>", "▁He"])  # spaces are eaten by spm + our strip
-        # make sure that the output after the extra id is the same as if
-        # extra_id was not there
-        input_ids = self.tokenizer.encode("▁He is not ▁He")
-        self.assertEqual(input_ids, [156, 46, 44, 156])
-        tokens = self.tokenizer.tokenize("▁He is not ▁He")
-        self.assertEqual(tokens, ["▁He", "▁is", "▁not", "▁He"])  # spaces are eaten by spm even if not start
-    def test_character_after_special_token(self):
-        # Make sure that `tokenizer.tokenize` is similar to
-        # adding the equivalent special token to the vocab
-        input_ids = self.tokenizer.encode("Hey <s>I")
-        self.assertEqual(input_ids, [156, 30, 1, 100])
-        sp_encode = self.tokenizer.sp_model.encode("Hey .I")
-        # the last token should be 100
-        self.assertEqual(input_ids[-1], sp_encode[-1])
-        tokens = self.tokenizer.tokenize("<s>I")
-        self.assertEqual(tokens, ["<s>", "I"])
-        input_ids = self.tokenizer.encode("Hello, <s>,")
-        self.assertEqual(input_ids, [156, 86, 20, 3, 1, 3])
-        tokens = self.tokenizer.tokenize("Hello, <s>,")
-        self.assertEqual(tokens, ["▁He", "ll", "o", ",", "<s>", ","])
-    def test_special_tokens_strip(self):
-        input_ids = self.tokenizer.encode(" <s> ,")
-        self.assertEqual(input_ids, [1, 7, 3])
-        tokens = self.tokenizer.tokenize(" <s> ,")
-        # spaces are eaten by rstrip / lstrip + spm sp_model.encode(" ") = []
-        self.assertEqual(tokens, ["<s>", "▁", ","])
-        input_ids = self.tokenizer.encode("No <s> ▁He")
-        self.assertEqual(input_ids, [284, 1, 156])
-        tokens = self.tokenizer.tokenize("No <s> ▁He")
-        self.assertEqual(tokens, ["▁No", "<s>", "▁He"])  # spaces are eaten by rstrip / lstrip
+            with self.subTest(tokenizer=type(tokenizer).__name__, mode="decode "):
+                self.assertEqual(tokenizer.decode(tokenizer.encode(text, add_special_tokens=False), spaces_between_special_tokens=False), text)
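
For reference, the new integration expectations can be reproduced outside the test harness roughly as follows (a sketch mirroring the new setUpClass; the ids in the assertion are copied from the parameterized cases above, not independently re-verified):

```python
from tokenizers import AddedToken
from transformers import LlamaTokenizer

# Same setup as the new setUpClass: no BOS prepended, non-legacy behaviour, and
# "<s>" registered as an additional special token that keeps surrounding spaces.
tok = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", add_bos_token=False, legacy=False)
tok.add_special_tokens({"additional_special_tokens": [AddedToken("<s>", rstrip=False, lstrip=False)]})

assert tok.encode("No <s> ▁He") == [1939, 1, 29871, 940]  # '▁No', '<s>', '▁', '▁He'
# The decode subtest requires an exact round-trip for every case:
assert tok.decode(tok.encode("Hey <s>I", add_special_tokens=False), spaces_between_special_tokens=False) == "Hey <s>I"
```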