diff --git a/tests/tokenization_openai_test.py b/tests/tokenization_openai_test.py
index 8a67015ffd..6213eb1b03 100644
--- a/tests/tokenization_openai_test.py
+++ b/tests/tokenization_openai_test.py
@@ -17,7 +17,6 @@ from __future__ import absolute_import, division, print_function, unicode_litera
 import os
 import unittest
 import json
-from io import open
 
 from pytorch_pretrained_bert.tokenization_openai import OpenAIGPTTokenizer
 
@@ -32,10 +31,10 @@ class OpenAIGPTTokenizationTest(unittest.TestCase):
                  "low", "lowest", "newer", "wider"]
         vocab_tokens = dict(zip(vocab, range(len(vocab))))
         merges = ["#version: 0.2", "l o", "lo w", "e r", ""]
-        with open("/tmp/openai_tokenizer_vocab_test.json", "wb") as fp:
+        with open("/tmp/openai_tokenizer_vocab_test.json", "w") as fp:
             json.dump(vocab_tokens, fp)
             vocab_file = fp.name
-        with open("/tmp/openai_tokenizer_merges_test.txt", "w", encoding='utf-8') as fp:
+        with open("/tmp/openai_tokenizer_merges_test.txt", "w") as fp:
             fp.write("\n".join(merges))
             merges_file = fp.name
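
Context for the mode change, as a minimal standalone sketch (not part of the patch; the use of tempfile instead of hard-coded /tmp paths is illustrative): under Python 3, json.dump writes str, so passing it a file opened in binary mode ("wb") raises a TypeError, which the switch to "w" avoids. The merges written by this test are plain ASCII, so the builtin open's default encoding suffices and the io.open import with an explicit encoding='utf-8' can be dropped along with Python 2 support.

    import json
    import tempfile

    vocab = ["l", "o", "w", "e", "r"]
    vocab_tokens = dict(zip(vocab, range(len(vocab))))
    merges = ["#version: 0.2", "l o", "lo w", "e r", ""]

    # json.dump produces str in Python 3, so the target must be a text-mode
    # file; with mode="wb" this call would raise a TypeError.
    with tempfile.NamedTemporaryFile(mode="w", suffix=".json", delete=False) as fp:
        json.dump(vocab_tokens, fp)
        vocab_file = fp.name

    # The merges here are ASCII, so the builtin open in text mode works
    # without io.open or an explicit encoding argument.
    with tempfile.NamedTemporaryFile(mode="w", suffix=".txt", delete=False) as fp:
        fp.write("\n".join(merges))
        merges_file = fp.name

    # Round-trip check: the dumped vocab reads back unchanged.
    with open(vocab_file) as fp:
        assert json.load(fp) == vocab_tokens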