# coding=utf-8
# Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch OpenAI GPT model."""

from __future__ import absolute_import, division, print_function, unicode_literals

import collections
import copy
import json
import logging
import math
import os
import shutil
import tarfile
import tempfile
import sys
from io import open

import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss
from torch.nn.parameter import Parameter

from .file_utils import cached_path, CONFIG_NAME, WEIGHTS_NAME
from .modeling import BertLayerNorm as LayerNorm

logger = logging.getLogger(__name__)

PRETRAINED_MODEL_ARCHIVE_MAP = {"openai-gpt": "https://s3.amazonaws.com/models.huggingface.co/bert/openai-gpt-pytorch_model.bin"}
PRETRAINED_CONFIG_ARCHIVE_MAP = {"openai-gpt": "https://s3.amazonaws.com/models.huggingface.co/bert/openai-gpt-config.json"}


def load_tf_weights_in_openai_gpt(model, openai_checkpoint_folder_path):
    """ Load tf pre-trained weights in a pytorch model (from NumPy arrays here)
    """
    import re
    import numpy as np

    print("Loading weights...")
    names = json.load(open(openai_checkpoint_folder_path + '/parameters_names.json', "r", encoding='utf-8'))
    shapes = json.load(open(openai_checkpoint_folder_path + '/params_shapes.json', "r", encoding='utf-8'))
    offsets = np.cumsum([np.prod(shape) for shape in shapes])
    init_params = [np.load(openai_checkpoint_folder_path + '/params_{}.npy'.format(n)) for n in range(10)]
    init_params = np.split(np.concatenate(init_params, 0), offsets)[:-1]
    init_params = [param.reshape(shape) for param, shape in zip(init_params, shapes)]

    # This was used when we had a single embedding matrix for positions and tokens
    # init_params[0] = np.concatenate([init_params[1], init_params[0]], 0)
    # del init_params[1]
    init_params = [arr.squeeze() for arr in init_params]

    try:
        assert model.tokens_embed.weight.shape == init_params[1].shape
        assert model.positions_embed.weight.shape == init_params[0].shape
    except AssertionError as e:
        e.args += (model.tokens_embed.weight.shape, init_params[1].shape)
        e.args += (model.positions_embed.weight.shape, init_params[0].shape)
        raise

    model.tokens_embed.weight.data = torch.from_numpy(init_params[1])
    model.positions_embed.weight.data = torch.from_numpy(init_params[0])
    names.pop(0)
    # Pop position and token embedding arrays
    init_params.pop(0)
    init_params.pop(0)

    for name, array in zip(names, init_params):  # names[1:n_transfer], init_params[1:n_transfer]):
        name = name[6:]  # skip "model/"
        assert name[-2:] == ":0"
        name = name[:-2]
        name = name.split('/')
        pointer = model
        for m_name in name:
            if re.fullmatch(r'[A-Za-z]+\d+', m_name):
                l = re.split(r'(\d+)', m_name)
            else:
                l = [m_name]
            if l[0] == 'g':
                pointer = getattr(pointer, 'weight')
            elif l[0] == 'b':
                pointer = getattr(pointer, 'bias')
            elif l[0] == 'w':
                pointer = getattr(pointer, 'weight')
            else:
                pointer = getattr(pointer, l[0])
            if len(l) >= 2:
                num = int(l[1])
                pointer = pointer[num]
        try:
            assert pointer.shape == array.shape
        except AssertionError as e:
            e.args += (pointer.shape, array.shape)
            raise
        print("Initialize PyTorch weight {}".format(name))
        pointer.data = torch.from_numpy(array)
    return model
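
# A minimal sketch of how this loader might be invoked, assuming a hypothetical
# folder that contains the original OpenAI NumPy checkpoint files
# (parameters_names.json, params_shapes.json, params_0.npy ... params_9.npy):
#
#     config = OpenAIGPTConfig()
#     model = OpenAIGPTModel(config)
#     model = load_tf_weights_in_openai_gpt(model, "/path/to/openai/checkpoint")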


def gelu(x):
    return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))


def swish(x):
    return x * torch.sigmoid(x)


# Each entry is a callable that maps a tensor to a tensor, so it can be applied
# directly in MLP.forward (hence nn.functional.relu rather than the nn.ReLU class).
ACT_FNS = {"relu": nn.functional.relu, "swish": swish, "gelu": gelu}
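
# Quick sanity-check sketch (illustrative only, not part of the model): the
# tanh-based gelu above closely tracks the exact erf-based formulation
# 0.5 * x * (1 + erf(x / sqrt(2))).
#
#     x = torch.linspace(-3, 3, steps=7)
#     exact = 0.5 * x * (1 + torch.erf(x / math.sqrt(2)))
#     assert torch.allclose(gelu(x), exact, atol=1e-2)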


class OpenAIGPTConfig(object):
    """Configuration class to store the configuration of a `OpenAIGPTModel`.
    """

    def __init__(
        self,
        vocab_size_or_config_json_file=40478,
        n_special=0,
        n_positions=512,
        n_ctx=512,
        n_embd=768,
        n_layer=12,
        n_head=12,
        afn="gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        predict_special_tokens=True
    ):
        """Constructs OpenAIGPTConfig.

        Args:
            vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `OpenAIGPTModel` or a configuration json file.
            n_special: The number of special tokens to learn during fine-tuning ('[SEP]', '[CLS]', ...)
            n_positions: Number of positional embeddings.
            n_ctx: Size of the causal mask (usually same as n_positions).
            n_embd: Dimensionality of the embeddings and hidden states.
            n_layer: Number of hidden layers in the Transformer encoder.
            n_head: Number of attention heads for each attention layer in
                the Transformer encoder.
            afn: The non-linear activation function (function or string) in the
                encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
            resid_pdrop: The dropout probability for all fully connected
                layers in the embeddings, encoder, and pooler.
            attn_pdrop: The dropout ratio for the attention
                probabilities.
            embd_pdrop: The dropout ratio for the embeddings.
            layer_norm_epsilon: epsilon to use in the layer norm layers
            initializer_range: The stddev of the truncated_normal_initializer for
                initializing all weight matrices.
            predict_special_tokens: should we predict special tokens (when the model has a LM head)
        """
        if isinstance(vocab_size_or_config_json_file, str) or (sys.version_info[0] == 2
                        and isinstance(vocab_size_or_config_json_file, unicode)):
            with open(vocab_size_or_config_json_file, "r", encoding="utf-8") as reader:
                json_config = json.loads(reader.read())
            for key, value in json_config.items():
                self.__dict__[key] = value
        elif isinstance(vocab_size_or_config_json_file, int):
            self.vocab_size = vocab_size_or_config_json_file
            self.n_special = n_special
            self.n_ctx = n_ctx
            self.n_positions = n_positions
            self.n_embd = n_embd
            self.n_layer = n_layer
            self.n_head = n_head
            self.afn = afn
            self.resid_pdrop = resid_pdrop
            self.embd_pdrop = embd_pdrop
            self.attn_pdrop = attn_pdrop
            self.layer_norm_epsilon = layer_norm_epsilon
            self.initializer_range = initializer_range
            self.predict_special_tokens = predict_special_tokens
        else:
            raise ValueError(
                "First argument must be either a vocabulary size (int) "
                "or the path to a pretrained model config file (str)"
            )

    @property
    def total_tokens_embeddings(self):
        return self.vocab_size + self.n_special

    @classmethod
    def from_dict(cls, json_object):
        """Constructs a `OpenAIGPTConfig` from a Python dictionary of parameters."""
        config = OpenAIGPTConfig(vocab_size_or_config_json_file=-1)
        for key, value in json_object.items():
            config.__dict__[key] = value
        return config

    @classmethod
    def from_json_file(cls, json_file):
        """Constructs a `OpenAIGPTConfig` from a json file of parameters."""
        with open(json_file, "r", encoding="utf-8") as reader:
            text = reader.read()
        return cls.from_dict(json.loads(text))

    def __repr__(self):
        return str(self.to_json_string())

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        return output

    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"

    def to_json_file(self, json_file_path):
        """ Save this instance to a json file."""
        with open(json_file_path, "w", encoding='utf-8') as writer:
            writer.write(self.to_json_string())
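
# A small round-trip sketch (illustrative only; the file name below is hypothetical):
#
#     config = OpenAIGPTConfig(n_layer=6, n_head=6, n_embd=384)
#     config.to_json_file("openai_gpt_config_small.json")
#     reloaded = OpenAIGPTConfig.from_json_file("openai_gpt_config_small.json")
#     assert config.to_dict() == reloaded.to_dict()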


class Conv1D(nn.Module):
    def __init__(self, nf, rf, nx):
        super(Conv1D, self).__init__()
        self.rf = rf
        self.nf = nf
        if rf == 1:  # faster 1x1 conv
            w = torch.empty(nx, nf)
            nn.init.normal_(w, std=0.02)
            self.weight = Parameter(w)
            self.bias = Parameter(torch.zeros(nf))
        else:  # was used to train LM
            raise NotImplementedError

    def forward(self, x):
        if self.rf == 1:
            size_out = x.size()[:-1] + (self.nf,)
            x = torch.addmm(self.bias, x.view(-1, x.size(-1)), self.weight)
            x = x.view(*size_out)
        else:
            raise NotImplementedError
        return x
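
# With rf == 1, Conv1D is just an affine map over the last dimension, i.e. a
# Linear layer whose weight is stored transposed. A small equivalence sketch
# (illustrative only):
#
#     conv = Conv1D(nf=8, rf=1, nx=4)
#     linear = nn.Linear(4, 8)
#     linear.weight.data = conv.weight.data.t()
#     linear.bias.data = conv.bias.data
#     x = torch.randn(2, 3, 4)
#     assert torch.allclose(conv(x), linear(x), atol=1e-6)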


class Attention(nn.Module):
    def __init__(self, nx, n_ctx, config, scale=False, output_attentions=False):
        super(Attention, self).__init__()
        n_state = nx  # in Attention: n_state=768 (nx=n_embd)
        # [switch nx => n_state from Block to Attention to keep identical to TF implem]
        assert n_state % config.n_head == 0
        self.register_buffer("bias", torch.tril(torch.ones(n_ctx, n_ctx)).view(1, 1, n_ctx, n_ctx))
        self.n_head = config.n_head
        self.split_size = n_state
        self.scale = scale
        self.output_attentions = output_attentions
        self.c_attn = Conv1D(n_state * 3, 1, nx)
        self.c_proj = Conv1D(n_state, 1, nx)
        self.attn_dropout = nn.Dropout(config.attn_pdrop)
        self.resid_dropout = nn.Dropout(config.resid_pdrop)

    def _attn(self, q, k, v):
        w = torch.matmul(q, k)
        if self.scale:
            w = w / math.sqrt(v.size(-1))
        # w = w * self.bias + -1e9 * (1 - self.bias)  # TF implem method: mask_attn_weights
        # XD: self.b may be larger than w, so we need to crop it
        b = self.bias[:, :, : w.size(-2), : w.size(-1)]
        w = w * b + -1e9 * (1 - b)

        w = nn.Softmax(dim=-1)(w)
        w = self.attn_dropout(w)
        if self.output_attentions:
            return w, torch.matmul(w, v)
        return torch.matmul(w, v)

    def merge_heads(self, x):
        x = x.permute(0, 2, 1, 3).contiguous()
        new_x_shape = x.size()[:-2] + (x.size(-2) * x.size(-1),)
        return x.view(*new_x_shape)  # in Tensorflow implem: fct merge_states

    def split_heads(self, x, k=False):
        new_x_shape = x.size()[:-1] + (self.n_head, x.size(-1) // self.n_head)
        x = x.view(*new_x_shape)  # in Tensorflow implem: fct split_states
        if k:
            return x.permute(0, 2, 3, 1)
        else:
            return x.permute(0, 2, 1, 3)

    def forward(self, x):
        x = self.c_attn(x)
        query, key, value = x.split(self.split_size, dim=2)
        query = self.split_heads(query)
        key = self.split_heads(key, k=True)
        value = self.split_heads(value)
        a = self._attn(query, key, value)
        if self.output_attentions:
            attentions, a = a
        a = self.merge_heads(a)
        a = self.c_proj(a)
        a = self.resid_dropout(a)
        if self.output_attentions:
            return attentions, a
        return a
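
# Shape flow for one attention call, assuming batch size bsz, sequence length
# seq and n_embd = n_head * head_dim (descriptive sketch only):
#
#     x: (bsz, seq, n_embd) --c_attn--> (bsz, seq, 3 * n_embd) --split--> q, k, v
#     split_heads(q):         (bsz, n_head, seq, head_dim)
#     split_heads(k, k=True): (bsz, n_head, head_dim, seq)   # pre-transposed for matmul
#     q @ k -> attention weights (bsz, n_head, seq, seq), causally masked by `bias`
#     weights @ v -> (bsz, n_head, seq, head_dim) --merge_heads--> (bsz, seq, n_embd)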


class MLP(nn.Module):
    def __init__(self, n_state, config):  # in MLP: n_state=3072 (4 * n_embd)
        super(MLP, self).__init__()
        nx = config.n_embd
        self.c_fc = Conv1D(n_state, 1, nx)
        self.c_proj = Conv1D(nx, 1, n_state)
        self.act = ACT_FNS[config.afn]
        self.dropout = nn.Dropout(config.resid_pdrop)

    def forward(self, x):
        h = self.act(self.c_fc(x))
        h2 = self.c_proj(h)
        return self.dropout(h2)


class Block(nn.Module):
    def __init__(self, n_ctx, config, scale=False, output_attentions=False):
        super(Block, self).__init__()
        nx = config.n_embd
        self.output_attentions = output_attentions
        self.attn = Attention(nx, n_ctx, config, scale, output_attentions)
        self.ln_1 = LayerNorm(nx, eps=config.layer_norm_epsilon)
        self.mlp = MLP(4 * nx, config)
        self.ln_2 = LayerNorm(nx, eps=config.layer_norm_epsilon)

    def forward(self, x):
        a = self.attn(x)
        if self.output_attentions:
            attentions, a = a
        n = self.ln_1(x + a)
        m = self.mlp(n)
        h = self.ln_2(n + m)
        if self.output_attentions:
            return attentions, h
        return h
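
# Note the "post-LN" residual layout of the original GPT: layer norm is applied
# after each residual sum (x + attn, then n + mlp), whereas GPT-2 later moved the
# layer norms to the input of each sub-block.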


class OpenAIGPTLMHead(nn.Module):
    """ Language Model Head for the transformer """

    def __init__(self, model_embeddings_weights, config):
        super(OpenAIGPTLMHead, self).__init__()
        self.n_embd = config.n_embd
        self.vocab_size = config.vocab_size
        self.predict_special_tokens = config.predict_special_tokens
        embed_shape = model_embeddings_weights.shape
        self.decoder = nn.Linear(embed_shape[1], embed_shape[0], bias=False)
        self.set_embeddings_weights(model_embeddings_weights)

    def set_embeddings_weights(self, model_embeddings_weights, predict_special_tokens=True):
        self.predict_special_tokens = predict_special_tokens
        embed_shape = model_embeddings_weights.shape
        self.decoder.weight = model_embeddings_weights  # Tied weights

    def forward(self, hidden_state):
        lm_logits = self.decoder(hidden_state)
        if not self.predict_special_tokens:
            lm_logits = lm_logits[..., :self.vocab_size]
        return lm_logits
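
# The head shares its weight tensor with the input embedding (weight tying), so
# resizing or reloading the embedding automatically updates the output projection.
# A quick sketch of the invariant (illustrative only):
#
#     config = OpenAIGPTConfig()
#     model = OpenAIGPTLMHeadModel(config)
#     assert model.lm_head.decoder.weight is model.transformer.tokens_embed.weight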


class OpenAIGPTMultipleChoiceHead(nn.Module):
    """ Classifier Head for the transformer """

    def __init__(self, config):
        super(OpenAIGPTMultipleChoiceHead, self).__init__()
        self.n_embd = config.n_embd
        # self.multiple_choice_token = multiple_choice_token
        self.dropout = nn.Dropout2d(config.resid_pdrop)  # To reproduce the noise_shape parameter of TF implementation
        self.linear = nn.Linear(config.n_embd, 1)

        nn.init.normal_(self.linear.weight, std=0.02)
        nn.init.normal_(self.linear.bias, 0)

    def forward(self, hidden_states, mc_token_ids):
        # Classification logits
        # hidden_states (bsz, num_choices, seq_length, hidden_size)
        # mc_token_ids (bsz, num_choices)
        mc_token_ids = mc_token_ids.unsqueeze(-1).unsqueeze(-1).expand(-1, -1, -1, hidden_states.size(-1))
        # (bsz, num_choices, 1, hidden_size)
        multiple_choice_h = hidden_states.gather(2, mc_token_ids).squeeze(2)
        # (bsz, num_choices, hidden_size)
        multiple_choice_h = self.dropout(multiple_choice_h.transpose(1, 2)).transpose(1, 2)
        multiple_choice_logits = self.linear(multiple_choice_h).squeeze(-1)
        # (bsz, num_choices)
        return multiple_choice_logits
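
# What the gather does, on a toy example (illustrative only): pick, for every
# (batch, choice) pair, the hidden state of the token indexed by mc_token_ids.
#
#     hidden_states = torch.arange(2 * 2 * 3 * 4, dtype=torch.float).view(2, 2, 3, 4)
#     mc_token_ids = torch.tensor([[2, 0], [1, 1]])        # (bsz=2, num_choices=2)
#     idx = mc_token_ids.unsqueeze(-1).unsqueeze(-1).expand(-1, -1, -1, 4)
#     picked = hidden_states.gather(2, idx).squeeze(2)     # (2, 2, 4)
#     assert torch.equal(picked[0, 0], hidden_states[0, 0, 2])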


class OpenAIGPTPreTrainedModel(nn.Module):
    """ An abstract class to handle weights initialization and
        a simple interface for downloading and loading pretrained models.
    """

    def __init__(self, config, *inputs, **kwargs):
        super(OpenAIGPTPreTrainedModel, self).__init__()
        if not isinstance(config, OpenAIGPTConfig):
            raise ValueError(
                "Parameter config in `{}(config)` should be an instance of class `OpenAIGPTConfig`. "
                "To create a model from a pretrained model use "
                "`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
                    self.__class__.__name__, self.__class__.__name__
                )
            )
        self.config = config

    def init_weights(self, module):
        """ Initialize the weights.
        """
        if isinstance(module, (nn.Linear, nn.Embedding)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
        elif isinstance(module, LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        if isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_()

    @classmethod
    def from_pretrained(
        cls, pretrained_model_name_or_path, num_special_tokens=None, state_dict=None, cache_dir=None, from_tf=False, *inputs, **kwargs
    ):
        """
        Instantiate a OpenAIGPTPreTrainedModel from a pre-trained model file or a pytorch state dict.
        Download and cache the pre-trained model file if needed.

        Params:
            pretrained_model_name_or_path: either:
                - a str with the name of a pre-trained model to load selected in the list of:
                    . `openai-gpt`
                - a path or url to a pretrained model archive containing:
                    . `openai_gpt_config.json` a configuration file for the model
                    . `pytorch_model.bin` a PyTorch dump of a OpenAIGPTModel instance
                - a path or url to a pretrained model archive containing:
                    . `bert_config.json` a configuration file for the model
                    . a series of NumPy files containing OpenAI TensorFlow trained weights
            from_tf: should we load the weights from a locally saved TensorFlow checkpoint
            cache_dir: an optional path to a folder in which the pre-trained models will be cached.
            state_dict: an optional state dictionary (collections.OrderedDict object) to use instead of pre-trained models
            *inputs, **kwargs: additional input for the specific Bert class
                (ex: num_labels for BertForSequenceClassification)
        """
        if pretrained_model_name_or_path in PRETRAINED_MODEL_ARCHIVE_MAP:
            archive_file = PRETRAINED_MODEL_ARCHIVE_MAP[pretrained_model_name_or_path]
            config_file = PRETRAINED_CONFIG_ARCHIVE_MAP[pretrained_model_name_or_path]
        else:
            archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)
            config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)
        # redirect to the cache, if necessary
        try:
            resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir)
            resolved_config_file = cached_path(config_file, cache_dir=cache_dir)
        except EnvironmentError:
            logger.error(
                "Model name '{}' was not found in model name list ({}). "
                "We assumed '{}' was a path or url but couldn't find files {} and {} "
                "at this path or url.".format(
                    pretrained_model_name_or_path, ", ".join(PRETRAINED_MODEL_ARCHIVE_MAP.keys()), pretrained_model_name_or_path,
                    archive_file, config_file
                )
            )
            return None
        if resolved_archive_file == archive_file and resolved_config_file == config_file:
            logger.info("loading weights file {}".format(archive_file))
            logger.info("loading configuration file {}".format(config_file))
        else:
            logger.info("loading weights file {} from cache at {}".format(
                archive_file, resolved_archive_file))
            logger.info("loading configuration file {} from cache at {}".format(
                config_file, resolved_config_file))
        # Load config
        config = OpenAIGPTConfig.from_json_file(resolved_config_file)
        logger.info("Model config {}".format(config))
        # Instantiate model.
        model = cls(config, *inputs, **kwargs)
        if state_dict is None and not from_tf:
            state_dict = torch.load(resolved_archive_file, map_location='cpu')
        if from_tf:
            # Directly load from a TensorFlow checkpoint (stored as NumPy array)
            return load_tf_weights_in_openai_gpt(model, resolved_archive_file)

        old_keys = []
        new_keys = []
        for key in state_dict.keys():
            new_key = None
            if key.endswith(".g"):
                new_key = key[:-2] + ".weight"
            elif key.endswith(".b"):
                new_key = key[:-2] + ".bias"
            elif key.endswith(".w"):
                new_key = key[:-2] + ".weight"
            if new_key:
                old_keys.append(key)
                new_keys.append(new_key)
        for old_key, new_key in zip(old_keys, new_keys):
            state_dict[new_key] = state_dict.pop(old_key)

        missing_keys = []
        unexpected_keys = []
        error_msgs = []
        # copy state_dict so _load_from_state_dict can modify it
        metadata = getattr(state_dict, "_metadata", None)
        state_dict = state_dict.copy()
        if metadata is not None:
            state_dict._metadata = metadata

        def load(module, prefix=""):
            local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
            module._load_from_state_dict(
                state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs
            )
            for name, child in module._modules.items():
                if child is not None:
                    load(child, prefix + name + ".")

        start_model = model
        if hasattr(model, "transformer") and all(not s.startswith('transformer.') for s in state_dict.keys()):
            start_model = model.transformer
        load(start_model, prefix="")

        if len(missing_keys) > 0:
            logger.info(
                "Weights of {} not initialized from pretrained model: {}".format(model.__class__.__name__, missing_keys)
            )
        if len(unexpected_keys) > 0:
            logger.info(
                "Weights from pretrained model not used in {}: {}".format(model.__class__.__name__, unexpected_keys)
            )
        if len(error_msgs) > 0:
            raise RuntimeError(
                "Error(s) in loading state_dict for {}:\n\t{}".format(model.__class__.__name__, "\n\t".join(error_msgs))
            )

        # Add additional embeddings for special tokens if needed
        # This step also makes sure we are still sharing the output and input embeddings after loading weights
        model.set_num_special_tokens(num_special_tokens if num_special_tokens is not None else config.n_special)
        return model
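
# Typical usage (a sketch; the archive is downloaded on first call and cached):
#
#     model = OpenAIGPTLMHeadModel.from_pretrained('openai-gpt', num_special_tokens=2)
#     model.eval()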


class OpenAIGPTModel(OpenAIGPTPreTrainedModel):
    """OpenAI GPT model ("Improving Language Understanding by Generative Pre-Training").

    OpenAI GPT uses a single embedding matrix to store the word and special embeddings.
    Special tokens embeddings are additional tokens that are not pre-trained: [SEP], [CLS]...
    Special tokens need to be trained during the fine-tuning if you use them.
    The number of special embeddings can be controlled using the `set_num_special_tokens(num_special_tokens)` function.

    The embeddings are ordered as follows in the token embeddings matrix:
        [0,                                            ----------------------
         ...                                           -> word embeddings
         config.vocab_size - 1,                        ______________________
         config.vocab_size,
         ...                                           -> special embeddings
         config.vocab_size + config.n_special - 1]     ______________________

    where total_tokens_embeddings can be obtained as config.total_tokens_embeddings and is:
        total_tokens_embeddings = config.vocab_size + config.n_special
    You should use the associated indices to index the embeddings.

    Params:
        config: a OpenAIGPTConfig class instance with the configuration to build a new model

    Inputs:
        `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] (or more generally [d_1, ..., d_n, sequence_length]
            where d_1 ... d_n are arbitrary dimensions) with the word BPE token indices selected in the range [0, total_tokens_embeddings[
        `position_ids`: an optional torch.LongTensor with the same shape as input_ids
            with the position indices (selected in the range [0, config.n_positions - 1]).
        `token_type_ids`: an optional torch.LongTensor with the same shape as input_ids
            You can use it to add a third type of embedding to each input token in the sequence
            (the previous two being the word and position embeddings).
            The input, position and token_type embeddings are summed inside the Transformer before the first
            self-attention block.

    Outputs:
        `hidden_states`: the encoded-hidden-states at the top of the model
            as a torch.FloatTensor of size [batch_size, sequence_length, hidden_size]
            (or more generally [d_1, ..., d_n, hidden_size] where d_1 ... d_n are the dimensions of input_ids)

    Example usage:
    ```python
    # Already been converted into BPE token ids
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])

    config = modeling_openai.OpenAIGPTConfig()

    model = modeling_openai.OpenAIGPTModel(config)
    hidden_states = model(input_ids)
    ```
    """

    def __init__(self, config, output_attentions=False):
        super(OpenAIGPTModel, self).__init__(config)
        self.output_attentions = output_attentions
        self.tokens_embed = nn.Embedding(config.total_tokens_embeddings, config.n_embd)
        self.positions_embed = nn.Embedding(config.n_positions, config.n_embd)
        self.drop = nn.Dropout(config.embd_pdrop)
        block = Block(config.n_ctx, config, scale=True, output_attentions=output_attentions)
        self.h = nn.ModuleList([copy.deepcopy(block) for _ in range(config.n_layer)])

        self.apply(self.init_weights)

    def set_num_special_tokens(self, num_special_tokens):
        " Update input embeddings with new embedding matrix if needed "
        if self.config.n_special == num_special_tokens:
            return
        # Update config
        self.config.n_special = num_special_tokens
        # Build new embeddings and initialize all new embeddings (in particular the special tokens)
        old_embed = self.tokens_embed
        self.tokens_embed = nn.Embedding(self.config.total_tokens_embeddings, self.config.n_embd)
        self.tokens_embed.to(old_embed.weight.device)
        self.init_weights(self.tokens_embed)
        # Copy word embeddings from the previous weights
        self.tokens_embed.weight.data[:self.config.vocab_size, :] = old_embed.weight.data[:self.config.vocab_size, :]

    def forward(self, input_ids, position_ids=None, token_type_ids=None):
        if position_ids is None:
            # This was used when we had a single embedding matrix for position and token embeddings
            # start = self.config.vocab_size + self.config.n_special
            # end = start + input_ids.size(-1)
            # position_ids = torch.arange(start, end, dtype=torch.long, device=input_ids.device)
            position_ids = torch.arange(input_ids.size(-1), dtype=torch.long, device=input_ids.device)
            position_ids = position_ids.unsqueeze(0).expand_as(input_ids)

        input_shape = input_ids.size()
        input_ids = input_ids.view(-1, input_ids.size(-1))
        position_ids = position_ids.view(-1, position_ids.size(-1))

        inputs_embeds = self.tokens_embed(input_ids)
        position_embeds = self.positions_embed(position_ids)
        if token_type_ids is not None:
            token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1))
            token_type_embeds = self.tokens_embed(token_type_ids)
        else:
            token_type_embeds = 0
        # Add the position information to the input embeddings
        # h = e.sum(dim=2)
        hidden_states = inputs_embeds + position_embeds + token_type_embeds
        all_attentions = []
        for block in self.h:
            if self.output_attentions:
                attentions, hidden_states = block(hidden_states)
                all_attentions.append(attentions)
            else:
                hidden_states = block(hidden_states)
        output_shape = input_shape + (hidden_states.size(-1),)
        if self.output_attentions:
            return all_attentions, hidden_states.view(*output_shape)
        return hidden_states.view(*output_shape)
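
# Sketch of extending the vocabulary with special tokens before fine-tuning
# (illustrative only):
#
#     config = OpenAIGPTConfig(n_special=0)
#     model = OpenAIGPTModel(config)
#     model.set_num_special_tokens(2)   # grows tokens_embed to vocab_size + 2 rows
#     assert model.tokens_embed.num_embeddings == config.vocab_size + 2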


class OpenAIGPTLMHeadModel(OpenAIGPTPreTrainedModel):
    """OpenAI GPT model with a Language Modeling head ("Improving Language Understanding by Generative Pre-Training").

    OpenAI GPT uses a single embedding matrix to store the word and special embeddings.
    Special tokens embeddings are additional tokens that are not pre-trained: [SEP], [CLS]...
    Special tokens need to be trained during the fine-tuning if you use them.
    The number of special embeddings can be controlled using the `set_num_special_tokens(num_special_tokens)` function.

    The embeddings are ordered as follows in the token embeddings matrix:
        [0,                                            ----------------------
         ...                                           -> word embeddings
         config.vocab_size - 1,                        ______________________
         config.vocab_size,
         ...                                           -> special embeddings
         config.vocab_size + config.n_special - 1]     ______________________

    where total_tokens_embeddings can be obtained as config.total_tokens_embeddings and is:
        total_tokens_embeddings = config.vocab_size + config.n_special
    You should use the associated indices to index the embeddings.

    Params:
        config: a OpenAIGPTConfig class instance with the configuration to build a new model

    Inputs:
        `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] (or more generally [d_1, ..., d_n, sequence_length]
            where d_1 ... d_n are arbitrary dimensions) with the word BPE token indices selected in the range [0, total_tokens_embeddings[
        `position_ids`: an optional torch.LongTensor with the same shape as input_ids
            with the position indices (selected in the range [0, config.n_positions - 1]).
        `token_type_ids`: an optional torch.LongTensor with the same shape as input_ids
            You can use it to add a third type of embedding to each input token in the sequence
            (the previous two being the word and position embeddings).
            The input, position and token_type embeddings are summed inside the Transformer before the first
            self-attention block.
        `lm_labels`: optional language modeling labels: torch.LongTensor of shape [batch_size, sequence_length]
            with indices selected in [-1, 0, ..., vocab_size]. All labels set to -1 are ignored (masked), the loss
            is only computed for the labels set in [0, ..., vocab_size]

    Outputs:
        if `lm_labels` is not `None`:
            Outputs the language modeling loss.
        else:
            `lm_logits`: the language modeling logits as a torch.FloatTensor of size [batch_size, sequence_length, total_tokens_embeddings]
                (or more generally [d_1, ..., d_n, total_tokens_embeddings] where d_1 ... d_n are the dimensions of input_ids)

    Example usage:
    ```python
    # Already been converted into BPE token ids
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])

    config = modeling_openai.OpenAIGPTConfig()

    model = modeling_openai.OpenAIGPTLMHeadModel(config)
    lm_logits = model(input_ids)
    ```
    """

    def __init__(self, config, output_attentions=False):
        super(OpenAIGPTLMHeadModel, self).__init__(config)
        self.transformer = OpenAIGPTModel(config, output_attentions=output_attentions)
        self.lm_head = OpenAIGPTLMHead(self.transformer.tokens_embed.weight, config)
        self.apply(self.init_weights)

    def set_num_special_tokens(self, num_special_tokens, predict_special_tokens=True):
        """ Update input and output embeddings with new embedding matrix
            Make sure we are sharing the embeddings
        """
        self.config.predict_special_tokens = self.transformer.config.predict_special_tokens = predict_special_tokens
        self.transformer.set_num_special_tokens(num_special_tokens)
        self.lm_head.set_embeddings_weights(self.transformer.tokens_embed.weight, predict_special_tokens=predict_special_tokens)

    def forward(self, input_ids, position_ids=None, token_type_ids=None, lm_labels=None):
        hidden_states = self.transformer(input_ids, position_ids, token_type_ids)
        if self.transformer.output_attentions:
            all_attentions, hidden_states = hidden_states
        lm_logits = self.lm_head(hidden_states)
        if lm_labels is not None:
            # Shift so that tokens < n predict n
            shift_logits = lm_logits[..., :-1, :].contiguous()
            shift_labels = lm_labels[..., 1:].contiguous()
            # Flatten the tokens
            loss_fct = CrossEntropyLoss(ignore_index=-1)
            loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)),
                            shift_labels.view(-1))
            return loss
        if self.transformer.output_attentions:
            return all_attentions, lm_logits
        return lm_logits
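
# What the shift does on a toy sequence (descriptive sketch only): the logits at
# position t are scored against the label at position t + 1, so the last position
# has no target and the first token is never predicted.
#
#     lm_labels:       [w0, w1, w2, w3]
#     shift_logits ->  scores at positions [0, 1, 2]
#     shift_labels ->  targets             [w1, w2, w3]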


class OpenAIGPTDoubleHeadsModel(OpenAIGPTPreTrainedModel):
    """OpenAI GPT model with a Language Modeling and a Multiple Choice head ("Improving Language Understanding by Generative Pre-Training").

    OpenAI GPT uses a single embedding matrix to store the word and special embeddings.
    Special tokens embeddings are additional tokens that are not pre-trained: [SEP], [CLS]...
    Special tokens need to be trained during the fine-tuning if you use them.
    The number of special embeddings can be controlled using the `set_num_special_tokens(num_special_tokens)` function.

    The embeddings are ordered as follows in the token embeddings matrix:
        [0,                                            ----------------------
         ...                                           -> word embeddings
         config.vocab_size - 1,                        ______________________
         config.vocab_size,
         ...                                           -> special embeddings
         config.vocab_size + config.n_special - 1]     ______________________

    where total_tokens_embeddings can be obtained as config.total_tokens_embeddings and is:
        total_tokens_embeddings = config.vocab_size + config.n_special
    You should use the associated indices to index the embeddings.

    Params:
        config: a OpenAIGPTConfig class instance with the configuration to build a new model

    Inputs:
        `input_ids`: a torch.LongTensor of shape [batch_size, num_choices, sequence_length] with the BPE token
            indices selected in the range [0, total_tokens_embeddings[
        `mc_token_ids`: a torch.LongTensor of shape [batch_size, num_choices] with the index of the token from
            which we should take the hidden state to feed the multiple choice classifier (usually last token of the sequence)
        `position_ids`: an optional torch.LongTensor with the same shape as input_ids
            with the position indices (selected in the range [0, config.n_positions - 1]).
        `token_type_ids`: an optional torch.LongTensor with the same shape as input_ids
            You can use it to add a third type of embedding to each input token in the sequence
            (the previous two being the word and position embeddings).
            The input, position and token_type embeddings are summed inside the Transformer before the first
            self-attention block.
        `lm_labels`: optional language modeling labels: torch.LongTensor of shape [batch_size, num_choices, sequence_length]
            with indices selected in [-1, 0, ..., total_tokens_embeddings]. All labels set to -1 are ignored (masked), the loss
            is only computed for the labels set in [0, ..., total_tokens_embeddings]
        `multiple_choice_labels`: optional multiple choice labels: torch.LongTensor of shape [batch_size]
            with indices selected in [0, ..., num_choices].

    Outputs:
        if `lm_labels` and `multiple_choice_labels` are not `None`:
            Outputs a tuple of losses with the language modeling loss and the multiple choice loss.
        else: a tuple with
            `lm_logits`: the language modeling logits as a torch.FloatTensor of size [batch_size, num_choices, sequence_length, total_tokens_embeddings]
            `multiple_choice_logits`: the multiple choice logits as a torch.FloatTensor of size [batch_size, num_choices]

    Example usage:
    ```python
    # Already been converted into BPE token ids
    input_ids = torch.LongTensor([[[31, 51, 99], [15, 5, 0]]])  # (bsz, number of choices, seq length)
    mc_token_ids = torch.LongTensor([[2, 1]])  # (bsz, number of choices)

    config = modeling_openai.OpenAIGPTConfig()

    model = modeling_openai.OpenAIGPTDoubleHeadsModel(config)
    lm_logits, multiple_choice_logits = model(input_ids, mc_token_ids)
    ```
    """

    def __init__(self, config, output_attentions=False):
        super(OpenAIGPTDoubleHeadsModel, self).__init__(config)
        self.transformer = OpenAIGPTModel(config, output_attentions=output_attentions)
        self.lm_head = OpenAIGPTLMHead(self.transformer.tokens_embed.weight, config)
        self.multiple_choice_head = OpenAIGPTMultipleChoiceHead(config)
        self.apply(self.init_weights)

    def set_num_special_tokens(self, num_special_tokens, predict_special_tokens=True):
        """ Update input and output embeddings with new embedding matrix
            Make sure we are sharing the embeddings
        """
        self.config.predict_special_tokens = self.transformer.config.predict_special_tokens = predict_special_tokens
        self.transformer.set_num_special_tokens(num_special_tokens)
        self.lm_head.set_embeddings_weights(self.transformer.tokens_embed.weight, predict_special_tokens=predict_special_tokens)

    def forward(self, input_ids, mc_token_ids, lm_labels=None, mc_labels=None, token_type_ids=None, position_ids=None):
        hidden_states = self.transformer(input_ids, position_ids, token_type_ids)
        if self.transformer.output_attentions:
            all_attentions, hidden_states = hidden_states
        lm_logits = self.lm_head(hidden_states)
        mc_logits = self.multiple_choice_head(hidden_states, mc_token_ids)
        losses = []
        if lm_labels is not None:
            shift_logits = lm_logits[..., :-1, :].contiguous()
            shift_labels = lm_labels[..., 1:].contiguous()
            loss_fct = CrossEntropyLoss(ignore_index=-1)
            losses.append(loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)))
        if mc_labels is not None:
            loss_fct = CrossEntropyLoss()
            losses.append(loss_fct(mc_logits.view(-1, mc_logits.size(-1)), mc_labels.view(-1)))
        if losses:
            return losses
        if self.transformer.output_attentions:
            return all_attentions, lm_logits, mc_logits
        return lm_logits, mc_logits
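
# Fine-tuning sketch for a multiple-choice task (illustrative only; the
# special-token count of 2 assumes one start and one delimiter token were added
# by the tokenizer, and input_ids / mc_token_ids / labels follow the shapes
# documented above):
#
#     model = OpenAIGPTDoubleHeadsModel.from_pretrained('openai-gpt', num_special_tokens=2)
#     losses = model(input_ids, mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels)
#     loss = losses[0] + losses[1]
#     loss.backward()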