Copy code from Bert to Roberta and add safeguard script (#7219)
* Copy code from Bert to Roberta and add safeguard script

* Fix docstring

* Comment code

* Formatting

* Update src/transformers/modeling_roberta.py

Co-authored-by: Lysandre Debut <lysandre@huggingface.co>

* Add test and fix bugs

* Fix style and make new command

Co-authored-by: Lysandre Debut <lysandre@huggingface.co>
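The safeguard works through "# Copied from" markers parsed by the new utils/check_copies.py script (full listing below). As a quick, self-contained sketch, the two regexes the script introduces split a marker line into the copied object's dotted path and an optional rename pattern:

import re

# The two regexes defined in utils/check_copies.py below.
_re_copy_warning = re.compile(r"^(\s*)#\s*Copied from\s+transformers\.(\S+\.\S+)\s*($|\S.*$)")
_re_replace_pattern = re.compile(r"with\s+(\S+)->(\S+)(?:\s|$)")

line = "# Copied from transformers.modeling_bert.BertSelfOutput with Bert->Roberta"
indent, object_name, replace_pattern = _re_copy_warning.search(line).groups()
print(object_name)  # modeling_bert.BertSelfOutput
print(_re_replace_pattern.search(replace_pattern).groups())  # ('Bert', 'Roberta')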
parent 656c27c3a3
commit e4b94d8e58
.circleci/config.yml
@@ -247,6 +247,7 @@ jobs:
       - run: black --check --line-length 119 --target-version py35 examples templates tests src utils
       - run: isort --check-only examples templates tests src utils
       - run: flake8 examples templates tests src utils
+      - run: python utils/check_copies.py
       - run: python utils/check_repo.py
   check_repository_consistency:
     working_directory: ~/transformers
Makefile
@@ -6,6 +6,7 @@ quality:
 	black --check --line-length 119 --target-version py35 examples templates tests src utils
 	isort --check-only examples templates tests src utils
 	flake8 examples templates tests src utils
+	python utils/check_copies.py
 	python utils/check_repo.py

 # Format source code automatically
@@ -14,6 +15,11 @@ style:
 	black --line-length 119 --target-version py35 examples templates tests src utils
 	isort examples templates tests src utils

+# Make marked copies of snippets of codes conform to the original
+
+fix-copies:
+	python utils/check_copies.py --fix_and_overwrite
+
 # Run tests for the library

 test:
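With these targets in place, `make quality` (and the CI job above) fails when a marked copy drifts from its original, and `make fix-copies` rewrites it. A minimal sketch of the same check driven from Python, assuming the repository root as the working directory:

import sys

sys.path.append("utils")  # check_copies.py lives in utils/, not on the default path
import check_copies

# Raises an Exception listing stale copies; overwrite=True instead rewrites
# them in place, like `python utils/check_copies.py --fix_and_overwrite`.
check_copies.check_copies(overwrite=False)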
src/transformers/modeling_roberta.py
@@ -15,7 +15,7 @@
 # limitations under the License.
 """PyTorch RoBERTa model. """

-
+import math
 import warnings

 import torch
@@ -29,8 +29,10 @@ from .file_utils import (
     add_start_docstrings_to_callable,
     replace_return_docstrings,
 )
-from .modeling_bert import BertEmbeddings, BertLayerNorm, BertModel, BertPreTrainedModel, gelu
+from .modeling_bert import ACT2FN, gelu
 from .modeling_outputs import (
+    BaseModelOutput,
+    BaseModelOutputWithPooling,
     CausalLMOutput,
     MaskedLMOutput,
     MultipleChoiceModelOutput,
@@ -38,6 +40,12 @@ from .modeling_outputs import (
     SequenceClassifierOutput,
     TokenClassifierOutput,
 )
+from .modeling_utils import (
+    PreTrainedModel,
+    apply_chunking_to_forward,
+    find_pruneable_heads_and_indices,
+    prune_linear_layer,
+)
 from .utils import logging


@@ -57,15 +65,31 @@ ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = [
 ]


-class RobertaEmbeddings(BertEmbeddings):
+RobertaLayerNorm = torch.nn.LayerNorm
+
+
+class RobertaEmbeddings(nn.Module):
     """
     Same as BertEmbeddings with a tiny tweak for positional embeddings indexing.
     """

+    # Copied from transformers.modeling_bert.BertEmbeddings.__init__ with Bert->Roberta
     def __init__(self, config):
-        super().__init__(config)
+        super().__init__()
+        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
+        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
+        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
+
+        # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
+        # any TensorFlow checkpoint file
+        self.LayerNorm = RobertaLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+        self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+        # position_ids (1, len position emb) is contiguous in memory and exported when serialized
+        self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
+
+        # End copy
         self.padding_idx = config.pad_token_id
         self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=self.padding_idx)
         self.position_embeddings = nn.Embedding(
             config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx
         )
@@ -78,9 +102,29 @@ class RobertaEmbeddings(BertEmbeddings):
         else:
             position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds)

-        return super().forward(
-            input_ids, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds
-        )
+        # Copied from transformers.modeling_bert.BertEmbeddings.forward
+        if input_ids is not None:
+            input_shape = input_ids.size()
+        else:
+            input_shape = inputs_embeds.size()[:-1]
+
+        seq_length = input_shape[1]
+
+        if position_ids is None:
+            position_ids = self.position_ids[:, :seq_length]
+
+        if token_type_ids is None:
+            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
+
+        if inputs_embeds is None:
+            inputs_embeds = self.word_embeddings(input_ids)
+        position_embeddings = self.position_embeddings(position_ids)
+        token_type_embeddings = self.token_type_embeddings(token_type_ids)
+
+        embeddings = inputs_embeds + position_embeddings + token_type_embeddings
+        embeddings = self.LayerNorm(embeddings)
+        embeddings = self.dropout(embeddings)
+        return embeddings

     def create_position_ids_from_inputs_embeds(self, inputs_embeds):
         """We are provided embeddings directly. We cannot infer which are padded so just generate
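The "tiny tweak" mentioned in the RobertaEmbeddings docstring is that position ids count up from padding_idx + 1 while padded positions keep padding_idx, mirroring fairseq. A hedged sketch of that convention (the input_ids helper itself sits outside the lines shown in this hunk):

import torch

def create_position_ids_from_input_ids(input_ids, padding_idx):
    # Non-pad tokens get positions padding_idx + 1, padding_idx + 2, ...;
    # pad tokens all stay at padding_idx.
    mask = input_ids.ne(padding_idx).int()
    incremental_indices = torch.cumsum(mask, dim=1) * mask
    return incremental_indices.long() + padding_idx

ids = torch.tensor([[0, 5, 6, 1, 1]])  # assume 1 is the pad token id
print(create_position_ids_from_input_ids(ids, padding_idx=1))  # tensor([[2, 3, 4, 1, 1]])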
@@ -98,6 +142,343 @@ class RobertaEmbeddings(BertEmbeddings):
         return position_ids.unsqueeze(0).expand(input_shape)


+# Copied from transformers.modeling_bert.BertSelfAttention
+class RobertaSelfAttention(nn.Module):
+    def __init__(self, config):
+        super().__init__()
+        if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
+            raise ValueError(
+                "The hidden size (%d) is not a multiple of the number of attention "
+                "heads (%d)" % (config.hidden_size, config.num_attention_heads)
+            )
+
+        self.num_attention_heads = config.num_attention_heads
+        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
+        self.all_head_size = self.num_attention_heads * self.attention_head_size
+
+        self.query = nn.Linear(config.hidden_size, self.all_head_size)
+        self.key = nn.Linear(config.hidden_size, self.all_head_size)
+        self.value = nn.Linear(config.hidden_size, self.all_head_size)
+
+        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
+
+    def transpose_for_scores(self, x):
+        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
+        x = x.view(*new_x_shape)
+        return x.permute(0, 2, 1, 3)
+
+    def forward(
+        self,
+        hidden_states,
+        attention_mask=None,
+        head_mask=None,
+        encoder_hidden_states=None,
+        encoder_attention_mask=None,
+        output_attentions=False,
+    ):
+        mixed_query_layer = self.query(hidden_states)
+
+        # If this is instantiated as a cross-attention module, the keys
+        # and values come from an encoder; the attention mask needs to be
+        # such that the encoder's padding tokens are not attended to.
+        if encoder_hidden_states is not None:
+            mixed_key_layer = self.key(encoder_hidden_states)
+            mixed_value_layer = self.value(encoder_hidden_states)
+            attention_mask = encoder_attention_mask
+        else:
+            mixed_key_layer = self.key(hidden_states)
+            mixed_value_layer = self.value(hidden_states)
+
+        query_layer = self.transpose_for_scores(mixed_query_layer)
+        key_layer = self.transpose_for_scores(mixed_key_layer)
+        value_layer = self.transpose_for_scores(mixed_value_layer)
+
+        # Take the dot product between "query" and "key" to get the raw attention scores.
+        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
+        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
+        if attention_mask is not None:
+            # Apply the attention mask is (precomputed for all layers in BertModel forward() function)
+            attention_scores = attention_scores + attention_mask
+
+        # Normalize the attention scores to probabilities.
+        attention_probs = nn.Softmax(dim=-1)(attention_scores)
+
+        # This is actually dropping out entire tokens to attend to, which might
+        # seem a bit unusual, but is taken from the original Transformer paper.
+        attention_probs = self.dropout(attention_probs)
+
+        # Mask heads if we want to
+        if head_mask is not None:
+            attention_probs = attention_probs * head_mask
+
+        context_layer = torch.matmul(attention_probs, value_layer)
+
+        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
+        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
+        context_layer = context_layer.view(*new_context_layer_shape)
+
+        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
+        return outputs
+
+
+# Copied from transformers.modeling_bert.BertSelfOutput with Bert->Roberta
+class RobertaSelfOutput(nn.Module):
+    def __init__(self, config):
+        super().__init__()
+        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+        self.LayerNorm = RobertaLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+        self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+    def forward(self, hidden_states, input_tensor):
+        hidden_states = self.dense(hidden_states)
+        hidden_states = self.dropout(hidden_states)
+        hidden_states = self.LayerNorm(hidden_states + input_tensor)
+        return hidden_states
+
+
+# Copied from transformers.modeling_bert.BertAttention with Bert->Roberta
+class RobertaAttention(nn.Module):
+    def __init__(self, config):
+        super().__init__()
+        self.self = RobertaSelfAttention(config)
+        self.output = RobertaSelfOutput(config)
+        self.pruned_heads = set()
+
+    def prune_heads(self, heads):
+        if len(heads) == 0:
+            return
+        heads, index = find_pruneable_heads_and_indices(
+            heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
+        )
+
+        # Prune linear layers
+        self.self.query = prune_linear_layer(self.self.query, index)
+        self.self.key = prune_linear_layer(self.self.key, index)
+        self.self.value = prune_linear_layer(self.self.value, index)
+        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
+
+        # Update hyper params and store pruned heads
+        self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
+        self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
+        self.pruned_heads = self.pruned_heads.union(heads)
+
+    def forward(
+        self,
+        hidden_states,
+        attention_mask=None,
+        head_mask=None,
+        encoder_hidden_states=None,
+        encoder_attention_mask=None,
+        output_attentions=False,
+    ):
+        self_outputs = self.self(
+            hidden_states,
+            attention_mask,
+            head_mask,
+            encoder_hidden_states,
+            encoder_attention_mask,
+            output_attentions,
+        )
+        attention_output = self.output(self_outputs[0], hidden_states)
+        outputs = (attention_output,) + self_outputs[1:]  # add attentions if we output them
+        return outputs
+
+
+# Copied from transformers.modeling_bert.BertIntermediate
+class RobertaIntermediate(nn.Module):
+    def __init__(self, config):
+        super().__init__()
+        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
+        if isinstance(config.hidden_act, str):
+            self.intermediate_act_fn = ACT2FN[config.hidden_act]
+        else:
+            self.intermediate_act_fn = config.hidden_act
+
+    def forward(self, hidden_states):
+        hidden_states = self.dense(hidden_states)
+        hidden_states = self.intermediate_act_fn(hidden_states)
+        return hidden_states
+
+
+# Copied from transformers.modeling_bert.BertOutput with Bert->Roberta
+class RobertaOutput(nn.Module):
+    def __init__(self, config):
+        super().__init__()
+        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
+        self.LayerNorm = RobertaLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+        self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+    def forward(self, hidden_states, input_tensor):
+        hidden_states = self.dense(hidden_states)
+        hidden_states = self.dropout(hidden_states)
+        hidden_states = self.LayerNorm(hidden_states + input_tensor)
+        return hidden_states
+
+
+# Copied from transformers.modeling_bert.BertLayer with Bert->Roberta
+class RobertaLayer(nn.Module):
+    def __init__(self, config):
+        super().__init__()
+        self.chunk_size_feed_forward = config.chunk_size_feed_forward
+        self.seq_len_dim = 1
+        self.attention = RobertaAttention(config)
+        self.is_decoder = config.is_decoder
+        self.add_cross_attention = config.add_cross_attention
+        if self.add_cross_attention:
+            assert self.is_decoder, f"{self} should be used as a decoder model if cross attention is added"
+            self.crossattention = RobertaAttention(config)
+        self.intermediate = RobertaIntermediate(config)
+        self.output = RobertaOutput(config)
+
+    def forward(
+        self,
+        hidden_states,
+        attention_mask=None,
+        head_mask=None,
+        encoder_hidden_states=None,
+        encoder_attention_mask=None,
+        output_attentions=False,
+    ):
+        self_attention_outputs = self.attention(
+            hidden_states,
+            attention_mask,
+            head_mask,
+            output_attentions=output_attentions,
+        )
+        attention_output = self_attention_outputs[0]
+        outputs = self_attention_outputs[1:]  # add self attentions if we output attention weights
+
+        if self.is_decoder and encoder_hidden_states is not None:
+            assert hasattr(
+                self, "crossattention"
+            ), f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`"
+            cross_attention_outputs = self.crossattention(
+                attention_output,
+                attention_mask,
+                head_mask,
+                encoder_hidden_states,
+                encoder_attention_mask,
+                output_attentions,
+            )
+            attention_output = cross_attention_outputs[0]
+            outputs = outputs + cross_attention_outputs[1:]  # add cross attentions if we output attention weights
+
+        layer_output = apply_chunking_to_forward(
+            self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
+        )
+        outputs = (layer_output,) + outputs
+        return outputs
+
+    def feed_forward_chunk(self, attention_output):
+        intermediate_output = self.intermediate(attention_output)
+        layer_output = self.output(intermediate_output, attention_output)
+        return layer_output
+
+
+# Copied from transformers.modeling_bert.BertEncoder with Bert->Roberta
+class RobertaEncoder(nn.Module):
+    def __init__(self, config):
+        super().__init__()
+        self.config = config
+        self.layer = nn.ModuleList([RobertaLayer(config) for _ in range(config.num_hidden_layers)])
+
+    def forward(
+        self,
+        hidden_states,
+        attention_mask=None,
+        head_mask=None,
+        encoder_hidden_states=None,
+        encoder_attention_mask=None,
+        output_attentions=False,
+        output_hidden_states=False,
+        return_dict=False,
+    ):
+        all_hidden_states = () if output_hidden_states else None
+        all_attentions = () if output_attentions else None
+        for i, layer_module in enumerate(self.layer):
+            if output_hidden_states:
+                all_hidden_states = all_hidden_states + (hidden_states,)
+
+            layer_head_mask = head_mask[i] if head_mask is not None else None
+
+            if getattr(self.config, "gradient_checkpointing", False):
+
+                def create_custom_forward(module):
+                    def custom_forward(*inputs):
+                        return module(*inputs, output_attentions)
+
+                    return custom_forward
+
+                layer_outputs = torch.utils.checkpoint.checkpoint(
+                    create_custom_forward(layer_module),
+                    hidden_states,
+                    attention_mask,
+                    layer_head_mask,
+                    encoder_hidden_states,
+                    encoder_attention_mask,
+                )
+            else:
+                layer_outputs = layer_module(
+                    hidden_states,
+                    attention_mask,
+                    layer_head_mask,
+                    encoder_hidden_states,
+                    encoder_attention_mask,
+                    output_attentions,
+                )
+            hidden_states = layer_outputs[0]
+            if output_attentions:
+                all_attentions = all_attentions + (layer_outputs[1],)
+
+        if output_hidden_states:
+            all_hidden_states = all_hidden_states + (hidden_states,)
+
+        if not return_dict:
+            return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None)
+        return BaseModelOutput(
+            last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
+        )
+
+
+# Copied from transformers.modeling_bert.BertPooler
+class RobertaPooler(nn.Module):
+    def __init__(self, config):
+        super().__init__()
+        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+        self.activation = nn.Tanh()
+
+    def forward(self, hidden_states):
+        # We "pool" the model by simply taking the hidden state corresponding
+        # to the first token.
+        first_token_tensor = hidden_states[:, 0]
+        pooled_output = self.dense(first_token_tensor)
+        pooled_output = self.activation(pooled_output)
+        return pooled_output
+
+
+class RobertaPreTrainedModel(PreTrainedModel):
+    """An abstract class to handle weights initialization and
+    a simple interface for downloading and loading pretrained models.
+    """
+
+    config_class = RobertaConfig
+    base_model_prefix = "roberta"
+    authorized_missing_keys = [r"position_ids"]
+
+    # Copied from transformers.modeling_bert.BertPreTrainedModel._init_weights with Bert->Roberta
+    def _init_weights(self, module):
+        """ Initialize the weights """
+        if isinstance(module, (nn.Linear, nn.Embedding)):
+            # Slightly different from the TF version which uses truncated_normal for initialization
+            # cf https://github.com/pytorch/pytorch/pull/5617
+            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+        elif isinstance(module, RobertaLayerNorm):
+            module.bias.data.zero_()
+            module.weight.data.fill_(1.0)
+        if isinstance(module, nn.Linear) and module.bias is not None:
+            module.bias.data.zero_()
+
+
 ROBERTA_START_DOCSTRING = r"""

     This model is a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`_ sub-class.
@@ -159,19 +540,34 @@ ROBERTA_INPUTS_DOCSTRING = r"""
     "The bare RoBERTa Model transformer outputting raw hidden-states without any specific head on top.",
     ROBERTA_START_DOCSTRING,
 )
-class RobertaModel(BertModel):
-    """
-    This class overrides :class:`~transformers.BertModel`. Please check the
-    superclass for the appropriate documentation alongside usage examples.
-    """
-
-    config_class = RobertaConfig
-    base_model_prefix = "roberta"
+class RobertaModel(RobertaPreTrainedModel):
+    """
+
+    The model can behave as an encoder (with only self-attention) as well
+    as a decoder, in which case a layer of cross-attention is added between
+    the self-attention layers, following the architecture described in `Attention is all you need`_ by Ashish Vaswani,
+    Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
+
+    To behave as an decoder the model needs to be initialized with the
+    :obj:`is_decoder` argument of the configuration set to :obj:`True`.
+    To be used in a Seq2Seq model, the model needs to initialized with both :obj:`is_decoder`
+    argument and :obj:`add_cross_attention` set to :obj:`True`; an
+    :obj:`encoder_hidden_states` is then expected as an input to the forward pass.
+
+    .. _`Attention is all you need`:
+        https://arxiv.org/abs/1706.03762
+
+    """

+    # Copied from transformers.modeling_bert.BertModel.__init__ with Bert->Roberta
     def __init__(self, config):
         super().__init__(config)
+        self.config = config

         self.embeddings = RobertaEmbeddings(config)
+        self.encoder = RobertaEncoder(config)
+        self.pooler = RobertaPooler(config)

         self.init_weights()

     def get_input_embeddings(self):
@@ -180,14 +576,121 @@ class RobertaModel(BertModel):
     def set_input_embeddings(self, value):
         self.embeddings.word_embeddings = value

+    def _prune_heads(self, heads_to_prune):
+        """Prunes heads of the model.
+        heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
+        See base class PreTrainedModel
+        """
+        for layer, heads in heads_to_prune.items():
+            self.encoder.layer[layer].attention.prune_heads(heads)
+
+    @add_start_docstrings_to_callable(ROBERTA_INPUTS_DOCSTRING.format("(batch_size, sequence_length)"))
+    @add_code_sample_docstrings(
+        tokenizer_class=_TOKENIZER_FOR_DOC,
+        checkpoint="roberta-base",
+        output_type=BaseModelOutputWithPooling,
+        config_class=_CONFIG_FOR_DOC,
+    )
+    # Copied from transformers.modeling_bert.BertModel.forward
+    def forward(
+        self,
+        input_ids=None,
+        attention_mask=None,
+        token_type_ids=None,
+        position_ids=None,
+        head_mask=None,
+        inputs_embeds=None,
+        encoder_hidden_states=None,
+        encoder_attention_mask=None,
+        output_attentions=None,
+        output_hidden_states=None,
+        return_dict=None,
+    ):
+        r"""
+        encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
+            Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
+            if the model is configured as a decoder.
+        encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
+            Mask to avoid performing attention on the padding token indices of the encoder input. This mask
+            is used in the cross-attention if the model is configured as a decoder.
+            Mask values selected in ``[0, 1]``:
+            ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
+        """
+        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+        output_hidden_states = (
+            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+        )
+        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+        if input_ids is not None and inputs_embeds is not None:
+            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+        elif input_ids is not None:
+            input_shape = input_ids.size()
+        elif inputs_embeds is not None:
+            input_shape = inputs_embeds.size()[:-1]
+        else:
+            raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+        device = input_ids.device if input_ids is not None else inputs_embeds.device
+
+        if attention_mask is None:
+            attention_mask = torch.ones(input_shape, device=device)
+        if token_type_ids is None:
+            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
+
+        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
+        # ourselves in which case we just need to make it broadcastable to all heads.
+        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)
+
+        # If a 2D or 3D attention mask is provided for the cross-attention
+        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
+        if self.config.is_decoder and encoder_hidden_states is not None:
+            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
+            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
+            if encoder_attention_mask is None:
+                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
+            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
+        else:
+            encoder_extended_attention_mask = None
+
+        # Prepare head mask if needed
+        # 1.0 in head_mask indicate we keep the head
+        # attention_probs has shape bsz x n_heads x N x N
+        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
+        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
+        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
+
+        embedding_output = self.embeddings(
+            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
+        )
+        encoder_outputs = self.encoder(
+            embedding_output,
+            attention_mask=extended_attention_mask,
+            head_mask=head_mask,
+            encoder_hidden_states=encoder_hidden_states,
+            encoder_attention_mask=encoder_extended_attention_mask,
+            output_attentions=output_attentions,
+            output_hidden_states=output_hidden_states,
+            return_dict=return_dict,
+        )
+        sequence_output = encoder_outputs[0]
+        pooled_output = self.pooler(sequence_output)
+
+        if not return_dict:
+            return (sequence_output, pooled_output) + encoder_outputs[1:]
+
+        return BaseModelOutputWithPooling(
+            last_hidden_state=sequence_output,
+            pooler_output=pooled_output,
+            hidden_states=encoder_outputs.hidden_states,
+            attentions=encoder_outputs.attentions,
+        )


 @add_start_docstrings(
     """RoBERTa Model with a `language modeling` head on top for CLM fine-tuning. """, ROBERTA_START_DOCSTRING
 )
-class RobertaForCausalLM(BertPreTrainedModel):
-    config_class = RobertaConfig
-    base_model_prefix = "roberta"
-
+class RobertaForCausalLM(RobertaPreTrainedModel):
     def __init__(self, config):
         super().__init__(config)
@@ -300,11 +803,7 @@ class RobertaForCausalLM(BertPreTrainedModel):


 @add_start_docstrings("""RoBERTa Model with a `language modeling` head on top. """, ROBERTA_START_DOCSTRING)
-class RobertaForMaskedLM(BertPreTrainedModel):
-    config_class = RobertaConfig
-    base_model_prefix = "roberta"
-    authorized_missing_keys = [r"position_ids", r"lm_head\.decoder\.bias"]
-
+class RobertaForMaskedLM(RobertaPreTrainedModel):
     def __init__(self, config):
         super().__init__(config)
@@ -402,7 +901,7 @@ class RobertaLMHead(nn.Module):
     def __init__(self, config):
         super().__init__()
         self.dense = nn.Linear(config.hidden_size, config.hidden_size)
-        self.layer_norm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+        self.layer_norm = RobertaLayerNorm(config.hidden_size, eps=config.layer_norm_eps)

         self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
         self.bias = nn.Parameter(torch.zeros(config.vocab_size))
@@ -426,10 +925,7 @@ class RobertaLMHead(nn.Module):
     on top of the pooled output) e.g. for GLUE tasks. """,
     ROBERTA_START_DOCSTRING,
 )
-class RobertaForSequenceClassification(BertPreTrainedModel):
-    config_class = RobertaConfig
-    base_model_prefix = "roberta"
-
+class RobertaForSequenceClassification(RobertaPreTrainedModel):
     def __init__(self, config):
         super().__init__(config)
         self.num_labels = config.num_labels
@@ -509,10 +1005,7 @@ class RobertaForSequenceClassification(BertPreTrainedModel):
     the pooled output and a softmax) e.g. for RocStories/SWAG tasks. """,
     ROBERTA_START_DOCSTRING,
 )
-class RobertaForMultipleChoice(BertPreTrainedModel):
-    config_class = RobertaConfig
-    base_model_prefix = "roberta"
-
+class RobertaForMultipleChoice(RobertaPreTrainedModel):
     def __init__(self, config):
         super().__init__(config)
@@ -600,10 +1093,7 @@ class RobertaForMultipleChoice(BertPreTrainedModel):
     the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. """,
     ROBERTA_START_DOCSTRING,
 )
-class RobertaForTokenClassification(BertPreTrainedModel):
-    config_class = RobertaConfig
-    base_model_prefix = "roberta"
-
+class RobertaForTokenClassification(RobertaPreTrainedModel):
     def __init__(self, config):
         super().__init__(config)
         self.num_labels = config.num_labels
@@ -708,10 +1198,7 @@ class RobertaClassificationHead(nn.Module):
     the hidden-states output to compute `span start logits` and `span end logits`). """,
     ROBERTA_START_DOCSTRING,
 )
-class RobertaForQuestionAnswering(BertPreTrainedModel):
-    config_class = RobertaConfig
-    base_model_prefix = "roberta"
-
+class RobertaForQuestionAnswering(RobertaPreTrainedModel):
     def __init__(self, config):
         super().__init__(config)
         self.num_labels = config.num_labels
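Note how every copied class above carries a "# Copied from ... with Bert->Roberta" marker: before comparing, the checker applies that rename to the original Bert code with a plain re.sub (see is_copy_consistent below), so the renamed copies can be verified verbatim. A tiny self-contained illustration of that substitution step:

import re

bert_code = "class BertSelfOutput(nn.Module):\n    ..."
# The same substitution is_copy_consistent performs for "with Bert->Roberta".
roberta_code = re.sub("Bert", "Roberta", bert_code)
print(roberta_code.splitlines()[0])  # class RobertaSelfOutput(nn.Module):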
tests/test_utils_check_copies.py (new file)
@@ -0,0 +1,104 @@
+import os
+import re
+import shutil
+import sys
+import tempfile
+import unittest
+
+
+git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
+sys.path.append(os.path.join(git_repo_path, "utils"))
+
+import check_copies  # noqa: E402
+
+
+# This is the reference code that will be used in the tests.
+# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
+REFERENCE_CODE = """    def __init__(self, config):
+        super().__init__()
+        self.transform = BertPredictionHeadTransform(config)
+
+        # The output weights are the same as the input embeddings, but there is
+        # an output-only bias for each token.
+        self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
+
+        self.bias = nn.Parameter(torch.zeros(config.vocab_size))
+
+        # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
+        self.decoder.bias = self.bias
+
+    def forward(self, hidden_states):
+        hidden_states = self.transform(hidden_states)
+        hidden_states = self.decoder(hidden_states)
+        return hidden_states
+"""
+
+
+class CopyCheckTester(unittest.TestCase):
+    def setUp(self):
+        self.transformer_dir = tempfile.mkdtemp()
+        check_copies.TRANSFORMER_PATH = self.transformer_dir
+        shutil.copy(
+            os.path.join(git_repo_path, "src/transformers/modeling_bert.py"),
+            os.path.join(self.transformer_dir, "modeling_bert.py"),
+        )
+
+    def tearDown(self):
+        check_copies.TRANSFORMER_PATH = "src/transformers"
+        shutil.rmtree(self.transformer_dir)
+
+    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
+        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
+        if overwrite_result is not None:
+            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
+        fname = os.path.join(self.transformer_dir, "new_code.py")
+        with open(fname, "w") as f:
+            f.write(code)
+        if overwrite_result is None:
+            self.assertTrue(check_copies.is_copy_consistent(fname))
+        else:
+            check_copies.is_copy_consistent(f.name, overwrite=True)
+            with open(fname, "r") as f:
+                self.assertTrue(f.read(), expected)
+
+    def test_find_code_in_transformers(self):
+        code = check_copies.find_code_in_transformers("modeling_bert.BertLMPredictionHead")
+        self.assertEqual(code, REFERENCE_CODE)
+
+    def test_is_copy_consistent(self):
+        # Base copy consistency
+        self.check_copy_consistency(
+            "# Copied from transformers.modeling_bert.BertLMPredictionHead",
+            "BertLMPredictionHead",
+            REFERENCE_CODE + "\n",
+        )
+
+        # With no empty line at the end
+        self.check_copy_consistency(
+            "# Copied from transformers.modeling_bert.BertLMPredictionHead",
+            "BertLMPredictionHead",
+            REFERENCE_CODE,
+        )
+
+        # Copy consistency with rename
+        self.check_copy_consistency(
+            "# Copied from transformers.modeling_bert.BertLMPredictionHead with Bert->TestModel",
+            "TestModelLMPredictionHead",
+            re.sub("Bert", "TestModel", REFERENCE_CODE),
+        )
+
+        # Copy consistency with a really long name
+        long_class_name = "TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReasonIReallyDontUnderstand"
+        self.check_copy_consistency(
+            f"# Copied from transformers.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}",
+            f"{long_class_name}LMPredictionHead",
+            re.sub("Bert", long_class_name, REFERENCE_CODE),
+        )
+
+        # Copy consistency with overwrite
+        self.check_copy_consistency(
+            "# Copied from transformers.modeling_bert.BertLMPredictionHead with Bert->TestModel",
+            "TestModelLMPredictionHead",
+            REFERENCE_CODE,
+            overwrite_result=re.sub("Bert", "TestModel", REFERENCE_CODE),
+        )
utils/check_copies.py (new file)
@@ -0,0 +1,181 @@
+# coding=utf-8
+# Copyright 2020 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+import glob
+import os
+import re
+import tempfile
+
+
+# All paths are set with the intent you should run this script from the root of the repo with the command
+# python utils/check_copies.py
+TRANSFORMERS_PATH = "src/transformers"
+
+
+def find_code_in_transformers(object_name):
+    """ Find and return the code source code of `object_name`."""
+    parts = object_name.split(".")
+    i = 0
+
+    # First let's find the module where our object lives.
+    module = parts[i]
+    while i < len(parts) and not os.path.isfile(os.path.join(TRANSFORMERS_PATH, f"{module}.py")):
+        i += 1
+        module = os.path.join(module, parts[i])
+    if i >= len(parts):
+        raise ValueError(
+            f"`object_name` should begin with the name of a module of transformers but got {object_name}."
+        )
+
+    with open(os.path.join(TRANSFORMERS_PATH, f"{module}.py"), "r") as f:
+        lines = f.readlines()
+
+    # Now let's find the class / func in the code!
+    indent = ""
+    line_index = 0
+    for name in parts[i + 1 :]:
+        while line_index < len(lines) and re.search(f"^{indent}(class|def)\s+{name}", lines[line_index]) is None:
+            line_index += 1
+        indent += "    "
+        line_index += 1
+
+    if line_index >= len(lines):
+        raise ValueError(f" {object_name} does not match any function or class in {module}.")
+
+    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
+    start_index = line_index
+    while line_index < len(lines) and (lines[line_index].startswith(indent) or len(lines[line_index]) <= 1):
+        line_index += 1
+    # Clean up empty lines at the end (if any).
+    while len(lines[line_index - 1]) <= 1:
+        line_index -= 1
+
+    code_lines = lines[start_index:line_index]
+    return "".join(code_lines)
+
+
+_re_copy_warning = re.compile(r"^(\s*)#\s*Copied from\s+transformers\.(\S+\.\S+)\s*($|\S.*$)")
+_re_replace_pattern = re.compile(r"with\s+(\S+)->(\S+)(?:\s|$)")
+
+
+def blackify(code):
+    """
+    Applies the black part of our `make style` command to `code`.
+    """
+    has_indent = code.startswith("    ")
+    if has_indent:
+        code = f"class Bla:\n{code}"
+    with tempfile.TemporaryDirectory() as d:
+        fname = os.path.join(d, "tmp.py")
+        with open(fname, "w") as f:
+            f.write(code)
+        os.system(f"black -q --line-length 119 --target-version py35 {fname}")
+        with open(fname, "r") as f:
+            result = f.read()
+    return result[len("class Bla:\n") :] if has_indent else result
+
+
+def is_copy_consistent(filename, overwrite=False):
+    """
+    Check if the code commented as a copy in `filename` matches the original.
+
+    Return the differences or overwrites the content depending on `overwrite`.
+    """
+    with open(filename) as f:
+        lines = f.readlines()
+    found_diff = False
+    line_index = 0
+    # Not a foor loop cause `lines` is going to change (if `overwrite=True`).
+    while line_index < len(lines):
+        search = _re_copy_warning.search(lines[line_index])
+        if search is None:
+            line_index += 1
+            continue
+
+        # There is some copied code here, let's retrieve the original.
+        indent, object_name, replace_pattern = search.groups()
+        theoretical_code = find_code_in_transformers(object_name)
+        theoretical_indent = re.search(r"^(\s*)\S", theoretical_code).groups()[0]
+
+        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
+        indent = theoretical_indent
+        line_index = start_index
+
+        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
+        should_continue = True
+        while line_index < len(lines) and should_continue:
+            line_index += 1
+            if line_index >= len(lines):
+                break
+            line = lines[line_index]
+            should_continue = (len(line) <= 1 or line.startswith(indent)) and re.search(
+                f"^{indent}# End copy", line
+            ) is None
+        # Clean up empty lines at the end (if any).
+        while len(lines[line_index - 1]) <= 1:
+            line_index -= 1
+
+        observed_code_lines = lines[start_index:line_index]
+        observed_code = "".join(observed_code_lines)
+
+        # Before comparing, use the `replace_pattern` on the original code.
+        if len(replace_pattern) > 0:
+            search_patterns = _re_replace_pattern.search(replace_pattern)
+            if search_patterns is not None:
+                obj1, obj2 = search_patterns.groups()
+                theoretical_code = re.sub(obj1, obj2, theoretical_code)
+
+        # Blackify each version before comparing them.
+        observed_code = blackify(observed_code)
+        theoretical_code = blackify(theoretical_code)
+
+        # Test for a diff and act accordingly.
+        if observed_code != theoretical_code:
+            found_diff = True
+            if overwrite:
+                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
+                line_index = start_index + 1
+
+    if overwrite and found_diff:
+        # Warn the user a file has been modified.
+        print(f"Detected changes, rewriting {filename}.")
+        with open(filename, "w") as f:
+            f.writelines(lines)
+    return not found_diff
+
+
+def check_copies(overwrite: bool = False):
+    all_files = glob.glob(os.path.join(TRANSFORMERS_PATH, "**/*.py"), recursive=True)
+    diffs = []
+    for filename in all_files:
+        consistent = is_copy_consistent(filename, overwrite)
+        if not consistent:
+            diffs.append(filename)
+    if not overwrite and len(diffs) > 0:
+        diff = "\n".join(diffs)
+        raise Exception(
+            "Found copy inconsistencies in the following files:\n"
+            + diff
+            + "\nRun `make fix-copies` or `python utils/check_copies --fix_and_overwrite` to fix them."
+        )
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
+    args = parser.parse_args()
+
+    check_copies(args.fix_and_overwrite)
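For a feel of what these helpers return, a small hedged driver (run from the repository root, with black installed, since blackify shells out to it):

import sys

sys.path.append("utils")
import check_copies

# Returns the body of the named object (the class/def line itself excluded),
# found by walking src/transformers for the dotted path.
code = check_copies.find_code_in_transformers("modeling_bert.BertSelfOutput")

# Normalizes formatting the same way the checker does before diffing.
print(check_copies.blackify(code))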
utils/check_repo.py
@@ -1,3 +1,18 @@
+# coding=utf-8
+# Copyright 2020 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
 import importlib
 import inspect
 import os