From 5a8c6e771a2f086a06697900d7ba6249c3833556 Mon Sep 17 00:00:00 2001
From: Emrah Budur
Date: Sat, 12 Oct 2019 14:17:17 +0300
Subject: [PATCH] Fixed the sample code in the title 'Quick tour'.

---
 README.md | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/README.md b/README.md
index 0cc23c8389..e44ff52099 100644
--- a/README.md
+++ b/README.md
@@ -176,10 +176,11 @@ BERT_MODEL_CLASSES = [BertModel, BertForPreTraining, BertForMaskedLM, BertForNex
 # All the classes for an architecture can be initiated from pretrained weights for this architecture
 # Note that additional weights added for fine-tuning are only initialized
 # and need to be trained on the down-stream task
-tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
+pretrained_weights = 'bert-base-uncased'
+tokenizer = BertTokenizer.from_pretrained(pretrained_weights)
 for model_class in BERT_MODEL_CLASSES:
     # Load pretrained model/tokenizer
-    model = model_class.from_pretrained('bert-base-uncased')
+    model = model_class.from_pretrained(pretrained_weights)
 
     # Models can return full list of hidden-states & attentions weights at each layer
     model = model_class.from_pretrained(pretrained_weights,
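
For reference, below is a minimal sketch of how the patched 'Quick tour' snippet reads as a standalone script. It assumes the 2019-era transformers API, where models return tuples and the output flags are passed to from_pretrained; the import list, the shortened BERT_MODEL_CLASSES list, and the trailing output_hidden_states/output_attentions keyword arguments are reconstructed from the surrounding README context and are not part of this diff.

import torch
from transformers import (BertModel, BertForPreTraining, BertForMaskedLM,
                          BertForNextSentencePrediction, BertTokenizer)

# A shortened stand-in for the full class list in the README
BERT_MODEL_CLASSES = [BertModel, BertForPreTraining, BertForMaskedLM,
                      BertForNextSentencePrediction]

# After the patch, the checkpoint name lives in a single variable, so the
# tokenizer and every model class are guaranteed to load the same weights
pretrained_weights = 'bert-base-uncased'
tokenizer = BertTokenizer.from_pretrained(pretrained_weights)
for model_class in BERT_MODEL_CLASSES:
    # Load pretrained model/tokenizer
    model = model_class.from_pretrained(pretrained_weights)

    # Models can return the full list of hidden states and attention
    # weights at each layer when asked to at load time
    model = model_class.from_pretrained(pretrained_weights,
                                        output_hidden_states=True,
                                        output_attentions=True)
    input_ids = torch.tensor([tokenizer.encode("Let's see all hidden-states and attentions")])
    # With both flags set, the last two elements of the output tuple are
    # the per-layer hidden states and attention weights
    all_hidden_states, all_attentions = model(input_ids)[-2:]

The point of the change is purely about avoiding a repeated string literal: with pretrained_weights defined once, the tokenizer and all model classes cannot silently drift onto different checkpoints if the example is edited later.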