# Examples of loading Hugging Face `transformers` components through the
# PyTorch Hub interface (`torch.hub.load(repo, entry_point, checkpoint, ...)`).
# Each call either downloads from huggingface.co (and caches locally) or loads
# a checkpoint previously written with `save_pretrained(...)`.
# NOTE(review): requires the `transformers` package and network access for the
# remote checkpoints — confirm both are available in the target environment.
import torch  # harmless if already imported earlier in the file

# Config and tokenizer restored from a local directory, e.g. one produced by
# `save_pretrained('./test/bert_saved_model/')`.
config = torch.hub.load('huggingface/transformers', 'config', './test/bert_saved_model/')
tokenizer = torch.hub.load('huggingface/transformers', 'tokenizer', './test/bert_saved_model/')

# Bare model: download model and configuration from huggingface.co and cache.
model = torch.hub.load('huggingface/transformers', 'model', 'google-bert/bert-base-uncased')

# Causal language-modeling head.
model = torch.hub.load('huggingface/transformers', 'modelForCausalLM', 'openai-community/gpt2')  # download from huggingface.co and cache
model = torch.hub.load('huggingface/transformers', 'modelForCausalLM', './test/saved_model/')  # e.g. saved with `save_pretrained('./test/saved_model/')`
model = torch.hub.load('huggingface/transformers', 'modelForCausalLM', 'openai-community/gpt2', output_attentions=True)  # update configuration during loading

# Masked language-modeling head.
model = torch.hub.load('huggingface/transformers', 'modelForMaskedLM', 'google-bert/bert-base-uncased')  # download from huggingface.co and cache
model = torch.hub.load('huggingface/transformers', 'modelForMaskedLM', './test/bert_model/')  # e.g. saved with `save_pretrained('./test/bert_model/')`
model = torch.hub.load('huggingface/transformers', 'modelForMaskedLM', 'google-bert/bert-base-uncased', output_attentions=True)  # update configuration during loading

# Sequence-classification head.
model = torch.hub.load('huggingface/transformers', 'modelForSequenceClassification', 'google-bert/bert-base-uncased')  # download from huggingface.co and cache
model = torch.hub.load('huggingface/transformers', 'modelForSequenceClassification', './test/bert_model/')  # e.g. saved with `save_pretrained('./test/bert_model/')`
model = torch.hub.load('huggingface/transformers', 'modelForSequenceClassification', 'google-bert/bert-base-uncased', output_attentions=True)  # update configuration during loading

# Question-answering head.
model = torch.hub.load('huggingface/transformers', 'modelForQuestionAnswering', 'google-bert/bert-base-uncased')  # download from huggingface.co and cache
model = torch.hub.load('huggingface/transformers', 'modelForQuestionAnswering', './test/bert_model/')  # e.g. saved with `save_pretrained('./test/bert_model/')`
model = torch.hub.load('huggingface/transformers', 'modelForQuestionAnswering', 'google-bert/bert-base-uncased', output_attentions=True)  # update configuration during loading