Reorder supported models in README

Joshua Lochner committed 2023-04-04 22:51:36 +02:00
parent 0e4079dfd2
commit 571f077e28
3 changed files with 11 additions and 11 deletions

@@ -6,7 +6,7 @@
[![license](https://img.shields.io/github/license/xenova/transformers.js)](https://github.com/xenova/transformers.js/blob/main/LICENSE)
-Run 🤗 Transformers in your browser! We currently support [BERT](https://huggingface.co/docs/transformers/model_doc/bert), [ALBERT](https://huggingface.co/docs/transformers/model_doc/albert), [DistilBERT](https://huggingface.co/docs/transformers/model_doc/distilbert), [MobileBERT](https://huggingface.co/docs/transformers/model_doc/mobilebert), [SqueezeBERT](https://huggingface.co/docs/transformers/model_doc/squeezebert), [T5](https://huggingface.co/docs/transformers/model_doc/t5), [T5v1.1](https://huggingface.co/docs/transformers/model_doc/t5v1.1), [FLAN-T5](https://huggingface.co/docs/transformers/model_doc/flan-t5), [mT5](https://huggingface.co/docs/transformers/model_doc/mt5), [MarianMT](https://huggingface.co/docs/transformers/model_doc/marian), [GPT2](https://huggingface.co/docs/transformers/model_doc/gpt2), [GPT Neo](https://huggingface.co/docs/transformers/model_doc/gpt_neo), [BART](https://huggingface.co/docs/transformers/model_doc/bart), [CodeGen](https://huggingface.co/docs/transformers/model_doc/codegen), [Whisper](https://huggingface.co/docs/transformers/model_doc/whisper), [CLIP](https://huggingface.co/docs/transformers/model_doc/clip), [Vision Transformer](https://huggingface.co/docs/transformers/model_doc/vit), [VisionEncoderDecoder](https://huggingface.co/docs/transformers/model_doc/vision-encoder-decoder), and [DETR](https://huggingface.co/docs/transformers/model_doc/detr) models, for a variety of tasks including: masked language modelling, text classification, zero-shot classification, text-to-text generation, translation, summarization, question answering, text generation, automatic speech recognition, image classification, zero-shot image classification, image-to-text, and object detection.
+Run 🤗 Transformers in your browser! We currently support [BERT](https://huggingface.co/docs/transformers/model_doc/bert), [ALBERT](https://huggingface.co/docs/transformers/model_doc/albert), [DistilBERT](https://huggingface.co/docs/transformers/model_doc/distilbert), [MobileBERT](https://huggingface.co/docs/transformers/model_doc/mobilebert), [SqueezeBERT](https://huggingface.co/docs/transformers/model_doc/squeezebert), [T5](https://huggingface.co/docs/transformers/model_doc/t5), [T5v1.1](https://huggingface.co/docs/transformers/model_doc/t5v1.1), [FLAN-T5](https://huggingface.co/docs/transformers/model_doc/flan-t5), [mT5](https://huggingface.co/docs/transformers/model_doc/mt5), [BART](https://huggingface.co/docs/transformers/model_doc/bart), [MarianMT](https://huggingface.co/docs/transformers/model_doc/marian), [GPT2](https://huggingface.co/docs/transformers/model_doc/gpt2), [GPT Neo](https://huggingface.co/docs/transformers/model_doc/gpt_neo), [CodeGen](https://huggingface.co/docs/transformers/model_doc/codegen), [Whisper](https://huggingface.co/docs/transformers/model_doc/whisper), [CLIP](https://huggingface.co/docs/transformers/model_doc/clip), [Vision Transformer](https://huggingface.co/docs/transformers/model_doc/vit), [VisionEncoderDecoder](https://huggingface.co/docs/transformers/model_doc/vision-encoder-decoder), and [DETR](https://huggingface.co/docs/transformers/model_doc/detr) models, for a variety of tasks including: masked language modelling, text classification, zero-shot classification, text-to-text generation, translation, summarization, question answering, text generation, automatic speech recognition, image classification, zero-shot image classification, image-to-text, and object detection.
![teaser](https://user-images.githubusercontent.com/26504141/221056008-e906614e-e6f0-4e10-b0a8-7d5c99e955b4.gif)

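For reference, the models listed in the README line above are consumed through the library's `pipeline` helper. Below is a minimal usage sketch for the summarization task that the reordered BART entry covers; the import form, default model choice, and output shape are assumptions for illustration and are not part of this commit.

```js
// Minimal sketch (assumed API surface; mirrors the Python `transformers` pipeline).
import { pipeline } from '@xenova/transformers'; // import form assumed

(async () => {
  // Create a summarization pipeline; a BART-style seq2seq model serves this task.
  const summarizer = await pipeline('summarization');

  const article =
    'The Eiffel Tower is 324 metres tall, about the same height as an 81-storey ' +
    'building, and was the tallest man-made structure in the world for 41 years.';

  const summary = await summarizer(article);
  console.log(summary); // a short generated summary (exact output shape assumed)
})();
```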
@@ -969,6 +969,15 @@ env.onnx.wasm.wasmPaths = '/path/to/files/';</code></pre>
href="https://huggingface.co/docs/transformers/model_doc/mt5">mT5 docs</a>.
</div>
</li>
+<li class="list-group-item d-flex justify-content-between align-items-start">
+<div class="ms-2 me-auto">
+<div class="fw-bold">BART</div>
+Tasks: Sequence-to-sequence for summarization
+<code>(AutoModelForSeq2SeqLM)</code>.
+For more information, check out the <a
+href="https://huggingface.co/docs/transformers/model_doc/bart">BART docs</a>.
+</div>
+</li>
<li class="list-group-item d-flex justify-content-between align-items-start">
<div class="ms-2 me-auto">
<div class="fw-bold">MarianMT</div>
@@ -997,15 +1006,6 @@ env.onnx.wasm.wasmPaths = '/path/to/files/';</code></pre>
<a href="https://huggingface.co/docs/transformers/model_doc/gpt_neo">GPT Neo docs</a>.
</div>
</li>
-<li class="list-group-item d-flex justify-content-between align-items-start">
-<div class="ms-2 me-auto">
-<div class="fw-bold">BART</div>
-Tasks: Sequence-to-sequence for summarization
-<code>(AutoModelForSeq2SeqLM)</code>.
-For more information, check out the <a
-href="https://huggingface.co/docs/transformers/model_doc/bart">BART docs</a>.
-</div>
-</li>
<li class="list-group-item d-flex justify-content-between align-items-start">
<div class="ms-2 me-auto">
<div class="fw-bold">CodeGen</div>

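The hunk headers above fall inside the documentation's environment-configuration section, whose quoted context line sets `env.onnx.wasm.wasmPaths`. A short sketch of that setting follows; the `env` named export is an assumption here and is not verified against this version of the library.

```js
// Sketch: point the ONNX Runtime WebAssembly backend at self-hosted .wasm files,
// as in the env.onnx.wasm.wasmPaths context line quoted in the hunk headers above.
import { env } from '@xenova/transformers'; // export name assumed

// Serve the ONNX Runtime .wasm binaries from this path on your own server.
env.onnx.wasm.wasmPaths = '/path/to/files/';
```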
@@ -1,7 +1,7 @@
{
"name": "@xenova/transformers",
"version": "1.3.5",
-"description": "Run 🤗 Transformers in your browser! We currently support BERT, ALBERT, DistilBERT, MobileBERT, SqueezeBERT, T5, T5v1.1, FLAN-T5, mT5, MarianMT, GPT2, GPT Neo, BART, CodeGen, Whisper, CLIP, Vision Transformer, VisionEncoderDecoder, and DETR models, for a variety of tasks including: masked language modelling, text classification, zero-shot classification, text-to-text generation, translation, summarization, question answering, text generation, automatic speech recognition, image classification, zero-shot image classification, image-to-text, and object detection.",
+"description": "Run 🤗 Transformers in your browser! We currently support BERT, ALBERT, DistilBERT, MobileBERT, SqueezeBERT, T5, T5v1.1, FLAN-T5, mT5, BART, MarianMT, GPT2, GPT Neo, CodeGen, Whisper, CLIP, Vision Transformer, VisionEncoderDecoder, and DETR models, for a variety of tasks including: masked language modelling, text classification, zero-shot classification, text-to-text generation, translation, summarization, question answering, text generation, automatic speech recognition, image classification, zero-shot image classification, image-to-text, and object detection.",
"main": "./src/transformers.js",
"directories": {
"test": "tests"