transformers.js/package.json

{
  "name": "@xenova/transformers",
  "version": "2.14.0",
  "description": "State-of-the-art Machine Learning for the web. Run 🤗 Transformers directly in your browser, with no need for a server!",
  "main": "./src/transformers.js",
  "types": "./types/transformers.d.ts",
  "type": "module",
  "scripts": {
    "typegen": "tsc ./src/transformers.js --allowJs --declaration --emitDeclarationOnly --declarationMap --outDir types",
    "dev": "webpack serve --no-client-overlay",
    "build": "webpack && npm run typegen",
    "generate-tests": "python -m tests.generate_tests",
    "test": "node --experimental-vm-modules node_modules/jest/bin/jest.js --verbose --maxConcurrency 1",
    "readme": "python ./docs/scripts/build_readme.py",
    "docs-api": "node ./docs/scripts/generate.js",
    "docs-preview": "doc-builder preview transformers.js ./docs/source/ --not_python_module",
    "docs-build": "doc-builder build transformers.js ./docs/source/ --not_python_module --build_dir ./docs/build/ --repo_owner xenova"
  },
  "repository": {
    "type": "git",
    "url": "git+https://github.com/xenova/transformers.js.git"
  },
  "keywords": [
    "transformers",
    "transformers.js",
    "huggingface",
    "hugging face",
    "machine learning",
    "deep learning",
    "artificial intelligence",
    "AI",
    "ML"
  ],
  "author": "Xenova",
  "license": "Apache-2.0",
  "bugs": {
    "url": "https://github.com/xenova/transformers.js/issues"
  },
  "homepage": "https://github.com/xenova/transformers.js#readme",
  "dependencies": {
    "onnxruntime-web": "1.14.0",
    "sharp": "^0.32.0",
    "@huggingface/jinja": "^0.1.0"
  },
  "optionalDependencies": {
    "onnxruntime-node": "1.14.0"
  },
  "devDependencies": {
    "@types/jest": "^29.5.1",
    "catharsis": "github:xenova/catharsis",
    "copy-webpack-plugin": "^11.0.0",
    "jest": "^29.5.0",
    "jest-environment-node": "^29.5.0",
    "jsdoc-to-markdown": "^8.0.0",
    "typescript": "^5.2.2",
    "wavefile": "^11.0.0",
    "webpack": "^5.80.0",
    "webpack-cli": "^5.0.2",
    "webpack-dev-server": "^4.13.3"
  },
  "overrides": {
    "semver": "^7.5.4",
    "protobufjs": "^7.2.4"
  },
  "files": [
    "src",
    "dist",
    "types",
    "README.md",
    "LICENSE"
  ],
  "browser": {
    "fs": false,
    "path": false,
    "url": false,
    "sharp": false,
    "onnxruntime-node": false,
    "stream/web": false
  },
  "publishConfig": {
    "access": "public"
  },
  "jsdelivr": "./dist/transformers.min.js",
  "unpkg": "./dist/transformers.min.js"
}