Committed by Xenova and whitphx
Commit c9737af · verified · 1 parent: bf2e7cc

Add/update the quantized ONNX model files and README.md for Transformers.js v3 (#1)


- Add/update the quantized ONNX model files and README.md for Transformers.js v3 (295a09e344dda0d2746dd55bbae1e501124ef99c)


Co-authored-by: Yuichiro Tachibana <[email protected]>

README.md CHANGED
@@ -6,14 +6,14 @@ library_name: transformers.js
 https://huggingface.co/google/siglip-base-patch16-256 with ONNX weights to be compatible with Transformers.js.
 ## Usage (Transformers.js)
 
-If you haven't already, you can install the [Transformers.js](https://huggingface.co/docs/transformers.js) JavaScript library from [NPM](https://www.npmjs.com/package/@xenova/transformers) using:
+If you haven't already, you can install the [Transformers.js](https://huggingface.co/docs/transformers.js) JavaScript library from [NPM](https://www.npmjs.com/package/@huggingface/transformers) using:
 ```bash
-npm i @xenova/transformers
+npm i @huggingface/transformers
 ```
 
 **Example:** Zero-shot image classification w/ `Xenova/siglip-base-patch16-256`:
 ```js
-import { pipeline } from '@xenova/transformers';
+import { pipeline } from '@huggingface/transformers';
 
 const classifier = await pipeline('zero-shot-image-classification', 'Xenova/siglip-base-patch16-256');
 const url = 'http://images.cocodataset.org/val2017/000000039769.jpg';
@@ -30,7 +30,7 @@ console.log(output);
 **Example:** Compute text embeddings with `SiglipTextModel`.
 
 ```javascript
-import { AutoTokenizer, SiglipTextModel } from '@xenova/transformers';
+import { AutoTokenizer, SiglipTextModel } from '@huggingface/transformers';
 
 // Load tokenizer and text model
 const tokenizer = await AutoTokenizer.from_pretrained('Xenova/siglip-base-patch16-256');
@@ -53,7 +53,7 @@ const { pooler_output } = await text_model(text_inputs);
 **Example:** Compute vision embeddings with `SiglipVisionModel`.
 
 ```javascript
-import { AutoProcessor, SiglipVisionModel, RawImage} from '@xenova/transformers';
+import { AutoProcessor, SiglipVisionModel, RawImage} from '@huggingface/transformers';
 
 // Load processor and vision model
 const processor = await AutoProcessor.from_pretrained('Xenova/siglip-base-patch16-256');
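
Note on the quantized files added below: in Transformers.js v3 a specific quantization can be requested at load time through the `dtype` option, and the suffixes of the added ONNX files ('q4', 'q4f16', 'bnb4', 'uint8', 'int8') appear to correspond to those `dtype` values. The following is a minimal sketch, not part of the README diff above; the candidate labels are illustrative only.

```js
import { pipeline } from '@huggingface/transformers';

// Request the q4-quantized weights (assumption: dtype 'q4' resolves to onnx/model_q4.onnx).
const classifier = await pipeline(
  'zero-shot-image-classification',
  'Xenova/siglip-base-patch16-256',
  { dtype: 'q4' }, // omit to fall back to the repository's default dtype
);

const url = 'http://images.cocodataset.org/val2017/000000039769.jpg';
const output = await classifier(url, ['2 cats', '2 dogs']); // illustrative labels
console.log(output);
```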
onnx/model_bnb4.onnx ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8d370b611178b78340a5689ef835221293dc14b2cbafe82c04e911f27af71544
+size 207128474
onnx/model_q4.onnx ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c201e70f10debcb4399ca948b0042e409130e21d370239edec9a0d03bea7e03d
+size 218149723
onnx/model_q4f16.onnx ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ce77a07875701f4f5a109b8afd495b0a7bd23355116296bbdc2a93dd7d42208c
+size 153438199
onnx/model_uint8.onnx ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a79498076503d15a264519c8a2e050cb957390b882d2eda4f1f4f8332bc5528d
+size 205275628
onnx/text_model_bnb4.onnx ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:da73ccbb5494d96084550ff614975bf07e393ae5519195dfae4d75d3f43343da
+size 149385374
onnx/text_model_int8.onnx ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b2efff42671c6f30ed8dfb1f8ef65786528ead706120788166f8d33ce7a41495
+size 110982746
onnx/text_model_q4.onnx ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c8316529a8381f88fc3d62543ba44464d1e8a957c381887082c2a977d7340c2c
+size 154693262
onnx/text_model_q4f16.onnx ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:545f80604abc9d2ed18c27822f2e4f4de38b5de7fc6ace133c8110b1920bdb0a
+size 98710743
onnx/text_model_uint8.onnx ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7e5afb1dc32550affbb7f9f80ff027f1b62da348c7fd0df867f5af0a75d53584
+size 110982790
onnx/vision_model_bnb4.onnx ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:69a16639256ab63e57febdb5175d1fc4298a2423192d612c18a8fa793b8e31e9
+size 57733073
onnx/vision_model_int8.onnx ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d144a7b5c89105a92358b91c7c5f67dd2934705f1514b1a19110b4b4e10e4397
+size 94282636
onnx/vision_model_q4.onnx ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:658904a4cb0502a3a9aadaf7d8634b5a10586d1d784331d6cb65fc3c189806b0
+size 63446434
onnx/vision_model_q4f16.onnx ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:98e303ea6b140da171a5f33ab458ccc7e3823e8a706a2c73900e07e49d755db0
+size 54717499
onnx/vision_model_uint8.onnx ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bb7a618b9223641d123d53e0e730ba6839b02917a963684f88404c1def57fce4
+size 94282683
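
The separate `text_model_*` and `vision_model_*` files above back the `SiglipTextModel` / `SiglipVisionModel` usage shown in the README. The sketch below is an assumption, not taken from this commit: it presumes the `dtype` suffix convention also applies to these per-tower files (e.g. `dtype: 'uint8'` resolving to onnx/text_model_uint8.onnx and `dtype: 'q4f16'` to onnx/vision_model_q4f16.onnx), and reuses the README's tokenizer/processor flow.

```js
import {
  AutoTokenizer, SiglipTextModel,
  AutoProcessor, SiglipVisionModel, RawImage,
} from '@huggingface/transformers';

// Text tower with uint8 weights (assumed to load onnx/text_model_uint8.onnx)
const tokenizer = await AutoTokenizer.from_pretrained('Xenova/siglip-base-patch16-256');
const text_model = await SiglipTextModel.from_pretrained('Xenova/siglip-base-patch16-256', { dtype: 'uint8' });
const text_inputs = tokenizer(['a photo of 2 cats'], { padding: 'max_length', truncation: true });
const { pooler_output: text_embeddings } = await text_model(text_inputs);

// Vision tower with q4f16 weights (assumed to load onnx/vision_model_q4f16.onnx)
const processor = await AutoProcessor.from_pretrained('Xenova/siglip-base-patch16-256');
const vision_model = await SiglipVisionModel.from_pretrained('Xenova/siglip-base-patch16-256', { dtype: 'q4f16' });
const image = await RawImage.read('http://images.cocodataset.org/val2017/000000039769.jpg');
const image_inputs = await processor(image);
const { pooler_output: image_embeddings } = await vision_model(image_inputs);
```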