sigureling committed on
Commit
fb95e66
·
verified ·
1 Parent(s): d99a94b

Release BrainOmni Checkpoint

Browse files
base/BrainOmni.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:435db24e57a55df05aa7e16355def7b7ecbedb22aa1ec16063e7d14efd2386d0
3
+ size 146687894
base/model_cfg.json ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "window_length": 512,
3
+ "n_filters": 32,
4
+ "ratios": [
5
+ 8,
6
+ 4,
7
+ 2
8
+ ],
9
+ "kernel_size": 5,
10
+ "last_kernel_size": 5,
11
+ "n_dim": 256,
12
+ "n_head": 4,
13
+ "n_neuro": 16,
14
+ "dropout": 0.0,
15
+ "codebook_dim": 256,
16
+ "codebook_size": 512,
17
+ "num_quantizers": 4,
18
+ "rotation_trick": true,
19
+ "quantize_optimize_method": "ema",
20
+ "overlap_ratio": 0.25,
21
+ "lm_dim": 512,
22
+ "lm_head": 16,
23
+ "lm_depth": 12,
24
+ "lm_dropout": 0.1,
25
+ "mask_ratio": 0.5,
26
+ "num_quantizers_used": 4
27
+ }
braintokenizer/BrainTokenizer.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d41c44c14c3f3b11fd0fb660752e356dff4cb4bc5f32a05f470f503ffddc7b1a
3
+ size 24425508
braintokenizer/model_cfg.json ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "window_length": 512,
3
+ "n_filters": 32,
4
+ "ratios": [
5
+ 8,
6
+ 4,
7
+ 2
8
+ ],
9
+ "kernel_size": 5,
10
+ "last_kernel_size": 5,
11
+ "n_dim": 256,
12
+ "n_neuro": 16,
13
+ "n_head": 4,
14
+ "dropout": 0.0,
15
+ "codebook_dim": 256,
16
+ "codebook_size": 512,
17
+ "num_quantizers": 4,
18
+ "rotation_trick": true,
19
+ "quantize_optimize_method": "ema"
20
+ }
tiny/BrainOmni.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:62c67ba6a84ea0625e67a3b5e7463fe3930bfee88a612a225e9062a052542ffc
3
+ size 48816662
tiny/model_cfg.json ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "window_length": 512,
3
+ "n_filters": 32,
4
+ "ratios": [
5
+ 8,
6
+ 4,
7
+ 2
8
+ ],
9
+ "kernel_size": 5,
10
+ "last_kernel_size": 5,
11
+ "n_dim": 256,
12
+ "n_head": 4,
13
+ "n_neuro": 16,
14
+ "dropout": 0.0,
15
+ "codebook_dim": 256,
16
+ "codebook_size": 512,
17
+ "num_quantizers": 4,
18
+ "rotation_trick": true,
19
+ "quantize_optimize_method": "ema",
20
+ "overlap_ratio": 0.25,
21
+ "lm_dim": 256,
22
+ "lm_head": 8,
23
+ "lm_depth": 12,
24
+ "lm_dropout": 0.1,
25
+ "mask_ratio": 0.5,
26
+ "num_quantizers_used": 4
27
+ }