Ashish-Abraham committed
Commit 69151ee
Parent: bdafd0f

Upload model

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+language_model/unigrams.txt filter=lfs diff=lfs merge=lfs -text
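This new attribute routes the ~13 MB unigram list through Git LFS, so a plain clone only receives a pointer file. Downloading through huggingface_hub resolves the pointer transparently; a minimal sketch, where the repo_id is a hypothetical placeholder for this repository:

```python
# A minimal sketch, assuming huggingface_hub is installed.
from huggingface_hub import hf_hub_download

# The Hub resolves the LFS pointer and returns the real file on disk.
unigrams_path = hf_hub_download(
    repo_id="Ashish-Abraham/wav2vec2-malayalam",  # hypothetical placeholder
    filename="language_model/unigrams.txt",
)
print(unigrams_path)
```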
added_tokens.json ADDED
@@ -0,0 +1,4 @@
+{
+  "</s>": 77,
+  "<s>": 76
+}
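The two sentence markers extend the 76-entry base vocabulary (ids 0-75 in vocab.json below), so they land at ids 76 and 77. A quick sanity check, assuming the tokenizer files from this commit sit in the working directory:

```python
# A sanity-check sketch, assuming transformers is installed and "." holds
# vocab.json, added_tokens.json, and the other tokenizer files.
from transformers import Wav2Vec2CTCTokenizer

tokenizer = Wav2Vec2CTCTokenizer.from_pretrained(".")
assert tokenizer.convert_tokens_to_ids("<s>") == 76   # from added_tokens.json
assert tokenizer.convert_tokens_to_ids("</s>") == 77
assert len(tokenizer) == 78                           # 76 base + 2 added
```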
alphabet.json ADDED
@@ -0,0 +1 @@
+{"labels": [" ", "\u0d02", "\u0d03", "\u0d05", "\u0d06", "\u0d07", "\u0d08", "\u0d09", "\u0d0a", "\u0d0b", "\u0d0e", "\u0d0f", "\u0d10", "\u0d12", "\u0d13", "\u0d14", "\u0d15", "\u0d16", "\u0d17", "\u0d18", "\u0d19", "\u0d1a", "\u0d1b", "\u0d1c", "\u0d1d", "\u0d1e", "\u0d1f", "\u0d20", "\u0d21", "\u0d22", "\u0d23", "\u0d24", "\u0d25", "\u0d26", "\u0d27", "\u0d28", "\u0d2a", "\u0d2b", "\u0d2c", "\u0d2d", "\u0d2e", "\u0d2f", "\u0d30", "\u0d31", "\u0d32", "\u0d33", "\u0d34", "\u0d35", "\u0d36", "\u0d37", "\u0d38", "\u0d39", "\u0d3e", "\u0d3f", "\u0d40", "\u0d41", "\u0d42", "\u0d43", "\u0d46", "\u0d47", "\u0d48", "\u0d4a", "\u0d4b", "\u0d4c", "\u0d4d", "\u0d57", "\u0d7a", "\u0d7b", "\u0d7c", "\u0d7d", "\u0d7e", "\u200c", "\u200d", "\u200e", "\u2047", "", "<s>", "</s>"], "is_bpe": false}
language_model/attrs.json ADDED
@@ -0,0 +1 @@
+{"alpha": 0.5, "beta": 1.5, "unk_score_offset": -10.0, "score_boundary": true}
language_model/threegram.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c79f42beda080286dae8543f450b5fa4d1bb5ce816a46028822636c0c40a7099
+size 72095872
language_model/unigrams.txt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:884ef507a6d4759e613f76134b846ef785976477665711c3d3d969aa937cc720
+size 13487141
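alphabet.json, attrs.json, threegram.bin, and unigrams.txt are exactly the pieces pyctcdecode needs for LM-boosted beam search. Wav2Vec2ProcessorWithLM wires them up itself on load, but doing it by hand shows how attrs.json maps onto the decoder parameters; a sketch, assuming this repo is cloned locally with its LFS files pulled:

```python
# A minimal sketch, assuming pyctcdecode and kenlm are installed.
import json

from pyctcdecode import build_ctcdecoder

labels = json.load(open("alphabet.json"))["labels"]
attrs = json.load(open("language_model/attrs.json"))
with open("language_model/unigrams.txt", encoding="utf-8") as f:
    unigrams = [line.strip() for line in f]

decoder = build_ctcdecoder(
    labels,
    kenlm_model_path="language_model/threegram.bin",  # KenLM 3-gram binary
    unigrams=unigrams,
    alpha=attrs["alpha"],                # LM weight (0.5)
    beta=attrs["beta"],                  # word-insertion bonus (1.5)
    unk_score_offset=attrs["unk_score_offset"],
    lm_score_boundary=attrs["score_boundary"],
)
```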
preprocessor_config.json ADDED
@@ -0,0 +1,10 @@
+{
+  "do_normalize": true,
+  "feature_extractor_type": "Wav2Vec2FeatureExtractor",
+  "feature_size": 1,
+  "padding_side": "right",
+  "padding_value": 0.0,
+  "processor_class": "Wav2Vec2ProcessorWithLM",
+  "return_attention_mask": true,
+  "sampling_rate": 16000
+}
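"sampling_rate": 16000 means inputs must be 16 kHz mono; do_normalize applies zero-mean/unit-variance scaling and padding_value 0.0 pads batches on the right. A short usage sketch, where "sample.wav" is a hypothetical placeholder path:

```python
# A minimal sketch, assuming transformers and librosa are installed and the
# preprocessor config from this commit is in ".".
import librosa
from transformers import Wav2Vec2FeatureExtractor

extractor = Wav2Vec2FeatureExtractor.from_pretrained(".")
speech, _ = librosa.load("sample.wav", sr=16000)  # resample to 16 kHz mono
inputs = extractor(speech, sampling_rate=16000, return_tensors="pt")
print(inputs.input_values.shape)  # (1, num_samples)
```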
special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
+{
+  "bos_token": {
+    "content": "<s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "eos_token": {
+    "content": "</s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": {
+    "content": "[PAD]",
+    "lstrip": true,
+    "normalized": false,
+    "rstrip": true,
+    "single_word": false
+  },
+  "unk_token": {
+    "content": "[UNK]",
+    "lstrip": true,
+    "normalized": false,
+    "rstrip": true,
+    "single_word": false
+  }
+}
tokenizer_config.json ADDED
@@ -0,0 +1,48 @@
+{
+  "added_tokens_decoder": {
+    "74": {
+      "content": "[UNK]",
+      "lstrip": true,
+      "normalized": false,
+      "rstrip": true,
+      "single_word": false,
+      "special": false
+    },
+    "75": {
+      "content": "[PAD]",
+      "lstrip": true,
+      "normalized": false,
+      "rstrip": true,
+      "single_word": false,
+      "special": false
+    },
+    "76": {
+      "content": "<s>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "77": {
+      "content": "</s>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "bos_token": "<s>",
+  "clean_up_tokenization_spaces": true,
+  "do_lower_case": false,
+  "eos_token": "</s>",
+  "model_max_length": 1000000000000000019884624838656,
+  "pad_token": "[PAD]",
+  "processor_class": "Wav2Vec2ProcessorWithLM",
+  "replace_word_delimiter_char": " ",
+  "target_lang": null,
+  "tokenizer_class": "Wav2Vec2CTCTokenizer",
+  "unk_token": "[UNK]",
+  "word_delimiter_token": "|"
+}
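The tokenizer decodes CTC output character by character, and word_delimiter_token "|" (id 0 in vocab.json below) is rendered as a space via replace_word_delimiter_char. A small decode sketch over a hypothetical id sequence, again assuming the tokenizer files sit in ".":

```python
# A decode sketch, assuming transformers is installed.
from transformers import Wav2Vec2CTCTokenizer

tokenizer = Wav2Vec2CTCTokenizer.from_pretrained(".")
# Hypothetical ids: ക(16) ാ(52) ർ(68) |(0) വ(47) ാ(52)
print(tokenizer.decode([16, 52, 68, 0, 47, 52]))  # -> "കാർ വാ"
```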
vocab.json ADDED
@@ -0,0 +1,78 @@
+{
+  "[PAD]": 75,
+  "[UNK]": 74,
+  "|": 0,
+  "ം": 1,
+  "ഃ": 2,
+  "അ": 3,
+  "ആ": 4,
+  "ഇ": 5,
+  "ഈ": 6,
+  "ഉ": 7,
+  "ഊ": 8,
+  "ഋ": 9,
+  "എ": 10,
+  "ഏ": 11,
+  "ഐ": 12,
+  "ഒ": 13,
+  "ഓ": 14,
+  "ഔ": 15,
+  "ക": 16,
+  "ഖ": 17,
+  "ഗ": 18,
+  "ഘ": 19,
+  "ങ": 20,
+  "ച": 21,
+  "ഛ": 22,
+  "ജ": 23,
+  "ഝ": 24,
+  "ഞ": 25,
+  "ട": 26,
+  "ഠ": 27,
+  "ഡ": 28,
+  "ഢ": 29,
+  "ണ": 30,
+  "ത": 31,
+  "ഥ": 32,
+  "ദ": 33,
+  "ധ": 34,
+  "ന": 35,
+  "പ": 36,
+  "ഫ": 37,
+  "ബ": 38,
+  "ഭ": 39,
+  "മ": 40,
+  "യ": 41,
+  "ര": 42,
+  "റ": 43,
+  "ല": 44,
+  "ള": 45,
+  "ഴ": 46,
+  "വ": 47,
+  "ശ": 48,
+  "ഷ": 49,
+  "സ": 50,
+  "ഹ": 51,
+  "ാ": 52,
+  "ി": 53,
+  "ീ": 54,
+  "ു": 55,
+  "ൂ": 56,
+  "ൃ": 57,
+  "െ": 58,
+  "േ": 59,
+  "ൈ": 60,
+  "ൊ": 61,
+  "ോ": 62,
+  "ൌ": 63,
+  "്": 64,
+  "ൗ": 65,
+  "ൺ": 66,
+  "ൻ": 67,
+  "ർ": 68,
+  "ൽ": 69,
+  "ൾ": 70,
+  "‌": 71,
+  "‍": 72,
+  "‎": 73
+}
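With all of these files in place, the repository loads as a Wav2Vec2ProcessorWithLM. An end-to-end sketch, assuming the acoustic model weights are also present in the repo; the repo_id and audio path are hypothetical placeholders:

```python
# An end-to-end sketch, assuming transformers, pyctcdecode, kenlm, torch,
# and librosa are installed; repo_id and "sample.wav" are placeholders.
import librosa
import torch
from transformers import AutoModelForCTC, Wav2Vec2ProcessorWithLM

repo_id = "Ashish-Abraham/wav2vec2-malayalam"  # hypothetical placeholder
processor = Wav2Vec2ProcessorWithLM.from_pretrained(repo_id)
model = AutoModelForCTC.from_pretrained(repo_id)

speech, _ = librosa.load("sample.wav", sr=16000)
inputs = processor(speech, sampling_rate=16000, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

# batch_decode runs beam search against the 3-gram KenLM from this commit.
print(processor.batch_decode(logits.numpy()).text[0])
```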