ArthurZ (HF Staff) committed · Commit ac1a159 · Parent: 41a5f5e

Update checkpoint for transformers>=4.29


Following the merge of [a PR](https://github.com/huggingface/transformers/pull/24310) in `transformers`, it became apparent that this model had not been converted properly: the special token ids and vocabulary size in its config did not match the tokenizer. This PR fixes inference; it was tested with the following script:
```python
>>> from transformers import AutoTokenizer, MarianMTModel
>>> tokenizer = AutoTokenizer.from_pretrained('Helsinki-NLP/opus-mt-tc-big-tr-en')
>>> inputs = tokenizer("Hey!Birlikte öğrenelim", return_tensors="pt", padding=True)
>>> model = MarianMTModel.from_pretrained('Helsinki-NLP/opus-mt-tc-big-tr-en')
>>> print(tokenizer.batch_decode(model.generate(**inputs)))
["<pad> Hey! Let's learn together</s>"]
```
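
As an extra sanity check (not part of the original test script; a minimal sketch that assumes the tokenizer files on the Hub were already correct and only the model config was stale), tokenizer and checkpoint should now agree on the special token ids changed in the diffs below:

```python
>>> from transformers import AutoTokenizer, MarianMTModel
>>> tokenizer = AutoTokenizer.from_pretrained('Helsinki-NLP/opus-mt-tc-big-tr-en')
>>> model = MarianMTModel.from_pretrained('Helsinki-NLP/opus-mt-tc-big-tr-en')
>>> # Marian convention: <pad> is the last vocab entry and doubles as decoder start
>>> assert model.config.pad_token_id == model.config.vocab_size - 1 == 57059
>>> assert model.config.decoder_start_token_id == model.config.pad_token_id
>>> assert tokenizer.eos_token_id == model.config.eos_token_id == 43741
```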

config.json CHANGED
```diff
@@ -7,7 +7,7 @@
   "attention_dropout": 0.0,
   "bad_words_ids": [
     [
-      35697
+      57059
     ]
   ],
   "bos_token_id": 0,
@@ -17,15 +17,15 @@
   "decoder_ffn_dim": 4096,
   "decoder_layerdrop": 0.0,
   "decoder_layers": 6,
-  "decoder_start_token_id": 35697,
-  "decoder_vocab_size": 35698,
+  "decoder_start_token_id": 57059,
+  "decoder_vocab_size": 57060,
   "dropout": 0.1,
   "encoder_attention_heads": 16,
   "encoder_ffn_dim": 4096,
   "encoder_layerdrop": 0.0,
   "encoder_layers": 6,
-  "eos_token_id": 26162,
-  "forced_eos_token_id": 26162,
+  "eos_token_id": 43741,
+  "forced_eos_token_id": 43741,
   "init_std": 0.02,
   "is_encoder_decoder": true,
   "max_length": 512,
@@ -34,12 +34,12 @@
   "normalize_embedding": false,
   "num_beams": 4,
   "num_hidden_layers": 6,
-  "pad_token_id": 35697,
+  "pad_token_id": 57059,
   "scale_embedding": true,
   "share_encoder_decoder_embeddings": true,
   "static_position_embeddings": true,
   "torch_dtype": "float16",
   "transformers_version": "4.34.0.dev0",
   "use_cache": true,
-  "vocab_size": 35698
+  "vocab_size": 57060
 }
```
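
A quick way to confirm the new config landed (a sketch using the standard `AutoConfig` API; the expected values are taken straight from the diff above):

```python
>>> from transformers import AutoConfig
>>> cfg = AutoConfig.from_pretrained('Helsinki-NLP/opus-mt-tc-big-tr-en')
>>> (cfg.vocab_size, cfg.decoder_vocab_size, cfg.pad_token_id, cfg.eos_token_id)
(57060, 57060, 57059, 43741)
```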
generation_config.json CHANGED
```diff
@@ -1,16 +1,16 @@
 {
   "bad_words_ids": [
     [
-      35697
+      57059
     ]
   ],
   "bos_token_id": 0,
-  "decoder_start_token_id": 35697,
-  "eos_token_id": 26162,
-  "forced_eos_token_id": 26162,
+  "decoder_start_token_id": 57059,
+  "eos_token_id": 43741,
+  "forced_eos_token_id": 43741,
   "max_length": 512,
   "num_beams": 4,
-  "pad_token_id": 35697,
+  "pad_token_id": 57059,
   "renormalize_logits": true,
   "transformers_version": "4.34.0.dev0"
 }
```
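
Similarly, the generation defaults can be checked with `GenerationConfig` (a sketch; the expected values come from the diff above):

```python
>>> from transformers import GenerationConfig
>>> gen = GenerationConfig.from_pretrained('Helsinki-NLP/opus-mt-tc-big-tr-en')
>>> (gen.decoder_start_token_id, gen.forced_eos_token_id, gen.bad_words_ids)
(57059, 43741, [[57059]])
```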
model.safetensors CHANGED
```diff
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:69ea2cf1906dd5768225f7ef1a778f8eaa87dbc8e2c1e4190ec2ca774975dd69
-size 425925172
+oid sha256:fbb0eb4c56cf9a6248adbda79c2bbce72cd8969b4b794fbbc434d4b7d995549d
+size 469717280
```
pytorch_model.bin CHANGED
```diff
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ebbeee990e971cb41fd5262b168a044724e2c47dde06de7d10deb49287ad4cfc
-size 425982149
+oid sha256:f7b144ac803253b7ea70e00b673f3d45c43587cb528225cff514e540e126e630
+size 469774277
```
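
The ~44 MB growth of both weight files is consistent with the larger vocabulary: in float16, the shared embedding matrix gains 57060 − 35698 = 21362 rows, and `final_logits_bias` gains the same number of entries. A back-of-the-envelope check, assuming d_model = 1024 (not shown in the diff, but the usual width for opus-mt-tc-big checkpoints):

```python
>>> extra = 57060 - 35698            # added vocabulary entries
>>> extra * 1024 * 2                 # embedding rows, d_model=1024, fp16
43749376
>>> extra * 2                        # final_logits_bias entries, fp16
42724
>>> 469717280 - 425925172            # actual model.safetensors size delta
43792108
```

The sum (43,792,100 bytes) matches the observed delta up to a few bytes of serialization overhead.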