zR committed
Update tokenization_chatglm.py

tokenization_chatglm.py CHANGED (+1 -2)
@@ -268,11 +268,10 @@ class ChatGLMTokenizer(PreTrainedTokenizer):
         self,
         encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
         max_length: Optional[int] = None,
+        padding_side: str = "left",
         padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
         pad_to_multiple_of: Optional[int] = None,
         return_attention_mask: Optional[bool] = None,
-        padding_side: Optional[bool] = None,
-        **kwargs
     ) -> dict:
         """
         Pad encoded inputs (on left/right and up to predefined length or max length in the batch)
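For context, the reworked signature defaults to padding on the left. Below is a minimal runnable sketch of what left-padding up to a target length looks like; the helper name left_pad, the pad_id argument, and the dict keys are illustrative assumptions, not code from this commit:

    # Minimal left-padding sketch (illustrative; not the ChatGLMTokenizer body).
    from typing import Dict, List


    def left_pad(
        encoded: Dict[str, List[int]], max_length: int, pad_id: int
    ) -> Dict[str, List[int]]:
        """Left-pad input_ids and build a matching attention_mask up to max_length."""
        ids = encoded["input_ids"]
        pad_len = max(0, max_length - len(ids))  # never truncate, only pad
        return {
            "input_ids": [pad_id] * pad_len + ids,
            # Padded positions get mask 0; real tokens keep mask 1.
            "attention_mask": [0] * pad_len + [1] * len(ids),
        }


    # Usage: pad a 3-token sequence to length 5 on the left.
    print(left_pad({"input_ids": [101, 7592, 102]}, max_length=5, pad_id=0))
    # {'input_ids': [0, 0, 101, 7592, 102], 'attention_mask': [0, 0, 1, 1, 1]}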