Upload make_zh.py
samples/make_zh.py  CHANGED  (+3 -3)
@@ -2,7 +2,7 @@
 # Therefore it may NOT generalize gracefully to other texts
 # Refer to Usage in README.md for more general usage patterns
 
-# pip install kokoro>=0.8.1 "misaki[zh]>=0.8.
+# pip install kokoro>=0.8.1 "misaki[zh]>=0.8.1"
 from kokoro import KModel, KPipeline
 from pathlib import Path
 import numpy as np
@@ -74,7 +74,7 @@ path = Path(__file__).parent
 wavs = []
 for paragraph in tqdm.tqdm(texts):
     for i, sentence in enumerate(paragraph):
-        generator = zh_pipeline(sentence, voice=
+        generator = zh_pipeline(sentence, voice=VOICE, speed=speed_callable)
         f = path / f'zh{len(wavs):02}.wav'
         result = next(generator)
         wav = result.audio
@@ -83,4 +83,4 @@ for paragraph in tqdm.tqdm(texts):
             wav = np.concatenate([np.zeros(N_ZEROS), wav])
         wavs.append(wav)
 
-sf.write(path / f'HEARME_{
+sf.write(path / f'HEARME_{VOICE}.wav', np.concatenate(wavs), SAMPLE_RATE)
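For readers following along, the three changed lines fit the usual KPipeline flow: install kokoro together with misaki[zh], call the Chinese pipeline with a voice and a speed (a fixed factor or a callable), and write the concatenated 24 kHz audio with soundfile. The sketch below only illustrates that flow; the voice id 'zf_xiaobei', the body of speed_callable, SAMPLE_RATE = 24000, and the sample sentences are illustrative assumptions, not values from this commit.

# Minimal sketch of the pattern behind the updated lines, not the full make_zh.py.
import numpy as np
import soundfile as sf
from kokoro import KPipeline

SAMPLE_RATE = 24000        # Kokoro generates 24 kHz audio
VOICE = 'zf_xiaobei'       # assumed Mandarin voice id; the commit's VOICE may differ

def speed_callable(len_ps: int) -> float:
    # Assumed shape: slow down slightly for longer phoneme sequences.
    return 1.0 if len_ps < 100 else 0.9

zh_pipeline = KPipeline(lang_code='z')  # 'z' selects Mandarin (requires misaki[zh])

wavs = []
for sentence in ['你好，世界。', '这是一个测试。']:
    # Each call yields results with a .audio waveform, as in the diff above.
    result = next(zh_pipeline(sentence, voice=VOICE, speed=speed_callable))
    wavs.append(result.audio)

sf.write('HEARME_zh.wav', np.concatenate(wavs), SAMPLE_RATE)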
