Updates
Browse files
- api.py +9 -7
- api_new_autoregressive.py +1 -1
- eval_multiple.py +10 -4
- models/new_autoregressive.py +7 -0
- models/text_voice_clip.py +0 -2
- models/xtransformers.py +6 -2
api.py
CHANGED
@@ -133,7 +133,7 @@ class TextToSpeech:
         self.tokenizer = VoiceBpeTokenizer()
         download_models()

-        self.autoregressive = UnifiedVoice(max_mel_tokens=
+        self.autoregressive = UnifiedVoice(max_mel_tokens=604, max_text_tokens=402, max_conditioning_inputs=2, layers=30,
                                            model_dim=1024,
                                            heads=16, number_text_tokens=256, start_text_token=255, checkpointing=False,
                                            train_solo_embeddings=False,

@@ -151,14 +151,18 @@ class TextToSpeech:
                                   layer_drop=0, unconditioned_percentage=0).cpu().eval()
         self.diffusion.load_state_dict(torch.load('.models/diffusion.pth'))

+        self.diffusion_next = DiffusionTts(model_channels=1024, num_layers=10, in_channels=100, out_channels=200,
+                                           in_latent_channels=1024, in_tokens=8193, dropout=0, use_fp16=False, num_heads=16,
+                                           layer_drop=0, unconditioned_percentage=0).cpu().eval()
+        self.diffusion_next.load_state_dict(torch.load('.models/diffusion_next.pth'))
+
         self.vocoder = UnivNetGenerator().cpu()
         self.vocoder.load_state_dict(torch.load('.models/vocoder.pth')['model_g'])
         self.vocoder.eval(inference=True)

     def tts(self, text, voice_samples, k=1,
             # autoregressive generation parameters follow
-            num_autoregressive_samples=512, temperature=.5, length_penalty=
-            typical_sampling=False, typical_mass=.9,
+            num_autoregressive_samples=512, temperature=.5, length_penalty=1, repetition_penalty=2.0, top_p=.5,
             # diffusion generation parameters follow
             diffusion_iterations=100, cond_free=True, cond_free_k=2, diffusion_temperature=.7,):
         text = torch.IntTensor(self.tokenizer.encode(text)).unsqueeze(0).cuda()

@@ -185,10 +189,8 @@ class TextToSpeech:
                     temperature=temperature,
                     num_return_sequences=self.autoregressive_batch_size,
                     length_penalty=length_penalty,
-                    repetition_penalty=repetition_penalty
-
-                    typical_mass=typical_mass)
-                padding_needed = 250 - codes.shape[1]
+                    repetition_penalty=repetition_penalty)
+                padding_needed = self.autoregressive.max_mel_tokens - codes.shape[1]
                 codes = F.pad(codes, (0, padding_needed), value=stop_mel_token)
                 samples.append(codes)
             self.autoregressive = self.autoregressive.cpu()
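A minimal, self-contained sketch of what the padding change above does: generated code sequences are now padded out to the model's own max_mel_tokens (604 in the constructor) rather than the old hard-coded 250, so downstream consumers always see fixed-length input. The tensor shape and the stop_mel_token value here are illustrative assumptions, not taken from the diff.

import torch
import torch.nn.functional as F

max_mel_tokens = 604      # matches the UnifiedVoice constructor above
stop_mel_token = 8193     # assumption: the stop id used for mel codes

codes = torch.randint(0, 8192, (1, 417))         # hypothetical generated sequence
padding_needed = max_mel_tokens - codes.shape[1]
codes = F.pad(codes, (0, padding_needed), value=stop_mel_token)
print(codes.shape)  # torch.Size([1, 604])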
api_new_autoregressive.py
CHANGED
@@ -135,7 +135,7 @@ class TextToSpeech:
         download_models()

         self.autoregressive = AutoregressiveCodegen(1024, 16).cpu().eval()
-        self.autoregressive.load_state_dict(torch.load('X:\\dlas\\experiments\\train_autoregressive_codegen\\models\\
+        self.autoregressive.load_state_dict(torch.load('X:\\dlas\\experiments\\train_autoregressive_codegen\\models\\20750_codegen_ema.pth'))

         self.clip = VoiceCLIP(dim_text=512, dim_speech=512, dim_latent=512, num_text_tokens=256, text_enc_depth=12,
                               text_seq_len=350, text_heads=8,
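Note the checkpoint path above is a hard-coded Windows path that only resolves on the author's machine. A portable load would be a sketch like the following (the relative path is hypothetical), mapping the weights to CPU first so the load works without a GPU:

import torch

state = torch.load('models/20750_codegen_ema.pth', map_location='cpu')
# model.load_state_dict(state)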
eval_multiple.py
CHANGED
@@ -7,7 +7,7 @@ from utils.audio import load_audio

 if __name__ == '__main__':
     fname = 'Y:\\libritts\\test-clean\\transcribed-brief-w2v.tsv'
-    outpath = 'D:\\tmp\\tortoise-tts-eval\\
+    outpath = 'D:\\tmp\\tortoise-tts-eval\\compare_vocoders'
     outpath_real = 'D:\\tmp\\tortoise-tts-eval\\real'

     os.makedirs(outpath, exist_ok=True)

@@ -24,12 +24,18 @@ if __name__ == '__main__':
         path = os.path.join(os.path.dirname(fname), line[1])
         cond_audio = load_audio(path, 22050)
         torchaudio.save(os.path.join(outpath_real, os.path.basename(line[1])), cond_audio, 22050)
-        sample = tts.tts(transcript, [cond_audio, cond_audio], num_autoregressive_samples=512, k=1,
+        sample, sample2 = tts.tts(transcript, [cond_audio, cond_audio], num_autoregressive_samples=512, k=1,
                          repetition_penalty=2.0, length_penalty=2, temperature=.5, top_p=.5,
-                         diffusion_temperature=.7, cond_free_k=2, diffusion_iterations=
+                         diffusion_temperature=.7, cond_free_k=2, diffusion_iterations=200)
+
         down = torchaudio.functional.resample(sample, 24000, 22050)
-        fout_path = os.path.join(outpath, os.path.basename(line[1]))
+        fout_path = os.path.join(outpath, 'old', os.path.basename(line[1]))
+        torchaudio.save(fout_path, down.squeeze(0), 22050)
+
+        down = torchaudio.functional.resample(sample2, 24000, 22050)
+        fout_path = os.path.join(outpath, 'new', os.path.basename(line[1]))
         torchaudio.save(fout_path, down.squeeze(0), 22050)
+
         recorder.write(f'{transcript}\t{fout_path}\n')
         recorder.flush()
     recorder.close()
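tts() now returns two waveforms, and the script writes them into 'old' and 'new' subfolders of outpath so the two diffusion paths can be A/B'd file-by-file. Since os.makedirs(outpath, exist_ok=True) only creates the top-level folder, the subfolders presumably need to exist before torchaudio.save is called; a sketch of that setup (an assumption, not part of the diff):

import os

outpath = 'D:\\tmp\\tortoise-tts-eval\\compare_vocoders'
for sub in ('old', 'new'):
    os.makedirs(os.path.join(outpath, sub), exist_ok=True)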
models/new_autoregressive.py
CHANGED
@@ -168,6 +168,8 @@ class AutoregressiveCodegen(nn.Module):

         self.START_TOKEN=8192
         self.STOP_TOKEN=8193
+        self.START_TEXT_TOKEN = 255
+        self.STOP_TEXT_TOKEN = 0
         self.max_text_token_id = num_text_tokens
         self.max_mel_token_id = num_mel_tokens
         self.mel_embedding = ConditioningEncoder(80, model_dim, do_checkpointing=False)

@@ -231,6 +233,9 @@ class AutoregressiveCodegen(nn.Module):
         for i in range(conditioning_signal.shape[1]):
             cond_embs.append(self.mel_embedding(conditioning_signal[:, i]))
         cond_emb = torch.stack(cond_embs, dim=1).mean(dim=1, keepdim=True)
+        # Since all positional embeddings are relative, it is (probably) important to "fix" the text with some permanent embeddings.
+        text_codes = F.pad(text_codes, (1,0), value=self.START_TEXT_TOKEN)
+        text_codes = F.pad(text_codes, (0,1), value=self.STOP_TEXT_TOKEN)
         _, enc_text = self.encoder(text_codes, return_hiddens=True)
         # Interleave cond_emb into the first few contexts.
         full_context = enc_text

@@ -255,6 +260,8 @@ class AutoregressiveCodegen(nn.Module):
         for i in range(conditioning_signal.shape[1]):
             cond_embs.append(self.mel_embedding(conditioning_signal[:, i]))
         cond_emb = torch.stack(cond_embs, dim=1).mean(dim=1, keepdim=True)
+        text_codes = F.pad(text_codes, (1,0), value=self.START_TEXT_TOKEN)
+        text_codes = F.pad(text_codes, (0,1), value=self.STOP_TEXT_TOKEN)
         _, enc_text = self.encoder(text_codes, return_hiddens=True)
         # Interleave cond_emb into the first few contexts.
         full_context = enc_text
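On a toy tensor, the new START/STOP text padding behaves like this: F.pad with (1, 0) prepends one column and (0, 1) appends one, so every text sequence gets fixed begin/end anchors even though the positional embeddings are relative. Self-contained sketch; the token ids in the input are made up:

import torch
import torch.nn.functional as F

START_TEXT_TOKEN, STOP_TEXT_TOKEN = 255, 0
text_codes = torch.tensor([[12, 87, 34]])        # hypothetical text token ids
text_codes = F.pad(text_codes, (1, 0), value=START_TEXT_TOKEN)
text_codes = F.pad(text_codes, (0, 1), value=STOP_TEXT_TOKEN)
print(text_codes)  # tensor([[255, 12, 87, 34, 0]])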
models/text_voice_clip.py
CHANGED
@@ -55,7 +55,6 @@ class VoiceCLIP(nn.Module):
             needs_permute=False,
             exit_permute=False,
             max_seq_len=-1,
-            use_pos_emb=False,
             attn_layers=Encoder(
                 dim=dim_text,
                 depth=text_enc_depth,

@@ -71,7 +70,6 @@ class VoiceCLIP(nn.Module):
             needs_permute=False,
             exit_permute=False,
             max_seq_len=-1,
-            use_pos_emb=False,
             attn_layers=Encoder(
                 dim=dim_speech,
                 depth=speech_enc_depth,
models/xtransformers.py
CHANGED
@@ -1186,7 +1186,9 @@ class TransformerWrapper(nn.Module):
         if use_cache:
             res.append(intermediates.past_key_values)

-
+        if len(res) > 1:
+            return tuple(res)
+        return res[0]


 class ContinuousTransformerWrapper(nn.Module):

@@ -1247,7 +1249,9 @@ class ContinuousTransformerWrapper(nn.Module):
         if use_cache:
             res.append(intermediates.past_key_values)

-
+        if len(res) > 1:
+            return tuple(res)
+        return res[0]


 class XTransformer(nn.Module):
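Caller-side view of the new return convention, as a standalone sketch: with only one output collected, the wrappers now hand back that value directly instead of a one-element list, so existing single-return callers keep working while use_cache callers receive a tuple.

def unwrap(res):
    # mirrors the lines added above
    if len(res) > 1:
        return tuple(res)
    return res[0]

print(unwrap(['logits']))            # 'logits'
print(unwrap(['logits', 'cache']))   # ('logits', 'cache')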