import argparse
import os
from time import time

import torch
import torchaudio

from api import TextToSpeech, MODELS_DIR
from utils.audio import load_audio, load_voices
from utils.text import split_and_recombine_text


def _str2bool(value):
    """Parse a command-line string into a bool.

    argparse's ``type=bool`` treats ANY non-empty string (including
    ``"False"``) as True, so an explicit parser is required for the
    ``--produce_debug_state`` flag to be controllable from the CLI.
    """
    if isinstance(value, bool):
        return value
    return value.strip().lower() not in ('false', '0', 'no', 'n', 'off', '')


if __name__ == '__main__':
    # Long-form text-to-speech driver: splits a text file into chunks,
    # synthesizes each chunk per selected voice, and concatenates the results.
    parser = argparse.ArgumentParser()
    parser.add_argument('--textfile', type=str, help='A file containing the text to read.', default="tortoise/data/riding_hood.txt")
    parser.add_argument('--voice', type=str, help='Selects the voice to use for generation. See options in voices/ directory (and add your own!) '
                                                  'Use the & character to join two voices together. Use a comma to perform inference on multiple voices.', default='pat')
    parser.add_argument('--output_path', type=str, help='Where to store outputs.', default='results/longform/')
    parser.add_argument('--preset', type=str, help='Which voice preset to use.', default='standard')
    parser.add_argument('--regenerate', type=str, help='Comma-separated list of clip numbers to re-generate, or nothing.', default=None)
    parser.add_argument('--candidates', type=int, help='How many output candidates to produce per-voice. Only the first candidate is actually used in the final product, the others can be used manually.', default=1)
    parser.add_argument('--model_dir', type=str, help='Where to find pretrained model checkpoints. Tortoise automatically downloads these to .models, so this '
                                                      'should only be specified if you have custom checkpoints.', default=MODELS_DIR)
    parser.add_argument('--seed', type=int, help='Random seed which can be used to reproduce results.', default=None)
    # FIX: was type=bool, which turned every non-empty value (even "False")
    # into True. _str2bool keeps the same flag name and default.
    parser.add_argument('--produce_debug_state', type=_str2bool, help='Whether or not to produce debug_state.pth, which can aid in reproducing problems. Defaults to true.', default=True)
    args = parser.parse_args()

    tts = TextToSpeech(models_dir=args.model_dir)

    outpath = args.output_path
    selected_voices = args.voice.split(',')
    regenerate = args.regenerate
    if regenerate is not None:
        # Only these chunk indices will be re-synthesized; others are reloaded.
        regenerate = [int(e) for e in regenerate.split(',')]

    # Process text: either split on explicit '|' markers or auto-chunk.
    with open(args.textfile, 'r', encoding='utf-8') as f:
        text = ' '.join([l for l in f.readlines()])
    if '|' in text:
        print("Found the '|' character in your text, which I will use as a cue for where to split it up. If this was not "
              "your intent, please remove all '|' characters from the input.")
        texts = text.split('|')
    else:
        texts = split_and_recombine_text(text)

    # Pin a seed so a run can be reproduced (recorded in the debug state below).
    seed = int(time()) if args.seed is None else args.seed
    for selected_voice in selected_voices:
        voice_outpath = os.path.join(outpath, selected_voice)
        os.makedirs(voice_outpath, exist_ok=True)

        # '&' combines several voices into one joint conditioning set.
        if '&' in selected_voice:
            voice_sel = selected_voice.split('&')
        else:
            voice_sel = [selected_voice]

        voice_samples, conditioning_latents = load_voices(voice_sel)

        all_parts = []
        for j, text in enumerate(texts):
            if regenerate is not None and j not in regenerate:
                # Reuse the previously generated clip instead of re-synthesizing.
                all_parts.append(load_audio(os.path.join(voice_outpath, f'{j}.wav'), 24000))
                continue
            gen = tts.tts_with_preset(text, voice_samples=voice_samples, conditioning_latents=conditioning_latents,
                                      preset=args.preset, k=args.candidates, use_deterministic_seed=seed)
            if args.candidates == 1:
                gen = gen.squeeze(0).cpu()
                torchaudio.save(os.path.join(voice_outpath, f'{j}.wav'), gen, 24000)
            else:
                # With multiple candidates, save each under <voice>/<chunk>/<k>.wav
                # and keep only the first candidate for the combined output.
                candidate_dir = os.path.join(voice_outpath, str(j))
                os.makedirs(candidate_dir, exist_ok=True)
                for k, g in enumerate(gen):
                    torchaudio.save(os.path.join(candidate_dir, f'{k}.wav'), g.squeeze(0).cpu(), 24000)
                gen = gen[0].squeeze(0).cpu()
            all_parts.append(gen)

        if args.candidates == 1:
            full_audio = torch.cat(all_parts, dim=-1)
            torchaudio.save(os.path.join(voice_outpath, 'combined.wav'), full_audio, 24000)

        if args.produce_debug_state:
            # Persist everything needed to reproduce this run's output.
            os.makedirs('debug_states', exist_ok=True)
            dbg_state = (seed, texts, voice_samples, conditioning_latents)
            torch.save(dbg_state, f'debug_states/read_debug_{selected_voice}.pth')

        # Combine each candidate's audio clips into one combined_<k>.wav each.
        if args.candidates > 1:
            audio_clips = []
            for candidate in range(args.candidates):
                for line in range(len(texts)):
                    wav_file = os.path.join(voice_outpath, str(line), f'{candidate}.wav')
                    audio_clips.append(load_audio(wav_file, 24000))
                audio_clips = torch.cat(audio_clips, dim=-1)
                torchaudio.save(os.path.join(voice_outpath, f'combined_{candidate:02d}.wav'), audio_clips, 24000)
                audio_clips = []