Update 'src/utils.py'

whisper->whisperx
yqxtqymn 2023-03-06 00:47:56 +00:00
parent 079cd32074
commit 9ca5192309

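This change swaps the dataset transcription backend in prepare_dataset() from openai/whisper to m-bain/whisperX, which adds forced alignment for word-level timestamps. As a rough orientation, here is a minimal sketch of the whisperX flow the updated code follows; the audio path and the hard-coded "medium" model size are placeholders, and the call signatures are taken from the diff below rather than verified against the current whisperX API:

import whisperx

device = "cuda"  # the new code hard-codes CUDA; a CPU option is left as a TODO
audio_file = "./voices/example/sample.wav"  # placeholder path

# transcribe with the base Whisper model (tiny, base, small, medium, large, large-v2)
model = whisperx.load_model("medium", device)
result = model.transcribe(audio_file)
print(result["segments"])  # segments before alignment

# align the transcription for refined, word-level timestamps
model_a, metadata = whisperx.load_align_model(language_code=result["language"], device=device)
result_aligned = whisperx.align(result["segments"], model_a, metadata, audio_file, device)
print(result_aligned["segments"])       # segments after alignment
print(result_aligned["word_segments"])  # word-level timestamps

prepare_dataset() then slices each source WAV on segment['start'] / segment['end'] (seconds scaled by the sample rate), saves the numbered WAV clips, and writes the transcript lines to train.txt alongside a whisper.json dump of the raw results.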

@ -1,4 +1,5 @@
import os
if 'XDG_CACHE_HOME' not in os.environ:
os.environ['XDG_CACHE_HOME'] = os.path.realpath(os.path.join(os.getcwd(), './models/'))
@ -36,8 +37,9 @@ from tortoise.utils.audio import load_audio, load_voice, load_voices, get_voice_
from tortoise.utils.text import split_and_recombine_text
from tortoise.utils.device import get_device_name, set_device_name
MODELS['dvae.pth'] = "https://huggingface.co/jbetker/tortoise-tts-v2/resolve/3704aea61678e7e468a06d8eea121dba368a798e/.models/dvae.pth"
WHISPER_MODELS = ["tiny", "base", "small", "medium", "large"]
MODELS[
'dvae.pth'] = "https://huggingface.co/jbetker/tortoise-tts-v2/resolve/3704aea61678e7e468a06d8eea121dba368a798e/.models/dvae.pth"
WHISPER_MODELS = ["tiny", "base", "small", "medium", "large", "large-v2"]
WHISPER_SPECIALIZED_MODELS = ["tiny.en", "base.en", "small.en", "medium.en"]
EPOCH_SCHEDULE = [9, 18, 25, 33]
@ -49,6 +51,7 @@ voicefixer = None
whisper_model = None
training_state = None
def generate(
text,
delimiter,
@ -110,13 +113,16 @@ def generate(
if voice_samples and len(voice_samples) > 0:
sample_voice = torch.cat(voice_samples, dim=-1).squeeze().cpu()
conditioning_latents = tts.get_conditioning_latents(voice_samples, return_mels=not args.latents_lean_and_mean, progress=progress, slices=voice_latents_chunks, force_cpu=args.force_cpu_for_conditioning_latents)
conditioning_latents = tts.get_conditioning_latents(voice_samples, return_mels=not args.latents_lean_and_mean,
progress=progress, slices=voice_latents_chunks,
force_cpu=args.force_cpu_for_conditioning_latents)
if len(conditioning_latents) == 4:
conditioning_latents = (conditioning_latents[0], conditioning_latents[1], conditioning_latents[2], None)
if voice != "microphone":
if hasattr(tts, 'autoregressive_model_hash'):
torch.save(conditioning_latents, f'{get_voice_dir()}/{voice}/cond_latents_{tts.autoregressive_model_hash[:8]}.pth')
torch.save(conditioning_latents,
f'{get_voice_dir()}/{voice}/cond_latents_{tts.autoregressive_model_hash[:8]}.pth')
else:
torch.save(conditioning_latents, f'{get_voice_dir()}/{voice}/cond_latents.pth')
voice_samples = None
@ -132,10 +138,10 @@ def generate(
seed = None
if conditioning_latents is not None and len(conditioning_latents) == 2 and cvvp_weight > 0:
print("Requesting weighing against CVVP weight, but voice latents are missing some extra data. Please regenerate your voice latents.")
print(
"Requesting weighing against CVVP weight, but voice latents are missing some extra data. Please regenerate your voice latents.")
cvvp_weight = 0
settings = {
'temperature': float(temperature),
@ -199,7 +205,8 @@ def generate(
beta=8.555504641634386,
)
volume_adjust = torchaudio.transforms.Vol(gain=args.output_volume, gain_type="amplitude") if args.output_volume != 1 else None
volume_adjust = torchaudio.transforms.Vol(gain=args.output_volume,
gain_type="amplitude") if args.output_volume != 1 else None
idx = 0
idx_cache = {}
@ -380,7 +387,6 @@ def generate(
with open(f'{outdir}/{voice}_{name}.json', 'w', encoding="utf-8") as f:
f.write(json.dumps(info, indent='\t'))
if voice and voice != "random" and conditioning_latents is not None:
latents_path = f'{get_voice_dir()}/{voice}/cond_latents.pth'
@ -428,10 +434,12 @@ def generate(
stats,
)
def cancel_generate():
import tortoise.api
tortoise.api.STOP_SIGNAL = True
def hash_file(path, algo="md5", buffer_size=0):
import hashlib
@ -458,6 +466,7 @@ def hash_file(path, algo="md5", buffer_size=0):
return "{0}".format(hash.hexdigest())
def update_baseline_for_latents_chunks(voice):
path = f'{get_voice_dir()}/{voice}/'
if not os.path.isdir(path):
@ -481,6 +490,7 @@ def update_baseline_for_latents_chunks( voice ):
return int(total_duration / total) if total > 0 else 1
return int(total_duration / args.autocalculate_voice_chunk_duration_size) if total_duration > 0 else 1
def compute_latents(voice, voice_latents_chunks, progress=gr.Progress(track_tqdm=True)):
global tts
global args
@ -498,18 +508,22 @@ def compute_latents(voice, voice_latents_chunks, progress=gr.Progress(track_tqdm
if voice_samples is None:
return
conditioning_latents = tts.get_conditioning_latents(voice_samples, return_mels=not args.latents_lean_and_mean, progress=progress, slices=voice_latents_chunks, force_cpu=args.force_cpu_for_conditioning_latents)
conditioning_latents = tts.get_conditioning_latents(voice_samples, return_mels=not args.latents_lean_and_mean,
progress=progress, slices=voice_latents_chunks,
force_cpu=args.force_cpu_for_conditioning_latents)
if len(conditioning_latents) == 4:
conditioning_latents = (conditioning_latents[0], conditioning_latents[1], conditioning_latents[2], None)
if hasattr(tts, 'autoregressive_model_hash'):
torch.save(conditioning_latents, f'{get_voice_dir()}/{voice}/cond_latents_{tts.autoregressive_model_hash[:8]}.pth')
torch.save(conditioning_latents,
f'{get_voice_dir()}/{voice}/cond_latents_{tts.autoregressive_model_hash[:8]}.pth')
else:
torch.save(conditioning_latents, f'{get_voice_dir()}/{voice}/cond_latents.pth')
return voice
# superfluous, but it cleans up some things
class TrainingState():
def __init__(self, config_path, keep_x_past_datasets=0, start=True, gpus=1):
@ -580,7 +594,8 @@ class TrainingState():
self.cmd = ['train.bat', config_path] if os.name == "nt" else ['./train.sh', str(int(gpus)), config_path]
print("Spawning process: ", " ".join(self.cmd))
self.process = subprocess.Popen(self.cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True)
self.process = subprocess.Popen(self.cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
universal_newlines=True)
def load_losses(self, update=False):
if not os.path.isdir(f'{self.dataset_dir}/tb_logger/'):
@ -599,7 +614,8 @@ class TrainingState():
self.statistics = []
if use_tensorboard:
logs = sorted([f'{self.dataset_dir}/tb_logger/{d}' for d in os.listdir(f'{self.dataset_dir}/tb_logger/') if d[:6] == "events" ])
logs = sorted([f'{self.dataset_dir}/tb_logger/{d}' for d in os.listdir(f'{self.dataset_dir}/tb_logger/') if
d[:6] == "events"])
if update:
logs = [logs[-1]]
@ -893,6 +909,7 @@ class TrainingState():
message,
)
def run_training(config_path, verbose=False, gpus=1, keep_x_past_datasets=0, progress=gr.Progress(track_tqdm=True)):
global training_state
if training_state and training_state.process:
@ -911,7 +928,8 @@ def run_training(config_path, verbose=False, gpus=1, keep_x_past_datasets=0, pro
if training_state.killed:
return
result, percent, message = training_state.parse( line=line, verbose=verbose, keep_x_past_datasets=keep_x_past_datasets, progress=progress )
result, percent, message = training_state.parse(line=line, verbose=verbose,
keep_x_past_datasets=keep_x_past_datasets, progress=progress)
print(f"[Training] [{datetime.now().isoformat()}] {line[:-1]}")
if result:
yield result
@ -924,6 +942,14 @@ def run_training(config_path, verbose=False, gpus=1, keep_x_past_datasets=0, pro
return_code = training_state.process.wait()
training_state = None
def get_training_losses():
global training_state
if not training_state or not training_state.statistics:
return
return pd.DataFrame(training_state.statistics)
def update_training_dataplot(config_path=None):
global training_state
update = None
@ -932,22 +958,24 @@ def update_training_dataplot(config_path=None):
if config_path:
training_state = TrainingState(config_path=config_path, start=False)
if training_state.statistics:
update = gr.LinePlot.update(value=pd.DataFrame(training_state.statistics), x_lim=[0,training_state.its], x="step", y="value", title="Training Metrics", color="type", tooltip=['step', 'value', 'type'], width=600, height=350,)
update = gr.LinePlot.update(value=pd.DataFrame(training_state.statistics))
del training_state
training_state = None
elif training_state.statistics:
training_state.load_losses()
update = gr.LinePlot.update(value=pd.DataFrame(training_state.statistics), x_lim=[0,training_state.its], x="step", y="value", title="Training Metrics", color="type", tooltip=['step', 'value', 'type'], width=600, height=350,)
update = gr.LinePlot.update(value=pd.DataFrame(training_state.statistics))
return update
def reconnect_training(verbose=False, progress=gr.Progress(track_tqdm=True)):
global training_state
if not training_state or not training_state.process:
return "Training not in progress"
for line in iter(training_state.process.stdout.readline, ""):
result, percent, message = training_state.parse( line=line, verbose=verbose, keep_x_past_datasets=keep_x_past_datasets, progress=progress )
result, percent, message = training_state.parse(line=line, verbose=verbose,
keep_x_past_datasets=keep_x_past_datasets, progress=progress)
print(f"[Training] [{datetime.now().isoformat()}] {line[:-1]}")
if result:
yield result
@ -955,6 +983,7 @@ def reconnect_training(verbose=False, progress=gr.Progress(track_tqdm=True)):
if progress is not None and message:
progress(percent, message)
def stop_training():
global training_state
if training_state is None:
@ -965,7 +994,8 @@ def stop_training():
children = []
# wrapped in a try/catch in case for some reason this fails outside of Linux
try:
children = [p.info for p in psutil.process_iter(attrs=['pid', 'name', 'cmdline']) if './src/train.py' in p.info['cmdline']]
children = [p.info for p in psutil.process_iter(attrs=['pid', 'name', 'cmdline']) if
'./src/train.py' in p.info['cmdline']]
except Exception as e:
pass
@ -981,10 +1011,12 @@ def stop_training():
print("Killed training process.")
return f"Training cancelled: {return_code}"
def get_halfp_model_path():
autoregressive_model_path = get_model_path('autoregressive.pth')
return autoregressive_model_path.replace(".pth", "_half.pth")
def convert_to_halfp():
autoregressive_model_path = get_model_path('autoregressive.pth')
print(f'Converting model to half precision: {autoregressive_model_path}')
@ -996,66 +1028,60 @@ def convert_to_halfp():
torch.save(model, outfile)
print(f'Converted model to half precision: {outfile}')
def whisper_transcribe( file, language=None ):
# shouldn't happen, but it's for safety
if not whisper_model:
load_whisper_model(language=language)
if not args.whisper_cpp:
if not language:
language = None
return whisper_model.transcribe(file, language=language)
res = whisper_model.transcribe(file)
segments = whisper_model.extract_text_and_timestamps( res )
result = {
'segments': []
}
for segment in segments:
reparsed = {
'start': segment[0] / 100.0,
'end': segment[1] / 100.0,
'text': segment[2],
}
result['segments'].append(reparsed)
return result
def prepare_dataset(files, outdir, language=None, progress=None):
unload_tts()
global whisper_model
if whisper_model is None:
load_whisper_model(language=language)
import whisperx
device = "cuda" # add cpu option?
# original whisper https://github.com/openai/whisper
# whisperx fork https://github.com/m-bain/whisperX
# supports en, fr, de, es, it, ja, zh, nl, uk, pt
# tiny, base, small, medium, large, large-v2
whisper_model = whisperx.load_model("medium", device)
# some additional model features require huggingface token
os.makedirs(outdir, exist_ok=True)
idx = 0
results = {}
transcription = []
for file in enumerate_progress(files, desc="Iterating through voice files", progress=progress):
basename = os.path.basename(file)
result = whisper_transcribe(file, language=language)
results[basename] = result
print(f"Transcribing file: {file}")
result = whisper_model.transcribe(file)
print(result["segments"]) # before alignment
# load alignment model and metadata
model_a, metadata = whisperx.load_align_model(language_code=result["language"], device=device)
# align whisper output
result_aligned = whisperx.align(result["segments"], model_a, metadata, file, device)
print(result_aligned["segments"]) # after alignment
print(result_aligned["word_segments"]) # after alignment
results[os.path.basename(file)] = result
print(f"Transcribed file: {file}, {len(result['segments'])} found.")
waveform, sampling_rate = torchaudio.load(file)
num_channels, num_frames = waveform.shape
idx = 0
for segment in result['segments']: # enumerate_progress(result['segments'], desc="Segmenting voice file", progress=progress):
for segment in result[
'segments']: # enumerate_progress(result['segments'], desc="Segmenting voice file", progress=progress):
start = int(segment['start'] * sampling_rate)
end = int(segment['end'] * sampling_rate)
sliced_waveform = waveform[:, start:end]
sliced_name = basename.replace(".wav", f"_{pad(idx, 4)}.wav")
if not torch.any(sliced_waveform < 0):
print(f"Error with {sliced_name}, skipping...")
continue
sliced_name = f"{pad(idx, 4)}.wav"
torchaudio.save(f"{outdir}/{sliced_name}", sliced_waveform, sampling_rate)
@ -1068,22 +1094,26 @@ def prepare_dataset( files, outdir, language=None, progress=None ):
with open(f'{outdir}/whisper.json', 'w', encoding="utf-8") as f:
f.write(json.dumps(results, indent='\t'))
joined = '\n'.join(transcription)
with open(f'{outdir}/train.txt', 'w', encoding="utf-8") as f:
f.write(joined)
f.write("\n".join(transcription))
unload_whisper()
return f"Processed dataset to: {outdir}\n{joined}"
return f"Processed dataset to: {outdir}"
def calc_iterations(epochs, lines, batch_size):
iterations = int(epochs * lines / float(batch_size))
return iterations
def schedule_learning_rate(iterations, schedule=EPOCH_SCHEDULE):
return [int(iterations * d) for d in schedule]
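# Illustrative example (values assumed, not from the diff): with 100 dataset
# lines, a batch size of 50 and 500 epochs, calc_iterations returns
# int(500 * 100 / 50.0) = 1000 steps, and schedule_learning_rate(1000) maps
# EPOCH_SCHEDULE = [9, 18, 25, 33] to gen_lr_steps = [9000, 18000, 25000, 33000].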
def optimize_training_settings( epochs, learning_rate, text_ce_lr_weight, learning_rate_schedule, batch_size, gradient_accumulation_size, print_rate, save_rate, resume_path, half_p, bnb, workers, source_model, voice ):
def optimize_training_settings(epochs, learning_rate, text_ce_lr_weight, learning_rate_schedule, batch_size,
gradient_accumulation_size, print_rate, save_rate, resume_path, half_p, bnb, workers,
source_model, voice):
name = f"{voice}-finetune"
dataset_name = f"{voice}-train"
dataset_path = f"./training/{voice}/train.txt"
@ -1102,7 +1132,8 @@ def optimize_training_settings( epochs, learning_rate, text_ce_lr_weight, learni
if batch_size % lines != 0:
nearest_slice = int(lines / batch_size) + 1
batch_size = int(lines / nearest_slice)
messages.append(f"Batch size not neatly divisible by dataset size, adjusting batch size to: {batch_size} ({nearest_slice} steps per epoch)")
messages.append(
f"Batch size not neatly divisible by dataset size, adjusting batch size to: {batch_size} ({nearest_slice} steps per epoch)")
if gradient_accumulation_size == 0:
gradient_accumulation_size = 1
@ -1112,13 +1143,15 @@ def optimize_training_settings( epochs, learning_rate, text_ce_lr_weight, learni
if gradient_accumulation_size == 0:
gradient_accumulation_size = 1
messages.append(f"Gradient accumulation size is too large for a given batch size, clamping gradient accumulation size to: {gradient_accumulation_size}")
messages.append(
f"Gradient accumulation size is too large for a given batch size, clamping gradient accumulation size to: {gradient_accumulation_size}")
elif batch_size % gradient_accumulation_size != 0:
gradient_accumulation_size = int(batch_size / gradient_accumulation_size)
if gradient_accumulation_size == 0:
gradient_accumulation_size = 1
messages.append(f"Batch size is not evenly divisible by the gradient accumulation size, adjusting gradient accumulation size to: {gradient_accumulation_size}")
messages.append(
f"Batch size is not evenly divisible by the gradient accumulation size, adjusting gradient accumulation size to: {gradient_accumulation_size}")
iterations = calc_iterations(epochs=epochs, lines=lines, batch_size=batch_size)
@ -1140,13 +1173,15 @@ def optimize_training_settings( epochs, learning_rate, text_ce_lr_weight, learni
if half_p:
if bnb:
half_p = False
messages.append("Half Precision requested, but BitsAndBytes is also requested. Due to redundancies, disabling half precision...")
messages.append(
"Half Precision requested, but BitsAndBytes is also requested. Due to redundancies, disabling half precision...")
else:
messages.append("Half Precision requested. Please note this is ! EXPERIMENTAL !")
if not os.path.exists(get_halfp_model_path()):
convert_to_halfp()
messages.append(f"For {epochs} epochs with {lines} lines in batches of {batch_size}, iterating for {iterations} steps ({int(iterations / epochs)} steps per epoch)")
messages.append(
f"For {epochs} epochs with {lines} lines in batches of {batch_size}, iterating for {iterations} steps ({int(iterations / epochs)} steps per epoch)")
return (
learning_rate,
@ -1160,7 +1195,11 @@ def optimize_training_settings( epochs, learning_rate, text_ce_lr_weight, learni
messages
)
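# Illustrative walk-through of the clamping above (values assumed): with a
# 100-line train.txt and a requested batch size of 64, 64 % 100 != 0, so
# nearest_slice = int(100 / 64) + 1 = 2 and batch_size becomes int(100 / 2) = 50;
# a requested gradient_accumulation_size of 4 is then adjusted to
# int(50 / 4) = 12 because 50 % 4 != 0.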
def save_training_settings( iterations=None, learning_rate=None, text_ce_lr_weight=None, learning_rate_schedule=None, batch_size=None, gradient_accumulation_size=None, print_rate=None, save_rate=None, name=None, dataset_name=None, dataset_path=None, validation_name=None, validation_path=None, output_name=None, resume_path=None, half_p=None, bnb=None, workers=None, source_model=None ):
def save_training_settings(iterations=None, learning_rate=None, text_ce_lr_weight=None, learning_rate_schedule=None,
batch_size=None, gradient_accumulation_size=None, print_rate=None, save_rate=None, name=None,
dataset_name=None, dataset_path=None, validation_name=None, validation_path=None,
output_name=None, resume_path=None, half_p=None, bnb=None, workers=None, source_model=None):
if not source_model:
source_model = f"./models/tortoise/autoregressive{'_half' if half_p else ''}.pth"
@ -1201,6 +1240,158 @@ def save_training_settings( iterations=None, learning_rate=None, text_ce_lr_weig
if not output_name:
output_name = f'{settings["name"]}.yaml'
with open(f'./models/.template.yaml', 'r', encoding="utf-8") as f:
yaml = f.read()
# i could just load and edit the YAML directly, but this is easier, as I don't need to bother with path traversals
for k in settings:
if settings[k] is None:
continue
yaml = yaml.replace(f"${{{k}}}", str(settings[k]))
outfile = f'./training/{output_name}'
with open(outfile, 'w', encoding="utf-8") as f:
f.write(yaml)
return f"Training settings saved to: {outfile}"
def calc_iterations(epochs, lines, batch_size):
iterations = int(epochs * lines / float(batch_size))
return iterations
def schedule_learning_rate(iterations, schedule=EPOCH_SCHEDULE):
return [int(iterations * d) for d in schedule]
def optimize_training_settings(epochs, learning_rate, text_ce_lr_weight, learning_rate_schedule, batch_size,
gradient_accumulation_size, print_rate, save_rate, resume_path, half_p, bnb, workers,
source_model, voice):
name = f"{voice}-finetune"
dataset_name = f"{voice}-train"
dataset_path = f"./training/{voice}/train.txt"
validation_name = f"{voice}-val"
validation_path = f"./training/{voice}/train.txt"
with open(dataset_path, 'r', encoding="utf-8") as f:
lines = len(f.readlines())
messages = []
if batch_size > lines:
batch_size = lines
messages.append(f"Batch size is larger than your dataset, clamping batch size to: {batch_size}")
if batch_size % lines != 0:
nearest_slice = int(lines / batch_size) + 1
batch_size = int(lines / nearest_slice)
messages.append(
f"Batch size not neatly divisible by dataset size, adjusting batch size to: {batch_size} ({nearest_slice} steps per epoch)")
if gradient_accumulation_size == 0:
gradient_accumulation_size = 1
if batch_size / gradient_accumulation_size < 2:
gradient_accumulation_size = int(batch_size / 2)
if gradient_accumulation_size == 0:
gradient_accumulation_size = 1
messages.append(
f"Gradient accumulation size is too large for a given batch size, clamping gradient accumulation size to: {gradient_accumulation_size}")
elif batch_size % gradient_accumulation_size != 0:
gradient_accumulation_size = int(batch_size / gradient_accumulation_size)
if gradient_accumulation_size == 0:
gradient_accumulation_size = 1
messages.append(
f"Batch size is not evenly divisible by the gradient accumulation size, adjusting gradient accumulation size to: {gradient_accumulation_size}")
iterations = calc_iterations(epochs=epochs, lines=lines, batch_size=batch_size)
if epochs < print_rate:
print_rate = epochs
messages.append(f"Print rate is too small for the given iteration step, clamping print rate to: {print_rate}")
if epochs < save_rate:
save_rate = epochs
messages.append(f"Save rate is too small for the given iteration step, clamping save rate to: {save_rate}")
if resume_path and not os.path.exists(resume_path):
resume_path = None
messages.append("Resume path specified, but does not exist. Disabling...")
if bnb:
messages.append("BitsAndBytes requested. Please note this is ! EXPERIMENTAL !")
if half_p:
if bnb:
half_p = False
messages.append(
"Half Precision requested, but BitsAndBytes is also requested. Due to redundancies, disabling half precision...")
else:
messages.append("Half Precision requested. Please note this is ! EXPERIMENTAL !")
if not os.path.exists(get_halfp_model_path()):
convert_to_halfp()
messages.append(
f"For {epochs} epochs with {lines} lines in batches of {batch_size}, iterating for {iterations} steps ({int(iterations / epochs)} steps per epoch)")
return (
learning_rate,
text_ce_lr_weight,
learning_rate_schedule,
batch_size,
gradient_accumulation_size,
print_rate,
save_rate,
resume_path,
messages
)
def save_training_settings(iterations=None, learning_rate=None, text_ce_lr_weight=None, learning_rate_schedule=None,
batch_size=None, gradient_accumulation_size=None, print_rate=None, save_rate=None, name=None,
dataset_name=None, dataset_path=None, validation_name=None, validation_path=None,
output_name=None, resume_path=None, half_p=None, bnb=None, workers=None, source_model=None):
if not source_model:
source_model = f"./models/tortoise/autoregressive{'_half' if half_p else ''}.pth"
settings = {
"iterations": iterations if iterations else 500,
"batch_size": batch_size if batch_size else 64,
"learning_rate": learning_rate if learning_rate else 1e-5,
"gen_lr_steps": learning_rate_schedule if learning_rate_schedule else EPOCH_SCHEDULE,
"gradient_accumulation_size": gradient_accumulation_size if gradient_accumulation_size else 4,
"print_rate": print_rate if print_rate else 1,
"save_rate": save_rate if save_rate else 50,
"name": name if name else "finetune",
"dataset_name": dataset_name if dataset_name else "finetune",
"dataset_path": dataset_path if dataset_path else "./training/finetune/train.txt",
"validation_name": validation_name if validation_name else "finetune",
"validation_path": validation_path if validation_path else "./training/finetune/train.txt",
"text_ce_lr_weight": text_ce_lr_weight if text_ce_lr_weight else 0.01,
'resume_state': f"resume_state: '{resume_path}'",
'pretrain_model_gpt': f"pretrain_model_gpt: '{source_model}'",
'float16': 'true' if half_p else 'false',
'bitsandbytes': 'true' if bnb else 'false',
'workers': workers if workers else 2,
}
if resume_path:
settings['pretrain_model_gpt'] = f"# {settings['pretrain_model_gpt']}"
else:
settings['resume_state'] = f"# resume_state: './training/{name if name else 'finetune'}/training_state/#.state'"
if half_p:
if not os.path.exists(get_halfp_model_path()):
convert_to_halfp()
if not output_name:
output_name = f'{settings["name"]}.yaml'
with open(f'./models/.template.yaml', 'r', encoding="utf-8") as f:
yaml = f.read()
@ -1217,6 +1408,7 @@ def save_training_settings( iterations=None, learning_rate=None, text_ce_lr_weig
return f"Training settings saved to: {outfile}"
def import_voices(files, saveAs=None, progress=None):
global args
@ -1282,13 +1474,16 @@ def import_voices(files, saveAs=None, progress=None):
print(f"Imported voice to {path}")
def get_voice_list(dir=get_voice_dir(), append_defaults=False):
os.makedirs(dir, exist_ok=True)
res = sorted([d for d in os.listdir(dir) if os.path.isdir(os.path.join(dir, d)) and len(os.listdir(os.path.join(dir, d))) > 0 ])
res = sorted([d for d in os.listdir(dir) if
os.path.isdir(os.path.join(dir, d)) and len(os.listdir(os.path.join(dir, d))) > 0])
if append_defaults:
res = res + ["random", "microphone"]
return res
def get_autoregressive_models(dir="./models/finetunes/", prefixed=False):
os.makedirs(dir, exist_ok=True)
base = [get_model_path('autoregressive.pth')]
@ -1316,11 +1511,16 @@ def get_autoregressive_models(dir="./models/finetunes/", prefixed=False):
return res
def get_dataset_list(dir="./training/"):
return sorted([d for d in os.listdir(dir) if os.path.isdir(os.path.join(dir, d)) and len(os.listdir(os.path.join(dir, d))) > 0 and "train.txt" in os.listdir(os.path.join(dir, d)) ])
return sorted([d for d in os.listdir(dir) if os.path.isdir(os.path.join(dir, d)) and len(
os.listdir(os.path.join(dir, d))) > 0 and "train.txt" in os.listdir(os.path.join(dir, d))])
def get_training_list(dir="./training/"):
return sorted([f'./training/{d}/train.yaml' for d in os.listdir(dir) if os.path.isdir(os.path.join(dir, d)) and len(os.listdir(os.path.join(dir, d))) > 0 and "train.yaml" in os.listdir(os.path.join(dir, d)) ])
return sorted([f'./training/{d}/train.yaml' for d in os.listdir(dir) if os.path.isdir(os.path.join(dir, d)) and len(
os.listdir(os.path.join(dir, d))) > 0 and "train.yaml" in os.listdir(os.path.join(dir, d))])
def do_gc():
gc.collect()
@ -1329,9 +1529,11 @@ def do_gc():
except Exception as e:
pass
def pad(num, zeroes):
return str(num).zfill(zeroes + 1)
def curl(url):
try:
req = urllib.request.Request(url, headers={'User-Agent': 'Python'})
@ -1345,6 +1547,7 @@ def curl(url):
print(e)
return None
def check_for_updates():
if not os.path.isfile('./.git/FETCH_HEAD'):
print("Cannot check for updates: not from a git repo")
@ -1379,13 +1582,16 @@ def check_for_updates():
return False
def enumerate_progress(iterable, desc=None, progress=None, verbose=None):
if verbose and desc is not None:
print(desc)
if progress is None:
return tqdm(iterable, disable=not verbose)
return progress.tqdm(iterable, desc=f'{progress.msg_prefix} {desc}' if hasattr(progress, 'msg_prefix') else desc, track_tqdm=True)
return progress.tqdm(iterable, desc=f'{progress.msg_prefix} {desc}' if hasattr(progress, 'msg_prefix') else desc,
track_tqdm=True)
def notify_progress(message, progress=None, verbose=True):
if verbose:
@ -1396,10 +1602,12 @@ def notify_progress(message, progress=None, verbose=True):
progress(0, desc=message)
def get_args():
global args
return args
def setup_args():
global args
@ -1412,7 +1620,8 @@ def setup_args():
'sample-batch-size': None,
'embed-output-metadata': True,
'latents-lean-and-mean': True,
'voice-fixer': False, # getting tired of long initialization times in a Colab for downloading a large dataset for it
'voice-fixer': False,
# getting tired of long initialization times in a Colab for downloading a large dataset for it
'voice-fixer-use-cuda': True,
'force-cpu-for-conditioning-latents': False,
'defer-tts-load': False,
@ -1443,32 +1652,61 @@ def setup_args():
pass
parser = argparse.ArgumentParser()
parser.add_argument("--share", action='store_true', default=default_arguments['share'], help="Lets Gradio return a public URL to use anywhere")
parser.add_argument("--share", action='store_true', default=default_arguments['share'],
help="Lets Gradio return a public URL to use anywhere")
parser.add_argument("--listen", default=default_arguments['listen'], help="Path for Gradio to listen on")
parser.add_argument("--check-for-updates", action='store_true', default=default_arguments['check-for-updates'], help="Checks for update on startup")
parser.add_argument("--models-from-local-only", action='store_true', default=default_arguments['models-from-local-only'], help="Only loads models from disk, does not check for updates for models")
parser.add_argument("--low-vram", action='store_true', default=default_arguments['low-vram'], help="Disables some optimizations that increases VRAM usage")
parser.add_argument("--no-embed-output-metadata", action='store_false', default=not default_arguments['embed-output-metadata'], help="Disables embedding output metadata into resulting WAV files for easily fetching its settings used with the web UI (data is stored in the lyrics metadata tag)")
parser.add_argument("--latents-lean-and-mean", action='store_true', default=default_arguments['latents-lean-and-mean'], help="Exports the bare essentials for latents.")
parser.add_argument("--voice-fixer", action='store_true', default=default_arguments['voice-fixer'], help="Uses python module 'voicefixer' to improve audio quality, if available.")
parser.add_argument("--voice-fixer-use-cuda", action='store_true', default=default_arguments['voice-fixer-use-cuda'], help="Hints to voicefixer to use CUDA, if available.")
parser.add_argument("--force-cpu-for-conditioning-latents", default=default_arguments['force-cpu-for-conditioning-latents'], action='store_true', help="Forces computing conditional latents to be done on the CPU (if you constantly OOM on low chunk counts)")
parser.add_argument("--defer-tts-load", default=default_arguments['defer-tts-load'], action='store_true', help="Defers loading TTS model")
parser.add_argument("--prune-nonfinal-outputs", default=default_arguments['prune-nonfinal-outputs'], action='store_true', help="Deletes non-final output files on completing a generation")
parser.add_argument("--use-bigvgan-vocoder", default=default_arguments['use-bigvgan-vocoder'], action='store_true', help="Uses BigVGAN in place of the default vocoder")
parser.add_argument("--device-override", default=default_arguments['device-override'], help="A device string to override pass through Torch")
parser.add_argument("--sample-batch-size", default=default_arguments['sample-batch-size'], type=int, help="Sets how many batches to use during the autoregressive samples pass")
parser.add_argument("--concurrency-count", type=int, default=default_arguments['concurrency-count'], help="How many Gradio events to process at once")
parser.add_argument("--autocalculate-voice-chunk-duration-size", type=float, default=default_arguments['autocalculate-voice-chunk-duration-size'], help="Number of seconds to suggest voice chunk size for (for example, 100 seconds of audio at 10 seconds per chunk will suggest 10 chunks)")
parser.add_argument("--output-sample-rate", type=int, default=default_arguments['output-sample-rate'], help="Sample rate to resample the output to (from 24KHz)")
parser.add_argument("--output-volume", type=float, default=default_arguments['output-volume'], help="Adjusts volume of output")
parser.add_argument("--check-for-updates", action='store_true', default=default_arguments['check-for-updates'],
help="Checks for update on startup")
parser.add_argument("--models-from-local-only", action='store_true',
default=default_arguments['models-from-local-only'],
help="Only loads models from disk, does not check for updates for models")
parser.add_argument("--low-vram", action='store_true', default=default_arguments['low-vram'],
help="Disables some optimizations that increases VRAM usage")
parser.add_argument("--no-embed-output-metadata", action='store_false',
default=not default_arguments['embed-output-metadata'],
help="Disables embedding output metadata into resulting WAV files for easily fetching its settings used with the web UI (data is stored in the lyrics metadata tag)")
parser.add_argument("--latents-lean-and-mean", action='store_true',
default=default_arguments['latents-lean-and-mean'],
help="Exports the bare essentials for latents.")
parser.add_argument("--voice-fixer", action='store_true', default=default_arguments['voice-fixer'],
help="Uses python module 'voicefixer' to improve audio quality, if available.")
parser.add_argument("--voice-fixer-use-cuda", action='store_true',
default=default_arguments['voice-fixer-use-cuda'],
help="Hints to voicefixer to use CUDA, if available.")
parser.add_argument("--force-cpu-for-conditioning-latents",
default=default_arguments['force-cpu-for-conditioning-latents'], action='store_true',
help="Forces computing conditional latents to be done on the CPU (if you constantly OOM on low chunk counts)")
parser.add_argument("--defer-tts-load", default=default_arguments['defer-tts-load'], action='store_true',
help="Defers loading TTS model")
parser.add_argument("--prune-nonfinal-outputs", default=default_arguments['prune-nonfinal-outputs'],
action='store_true', help="Deletes non-final output files on completing a generation")
parser.add_argument("--use-bigvgan-vocoder", default=default_arguments['use-bigvgan-vocoder'], action='store_true',
help="Uses BigVGAN in place of the default vocoder")
parser.add_argument("--device-override", default=default_arguments['device-override'],
help="A device string to override pass through Torch")
parser.add_argument("--sample-batch-size", default=default_arguments['sample-batch-size'], type=int,
help="Sets how many batches to use during the autoregressive samples pass")
parser.add_argument("--concurrency-count", type=int, default=default_arguments['concurrency-count'],
help="How many Gradio events to process at once")
parser.add_argument("--autocalculate-voice-chunk-duration-size", type=float,
default=default_arguments['autocalculate-voice-chunk-duration-size'],
help="Number of seconds to suggest voice chunk size for (for example, 100 seconds of audio at 10 seconds per chunk will suggest 10 chunks)")
parser.add_argument("--output-sample-rate", type=int, default=default_arguments['output-sample-rate'],
help="Sample rate to resample the output to (from 24KHz)")
parser.add_argument("--output-volume", type=float, default=default_arguments['output-volume'],
help="Adjusts volume of output")
parser.add_argument("--autoregressive-model", default=default_arguments['autoregressive-model'], help="Specifies which autoregressive model to use for sampling.")
parser.add_argument("--whisper-model", default=default_arguments['whisper-model'], help="Specifies which whisper model to use for transcription.")
parser.add_argument("--whisper-cpp", default=default_arguments['whisper-cpp'], action='store_true', help="Leverages lightmare/whispercpp for transcription")
parser.add_argument("--autoregressive-model", default=default_arguments['autoregressive-model'],
help="Specifies which autoregressive model to use for sampling.")
parser.add_argument("--whisper-model", default=default_arguments['whisper-model'],
help="Specifies which whisper model to use for transcription.")
parser.add_argument("--whisper-cpp", default=default_arguments['whisper-cpp'], action='store_true',
help="Leverages lightmare/whispercpp for transcription")
parser.add_argument("--training-default-halfp", action='store_true', default=default_arguments['training-default-halfp'], help="Training default: halfp")
parser.add_argument("--training-default-bnb", action='store_true', default=default_arguments['training-default-bnb'], help="Training default: bnb")
parser.add_argument("--training-default-halfp", action='store_true',
default=default_arguments['training-default-halfp'], help="Training default: halfp")
parser.add_argument("--training-default-bnb", action='store_true',
default=default_arguments['training-default-bnb'], help="Training default: bnb")
parser.add_argument("--os", default="unix", help="Specifies which OS, easily")
args = parser.parse_args()
@ -1478,7 +1716,6 @@ def setup_args():
if not args.device_override:
set_device_name(args.device_override)
args.listen_host = None
args.listen_port = None
args.listen_path = None
@ -1499,7 +1736,12 @@ def setup_args():
return args
def update_args( listen, share, check_for_updates, models_from_local_only, low_vram, embed_output_metadata, latents_lean_and_mean, voice_fixer, voice_fixer_use_cuda, force_cpu_for_conditioning_latents, defer_tts_load, prune_nonfinal_outputs, use_bigvgan_vocoder, device_override, sample_batch_size, concurrency_count, autocalculate_voice_chunk_duration_size, output_volume, autoregressive_model, whisper_model, whisper_cpp, training_default_halfp, training_default_bnb ):
def update_args(listen, share, check_for_updates, models_from_local_only, low_vram, embed_output_metadata,
latents_lean_and_mean, voice_fixer, voice_fixer_use_cuda, force_cpu_for_conditioning_latents,
defer_tts_load, prune_nonfinal_outputs, use_bigvgan_vocoder, device_override, sample_batch_size,
concurrency_count, autocalculate_voice_chunk_duration_size, output_volume, autoregressive_model,
whisper_model, whisper_cpp, training_default_halfp, training_default_bnb):
global args
args.listen = listen
@ -1531,6 +1773,7 @@ def update_args( listen, share, check_for_updates, models_from_local_only, low_v
save_args_settings()
def save_args_settings():
global args
settings = {
@ -1567,7 +1810,6 @@ def save_args_settings():
f.write(json.dumps(settings, indent='\t'))
def import_generate_settings(file="./config/generate.json"):
settings, _ = read_generate_settings(file, read_latents=False)
@ -1604,6 +1846,7 @@ def reset_generation_settings():
f.write(json.dumps({}, indent='\t'))
return import_generate_settings()
def read_generate_settings(file, read_latents=True):
j = None
latents = None
@ -1632,17 +1875,15 @@ def read_generate_settings(file, read_latents=True):
latents = base64.b64decode(j['latents'])
del j['latents']
if "time" in j:
j["time"] = "{:.3f}".format(j["time"])
return (
j,
latents,
)
def load_tts(restart=False, model=None):
global args
global tts
@ -1650,7 +1891,6 @@ def load_tts( restart=False, model=None ):
if restart:
unload_tts()
if model:
args.autoregressive_model = model
@ -1672,8 +1912,10 @@ def load_tts( restart=False, model=None ):
print("Loaded TorToiSe, ready for generation.")
return tts
setup_tortoise = load_tts
def unload_tts():
global tts
@ -1683,9 +1925,11 @@ def unload_tts():
print("Unloaded TTS")
do_gc()
def reload_tts(model=None):
load_tts(restart=True, model=model)
def update_autoregressive_model(autoregressive_model_path):
match = re.findall(r'^\[[a-fA-F0-9]{8}\] (.+?)$', autoregressive_model_path)
if match:
@ -1714,7 +1958,8 @@ def update_autoregressive_model(autoregressive_model_path):
else:
from tortoise.models.autoregressive import UnifiedVoice
tts.autoregressive_model_path = autoregressive_model_path if autoregressive_model_path and os.path.exists(autoregressive_model_path) else get_model_path('autoregressive.pth', tts.models_dir)
tts.autoregressive_model_path = autoregressive_model_path if autoregressive_model_path and os.path.exists(
autoregressive_model_path) else get_model_path('autoregressive.pth', tts.models_dir)
del tts.autoregressive
tts.autoregressive = UnifiedVoice(max_mel_tokens=604, max_text_tokens=402, max_conditioning_inputs=2, layers=30,
@ -1735,6 +1980,7 @@ def update_autoregressive_model(autoregressive_model_path):
return autoregressive_model_path
def load_voicefixer(restart=False):
global voicefixer
@ -1749,6 +1995,7 @@ def load_voicefixer(restart=False):
except Exception as e:
print(f"Error occurred while trying to initialize voicefixer: {e}")
def unload_voicefixer():
global voicefixer
@ -1759,6 +2006,7 @@ def unload_voicefixer():
do_gc()
def load_whisper_model(language=None, model_name=None, progress=None):
global whisper_model
@ -1787,6 +2035,7 @@ def load_whisper_model(language=None, model_name=None, progress=None):
print("Loaded Whisper model")
def unload_whisper():
global whisper_model