VALL-E config edits

This commit is contained in:
mrq 2023-03-20 01:22:53 +00:00
parent 2e33bf071a
commit 34ef0467b9
3 changed files with 62 additions and 39 deletions

View File

@@ -1,4 +1,33 @@
{
"optimizer": {
"type": "AdamW",
"params": {
"lr": 2e-05,
"betas": [
0.9,
0.96
],
"eps": 1e-07,
"weight_decay": 0.01
}
},
"scheduler":{
"type":"WarmupLR",
"params":{
"warmup_min_lr":0,
"warmup_max_lr":2e-5,
"warmup_num_steps":100,
"warmup_type":"linear"
}
},
"fp16":{
"enabled":true,
"loss_scale":0,
"loss_scale_window":1000,
"initial_scale_power":16,
"hysteresis":2,
"min_loss_scale":1
},
	"autotuning":{
		"enabled":false,
		"results_dir":"./config/autotune/results",
@@ -21,15 +50,6 @@
	},
	"zero_optimization":{
		"stage":0,
"offload_param": {
"device": "nvme",
"nvme_path": "/tmp/zero/",
"pin_memory": false,
"buffer_count": 5,
"buffer_size": 1e9,
"max_in_cpu": 1e9
},
"overlap_comm": true,
		"reduce_bucket_size":"auto",
		"contiguous_gradients":true,
		"sub_group_size":1e8,

View File

@@ -3,14 +3,17 @@
ckpt_root: ./training/${voice}/finetune/ckpt/
log_root: ./training/${voice}/finetune/logs/
data_dirs: [./training/${voice}/valle/]
spkr_name_getter: "lambda p: p.parts[-3]" spkr_name_getter: "lambda p: p.parts[-3]" # "lambda p: p.parts[-1].split('-')[0]"
model: ${model_name}
batch_size: ${batch_size}
eval_batch_size: ${validation_batch_size} gradient_accumulation_steps: ${gradient_accumulation_size}
eval_batch_size: ${batch_size}
max_iter: ${iterations}
save_ckpt_every: ${save_rate}
eval_every: ${validation_rate}
max_phones: 256
sampling_temperature: 1.0

View File

@@ -488,7 +488,7 @@ def setup_gradio():
						)
						with gr.Row():
							TRAINING_SETTINGS["batch_size"] = gr.Number(label="Batch Size", value=128, precision=0)
TRAINING_SETTINGS["gradient_accumulation_size"] = gr.Number(label="Gradient Accumulation Size", value=4, precision=0, visible=args.tts_backend=="tortoise") TRAINING_SETTINGS["gradient_accumulation_size"] = gr.Number(label="Gradient Accumulation Size", value=4, precision=0)
						with gr.Row():
							TRAINING_SETTINGS["save_rate"] = gr.Number(label="Save Frequency (in epochs)", value=5, precision=0)
							TRAINING_SETTINGS["validation_rate"] = gr.Number(label="Validation Frequency (in epochs)", value=5, precision=0)