vocoder: "hifigan"
|
|
|
|
models:
|
|
- name: "autoregressive"
|
|
training: True
|
|
|
|
#loras:
|
|
#- name : "lora-test"
|
|
# rank: 128
|
|
# alpha: 128
|
|
# training: True
|
|
# parametrize: True
|
|
|
|
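# note: uncommenting the `loras` block above presumably adds a rank-128 / alpha-128 LoRA
# and marks it as trainable on top of the base "autoregressive" model defined above.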

hyperparameters:
  autotune: False
  autotune_params:
    start_profile_step: 1
    end_profile_step: 50
    num_tuning_micro_batch_sizes: 8

  batch_size: 4
  gradient_accumulation_steps: 2
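  # with batch_size 4 and gradient_accumulation_steps 2, the effective batch size per
  # optimizer step is 4 * 2 = 8 samples (per device).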
  gradient_clipping: 1.0
  warmup_steps: 0

  optimizer: AdamW
  learning_rate: 1.0e-4
  # optimizer: Prodigy
  # learning_rate: 1.0
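  # the commented pair above shows how to swap AdamW for Prodigy; Prodigy adapts its own
  # step size, which is why its learning rate is left at 1.0 rather than a small value
  # like 1.0e-4.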
  torch_optimizer: True

  scheduler: "" # ScheduleFree
  torch_scheduler: True

evaluation:
  batch_size: 4
  frequency: 1000
  size: 4
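  # `frequency` is presumably measured in training iterations: an evaluation pass over
  # `size` held-out samples roughly every 1000 steps.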

  steps: 500
  ar_temperature: 0.95
  nar_temperature: 0.25
  load_disabled_engines: True

trainer:
  #no_logger: True
  ddp: False
  check_for_oom: False
  iterations: 1_000_000

  save_tag: step
  save_on_oom: True
  save_on_quit: True
  save_frequency: 500
  export_on_save: True

  keep_last_checkpoints: 8
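  # with save_frequency 500 and keep_last_checkpoints 8, roughly the last
  # 8 * 500 = 4000 steps' worth of checkpoints are retained on disk.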

  aggressive_optimizations: False
  load_disabled_engines: False
  gradient_checkpointing: True

  #load_state_dict: True
  strict_loading: False
  #load_tag: "9500"
  #load_states: False
  #restart_step_count: True

  gc_mode: None # "global_step"

  weight_dtype: bfloat16
  amp: True
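  # training uses bfloat16 weights with AMP enabled; the inference section below
  # deliberately falls back to float32 without AMP, since (per its comment) some steps
  # break under blanket (B)FP16 + AMP.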

  backend: deepspeed
  deepspeed:
    inferencing: False
    zero_optimization_level: 0
    use_compression_training: False

    amp: False

  load_webui: False

inference:
  backend: local
  normalize: False

  # some steps break under blanket (B)FP16 + AMP
  weight_dtype: float32
  amp: False

optimizations:
  injects: False
  replace: True

  linear: False
  embedding: False
  optimizers: True

  bitsandbytes: True
  dadaptation: False
  bitnet: False
  fp8: False
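  # presumably: `replace: True` swaps implementations outright rather than injecting
  # patches, and with `optimizers: True` + `bitsandbytes: True` the optimizer is replaced
  # by its bitsandbytes 8-bit counterpart while linear/embedding layers are left as-is.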

dataset:
  speaker_name_getter: "lambda p: f'{p.parts[-3]}_{p.parts[-2]}'"
  speaker_group_getter: "lambda p: f'{p.parts[-3]}'"
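  # the getters are evaluated as Python lambdas over each utterance's path: for a layout
  # like .../<group>/<speaker>/<utterance>, the speaker name resolves to
  # "<group>_<speaker>" and the group to "<group>".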
  speaker_languages:
    ja: []

  use_hdf5: True
  use_metadata: True
  hdf5_flag: r
  validate: True

  workers: 6
  cache: True

  duration_range: [2.0, 3.0]

  random_utterance: 1.0
  max_prompts: 1
  prompt_duration_range: [3.0, 3.0]
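  # durations are presumably in seconds: target utterances between 2 and 3 s, with a
  # single ~3 s acoustic prompt per sample (max_prompts: 1).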

  max_resps: 1
  p_resp_append: 0.25

  sample_type: path # path | speaker | group
  sample_order: duration # duration | shuffle

  tasks_list: [ "tts" ]

  training: []
  validation: []
  noise: []