sample_rate : 24_000 # 44_000 / 44_100 for dac
audio_backend : "vocos" # or dac
# model definitions to train
models :
- name : "ar+nar" # vanity name
size : "full" # model dimensionality
resp_levels : 8 # RVQ levels this model targets
tasks : 9 # tasks this model can attend to; only tts is guaranteed to give results at the moment
langs : 4 # languages this model supports
tones : 1 # tones this model supports, currently unused
arch_type : llama # underlying LLM arch to use, currently focusing on llama
training : True # signals this model is to be trained
version : 5 # helps keep backwards compatibility for when I add new things to the model
attention : auto # attention mechanism to use, "auto" for safety
dropout : 0.1 # dropout probability applied to the model during training
# factors for split loss values, remove to have a unified loss calculation
loss_factors :
text : 0.1 # text phoneme portion of the sequence
prom : 0.5 # input prompt portion of the sequence
resp : 1.0 # output audio portion of the sequence
capabilities : [ "ar" , "nar" ] # macro-tasks this model can perform
# experimental settings
experimental :
rvq_levels_p : "auto" # "equal" | "auto" | list[int]; sets the probability of selecting each RVQ level during training ("auto" makes each successive RVQ level half as likely as the previous one; see the commented example below)
audio_embedding_sums : True # whether the input embeddings include all prior RVQ levels (sums) or only the current one (further experimentation is needed to see if this matters)
unified_position_ids : False # if True, position IDs are continuous across the whole sequence (naive behavior); if False, they restart at each segment of the sequence
split_classifiers : True # use per-RVQ-level projection/output/classifiers for the model (further experimentation is needed to see if this matters)
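# commented sketch of an explicit rvq_levels_p (hypothetical values, assuming a list is
# treated as a pool of levels to draw from, so repeating a level makes it more likely):
#rvq_levels_p : [ 0, 0, 0, 0, 1, 1, 2, 3, 4, 5, 6, 7 ]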
# list of LoRA(s) to use
#loras:
#- name : "lora-shodan" # LoRA name to load from
# rank: 128 # parameter size per Linear
# alpha: 128 # "influence" value
# training: True # signals this LoRA is to be trained
# rvq_levels: [] # RVQ levels to activate the LoRA on, leave empty for all
# hyperparameter settings (could be relegated to trainer settings)
hyperparameters :
# deepspeed autotune
autotune : False
autotune_params :
start_profile_step : 1
end_profile_step : 50
num_tuning_micro_batch_sizes : 8
batch_size : 16 # samples per batch, governs maximum batch size if using batch sampling
gradient_accumulation_steps: 4 # batches to accumulate per optimizer update
gradient_clipping : 1.0 # clips the gradient norm to stabilize updates
warmup_steps : 100 # steps to warm up the optimizer but not update the model
# optimizer settings
optimizer : Prodigy
learning_rate : 1.0 # prodigyopt can keep its LR at 1.0
torch_optimizer : True # signals to deepspeed to not instantiate one
# deepspeed scheduler; the local backend does not have one implemented because I don't use one
scheduler : "" # ScheduleFree
torch_scheduler : True # signals to deepspeed to not instantiate one
# evaluation settings (could be pushed under trainer)
evaluation :
batch_size : 8 # batch size for evaluation / validation pass
frequency : 5000 # how often to perform eval during training
size : 8 # total samples to get for eval
# arguments to pass for the AR/NAR (matches arguments passed through vall_e.inference)
kwargs :
max_steps : 500 # how many AR steps to perform
ar_temp : 0.95 # temperature for AR sampling
nar_temp : 0.25 # temperature for NAR sampling
trainer :
iterations : 1_000_000 # how many total iterations to train before terminating; should just default to 0 to not auto-terminate
save_tag : step # tag name to save checkpoints under
save_on_oom : True # save if an OOM is caught
save_on_quit : True # save when `quit` is entered in the trainer
save_frequency : 250 # how often to save
export_on_save : True # export the weights every time the trainer saves
keep_last_checkpoints : 4 # how many previous checkpoints to keep
gradient_checkpointing : True # gradient checkpointing to save VRAM at the cost of some performance throughput
strict_loading : False # strict state dict loading (set to False if you're going to change some model settings)
resize_modules : True # automatically resize core modules from the state dict to match
#check_for_oom: False # wrap the forward/backward passes in a try/catch block and gracefully handle OOM conditions
#load_state_dict: True # load the state dict from fp32.pth instead of a checkpoint, should automagically be done
#load_tag: "9500" # specific tag to load from (instead of having to edit latest)
#load_states: False # flag to load optimizer / scheduler states or not
#restart_step_count: True # clear the trainer stats
# gc_mode: None # "global_step" # flag to call GC at specific points, seems overkill now
weight_dtype : float16 # float32 | float16 | bfloat16, dtype for the model to load under
amp : True # mixed precision during training
backend : deepspeed # deepspeed | local, training backend to use
# deepspeed specific settings
deepspeed :
inferencing : True # use deepspeed inference wrapper for inferencing, should be relegated under inference
amp : False # use deepspeed's AMP instead (requires nvidia/apex installed)
zero_optimization_level : 0 # ZeRO optimization level to use
use_compression_training : False # compression training (seems useless almost always)
load_webui : False # initialize the web UI during training (the goal is to let you inference during training, but I never found a good way to go about it)
# inferencing settings
inference :
backend : deepspeed # deepspeed | local, inferencing backend to use
normalize : False # normalize audio before encoding / after decoding, only enable if you know what you're doing
weight_dtype : float32 # float32 | float16 | bfloat16, dtype for the model to load under
amp : False # mixed precision during inferencing
# experimental optimization flags
optimizations :
injects : False # replace the module within the torch package itself to achieve these optimizations
replace : True # replace the module within the model itself to achieve these optimizations
# bitsandbytes things
linear : False # enable nn.Linear optimizations
embedding : False # enable nn.Embedding optimizations
optimizers : True # enable torch.optim optimizations
bitsandbytes : False # use bitsandbytes
dadaptation : False # use dadaptation
bitnet : False # use bitnet
fp8 : False # use nvidia/transformer-engine's fp8 AMP
# dataset settings
dataset :
speaker_name_getter : "lambda p: f'{p.parts[-3]}_{p.parts[-2]}'" # function to eval when fetching the speaker from a path
speaker_group_getter : "lambda p: f'{p.parts[-3]}'" # function to eval when fetching the group from a path
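# illustrative sketch of how the getters above resolve (hypothetical path and layout):
#   p = "./training/LibriTTS/100/0001.enc"
#   speaker -> "LibriTTS_100" (p.parts[-3] + "_" + p.parts[-2])
#   group   -> "LibriTTS"     (p.parts[-3])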
# map to classify languages under when preparing a batch (in case the language is not provided in the dataset)
speaker_languages :
ja : [ ]
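# hedged example (hypothetical names, assuming the list holds speaker / speaker-group names
# whose samples should be classified as Japanese):
#  ja : [ "speaker_a", "speaker_b" ]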
use_hdf5 : True # use HDF5 file to load the dataset from
hdf5_flag : r # flag to load the HDF5 file under (should automatically be set to `a` when generating the HDF5 dataset)
use_metadata : True # use generated metadata to help prepare the dataset
validate : True # cull samples if they are outside the duration threshold
workers : 2 # worker processes to spawn for the dataloader
cache : True # cache the dataloader to disk to speed things up
duration_range : [ 3.0 , 5.0 ] # allowed sample duration in the dataset
prompt_max_samples : 1 # maximum prompts to sample for the input prompt during training
prompt_duration_range : [ 3.0 , 6.0 ] # duration range for the input prompt during training
prompt_similar_p : 1.0 # odds to use a similar utterance instead of a random sample (1 to always do, 0 to never do)
# not used
resps_max_samples : 1 # maximum output utterances to sample for the output during training
resps_append_p : 0.0 # odds to append another utterance to the output utterance sample
sample_type : path # path | speaker | group, type to sample the paths from (by path, speaker, or group)
sample_order : duration # duration | anything else; method of ordering the paths (duration sorts by duration, any other value interleave-reorders them)
sample_max_duration_batch : 0 # used when sample_order is duration; 120 seconds per batch works at 12GiB of VRAM
sample_shuffle: False # shuffle indices in the dataloader (avoid using with sample_order: duration and sample_max_duration_batch : 0 )
retokenize_text : False # do not rely on AOT'd tokens from the dataset, instead tokenize JIT (in case you botch your tokenizer during dataset preparation and don't want to recreate it)
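# commented sketch of duration-bucketed batching, using the values the comments above suggest
# (hyperparameters.batch_size then acts as the cap on samples per batch):
#sample_order : duration
#sample_max_duration_batch : 120 # seconds of audio per batch
#sample_shuffle : False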
tasks_list : [ "tts" , "stt" ] # determines which tasks to randomly pick for a sample; full list: [ "tts", "tts-c", "ns", "sr", "tse", "cse", "nse", "stt" ]
training : [ ] # paths for the training dataset
validation : [ ] # paths for the validation dataset
noise : [ ] # paths for the noise dataset (unused, but for the above tasks that call for injecting noise)
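# illustrative example with hypothetical paths; point these at the directories your prepared
# dataset actually lives under:
#training : [ "./training/data/LibriTTS/" ]
#validation : [ "./training/data/LibriTTS-validation/" ]
#noise : [ "./training/data/noise/" ]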