sample_rate: 24_000 # 44_000 / 44_100 for dac
audio_backend: "vocos" # or dac

# model definitions to train
models:
- name: "ar+nar" # vanity name
  size: "full" # model dimensionality
  resp_levels: 8 # RVQ levels this model targets
  tasks: 9 # tasks this model can attend to; only TTS is guaranteed to give results at the moment
  langs: 4 # languages this model supports
  tones: 1 # tones this model supports, currently unused
  arch_type: llama # underlying LLM arch to use, currently focusing on llama
  training: True # signals this model is to be trained
  version: 5 # helps keep backwards compatibility for when I add new things to the model
  attention: auto # attention mechanism to use, "auto" for safety
  dropout: 0.1 # percentage of the model to disable during training

  # factors for split loss values, remove to have a unified loss calculation
  loss_factors:
    text: 0.1 # text phoneme portion of the sequence
    prom: 0.5 # input prompt portion of the sequence
    resp: 1.0 # output audio portion of the sequence
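    # with split factors, the total loss should just be the factor-weighted sum of each portion,
    # e.g. roughly 0.1 * text_loss + 0.5 * prom_loss + 1.0 * resp_loss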

  capabilities: ["ar", "nar"] # macro-tasks this model can perform

  # experimental settings
  experimental:
    rvq_levels_p: "auto" # "equal" | "auto" | list[int], sets probabilities of which RVQ level to select during training, auto will have the next RVQ level half as likely as the previous one
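    # for example, with "auto" and 8 RVQ levels the unnormalized pick weights halve per level,
    # [1, 1/2, 1/4, ..., 1/128], so level 0 is trained on most often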
    audio_embedding_sums: True # whether the input embeddings include all prior RVQ levels (sums) or only the current one (further experimentation is needed to see if this matters)
    unified_position_ids: False # specifies whether or not position IDs should be continuous across the whole sequence (if True, naive behavior), or restart them at the next segment of the sequence (if False)
    split_classifiers: True # use per-RVQ-level projection/output/classifiers for the model (further experimentation is needed to see if this matters)

# list of LoRA(s) to use
#loras:
#- name: "lora-shodan" # LoRA name to load from
#  rank: 128 # parameter size per Linear
#  alpha: 128 # "influence" value
#  training: True
#  rvq_levels: [] # RVQ levels to activate the LoRA on, leave empty for all

# hyperparameter settings (could be relegated to trainer settings)
hyperparameters:
  # deepspeed autotune
  autotune: False
  autotune_params:
    start_profile_step: 1
    end_profile_step: 50
    num_tuning_micro_batch_sizes: 8

  batch_size: 16 # samples per batch, governs maximum batch size if using batch sampling
  gradient_accumulation_steps: 4 # gradient accumulation: batches per update
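  # with batch_size: 16 and 4 accumulation steps, each optimizer update
  # effectively sees 16 * 4 = 64 samples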
  gradient_clipping: 1.0 # smooths out the gradient when updating
  warmup_steps: 100 # steps to warm up the optimizer but not update the model

  # optimizer settings
  optimizer: Prodigy
  learning_rate: 1.0 # prodigyopt can keep its LR at 1
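  # Prodigy adapts its own step size, which is why the base LR can stay at 1.0;
  # (assumption) a conventional optimizer like AdamW would want a much smaller value here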
  torch_optimizer: True # signals to deepspeed to not instantiate one

  # deepspeed scheduler; the local backend does not have one implemented because I don't use one
  scheduler: "" # ScheduleFree
  torch_scheduler: True # signals to deepspeed to not instantiate one

# evaluation settings (could be pushed under trainer)
evaluation:
  batch_size: 8 # batch size for evaluation / validation pass
  frequency: 5000 # how often to perform eval during training
  size: 8 # total samples to get for eval

  # arguments to pass for the AR/NAR (matches arguments passed through vall_e.inference)
  kwargs:
    max_steps: 500 # how many AR steps to perform
    ar_temp: 0.95 # temperature for AR sampling
    nar_temp: 0.25 # temperature for NAR sampling

trainer:
  iterations: 1_000_000 # how many total iterations to train before terminating; should just default to 0 so it doesn't auto-terminate

  save_tag: step # tag name to save checkpoints under
  save_on_oom: True # save if an OOM is caught
  save_on_quit: True # save when `quit` is entered in the trainer
  save_frequency: 250 # how often to save
  export_on_save: True # export the weights every time the trainer saves

  keep_last_checkpoints: 4 # how many previous checkpoints to keep

  gradient_checkpointing: True # gradient checkpointing to save VRAM at the cost of some performance throughput

  strict_loading: False # strict state dict loading (set to False if you're going to change some model settings)
  resize_modules: True # automatically resize core modules from the state dict to match

  #check_for_oom: False # wrap forward/backwards in a try/catch block and gracefully handle OOM conditions
  #load_state_dict: True # load the state dict from fp32.pth instead of a checkpoint, should automagically be done
  #load_tag: "9500" # specific tag to load from (instead of having to edit latest)
  #load_states: False # flag to load optimizer / scheduler states or not
  #restart_step_count: True # clear the trainer stats
  #gc_mode: None # "global_step", flag to call GC at specific points, seems overkill now

  weight_dtype: float16 # float32 | float16 | bfloat16, dtype for the model to load under
  amp: True # mixed precision during training

  backend: deepspeed # deepspeed | local, training backend to use

  # deepspeed specific settings
  deepspeed:
    inferencing: True # use deepspeed inference wrapper for inferencing, should be relegated under inference
    amp: False # use deepspeed's AMP instead (requires nvidia/apex installed)
    zero_optimization_level: 0 # ZeRO optimization level to use
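    # for reference: stage 0 disables ZeRO, while stages 1/2/3 progressively partition
    # optimizer states, then gradients, then parameters across devices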
    use_compression_training: False # compression training (seems useless almost always)

  load_webui: False # initialize the web UI during training (the goal is to let you inference during training, but I never found a good way to go about it)

# inferencing settings
inference:
  backend: deepspeed # deepspeed | local, inference backend to use
  normalize: False # normalize audio before encoding / after decoding, only enable if you know what you're doing

  weight_dtype: float32 # float32 | float16 | bfloat16, dtype for the model to load under
  amp: False # mixed precision during inferencing

# experimental optimization flags
optimizations:
  injects: False # replace the module in the torch package itself to achieve these
  replace: True # replace the module in the model itself to achieve these

  # bitsandbytes things
  linear: False # enable nn.Linear optimizations
  embedding: False # enable nn.Embedding optimizations
  optimizers: True # enable torch.optim optimizations

  bitsandbytes: False # use bitsandbytes
  dadaptation: False # use dadaptation
  bitnet: False # use bitnet
  fp8: False # use nvidia/transformer-engine's fp8 AMP

# dataset settings
dataset:
  speaker_name_getter: "lambda p: f'{p.parts[-3]}_{p.parts[-2]}'" # function to eval when fetching the speaker from a path
  speaker_group_getter: "lambda p: f'{p.parts[-3]}'" # function to eval when fetching the group from a path
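  # for example, a (hypothetical) path like ./training/LibriTTS/100/utterance.qnt.pt
  # resolves to speaker "LibriTTS_100" and group "LibriTTS" with the getters above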
  # map to classify languages under when preparing a batch (in case the language is not provided in the dataset)
  speaker_languages:
    ja: []

  use_hdf5: True # use HDF5 file to load the dataset from
  hdf5_flag: r # flag to load the HDF5 file under (should automatically set to `a` when generating the HDF5 dataset)
  use_metadata: True # use generated metadata to help prepare the dataset
  validate: True # cull samples if they are outside the duration threshold

  workers: 2 # worker processes to spawn for the dataloader
  cache: True # cache the dataloader to disk to speed things up

  duration_range: [3.0, 5.0] # allowed sample duration in the dataset
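  # with validate: True above, samples outside the 3.0-5.0 second window get culled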

  prompt_max_samples: 1 # maximum prompts to sample for the input prompt during training
  prompt_duration_range: [3.0, 6.0] # duration range for the input prompt during training
  prompt_similar_p: 1.0 # odds to use a similar utterance instead of a random sample (1 to always do, 0 to never do)

  # not used
  resps_max_samples: 1 # maximum output utterances to sample for the output during training
  resps_append_p: 0.0 # odds to append another utterance to the output utterance sample

  sample_type: path # path | speaker | group, type to sample the paths from (by path, speaker, or group)
  sample_order: duration # duration | anything else, method of ordering the paths (duration orders by duration, any other value interleave-reorders)
  sample_max_duration_batch: 0 # used when sample_order is duration; 120 seconds per batch works at 12GiB of VRAM
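  # e.g. setting this to 120 packs each batch up to roughly 120 seconds of audio,
  # with batch_size above then only capping how many samples can go in a batch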
  sample_shuffle: False # shuffle indices in the dataloader (avoid using with sample_order: duration and sample_max_duration_batch: 0)

  retokenize_text: False # do not rely on AOT'd tokens from the dataset, instead tokenize JIT (in case you botch your tokenizer during dataset preparation and don't want to recreate it)

  tasks_list: [ "tts", "stt" ] # determines which tasks to randomly pick for a sample; the full list is [ "tts", "tts-c", "ns", "sr", "tse", "cse", "nse", "stt" ]

  training: [] # paths for the training dataset
  validation: [] # paths for the validation dataset
  noise: [] # paths for the noise dataset (unused, but for the above tasks that call for injecting noise)