forked from mrq/ai-voice-cloning
when you have three separate machines running and you test on one, but you accidentally revert changes because you then test on another
This commit is contained in:
parent ef75dba995
commit 3b4f4500d1
@@ -1149,7 +1149,7 @@ def prepare_dataset( files, outdir, language=None, skip_existings=False, progres
 			continue
 		if match[0] not in previous_list:
-			previous_list.append(f'{match[0]}.wav')
+			previous_list.append(f'{match[0].split("/")[-1]}.wav')
 
 	for file in enumerate_progress(files, desc="Iterating through voice files", progress=progress):
 		basename = os.path.basename(file)
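Note on this hunk: `match[0]` is read back from an existing transcription list and can carry a directory prefix, while the loop below compares against `os.path.basename(file)`; the old entry `f'{match[0]}.wav'` would then never match, so already-processed files were not skipped. A minimal standalone sketch of the idea, using hypothetical sample paths:

import os

# Hypothetical entries as they might appear in an existing dataset file.
existing = ['voices/mouse/mouse_1', 'mouse_2']

# Old behavior: any directory prefix is kept in the entry.
previous_list_old = [f'{m}.wav' for m in existing]

# New behavior: keep only the last path component, mirroring os.path.basename.
previous_list_new = [f'{m.split("/")[-1]}.wav' for m in existing]

basename = os.path.basename('voices/mouse/mouse_1.wav')
print(basename in previous_list_old)  # False -- prefixed entry never matches
print(basename in previous_list_new)  # True  -- entries are bare filenames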
@@ -1257,12 +1257,13 @@ def optimize_training_settings( **kwargs ):
 		settings['batch_size'] = lines
 		messages.append(f"Batch size is larger than your dataset, clamping batch size to: {settings['batch_size']}")
 
+	"""
 	if lines % settings['batch_size'] != 0:
 		settings['batch_size'] = int(lines / settings['batch_size'])
 		if settings['batch_size'] == 0:
 			settings['batch_size'] = 1
 		messages.append(f"Batch size not neatly divisible by dataset size, adjusting batch size to: {settings['batch_size']}")
+	"""
 	if settings['gradient_accumulation_size'] == 0:
 		settings['gradient_accumulation_size'] = 1
 
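Note on this hunk: wrapping the divisibility adjustment in a bare triple-quoted string turns it into an unused expression statement, a quick way to comment the block out. What remains active is the clamp to the dataset size and the gradient-accumulation floor. A minimal sketch of the surviving logic, assuming a `batch_size > lines` guard around the clamp (the guard itself is outside this hunk) and hypothetical names and inputs:

def clamp_batch_settings(lines, batch_size, gradient_accumulation_size):
    """Sketch of the logic left active after this commit; names are assumed."""
    messages = []

    # Clamp: a batch can never be larger than the dataset itself.
    if batch_size > lines:
        batch_size = lines
        messages.append(f"Batch size is larger than your dataset, clamping batch size to: {batch_size}")

    # The divisibility-based rescaling that followed here is now disabled.

    # A gradient accumulation size of 0 is meaningless, so floor it at 1.
    if gradient_accumulation_size == 0:
        gradient_accumulation_size = 1

    return batch_size, gradient_accumulation_size, messages

print(clamp_batch_settings(lines=100, batch_size=128, gradient_accumulation_size=0))
# -> (100, 1, ['Batch size is larger than your dataset, clamping batch size to: 100'])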
@@ -1399,6 +1400,8 @@ def save_training_settings( **kwargs ):
 	if settings['gpus'] > get_device_count():
 		settings['gpus'] = get_device_count()
 
+	settings['optimizer'] = 'adamw' if settings['gpus'] == 1 else 'adamw_zero'
+
 	LEARNING_RATE_SCHEMES = ["MultiStepLR", "CosineAnnealingLR_Restart"]
 	if 'learning_rate_scheme' not in settings or settings['learning_rate_scheme'] not in LEARNING_RATE_SCHEMES:
 		settings['learning_rate_scheme'] = LEARNING_RATE_SCHEMES[0]
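Note on this hunk: the added line derives the optimizer from the already-clamped GPU count: plain `adamw` for single-GPU runs, `adamw_zero` otherwise (presumably a ZeRO-sharded variant, as the name suggests). A minimal sketch of the selection logic, with a hypothetical function name and return shape:

def pick_training_backends(gpus, device_count, learning_rate_scheme=None):
    """Sketch of the added selection logic; the function shape is assumed."""
    # Never request more GPUs than the machine reports.
    gpus = min(gpus, device_count)

    # Single-GPU runs keep plain AdamW; multi-GPU runs switch to
    # 'adamw_zero', presumably a ZeRO-sharded variant.
    optimizer = 'adamw' if gpus == 1 else 'adamw_zero'

    # Fall back to the first known scheme if the requested one is unknown.
    LEARNING_RATE_SCHEMES = ["MultiStepLR", "CosineAnnealingLR_Restart"]
    if learning_rate_scheme not in LEARNING_RATE_SCHEMES:
        learning_rate_scheme = LEARNING_RATE_SCHEMES[0]

    return gpus, optimizer, learning_rate_scheme

print(pick_training_backends(gpus=4, device_count=2))
# -> (2, 'adamw_zero', 'MultiStepLR')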