'voice-fixer':False,# getting tired of long initialization times in a Colab for downloading a large dataset for it
@@ -2067,6 +2068,7 @@ def setup_args():
parser.add_argument("--prune-nonfinal-outputs",default=default_arguments['prune-nonfinal-outputs'],action='store_true',help="Deletes non-final output files on completing a generation")
parser.add_argument("--device-override",default=default_arguments['device-override'],help="A device string to override pass through Torch")
parser.add_argument("--sample-batch-size",default=default_arguments['sample-batch-size'],type=int,help="Sets how many batches to use during the autoregressive samples pass")
parser.add_argument("--unsqueeze_sample_batches",default=default_arguments['unsqueeze_sample_batches'],action='store_true',help="Unsqueezes sample batches to process one by one after sampling")
parser.add_argument("--concurrency-count",type=int,default=default_arguments['concurrency-count'],help="How many Gradio events to process at once")
parser.add_argument("--autocalculate-voice-chunk-duration-size",type=float,default=default_arguments['autocalculate-voice-chunk-duration-size'],help="Number of seconds to suggest voice chunk size for (for example, 100 seconds of audio at 10 seconds per chunk will suggest 10 chunks)")
parser.add_argument("--output-sample-rate",type=int,default=default_arguments['output-sample-rate'],help="Sample rate to resample the output to (from 24KHz)")