layout["inference_tts"]["inputs"]["max-steps"]=gr.Slider(value=50,minimum=1,maximum=200,step=1,label="Max Steps",info="Limits how many steps to perform in the NAR-len (demask) pass.")
layout["inference_tts"]["inputs"]["max-duration"]=gr.Slider(value=12,minimum=1,maximum=32,step=0.1,label="Maximum Duration",info="Limits how long an utterance can be.")
layout["inference_tts"]["inputs"]["input-prompt-length"]=gr.Slider(value=0.0,minimum=0.0,maximum=12.0,step=0.5,label="Input Prompt Repeat/Trim Length",info="Repeats/trims the input prompt down to X seconds (0 to disable).")
layout["inference_tts"]["inputs"]["text-language"]=gr.Dropdown(choices=get_languages(),label="Language (Text)",value="auto",info="Language the input text is in.")
layout["inference_tts"]["inputs"]["language"]=gr.Dropdown(choices=get_languages(),label="Language (Output)",value="auto",info="Target language/accent to output.")
layout["inference_tts"]["inputs"]["split-text-by"]=gr.Dropdown(choices=["sentences","lines"],label="Text Delimiter",info="How to split the text into utterances.",value="sentences")
layout["inference_tts"]["inputs"]["context-history"]=gr.Slider(value=0,minimum=0,maximum=4,step=1,label="(Rolling) Context History",info="How many prior lines to serve as the context/prefix (0 to disable).")
layout["inference_tts"]["inputs"]["ar-temperature"]=gr.Slider(value=1.0,minimum=0.0,maximum=1.5,step=0.05,label="Temperature (AR/NAR-len)",info="Adjusts the probabilities in the AR/NAR-len. (0 to greedy* sample)")
layout["inference_tts"]["inputs"]["nar-temperature"]=gr.Slider(value=0.0,minimum=0.0,maximum=1.5,step=0.05,label="Temperature (NAR)",info="Adjusts the probabilities in the NAR. (0 to greedy sample)")
layout["inference_tts"]["inputs"]["modality"]=gr.Dropdown(value="Auto",choices=["Auto","AR+NAR","NAR-len"],label="Modality",info="Whether to inference with the AR+NAR or through the NAR-len.")
layout["inference_tts"]["inputs"]["cfg-rescale"]=gr.Slider(value=0.75,minimum=0.0,maximum=1.0,step=0.05,label="CFG Rescale (Phi)",info="Factor when rescaling for Classifier Free Guidance (0 to disable).")
with gr.Row():
    layout["inference_tts"]["inputs"]["min-p"] = gr.Slider(value=0.0, minimum=0.0, maximum=1.0, step=0.05, label="Min P", info="Filter out logits lower than this value.")
    layout["inference_tts"]["inputs"]["top-p"] = gr.Slider(value=1.0, minimum=0.0, maximum=1.0, step=0.05, label="Top P", info=r"Limits the samples that are outside the top P% of probabilities.")
    layout["inference_tts"]["inputs"]["top-k"] = gr.Slider(value=0, minimum=0, maximum=1024, step=1, label="Top K", info="Limits the samples to the top K of probabilities.")
    layout["inference_tts"]["inputs"]["repetition-penalty"] = gr.Slider(value=1.0, minimum=0.0, maximum=5.0, step=0.05, label="Repetition Penalty", info="Incurs a penalty to tokens based on how often they appear in a sequence.")
    layout["inference_tts"]["inputs"]["repetition-penalty-decay"] = gr.Slider(value=0.0, minimum=-2.0, maximum=2.0, step=0.05, label="Repetition Penalty Length Decay", info="Modifies the repetition penalty based on how far back in time the token appeared in the sequence.")
    layout["inference_tts"]["inputs"]["length-penalty"] = gr.Slider(value=0.0, minimum=-2.0, maximum=2.0, step=0.05, label="Length Penalty", info="(AR only) Modifies the probability of a stop token based on the current length of the sequence.")
    layout["inference_tts"]["inputs"]["max-levels"] = gr.Slider(value=7, minimum=0, maximum=7, step=1, label="Max NAR Levels", info="Limits how many steps to perform in the NAR pass.")
    layout["inference_tts"]["inputs"]["beam-width"] = gr.Slider(value=0, minimum=0, maximum=32, step=1, label="Beam Width", info="Number of branches to search through for beam search sampling.")
    layout["inference_tts"]["inputs"]["prefix-silence"] = gr.Slider(value=0.0, minimum=0.0, maximum=1.0, step=0.5, label="Silence Prefix Duration", info="Amount of silence to prefix to the output response before beginning inference.")
    layout["inference_tts"]["inputs"]["input-prompt-prefix"] = gr.Checkbox(label="Input Prompt as Prefix", info="Treats the input prompt clip as the prefix of the generated sequence.")
    layout["inference_tts"]["inputs"]["dynamic-sampling"] = gr.Checkbox(label="Dynamic Temperature", info="Dynamically adjusts the temperature based on the highest confident predicted token per sampling step.")
    layout["inference_tts"]["inputs"]["entropix-sampling"] = gr.Checkbox(label="Entropix Sampling", info="Dynamically samples based on entropy/varentropy values from the logits / attention scores.")
    layout["inference_tts"]["inputs"]["refine-on-stop"] = gr.Checkbox(label="Refine on <stop>", info="Uses the last step's logits for the AR sequence instead.")
    layout["inference_tts"]["inputs"]["mirostat-tau"] = gr.Slider(value=0.0, minimum=0.0, maximum=8.0, step=0.05, label="Mirostat τ (Tau)", info="The \"surprise\" value when performing mirostat sampling. 0 to disable.")
    layout["inference_tts"]["inputs"]["mirostat-eta"] = gr.Slider(value=0.0, minimum=0.0, maximum=2.0, step=0.05, label="Mirostat η (Eta)", info="The \"learning rate\" during mirostat sampling applied to the maximum surprise.")
with gr.Row():
    layout["inference_tts"]["inputs"]["dry-multiplier"] = gr.Slider(value=0.0, minimum=0.0, maximum=8.0, step=0.05, label="DRY Multiplier", info="The multiplying factor for the DRY score penalty (0 to disable DRY sampling).")
    layout["inference_tts"]["inputs"]["dry-base"] = gr.Slider(value=1.75, minimum=0.0, maximum=8.0, step=0.05, label="DRY Base", info="The base of the exponent in the DRY score penalty.")
    layout["inference_tts"]["inputs"]["dry-allowed-length"] = gr.Slider(value=2, minimum=0, maximum=75, step=1, label="Allowed Length", info="The maximum length a token can be to perform the DRY penalty with.")
    layout["inference_tts"]["inputs"]["layer-skip-exit-layer"] = gr.Slider(value=11, minimum=0, maximum=11, step=1, label="Layer Skip Exit Layer", info="Maximum model layer to exit early from.")
    layout["inference_tts"]["inputs"]["layer-skip-entropy-threshold"] = gr.Slider(value=0.1, minimum=0, maximum=1.0, step=0.01, label="Layer Skip Entropy Threshold", info="Entropy threshold for early exit.")
    layout["inference_tts"]["inputs"]["layer-skip-varentropy-threshold"] = gr.Slider(value=0.1, minimum=0, maximum=1.0, step=0.01, label="Layer Skip Varentropy Threshold", info="Varentropy threshold for early exit.")
layout["inference_stt"]["inputs"]["ar-temperature"]=gr.Slider(value=0.0,minimum=0.0,maximum=1.5,step=0.05,label="Temperature (AR)",info="Modifies the randomness from the samples in the AR. (0 to greedy sample)")
layout["inference_stt"]["inputs"]["language"]=gr.Dropdown(choices=get_languages(),label="Language",value="en",info="Language of the input audio being transcribed.")
layout["inference_stt"]["inputs"]["top-p"]=gr.Slider(value=1.0,minimum=0.0,maximum=1.0,step=0.05,label="Top P",info=r"Limits the samples that are outside the top P% of probabilities.")
layout["inference_stt"]["inputs"]["top-k"]=gr.Slider(value=0,minimum=0,maximum=1024,step=1,label="Top K",info="Limits the samples to the top K of probabilities.")
layout["inference_stt"]["inputs"]["beam-width"]=gr.Slider(value=0,minimum=0,maximum=32,step=1,label="Beam Width",info="Number of branches to search through for beam search sampling.")
layout["inference_stt"]["inputs"]["repetition-penalty"]=gr.Slider(value=1.0,minimum=-2.0,maximum=2.0,step=0.05,label="Repetition Penalty",info="Incurs a penalty to tokens based on how often they appear in a sequence.")
layout["inference_stt"]["inputs"]["repetition-penalty-decay"]=gr.Slider(value=0.0,minimum=-2.0,maximum=2.0,step=0.05,label="Repetition Penalty Length Decay",info="Modifies the reptition penalty based on how far back in time the token appeared in the sequence.")
layout["inference_stt"]["inputs"]["length-penalty"]=gr.Slider(value=0.0,minimum=-2.0,maximum=2.0,step=0.05,label="Length Penalty",info="(AR only) Modifies the probability of a stop token based on the current length of the sequence.")
layout["inference_stt"]["inputs"]["dynamic-sampling"]=gr.Checkbox(label="Dynamic Temperature",info="Dynamically adjusts the temperature based on the highest confident predicted token per sampling step.")
layout["inference_stt"]["inputs"]["mirostat-tau"]=gr.Slider(value=0.0,minimum=0.0,maximum=8.0,step=0.05,label="Mirostat τ (Tau)",info="The \"surprise\" value when performing mirostat sampling. 0 to disable.")
layout["inference_stt"]["inputs"]["mirostat-eta"]=gr.Slider(value=0.0,minimum=0.0,maximum=2.0,step=0.05,label="Mirostat η (Eta)",info="The \"learning rate\" during mirostat sampling applied to the maximum surprise.")
with gr.Row():
    layout["inference_stt"]["inputs"]["dry-multiplier"] = gr.Slider(value=0.0, minimum=0.0, maximum=8.0, step=0.05, label="DRY Multiplier", info="The multiplying factor for the DRY score penalty (0 to disable DRY sampling).")
    layout["inference_stt"]["inputs"]["dry-base"] = gr.Slider(value=1.75, minimum=0.0, maximum=8.0, step=0.05, label="DRY Base", info="The base of the exponent in the DRY score penalty.")
    layout["inference_stt"]["inputs"]["dry-allowed-length"] = gr.Slider(value=2, minimum=0, maximum=75, step=1, label="Allowed Length", info="The maximum length a token can be to perform the DRY penalty with.")
layout["settings"]["inputs"]["models"]=gr.Dropdown(choices=get_model_paths(),value=args.yamlorargs.model,label="Model",info="Model to load. Can load from a config YAML or the weights itself.")
layout["settings"]["inputs"]["device"]=gr.Dropdown(choices=get_devices(),value="cuda:0",label="Device",info="Device to load the weights onto.")
with gr.Row():
    layout["settings"]["inputs"]["dtype"] = gr.Dropdown(choices=get_dtypes(), value="auto", label="Precision", info="Tensor type to load the model under.")
    layout["settings"]["inputs"]["attentions"] = gr.Dropdown(choices=get_attentions(), value="auto", label="Attentions", info="Attention mechanism to utilize.")