Merge branch 'master' into gradient-clipping

Muhammad Rizqi Nur 2022-11-02 20:48:58 +07:00
commit 237e79c77d
39 changed files with 2327 additions and 1232 deletions

javascript/extensions.js Normal file

@ -0,0 +1,35 @@
function extensions_apply(_, _){
    disable = []
    update = []
    gradioApp().querySelectorAll('#extensions input[type="checkbox"]').forEach(function(x){
        if(x.name.startsWith("enable_") && ! x.checked)
            disable.push(x.name.substr(7))

        if(x.name.startsWith("update_") && x.checked)
            update.push(x.name.substr(7))
    })

    restart_reload()

    return [JSON.stringify(disable), JSON.stringify(update)]
}

function extensions_check(){
    gradioApp().querySelectorAll('#extensions .extension_status').forEach(function(x){
        x.innerHTML = "Loading..."
    })

    return []
}

function install_extension_from_index(button, url){
    button.disabled = "disabled"
    button.value = "Installing..."

    textarea = gradioApp().querySelector('#extension_to_install textarea')
    textarea.value = url
    textarea.dispatchEvent(new Event("input", { bubbles: true }))

    gradioApp().querySelector('#install_extension_button').click()
}
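
For context, helpers like extensions_apply() are typically driven from the Python UI code through Gradio's _js hook: the JavaScript function runs in the browser first, and whatever it returns becomes the inputs that the Python callback receives. A minimal, hypothetical sketch of that round trip (component and callback names below are illustrative assumptions, not the repository's exact code):

    # hypothetical Gradio 3.x wiring sketch; not the repository's exact code
    import json
    import gradio as gr

    def apply_and_restart(disable_json, update_json):
        # receives the two JSON strings returned by extensions_apply() in the browser
        print("to disable:", json.loads(disable_json))
        print("to update:", json.loads(update_json))

    with gr.Blocks() as demo:
        disabled = gr.Textbox(visible=False)
        updates = gr.Textbox(visible=False)
        apply_btn = gr.Button("Apply and restart UI")
        # _js runs extensions_apply() client-side; its return value replaces the inputs
        apply_btn.click(fn=apply_and_restart, _js="extensions_apply", inputs=[disabled, updates], outputs=[])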

javascript/progressbar.js

@ -3,8 +3,21 @@ global_progressbars = {}
 galleries = {}
 galleryObservers = {}
+// this tracks launches of window.setTimeout for progressbar to prevent starting a new timeout when the previous is still running
+timeoutIds = {}
 function check_progressbar(id_part, id_progressbar, id_progressbar_span, id_skip, id_interrupt, id_preview, id_gallery){
-    var progressbar = gradioApp().getElementById(id_progressbar)
+    // gradio 3.8's enlightened approach allows them to create two nested div elements inside each other with same id
+    // every time you use gr.HTML(elem_id='xxx'), so we handle this here
+    var progressbar = gradioApp().querySelector("#"+id_progressbar+" #"+id_progressbar)
+    var progressbarParent
+    if(progressbar){
+        progressbarParent = gradioApp().querySelector("#"+id_progressbar)
+    } else{
+        progressbar = gradioApp().getElementById(id_progressbar)
+        progressbarParent = null
+    }
     var skip = id_skip ? gradioApp().getElementById(id_skip) : null
     var interrupt = gradioApp().getElementById(id_interrupt)
@ -26,18 +39,26 @@ function check_progressbar(id_part, id_progressbar, id_progressbar_span, id_skip
         global_progressbars[id_progressbar] = progressbar
         var mutationObserver = new MutationObserver(function(m){
+            if(timeoutIds[id_part]) return;
             preview = gradioApp().getElementById(id_preview)
             gallery = gradioApp().getElementById(id_gallery)
             if(preview != null && gallery != null){
                 preview.style.width = gallery.clientWidth + "px"
                 preview.style.height = gallery.clientHeight + "px"
+                if(progressbarParent) progressbar.style.width = progressbarParent.clientWidth + "px"
                 //only watch gallery if there is a generation process going on
                 check_gallery(id_gallery);
                 var progressDiv = gradioApp().querySelectorAll('#' + id_progressbar_span).length > 0;
-                if(!progressDiv){
+                if(progressDiv){
+                    timeoutIds[id_part] = window.setTimeout(function() {
+                        timeoutIds[id_part] = null
+                        requestMoreProgress(id_part, id_progressbar_span, id_skip, id_interrupt)
+                    }, 500)
+                } else{
                     if (skip) {
                         skip.style.display = "none"
                     }
@ -49,11 +70,8 @@ function check_progressbar(id_part, id_progressbar, id_progressbar_span, id_skip
                         galleries[id_gallery] = null;
                     }
                 }
             }
-            window.setTimeout(function() { requestMoreProgress(id_part, id_progressbar_span, id_skip, id_interrupt) }, 500)
         });
         mutationObserver.observe( progressbar, { childList:true, subtree:true })
     }

launch.py

@ -7,6 +7,7 @@ import shlex
 import platform
 dir_repos = "repositories"
+dir_extensions = "extensions"
 python = sys.executable
 git = os.environ.get('GIT', "git")
 index_url = os.environ.get('INDEX_URL', "")
@ -16,11 +17,11 @@ def extract_arg(args, name):
     return [x for x in args if x != name], name in args

-def run(command, desc=None, errdesc=None):
+def run(command, desc=None, errdesc=None, custom_env=None):
     if desc is not None:
         print(desc)

-    result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
+    result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, env=os.environ if custom_env is None else custom_env)

     if result.returncode != 0:
@ -101,7 +102,25 @@ def version_check(commit):
         else:
             print("Not a git clone, can't perform version check.")
     except Exception as e:
-        print("versipm check failed",e)
+        print("version check failed", e)
+
+
+def run_extensions_installers():
+    if not os.path.isdir(dir_extensions):
+        return
+
+    for dirname_extension in os.listdir(dir_extensions):
+        path_installer = os.path.join(dir_extensions, dirname_extension, "install.py")
+        if not os.path.isfile(path_installer):
+            continue
+
+        try:
+            env = os.environ.copy()
+            env['PYTHONPATH'] = os.path.abspath(".")
+
+            print(run(f'"{python}" "{path_installer}"', errdesc=f"Error running install.py for extension {dirname_extension}", custom_env=env))
+        except Exception as e:
+            print(e, file=sys.stderr)

 def prepare_enviroment():
@ -189,6 +208,8 @@ def prepare_enviroment():
         run_pip(f"install -r {requirements_file}", "requirements for Web UI")

+    run_extensions_installers()
+
     if update_check:
         version_check(commit)
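
The new run_extensions_installers() hook runs each extension's install.py with PYTHONPATH pointed at the webui root, so an installer script can import the launch module and reuse its helpers. A minimal sketch of the kind of install.py an extension might ship (hypothetical example, assuming the run_pip and is_installed helpers that launch.py provides):

    # hypothetical extensions/<name>/install.py; executed by run_extensions_installers()
    # with PYTHONPATH set to the webui root, so the launch module is importable
    import launch

    # install an extra dependency only if it is missing (example package name)
    if not launch.is_installed("send2trash"):
        launch.run_pip("install send2trash", "send2trash for example-extension")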

localizations/de_DE.json

@ -70,7 +70,7 @@
"None": "Nichts", "None": "Nichts",
"Prompt matrix": "Promptmatrix", "Prompt matrix": "Promptmatrix",
"Prompts from file or textbox": "Prompts aus Datei oder Textfeld", "Prompts from file or textbox": "Prompts aus Datei oder Textfeld",
"X/Y plot": "X/Y Graf", "X/Y plot": "X/Y Graph",
"Put variable parts at start of prompt": "Variable teile am start des Prompt setzen", "Put variable parts at start of prompt": "Variable teile am start des Prompt setzen",
"Iterate seed every line": "Iterate seed every line", "Iterate seed every line": "Iterate seed every line",
"List of prompt inputs": "List of prompt inputs", "List of prompt inputs": "List of prompt inputs",

localizations/it_IT.json

@ -23,6 +23,7 @@
"Image Browser": "Galleria immagini", "Image Browser": "Galleria immagini",
"Inspiration": "Ispirazione", "Inspiration": "Ispirazione",
"Settings": "Impostazioni", "Settings": "Impostazioni",
"Extensions": "Estensioni",
"Prompt": "Prompt", "Prompt": "Prompt",
"Negative prompt": "Prompt negativo", "Negative prompt": "Prompt negativo",
"Run": "Esegui", "Run": "Esegui",
@ -72,24 +73,27 @@
"Variation strength": "Forza della variazione", "Variation strength": "Forza della variazione",
"Resize seed from width": "Ridimensiona il seme dalla larghezza", "Resize seed from width": "Ridimensiona il seme dalla larghezza",
"Resize seed from height": "Ridimensiona il seme dall'altezza", "Resize seed from height": "Ridimensiona il seme dall'altezza",
"Open for Clip Aesthetic!": "Apri per Estetica CLIP!", "Open for Clip Aesthetic!": "Apri per Gradienti Estetici (CLIP)",
"▼": "▼", "▼": "▼",
"Aesthetic weight": "Estetica - Peso", "Aesthetic weight": "Estetica - Peso",
"Aesthetic steps": "Estetica - Passi", "Aesthetic steps": "Estetica - Passi",
"Aesthetic learning rate": "Estetica - Tasso di apprendimento", "Aesthetic learning rate": "Estetica - Tasso di apprendimento",
"Slerp interpolation": "Interpolazione Slerp", "Slerp interpolation": "Interpolazione Slerp",
"Aesthetic imgs embedding": "Estetica - Incorporamento di immagini", "Aesthetic imgs embedding": "Estetica - Incorporamento di immagini",
"None": "Nessuno", "None": "Niente",
"Aesthetic text for imgs": "Estetica - Testo per le immagini", "Aesthetic text for imgs": "Estetica - Testo per le immagini",
"Slerp angle": "Angolo Slerp", "Slerp angle": "Angolo Slerp",
"Is negative text": "È un testo negativo", "Is negative text": "È un testo negativo",
"Script": "Script", "Script": "Script",
"Random": "Random", "Random grid": "Generaz. casuale (griglia)",
"Random": "Generaz. casuale (no griglia)",
"StylePile": "StylePile",
"Advanced prompt matrix": "Matrice di prompt avanzata", "Advanced prompt matrix": "Matrice di prompt avanzata",
"Advanced Seed Blending": "Miscelazione Semi Avanzata",
"Alternate Sampler Noise Schedules": "Metodi alternativi di campionamento del rumore", "Alternate Sampler Noise Schedules": "Metodi alternativi di campionamento del rumore",
"Animator v6": "Animator v6",
"Asymmetric tiling": "Piastrellatura asimmetrica", "Asymmetric tiling": "Piastrellatura asimmetrica",
"Custom code": "Custom code", "Custom code": "Codice personalizzato",
"Dynamic Prompting v0.2": "Prompt dinamici v0.2",
"Embedding to Shareable PNG": "Incorporamento convertito in PNG condivisibile", "Embedding to Shareable PNG": "Incorporamento convertito in PNG condivisibile",
"Force symmetry": "Forza la simmetria", "Force symmetry": "Forza la simmetria",
"Prompts interpolation": "Interpola Prompt", "Prompts interpolation": "Interpola Prompt",
@ -97,20 +101,90 @@
"Prompt morph": "Metamorfosi del prompt", "Prompt morph": "Metamorfosi del prompt",
"Prompts from file or textbox": "Prompt da file o da casella di testo", "Prompts from file or textbox": "Prompt da file o da casella di testo",
"To Infinity and Beyond": "Verso l'infinito e oltre", "To Infinity and Beyond": "Verso l'infinito e oltre",
"Seed travel": "Seed travel", "Seed travel": "Interpolazione semi",
"Shift attention": "Sposta l'attenzione", "Shift attention": "Sposta l'attenzione",
"Text to Vector Graphics": "Da testo a grafica vettoriale", "Text to Vector Graphics": "Da testo a grafica vettoriale",
"X/Y plot": "Grafico X/Y", "X/Y plot": "Grafico X/Y",
"X/Y/Z plot": "Grafico X/Y/Z", "X/Y/Z plot": "Grafico X/Y/Z",
"Dynamic Prompting v0.13.6": "Prompt dinamici v0.13.6",
"Create inspiration images": "Crea immagini di ispirazione", "Create inspiration images": "Crea immagini di ispirazione",
"Loops": "Loops", "step1 min/max": "Passi min(o max)",
"step1 min/max": "step1 min/max", "step2 min/max": "Passi max (o min)",
"step2 min/max": "step2 min/max", "step cnt": "Q.tà di Passi",
"cfg1 min/max": "cfg1 min/max", "cfg1 min/max": "CFG min (o max)",
"cfg2 min/max": "cfg2 min/max", "cfg2 min/max": "CFG max (o min)",
"Keep -1 for seeds": "Keep -1 for seeds", "cfg cnt": "Q.tà di CFG",
"Draw legend": "Disegna legenda",
"Include Separate Images": "Includi immagini separate",
"Keep -1 for seeds": "Mantieni sempre il seme a -1",
"x/y change": "Inverti ordine assi X/Y (Passi/CFG)",
"Loops": "Cicli",
"Focus on:": "Focus su:",
"No focus": "Nessun Focus",
"Portraits (tick Restore faces above for best results)": "Ritratti (selezionare 'Restaura volti' in alto per ottenere i migliori risultati)",
"Feminine and extra attractive (tick Restore faces above for best results)": "Femminile ed estremamente attraente (selezionare 'Restaura volti' per ottenere i migliori risultati)",
"Masculine and extra attractive (tick Restore faces above for best results)": "Maschile ed estremamente attraente (selezionare 'Restaura volti' per ottenere i migliori risultati)",
"Monsters": "Mostri",
"Robots": "Robot",
"Retrofuturistic": "Retrofuturistico",
"Propaganda": "Propaganda",
"Landscapes": "Paesaggi",
"Hints": "Suggerimenti",
"Image type": "Tipo di immagine",
"Not set": "Non impostato",
"Photography": "Fotografia",
"Digital art": "Arte digitale",
"3D Rendering": "3D Rendering",
"Painting": "Dipinto",
"Sketch": "Schizzo",
"Classic Comics": "Fumetti classici",
"Modern Comics": "Fumetti moderni",
"Manga": "Manga",
"Vector art": "Arte vettoriale",
"Visual style": "Stile visivo",
"Realism": "Realismo",
"Photorealism": "Fotorealismo",
"Hyperrealism": "Iperrealismo",
"Surrealism": "Surrealismo",
"Modern Art": "Arte moderna",
"Fauvism": "Fauvismo",
"Futurism": "Futurismo",
"Painterly": "Pittorico",
"Pointillisme": "Puntinismo",
"Abstract": "Astratto",
"Pop Art": "Pop Art",
"Impressionist": "Impressionista",
"Cubism": "Cubismo",
"Linocut": "Linoleografia",
"Fantasy": "Fantasia",
"Colors": "Colori",
"Chaotic": "Caotico",
"Primary colors": "Colori primari",
"Colorful": "Colorato",
"Vivid": "Vivido",
"Muted colors": "Colori tenui",
"Low contrast": "Basso contrasto",
"Desaturated": "Desaturato",
"Grayscale": "Scala di grigi",
"Black and white": "Bianco e nero",
"Infrared": "Infrarosso",
"Complementary": "Colori complementari",
"Non-complementary": "Colori non complementari",
"View": "Visuale",
"Tilt shift": "Tilt shift",
"Wide-angle": "Angolo ampio",
"Portrait": "Ritratto",
"Macro": "Macro",
"Microscopic": "Microscopico",
"Isometric": "Isometrico",
"Panorama": "Panorama",
"Aerial photograph": "Fotografia aerea",
"Artist focus (not quite finished, not sure it helps)": "Focus sull'artista (non del tutto finito, non è sicuro che aiuti)",
"B/W Photograpy": "Fotografia B/N",
"Portrait photo": "Foto ritratto",
"Usage: a <corgi|cat> wearing <goggles|a hat>": "Utilizzo: a <corgi|cat> wearing <goggles|a hat>", "Usage: a <corgi|cat> wearing <goggles|a hat>": "Utilizzo: a <corgi|cat> wearing <goggles|a hat>",
"Noise Scheduler": "Programmatore del rumore", "Seeds": "Semi",
"Noise Scheduler": "Pianificazione del rumore",
"Default": "Predefinito", "Default": "Predefinito",
"Karras": "Karras", "Karras": "Karras",
"Exponential": "Esponenziale", "Exponential": "Esponenziale",
@ -121,30 +195,56 @@
"Beta distribution (VP only)": "Distribuzione Beta (Solo CV)", "Beta distribution (VP only)": "Distribuzione Beta (Solo CV)",
"Beta min (VP only)": "Beta min (Solo CV)", "Beta min (VP only)": "Beta min (Solo CV)",
"Epsilon (VP only)": "Epsilon (Solo CV)", "Epsilon (VP only)": "Epsilon (Solo CV)",
"Running in txt2img mode:": "Esecuzione in modalità txt2img:",
"Render these video formats:": "Renderizza in questi formati:",
"GIF": "GIF",
"MP4": "MP4",
"WEBM": "WEBM",
"Animation Parameters": "Parametri animazione",
"Total Animation Length (s)": "Durata totale dell'animazione (s)",
"Framerate": "Frequenza dei fotogrammi",
"Smoothing_Frames": "Fotogrammi da appianare",
"Add_Noise": "Aggiungi rumore",
"Noise Strength": "Intensità del rumore",
"Initial Parameters": "Parametri iniziali",
"Denoising Strength": "Intensità di riduzione del rumore",
"Seed_March": "Seed_March",
"Zoom Factor (scale/s)": "Fattore di ingrandimento (scala/s)",
"X Pixel Shift (pixels/s)": "Sposta i Pixel sull'asse X (pixel/s)",
"Y Pixel Shift (pixels/s)": "Sposta i Pixel sull'asse Y (pixel/s)",
"Rotation (deg/s)": "Rotazione (gradi/s)",
"Prompt Template, applied to each keyframe below": "Modello di prompt, applicato a ciascun fotogramma chiave qui di seguito",
"Positive Prompts": "Prompt positivi",
"Negative Prompts": "Prompt negativi",
"Props, Stamps": "Immagini Clipart da diffondere (prop), o da applicare in post elaborazione e non essere diffuse (stamp).",
"Poper_Folder:": "Cartella Immagini Clipart (PNG trasparenti):",
"Supported Keyframes:": "Fotogrammi chiave supportati:",
"time_s | source | video, images, img2img | path": "time_s | source | video, images, img2img | path",
"time_s | prompt | positive_prompts | negative_prompts": "time_s | prompt | positive_prompts | negative_prompts",
"time_s | template | positive_prompts | negative_prompts": "time_s | template | positive_prompts | negative_prompts",
"time_s | transform | zoom | x_shift | y_shift | rotation": "time_s | transform | zoom | x_shift | y_shift | rotation",
"time_s | seed | new_seed_int": "time_s | seed | new_seed_int",
"time_s | noise | added_noise_strength": "time_s | noise | added_noise_strength",
"time_s | denoise | denoise_value": "time_s | denoise | denoise_value",
"time_s | set_text | textblock_name | text_prompt | x | y | w | h | fore_color | back_color | font_name": "time_s | set_text | textblock_name | text_prompt | x | y | w | h | fore_color | back_color | font_name",
"time_s | clear_text | textblock_name": "time_s | clear_text | textblock_name",
"time_s | prop | prop_name | prop_filename | x pos | y pos | scale | rotation": "time_s | prop | prop_name | prop_filename | x pos | y pos | scale | rotation",
"time_s | set_stamp | stamp_name | stamp_filename | x pos | y pos | scale | rotation": "time_s | set_stamp | stamp_name | stamp_filename | x pos | y pos | scale | rotation",
"time_s | clear_stamp | stamp_name": "time_s | clear_stamp | stamp_name",
"time_s | col_set": "time_s | col_set",
"time_s | col_clear": "time_s | col_clear",
"time_s | model | model": "time_s | model | model",
"img2img_mode": "Modalità img2img",
"Keyframes:": "Fotogrammi chiave:",
"Tile X": "Piastrella asse X", "Tile X": "Piastrella asse X",
"Tile Y": "Piastrella asse Y", "Tile Y": "Piastrella asse Y",
"Python code": "Codice Python", "Python code": "Codice Python",
"Combinatorial generation": "Generazione combinatoria",
"Combinations": "Combinazioni",
"Choose a number of terms from a list, in this case we choose two artists": "Scegli un numero di termini da un elenco, in questo caso scegliamo due artisti",
"{2$$artist1|artist2|artist3}": "{2$$artist1|artist2|artist3}",
"If $$ is not provided, then 1$$ is assumed.": "Se $$ non viene fornito, si presume 1$$.",
"{1-3$$artist1|artist2|artist3}": "{1-3$$artist1|artist2|artist3}",
"In this case, a random number of artists between 1 and 3 is chosen.": "In questo caso viene scelto un numero casuale di artisti compreso tra 1 e 3.",
"Wildcards": "Termini jolly",
"If the groups wont drop down click": "Se i gruppi non vengono visualizzati, clicca",
"here": "qui",
"to fix the issue.": "per correggere il problema.",
"WILDCARD_DIR: scripts/wildcards": "WILDCARD_DIR: scripts/wildcards",
"You can add more wildcards by creating a text file with one term per line and name is mywildcards.txt. Place it in scripts/wildcards.": "Puoi aggiungere termini jolly creando un file di testo con un termine per riga e nominandolo, per esempio, mywildcards.txt. Inseriscilo in scripts/wildcards.",
"__<folder>/mywildcards__": "__<folder>/mywildcards__",
"will then become available.": "diverrà quindi disponibile.",
"Source embedding to convert": "Incorporamento sorgente da convertire", "Source embedding to convert": "Incorporamento sorgente da convertire",
"Embedding token": "Token Incorporamento", "Embedding token": "Token Incorporamento",
"Output directory": "Cartella di output", "Output directory": "Cartella di output",
"Horizontal symmetry": "Simmetria orizzontale", "Horizontal symmetry": "Simmetria orizzontale",
"Vertical symmetry": "Simmetria verticale", "Vertical symmetry": "Simmetria verticale",
"Alt. symmetry method (blending)": "Alt. symmetry method (blending)", "Alt. symmetry method (blending)": "Metodo di simmetria alternativo (miscelazione)",
"Apply every n steps": "Applica ogni n passi", "Apply every n steps": "Applica ogni n passi",
"Skip last n steps": "Salta gli ultimi n passi", "Skip last n steps": "Salta gli ultimi n passi",
"Interpolation prompt": "Prompt di interpolazione", "Interpolation prompt": "Prompt di interpolazione",
@ -161,18 +261,17 @@
"Iterate seed every line": "Iterare il seme per ogni riga", "Iterate seed every line": "Iterare il seme per ogni riga",
"List of prompt inputs": "Elenco di prompt di input", "List of prompt inputs": "Elenco di prompt di input",
"Upload prompt inputs": "Carica un file contenente i prompt di input", "Upload prompt inputs": "Carica un file contenente i prompt di input",
"n": "n", "n": "Esegui n volte",
"Destination seed(s) (Comma separated)": "Seme/i di destinazione (separati da virgola)", "Destination seed(s) (Comma separated)": "Seme/i di destinazione (separati da virgola)",
"Only use Random seeds (Unless comparing paths)": "Usa solo semi casuali (a meno che non si confrontino i percorsi)", "Only use Random seeds (Unless comparing paths)": "Usa solo semi casuali (a meno che non si confrontino i percorsi)",
"Number of random seed(s)": "Numero di semi casuali", "Number of random seed(s)": "Numero di semi casuali",
"Compare paths (Separate travels from 1st seed to each destination)": "Confronta percorsi (transizioni separate dal primo seme a ciascuna destinazione)", "Compare paths (Separate travels from 1st seed to each destination)": "Confronta percorsi (transizioni separate dal primo seme a ciascuna destinazione)",
"Steps": "Passi", "Steps": "Passi",
"Loop back to initial seed": "Ritorna al seme iniziale", "Loop back to initial seed": "Ritorna al seme iniziale",
"Bump seed (If > 0 do a Compare Paths but only one image. No video)": "Bump seed (If > 0 do a Compare Paths but only one image. No video)", "Bump seed (If > 0 do a Compare Paths but only one image. No video)": "Modula seme (se > 0 mescola il seme iniziale con quelli di destinazione ma solo un'immagine. Nessun video)",
"Show generated images in ui": "Mostra le immagini generate nell'interfaccia utente", "Show generated images in ui": "Mostra le immagini generate nell'interfaccia utente",
"\"Hug the middle\" during interpolation": "\"Hug the middle\" durante l'interpolazione", "\"Hug the middle\" during interpolation": "\"Hug the middle\" durante l'interpolazione. Rende l'interpolazione un po' più veloce all'inizio e alla fine. A volte può produrre video più fluidi, il più delle volte no.",
"Allow the default Euler a Sampling method. (Does not produce good results)": "Consenti Euler_a come metodo di campionamento predefinito. (Non produce buoni risultati)", "Allow the default Euler a Sampling method. (Does not produce good results)": "Consenti Euler_a come metodo di campionamento predefinito. (Non produce buoni risultati)",
"Visual style": "Stile visivo",
"Illustration": "Illustrazione", "Illustration": "Illustrazione",
"Logo": "Logo", "Logo": "Logo",
"Drawing": "Disegno", "Drawing": "Disegno",
@ -210,28 +309,44 @@
"Eta": "ETA", "Eta": "ETA",
"Clip skip": "Salta CLIP", "Clip skip": "Salta CLIP",
"Denoising": "Riduzione del rumore", "Denoising": "Riduzione del rumore",
"Cond. Image Mask Weight": "Cond. Image Mask Weight", "Cond. Image Mask Weight": "Peso maschera immagine condizionale",
"X values": "Valori per X", "X values": "Valori per X",
"Y type": "Parametro asse Y", "Y type": "Parametro asse Y",
"Y values": "Valori per Y", "Y values": "Valori per Y",
"Draw legend": "Disegna legenda",
"Include Separate Images": "Includi immagini separate",
"Z type": "Parametro asse Z", "Z type": "Parametro asse Z",
"Z values": "Valori per Z", "Z values": "Valori per Z",
"Combinatorial generation": "Generazione combinatoria",
"Combinatorial batches": "Lotti combinatori",
"Magic prompt": "Prompt magico",
"Fixed seed": "Seme fisso",
"Combinations": "Combinazioni",
"Choose a number of terms from a list, in this case we choose two artists": "Scegli un numero di termini da un elenco, in questo caso scegliamo due artisti",
"{{2$artist1|artist2|artist3}}": "{{2$artist1|artist2|artist3}}",
"If $ is not provided, then 1$ is assumed.\n\n A range can be provided:": "Se $ non viene fornito, si presume 1$.\n\n È possibile fornire un intervallo di valori:",
"{{1-3$artist1|artist2|artist3}}": "{{1-3$artist1|artist2|artist3}}",
"In this case, a random number of artists between 1 and 3 is chosen.": "In questo caso viene scelto un numero casuale di artisti compreso tra 1 e 3.",
"Wildcards": "Termini jolly",
"If the groups wont drop down click": "Se i gruppi non vengono visualizzati, clicca",
"here": "qui",
"to fix the issue.": "per correggere il problema.",
"WILDCARD_DIR: C:\\stable-diffusion-webui\\extensions\\sd-dynamic-prompts\\wildcards": "WILDCARD_DIR: C:\\stable-diffusion-webui\\extensions\\sd-dynamic-prompts\\wildcards",
"You can add more wildcards by creating a text file with one term per line and name is mywildcards.txt. Place it in scripts/wildcards.": "Puoi aggiungere termini jolly creando un file di testo con un termine per riga e nominandolo, per esempio, mywildcards.txt. Inseriscilo in scripts/wildcards.",
"__<folder>/mywildcards__": "__<cartella>/mywildcards__",
"will then become available.": "diverrà quindi disponibile.",
"Artist or styles name list. '.txt' files with one name per line": "Elenco nomi di artisti o stili. File '.txt' con un nome per riga", "Artist or styles name list. '.txt' files with one name per line": "Elenco nomi di artisti o stili. File '.txt' con un nome per riga",
"Prompt words before artist or style name": "Parole chiave prima del nome dell'artista o dello stile", "Prompt words before artist or style name": "Parole chiave prima del nome dell'artista o dello stile",
"Prompt words after artist or style name": "Parole chiave dopo il nome dell'artista o dello stile", "Prompt words after artist or style name": "Parole chiave dopo il nome dell'artista o dello stile",
"Negative Prompt": "Prompt negativo", "Negative Prompt": "Prompt negativo",
"Save": "Salva", "Save": "Salva",
"Send to img2img": "Invia a img2img", "Send to img2img": "Invia a img2img",
"Send to inpaint": "Invia a inpaint", "Send to inpaint": "Invia a Inpaint",
"Send to extras": "Invia a extra", "Send to extras": "Invia a Extra",
"Make Zip when Save?": "Crea un file ZIP quando si usa 'Salva'", "Make Zip when Save?": "Crea un file ZIP quando si usa 'Salva'",
"Textbox": "Casella di testo", "Textbox": "Casella di testo",
"Interrogate\nCLIP": "Interroga\nCLIP", "Interrogate\nCLIP": "Interroga\nCLIP",
"Interrogate\nDeepBooru": "Interroga\nDeepBooru", "Interrogate\nDeepBooru": "Interroga\nDeepBooru",
"Inpaint": "Inpaint", "Inpaint": "Inpaint",
"Batch img2img": "Lotti img2img", "Batch img2img": "img2img in lotti",
"Image for img2img": "Immagine per img2img", "Image for img2img": "Immagine per img2img",
"Drop Image Here": "Trascina l'immagine qui", "Drop Image Here": "Trascina l'immagine qui",
"Image for inpainting with mask": "Immagine per inpainting con maschera", "Image for inpainting with mask": "Immagine per inpainting con maschera",
@ -246,8 +361,8 @@
"Masked content": "Contenuto mascherato", "Masked content": "Contenuto mascherato",
"fill": "riempi", "fill": "riempi",
"original": "originale", "original": "originale",
"latent noise": "rumore latente", "latent noise": "rumore nello spazio latente",
"latent nothing": "latenza nulla", "latent nothing": "nulla nello spazio latente",
"Inpaint at full resolution": "Inpaint alla massima risoluzione", "Inpaint at full resolution": "Inpaint alla massima risoluzione",
"Inpaint at full resolution padding, pixels": "Inpaint con riempimento a piena risoluzione, pixel", "Inpaint at full resolution padding, pixels": "Inpaint con riempimento a piena risoluzione, pixel",
"Process images in a directory on the same machine where the server is running.": "Elabora le immagini in una cartella sulla stessa macchina su cui è in esecuzione il server.", "Process images in a directory on the same machine where the server is running.": "Elabora le immagini in una cartella sulla stessa macchina su cui è in esecuzione il server.",
@ -258,14 +373,13 @@
"Crop and resize": "Ritaglia e ridimensiona", "Crop and resize": "Ritaglia e ridimensiona",
"Resize and fill": "Ridimensiona e riempie", "Resize and fill": "Ridimensiona e riempie",
"Advanced loopback": "Advanced loopback", "Advanced loopback": "Advanced loopback",
"Animator v5": "Animator v5",
"External Image Masking": "Immagine esterna per la mascheratura", "External Image Masking": "Immagine esterna per la mascheratura",
"img2img alternative test": "Test alternativo per img2img", "img2img alternative test": "Test alternativo per img2img",
"img2tiles": "img2tiles", "img2tiles": "img2tiles",
"Interpolate": "Interpolare", "Interpolate": "Interpola immagini",
"Loopback": "Rielaborazione ricorsiva", "Loopback": "Rielaborazione ricorsiva",
"Loopback and Superimpose": "Rielabora ricorsivamente e sovraimponi", "Loopback and Superimpose": "Rielabora ricorsivamente e sovraimponi",
"Outpaint Canvas Region": "Regione della tela di Outpaint", "Alpha Canvas": "Alpha Canvas",
"Outpainting mk2": "Outpainting mk2", "Outpainting mk2": "Outpainting mk2",
"Poor man's outpainting": "Poor man's outpainting", "Poor man's outpainting": "Poor man's outpainting",
"SD upscale": "Ampliamento SD", "SD upscale": "Ampliamento SD",
@ -273,67 +387,32 @@
"[C] Video to video": "[C] Video to video", "[C] Video to video": "[C] Video to video",
"Videos": "Filmati", "Videos": "Filmati",
"Deforum-webui (use tab extension instead!)": "Deforum-webui (usa piuttosto la scheda Deforum delle estensioni!)", "Deforum-webui (use tab extension instead!)": "Deforum-webui (usa piuttosto la scheda Deforum delle estensioni!)",
"Use first image colors (custom color correction)": "Use first image colors (custom color correction)", "Use first image colors (custom color correction)": "Usa i colori della prima immagine (correzione del colore personalizzata)",
"Denoising strength change factor (overridden if proportional used)": "Denoising strength change factor (overridden if proportional used)", "Denoising strength change factor (overridden if proportional used)": "Fattore di variazione dell'intensità di riduzione del rumore (sovrascritto se si usa proporzionale)",
"Zoom level": "Zoom level", "Zoom level": "Livello di Zoom",
"Direction X": "Direction X", "Direction X": "Direzione X",
"Direction Y": "Direction Y", "Direction Y": "Direzione Y",
"Denoising strength start": "Denoising strength start", "Denoising strength start": "Intensità di riduzione del rumore - Inizio",
"Denoising strength end": "Denoising strength end", "Denoising strength end": "Intensità di riduzione del rumore - Fine",
"Denoising strength proportional change starting value": "Denoising strength proportional change starting value", "Denoising strength proportional change starting value": "Intensità di riduzione del rumore - Valore iniziale della variazione proporzionale",
"Denoising strength proportional change ending value (0.1 = disabled)": "Denoising strength proportional change ending value (0.1 = disabled)", "Denoising strength proportional change ending value (0.1 = disabled)": "Intensità di riduzione del rumore - Valore finale della variazione proporzionale (0.1 = disabilitato)",
"Saturation enhancement per image": "Saturation enhancement per image", "Saturation enhancement per image": "Miglioramento della saturazione per ciascuna immagine",
"Use sine denoising strength variation": "Use sine denoising strength variation", "Use sine denoising strength variation": "Utilizzare la variazione sinusoidale dell'intensità di riduzione del rumore",
"Phase difference": "Phase difference", "Phase difference": "Differenza di Fase",
"Denoising strength exponentiation": "Denoising strength exponentiation", "Denoising strength exponentiation": "Esponenziazione dell'intensità di riduzione del rumore",
"Use sine zoom variation": "Use sine zoom variation", "Use sine zoom variation": "Usa la variazione sinusoidale dello zoom",
"Zoom exponentiation": "Zoom exponentiation", "Zoom exponentiation": "Esponeniazione dello Zoom",
"Use multiple prompts": "Use multiple prompts", "Use multiple prompts": "Usa prompt multipli",
"Same seed per prompt": "Same seed per prompt", "Same seed per prompt": "Stesso seme per ogni prompt",
"Same seed for everything": "Same seed for everything", "Same seed for everything": "Stesso seme per tutto",
"Original init image for everything": "Original init image for everything", "Original init image for everything": "Immagine originale di inizializzazione per tutto",
"Multiple prompts : 1 line positive, 1 line negative, leave a blank line for no negative": "Multiple prompts : 1 line positive, 1 line negative, leave a blank line for no negative", "Multiple prompts : 1 line positive, 1 line negative, leave a blank line for no negative": "Prompt multipli: 1 riga positivo, 1 riga negativo, lasciare una riga vuota per nessun negativo",
"Render these video formats:": "Renderizza in questi formati:", "Running in img2img mode:": "Esecuzione in modalità img2img:",
"GIF": "GIF",
"MP4": "MP4",
"WEBM": "WEBM",
"Animation Parameters": "Parametri animazione",
"Total Animation Length (s)": "Durata totale dell'animazione (s)",
"Framerate": "Frequenza dei fotogrammi",
"Initial Parameters": "Parametri iniziali",
"Denoising Strength (overrides img2img slider)": "Intensità di riduzione del rumore (sovrascrive il cursore img2img)",
"Seed_March": "Seed_March",
"Smoothing_Frames": "Smoothing_Frames",
"Zoom Factor (scale/s)": "Fattore di ingrandimento (scala/s)",
"X Pixel Shift (pixels/s)": "X Pixel Shift (pixels/s)",
"Y Pixel Shift (pixels/s)": "Y Pixel Shift (pixels/s)",
"Rotation (deg/s)": "Rotazione (gradi/s)",
"Prompt Template, applied to each keyframe below": "Modello di prompt, applicato a ciascun fotogramma chiave qui di seguito",
"Positive Prompts": "Prompt positivi",
"Negative Prompts": "Prompt negativi",
"Props": "Props",
"Folder:": "Cartella:",
"Supported Keyframes:": "Fotogrammi chiave supportati:",
"time_s | source | video, images, img2img | path": "time_s | source | video, images, img2img | path",
"time_s | prompt | positive_prompts | negative_prompts": "time_s | prompt | positive_prompts | negative_prompts",
"time_s | template | positive_prompts | negative_prompts": "time_s | template | positive_prompts | negative_prompts",
"time_s | transform | zoom | x_shift | y_shift | rotation": "time_s | transform | zoom | x_shift | y_shift | rotation",
"time_s | seed | new_seed_int": "time_s | seed | new_seed_int",
"time_s | denoise | denoise_value": "time_s | denoise | denoise_value",
"time_s | set_text | textblock_name | text_prompt | x | y | w | h | fore_color | back_color | font_name": "time_s | set_text | textblock_name | text_prompt | x | y | w | h | fore_color | back_color | font_name",
"time_s | clear_text | textblock_name": "time_s | clear_text | textblock_name",
"time_s | prop | prop_name | prop_filename | x pos | y pos | scale | rotation": "time_s | prop | prop_name | prop_filename | x pos | y pos | scale | rotation",
"time_s | set_stamp | stamp_name | stamp_filename | x pos | y pos | scale | rotation": "time_s | set_stamp | stamp_name | stamp_filename | x pos | y pos | scale | rotation",
"time_s | clear_stamp | stamp_name": "time_s | clear_stamp | stamp_name",
"time_s | col_set": "time_s | col_set",
"time_s | col_clear": "time_s | col_clear",
"time_s | model | sd-v1-4_f16, sd-v1-5-inpainting, sd-v1-5-pruned-emaonly_fp16, wd-v1-3-float16": "time_s | model | sd-v1-4_f16, sd-v1-5-inpainting, sd-v1-5-pruned-emaonly_fp16, wd-v1-3-float16",
"Keyframes:": "Fotogrammi chiave:",
"Masking preview size": "Dimensione dell'anteprima della mascheratura", "Masking preview size": "Dimensione dell'anteprima della mascheratura",
"Draw new mask on every run": "Disegna una nuova maschera ad ogni esecuzione", "Draw new mask on every run": "Disegna una nuova maschera ad ogni esecuzione",
"Process non-contigious masks separately": "Elaborare le maschere non contigue separatamente", "Process non-contigious masks separately": "Elaborare le maschere non contigue separatamente",
"should be 2 or lower.": "dovrebbe essere 2 o inferiore.", "should be 2 or lower.": "dovrebbe essere 2 o inferiore.",
"Override `Sampling method` to Euler?(this method is built for it)": "Sovrascrivi il `Metodo di campionamento` con Eulero? (questo metodo è stato creato per questo)", "Override `Sampling method` to Euler?(this method is built for it)": "Sovrascrivi il 'Metodo di campionamento' con Eulero? (questo metodo è stato creato per questo)",
"Override `prompt` to the same value as `original prompt`?(and `negative prompt`)": "Sovrascrivi `prompt` con lo stesso valore del `prompt originale`? (e `prompt negativo`)", "Override `prompt` to the same value as `original prompt`?(and `negative prompt`)": "Sovrascrivi `prompt` con lo stesso valore del `prompt originale`? (e `prompt negativo`)",
"Original prompt": "Prompt originale", "Original prompt": "Prompt originale",
"Original negative prompt": "Prompt negativo originale", "Original negative prompt": "Prompt negativo originale",
@ -345,7 +424,7 @@
"Sigma adjustment for finding noise for image": "Regolazione Sigma per trovare il rumore per l'immagine", "Sigma adjustment for finding noise for image": "Regolazione Sigma per trovare il rumore per l'immagine",
"Tile size": "Dimensione piastrella", "Tile size": "Dimensione piastrella",
"Tile overlap": "Sovrapposizione piastrella", "Tile overlap": "Sovrapposizione piastrella",
"alternate img2img imgage": "alternate img2img imgage", "alternate img2img imgage": "Immagine alternativa per img2img",
"interpolation values": "Valori di interpolazione", "interpolation values": "Valori di interpolazione",
"Refinement loops": "Cicli di affinamento", "Refinement loops": "Cicli di affinamento",
"Loopback alpha": "Trasparenza rielaborazione ricorsiva", "Loopback alpha": "Trasparenza rielaborazione ricorsiva",
@ -353,20 +432,17 @@
"Blending strides": "Passi di fusione", "Blending strides": "Passi di fusione",
"Reuse Seed": "Riusa il seme", "Reuse Seed": "Riusa il seme",
"One grid": "Singola griglia", "One grid": "Singola griglia",
"Interpolate VarSeed": "Interpola il seme di variazione", "Interpolate VarSeed": "Interpola il seme della variazione",
"Paste on mask": "Incolla sulla maschera", "Paste on mask": "Incolla sulla maschera",
"Inpaint all": "Inpaint tutto", "Inpaint all": "Inpaint tutto",
"Interpolate in latent": "Interpola in latenza", "Interpolate in latent": "Interpola nello spazio latente",
"Denoising strength change factor": "Fattore di variazione dell'intensità di denoising", "Denoising strength change factor": "Fattore di variazione dell'intensità di denoising",
"Superimpose alpha": "Sovrapporre Alpha", "Superimpose alpha": "Sovrapporre Alpha",
"Show extra settings": "Mostra impostazioni aggiuntive", "Show extra settings": "Mostra impostazioni aggiuntive",
"Reuse seed": "Riusa il seme", "Reuse seed": "Riusa il seme",
"CFG decay factor": "Fattore di decadimento CFG", "CFG decay factor": "Fattore di decadimento CFG",
"CFG target": "CFG di destinazione", "CFG target": "CFG di destinazione",
"Show/Hide Canvas": "Mostra/Nascondi Tela", "Show/Hide AlphaCanvas": "Mostra/Nascondi AlphaCanvas",
"Left start coord": "Coordinate iniziali - Sinistra",
"top start coord": "Coordinate iniziali - Sopra",
"unused": "non usato",
"Recommended settings: Sampling Steps: 80-100, Sampler: Euler a, Denoising strength: 0.8": "Impostazioni consigliate: Passi di campionamento: 80-100, Campionatore: Euler a, Intensità denoising: 0.8", "Recommended settings: Sampling Steps: 80-100, Sampler: Euler a, Denoising strength: 0.8": "Impostazioni consigliate: Passi di campionamento: 80-100, Campionatore: Euler a, Intensità denoising: 0.8",
"Pixels to expand": "Pixel da espandere", "Pixels to expand": "Pixel da espandere",
"Outpainting direction": "Direzione di Outpainting", "Outpainting direction": "Direzione di Outpainting",
@ -462,7 +538,7 @@
"Modules": "Moduli", "Modules": "Moduli",
"Enter hypernetwork layer structure": "Immettere la struttura del livello della Iperrete", "Enter hypernetwork layer structure": "Immettere la struttura del livello della Iperrete",
"Select activation function of hypernetwork": "Selezionare la funzione di attivazione della Iperrete", "Select activation function of hypernetwork": "Selezionare la funzione di attivazione della Iperrete",
"linear": "linear", "linear": "lineare",
"relu": "relu", "relu": "relu",
"leakyrelu": "leakyrelu", "leakyrelu": "leakyrelu",
"elu": "elu", "elu": "elu",
@ -490,9 +566,9 @@
"softshrink": "softshrink", "softshrink": "softshrink",
"softsign": "softsign", "softsign": "softsign",
"tanhshrink": "tanhshrink", "tanhshrink": "tanhshrink",
"threshold": "threshold", "threshold": "soglia",
"Select Layer weights initialization. relu-like - Kaiming, sigmoid-like - Xavier is recommended": "Seleziona inizializzazione dei pesi dei livelli. relu-like - Kaiming, Si consiglia sigmoid-like - Xavier", "Select Layer weights initialization. relu-like - Kaiming, sigmoid-like - Xavier is recommended": "Seleziona inizializzazione dei pesi dei livelli. relu-like - Kaiming, Si consiglia sigmoid-like - Xavier",
"Normal": "Normal", "Normal": "Normale",
"KaimingUniform": "KaimingUniform", "KaimingUniform": "KaimingUniform",
"KaimingNormal": "KaimingNormal", "KaimingNormal": "KaimingNormal",
"XavierUniform": "XavierUniform", "XavierUniform": "XavierUniform",
@ -516,7 +592,7 @@
"Split image overlap ratio": "Rapporto di sovrapposizione dell'immagine", "Split image overlap ratio": "Rapporto di sovrapposizione dell'immagine",
"Focal point face weight": "Peso della faccia del punto focale", "Focal point face weight": "Peso della faccia del punto focale",
"Focal point entropy weight": "Peso dell'entropia del punto focale", "Focal point entropy weight": "Peso dell'entropia del punto focale",
"Focal point edges weight": "Peso dei bordi del punto focalePeso dei bordi del punto focale", "Focal point edges weight": "Peso dei bordi del punto focale",
"Create debug image": "Crea immagine di debug", "Create debug image": "Crea immagine di debug",
"Preprocess": "Preprocessa", "Preprocess": "Preprocessa",
"Train an embedding or Hypernetwork; you must specify a directory with a set of 1:1 ratio images": "Addestra un Incorporamento o Iperrete; è necessario specificare una directory con un set di immagini con rapporto 1:1", "Train an embedding or Hypernetwork; you must specify a directory with a set of 1:1 ratio images": "Addestra un Incorporamento o Iperrete; è necessario specificare una directory con un set di immagini con rapporto 1:1",
@ -573,7 +649,7 @@
"ex B.": "esempio B.", "ex B.": "esempio B.",
"Original Text = \"A, B, C\"Selected Tags = \"(nothing)\"Edit Tags = \"X, Y\"": "Testo originale = \"A, B, C\"Tag selezionati = \"(nothing)\"Modifica tag = \"X, Y\"", "Original Text = \"A, B, C\"Selected Tags = \"(nothing)\"Edit Tags = \"X, Y\"": "Testo originale = \"A, B, C\"Tag selezionati = \"(nothing)\"Modifica tag = \"X, Y\"",
"Result = \"A, B, C, X, Y\"(add X and Y to the end (default))": "Risultato = \"A, B, C, X, Y\"(aggiunge X e Y alla fine (predefinito))", "Result = \"A, B, C, X, Y\"(add X and Y to the end (default))": "Risultato = \"A, B, C, X, Y\"(aggiunge X e Y alla fine (predefinito))",
"Risultato = \"X, Y, A, B, C\"(aggiunge X e Y all'inizio (\"Aggiungi tag addizionali all'inizio\" selezionato))": "Risultato = \"X, Y, A, B, C\"(aggiunge X e Y all'inizio (\"Aggiungi tag addizionali all'inizio\" selezionato))", "Result = \"X, Y, A, B, C\"(add X and Y to the beginning (\"Append additional tags to the beginning\" checked))": "Risultato = \"X, Y, A, B, C\"(aggiunge X e Y all'inizio (\"Aggiungi tag addizionali all'inizio\" selezionato))",
"ex C.": "esempio C.", "ex C.": "esempio C.",
"Original Text = \"A, B, C, D, E\"Selected Tags = \"A, B, D\"Edit Tags = \", X, \"": "Testo originale = \"A, B, C, D, E\"Tag selezionati = \"A, B, D\"Modifica tag = \", X, \"", "Original Text = \"A, B, C, D, E\"Selected Tags = \"A, B, D\"Edit Tags = \", X, \"": "Testo originale = \"A, B, C, D, E\"Tag selezionati = \"A, B, D\"Modifica tag = \", X, \"",
"Result = \"X, C, E\"(A->\"\", B->X, D->\"\")": "Risultato = \"X, C, E\"(A->\"\", B->X, D->\"\")", "Result = \"X, C, E\"(A->\"\", B->X, D->\"\")": "Risultato = \"X, C, E\"(A->\"\", B->X, D->\"\")",
@ -603,10 +679,10 @@
"H": "A", "H": "A",
"seed": "Seme", "seed": "Seme",
"sampler": "Campionatore", "sampler": "Campionatore",
"Enable extras": "Abilita Extra", "Enable extras": "Abilita 'Extra'",
"subseed": "sub seme", "subseed": "Sub seme",
"subseed_strength": "Intensità subseme", "subseed_strength": "Intensità subseme",
"steps": "passi", "steps": "Passi",
"ddim_eta": "ETA DDIM", "ddim_eta": "ETA DDIM",
"n_batch": "Numero lotto", "n_batch": "Numero lotto",
"make_grid": "Crea griglia", "make_grid": "Crea griglia",
@ -623,7 +699,7 @@
"iter": "Iterativo", "iter": "Iterativo",
"fixed": "Fisso", "fixed": "Fisso",
"random": "Casuale", "random": "Casuale",
"schedule": "Programmato", "schedule": "Pianificato",
"Animation settings": "Impostazioni animazione", "Animation settings": "Impostazioni animazione",
"animation_mode": "Modalità animazione", "animation_mode": "Modalità animazione",
"2D": "2D", "2D": "2D",
@ -659,7 +735,7 @@
"fov_schedule": "Pianificazione del campo visivo", "fov_schedule": "Pianificazione del campo visivo",
"near_schedule": "Pianificazione da vicino", "near_schedule": "Pianificazione da vicino",
"far_schedule": "Pianificazione da lontano", "far_schedule": "Pianificazione da lontano",
"To enable seed schedule select seed behavior — 'schedule'": "Per abilitare la pianificazione del seme, seleziona il comportamento del seme — 'programma'", "To enable seed schedule select seed behavior — 'schedule'": "Per abilitare la pianificazione del seme, seleziona il comportamento del seme — 'pianifica'",
"seed_schedule": "Pianificazione del seme", "seed_schedule": "Pianificazione del seme",
"Coherence:": "Coerenza:", "Coherence:": "Coerenza:",
"color_coherence": "Coerenza del colore", "color_coherence": "Coerenza del colore",
@ -691,8 +767,8 @@
"animation_prompts": "Prompt animazione", "animation_prompts": "Prompt animazione",
"Init settings": "Impostazioni iniziali", "Init settings": "Impostazioni iniziali",
"use_init": "Usa le impostazioni iniziali", "use_init": "Usa le impostazioni iniziali",
"from_img2img_instead_of_link": "from_img2img_instead_of_link", "from_img2img_instead_of_link": "da img2img invece che da link",
"strength_0_no_init": "strength_0_no_init", "strength_0_no_init": "Intensità 0 nessuna inizializzazione",
"strength": "Intensità", "strength": "Intensità",
"init_image": "Immagine di inizializzazione", "init_image": "Immagine di inizializzazione",
"use_mask": "Usa maschera", "use_mask": "Usa maschera",
@ -715,11 +791,11 @@
"resume_from_timestring": "Riprendi da stringa temporale", "resume_from_timestring": "Riprendi da stringa temporale",
"resume_timestring": "Stringa temporale", "resume_timestring": "Stringa temporale",
"Video output settings": "Impostazioni uscita video", "Video output settings": "Impostazioni uscita video",
"skip_video_for_run_all": "skip_video_for_run_all", "skip_video_for_run_all": "Salta il video per eseguire tutto",
"fps": "FPS", "fps": "FPS",
"output_format": "Formato di uscita", "output_format": "Formato di uscita",
"PIL gif": "PIL gif", "PIL gif": "PIL GIF",
"FFMPEG mp4": "FFMPEG mp4", "FFMPEG mp4": "FFMPEG MP4",
"ffmpeg_location": "Percorso ffmpeg", "ffmpeg_location": "Percorso ffmpeg",
"add_soundtrack": "Aggiungi colonna sonora", "add_soundtrack": "Aggiungi colonna sonora",
"soundtrack_path": "Percorso colonna sonora", "soundtrack_path": "Percorso colonna sonora",
@ -825,9 +901,8 @@
"All images generated with CompVis/stable-diffusion-v1-4 +": "Tutte le immagini sono state generate con CompVis/stable-diffusion-v1-4 +", "All images generated with CompVis/stable-diffusion-v1-4 +": "Tutte le immagini sono state generate con CompVis/stable-diffusion-v1-4 +",
"artists.csv": "artists.csv", "artists.csv": "artists.csv",
"| License: Attribution 4.0 International (CC BY 4.0)": "| Licenza: Attribution 4.0 International (CC BY 4.0)", "| License: Attribution 4.0 International (CC BY 4.0)": "| Licenza: Attribution 4.0 International (CC BY 4.0)",
"extras": "Extra", "Favorites": "Preferiti",
"favorites": "Preferiti", "Others": "Altre immagini",
"others": "Altre immagini",
"Images directory": "Cartella immagini", "Images directory": "Cartella immagini",
"Dropdown": "Elenco cartelle", "Dropdown": "Elenco cartelle",
"First Page": "Prima pagina", "First Page": "Prima pagina",
@ -843,8 +918,8 @@
"keyword": "Parola chiave", "keyword": "Parola chiave",
"Generate Info": "Genera Info", "Generate Info": "Genera Info",
"File Name": "Nome del file", "File Name": "Nome del file",
"Collect": "Aggiungi ai preferiti", "Move to favorites": "Aggiungi ai preferiti",
"Renew page": "Aggiorna la pagina", "Renew Page": "Aggiorna la pagina",
"Number": "Numero", "Number": "Numero",
"set_index": "Imposta indice", "set_index": "Imposta indice",
"load_switch": "load_switch", "load_switch": "load_switch",
@ -856,13 +931,13 @@
"mediums": "Tecniche", "mediums": "Tecniche",
"movements": "Movimenti artistici", "movements": "Movimenti artistici",
"All": "Tutto", "All": "Tutto",
"Favorites": "Preferiti",
"Exclude abandoned": "Escludi scartati", "Exclude abandoned": "Escludi scartati",
"Abandoned": "Scartati", "Abandoned": "Scartati",
"Key word": "Parola chiave", "Key word": "Parola chiave",
"Get inspiration": "Ispirami", "Get inspiration": "Ispirami",
"to txt2img": "Invia a txt2img", "to txt2img": "Invia a txt2img",
"to img2img": "Invia a img2img", "to img2img": "Invia a img2img",
"Collect": "Salva nei preferiti",
"Don't show again": "Scarta", "Don't show again": "Scarta",
"Move out": "Rimuovi", "Move out": "Rimuovi",
"set button": "Pulsante imposta", "set button": "Pulsante imposta",
@ -918,8 +993,8 @@
"Add a second progress bar to the console that shows progress for an entire job.": "Aggiungi una seconda barra di avanzamento alla console che mostra l'avanzamento complessivo del lavoro.", "Add a second progress bar to the console that shows progress for an entire job.": "Aggiungi una seconda barra di avanzamento alla console che mostra l'avanzamento complessivo del lavoro.",
"Training": "Addestramento", "Training": "Addestramento",
"Move VAE and CLIP to RAM when training hypernetwork. Saves VRAM.": "Sposta VAE e CLIP nella RAM durante l'addestramento di Iperreti. Risparmia VRAM.", "Move VAE and CLIP to RAM when training hypernetwork. Saves VRAM.": "Sposta VAE e CLIP nella RAM durante l'addestramento di Iperreti. Risparmia VRAM.",
"Filename word regex": "Filename word regex", "Filename word regex": "Espressione regolare per estrarre parole dal nome del file",
"Filename join string": "Filename join string", "Filename join string": "Stringa per unire le parole estratte dal nome del file",
"Number of repeats for a single input image per epoch; used only for displaying epoch number": "Numero di ripetizioni per una singola immagine di input per epoca; utilizzato solo per visualizzare il numero di epoca", "Number of repeats for a single input image per epoch; used only for displaying epoch number": "Numero di ripetizioni per una singola immagine di input per epoca; utilizzato solo per visualizzare il numero di epoca",
"Save an csv containing the loss to log directory every N steps, 0 to disable": "Salva un file CSV contenente la perdita nella cartella di registrazione ogni N passaggi, 0 per disabilitare", "Save an csv containing the loss to log directory every N steps, 0 to disable": "Salva un file CSV contenente la perdita nella cartella di registrazione ogni N passaggi, 0 per disabilitare",
"Stable Diffusion": "Stable Diffusion", "Stable Diffusion": "Stable Diffusion",
@ -967,7 +1042,7 @@
"ar_AR": "ar_AR", "ar_AR": "ar_AR",
"de_DE": "de_DE", "de_DE": "de_DE",
"es_ES": "es_ES", "es_ES": "es_ES",
"fr-FR": "fr-FR", "fr_FR": "fr_FR",
"it_IT": "it_IT", "it_IT": "it_IT",
"ja_JP": "ja_JP", "ja_JP": "ja_JP",
"ko_KR": "ko_KR", "ko_KR": "ko_KR",
@ -987,11 +1062,17 @@
"sigma tmin": "sigma tmin", "sigma tmin": "sigma tmin",
"sigma noise": "sigma noise", "sigma noise": "sigma noise",
"Eta noise seed delta": "ETA del delta del seme del rumore", "Eta noise seed delta": "ETA del delta del seme del rumore",
"Number of columns on image gallery": "Numero di colonne nella galleria di immagini",
"Aesthetic Image Scorer": "Punteggio delle immagini estetiche", "Aesthetic Image Scorer": "Punteggio delle immagini estetiche",
"Save score as EXIF or PNG Info Chunk": "Salva il punteggio come info EXIF o PNG", "Save score as EXIF or PNG Info Chunk": "Salva il punteggio come info EXIF o PNG",
"Save score as tag (Windows Only)": "Salva punteggio come etichetta (solo Windows)", "aesthetic_score": "Punteggio estetico",
"cfg_scale": "Scala CFG",
"sd_model_hash": "Hash del modello SD",
"hash": "Hash",
"Save tags (Windows only)": "Salva etichette (solo Windows)",
"Save category (Windows only)": "Salva categoria (solo Windows)",
"Save generation params text": "Salva testo parametri di generazione",
"Force CPU (Requires Custom Script Reload)": "Forza CPU (richiede il ricaricamento dello script personalizzato)", "Force CPU (Requires Custom Script Reload)": "Forza CPU (richiede il ricaricamento dello script personalizzato)",
"Number of columns on image gallery": "Numero di colonne nella galleria di immagini",
"Images Browser": "Galleria immagini", "Images Browser": "Galleria immagini",
"Preload images at startup": "Precarica le immagini all'avvio", "Preload images at startup": "Precarica le immagini all'avvio",
"Number of columns on the page": "Numero di colonne nella pagina", "Number of columns on the page": "Numero di colonne nella pagina",
@ -1003,6 +1084,40 @@
"Download localization template": "Scarica il modello per la localizzazione", "Download localization template": "Scarica il modello per la localizzazione",
"Reload custom script bodies (No ui updates, No restart)": "Ricarica gli script personalizzati (nessun aggiornamento dell'interfaccia utente, nessun riavvio)", "Reload custom script bodies (No ui updates, No restart)": "Ricarica gli script personalizzati (nessun aggiornamento dell'interfaccia utente, nessun riavvio)",
"Restart Gradio and Refresh components (Custom Scripts, ui.py, js and css only)": "Riavvia Gradio e aggiorna i componenti (solo script personalizzati, ui.py, js e css)", "Restart Gradio and Refresh components (Custom Scripts, ui.py, js and css only)": "Riavvia Gradio e aggiorna i componenti (solo script personalizzati, ui.py, js e css)",
"Installed": "Installato",
"Available": "Disponibile",
"Install from URL": "Installa da URL",
"Apply and restart UI": "Applica e riavvia l'interfaccia utente",
"Check for updates": "Controlla aggiornamenti",
"Extension": "Estensione",
"URL": "URL",
"Update": "Aggiorna",
"aesthetic-gradients": "Gradienti Estetici (CLIP)",
"https://github.com/AUTOMATIC1111/stable-diffusion-webui-aesthetic-gradients": "https://github.com/AUTOMATIC1111/stable-diffusion-webui-aesthetic-gradients",
"unknown": "sconosciuto",
"dataset-tag-editor": "Dataset Tag Editor",
"https://github.com/toshiaki1729/stable-diffusion-webui-dataset-tag-editor.git": "https://github.com/toshiaki1729/stable-diffusion-webui-dataset-tag-editor.git",
"deforum-for-automatic1111-webui": "Deforum",
"https://github.com/deforum-art/deforum-for-automatic1111-webui": "https://github.com/deforum-art/deforum-for-automatic1111-webui",
"sd-dynamic-prompts": "Prompt dinamici",
"https://github.com/adieyal/sd-dynamic-prompts": "https://github.com/adieyal/sd-dynamic-prompts",
"stable-diffusion-webui-aesthetic-image-scorer": "Punteggio immagini estetiche",
"https://github.com/tsngo/stable-diffusion-webui-aesthetic-image-scorer": "https://github.com/tsngo/stable-diffusion-webui-aesthetic-image-scorer",
"stable-diffusion-webui-artists-to-study": "Artisti per studiare",
"https://github.com/camenduru/stable-diffusion-webui-artists-to-study": "https://github.com/camenduru/stable-diffusion-webui-artists-to-study",
"stable-diffusion-webui-images-browser": "Galleria immagini",
"https://github.com/yfszzx/stable-diffusion-webui-images-browser": "https://github.com/yfszzx/stable-diffusion-webui-images-browser",
"stable-diffusion-webui-inspiration": "Ispirazione",
"https://github.com/yfszzx/stable-diffusion-webui-inspiration": "https://github.com/yfszzx/stable-diffusion-webui-inspiration",
"tag-autocomplete": "Autocompletamento etichette",
"https://github.com/DominikDoom/a1111-sd-webui-tagcomplete.git": "https://github.com/DominikDoom/a1111-sd-webui-tagcomplete.git",
"wildcards": "Termini Jolly",
"https://github.com/AUTOMATIC1111/stable-diffusion-webui-wildcards.git": "https://github.com/AUTOMATIC1111/stable-diffusion-webui-wildcards.git",
"Load from:": "Carica da:",
"Extension index URL": "URL dell'indice delle Estensioni",
"URL for extension's git repository": "URL del repository GIT dell'estensione",
"Local directory name": "Nome cartella locale",
"Install": "Installa",
"Prompt (press Ctrl+Enter or Alt+Enter to generate)": "Prompt (premi Ctrl+Invio o Alt+Invio per generare)", "Prompt (press Ctrl+Enter or Alt+Enter to generate)": "Prompt (premi Ctrl+Invio o Alt+Invio per generare)",
"Negative prompt (press Ctrl+Enter or Alt+Enter to generate)": "Prompt negativo (premere Ctrl+Invio o Alt+Invio per generare)", "Negative prompt (press Ctrl+Enter or Alt+Enter to generate)": "Prompt negativo (premere Ctrl+Invio o Alt+Invio per generare)",
"Add a random artist to the prompt.": "Aggiungi un artista casuale al prompt.", "Add a random artist to the prompt.": "Aggiungi un artista casuale al prompt.",
@ -1030,6 +1145,7 @@
"Make an attempt to produce a picture similar to what would have been produced with same seed at specified resolution": "Prova a produrre un'immagine simile a quella che sarebbe stata prodotta con lo stesso seme alla risoluzione specificata", "Make an attempt to produce a picture similar to what would have been produced with same seed at specified resolution": "Prova a produrre un'immagine simile a quella che sarebbe stata prodotta con lo stesso seme alla risoluzione specificata",
"This text is used to rotate the feature space of the imgs embs": "Questo testo viene utilizzato per ruotare lo spazio delle funzioni delle immagini incorporate", "This text is used to rotate the feature space of the imgs embs": "Questo testo viene utilizzato per ruotare lo spazio delle funzioni delle immagini incorporate",
"How many times to repeat processing an image and using it as input for the next iteration": "Quante volte ripetere l'elaborazione di un'immagine e utilizzarla come input per l'iterazione successiva", "How many times to repeat processing an image and using it as input for the next iteration": "Quante volte ripetere l'elaborazione di un'immagine e utilizzarla come input per l'iterazione successiva",
"Hello, StylePile here.\nUntil some weird bug gets fixed you will see this even if the script itself is not active. Meanwhile, some hints to take your artwork to new heights:\nUse the 'Focus on' dropdown to select complex presets. Toggle selections below (with or without Focus) to affect your results. Mix and match to get some interesting results. \nAnd some general Stable Diffusion tips that will take your designs to next level:\nYou can add parenthesis to make parts of the prompt stronger. So (((cute))) kitten will make it extra cute (try it out). This is alsow important if a style is affecting your original prompt too much. Make that prompt stronger by adding parenthesis around it, like this: ((promt)).\nYou can type promts like [A|B] to sequentially use terms one after another on each step. So, like [cat|dog] will produce a hybrid catdog. And [A:B:0.4] to switch to other terms after the first one has been active for a certain percentage of steps. So [cat:dog:0.4] will build a cat 40% of the time and then start turning it into a dog. This needs more steps to work properly.": "Salve, qui è StylePile.\nFinché qualche strano bug non verrà risolto, vedrai questo testo anche se lo script non è attivo. Nel frattempo, alcuni suggerimenti per portare la tua grafica a nuovi livelli:\nUtilizza il menu a discesa 'Focus on' per selezionare valori predefiniti complessi. Attiva o disattiva le selezioni seguenti (con o senza Focus) per influire sui risultati. Mescola e abbina per ottenere risultati interessanti. \nE alcuni suggerimenti generali su Stable Diffusion che porteranno i tuoi risultati a un livello superiore:\nPuoi aggiungere parentesi per aumentare l'influenza di certe parti del prompt. Quindi '(((cute))) kitten' lo renderà molto carino (fai delle prove). Questo è importante quando uno stile influisce troppo sul prompt originale. Rendi più forte quel prompt aggiungendo delle parentesi intorno ad esso, così: ((promt)).\nPuoi digitare prompt nel formato [A|B] per usare in sequenza i termini uno dopo l'altro in ogni passaggio. Quindi, come [cat|dog] produrrà un 'canegatto' ibrido. E [A:B:0.4] per passare ad altri termini dopo che il primo è stato attivo per una certa percentuale di passaggi. Quindi [cat:dog:0.4] genererà un gatto il 40% dei passaggi e poi inizierà a trasformarlo in un cane. Sono richiesti più passaggi perchè funzioni correttamente.",
"Enter one prompt per line. Blank lines will be ignored.": "Immettere un prompt per riga. Le righe vuote verranno ignorate.", "Enter one prompt per line. Blank lines will be ignored.": "Immettere un prompt per riga. Le righe vuote verranno ignorate.",
"Separate values for X axis using commas.": "Separare i valori per l'asse X usando le virgole.", "Separate values for X axis using commas.": "Separare i valori per l'asse X usando le virgole.",
"Separate values for Y axis using commas.": "Separare i valori per l'asse Y usando le virgole.", "Separate values for Y axis using commas.": "Separare i valori per l'asse Y usando le virgole.",
@ -1066,5 +1182,36 @@
"This string will be used to join split words into a single line if the option above is enabled.": "Questa stringa verrà utilizzata per unire le parole divise in un'unica riga se l'opzione sopra è abilitata.", "This string will be used to join split words into a single line if the option above is enabled.": "Questa stringa verrà utilizzata per unire le parole divise in un'unica riga se l'opzione sopra è abilitata.",
"Only applies to inpainting models. Determines how strongly to mask off the original image for inpainting and img2img. 1.0 means fully masked, which is the default behaviour. 0.0 means a fully unmasked conditioning. Lower values will help preserve the overall composition of the image, but will struggle with large changes.": "Si applica solo ai modelli di pittura. Determina con quale forza mascherare l'immagine originale per inpainting e img2img. 1.0 significa completamente mascherato, che è il comportamento predefinito. 0.0 significa un condizionamento completamente non mascherato. Valori più bassi aiuteranno a preservare la composizione generale dell'immagine, ma avranno difficoltà con grandi cambiamenti.", "Only applies to inpainting models. Determines how strongly to mask off the original image for inpainting and img2img. 1.0 means fully masked, which is the default behaviour. 0.0 means a fully unmasked conditioning. Lower values will help preserve the overall composition of the image, but will struggle with large changes.": "Si applica solo ai modelli di pittura. Determina con quale forza mascherare l'immagine originale per inpainting e img2img. 1.0 significa completamente mascherato, che è il comportamento predefinito. 0.0 significa un condizionamento completamente non mascherato. Valori più bassi aiuteranno a preservare la composizione generale dell'immagine, ma avranno difficoltà con grandi cambiamenti.",
"List of setting names, separated by commas, for settings that should go to the quick access bar at the top, rather than the usual setting tab. See modules/shared.py for setting names. Requires restarting to apply.": "Elenco dei nomi delle impostazioni, separati da virgole, per le impostazioni che dovrebbero essere visualizzate nella barra di accesso rapido in alto, anziché nella normale scheda delle impostazioni. Vedi modules/shared.py per impostare i nomi. Richiede il riavvio per applicare.", "List of setting names, separated by commas, for settings that should go to the quick access bar at the top, rather than the usual setting tab. See modules/shared.py for setting names. Requires restarting to apply.": "Elenco dei nomi delle impostazioni, separati da virgole, per le impostazioni che dovrebbero essere visualizzate nella barra di accesso rapido in alto, anziché nella normale scheda delle impostazioni. Vedi modules/shared.py per impostare i nomi. Richiede il riavvio per applicare.",
"If this values is non-zero, it will be added to seed and used to initialize RNG for noises when using samplers with Eta. You can use this to produce even more variation of images, or you can use this to match images of other software if you know what you are doing.": "Se questo valore è diverso da zero, verrà aggiunto al seed e utilizzato per inizializzare il generatore di numeri casuali per il rumore quando si utilizzano campionatori con ETA. Puoi usarlo per produrre ancora più variazioni di immagini, oppure puoi usarlo per abbinare le immagini di altri software se sai cosa stai facendo." "If this values is non-zero, it will be added to seed and used to initialize RNG for noises when using samplers with Eta. You can use this to produce even more variation of images, or you can use this to match images of other software if you know what you are doing.": "Se questo valore è diverso da zero, verrà aggiunto al seed e utilizzato per inizializzare il generatore di numeri casuali per il rumore quando si utilizzano campionatori con ETA. Puoi usarlo per produrre ancora più variazioni di immagini, oppure puoi usarlo per abbinare le immagini di altri software se sai cosa stai facendo.",
"Leave empty for auto": "Lasciare vuoto per automatico",
"Autocomplete options": "Opzioni di autocompletamento",
"Enable Autocomplete": "Abilita autocompletamento",
"Append commas": "Aggiungi virgole",
"AlphaCanvas": "AlphaCanvas",
"Close": "Chiudi",
"Grab Results": "Ottieni risultati",
"Apply Patch": "Applica Patch",
"Hue:0": "Hue:0",
"S:0": "S:0",
"L:0": "L:0",
"Load Canvas": "Carica Tela",
"saveCanvas": "Salva Tela",
"latest": "aggiornato",
"behind": "da aggiornare",
"Description": "Descrizione",
"Action": "Azione",
"Aesthetic Gradients": "Gradienti estetici",
"Create an embedding from one or few pictures and use it to apply their style to generated images.": "Crea un incorporamento da una o poche immagini e usalo per applicare il loro stile alle immagini generate.",
"Sample extension. Allows you to use __name__ syntax in your prompt to get a random line from a file named name.txt in the wildcards directory. Also see Dynamic Prompts for similar functionality.": "Estensione del campione. Consente di utilizzare la sintassi __name__ nel prompt per ottenere una riga casuale da un file denominato name.txt nella cartella dei termini jolly. Vedi anche 'Prompt dinamici' per funzionalità simili.",
"Dynamic Prompts": "Prompt dinamici",
"Implements an expressive template language for random or combinatorial prompt generation along with features to support deep wildcard directory structures.": "Implementa un modello di linguaggio espressivo per la generazione di prompt casuale o combinatoria insieme a funzionalità per supportare cartelle strutturate contenenti termini jolly.",
"Image browser": "Galleria immagini",
"Provides an interface to browse created images in the web browser.": "Fornisce un'interfaccia nel browser web per sfogliare le immagini create.",
"Randomly display the pictures of the artist's or artistic genres typical style, more pictures of this artist or genre is displayed after selecting. So you don't have to worry about how hard it is to choose the right style of art when you create.": "Visualizza in modo casuale le immagini dello stile tipico dell'artista o dei generi artistici, dopo la selezione vengono visualizzate più immagini di questo artista o genere. Così non dovete preoccuparvi della difficoltà di scegliere lo stile artistico giusto quando create.",
"The official port of Deforum, an extensive script for 2D and 3D animations, supporting keyframable sequences, dynamic math parameters (even inside the prompts), dynamic masking, depth estimation and warping.": "Il porting ufficiale di Deforum, uno script completo per animazioni 2D e 3D, che supporta sequenze di fotogrammi chiave, parametri matematici dinamici (anche all'interno dei prompt), mascheramento dinamico, stima della profondità e warping.",
"Artists to study": "Artisti per studiare",
"Shows a gallery of generated pictures by artists separated into categories.": "Mostra una galleria di immagini generate dagli artisti suddivise in categorie.",
"Calculates aesthetic score for generated images using CLIP+MLP Aesthetic Score Predictor based on Chad Scorer": "Calcola il punteggio estetico per le immagini generate utilizzando il predittore del punteggio estetico CLIP+MLP basato su Chad Scorer",
"Lets you edit captions in training datasets.": "Consente di modificare i sottotitoli nei set di dati di addestramento.",
"Time taken:": "Tempo impiegato:"
} }

View File

@ -9,11 +9,13 @@
" images in this directory. Loaded ": "개의 이미지가 이 경로에 존재합니다. ", " images in this directory. Loaded ": "개의 이미지가 이 경로에 존재합니다. ",
" pages": "페이지로 나뉘어 표시합니다.", " pages": "페이지로 나뉘어 표시합니다.",
", divided into ": "입니다. ", ", divided into ": "입니다. ",
". Use Installed tab to restart.": "에 성공적으로 설치하였습니다. 설치된 확장기능 탭에서 UI를 재시작해주세요.",
"1st and last digit must be 1. ex:'1, 2, 1'": "1st and last digit must be 1. ex:'1, 2, 1'", "1st and last digit must be 1. ex:'1, 2, 1'": "1st and last digit must be 1. ex:'1, 2, 1'",
"[wiki]": " [위키] 참조", "[wiki]": " [위키] 참조",
"A directory on the same machine where the server is running.": "WebUI 서버가 돌아가고 있는 디바이스에 존재하는 디렉토리를 선택해 주세요.", "A directory on the same machine where the server is running.": "WebUI 서버가 돌아가고 있는 디바이스에 존재하는 디렉토리를 선택해 주세요.",
"A merger of the two checkpoints will be generated in your": "체크포인트들이 병합된 결과물이 당신의", "A merger of the two checkpoints will be generated in your": "체크포인트들이 병합된 결과물이 당신의",
"A value that determines the output of random number generator - if you create an image with same parameters and seed as another image, you'll get the same result": "난수 생성기의 결과물을 지정하는 값 - 동일한 설정값과 동일한 시드를 적용 시, 완전히 똑같은 결과물을 얻게 됩니다.", "A value that determines the output of random number generator - if you create an image with same parameters and seed as another image, you'll get the same result": "난수 생성기의 결과물을 지정하는 값 - 동일한 설정값과 동일한 시드를 적용 시, 완전히 똑같은 결과물을 얻게 됩니다.",
"Action": "작업",
"Add a random artist to the prompt.": "프롬프트에 랜덤한 작가 추가", "Add a random artist to the prompt.": "프롬프트에 랜덤한 작가 추가",
"Add a second progress bar to the console that shows progress for an entire job.": "콘솔에 전체 작업의 진행도를 보여주는 2번째 프로그레스 바 추가하기", "Add a second progress bar to the console that shows progress for an entire job.": "콘솔에 전체 작업의 진행도를 보여주는 2번째 프로그레스 바 추가하기",
"Add difference": "차이점 추가", "Add difference": "차이점 추가",
@ -22,6 +24,8 @@
"Add model hash to generation information": "생성 정보에 모델 해시 추가", "Add model hash to generation information": "생성 정보에 모델 해시 추가",
"Add model name to generation information": "생성 정보에 모델 이름 추가", "Add model name to generation information": "생성 정보에 모델 이름 추가",
"Add number to filename when saving": "이미지를 저장할 때 파일명에 숫자 추가하기", "Add number to filename when saving": "이미지를 저장할 때 파일명에 숫자 추가하기",
"Aesthetic Gradients": "스타일 그라디언트",
"Aesthetic Image Scorer": "스타일 이미지 스코어러",
"Aesthetic imgs embedding": "스타일 이미지 임베딩", "Aesthetic imgs embedding": "스타일 이미지 임베딩",
"Aesthetic learning rate": "스타일 학습 수", "Aesthetic learning rate": "스타일 학습 수",
"Aesthetic steps": "스타일 스텝 수", "Aesthetic steps": "스타일 스텝 수",
@ -33,22 +37,32 @@
"Always save all generated images": "생성된 이미지 항상 저장하기", "Always save all generated images": "생성된 이미지 항상 저장하기",
"api": "", "api": "",
"append": "뒤에 삽입", "append": "뒤에 삽입",
"Append commas": "쉼표 삽입",
"Apply and restart UI": "적용 후 UI 재시작",
"Apply color correction to img2img results to match original colors.": "이미지→이미지 결과물이 기존 색상과 일치하도록 색상 보정 적용하기", "Apply color correction to img2img results to match original colors.": "이미지→이미지 결과물이 기존 색상과 일치하도록 색상 보정 적용하기",
"Apply selected styles to current prompt": "현재 프롬프트에 선택된 스타일 적용", "Apply selected styles to current prompt": "현재 프롬프트에 선택된 스타일 적용",
"Apply settings": "설정 적용하기", "Apply settings": "설정 적용하기",
"Artists to study": "연구할만한 작가들",
"auto": "자동",
"Auto focal point crop": "초점 기준 크롭(자동 감지)", "Auto focal point crop": "초점 기준 크롭(자동 감지)",
"Autocomplete options": "자동완성 설정",
"Available": "지원되는 확장기능 목록",
"Batch count": "배치 수", "Batch count": "배치 수",
"Batch from Directory": "저장 경로로부터 여러장 처리", "Batch from Directory": "저장 경로로부터 여러장 처리",
"Batch img2img": "이미지→이미지 배치", "Batch img2img": "이미지→이미지 배치",
"Batch Process": "이미지 여러장 처리", "Batch Process": "이미지 여러장 처리",
"Batch size": "배치 크기", "Batch size": "배치 크기",
"behind": "최신 아님",
"BSRGAN 4x": "BSRGAN 4x", "BSRGAN 4x": "BSRGAN 4x",
"built with gradio": "gradio로 제작되었습니다", "built with gradio": "gradio로 제작되었습니다",
"Calculates aesthetic score for generated images using CLIP+MLP Aesthetic Score Predictor based on Chad Scorer": "Chad 스코어러를 기반으로 한 CLIP+MLP 스타일 점수 예측기를 이용해 생성된 이미지의 스타일 점수를 계산합니다.",
"Cancel generate forever": "반복 생성 취소", "Cancel generate forever": "반복 생성 취소",
"cfg cnt": "CFG 변화 횟수",
"cfg count": "CFG 변화 횟수", "cfg count": "CFG 변화 횟수",
"CFG Scale": "CFG 스케일", "CFG Scale": "CFG 스케일",
"cfg1 min/max": "CFG1 최소/최대", "cfg1 min/max": "CFG1 최소/최대",
"cfg2 min/max": "CFG2 최소/최대", "cfg2 min/max": "CFG2 최소/최대",
"Check for updates": "업데이트 확인",
"Check progress": "진행도 체크", "Check progress": "진행도 체크",
"Check progress (first)": "진행도 체크 (처음)", "Check progress (first)": "진행도 체크 (처음)",
"checkpoint": " 체크포인트 ", "checkpoint": " 체크포인트 ",
@ -64,10 +78,14 @@
"CodeFormer weight parameter; 0 = maximum effect; 1 = minimum effect": "CodeFormer 가중치 설정값 (0 = 최대 효과, 1 = 최소 효과)", "CodeFormer weight parameter; 0 = maximum effect; 1 = minimum effect": "CodeFormer 가중치 설정값 (0 = 최대 효과, 1 = 최소 효과)",
"Collect": "즐겨찾기", "Collect": "즐겨찾기",
"Color variation": "색깔 다양성", "Color variation": "색깔 다양성",
"Combinations": "조합",
"Combinatorial batches": "조합 배치 수",
"Combinatorial generation": "조합 생성",
"copy": "복사", "copy": "복사",
"Create a grid where images will have different parameters. Use inputs below to specify which parameters will be shared by columns and rows": "서로 다른 설정값으로 생성된 이미지의 그리드를 만듭니다. 아래의 설정으로 가로/세로에 어떤 설정값을 적용할지 선택하세요.", "Create a grid where images will have different parameters. Use inputs below to specify which parameters will be shared by columns and rows": "서로 다른 설정값으로 생성된 이미지의 그리드를 만듭니다. 아래의 설정으로 가로/세로에 어떤 설정값을 적용할지 선택하세요.",
"Create a text file next to every image with generation parameters.": "생성된 이미지마다 생성 설정값을 담은 텍스트 파일 생성하기", "Create a text file next to every image with generation parameters.": "생성된 이미지마다 생성 설정값을 담은 텍스트 파일 생성하기",
"Create aesthetic images embedding": "스타일 이미지 임베딩 생성하기", "Create aesthetic images embedding": "스타일 이미지 임베딩 생성하기",
"Create an embedding from one or few pictures and use it to apply their style to generated images.": "하나 혹은 그 이상의 이미지들로부터 임베딩을 생성해, 그 이미지들의 스타일을 다른 이미지 생성 시 적용할 수 있게 해줍니다.",
"Create debug image": "디버그 이미지 생성", "Create debug image": "디버그 이미지 생성",
"Create embedding": "임베딩 생성", "Create embedding": "임베딩 생성",
"Create flipped copies": "좌우로 뒤집은 복사본 생성", "Create flipped copies": "좌우로 뒤집은 복사본 생성",
@ -78,14 +96,18 @@
"custom fold": "커스텀 경로", "custom fold": "커스텀 경로",
"Custom Name (Optional)": "병합 모델 이름 (선택사항)", "Custom Name (Optional)": "병합 모델 이름 (선택사항)",
"Dataset directory": "데이터셋 경로", "Dataset directory": "데이터셋 경로",
"Dataset Tag Editor": "데이터셋 태그 편집기",
"date": "생성 일자",
"DDIM": "DDIM", "DDIM": "DDIM",
"Decode CFG scale": "디코딩 CFG 스케일", "Decode CFG scale": "디코딩 CFG 스케일",
"Decode steps": "디코딩 스텝 수", "Decode steps": "디코딩 스텝 수",
"Delete": "삭제", "Delete": "삭제",
"delete next": "선택한 이미지부터 시작해서 삭제할 이미지 갯수",
"Denoising": "디노이징", "Denoising": "디노이징",
"Denoising Diffusion Implicit Models - best at inpainting": "Denoising Diffusion Implicit Models - 인페이팅에 뛰어남", "Denoising Diffusion Implicit Models - best at inpainting": "Denoising Diffusion Implicit Models - 인페이팅에 뛰어남",
"Denoising strength": "디노이즈 강도", "Denoising strength": "디노이즈 강도",
"Denoising strength change factor": "디노이즈 강도 변경 배수", "Denoising strength change factor": "디노이즈 강도 변경 배수",
"Description": "설명",
"Destination directory": "결과물 저장 경로", "Destination directory": "결과물 저장 경로",
"Determines how little respect the algorithm should have for image's content. At 0, nothing will change, and at 1 you'll get an unrelated image. With values below 1.0, processing will take less steps than the Sampling Steps slider specifies.": "알고리즘이 얼마나 원본 이미지를 반영할지를 결정하는 수치입니다. 0일 경우 아무것도 바뀌지 않고, 1일 경우 원본 이미지와 전혀 관련없는 결과물을 얻게 됩니다. 1.0 아래의 값일 경우, 설정된 샘플링 스텝 수보다 적은 스텝 수를 거치게 됩니다.", "Determines how little respect the algorithm should have for image's content. At 0, nothing will change, and at 1 you'll get an unrelated image. With values below 1.0, processing will take less steps than the Sampling Steps slider specifies.": "알고리즘이 얼마나 원본 이미지를 반영할지를 결정하는 수치입니다. 0일 경우 아무것도 바뀌지 않고, 1일 경우 원본 이미지와 전혀 관련없는 결과물을 얻게 됩니다. 1.0 아래의 값일 경우, 설정된 샘플링 스텝 수보다 적은 스텝 수를 거치게 됩니다.",
"Directory for saving images using the Save button": "저장 버튼을 이용해 저장하는 이미지들의 저장 경로", "Directory for saving images using the Save button": "저장 버튼을 이용해 저장하는 이미지들의 저장 경로",
@ -108,6 +130,8 @@
"Draw mask": "마스크 직접 그리기", "Draw mask": "마스크 직접 그리기",
"Drop File Here": "파일을 끌어 놓으세요", "Drop File Here": "파일을 끌어 놓으세요",
"Drop Image Here": "이미지를 끌어 놓으세요", "Drop Image Here": "이미지를 끌어 놓으세요",
"Dropdown": "드롭다운",
"Dynamic Prompts": "다이나믹 프롬프트",
"Embedding": "임베딩", "Embedding": "임베딩",
"Embedding Learning rate": "임베딩 학습률", "Embedding Learning rate": "임베딩 학습률",
"Emphasis: use (text) to make model pay more attention to text and [text] to make it pay less attention": "강조 : (텍스트)를 이용해 모델의 텍스트에 대한 가중치를 더 강하게 주고 [텍스트]를 이용해 더 약하게 줍니다.", "Emphasis: use (text) to make model pay more attention to text and [text] to make it pay less attention": "강조 : (텍스트)를 이용해 모델의 텍스트에 대한 가중치를 더 강하게 주고 [텍스트]를 이용해 더 약하게 줍니다.",
@ -127,6 +151,9 @@
"Euler a": "Euler a", "Euler a": "Euler a",
"Euler Ancestral - very creative, each can get a completely different picture depending on step count, setting steps to higher than 30-40 does not help": "Euler Ancestral - 매우 창의적, 스텝 수에 따라 완전히 다른 결과물이 나올 수 있음. 30~40보다 높은 스텝 수는 효과가 미미함", "Euler Ancestral - very creative, each can get a completely different picture depending on step count, setting steps to higher than 30-40 does not help": "Euler Ancestral - 매우 창의적, 스텝 수에 따라 완전히 다른 결과물이 나올 수 있음. 30~40보다 높은 스텝 수는 효과가 미미함",
"Existing Caption txt Action": "이미 존재하는 캡션 텍스트 처리", "Existing Caption txt Action": "이미 존재하는 캡션 텍스트 처리",
"Extension": "확장기능",
"Extension index URL": "확장기능 목록 URL",
"Extensions": "확장기능",
"Extra": "고급", "Extra": "고급",
"Extras": "부가기능", "Extras": "부가기능",
"extras": "부가기능", "extras": "부가기능",
@ -134,7 +161,7 @@
"Face restoration": "얼굴 보정", "Face restoration": "얼굴 보정",
"Face restoration model": "얼굴 보정 모델", "Face restoration model": "얼굴 보정 모델",
"Fall-off exponent (lower=higher detail)": "감쇠 지수 (낮을수록 디테일이 올라감)", "Fall-off exponent (lower=higher detail)": "감쇠 지수 (낮을수록 디테일이 올라감)",
"favorites": "즐겨찾기", "Favorites": "즐겨찾기",
"File": "파일", "File": "파일",
"File format for grids": "그리드 이미지 파일 형식", "File format for grids": "그리드 이미지 파일 형식",
"File format for images": "이미지 파일 형식", "File format for images": "이미지 파일 형식",
@ -150,6 +177,7 @@
"First Page": "처음 페이지", "First Page": "처음 페이지",
"Firstpass height": "초기 세로길이", "Firstpass height": "초기 세로길이",
"Firstpass width": "초기 가로길이", "Firstpass width": "초기 가로길이",
"Fixed seed": "시드 고정",
"Focal point edges weight": "경계면 가중치", "Focal point edges weight": "경계면 가중치",
"Focal point entropy weight": "엔트로피 가중치", "Focal point entropy weight": "엔트로피 가중치",
"Focal point face weight": "얼굴 가중치", "Focal point face weight": "얼굴 가중치",
@ -184,8 +212,10 @@
"ignore": "무시", "ignore": "무시",
"Image": "이미지", "Image": "이미지",
"Image Browser": "이미지 브라우저", "Image Browser": "이미지 브라우저",
"Image browser": "이미지 브라우저",
"Image for img2img": "Image for img2img", "Image for img2img": "Image for img2img",
"Image for inpainting with mask": "마스크로 인페인팅할 이미지", "Image for inpainting with mask": "마스크로 인페인팅할 이미지",
"Image not found (may have been already moved)": "이미지를 찾을 수 없습니다 (이미 옮겨졌을 수 있음)",
"Images Browser": "이미지 브라우저", "Images Browser": "이미지 브라우저",
"Images directory": "이미지 경로", "Images directory": "이미지 경로",
"Images filename pattern": "이미지 파일명 패턴", "Images filename pattern": "이미지 파일명 패턴",
@ -193,6 +223,7 @@
"img2img alternative test": "이미지→이미지 대체버전 테스트", "img2img alternative test": "이미지→이미지 대체버전 테스트",
"img2img DDIM discretize": "이미지→이미지 DDIM 이산화", "img2img DDIM discretize": "이미지→이미지 DDIM 이산화",
"img2img history": "이미지→이미지 기록", "img2img history": "이미지→이미지 기록",
"Implements an expressive template language for random or combinatorial prompt generation along with features to support deep wildcard directory structures.": "무작위/조합 프롬프트 생성을 위한 문법과 복잡한 와일드카드 구조를 지원합니다.",
"In loopback mode, on each loop the denoising strength is multiplied by this value. <1 means decreasing variety so your sequence will converge on a fixed picture. >1 means increasing variety so your sequence will become more and more chaotic.": "루프백 모드에서는 매 루프마다 디노이즈 강도에 이 값이 곱해집니다. 1보다 작을 경우 다양성이 낮아져 결과 이미지들이 고정된 형태로 모일 겁니다. 1보다 클 경우 다양성이 높아져 결과 이미지들이 갈수록 혼란스러워지겠죠.", "In loopback mode, on each loop the denoising strength is multiplied by this value. <1 means decreasing variety so your sequence will converge on a fixed picture. >1 means increasing variety so your sequence will become more and more chaotic.": "루프백 모드에서는 매 루프마다 디노이즈 강도에 이 값이 곱해집니다. 1보다 작을 경우 다양성이 낮아져 결과 이미지들이 고정된 형태로 모일 겁니다. 1보다 클 경우 다양성이 높아져 결과 이미지들이 갈수록 혼란스러워지겠죠.",
"Include Separate Images": "분리된 이미지 포함하기", "Include Separate Images": "분리된 이미지 포함하기",
"Increase coherency by padding from the last comma within n tokens when using more than 75 tokens": "75개보다 많은 토큰을 사용시 마지막 쉼표로부터 N개의 토큰 이내에 패딩을 추가해 통일성 증가시키기", "Increase coherency by padding from the last comma within n tokens when using more than 75 tokens": "75개보다 많은 토큰을 사용시 마지막 쉼표로부터 N개의 토큰 이내에 패딩을 추가해 통일성 증가시키기",
@ -205,6 +236,11 @@
"Inpainting conditioning mask strength": "인페인팅 조절 마스크 강도", "Inpainting conditioning mask strength": "인페인팅 조절 마스크 강도",
"Input directory": "인풋 이미지 경로", "Input directory": "인풋 이미지 경로",
"Input images directory": "이미지 경로 입력", "Input images directory": "이미지 경로 입력",
"Inspiration": "\"영감\"",
"Install": "설치",
"Install from URL": "URL로부터 확장기능 설치",
"Installed": "설치된 확장기능",
"Installed into ": "확장기능을 ",
"Interpolation Method": "보간 방법", "Interpolation Method": "보간 방법",
"Interrogate\nCLIP": "CLIP\n분석", "Interrogate\nCLIP": "CLIP\n분석",
"Interrogate\nDeepBooru": "DeepBooru\n분석", "Interrogate\nDeepBooru": "DeepBooru\n분석",
@ -223,6 +259,7 @@
"Just resize": "리사이징", "Just resize": "리사이징",
"Keep -1 for seeds": "시드값 -1로 유지", "Keep -1 for seeds": "시드값 -1로 유지",
"keep whatever was there originally": "이미지 원본 유지", "keep whatever was there originally": "이미지 원본 유지",
"keyword": "프롬프트",
"Label": "라벨", "Label": "라벨",
"Lanczos": "Lanczos", "Lanczos": "Lanczos",
"Last prompt:": "마지막 프롬프트 : ", "Last prompt:": "마지막 프롬프트 : ",
@ -230,23 +267,29 @@
"Last saved image:": "마지막으로 저장된 이미지 : ", "Last saved image:": "마지막으로 저장된 이미지 : ",
"latent noise": "잠재 노이즈", "latent noise": "잠재 노이즈",
"latent nothing": "잠재 공백", "latent nothing": "잠재 공백",
"latest": "최신 버전",
"LDSR": "LDSR", "LDSR": "LDSR",
"LDSR processing steps. Lower = faster": "LDSR 스텝 수. 낮은 값 = 빠른 속도", "LDSR processing steps. Lower = faster": "LDSR 스텝 수. 낮은 값 = 빠른 속도",
"leakyrelu": "leakyrelu", "leakyrelu": "leakyrelu",
"Leave blank to save images to the default path.": "기존 저장 경로에 이미지들을 저장하려면 비워두세요.", "Leave blank to save images to the default path.": "기존 저장 경로에 이미지들을 저장하려면 비워두세요.",
"Leave empty for auto": "자동 설정하려면 비워두십시오",
"left": "왼쪽", "left": "왼쪽",
"Lets you edit captions in training datasets.": "훈련에 사용되는 데이터셋의 캡션을 수정할 수 있게 해줍니다.",
"linear": "linear", "linear": "linear",
"List of prompt inputs": "프롬프트 입력 리스트", "List of prompt inputs": "프롬프트 입력 리스트",
"List of setting names, separated by commas, for settings that should go to the quick access bar at the top, rather than the usual setting tab. See modules/shared.py for setting names. Requires restarting to apply.": "설정 탭이 아니라 상단의 빠른 설정 바에 위치시킬 설정 이름을 쉼표로 분리해서 입력하십시오. 설정 이름은 modules/shared.py에서 찾을 수 있습니다. 재시작이 필요합니다.", "List of setting names, separated by commas, for settings that should go to the quick access bar at the top, rather than the usual setting tab. See modules/shared.py for setting names. Requires restarting to apply.": "설정 탭이 아니라 상단의 빠른 설정 바에 위치시킬 설정 이름을 쉼표로 분리해서 입력하십시오. 설정 이름은 modules/shared.py에서 찾을 수 있습니다. 재시작이 필요합니다.",
"LMS": "LMS", "LMS": "LMS",
"LMS Karras": "LMS Karras", "LMS Karras": "LMS Karras",
"Load": "불러오기", "Load": "불러오기",
"Load from:": "URL로부터 불러오기",
"Loading...": "로딩 중...", "Loading...": "로딩 중...",
"Local directory name": "로컬 경로 이름",
"Localization (requires restart)": "현지화 (재시작 필요)", "Localization (requires restart)": "현지화 (재시작 필요)",
"Log directory": "로그 경로", "Log directory": "로그 경로",
"Loopback": "루프백", "Loopback": "루프백",
"Loops": "루프 수", "Loops": "루프 수",
"Loss:": "손실(Loss) : ", "Loss:": "손실(Loss) : ",
"Magic prompt": "매직 프롬프트",
"Make an attempt to produce a picture similar to what would have been produced with same seed at specified resolution": "동일한 시드 값으로 생성되었을 이미지를 주어진 해상도로 최대한 유사하게 재현합니다.", "Make an attempt to produce a picture similar to what would have been produced with same seed at specified resolution": "동일한 시드 값으로 생성되었을 이미지를 주어진 해상도로 최대한 유사하게 재현합니다.",
"Make K-diffusion samplers produce same images in a batch as when making a single image": "K-diffusion 샘플러들이 단일 이미지를 생성하는 것처럼 배치에서도 동일한 이미지를 생성하게 하기", "Make K-diffusion samplers produce same images in a batch as when making a single image": "K-diffusion 샘플러들이 단일 이미지를 생성하는 것처럼 배치에서도 동일한 이미지를 생성하게 하기",
"Make Zip when Save?": "저장 시 Zip 생성하기", "Make Zip when Save?": "저장 시 Zip 생성하기",
@ -260,7 +303,9 @@
"Minimum number of pages per load": "한번 불러올 때마다 불러올 최소 페이지 수", "Minimum number of pages per load": "한번 불러올 때마다 불러올 최소 페이지 수",
"Modules": "모듈", "Modules": "모듈",
"Move face restoration model from VRAM into RAM after processing": "처리가 완료되면 얼굴 보정 모델을 VRAM에서 RAM으로 옮기기", "Move face restoration model from VRAM into RAM after processing": "처리가 완료되면 얼굴 보정 모델을 VRAM에서 RAM으로 옮기기",
"Move VAE and CLIP to RAM when training hypernetwork. Saves VRAM.": "하이퍼네트워크 훈련 진행 시 VAE와 CLIP을 RAM으로 옮기기. VRAM이 절약됩니다.", "Move to favorites": "즐겨찾기로 옮기기",
"Move VAE and CLIP to RAM when training if possible. Saves VRAM.": "훈련 진행 시 가능하면 VAE와 CLIP을 RAM으로 옮기기. VRAM이 절약됩니다.",
"Moved to favorites": "즐겨찾기로 옮겨짐",
"Multiplier (M) - set to 0 to get model A": "배율 (M) - 0으로 적용하면 모델 A를 얻게 됩니다", "Multiplier (M) - set to 0 to get model A": "배율 (M) - 0으로 적용하면 모델 A를 얻게 됩니다",
"Name": "이름", "Name": "이름",
"Negative prompt": "네거티브 프롬프트", "Negative prompt": "네거티브 프롬프트",
@ -285,6 +330,7 @@
"original": "원본 유지", "original": "원본 유지",
"Original negative prompt": "기존 네거티브 프롬프트", "Original negative prompt": "기존 네거티브 프롬프트",
"Original prompt": "기존 프롬프트", "Original prompt": "기존 프롬프트",
"Others": "기타",
"Outpainting direction": "아웃페인팅 방향", "Outpainting direction": "아웃페인팅 방향",
"Outpainting mk2": "아웃페인팅 마크 2", "Outpainting mk2": "아웃페인팅 마크 2",
"Output directory": "이미지 저장 경로", "Output directory": "이미지 저장 경로",
@ -303,6 +349,7 @@
"Overwrite Old Hypernetwork": "기존 하이퍼네트워크 덮어쓰기", "Overwrite Old Hypernetwork": "기존 하이퍼네트워크 덮어쓰기",
"Page Index": "페이지 인덱스", "Page Index": "페이지 인덱스",
"parameters": "설정값", "parameters": "설정값",
"path name": "경로 이름",
"Path to directory where to write outputs": "결과물을 출력할 경로", "Path to directory where to write outputs": "결과물을 출력할 경로",
"Path to directory with input images": "인풋 이미지가 있는 경로", "Path to directory with input images": "인풋 이미지가 있는 경로",
"Paths for saving": "저장 경로", "Paths for saving": "저장 경로",
@ -330,6 +377,7 @@
"Prompt template file": "프롬프트 템플릿 파일 경로", "Prompt template file": "프롬프트 템플릿 파일 경로",
"Prompts": "프롬프트", "Prompts": "프롬프트",
"Prompts from file or textbox": "파일이나 텍스트박스로부터 프롬프트 불러오기", "Prompts from file or textbox": "파일이나 텍스트박스로부터 프롬프트 불러오기",
"Provides an interface to browse created images in the web browser.": "생성된 이미지를 브라우저 내에서 볼 수 있는 인터페이스를 추가합니다.",
"Put variable parts at start of prompt": "변경되는 프롬프트를 앞에 위치시키기", "Put variable parts at start of prompt": "변경되는 프롬프트를 앞에 위치시키기",
"quad": "quad", "quad": "quad",
"Quality for saved jpeg images": "저장된 jpeg 이미지들의 품질", "Quality for saved jpeg images": "저장된 jpeg 이미지들의 품질",
@ -337,11 +385,13 @@
"R-ESRGAN 4x+ Anime6B": "R-ESRGAN 4x+ Anime6B", "R-ESRGAN 4x+ Anime6B": "R-ESRGAN 4x+ Anime6B",
"Random": "랜덤", "Random": "랜덤",
"Random grid": "랜덤 그리드", "Random grid": "랜덤 그리드",
"Randomly display the pictures of the artist's or artistic genres typical style, more pictures of this artist or genre is displayed after selecting. So you don't have to worry about how hard it is to choose the right style of art when you create.": "특정 작가 또는 스타일의 이미지들 중 하나를 무작위로 보여줍니다. 선택 후 선택한 작가 또는 스타일의 이미지들이 더 나타나게 됩니다. 고르기 어려워도 걱정하실 필요 없어요!",
"Randomness": "랜덤성", "Randomness": "랜덤성",
"Read generation parameters from prompt or last generation if prompt is empty into user interface.": "클립보드에 복사된 정보로부터 설정값 읽어오기/프롬프트창이 비어있을경우 제일 최근 설정값 불러오기", "Read generation parameters from prompt or last generation if prompt is empty into user interface.": "클립보드에 복사된 정보로부터 설정값 읽어오기/프롬프트창이 비어있을경우 제일 최근 설정값 불러오기",
"Read parameters (prompt, etc...) from txt2img tab when making previews": "프리뷰 이미지 생성 시 텍스트→이미지 탭에서 설정값(프롬프트 등) 읽어오기", "Read parameters (prompt, etc...) from txt2img tab when making previews": "프리뷰 이미지 생성 시 텍스트→이미지 탭에서 설정값(프롬프트 등) 읽어오기",
"Recommended settings: Sampling Steps: 80-100, Sampler: Euler a, Denoising strength: 0.8": "추천 설정값 - 샘플링 스텝 수 : 80-100 , 샘플러 : Euler a, 디노이즈 강도 : 0.8", "Recommended settings: Sampling Steps: 80-100, Sampler: Euler a, Denoising strength: 0.8": "추천 설정값 - 샘플링 스텝 수 : 80-100 , 샘플러 : Euler a, 디노이즈 강도 : 0.8",
"Reload custom script bodies (No ui updates, No restart)": "커스텀 스크립트 리로드하기(UI 업데이트 없음, 재시작 없음)", "Reload custom script bodies (No ui updates, No restart)": "커스텀 스크립트 리로드하기(UI 업데이트 없음, 재시작 없음)",
"Reloading...": "재시작 중...",
"relu": "relu", "relu": "relu",
"Renew Page": "Renew Page", "Renew Page": "Renew Page",
"Request browser notifications": "브라우저 알림 권한 요청", "Request browser notifications": "브라우저 알림 권한 요청",
@ -361,6 +411,7 @@
"Reuse seed from last generation, mostly useful if it was randomed": "이전 생성에서 사용된 시드를 불러옵니다. 랜덤하게 생성했을 시 도움됨", "Reuse seed from last generation, mostly useful if it was randomed": "이전 생성에서 사용된 시드를 불러옵니다. 랜덤하게 생성했을 시 도움됨",
"right": "오른쪽", "right": "오른쪽",
"Run": "가동", "Run": "가동",
"Sample extension. Allows you to use __name__ syntax in your prompt to get a random line from a file named name.txt in the wildcards directory. Also see Dynamic Prompts for similar functionality.": "샘플 확장기능입니다. __이름__형식의 문법을 사용해 와일드카드 경로 내의 이름.txt파일로부터 무작위 프롬프트를 적용할 수 있게 해줍니다. 유사한 확장기능으로 다이나믹 프롬프트가 있습니다.",
"Sampler": "샘플러", "Sampler": "샘플러",
"Sampler parameters": "샘플러 설정값", "Sampler parameters": "샘플러 설정값",
"Sampling method": "샘플링 방법", "Sampling method": "샘플링 방법",
@ -368,6 +419,7 @@
"Save": "저장", "Save": "저장",
"Save a copy of embedding to log directory every N steps, 0 to disable": "N스텝마다 로그 경로에 임베딩을 저장합니다, 비활성화하려면 0으로 설정하십시오.", "Save a copy of embedding to log directory every N steps, 0 to disable": "N스텝마다 로그 경로에 임베딩을 저장합니다, 비활성화하려면 0으로 설정하십시오.",
"Save a copy of image before applying color correction to img2img results": "이미지→이미지 결과물에 색상 보정을 진행하기 전 이미지의 복사본을 저장하기", "Save a copy of image before applying color correction to img2img results": "이미지→이미지 결과물에 색상 보정을 진행하기 전 이미지의 복사본을 저장하기",
"Save a copy of image before applying highres fix.": "고해상도 보정을 진행하기 전 이미지의 복사본을 저장하기",
"Save a copy of image before doing face restoration.": "얼굴 보정을 진행하기 전 이미지의 복사본을 저장하기", "Save a copy of image before doing face restoration.": "얼굴 보정을 진행하기 전 이미지의 복사본을 저장하기",
"Save an csv containing the loss to log directory every N steps, 0 to disable": "N스텝마다 로그 경로에 손실(Loss)을 포함하는 csv 파일을 저장합니다, 비활성화하려면 0으로 설정하십시오.", "Save an csv containing the loss to log directory every N steps, 0 to disable": "N스텝마다 로그 경로에 손실(Loss)을 포함하는 csv 파일을 저장합니다, 비활성화하려면 0으로 설정하십시오.",
"Save an image to log directory every N steps, 0 to disable": "N스텝마다 로그 경로에 이미지를 저장합니다, 비활성화하려면 0으로 설정하십시오.", "Save an image to log directory every N steps, 0 to disable": "N스텝마다 로그 경로에 이미지를 저장합니다, 비활성화하려면 0으로 설정하십시오.",
@ -412,6 +464,7 @@
"Show progressbar": "프로그레스 바 보이기", "Show progressbar": "프로그레스 바 보이기",
"Show result images": "이미지 결과 보이기", "Show result images": "이미지 결과 보이기",
"Show Textbox": "텍스트박스 보이기", "Show Textbox": "텍스트박스 보이기",
"Shows a gallery of generated pictures by artists separated into categories.": "생성된 이미지들을 작가별로 분류해 보여줍니다. 원본 - https://artiststostudy.pages.dev",
"Sigma adjustment for finding noise for image": "이미지 노이즈를 찾기 위해 시그마 조정", "Sigma adjustment for finding noise for image": "이미지 노이즈를 찾기 위해 시그마 조정",
"Sigma Churn": "시그마 섞기", "Sigma Churn": "시그마 섞기",
"sigma churn": "시그마 섞기", "sigma churn": "시그마 섞기",
@ -424,6 +477,7 @@
"Skip": "건너뛰기", "Skip": "건너뛰기",
"Slerp angle": "구면 선형 보간 각도", "Slerp angle": "구면 선형 보간 각도",
"Slerp interpolation": "구면 선형 보간", "Slerp interpolation": "구면 선형 보간",
"sort by": "정렬 기준",
"Source": "원본", "Source": "원본",
"Source directory": "원본 경로", "Source directory": "원본 경로",
"Split image overlap ratio": "이미지 분할 겹침 비율", "Split image overlap ratio": "이미지 분할 겹침 비율",
@ -431,6 +485,7 @@
"Split oversized images": "사이즈가 큰 이미지 분할하기", "Split oversized images": "사이즈가 큰 이미지 분할하기",
"Stable Diffusion": "Stable Diffusion", "Stable Diffusion": "Stable Diffusion",
"Stable Diffusion checkpoint": "Stable Diffusion 체크포인트", "Stable Diffusion checkpoint": "Stable Diffusion 체크포인트",
"step cnt": "스텝 변화 횟수",
"step count": "스텝 변화 횟수", "step count": "스텝 변화 횟수",
"step1 min/max": "스텝1 최소/최대", "step1 min/max": "스텝1 최소/최대",
"step2 min/max": "스텝2 최소/최대", "step2 min/max": "스텝2 최소/최대",
@ -447,6 +502,7 @@
"System": "시스템", "System": "시스템",
"Tertiary model (C)": "3차 모델 (C)", "Tertiary model (C)": "3차 모델 (C)",
"Textbox": "텍스트박스", "Textbox": "텍스트박스",
"The official port of Deforum, an extensive script for 2D and 3D animations, supporting keyframable sequences, dynamic math parameters (even inside the prompts), dynamic masking, depth estimation and warping.": "Deforum의 공식 포팅 버전입니다. 2D와 3D 애니메이션, 키프레임 시퀀스, 수학적 매개변수, 다이나믹 마스킹 등을 지원합니다.",
"This regular expression will be used extract words from filename, and they will be joined using the option below into label text used for training. Leave empty to keep filename text as it is.": "이 정규표현식은 파일명으로부터 단어를 추출하는 데 사용됩니다. 추출된 단어들은 하단의 설정을 이용해 라벨 텍스트로 변환되어 훈련에 사용됩니다. 파일명 텍스트를 유지하려면 비워두십시오.", "This regular expression will be used extract words from filename, and they will be joined using the option below into label text used for training. Leave empty to keep filename text as it is.": "이 정규표현식은 파일명으로부터 단어를 추출하는 데 사용됩니다. 추출된 단어들은 하단의 설정을 이용해 라벨 텍스트로 변환되어 훈련에 사용됩니다. 파일명 텍스트를 유지하려면 비워두십시오.",
"This string will be used to join split words into a single line if the option above is enabled.": "이 문자열은 상단 설정이 활성화되어있을 때 분리된 단어들을 한 줄로 합치는 데 사용됩니다.", "This string will be used to join split words into a single line if the option above is enabled.": "이 문자열은 상단 설정이 활성화되어있을 때 분리된 단어들을 한 줄로 합치는 데 사용됩니다.",
"This text is used to rotate the feature space of the imgs embs": "이 텍스트는 이미지 임베딩의 특징 공간을 회전하는 데 사용됩니다.", "This text is used to rotate the feature space of the imgs embs": "이 텍스트는 이미지 임베딩의 특징 공간을 회전하는 데 사용됩니다.",
@ -467,7 +523,9 @@
"txt2img": "텍스트→이미지", "txt2img": "텍스트→이미지",
"txt2img history": "텍스트→이미지 기록", "txt2img history": "텍스트→이미지 기록",
"uniform": "uniform", "uniform": "uniform",
"unknown": "알수 없음",
"up": "위쪽", "up": "위쪽",
"Update": "업데이트",
"Upload mask": "마스크 업로드하기", "Upload mask": "마스크 업로드하기",
"Upload prompt inputs": "입력할 프롬프트를 업로드하십시오", "Upload prompt inputs": "입력할 프롬프트를 업로드하십시오",
"Upscale Before Restoring Faces": "얼굴 보정을 진행하기 전에 업스케일링 먼저 진행하기", "Upscale Before Restoring Faces": "얼굴 보정을 진행하기 전에 업스케일링 먼저 진행하기",
@ -479,15 +537,20 @@
"Upscaler 2 visibility": "업스케일러 2 가시성", "Upscaler 2 visibility": "업스케일러 2 가시성",
"Upscaler for img2img": "이미지→이미지 업스케일러", "Upscaler for img2img": "이미지→이미지 업스케일러",
"Upscaling": "업스케일링", "Upscaling": "업스케일링",
"URL for extension's git repository": "확장기능의 git 레포 URL",
"Use a two step process to partially create an image at smaller resolution, upscale, and then improve details in it without changing composition": "저해상도 이미지를 1차적으로 생성 후 업스케일을 진행하여, 이미지의 전체적인 구성을 바꾸지 않고 세부적인 디테일을 향상시킵니다.", "Use a two step process to partially create an image at smaller resolution, upscale, and then improve details in it without changing composition": "저해상도 이미지를 1차적으로 생성 후 업스케일을 진행하여, 이미지의 전체적인 구성을 바꾸지 않고 세부적인 디테일을 향상시킵니다.",
"Use an empty output directory to save pictures normally instead of writing to the output directory.": "저장 경로를 비워두면 기본 저장 폴더에 이미지들이 저장됩니다.", "Use an empty output directory to save pictures normally instead of writing to the output directory.": "저장 경로를 비워두면 기본 저장 폴더에 이미지들이 저장됩니다.",
"Use BLIP for caption": "캡션에 BLIP 사용", "Use BLIP for caption": "캡션에 BLIP 사용",
"Use checkbox to enable the extension; it will be enabled or disabled when you click apply button": "체크박스를 이용해 적용할 확장기능을 선택하세요. 변경사항은 적용 후 UI 재시작 버튼을 눌러야 적용됩니다.",
"Use checkbox to mark the extension for update; it will be updated when you click apply button": "체크박스를 이용해 업데이트할 확장기능을 선택하세요. 업데이트는 적용 후 UI 재시작 버튼을 눌러야 적용됩니다.",
"Use cross attention optimizations while training": "훈련 진행 시 크로스 어텐션 최적화 사용",
"Use deepbooru for caption": "캡션에 deepbooru 사용", "Use deepbooru for caption": "캡션에 deepbooru 사용",
"Use dropout": "드롭아웃 사용", "Use dropout": "드롭아웃 사용",
"Use following tags to define how filenames for images are chosen: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [datetime<Format>], [datetime<Format><Time Zone>], [job_timestamp]; leave empty for default.": "다음 태그들을 사용해 이미지 파일명 형식을 결정하세요 : [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [datetime<Format>], [datetime<Format><Time Zone>], [job_timestamp]. 비워두면 기본값으로 설정됩니다.", "Use following tags to define how filenames for images are chosen: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [datetime<Format>], [datetime<Format><Time Zone>], [job_timestamp]; leave empty for default.": "다음 태그들을 사용해 이미지 파일명 형식을 결정하세요 : [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [datetime<Format>], [datetime<Format><Time Zone>], [job_timestamp]. 비워두면 기본값으로 설정됩니다.",
"Use following tags to define how subdirectories for images and grids are chosen: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [datetime<Format>], [datetime<Format><Time Zone>], [job_timestamp]; leave empty for default.": "다음 태그들을 사용해 이미지와 그리드의 하위 디렉토리명의 형식을 결정하세요 : [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [datetime<Format>], [datetime<Format><Time Zone>], [job_timestamp]. 비워두면 기본값으로 설정됩니다.", "Use following tags to define how subdirectories for images and grids are chosen: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [datetime<Format>], [datetime<Format><Time Zone>], [job_timestamp]; leave empty for default.": "다음 태그들을 사용해 이미지와 그리드의 하위 디렉토리명의 형식을 결정하세요 : [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [datetime<Format>], [datetime<Format><Time Zone>], [job_timestamp]. 비워두면 기본값으로 설정됩니다.",
"Use old emphasis implementation. Can be useful to reproduce old seeds.": "옛 방식의 강조 구현을 사용합니다. 옛 시드를 재현하는 데 효과적일 수 있습니다.", "Use old emphasis implementation. Can be useful to reproduce old seeds.": "옛 방식의 강조 구현을 사용합니다. 옛 시드를 재현하는 데 효과적일 수 있습니다.",
"Use original name for output filename during batch process in extras tab": "부가기능 탭에서 이미지를 여러장 처리 시 결과물 파일명에 기존 파일명 사용하기", "Use original name for output filename during batch process in extras tab": "부가기능 탭에서 이미지를 여러장 처리 시 결과물 파일명에 기존 파일명 사용하기",
"Use same random seed for all lines": "모든 줄에 동일한 시드 사용",
"Use same seed for each image": "각 이미지에 동일한 시드 사용", "Use same seed for each image": "각 이미지에 동일한 시드 사용",
"use spaces for tags in deepbooru": "deepbooru에서 태그에 공백 사용", "use spaces for tags in deepbooru": "deepbooru에서 태그에 공백 사용",
"User interface": "사용자 인터페이스", "User interface": "사용자 인터페이스",

View File

@ -17,6 +17,7 @@
"Checkpoint Merger": "Fusão de Checkpoint", "Checkpoint Merger": "Fusão de Checkpoint",
"Train": "Treinar", "Train": "Treinar",
"Settings": "Configurações", "Settings": "Configurações",
"Extensions": "Extensions",
"Prompt": "Prompt", "Prompt": "Prompt",
"Negative prompt": "Prompt negativo", "Negative prompt": "Prompt negativo",
"Run": "Executar", "Run": "Executar",
@ -93,13 +94,13 @@
"Eta": "Tempo estimado", "Eta": "Tempo estimado",
"Clip skip": "Pular Clip", "Clip skip": "Pular Clip",
"Denoising": "Denoising", "Denoising": "Denoising",
"Cond. Image Mask Weight": "Peso da Máscara Condicional de Imagem",
"X values": "Valores de X", "X values": "Valores de X",
"Y type": "Tipo de Y", "Y type": "Tipo de Y",
"Y values": "Valores de Y", "Y values": "Valores de Y",
"Draw legend": "Desenhar a legenda", "Draw legend": "Desenhar a legenda",
"Include Separate Images": "Incluir Imagens Separadas", "Include Separate Images": "Incluir Imagens Separadas",
"Keep -1 for seeds": "Manter em -1 para seeds", "Keep -1 for seeds": "Manter em -1 para seeds",
"Drop Image Here": "Solte a imagem aqui",
"Save": "Salvar", "Save": "Salvar",
"Send to img2img": "Mandar para img2img", "Send to img2img": "Mandar para img2img",
"Send to inpaint": "Mandar para inpaint", "Send to inpaint": "Mandar para inpaint",
@ -110,6 +111,7 @@
"Inpaint": "Inpaint", "Inpaint": "Inpaint",
"Batch img2img": "Lote img2img", "Batch img2img": "Lote img2img",
"Image for img2img": "Imagem para img2img", "Image for img2img": "Imagem para img2img",
"Drop Image Here": "Solte a imagem aqui",
"Image for inpainting with mask": "Imagem para inpainting com máscara", "Image for inpainting with mask": "Imagem para inpainting com máscara",
"Mask": "Máscara", "Mask": "Máscara",
"Mask blur": "Desfoque da máscara", "Mask blur": "Desfoque da máscara",
@ -166,16 +168,10 @@
"Upscaler": "Ampliador", "Upscaler": "Ampliador",
"Lanczos": "Lanczos", "Lanczos": "Lanczos",
"LDSR": "LDSR", "LDSR": "LDSR",
"4x_foolhardy_Remacri": "4x_foolhardy_Remacri", "ESRGAN_4x": "ESRGAN_4x",
"Put ESRGAN models here": "Coloque modelos ESRGAN aqui", "ScuNET GAN": "ScuNET GAN",
"R-ESRGAN General 4xV3": "R-ESRGAN General 4xV3",
"R-ESRGAN AnimeVideo": "R-ESRGAN AnimeVideo",
"R-ESRGAN 4x+": "R-ESRGAN 4x+",
"R-ESRGAN 4x+ Anime6B": "R-ESRGAN 4x+ Anime6B",
"R-ESRGAN 2x+": "R-ESRGAN 2x+",
"ScuNET": "ScuNET",
"ScuNET PSNR": "ScuNET PSNR", "ScuNET PSNR": "ScuNET PSNR",
"put_swinir_models_here": "put_swinir_models_here", "SwinIR 4x": "SwinIR 4x",
"Single Image": "Uma imagem", "Single Image": "Uma imagem",
"Batch Process": "Processo em lote", "Batch Process": "Processo em lote",
"Batch from Directory": "Lote apartir de diretório", "Batch from Directory": "Lote apartir de diretório",
@ -189,7 +185,7 @@
"GFPGAN visibility": "Visibilidade GFPGAN", "GFPGAN visibility": "Visibilidade GFPGAN",
"CodeFormer visibility": "Visibilidade CodeFormer", "CodeFormer visibility": "Visibilidade CodeFormer",
"CodeFormer weight (0 = maximum effect, 1 = minimum effect)": "Peso do CodeFormer (0 = efeito máximo, 1 = efeito mínimo)", "CodeFormer weight (0 = maximum effect, 1 = minimum effect)": "Peso do CodeFormer (0 = efeito máximo, 1 = efeito mínimo)",
"Open output directory": "Abrir diretório de saída", "Upscale Before Restoring Faces": "Ampliar Antes de Refinar Rostos",
"Send to txt2img": "Mandar para txt2img", "Send to txt2img": "Mandar para txt2img",
"A merger of the two checkpoints will be generated in your": "Uma fusão dos dois checkpoints será gerada em seu", "A merger of the two checkpoints will be generated in your": "Uma fusão dos dois checkpoints será gerada em seu",
"checkpoint": "checkpoint", "checkpoint": "checkpoint",
@ -216,6 +212,7 @@
"Modules": "Módulos", "Modules": "Módulos",
"Enter hypernetwork layer structure": "Entrar na estrutura de camadas da hypernetwork", "Enter hypernetwork layer structure": "Entrar na estrutura de camadas da hypernetwork",
"Select activation function of hypernetwork": "Selecionar a função de ativação de hypernetwork", "Select activation function of hypernetwork": "Selecionar a função de ativação de hypernetwork",
"linear": "linear",
"relu": "relu", "relu": "relu",
"leakyrelu": "leakyrelu", "leakyrelu": "leakyrelu",
"elu": "elu", "elu": "elu",
@ -227,12 +224,10 @@
"glu": "glu", "glu": "glu",
"hardshrink": "hardshrink", "hardshrink": "hardshrink",
"hardsigmoid": "hardsigmoid", "hardsigmoid": "hardsigmoid",
"hardswish": "hardswish",
"hardtanh": "hardtanh", "hardtanh": "hardtanh",
"logsigmoid": "logsigmoid", "logsigmoid": "logsigmoid",
"logsoftmax": "logsoftmax", "logsoftmax": "logsoftmax",
"mish": "mish", "mish": "mish",
"multiheadattention": "multiheadattention",
"prelu": "prelu", "prelu": "prelu",
"rrelu": "rrelu", "rrelu": "rrelu",
"relu6": "relu6", "relu6": "relu6",
@ -274,9 +269,9 @@
"Focal point edges weight": "Peso de ponto focal para bordas", "Focal point edges weight": "Peso de ponto focal para bordas",
"Create debug image": "Criar imagem de depuração", "Create debug image": "Criar imagem de depuração",
"Preprocess": "Pré-processar", "Preprocess": "Pré-processar",
"Train an embedding; must specify a directory with a set of 1:1 ratio images": "Treinar um embedding; precisa especificar um diretório com imagens de proporção 1:1", "Train an embedding; must specify a directory with a set of 1:1 ratio images": "Treinar uma incorporação; precisa especificar um diretório com imagens de proporção 1:1",
"[wiki]": "[wiki]", "[wiki]": "[wiki]",
"Embedding": "Embedding", "Embedding": "Incorporação",
"Embedding Learning rate": "Taxa de aprendizagem da incorporação", "Embedding Learning rate": "Taxa de aprendizagem da incorporação",
"Hypernetwork Learning rate": "Taxa de aprendizagem de Hypernetwork", "Hypernetwork Learning rate": "Taxa de aprendizagem de Hypernetwork",
"Dataset directory": "Diretório de Dataset", "Dataset directory": "Diretório de Dataset",
@ -345,9 +340,11 @@
"Filename join string": "Nome de arquivo join string", "Filename join string": "Nome de arquivo join string",
"Number of repeats for a single input image per epoch; used only for displaying epoch number": "Número de repetições para entrada única de imagens por época; serve apenas para mostrar o número de época", "Number of repeats for a single input image per epoch; used only for displaying epoch number": "Número de repetições para entrada única de imagens por época; serve apenas para mostrar o número de época",
"Save an csv containing the loss to log directory every N steps, 0 to disable": "Salvar um csv com as perdas para o diretório de log a cada N steps, 0 para desativar", "Save an csv containing the loss to log directory every N steps, 0 to disable": "Salvar um csv com as perdas para o diretório de log a cada N steps, 0 para desativar",
"Use cross attention optimizations while training": "Usar otimizações de atenção cruzada enquanto treinando",
"Stable Diffusion": "Stable Diffusion", "Stable Diffusion": "Stable Diffusion",
"Checkpoints to cache in RAM": "Checkpoints para manter no cache da RAM", "Checkpoints to cache in RAM": "Checkpoints para manter no cache da RAM",
"Hypernetwork strength": "Força da Hypernetwork", "Hypernetwork strength": "Força da Hypernetwork",
"Inpainting conditioning mask strength": "Força do inpaint para máscaras condicioniais",
"Apply color correction to img2img results to match original colors.": "Aplicar correção de cor nas imagens geradas em img2img, usando a imagem original como base.", "Apply color correction to img2img results to match original colors.": "Aplicar correção de cor nas imagens geradas em img2img, usando a imagem original como base.",
"Save a copy of image before applying color correction to img2img results": "Salvar uma cópia das imagens geradas em img2img antes de aplicar a correção de cor", "Save a copy of image before applying color correction to img2img results": "Salvar uma cópia das imagens geradas em img2img antes de aplicar a correção de cor",
"With img2img, do exactly the amount of steps the slider specifies (normally you'd do less with less denoising).": "Durante gerações img2img, fazer examente o número de steps definidos na barra (normalmente você faz menos steps com denoising menor).", "With img2img, do exactly the amount of steps the slider specifies (normally you'd do less with less denoising).": "Durante gerações img2img, fazer examente o número de steps definidos na barra (normalmente você faz menos steps com denoising menor).",
@ -379,6 +376,7 @@
"Add model hash to generation information": "Adicionar hash do modelo para informação de geração", "Add model hash to generation information": "Adicionar hash do modelo para informação de geração",
"Add model name to generation information": "Adicionar nome do modelo para informação de geração", "Add model name to generation information": "Adicionar nome do modelo para informação de geração",
"When reading generation parameters from text into UI (from PNG info or pasted text), do not change the selected model/checkpoint.": "Quando ler parâmetros de texto para a interface (de informações de PNG ou texto copiado), não alterar o modelo/intervalo selecionado.", "When reading generation parameters from text into UI (from PNG info or pasted text), do not change the selected model/checkpoint.": "Quando ler parâmetros de texto para a interface (de informações de PNG ou texto copiado), não alterar o modelo/intervalo selecionado.",
"Send seed when sending prompt or image to other interface": "Enviar seed quando enviar prompt ou imagem para outra interface",
"Font for image grids that have text": "Fonte para grade de imagens que têm texto", "Font for image grids that have text": "Fonte para grade de imagens que têm texto",
"Enable full page image viewer": "Ativar visualizador de página inteira", "Enable full page image viewer": "Ativar visualizador de página inteira",
"Show images zoomed in by default in full page image viewer": "Mostrar imagens com zoom por definição no visualizador de página inteira", "Show images zoomed in by default in full page image viewer": "Mostrar imagens com zoom por definição no visualizador de página inteira",
@ -386,13 +384,17 @@
"Quicksettings list": "Lista de configurações rapidas", "Quicksettings list": "Lista de configurações rapidas",
"Localization (requires restart)": "Localização (precisa reiniciar)", "Localization (requires restart)": "Localização (precisa reiniciar)",
"ar_AR": "ar_AR", "ar_AR": "ar_AR",
"de_DE": "de_DE",
"es_ES": "es_ES", "es_ES": "es_ES",
"fr-FR": "fr-FR", "fr_FR": "fr_FR",
"it_IT": "it_IT",
"ja_JP": "ja_JP", "ja_JP": "ja_JP",
"ko_KR": "ko_KR", "ko_KR": "ko_KR",
"pt_BR": "pt_BR",
"ru_RU": "ru_RU", "ru_RU": "ru_RU",
"tr_TR": "tr_TR", "tr_TR": "tr_TR",
"zh_CN": "zh_CN", "zh_CN": "zh_CN",
"zh_TW": "zh_TW",
"Sampler parameters": "Parâmetros de Amostragem", "Sampler parameters": "Parâmetros de Amostragem",
"Hide samplers in user interface (requires restart)": "Esconder amostragens na interface de usuário (precisa reiniciar)", "Hide samplers in user interface (requires restart)": "Esconder amostragens na interface de usuário (precisa reiniciar)",
"eta (noise multiplier) for DDIM": "tempo estimado (multiplicador de ruído) para DDIM", "eta (noise multiplier) for DDIM": "tempo estimado (multiplicador de ruído) para DDIM",
@ -408,6 +410,19 @@
"Download localization template": "Baixar arquivo modelo de localização", "Download localization template": "Baixar arquivo modelo de localização",
"Reload custom script bodies (No ui updates, No restart)": "Recarregar scripts personalizados (Sem atualizar a interface, Sem reiniciar)", "Reload custom script bodies (No ui updates, No restart)": "Recarregar scripts personalizados (Sem atualizar a interface, Sem reiniciar)",
"Restart Gradio and Refresh components (Custom Scripts, ui.py, js and css only)": "Reiniciar Gradio e atualizar componentes (Scripts personalizados, ui.py, js e css)", "Restart Gradio and Refresh components (Custom Scripts, ui.py, js and css only)": "Reiniciar Gradio e atualizar componentes (Scripts personalizados, ui.py, js e css)",
"Installed": "Instalado",
"Available": "Disponível",
"Install from URL": "Instalado de URL",
"Apply and restart UI": "Apicar e reiniciar a interface",
"Check for updates": "Procurar por atualizações",
"Extension": "Extensão",
"URL": "URL",
"Update": "Atualização",
"Load from:": "Carregar de:",
"Extension index URL": "Índice de extensão URL",
"URL for extension's git repository": "URL para repositório git da extensão",
"Local directory name": "Nome do diretório local",
"Install": "Instalar",
"Prompt (press Ctrl+Enter or Alt+Enter to generate)": "Prompt (apertar Ctrl+Enter ou Alt+Enter para gerar)", "Prompt (press Ctrl+Enter or Alt+Enter to generate)": "Prompt (apertar Ctrl+Enter ou Alt+Enter para gerar)",
"Negative prompt (press Ctrl+Enter or Alt+Enter to generate)": "Prompt Negativo (apertar Ctrl+Enter ou Alt+Enter para gerar)", "Negative prompt (press Ctrl+Enter or Alt+Enter to generate)": "Prompt Negativo (apertar Ctrl+Enter ou Alt+Enter para gerar)",
"Add a random artist to the prompt.": "Adicionar um artista aleatório para o prompt.", "Add a random artist to the prompt.": "Adicionar um artista aleatório para o prompt.",
@ -420,7 +435,7 @@
"Do not do anything special": "Não faça nada de especial", "Do not do anything special": "Não faça nada de especial",
"Which algorithm to use to produce the image": "O tipo de algoritmo para gerar imagens.", "Which algorithm to use to produce the image": "O tipo de algoritmo para gerar imagens.",
"Euler Ancestral - very creative, each can get a completely different picture depending on step count, setting steps to higher than 30-40 does not help": "Euler Ancestral - cria mais variações para as imagens em diferentes passos. Mais que 40 passos cancela o efeito.", "Euler Ancestral - very creative, each can get a completely different picture depending on step count, setting steps to higher than 30-40 does not help": "Euler Ancestral - cria mais variações para as imagens em diferentes passos. Mais que 40 passos cancela o efeito.",
"Denoising Diffusion Implicit Models - Funciona melhor para inpainting.": "Denoising Diffusion Implicit Models - Funciona melhor para inpainting.", "Denoising Diffusion Implicit Models - best at inpainting": "Denoising Diffusion Implicit Models - Funciona melhor para inpainting.",
"Produce an image that can be tiled.": "Produz uma imagem que pode ser ladrilhada.", "Produce an image that can be tiled.": "Produz uma imagem que pode ser ladrilhada.",
"Use a two step process to partially create an image at smaller resolution, upscale, and then improve details in it without changing composition": "Cria um processo em duas etapas, com uma imagem em baixa qualidade primeiro, aumenta a imagem e refina os detalhes sem alterar a composição da imagem", "Use a two step process to partially create an image at smaller resolution, upscale, and then improve details in it without changing composition": "Cria um processo em duas etapas, com uma imagem em baixa qualidade primeiro, aumenta a imagem e refina os detalhes sem alterar a composição da imagem",
"Determines how little respect the algorithm should have for image's content. At 0, nothing will change, and at 1 you'll get an unrelated image. With values below 1.0, processing will take less steps than the Sampling Steps slider specifies.": "Quanto o algoritmo deve manter da imagem original. Em 0, nada muda. Em 1 o algoritmo ignora a imagem original. Valores menores que 1.0 demoram mais.", "Determines how little respect the algorithm should have for image's content. At 0, nothing will change, and at 1 you'll get an unrelated image. With values below 1.0, processing will take less steps than the Sampling Steps slider specifies.": "Quanto o algoritmo deve manter da imagem original. Em 0, nada muda. Em 1 o algoritmo ignora a imagem original. Valores menores que 1.0 demoram mais.",
@ -438,7 +453,7 @@
"Write image to a directory (default - log/images) and generation parameters into csv file.": "Salva a imagem no diretório padrão ou escolhido e cria um arquivo csv com os parâmetros da geração.", "Write image to a directory (default - log/images) and generation parameters into csv file.": "Salva a imagem no diretório padrão ou escolhido e cria um arquivo csv com os parâmetros da geração.",
"Open images output directory": "Abre o diretório de saída de imagens.", "Open images output directory": "Abre o diretório de saída de imagens.",
"How much to blur the mask before processing, in pixels.": "Transição do contorno da máscara, em pixels.", "How much to blur the mask before processing, in pixels.": "Transição do contorno da máscara, em pixels.",
"What to put inside the masked area before processing it with Stable Diffusion.": "O que vai dentro da máscara antes de processar.", "What to put inside the masked area before processing it with Stable Diffusion.": "O que vai dentro da máscara antes de processá-la com Stable Diffusion.",
"fill it with colors of the image": "Preenche usando as cores da imagem.", "fill it with colors of the image": "Preenche usando as cores da imagem.",
"keep whatever was there originally": "manter usando o que estava lá originalmente", "keep whatever was there originally": "manter usando o que estava lá originalmente",
"fill it with latent space noise": "Preenche com ruídos do espaço latente.", "fill it with latent space noise": "Preenche com ruídos do espaço latente.",
@ -463,6 +478,8 @@
"Restore low quality faces using GFPGAN neural network": "Restaurar rostos de baixa qualidade usando a rede neural GFPGAN", "Restore low quality faces using GFPGAN neural network": "Restaurar rostos de baixa qualidade usando a rede neural GFPGAN",
"This regular expression will be used extract words from filename, and they will be joined using the option below into label text used for training. Leave empty to keep filename text as it is.": "Esta expressão regular vai retirar palavras do nome do arquivo e serão juntadas via regex usando a opção abaixo em etiquetas usadas em treinamento. Não mexer para manter os nomes como estão.", "This regular expression will be used extract words from filename, and they will be joined using the option below into label text used for training. Leave empty to keep filename text as it is.": "Esta expressão regular vai retirar palavras do nome do arquivo e serão juntadas via regex usando a opção abaixo em etiquetas usadas em treinamento. Não mexer para manter os nomes como estão.",
"This string will be used to join split words into a single line if the option above is enabled.": "Esta string será usada para unir palavras divididas em uma única linha se a opção acima estiver habilitada.", "This string will be used to join split words into a single line if the option above is enabled.": "Esta string será usada para unir palavras divididas em uma única linha se a opção acima estiver habilitada.",
"Only applies to inpainting models. Determines how strongly to mask off the original image for inpainting and img2img. 1.0 means fully masked, which is the default behaviour. 0.0 means a fully unmasked conditioning. Lower values will help preserve the overall composition of the image, but will struggle with large changes.": "Aplicável somente para modelos de inpaint. Determina quanto deve mascarar da imagem original para inpaint e img2img. 1.0 significa totalmente mascarado, que é o comportamento padrão. 0.0 significa uma condição totalmente não mascarada. Valores baixos ajudam a preservar a composição geral da imagem, mas vai encontrar dificuldades com grandes mudanças.",
"List of setting names, separated by commas, for settings that should go to the quick access bar at the top, rather than the usual setting tab. See modules/shared.py for setting names. Requires restarting to apply.": "Lista de nomes de configurações, separados por vírgulas, para configurações que devem ir para a barra de acesso rápido na parte superior, em vez da guia de configuração usual. Veja modules/shared.py para nomes de configuração. Necessita reinicialização para aplicar.", "List of setting names, separated by commas, for settings that should go to the quick access bar at the top, rather than the usual setting tab. See modules/shared.py for setting names. Requires restarting to apply.": "Lista de nomes de configurações, separados por vírgulas, para configurações que devem ir para a barra de acesso rápido na parte superior, em vez da guia de configuração usual. Veja modules/shared.py para nomes de configuração. Necessita reinicialização para aplicar.",
"If this values is non-zero, it will be added to seed and used to initialize RNG for noises when using samplers with Eta. You can use this to produce even more variation of images, or you can use this to match images of other software if you know what you are doing.": "Se este valor for diferente de zero, ele será adicionado à seed e usado para inicializar o RNG para ruídos ao usar amostragens com Tempo Estimado. Você pode usar isso para produzir ainda mais variações de imagens ou pode usar isso para combinar imagens de outro software se souber o que está fazendo." "If this values is non-zero, it will be added to seed and used to initialize RNG for noises when using samplers with Eta. You can use this to produce even more variation of images, or you can use this to match images of other software if you know what you are doing.": "Se este valor for diferente de zero, ele será adicionado à seed e usado para inicializar o RNG para ruídos ao usar amostragens com Tempo Estimado. Você pode usar isso para produzir ainda mais variações de imagens ou pode usar isso para combinar imagens de outro software se souber o que está fazendo."
"Leave empty for auto": "Deixar desmarcado para automático"
} }
@ -1,12 +1,13 @@
import base64
import io
import time import time
import uvicorn import uvicorn
from gradio.processing_utils import encode_pil_to_base64, decode_base64_to_file, decode_base64_to_image from gradio.processing_utils import decode_base64_to_file, decode_base64_to_image
from fastapi import APIRouter, Depends, HTTPException from fastapi import APIRouter, Depends, HTTPException
import modules.shared as shared import modules.shared as shared
from modules import devices
from modules.api.models import * from modules.api.models import *
from modules.processing import StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img, process_images from modules.processing import StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img, process_images
from modules.sd_samplers import all_samplers from modules.sd_samplers import all_samplers, sample_to_image, samples_to_image_grid
from modules.extras import run_extras, run_pnginfo from modules.extras import run_extras, run_pnginfo
@ -29,6 +30,12 @@ def setUpscalers(req: dict):
return reqDict return reqDict
def encode_pil_to_base64(image):
buffer = io.BytesIO()
image.save(buffer, format="png")
return base64.b64encode(buffer.getvalue())
class Api: class Api:
def __init__(self, app, queue_lock): def __init__(self, app, queue_lock):
self.router = APIRouter() self.router = APIRouter()
@ -40,6 +47,7 @@ class Api:
self.app.add_api_route("/sdapi/v1/extra-batch-images", self.extras_batch_images_api, methods=["POST"], response_model=ExtrasBatchImagesResponse) self.app.add_api_route("/sdapi/v1/extra-batch-images", self.extras_batch_images_api, methods=["POST"], response_model=ExtrasBatchImagesResponse)
self.app.add_api_route("/sdapi/v1/png-info", self.pnginfoapi, methods=["POST"], response_model=PNGInfoResponse) self.app.add_api_route("/sdapi/v1/png-info", self.pnginfoapi, methods=["POST"], response_model=PNGInfoResponse)
self.app.add_api_route("/sdapi/v1/progress", self.progressapi, methods=["GET"], response_model=ProgressResponse) self.app.add_api_route("/sdapi/v1/progress", self.progressapi, methods=["GET"], response_model=ProgressResponse)
self.app.add_api_route("/sdapi/v1/interrupt", self.interruptapi, methods=["POST"])
def text2imgapi(self, txt2imgreq: StableDiffusionTxt2ImgProcessingAPI): def text2imgapi(self, txt2imgreq: StableDiffusionTxt2ImgProcessingAPI):
sampler_index = sampler_to_index(txt2imgreq.sampler_index) sampler_index = sampler_to_index(txt2imgreq.sampler_index)
@ -170,12 +178,19 @@ class Api:
progress = min(progress, 1) progress = min(progress, 1)
shared.state.set_current_image()
current_image = None current_image = None
if shared.state.current_image and not req.skip_current_image: if shared.state.current_image and not req.skip_current_image:
current_image = encode_pil_to_base64(shared.state.current_image) current_image = encode_pil_to_base64(shared.state.current_image)
return ProgressResponse(progress=progress, eta_relative=eta_relative, state=shared.state.dict(), current_image=current_image) return ProgressResponse(progress=progress, eta_relative=eta_relative, state=shared.state.dict(), current_image=current_image)
def interruptapi(self):
shared.state.interrupt()
return {}
def launch(self, server_name, port): def launch(self, server_name, port):
self.app.include_router(self.router) self.app.include_router(self.router)
uvicorn.run(self.app, host=server_name, port=port) uvicorn.run(self.app, host=server_name, port=port)
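The api.py changes above add a local base64 PNG encoder, call shared.state.set_current_image() before reporting progress, and register a new /sdapi/v1/interrupt route. A minimal client sketch, assuming a local webui running with the API enabled on the default address (host, port and file name below are illustrative, not part of the diff):

    # hedged client sketch for the progress + interrupt routes added above
    import base64, io
    import requests
    from PIL import Image

    BASE = "http://127.0.0.1:7860"           # assumption: default local webui address

    resp = requests.get(f"{BASE}/sdapi/v1/progress").json()
    print(resp["progress"], resp["eta_relative"])
    if resp.get("current_image"):
        # current_image is the raw base64 of a PNG, as produced by encode_pil_to_base64()
        Image.open(io.BytesIO(base64.b64decode(resp["current_image"]))).save("preview.png")

    requests.post(f"{BASE}/sdapi/v1/interrupt")   # stop the job currently running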
@ -50,6 +50,7 @@ def mod2normal(state_dict):
def resrgan2normal(state_dict, nb=23): def resrgan2normal(state_dict, nb=23):
# this code is copied from https://github.com/victorca25/iNNfer # this code is copied from https://github.com/victorca25/iNNfer
if "conv_first.weight" in state_dict and "body.0.rdb1.conv1.weight" in state_dict: if "conv_first.weight" in state_dict and "body.0.rdb1.conv1.weight" in state_dict:
re8x = 0
crt_net = {} crt_net = {}
items = [] items = []
for k, v in state_dict.items(): for k, v in state_dict.items():
@ -75,10 +76,18 @@ def resrgan2normal(state_dict, nb=23):
crt_net['model.3.bias'] = state_dict['conv_up1.bias'] crt_net['model.3.bias'] = state_dict['conv_up1.bias']
crt_net['model.6.weight'] = state_dict['conv_up2.weight'] crt_net['model.6.weight'] = state_dict['conv_up2.weight']
crt_net['model.6.bias'] = state_dict['conv_up2.bias'] crt_net['model.6.bias'] = state_dict['conv_up2.bias']
crt_net['model.8.weight'] = state_dict['conv_hr.weight']
crt_net['model.8.bias'] = state_dict['conv_hr.bias'] if 'conv_up3.weight' in state_dict:
crt_net['model.10.weight'] = state_dict['conv_last.weight'] # modification supporting: https://github.com/ai-forever/Real-ESRGAN/blob/main/RealESRGAN/rrdbnet_arch.py
crt_net['model.10.bias'] = state_dict['conv_last.bias'] re8x = 3
crt_net['model.9.weight'] = state_dict['conv_up3.weight']
crt_net['model.9.bias'] = state_dict['conv_up3.bias']
crt_net[f'model.{8+re8x}.weight'] = state_dict['conv_hr.weight']
crt_net[f'model.{8+re8x}.bias'] = state_dict['conv_hr.bias']
crt_net[f'model.{10+re8x}.weight'] = state_dict['conv_last.weight']
crt_net[f'model.{10+re8x}.bias'] = state_dict['conv_last.bias']
state_dict = crt_net state_dict = crt_net
return state_dict return state_dict
83
modules/extensions.py Normal file
@ -0,0 +1,83 @@
import os
import sys
import traceback
import git
from modules import paths, shared
extensions = []
extensions_dir = os.path.join(paths.script_path, "extensions")
def active():
return [x for x in extensions if x.enabled]
class Extension:
def __init__(self, name, path, enabled=True):
self.name = name
self.path = path
self.enabled = enabled
self.status = ''
self.can_update = False
repo = None
try:
if os.path.exists(os.path.join(path, ".git")):
repo = git.Repo(path)
except Exception:
print(f"Error reading github repository info from {path}:", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
if repo is None or repo.bare:
self.remote = None
else:
self.remote = next(repo.remote().urls, None)
self.status = 'unknown'
def list_files(self, subdir, extension):
from modules import scripts
dirpath = os.path.join(self.path, subdir)
if not os.path.isdir(dirpath):
return []
res = []
for filename in sorted(os.listdir(dirpath)):
res.append(scripts.ScriptFile(self.path, filename, os.path.join(dirpath, filename)))
res = [x for x in res if os.path.splitext(x.path)[1].lower() == extension and os.path.isfile(x.path)]
return res
def check_updates(self):
repo = git.Repo(self.path)
for fetch in repo.remote().fetch("--dry-run"):
if fetch.flags != fetch.HEAD_UPTODATE:
self.can_update = True
self.status = "behind"
return
self.can_update = False
self.status = "latest"
def pull(self):
repo = git.Repo(self.path)
repo.remotes.origin.pull()
def list_extensions():
extensions.clear()
if not os.path.isdir(extensions_dir):
return
for dirname in sorted(os.listdir(extensions_dir)):
path = os.path.join(extensions_dir, dirname)
if not os.path.isdir(path):
continue
extension = Extension(name=dirname, path=path, enabled=dirname not in shared.opts.disabled_extensions)
extensions.append(extension)
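The new modules/extensions.py above is a thin wrapper around GitPython: list_extensions() scans the extensions/ directory, active() filters out anything named in opts.disabled_extensions, and check_updates() does a dry-run fetch to fill can_update and status. A hedged sketch of how the extensions UI is expected to drive it (assumes GitPython is installed and at least one extension directory exists):

    # hedged sketch of driving the module added above
    from modules import extensions

    extensions.list_extensions()                 # rescan <script_path>/extensions
    for ext in extensions.active():              # skips opts.disabled_extensions entries
        if ext.remote:                           # only git-backed extensions can be checked
            ext.check_updates()
        print(ext.name, ext.status, "can update" if ext.can_update else "up to date")
        if ext.can_update:
            ext.pull()                           # what the "update" checkbox + apply is expected to do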
@ -141,7 +141,7 @@ def run_extras(extras_mode, resize_mode, image, image_folder, input_dir, output_
upscaling_resize_w, upscaling_resize_h, upscaling_crop) upscaling_resize_w, upscaling_resize_h, upscaling_crop)
cache_key = LruCache.Key(image_hash=hash(np.array(image.getdata()).tobytes()), cache_key = LruCache.Key(image_hash=hash(np.array(image.getdata()).tobytes()),
info_hash=hash(info), info_hash=hash(info),
args_hash=hash(upscale_args)) args_hash=hash((upscale_args, upscale_first)))
cached_entry = cached_images.get(cache_key) cached_entry = cached_images.get(cache_key)
if cached_entry is None: if cached_entry is None:
res = upscale(image, *upscale_args) res = upscale(image, *upscale_args)
@ -17,6 +17,11 @@ paste_fields = {}
bind_list = [] bind_list = []
def reset():
paste_fields.clear()
bind_list.clear()
def quote(text): def quote(text):
if ',' not in str(text): if ',' not in str(text):
return text return text
@ -510,6 +510,7 @@ def save_image(image, path, basename, seed=None, prompt=None, extension='png', i
if extension.lower() == '.png': if extension.lower() == '.png':
pnginfo_data = PngImagePlugin.PngInfo() pnginfo_data = PngImagePlugin.PngInfo()
if opts.enable_pnginfo:
for k, v in params.pnginfo.items(): for k, v in params.pnginfo.items():
pnginfo_data.add_text(k, str(v)) pnginfo_data.add_text(k, str(v))
@ -55,6 +55,7 @@ def process_batch(p, input_dir, output_dir, args):
filename = f"{left}-{n}{right}" filename = f"{left}-{n}{right}"
if not save_normally: if not save_normally:
os.makedirs(output_dir, exist_ok=True)
processed_image.save(os.path.join(output_dir, filename)) processed_image.save(os.path.join(output_dir, filename))
@ -80,6 +81,7 @@ def img2img(mode: int, prompt: str, negative_prompt: str, prompt_style: str, pro
mask = None mask = None
# Use the EXIF orientation of photos taken by smartphones. # Use the EXIF orientation of photos taken by smartphones.
if image is not None:
image = ImageOps.exif_transpose(image) image = ImageOps.exif_transpose(image)
assert 0. <= denoising_strength <= 1., 'can only work with strength in [0.0, 1.0]' assert 0. <= denoising_strength <= 1., 'can only work with strength in [0.0, 1.0]'
@ -136,6 +138,8 @@ def img2img(mode: int, prompt: str, negative_prompt: str, prompt_style: str, pro
if processed is None: if processed is None:
processed = process_images(p) processed = process_images(p)
p.close()
shared.total_tqdm.clear() shared.total_tqdm.clear()
generation_info_js = processed.js() generation_info_js = processed.js()
@ -56,9 +56,9 @@ class InterrogateModels:
import clip import clip
if self.running_on_cpu: if self.running_on_cpu:
model, preprocess = clip.load(clip_model_name, device="cpu") model, preprocess = clip.load(clip_model_name, device="cpu", download_root=shared.cmd_opts.clip_models_path)
else: else:
model, preprocess = clip.load(clip_model_name) model, preprocess = clip.load(clip_model_name, download_root=shared.cmd_opts.clip_models_path)
model.eval() model.eval()
model = model.to(devices.device_interrogate) model = model.to(devices.device_interrogate)
@ -38,13 +38,18 @@ def setup_for_low_vram(sd_model, use_medvram):
# see below for register_forward_pre_hook; # see below for register_forward_pre_hook;
# first_stage_model does not use forward(), it uses encode/decode, so register_forward_pre_hook is # first_stage_model does not use forward(), it uses encode/decode, so register_forward_pre_hook is
# useless here, and we just replace those methods # useless here, and we just replace those methods
def first_stage_model_encode_wrap(self, encoder, x):
send_me_to_gpu(self, None)
return encoder(x)
def first_stage_model_decode_wrap(self, decoder, z): first_stage_model = sd_model.first_stage_model
send_me_to_gpu(self, None) first_stage_model_encode = sd_model.first_stage_model.encode
return decoder(z) first_stage_model_decode = sd_model.first_stage_model.decode
def first_stage_model_encode_wrap(x):
send_me_to_gpu(first_stage_model, None)
return first_stage_model_encode(x)
def first_stage_model_decode_wrap(z):
send_me_to_gpu(first_stage_model, None)
return first_stage_model_decode(z)
# remove three big modules, cond, first_stage, and unet from the model and then # remove three big modules, cond, first_stage, and unet from the model and then
# send the model to GPU. Then put modules back. the modules will be in CPU. # send the model to GPU. Then put modules back. the modules will be in CPU.
@ -56,8 +61,8 @@ def setup_for_low_vram(sd_model, use_medvram):
# register hooks for those the first two models # register hooks for those the first two models
sd_model.cond_stage_model.transformer.register_forward_pre_hook(send_me_to_gpu) sd_model.cond_stage_model.transformer.register_forward_pre_hook(send_me_to_gpu)
sd_model.first_stage_model.register_forward_pre_hook(send_me_to_gpu) sd_model.first_stage_model.register_forward_pre_hook(send_me_to_gpu)
sd_model.first_stage_model.encode = lambda x, en=sd_model.first_stage_model.encode: first_stage_model_encode_wrap(sd_model.first_stage_model, en, x) sd_model.first_stage_model.encode = first_stage_model_encode_wrap
sd_model.first_stage_model.decode = lambda z, de=sd_model.first_stage_model.decode: first_stage_model_decode_wrap(sd_model.first_stage_model, de, z) sd_model.first_stage_model.decode = first_stage_model_decode_wrap
parents[sd_model.cond_stage_model.transformer] = sd_model.cond_stage_model parents[sd_model.cond_stage_model.transformer] = sd_model.cond_stage_model
if use_medvram: if use_medvram:
@ -85,6 +85,9 @@ def cleanup_models():
src_path = os.path.join(root_path, "ESRGAN") src_path = os.path.join(root_path, "ESRGAN")
dest_path = os.path.join(models_path, "ESRGAN") dest_path = os.path.join(models_path, "ESRGAN")
move_files(src_path, dest_path) move_files(src_path, dest_path)
src_path = os.path.join(models_path, "BSRGAN")
dest_path = os.path.join(models_path, "ESRGAN")
move_files(src_path, dest_path, ".pth")
src_path = os.path.join(root_path, "gfpgan") src_path = os.path.join(root_path, "gfpgan")
dest_path = os.path.join(models_path, "GFPGAN") dest_path = os.path.join(models_path, "GFPGAN")
move_files(src_path, dest_path) move_files(src_path, dest_path)
@ -199,9 +199,13 @@ class StableDiffusionProcessing():
def init(self, all_prompts, all_seeds, all_subseeds): def init(self, all_prompts, all_seeds, all_subseeds):
pass pass
def sample(self, conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength): def sample(self, conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength, prompts):
raise NotImplementedError() raise NotImplementedError()
def close(self):
self.sd_model = None
self.sampler = None
class Processed: class Processed:
def __init__(self, p: StableDiffusionProcessing, images_list, seed=-1, info="", subseed=None, all_prompts=None, all_seeds=None, all_subseeds=None, index_of_first_image=0, infotexts=None): def __init__(self, p: StableDiffusionProcessing, images_list, seed=-1, info="", subseed=None, all_prompts=None, all_seeds=None, all_subseeds=None, index_of_first_image=0, infotexts=None):
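The new close() method lets callers drop the sd_model and sampler references once a job is done; the img2img change earlier in this diff calls it right after process_images(). A hedged sketch of the intended lifecycle (the constructor arguments shown are placeholders, not a complete parameter list):

    # hedged sketch of the close() lifecycle introduced above
    from modules import shared
    from modules.processing import StableDiffusionProcessingTxt2Img, process_images

    p = StableDiffusionProcessingTxt2Img(sd_model=shared.sd_model, prompt="a photo of a cat", steps=20)
    try:
        processed = process_images(p)
    finally:
        p.close()        # release sd_model / sampler so they are not kept alive by p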
@ -517,7 +521,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
shared.state.job = f"Batch {n+1} out of {p.n_iter}" shared.state.job = f"Batch {n+1} out of {p.n_iter}"
with devices.autocast(): with devices.autocast():
samples_ddim = p.sample(conditioning=c, unconditional_conditioning=uc, seeds=seeds, subseeds=subseeds, subseed_strength=p.subseed_strength) samples_ddim = p.sample(conditioning=c, unconditional_conditioning=uc, seeds=seeds, subseeds=subseeds, subseed_strength=p.subseed_strength, prompts=prompts)
samples_ddim = samples_ddim.to(devices.dtype_vae) samples_ddim = samples_ddim.to(devices.dtype_vae)
x_samples_ddim = decode_first_stage(p.sd_model, samples_ddim) x_samples_ddim = decode_first_stage(p.sd_model, samples_ddim)
@ -645,7 +649,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
self.truncate_x = int(self.firstphase_width - firstphase_width_truncated) // opt_f self.truncate_x = int(self.firstphase_width - firstphase_width_truncated) // opt_f
self.truncate_y = int(self.firstphase_height - firstphase_height_truncated) // opt_f self.truncate_y = int(self.firstphase_height - firstphase_height_truncated) // opt_f
def sample(self, conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength): def sample(self, conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength, prompts):
self.sampler = sd_samplers.create_sampler_with_index(sd_samplers.samplers, self.sampler_index, self.sd_model) self.sampler = sd_samplers.create_sampler_with_index(sd_samplers.samplers, self.sampler_index, self.sd_model)
if not self.enable_hr: if not self.enable_hr:
@ -658,9 +662,21 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
samples = samples[:, :, self.truncate_y//2:samples.shape[2]-self.truncate_y//2, self.truncate_x//2:samples.shape[3]-self.truncate_x//2] samples = samples[:, :, self.truncate_y//2:samples.shape[2]-self.truncate_y//2, self.truncate_x//2:samples.shape[3]-self.truncate_x//2]
"""saves image before applying hires fix, if enabled in options; takes as an arguyment either an image or batch with latent space images"""
def save_intermediate(image, index):
if not opts.save or self.do_not_save_samples or not opts.save_images_before_highres_fix:
return
if not isinstance(image, Image.Image):
image = sd_samplers.sample_to_image(image, index)
images.save_image(image, self.outpath_samples, "", seeds[index], prompts[index], opts.samples_format, suffix="-before-highres-fix")
if opts.use_scale_latent_for_hires_fix: if opts.use_scale_latent_for_hires_fix:
samples = torch.nn.functional.interpolate(samples, size=(self.height // opt_f, self.width // opt_f), mode="bilinear") samples = torch.nn.functional.interpolate(samples, size=(self.height // opt_f, self.width // opt_f), mode="bilinear")
for i in range(samples.shape[0]):
save_intermediate(samples, i)
else: else:
decoded_samples = decode_first_stage(self.sd_model, samples) decoded_samples = decode_first_stage(self.sd_model, samples)
lowres_samples = torch.clamp((decoded_samples + 1.0) / 2.0, min=0.0, max=1.0) lowres_samples = torch.clamp((decoded_samples + 1.0) / 2.0, min=0.0, max=1.0)
@ -670,6 +686,9 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
x_sample = 255. * np.moveaxis(x_sample.cpu().numpy(), 0, 2) x_sample = 255. * np.moveaxis(x_sample.cpu().numpy(), 0, 2)
x_sample = x_sample.astype(np.uint8) x_sample = x_sample.astype(np.uint8)
image = Image.fromarray(x_sample) image = Image.fromarray(x_sample)
save_intermediate(image, i)
image = images.resize_image(0, image, self.width, self.height) image = images.resize_image(0, image, self.width, self.height)
image = np.array(image).astype(np.float32) / 255.0 image = np.array(image).astype(np.float32) / 255.0
image = np.moveaxis(image, 2, 0) image = np.moveaxis(image, 2, 0)
@ -827,8 +846,7 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
self.image_conditioning = self.img2img_image_conditioning(image, self.init_latent, self.image_mask) self.image_conditioning = self.img2img_image_conditioning(image, self.init_latent, self.image_mask)
def sample(self, conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength, prompts):
def sample(self, conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength):
x = create_random_tensors([opt_C, self.height // opt_f, self.width // opt_f], seeds=seeds, subseeds=subseeds, subseed_strength=self.subseed_strength, seed_resize_from_h=self.seed_resize_from_h, seed_resize_from_w=self.seed_resize_from_w, p=self) x = create_random_tensors([opt_C, self.height // opt_f, self.width // opt_f], seeds=seeds, subseeds=subseeds, subseed_strength=self.subseed_strength, seed_resize_from_h=self.seed_resize_from_h, seed_resize_from_w=self.seed_resize_from_w, p=self)
samples = self.sampler.sample_img2img(self, self.init_latent, x, conditioning, unconditional_conditioning, image_conditioning=self.image_conditioning) samples = self.sampler.sample_img2img(self, self.init_latent, x, conditioning, unconditional_conditioning, image_conditioning=self.image_conditioning)
@ -32,7 +32,7 @@ class RestrictedUnpickler(pickle.Unpickler):
return getattr(collections, name) return getattr(collections, name)
if module == 'torch._utils' and name in ['_rebuild_tensor_v2', '_rebuild_parameter']: if module == 'torch._utils' and name in ['_rebuild_tensor_v2', '_rebuild_parameter']:
return getattr(torch._utils, name) return getattr(torch._utils, name)
if module == 'torch' and name in ['FloatStorage', 'HalfStorage', 'IntStorage', 'LongStorage', 'DoubleStorage']: if module == 'torch' and name in ['FloatStorage', 'HalfStorage', 'IntStorage', 'LongStorage', 'DoubleStorage', 'ByteStorage']:
return getattr(torch, name) return getattr(torch, name)
if module == 'torch.nn.modules.container' and name in ['ParameterDict']: if module == 'torch.nn.modules.container' and name in ['ParameterDict']:
return getattr(torch.nn.modules.container, name) return getattr(torch.nn.modules.container, name)
@ -2,7 +2,10 @@ import sys
import traceback import traceback
from collections import namedtuple from collections import namedtuple
import inspect import inspect
from typing import Optional
from fastapi import FastAPI
from gradio import Blocks
def report_exception(c, job): def report_exception(c, job):
print(f"Error executing callback {job} for {c.script}", file=sys.stderr) print(f"Error executing callback {job} for {c.script}", file=sys.stderr)
@ -24,12 +27,32 @@ class ImageSaveParams:
"""dictionary with parameters for image's PNG info data; infotext will have the key 'parameters'""" """dictionary with parameters for image's PNG info data; infotext will have the key 'parameters'"""
class CFGDenoiserParams:
def __init__(self, x, image_cond, sigma, sampling_step, total_sampling_steps):
self.x = x
"""Latent image representation in the process of being denoised"""
self.image_cond = image_cond
"""Conditioning image"""
self.sigma = sigma
"""Current sigma noise step value"""
self.sampling_step = sampling_step
"""Current Sampling step number"""
self.total_sampling_steps = total_sampling_steps
"""Total number of sampling steps planned"""
ScriptCallback = namedtuple("ScriptCallback", ["script", "callback"]) ScriptCallback = namedtuple("ScriptCallback", ["script", "callback"])
callbacks_app_started = []
callbacks_model_loaded = [] callbacks_model_loaded = []
callbacks_ui_tabs = [] callbacks_ui_tabs = []
callbacks_ui_settings = [] callbacks_ui_settings = []
callbacks_before_image_saved = [] callbacks_before_image_saved = []
callbacks_image_saved = [] callbacks_image_saved = []
callbacks_cfg_denoiser = []
def clear_callbacks(): def clear_callbacks():
@ -38,6 +61,14 @@ def clear_callbacks():
callbacks_ui_settings.clear() callbacks_ui_settings.clear()
callbacks_before_image_saved.clear() callbacks_before_image_saved.clear()
callbacks_image_saved.clear() callbacks_image_saved.clear()
callbacks_cfg_denoiser.clear()
def app_started_callback(demo: Optional[Blocks], app: FastAPI):
for c in callbacks_app_started:
try:
c.callback(demo, app)
except Exception:
report_exception(c, 'app_started_callback')
def model_loaded_callback(sd_model): def model_loaded_callback(sd_model):
@ -69,7 +100,7 @@ def ui_settings_callback():
def before_image_saved_callback(params: ImageSaveParams): def before_image_saved_callback(params: ImageSaveParams):
for c in callbacks_image_saved: for c in callbacks_before_image_saved:
try: try:
c.callback(params) c.callback(params)
except Exception: except Exception:
@ -84,6 +115,14 @@ def image_saved_callback(params: ImageSaveParams):
report_exception(c, 'image_saved_callback') report_exception(c, 'image_saved_callback')
def cfg_denoiser_callback(params: CFGDenoiserParams):
for c in callbacks_cfg_denoiser:
try:
c.callback(params)
except Exception:
report_exception(c, 'cfg_denoiser_callback')
def add_callback(callbacks, fun): def add_callback(callbacks, fun):
stack = [x for x in inspect.stack() if x.filename != __file__] stack = [x for x in inspect.stack() if x.filename != __file__]
filename = stack[0].filename if len(stack) > 0 else 'unknown file' filename = stack[0].filename if len(stack) > 0 else 'unknown file'
@ -91,6 +130,12 @@ def add_callback(callbacks, fun):
callbacks.append(ScriptCallback(filename, fun)) callbacks.append(ScriptCallback(filename, fun))
def on_app_started(callback):
"""register a function to be called when the webui started, the gradio `Block` component and
fastapi `FastAPI` object are passed as the arguments"""
add_callback(callbacks_app_started, callback)
def on_model_loaded(callback): def on_model_loaded(callback):
"""register a function to be called when the stable diffusion model is created; the model is """register a function to be called when the stable diffusion model is created; the model is
passed as an argument""" passed as an argument"""
@ -130,3 +175,12 @@ def on_image_saved(callback):
- params: ImageSaveParams - parameters the image was saved with. Changing fields in this object does nothing. - params: ImageSaveParams - parameters the image was saved with. Changing fields in this object does nothing.
""" """
add_callback(callbacks_image_saved, callback) add_callback(callbacks_image_saved, callback)
def on_cfg_denoiser(callback):
"""register a function to be called in the kdiffussion cfg_denoiser method after building the inner model inputs.
The callback is called with one argument:
- params: CFGDenoiserParams - parameters to be passed to the inner model and sampling state details.
"""
add_callback(callbacks_cfg_denoiser, callback)
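Extensions register the two new callbacks the same way as the existing ones; a minimal hedged sketch of an extension script using on_app_started and on_cfg_denoiser (file name and the print statements are illustrative):

    # hedged sketch: e.g. extensions/<my-extension>/scripts/callbacks_demo.py
    from modules import script_callbacks

    def started(demo, app):
        # demo is the gradio Blocks instance (may be None), app is the FastAPI object
        print("webui started, routes:", len(app.routes))

    def denoiser(params: script_callbacks.CFGDenoiserParams):
        # x / image_cond / sigma may be modified in place; the sampler reads them back
        print(f"denoising step {params.sampling_step}/{params.total_sampling_steps}")

    script_callbacks.on_app_started(started)
    script_callbacks.on_cfg_denoiser(denoiser)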
@ -7,7 +7,7 @@ import modules.ui as ui
import gradio as gr import gradio as gr
from modules.processing import StableDiffusionProcessing from modules.processing import StableDiffusionProcessing
from modules import shared, paths, script_callbacks from modules import shared, paths, script_callbacks, extensions
AlwaysVisible = object() AlwaysVisible = object()
@ -107,17 +107,8 @@ def list_scripts(scriptdirname, extension):
for filename in sorted(os.listdir(basedir)): for filename in sorted(os.listdir(basedir)):
scripts_list.append(ScriptFile(paths.script_path, filename, os.path.join(basedir, filename))) scripts_list.append(ScriptFile(paths.script_path, filename, os.path.join(basedir, filename)))
extdir = os.path.join(paths.script_path, "extensions") for ext in extensions.active():
if os.path.exists(extdir): scripts_list += ext.list_files(scriptdirname, extension)
for dirname in sorted(os.listdir(extdir)):
dirpath = os.path.join(extdir, dirname)
scriptdirpath = os.path.join(dirpath, scriptdirname)
if not os.path.isdir(scriptdirpath):
continue
for filename in sorted(os.listdir(scriptdirpath)):
scripts_list.append(ScriptFile(dirpath, filename, os.path.join(scriptdirpath, filename)))
scripts_list = [x for x in scripts_list if os.path.splitext(x.path)[1].lower() == extension and os.path.isfile(x.path)] scripts_list = [x for x in scripts_list if os.path.splitext(x.path)[1].lower() == extension and os.path.isfile(x.path)]
@ -127,11 +118,7 @@ def list_scripts(scriptdirname, extension):
def list_files_with_name(filename): def list_files_with_name(filename):
res = [] res = []
dirs = [paths.script_path] dirs = [paths.script_path] + [ext.path for ext in extensions.active()]
extdir = os.path.join(paths.script_path, "extensions")
if os.path.exists(extdir):
dirs += [os.path.join(extdir, d) for d in sorted(os.listdir(extdir))]
for dirpath in dirs: for dirpath in dirs:
if not os.path.isdir(dirpath): if not os.path.isdir(dirpath):

if type(model_embeddings.token_embedding) == EmbeddingsWithFixes: if type(model_embeddings.token_embedding) == EmbeddingsWithFixes:
model_embeddings.token_embedding = model_embeddings.token_embedding.wrapped model_embeddings.token_embedding = model_embeddings.token_embedding.wrapped
self.layers = None
self.circular_enabled = False
self.clip = None
def apply_circular(self, enable): def apply_circular(self, enable):
if self.circular_enabled == enable: if self.circular_enabled == enable:
return return
@ -1,6 +1,7 @@
import collections import collections
import os.path import os.path
import sys import sys
import gc
from collections import namedtuple from collections import namedtuple
import torch import torch
import re import re
@ -8,7 +9,7 @@ from omegaconf import OmegaConf
from ldm.util import instantiate_from_config from ldm.util import instantiate_from_config
from modules import shared, modelloader, devices, script_callbacks from modules import shared, modelloader, devices, script_callbacks, sd_vae
from modules.paths import models_path from modules.paths import models_path
from modules.sd_hijack_inpainting import do_inpainting_hijack, should_hijack_inpainting from modules.sd_hijack_inpainting import do_inpainting_hijack, should_hijack_inpainting
@ -158,14 +159,15 @@ def get_state_dict_from_checkpoint(pl_sd):
return pl_sd return pl_sd
vae_ignore_keys = {"model_ema.decay", "model_ema.num_updates"} def load_model_weights(model, checkpoint_info, vae_file="auto"):
def load_model_weights(model, checkpoint_info):
checkpoint_file = checkpoint_info.filename checkpoint_file = checkpoint_info.filename
sd_model_hash = checkpoint_info.hash sd_model_hash = checkpoint_info.hash
if checkpoint_info not in checkpoints_loaded: vae_file = sd_vae.resolve_vae(checkpoint_file, vae_file=vae_file)
checkpoint_key = checkpoint_info
if checkpoint_key not in checkpoints_loaded:
print(f"Loading weights [{sd_model_hash}] from {checkpoint_file}") print(f"Loading weights [{sd_model_hash}] from {checkpoint_file}")
pl_sd = torch.load(checkpoint_file, map_location=shared.weight_load_location) pl_sd = torch.load(checkpoint_file, map_location=shared.weight_load_location)
@ -181,37 +183,38 @@ def load_model_weights(model, checkpoint_info):
model.to(memory_format=torch.channels_last) model.to(memory_format=torch.channels_last)
if not shared.cmd_opts.no_half: if not shared.cmd_opts.no_half:
vae = model.first_stage_model
# with --no-half-vae, remove VAE from model when doing half() to prevent its weights from being converted to float16
if shared.cmd_opts.no_half_vae:
model.first_stage_model = None
model.half() model.half()
model.first_stage_model = vae
devices.dtype = torch.float32 if shared.cmd_opts.no_half else torch.float16 devices.dtype = torch.float32 if shared.cmd_opts.no_half else torch.float16
devices.dtype_vae = torch.float32 if shared.cmd_opts.no_half or shared.cmd_opts.no_half_vae else torch.float16 devices.dtype_vae = torch.float32 if shared.cmd_opts.no_half or shared.cmd_opts.no_half_vae else torch.float16
vae_file = os.path.splitext(checkpoint_file)[0] + ".vae.pt"
if not os.path.exists(vae_file) and shared.cmd_opts.vae_path is not None:
vae_file = shared.cmd_opts.vae_path
if os.path.exists(vae_file):
print(f"Loading VAE weights from: {vae_file}")
vae_ckpt = torch.load(vae_file, map_location=shared.weight_load_location)
vae_dict = {k: v for k, v in vae_ckpt["state_dict"].items() if k[0:4] != "loss" and k not in vae_ignore_keys}
model.first_stage_model.load_state_dict(vae_dict)
model.first_stage_model.to(devices.dtype_vae) model.first_stage_model.to(devices.dtype_vae)
if shared.opts.sd_checkpoint_cache > 0: if shared.opts.sd_checkpoint_cache > 0:
checkpoints_loaded[checkpoint_info] = model.state_dict().copy() # if PR #4035 were to get merged, restore base VAE first before caching
checkpoints_loaded[checkpoint_key] = model.state_dict().copy()
while len(checkpoints_loaded) > shared.opts.sd_checkpoint_cache: while len(checkpoints_loaded) > shared.opts.sd_checkpoint_cache:
checkpoints_loaded.popitem(last=False) # LRU checkpoints_loaded.popitem(last=False) # LRU
else: else:
print(f"Loading weights [{sd_model_hash}] from cache") vae_name = sd_vae.get_filename(vae_file)
checkpoints_loaded.move_to_end(checkpoint_info) print(f"Loading weights [{sd_model_hash}] with {vae_name} VAE from cache")
model.load_state_dict(checkpoints_loaded[checkpoint_info]) checkpoints_loaded.move_to_end(checkpoint_key)
model.load_state_dict(checkpoints_loaded[checkpoint_key])
model.sd_model_hash = sd_model_hash model.sd_model_hash = sd_model_hash
model.sd_model_checkpoint = checkpoint_file model.sd_model_checkpoint = checkpoint_file
model.sd_checkpoint_info = checkpoint_info model.sd_checkpoint_info = checkpoint_info
sd_vae.load_vae(model, vae_file)
def load_model(checkpoint_info=None): def load_model(checkpoint_info=None):
from modules import lowvram, sd_hijack from modules import lowvram, sd_hijack
@ -220,6 +223,12 @@ def load_model(checkpoint_info=None):
if checkpoint_info.config != shared.cmd_opts.config: if checkpoint_info.config != shared.cmd_opts.config:
print(f"Loading config from: {checkpoint_info.config}") print(f"Loading config from: {checkpoint_info.config}")
if shared.sd_model:
sd_hijack.model_hijack.undo_hijack(shared.sd_model)
shared.sd_model = None
gc.collect()
devices.torch_gc()
sd_config = OmegaConf.load(checkpoint_info.config) sd_config = OmegaConf.load(checkpoint_info.config)
if should_hijack_inpainting(checkpoint_info): if should_hijack_inpainting(checkpoint_info):
@ -233,6 +242,7 @@ def load_model(checkpoint_info=None):
checkpoint_info = checkpoint_info._replace(config=checkpoint_info.config.replace(".yaml", "-inpainting.yaml")) checkpoint_info = checkpoint_info._replace(config=checkpoint_info.config.replace(".yaml", "-inpainting.yaml"))
do_inpainting_hijack() do_inpainting_hijack()
sd_model = instantiate_from_config(sd_config.model) sd_model = instantiate_from_config(sd_config.model)
load_model_weights(sd_model, checkpoint_info) load_model_weights(sd_model, checkpoint_info)
@ -252,14 +262,18 @@ def load_model(checkpoint_info=None):
return sd_model return sd_model
def reload_model_weights(sd_model, info=None): def reload_model_weights(sd_model=None, info=None):
from modules import lowvram, devices, sd_hijack from modules import lowvram, devices, sd_hijack
checkpoint_info = info or select_checkpoint() checkpoint_info = info or select_checkpoint()
if not sd_model:
sd_model = shared.sd_model
if sd_model.sd_model_checkpoint == checkpoint_info.filename: if sd_model.sd_model_checkpoint == checkpoint_info.filename:
return return
if sd_model.sd_checkpoint_info.config != checkpoint_info.config or should_hijack_inpainting(checkpoint_info) != should_hijack_inpainting(sd_model.sd_checkpoint_info): if sd_model.sd_checkpoint_info.config != checkpoint_info.config or should_hijack_inpainting(checkpoint_info) != should_hijack_inpainting(sd_model.sd_checkpoint_info):
del sd_model
checkpoints_loaded.clear() checkpoints_loaded.clear()
load_model(checkpoint_info) load_model(checkpoint_info)
return shared.sd_model return shared.sd_model
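Since reload_model_weights() now defaults to shared.sd_model and load_model() frees the previous model first (undoing the hijack, then gc.collect and torch_gc), switching checkpoints from code no longer needs the model passed in. A hedged sketch relying only on names visible in this hunk:

    # hedged sketch: reload whatever checkpoint select_checkpoint() currently resolves
    from modules import sd_models, shared

    sd_models.reload_model_weights()            # sd_model defaults to shared.sd_model now
    print(shared.sd_model.sd_model_hash)        # set by load_model_weights above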
@ -1,5 +1,6 @@
from collections import namedtuple from collections import namedtuple
import numpy as np import numpy as np
from math import floor
import torch import torch
import tqdm import tqdm
from PIL import Image from PIL import Image
@ -11,6 +12,7 @@ from modules import prompt_parser, devices, processing, images
from modules.shared import opts, cmd_opts, state from modules.shared import opts, cmd_opts, state
import modules.shared as shared import modules.shared as shared
from modules.script_callbacks import CFGDenoiserParams, cfg_denoiser_callback
SamplerData = namedtuple('SamplerData', ['name', 'constructor', 'aliases', 'options']) SamplerData = namedtuple('SamplerData', ['name', 'constructor', 'aliases', 'options'])
@ -91,8 +93,8 @@ def single_sample_to_image(sample):
return Image.fromarray(x_sample) return Image.fromarray(x_sample)
def sample_to_image(samples): def sample_to_image(samples, index=0):
return single_sample_to_image(samples[0]) return single_sample_to_image(samples[index])
def samples_to_image_grid(samples): def samples_to_image_grid(samples):
@ -205,17 +207,22 @@ class VanillaStableDiffusionSampler:
self.mask = p.mask if hasattr(p, 'mask') else None self.mask = p.mask if hasattr(p, 'mask') else None
self.nmask = p.nmask if hasattr(p, 'nmask') else None self.nmask = p.nmask if hasattr(p, 'nmask') else None
def adjust_steps_if_invalid(self, p, num_steps):
if (self.config.name == 'DDIM' and p.ddim_discretize == 'uniform') or (self.config.name == 'PLMS'):
valid_step = 999 / (1000 // num_steps)
if valid_step == floor(valid_step):
return int(valid_step) + 1
return num_steps
def sample_img2img(self, p, x, noise, conditioning, unconditional_conditioning, steps=None, image_conditioning=None): def sample_img2img(self, p, x, noise, conditioning, unconditional_conditioning, steps=None, image_conditioning=None):
steps, t_enc = setup_img2img_steps(p, steps) steps, t_enc = setup_img2img_steps(p, steps)
steps = self.adjust_steps_if_invalid(p, steps)
self.initialize(p) self.initialize(p)
# existing code fails with certain step counts, like 9
try:
self.sampler.make_schedule(ddim_num_steps=steps, ddim_eta=self.eta, ddim_discretize=p.ddim_discretize, verbose=False) self.sampler.make_schedule(ddim_num_steps=steps, ddim_eta=self.eta, ddim_discretize=p.ddim_discretize, verbose=False)
except Exception:
self.sampler.make_schedule(ddim_num_steps=steps+1, ddim_eta=self.eta, ddim_discretize=p.ddim_discretize, verbose=False)
x1 = self.sampler.stochastic_encode(x, torch.tensor([t_enc] * int(x.shape[0])).to(shared.device), noise=noise) x1 = self.sampler.stochastic_encode(x, torch.tensor([t_enc] * int(x.shape[0])).to(shared.device), noise=noise)
self.init_latent = x self.init_latent = x
@ -239,18 +246,14 @@ class VanillaStableDiffusionSampler:
self.last_latent = x self.last_latent = x
self.step = 0 self.step = 0
steps = steps or p.steps steps = self.adjust_steps_if_invalid(p, steps or p.steps)
# Wrap the conditioning models with additional image conditioning for inpainting model # Wrap the conditioning models with additional image conditioning for inpainting model
if image_conditioning is not None: if image_conditioning is not None:
conditioning = {"c_concat": [image_conditioning], "c_crossattn": [conditioning]} conditioning = {"c_concat": [image_conditioning], "c_crossattn": [conditioning]}
unconditional_conditioning = {"c_concat": [image_conditioning], "c_crossattn": [unconditional_conditioning]} unconditional_conditioning = {"c_concat": [image_conditioning], "c_crossattn": [unconditional_conditioning]}
# existing code fails with certain step counts, like 9
try:
samples_ddim = self.launch_sampling(steps, lambda: self.sampler.sample(S=steps, conditioning=conditioning, batch_size=int(x.shape[0]), shape=x[0].shape, verbose=False, unconditional_guidance_scale=p.cfg_scale, unconditional_conditioning=unconditional_conditioning, x_T=x, eta=self.eta)[0]) samples_ddim = self.launch_sampling(steps, lambda: self.sampler.sample(S=steps, conditioning=conditioning, batch_size=int(x.shape[0]), shape=x[0].shape, verbose=False, unconditional_guidance_scale=p.cfg_scale, unconditional_conditioning=unconditional_conditioning, x_T=x, eta=self.eta)[0])
except Exception:
samples_ddim = self.launch_sampling(steps, lambda: self.sampler.sample(S=steps+1, conditioning=conditioning, batch_size=int(x.shape[0]), shape=x[0].shape, verbose=False, unconditional_guidance_scale=p.cfg_scale, unconditional_conditioning=unconditional_conditioning, x_T=x, eta=self.eta)[0])
return samples_ddim return samples_ddim
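adjust_steps_if_invalid() replaces the old try/except retry shown on the removed lines: for uniform DDIM discretization (and always for PLMS) certain step counts produce a schedule that make_schedule rejects, so the count is bumped to the next usable value. A hedged, stand-alone check of the arithmetic for the classic failing case of 9 steps:

    # hedged check of the arithmetic used by adjust_steps_if_invalid above
    from math import floor

    def adjusted(num_steps):
        valid_step = 999 / (1000 // num_steps)
        return int(valid_step) + 1 if valid_step == floor(valid_step) else num_steps

    assert adjusted(9) == 10     # 1000 // 9 = 111, 999 / 111 = 9.0 exactly -> bumped to 10
    assert adjusted(20) == 20    # 999 / 50 = 19.98 -> not an integer, left unchanged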
@ -278,6 +281,12 @@ class CFGDenoiser(torch.nn.Module):
image_cond_in = torch.cat([torch.stack([image_cond[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [image_cond]) image_cond_in = torch.cat([torch.stack([image_cond[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [image_cond])
sigma_in = torch.cat([torch.stack([sigma[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [sigma]) sigma_in = torch.cat([torch.stack([sigma[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [sigma])
denoiser_params = CFGDenoiserParams(x_in, image_cond_in, sigma_in, state.sampling_step, state.sampling_steps)
cfg_denoiser_callback(denoiser_params)
x_in = denoiser_params.x
image_cond_in = denoiser_params.image_cond
sigma_in = denoiser_params.sigma
if tensor.shape[1] == uncond.shape[1]: if tensor.shape[1] == uncond.shape[1]:
cond_in = torch.cat([tensor, uncond]) cond_in = torch.cat([tensor, uncond])
207
modules/sd_vae.py Normal file
@ -0,0 +1,207 @@
import torch
import os
from collections import namedtuple
from modules import shared, devices, script_callbacks
from modules.paths import models_path
import glob
model_dir = "Stable-diffusion"
model_path = os.path.abspath(os.path.join(models_path, model_dir))
vae_dir = "VAE"
vae_path = os.path.abspath(os.path.join(models_path, vae_dir))
vae_ignore_keys = {"model_ema.decay", "model_ema.num_updates"}
default_vae_dict = {"auto": "auto", "None": "None"}
default_vae_list = ["auto", "None"]
default_vae_values = [default_vae_dict[x] for x in default_vae_list]
vae_dict = dict(default_vae_dict)
vae_list = list(default_vae_list)
first_load = True
base_vae = None
loaded_vae_file = None
checkpoint_info = None
def get_base_vae(model):
if base_vae is not None and checkpoint_info == model.sd_checkpoint_info and model:
return base_vae
return None
def store_base_vae(model):
global base_vae, checkpoint_info
if checkpoint_info != model.sd_checkpoint_info:
base_vae = model.first_stage_model.state_dict().copy()
checkpoint_info = model.sd_checkpoint_info
def delete_base_vae():
global base_vae, checkpoint_info
base_vae = None
checkpoint_info = None
def restore_base_vae(model):
global base_vae, checkpoint_info
if base_vae is not None and checkpoint_info == model.sd_checkpoint_info:
load_vae_dict(model, base_vae)
delete_base_vae()
def get_filename(filepath):
return os.path.splitext(os.path.basename(filepath))[0]
def refresh_vae_list(vae_path=vae_path, model_path=model_path):
global vae_dict, vae_list
res = {}
candidates = [
*glob.iglob(os.path.join(model_path, '**/*.vae.ckpt'), recursive=True),
*glob.iglob(os.path.join(model_path, '**/*.vae.pt'), recursive=True),
*glob.iglob(os.path.join(vae_path, '**/*.ckpt'), recursive=True),
*glob.iglob(os.path.join(vae_path, '**/*.pt'), recursive=True)
]
if shared.cmd_opts.vae_path is not None and os.path.isfile(shared.cmd_opts.vae_path):
candidates.append(shared.cmd_opts.vae_path)
for filepath in candidates:
name = get_filename(filepath)
res[name] = filepath
vae_list.clear()
vae_list.extend(default_vae_list)
vae_list.extend(list(res.keys()))
vae_dict.clear()
vae_dict.update(res)
vae_dict.update(default_vae_dict)
return vae_list
def resolve_vae(checkpoint_file, vae_file="auto"):
global first_load, vae_dict, vae_list
# if vae_file argument is provided, it takes priority, but is not saved
if vae_file and vae_file not in default_vae_list:
if not os.path.isfile(vae_file):
vae_file = "auto"
print("VAE provided as function argument doesn't exist")
# for the first load, if vae-path is provided, it takes priority, is saved, and failure is reported
if first_load and shared.cmd_opts.vae_path is not None:
if os.path.isfile(shared.cmd_opts.vae_path):
vae_file = shared.cmd_opts.vae_path
shared.opts.data['sd_vae'] = get_filename(vae_file)
else:
print("VAE provided as command line argument doesn't exist")
# else, we load from settings
if vae_file == "auto" and shared.opts.sd_vae is not None:
# if saved VAE settings isn't recognized, fallback to auto
vae_file = vae_dict.get(shared.opts.sd_vae, "auto")
# if VAE selected but not found, fallback to auto
if vae_file not in default_vae_values and not os.path.isfile(vae_file):
vae_file = "auto"
print("Selected VAE doesn't exist")
# vae-path cmd arg takes priority for auto
if vae_file == "auto" and shared.cmd_opts.vae_path is not None:
if os.path.isfile(shared.cmd_opts.vae_path):
vae_file = shared.cmd_opts.vae_path
print("Using VAE provided as command line argument")
# if still not found, try look for ".vae.pt" beside model
model_path = os.path.splitext(checkpoint_file)[0]
if vae_file == "auto":
vae_file_try = model_path + ".vae.pt"
if os.path.isfile(vae_file_try):
vae_file = vae_file_try
print("Using VAE found beside selected model")
# if still not found, try look for ".vae.ckpt" beside model
if vae_file == "auto":
vae_file_try = model_path + ".vae.ckpt"
if os.path.isfile(vae_file_try):
vae_file = vae_file_try
print("Using VAE found beside selected model")
# No more fallbacks for auto
if vae_file == "auto":
vae_file = None
# Last check, just because
if vae_file and not os.path.exists(vae_file):
vae_file = None
return vae_file
def load_vae(model, vae_file=None):
global first_load, vae_dict, vae_list, loaded_vae_file
# save_settings = False
if vae_file:
print(f"Loading VAE weights from: {vae_file}")
vae_ckpt = torch.load(vae_file, map_location=shared.weight_load_location)
vae_dict_1 = {k: v for k, v in vae_ckpt["state_dict"].items() if k[0:4] != "loss" and k not in vae_ignore_keys}
load_vae_dict(model, vae_dict_1)
# If vae used is not in dict, update it
# It will be removed on refresh though
vae_opt = get_filename(vae_file)
if vae_opt not in vae_dict:
vae_dict[vae_opt] = vae_file
vae_list.append(vae_opt)
loaded_vae_file = vae_file
"""
# Save current VAE to VAE settings, maybe? will it work?
if save_settings:
if vae_file is None:
vae_opt = "None"
# shared.opts.sd_vae = vae_opt
"""
first_load = False
# don't call this from outside
def load_vae_dict(model, vae_dict_1=None):
if vae_dict_1:
store_base_vae(model)
model.first_stage_model.load_state_dict(vae_dict_1)
else:
restore_base_vae(model)
model.first_stage_model.to(devices.dtype_vae)
def reload_vae_weights(sd_model=None, vae_file="auto"):
from modules import lowvram, devices, sd_hijack
if not sd_model:
sd_model = shared.sd_model
checkpoint_info = sd_model.sd_checkpoint_info
checkpoint_file = checkpoint_info.filename
vae_file = resolve_vae(checkpoint_file, vae_file=vae_file)
if loaded_vae_file == vae_file:
return
if shared.cmd_opts.lowvram or shared.cmd_opts.medvram:
lowvram.send_everything_to_cpu()
else:
sd_model.to(devices.cpu)
sd_hijack.model_hijack.undo_hijack(sd_model)
load_vae(sd_model, vae_file)
sd_hijack.model_hijack.hijack(sd_model)
script_callbacks.model_loaded_callback(sd_model)
if not shared.cmd_opts.lowvram and not shared.cmd_opts.medvram:
sd_model.to(devices.device)
print(f"VAE Weights loaded.")
return sd_model
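The new modules/sd_vae.py centralizes the VAE selection that used to live inline in load_model_weights(). A hedged sketch of the lookup order implemented by resolve_vae() (the checkpoint path is an example):

    # hedged sketch of the resolution order implemented above
    from modules import sd_vae

    sd_vae.refresh_vae_list()     # scans models/VAE plus *.vae.pt / *.vae.ckpt beside checkpoints
    print(sd_vae.resolve_vae("models/Stable-diffusion/example.ckpt"))   # path is an example
    # resolution order: explicit vae_file argument > --vae-path on first load > the sd_vae
    # setting > --vae-path again > example.vae.pt / example.vae.ckpt beside the checkpoint > None
    sd_vae.reload_vae_weights()   # swap the VAE on the live model, handling hijack and device moves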
@ -4,6 +4,7 @@ import json
import os import os
import sys import sys
from collections import OrderedDict from collections import OrderedDict
import time
import gradio as gr import gradio as gr
import tqdm import tqdm
@ -14,7 +15,7 @@ import modules.memmon
import modules.sd_models import modules.sd_models
import modules.styles import modules.styles
import modules.devices as devices import modules.devices as devices
from modules import sd_samplers, sd_models, localization from modules import sd_samplers, sd_models, localization, sd_vae
from modules.hypernetworks import hypernetwork from modules.hypernetworks import hypernetwork
from modules.paths import models_path, script_path, sd_path from modules.paths import models_path, script_path, sd_path
@ -40,7 +41,7 @@ parser.add_argument("--lowram", action='store_true', help="load stable diffusion
parser.add_argument("--always-batch-cond-uncond", action='store_true', help="disables cond/uncond batching that is enabled to save memory with --medvram or --lowvram") parser.add_argument("--always-batch-cond-uncond", action='store_true', help="disables cond/uncond batching that is enabled to save memory with --medvram or --lowvram")
parser.add_argument("--unload-gfpgan", action='store_true', help="does not do anything.") parser.add_argument("--unload-gfpgan", action='store_true', help="does not do anything.")
parser.add_argument("--precision", type=str, help="evaluate at this precision", choices=["full", "autocast"], default="autocast") parser.add_argument("--precision", type=str, help="evaluate at this precision", choices=["full", "autocast"], default="autocast")
parser.add_argument("--share", action='store_true', help="use share=True for gradio and make the UI accessible through their site (doesn't work for me but you might have better luck)") parser.add_argument("--share", action='store_true', help="use share=True for gradio and make the UI accessible through their site")
parser.add_argument("--ngrok", type=str, help="ngrok authtoken, alternative to gradio --share", default=None) parser.add_argument("--ngrok", type=str, help="ngrok authtoken, alternative to gradio --share", default=None)
parser.add_argument("--ngrok-region", type=str, help="The region in which ngrok should start.", default="us") parser.add_argument("--ngrok-region", type=str, help="The region in which ngrok should start.", default="us")
parser.add_argument("--codeformer-models-path", type=str, help="Path to directory with codeformer model file(s).", default=os.path.join(models_path, 'Codeformer')) parser.add_argument("--codeformer-models-path", type=str, help="Path to directory with codeformer model file(s).", default=os.path.join(models_path, 'Codeformer'))
@ -51,6 +52,7 @@ parser.add_argument("--realesrgan-models-path", type=str, help="Path to director
parser.add_argument("--scunet-models-path", type=str, help="Path to directory with ScuNET model file(s).", default=os.path.join(models_path, 'ScuNET')) parser.add_argument("--scunet-models-path", type=str, help="Path to directory with ScuNET model file(s).", default=os.path.join(models_path, 'ScuNET'))
parser.add_argument("--swinir-models-path", type=str, help="Path to directory with SwinIR model file(s).", default=os.path.join(models_path, 'SwinIR')) parser.add_argument("--swinir-models-path", type=str, help="Path to directory with SwinIR model file(s).", default=os.path.join(models_path, 'SwinIR'))
parser.add_argument("--ldsr-models-path", type=str, help="Path to directory with LDSR model file(s).", default=os.path.join(models_path, 'LDSR')) parser.add_argument("--ldsr-models-path", type=str, help="Path to directory with LDSR model file(s).", default=os.path.join(models_path, 'LDSR'))
parser.add_argument("--clip-models-path", type=str, help="Path to directory with CLIP model file(s).", default=None)
parser.add_argument("--xformers", action='store_true', help="enable xformers for cross attention layers") parser.add_argument("--xformers", action='store_true', help="enable xformers for cross attention layers")
parser.add_argument("--force-enable-xformers", action='store_true', help="enable xformers for cross attention layers regardless of whether the checking code thinks you can run it; do not make bug reports if this fails to work") parser.add_argument("--force-enable-xformers", action='store_true', help="enable xformers for cross attention layers regardless of whether the checking code thinks you can run it; do not make bug reports if this fails to work")
parser.add_argument("--deepdanbooru", action='store_true', help="enable deepdanbooru interrogator") parser.add_argument("--deepdanbooru", action='store_true', help="enable deepdanbooru interrogator")
@ -97,6 +99,8 @@ restricted_opts = {
"outdir_save", "outdir_save",
} }
cmd_opts.disable_extension_access = cmd_opts.share or cmd_opts.listen
devices.device, devices.device_interrogate, devices.device_gfpgan, devices.device_swinir, devices.device_esrgan, devices.device_scunet, devices.device_codeformer = \ devices.device, devices.device_interrogate, devices.device_gfpgan, devices.device_swinir, devices.device_esrgan, devices.device_scunet, devices.device_codeformer = \
(devices.cpu if any(y in cmd_opts.use_cpu for y in [x, 'all']) else devices.get_optimal_device() for x in ['sd', 'interrogate', 'gfpgan', 'swinir', 'esrgan', 'scunet', 'codeformer']) (devices.cpu if any(y in cmd_opts.use_cpu for y in [x, 'all']) else devices.get_optimal_device() for x in ['sd', 'interrogate', 'gfpgan', 'swinir', 'esrgan', 'scunet', 'codeformer'])
@ -132,6 +136,8 @@ class State:
current_image = None current_image = None
current_image_sampling_step = 0 current_image_sampling_step = 0
textinfo = None textinfo = None
time_start = None
need_restart = False
def skip(self): def skip(self):
self.skipped = True self.skipped = True
@ -168,6 +174,7 @@ class State:
self.skipped = False self.skipped = False
self.interrupted = False self.interrupted = False
self.textinfo = None self.textinfo = None
self.time_start = time.time()
devices.torch_gc() devices.torch_gc()
@ -177,6 +184,20 @@ class State:
devices.torch_gc() devices.torch_gc()
"""sets self.current_image from self.current_latent if enough sampling steps have been made after the last call to this"""
def set_current_image(self):
if not parallel_processing_allowed:
return
if self.sampling_step - self.current_image_sampling_step >= opts.show_progress_every_n_steps and self.current_latent is not None:
if opts.show_progress_grid:
self.current_image = sd_samplers.samples_to_image_grid(self.current_latent)
else:
self.current_image = sd_samplers.sample_to_image(self.current_latent)
self.current_image_sampling_step = self.sampling_step
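With the preview logic consolidated into State, the periodic progress check in ui.py reduces to a single call (see the ui.py hunk later in this diff). A condensed sketch of that caller, assuming the webui's shared module:

    from modules import shared
    from modules.shared import opts

    if opts.show_progress_every_n_steps > 0:
        shared.state.set_current_image()
    image = shared.state.current_image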
state = State() state = State()
artist_db = modules.artists.ArtistsDatabase(os.path.join(script_path, 'artists.csv')) artist_db = modules.artists.ArtistsDatabase(os.path.join(script_path, 'artists.csv'))
@ -234,6 +255,8 @@ options_templates.update(options_section(('saving-images', "Saving images/grids"
"enable_pnginfo": OptionInfo(True, "Save text information about generation parameters as chunks to png files"), "enable_pnginfo": OptionInfo(True, "Save text information about generation parameters as chunks to png files"),
"save_txt": OptionInfo(False, "Create a text file next to every image with generation parameters."), "save_txt": OptionInfo(False, "Create a text file next to every image with generation parameters."),
"save_images_before_face_restoration": OptionInfo(False, "Save a copy of image before doing face restoration."), "save_images_before_face_restoration": OptionInfo(False, "Save a copy of image before doing face restoration."),
"save_images_before_highres_fix": OptionInfo(False, "Save a copy of image before applying highres fix."),
"save_images_before_color_correction": OptionInfo(False, "Save a copy of image before applying color correction to img2img results"),
"jpeg_quality": OptionInfo(80, "Quality for saved jpeg images", gr.Slider, {"minimum": 1, "maximum": 100, "step": 1}), "jpeg_quality": OptionInfo(80, "Quality for saved jpeg images", gr.Slider, {"minimum": 1, "maximum": 100, "step": 1}),
"export_for_4chan": OptionInfo(True, "If PNG image is larger than 4MB or any dimension is larger than 4000, downscale and save copy as JPG"), "export_for_4chan": OptionInfo(True, "If PNG image is larger than 4MB or any dimension is larger than 4000, downscale and save copy as JPG"),
@ -285,21 +308,22 @@ options_templates.update(options_section(('system', "System"), {
})) }))
options_templates.update(options_section(('training', "Training"), { options_templates.update(options_section(('training', "Training"), {
"unload_models_when_training": OptionInfo(False, "Move VAE and CLIP to RAM when training hypernetwork. Saves VRAM."), "unload_models_when_training": OptionInfo(False, "Move VAE and CLIP to RAM when training if possible. Saves VRAM."),
"dataset_filename_word_regex": OptionInfo("", "Filename word regex"), "dataset_filename_word_regex": OptionInfo("", "Filename word regex"),
"dataset_filename_join_string": OptionInfo(" ", "Filename join string"), "dataset_filename_join_string": OptionInfo(" ", "Filename join string"),
"training_image_repeats_per_epoch": OptionInfo(1, "Number of repeats for a single input image per epoch; used only for displaying epoch number", gr.Number, {"precision": 0}), "training_image_repeats_per_epoch": OptionInfo(1, "Number of repeats for a single input image per epoch; used only for displaying epoch number", gr.Number, {"precision": 0}),
"training_write_csv_every": OptionInfo(500, "Save an csv containing the loss to log directory every N steps, 0 to disable"), "training_write_csv_every": OptionInfo(500, "Save an csv containing the loss to log directory every N steps, 0 to disable"),
"training_xattention_optimizations": OptionInfo(False, "Use cross attention optimizations while training"),
})) }))
options_templates.update(options_section(('sd', "Stable Diffusion"), { options_templates.update(options_section(('sd', "Stable Diffusion"), {
"sd_model_checkpoint": OptionInfo(None, "Stable Diffusion checkpoint", gr.Dropdown, lambda: {"choices": modules.sd_models.checkpoint_tiles()}, refresh=sd_models.list_models), "sd_model_checkpoint": OptionInfo(None, "Stable Diffusion checkpoint", gr.Dropdown, lambda: {"choices": modules.sd_models.checkpoint_tiles()}, refresh=sd_models.list_models),
"sd_checkpoint_cache": OptionInfo(0, "Checkpoints to cache in RAM", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}), "sd_checkpoint_cache": OptionInfo(0, "Checkpoints to cache in RAM", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}),
"sd_vae": OptionInfo("auto", "SD VAE", gr.Dropdown, lambda: {"choices": list(sd_vae.vae_list)}, refresh=sd_vae.refresh_vae_list),
"sd_hypernetwork": OptionInfo("None", "Hypernetwork", gr.Dropdown, lambda: {"choices": ["None"] + [x for x in hypernetworks.keys()]}, refresh=reload_hypernetworks), "sd_hypernetwork": OptionInfo("None", "Hypernetwork", gr.Dropdown, lambda: {"choices": ["None"] + [x for x in hypernetworks.keys()]}, refresh=reload_hypernetworks),
"sd_hypernetwork_strength": OptionInfo(1.0, "Hypernetwork strength", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.001}), "sd_hypernetwork_strength": OptionInfo(1.0, "Hypernetwork strength", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.001}),
"inpainting_mask_weight": OptionInfo(1.0, "Inpainting conditioning mask strength", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}), "inpainting_mask_weight": OptionInfo(1.0, "Inpainting conditioning mask strength", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
"img2img_color_correction": OptionInfo(False, "Apply color correction to img2img results to match original colors."), "img2img_color_correction": OptionInfo(False, "Apply color correction to img2img results to match original colors."),
"save_images_before_color_correction": OptionInfo(False, "Save a copy of image before applying color correction to img2img results"),
"img2img_fix_steps": OptionInfo(False, "With img2img, do exactly the amount of steps the slider specifies (normally you'd do less with less denoising)."), "img2img_fix_steps": OptionInfo(False, "With img2img, do exactly the amount of steps the slider specifies (normally you'd do less with less denoising)."),
"enable_quantization": OptionInfo(False, "Enable quantization in K samplers for sharper and cleaner results. This may change existing seeds. Requires restart to apply."), "enable_quantization": OptionInfo(False, "Enable quantization in K samplers for sharper and cleaner results. This may change existing seeds. Requires restart to apply."),
"enable_emphasis": OptionInfo(True, "Emphasis: use (text) to make model pay more attention to text and [text] to make it pay less attention"), "enable_emphasis": OptionInfo(True, "Emphasis: use (text) to make model pay more attention to text and [text] to make it pay less attention"),
@ -354,6 +378,12 @@ options_templates.update(options_section(('sampler-params', "Sampler parameters"
'eta_noise_seed_delta': OptionInfo(0, "Eta noise seed delta", gr.Number, {"precision": 0}), 'eta_noise_seed_delta': OptionInfo(0, "Eta noise seed delta", gr.Number, {"precision": 0}),
})) }))
options_templates.update(options_section((None, "Hidden options"), {
"disabled_extensions": OptionInfo([], "Disable those extensions"),
}))
options_templates.update()
class Options: class Options:
data = None data = None
@ -365,8 +395,9 @@ class Options:
def __setattr__(self, key, value): def __setattr__(self, key, value):
if self.data is not None: if self.data is not None:
if key in self.data: if key in self.data or key in self.data_labels:
self.data[key] = value self.data[key] = value
return
return super(Options, self).__setattr__(key, value) return super(Options, self).__setattr__(key, value)
@ -407,10 +438,11 @@ class Options:
if bad_settings > 0: if bad_settings > 0:
print(f"The program is likely to not work with bad settings.\nSettings file: {filename}\nEither fix the file, or delete it and restart.", file=sys.stderr) print(f"The program is likely to not work with bad settings.\nSettings file: {filename}\nEither fix the file, or delete it and restart.", file=sys.stderr)
def onchange(self, key, func): def onchange(self, key, func, call=True):
item = self.data_labels.get(key) item = self.data_labels.get(key)
item.onchange = func item.onchange = func
if call:
func() func()
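The new call flag lets a handler be registered without firing it immediately. Below is a tiny self-contained demo of the semantics using a stand-in class (not the real Options); the real use appears in the webui.py hunk later in this diff, where the sd_vae handler is registered with call=False because load_model() has already applied the right VAE at startup:

    class MiniOptions:
        def __init__(self):
            self.handlers = {}

        def onchange(self, key, func, call=True):
            self.handlers[key] = func
            if call:
                func()

    opts = MiniOptions()
    opts.onchange("sd_hypernetwork", lambda: print("runs immediately"))               # default call=True
    opts.onchange("sd_vae", lambda: print("runs only on later changes"), call=False)  # deferred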
def dumpjson(self): def dumpjson(self):

View File

@ -235,6 +235,7 @@ def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_direc
filename = os.path.join(shared.cmd_opts.embeddings_dir, f'{embedding_name}.pt') filename = os.path.join(shared.cmd_opts.embeddings_dir, f'{embedding_name}.pt')
log_directory = os.path.join(log_directory, datetime.datetime.now().strftime("%Y-%m-%d"), embedding_name) log_directory = os.path.join(log_directory, datetime.datetime.now().strftime("%Y-%m-%d"), embedding_name)
unload = shared.opts.unload_models_when_training
if save_embedding_every > 0: if save_embedding_every > 0:
embedding_dir = os.path.join(log_directory, "embeddings") embedding_dir = os.path.join(log_directory, "embeddings")
@ -277,6 +278,8 @@ def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_direc
shared.state.textinfo = f"Preparing dataset from {html.escape(data_root)}..." shared.state.textinfo = f"Preparing dataset from {html.escape(data_root)}..."
with torch.autocast("cuda"): with torch.autocast("cuda"):
ds = modules.textual_inversion.dataset.PersonalizedBase(data_root=data_root, width=training_width, height=training_height, repeats=shared.opts.training_image_repeats_per_epoch, placeholder_token=embedding_name, model=shared.sd_model, device=devices.device, template_file=template_file, batch_size=batch_size) ds = modules.textual_inversion.dataset.PersonalizedBase(data_root=data_root, width=training_width, height=training_height, repeats=shared.opts.training_image_repeats_per_epoch, placeholder_token=embedding_name, model=shared.sd_model, device=devices.device, template_file=template_file, batch_size=batch_size)
if unload:
shared.sd_model.first_stage_model.to(devices.cpu)
embedding.vec.requires_grad = True embedding.vec.requires_grad = True
optimizer = torch.optim.AdamW([embedding.vec], lr=scheduler.learn_rate) optimizer = torch.optim.AdamW([embedding.vec], lr=scheduler.learn_rate)
@ -342,6 +345,9 @@ def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_direc
if images_dir is not None and steps_done % create_image_every == 0: if images_dir is not None and steps_done % create_image_every == 0:
forced_filename = f'{embedding_name}-{steps_done}' forced_filename = f'{embedding_name}-{steps_done}'
last_saved_image = os.path.join(images_dir, forced_filename) last_saved_image = os.path.join(images_dir, forced_filename)
shared.sd_model.first_stage_model.to(devices.device)
p = processing.StableDiffusionProcessingTxt2Img( p = processing.StableDiffusionProcessingTxt2Img(
sd_model=shared.sd_model, sd_model=shared.sd_model,
do_not_save_grid=True, do_not_save_grid=True,
@ -369,6 +375,9 @@ def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_direc
processed = processing.process_images(p) processed = processing.process_images(p)
image = processed.images[0] image = processed.images[0]
if unload:
shared.sd_model.first_stage_model.to(devices.cpu)
shared.state.current_image = image shared.state.current_image = image
if save_image_with_stored_embedding and os.path.exists(last_saved_file) and embedding_yet_to_be_embedded: if save_image_with_stored_embedding and os.path.exists(last_saved_file) and embedding_yet_to_be_embedded:
@ -414,6 +423,7 @@ Last saved image: {html.escape(last_saved_image)}<br/>
filename = os.path.join(shared.cmd_opts.embeddings_dir, f'{embedding_name}.pt') filename = os.path.join(shared.cmd_opts.embeddings_dir, f'{embedding_name}.pt')
save_embedding(embedding, checkpoint, embedding_name, filename, remove_cached_checksum=True) save_embedding(embedding, checkpoint, embedding_name, filename, remove_cached_checksum=True)
shared.sd_model.first_stage_model.to(devices.device)
return embedding, filename return embedding, filename
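A condensed sketch of the VRAM-saving pattern these hunks add, assuming a loaded model; the actual code restores the module before generating preview images and once more at the end of training rather than via try/finally:

    from modules import shared, devices

    unload = shared.opts.unload_models_when_training
    if unload:
        shared.sd_model.first_stage_model.to(devices.cpu)   # VAE is idle while training on latents
    try:
        ...  # training steps
    finally:
        shared.sd_model.first_stage_model.to(devices.device)  # restore before decoding previews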

View File

@ -25,7 +25,9 @@ def train_embedding(*args):
assert not shared.cmd_opts.lowvram, 'Training models with lowvram not possible' assert not shared.cmd_opts.lowvram, 'Training models with lowvram not possible'
apply_optimizations = shared.opts.training_xattention_optimizations
try: try:
if not apply_optimizations:
sd_hijack.undo_optimizations() sd_hijack.undo_optimizations()
embedding, filename = modules.textual_inversion.textual_inversion.train_embedding(*args) embedding, filename = modules.textual_inversion.textual_inversion.train_embedding(*args)
@ -38,5 +40,6 @@ Embedding saved to {html.escape(filename)}
except Exception: except Exception:
raise raise
finally: finally:
if not apply_optimizations:
sd_hijack.apply_optimizations() sd_hijack.apply_optimizations()

View File

@ -47,6 +47,8 @@ def txt2img(prompt: str, negative_prompt: str, prompt_style: str, prompt_style2:
if processed is None: if processed is None:
processed = process_images(p) processed = process_images(p)
p.close()
shared.total_tqdm.clear() shared.total_tqdm.clear()
generation_info_js = processed.js() generation_info_js = processed.js()

View File

@ -19,7 +19,7 @@ import numpy as np
from PIL import Image, PngImagePlugin from PIL import Image, PngImagePlugin
from modules import sd_hijack, sd_models, localization, script_callbacks from modules import sd_hijack, sd_models, localization, script_callbacks, ui_extensions
from modules.paths import script_path from modules.paths import script_path
from modules.shared import opts, cmd_opts, restricted_opts from modules.shared import opts, cmd_opts, restricted_opts
@ -277,15 +277,7 @@ def check_progress_call(id_part):
preview_visibility = gr_show(False) preview_visibility = gr_show(False)
if opts.show_progress_every_n_steps > 0: if opts.show_progress_every_n_steps > 0:
if shared.parallel_processing_allowed: shared.state.set_current_image()
if shared.state.sampling_step - shared.state.current_image_sampling_step >= opts.show_progress_every_n_steps and shared.state.current_latent is not None:
if opts.show_progress_grid:
shared.state.current_image = modules.sd_samplers.samples_to_image_grid(shared.state.current_latent)
else:
shared.state.current_image = modules.sd_samplers.sample_to_image(shared.state.current_latent)
shared.state.current_image_sampling_step = shared.state.sampling_step
image = shared.state.current_image image = shared.state.current_image
if image is None: if image is None:
@ -671,6 +663,9 @@ def create_ui(wrap_gradio_gpu_call):
import modules.img2img import modules.img2img
import modules.txt2img import modules.txt2img
reload_javascript()
parameters_copypaste.reset()
with gr.Blocks(analytics_enabled=False) as txt2img_interface: with gr.Blocks(analytics_enabled=False) as txt2img_interface:
txt2img_prompt, roll, txt2img_prompt_style, txt2img_negative_prompt, txt2img_prompt_style2, submit, _, _, txt2img_prompt_style_apply, txt2img_save_style, txt2img_paste, token_counter, token_button = create_toprow(is_img2img=False) txt2img_prompt, roll, txt2img_prompt_style, txt2img_negative_prompt, txt2img_prompt_style2, submit, _, _, txt2img_prompt_style_apply, txt2img_save_style, txt2img_paste, token_counter, token_button = create_toprow(is_img2img=False)
@ -1059,7 +1054,7 @@ def create_ui(wrap_gradio_gpu_call):
with gr.Tabs(elem_id="extras_resize_mode"): with gr.Tabs(elem_id="extras_resize_mode"):
with gr.TabItem('Scale by'): with gr.TabItem('Scale by'):
upscaling_resize = gr.Slider(minimum=1.0, maximum=4.0, step=0.05, label="Resize", value=2) upscaling_resize = gr.Slider(minimum=1.0, maximum=8.0, step=0.05, label="Resize", value=4)
with gr.TabItem('Scale to'): with gr.TabItem('Scale to'):
with gr.Group(): with gr.Group():
with gr.Row(): with gr.Row():
@ -1517,8 +1512,9 @@ def create_ui(wrap_gradio_gpu_call):
column = None column = None
with gr.Row(elem_id="settings").style(equal_height=False): with gr.Row(elem_id="settings").style(equal_height=False):
for i, (k, item) in enumerate(opts.data_labels.items()): for i, (k, item) in enumerate(opts.data_labels.items()):
section_must_be_skipped = item.section[0] is None
if previous_section != item.section: if previous_section != item.section and not section_must_be_skipped:
if cols_displayed < settings_cols and (items_displayed >= items_per_col or previous_section is None): if cols_displayed < settings_cols and (items_displayed >= items_per_col or previous_section is None):
if column is not None: if column is not None:
column.__exit__() column.__exit__()
@ -1537,6 +1533,8 @@ def create_ui(wrap_gradio_gpu_call):
if k in quicksettings_names and not shared.cmd_opts.freeze_settings: if k in quicksettings_names and not shared.cmd_opts.freeze_settings:
quicksettings_list.append((i, k, item)) quicksettings_list.append((i, k, item))
components.append(dummy_component) components.append(dummy_component)
elif section_must_be_skipped:
components.append(dummy_component)
else: else:
component = create_setting_component(k) component = create_setting_component(k)
component_dict[k] = component component_dict[k] = component
@ -1572,19 +1570,19 @@ def create_ui(wrap_gradio_gpu_call):
reload_script_bodies.click( reload_script_bodies.click(
fn=reload_scripts, fn=reload_scripts,
inputs=[], inputs=[],
outputs=[], outputs=[]
_js='function(){}'
) )
def request_restart(): def request_restart():
shared.state.interrupt() shared.state.interrupt()
settings_interface.gradio_ref.do_restart = True shared.state.need_restart = True
restart_gradio.click( restart_gradio.click(
fn=request_restart, fn=request_restart,
inputs=[], inputs=[],
outputs=[], outputs=[],
_js='function(){restart_reload()}' _js='restart_reload'
) )
if column is not None: if column is not None:
@ -1618,14 +1616,15 @@ def create_ui(wrap_gradio_gpu_call):
interfaces += script_callbacks.ui_tabs_callback() interfaces += script_callbacks.ui_tabs_callback()
interfaces += [(settings_interface, "Settings", "settings")] interfaces += [(settings_interface, "Settings", "settings")]
extensions_interface = ui_extensions.create_ui()
interfaces += [(extensions_interface, "Extensions", "extensions")]
with gr.Blocks(css=css, analytics_enabled=False, title="Stable Diffusion") as demo: with gr.Blocks(css=css, analytics_enabled=False, title="Stable Diffusion") as demo:
with gr.Row(elem_id="quicksettings"): with gr.Row(elem_id="quicksettings"):
for i, k, item in quicksettings_list: for i, k, item in quicksettings_list:
component = create_setting_component(k, is_quicksettings=True) component = create_setting_component(k, is_quicksettings=True)
component_dict[k] = component component_dict[k] = component
settings_interface.gradio_ref = demo
parameters_copypaste.integrate_settings_paste_fields(component_dict) parameters_copypaste.integrate_settings_paste_fields(component_dict)
parameters_copypaste.run_bind() parameters_copypaste.run_bind()
@ -1782,4 +1781,3 @@ def load_javascript(raw_response):
reload_javascript = partial(load_javascript, gradio.routes.templates.TemplateResponse) reload_javascript = partial(load_javascript, gradio.routes.templates.TemplateResponse)
reload_javascript()

268
modules/ui_extensions.py Normal file
View File

@ -0,0 +1,268 @@
import json
import os.path
import shutil
import sys
import time
import traceback
import git
import gradio as gr
import html
from modules import extensions, shared, paths
available_extensions = {"extensions": []}
def check_access():
assert not shared.cmd_opts.disable_extension_access, "extension access disabled because of command line flags"
def apply_and_restart(disable_list, update_list):
check_access()
disabled = json.loads(disable_list)
assert type(disabled) == list, f"wrong disable_list data for apply_and_restart: {disable_list}"
update = json.loads(update_list)
assert type(update) == list, f"wrong update_list data for apply_and_restart: {update_list}"
update = set(update)
for ext in extensions.extensions:
if ext.name not in update:
continue
try:
ext.pull()
except Exception:
print(f"Error pulling updates for {ext.name}:", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
shared.opts.disabled_extensions = disabled
shared.opts.save(shared.config_filename)
shared.state.interrupt()
shared.state.need_restart = True
def check_updates():
check_access()
for ext in extensions.extensions:
if ext.remote is None:
continue
try:
ext.check_updates()
except Exception:
print(f"Error checking updates for {ext.name}:", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
return extension_table()
def extension_table():
code = f"""<!-- {time.time()} -->
<table id="extensions">
<thead>
<tr>
<th><abbr title="Use checkbox to enable the extension; it will be enabled or disabled when you click apply button">Extension</abbr></th>
<th>URL</th>
<th><abbr title="Use checkbox to mark the extension for update; it will be updated when you click apply button">Update</abbr></th>
</tr>
</thead>
<tbody>
"""
for ext in extensions.extensions:
if ext.can_update:
ext_status = f"""<label><input class="gr-check-radio gr-checkbox" name="update_{html.escape(ext.name)}" checked="checked" type="checkbox">{html.escape(ext.status)}</label>"""
else:
ext_status = ext.status
code += f"""
<tr>
<td><label><input class="gr-check-radio gr-checkbox" name="enable_{html.escape(ext.name)}" type="checkbox" {'checked="checked"' if ext.enabled else ''}>{html.escape(ext.name)}</label></td>
<td><a href="{html.escape(ext.remote or '')}">{html.escape(ext.remote or '')}</a></td>
<td{' class="extension_status"' if ext.remote is not None else ''}>{ext_status}</td>
</tr>
"""
code += """
</tbody>
</table>
"""
return code
def normalize_git_url(url):
if url is None:
return ""
url = url.replace(".git", "")
return url
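Illustrative behavior of normalize_git_url with example values; note that str.replace strips ".git" anywhere in the string, not only as a suffix:

    assert normalize_git_url("https://github.com/user/extension.git") == "https://github.com/user/extension"
    assert normalize_git_url(None) == ""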
def install_extension_from_url(dirname, url):
check_access()
assert url, 'No URL specified'
if dirname is None or dirname == "":
*parts, last_part = url.split('/')
last_part = normalize_git_url(last_part)
dirname = last_part
target_dir = os.path.join(extensions.extensions_dir, dirname)
assert not os.path.exists(target_dir), f'Extension directory already exists: {target_dir}'
normalized_url = normalize_git_url(url)
assert len([x for x in extensions.extensions if normalize_git_url(x.remote) == normalized_url]) == 0, 'Extension with this URL is already installed'
tmpdir = os.path.join(paths.script_path, "tmp", dirname)
try:
shutil.rmtree(tmpdir, True)
repo = git.Repo.clone_from(url, tmpdir)
repo.remote().fetch()
os.rename(tmpdir, target_dir)
extensions.list_extensions()
return [extension_table(), html.escape(f"Installed into {target_dir}. Use Installed tab to restart.")]
finally:
shutil.rmtree(tmpdir, True)
def install_extension_from_index(url):
ext_table, message = install_extension_from_url(None, url)
return refresh_available_extensions_from_data(), ext_table, message
def refresh_available_extensions(url):
global available_extensions
import urllib.request
with urllib.request.urlopen(url) as response:
text = response.read()
available_extensions = json.loads(text)
return url, refresh_available_extensions_from_data(), ''
def refresh_available_extensions_from_data():
extlist = available_extensions["extensions"]
installed_extension_urls = {normalize_git_url(extension.remote): extension.name for extension in extensions.extensions}
code = f"""<!-- {time.time()} -->
<table id="available_extensions">
<thead>
<tr>
<th>Extension</th>
<th>Description</th>
<th>Action</th>
</tr>
</thead>
<tbody>
"""
for ext in extlist:
name = ext.get("name", "noname")
url = ext.get("url", None)
description = ext.get("description", "")
if url is None:
continue
existing = installed_extension_urls.get(normalize_git_url(url), None)
install_code = f"""<input onclick="install_extension_from_index(this, '{html.escape(url)}')" type="button" value="{"Install" if not existing else "Installed"}" {"disabled=disabled" if existing else ""} class="gr-button gr-button-lg gr-button-secondary">"""
code += f"""
<tr>
<td><a href="{html.escape(url)}">{html.escape(name)}</a></td>
<td>{html.escape(description)}</td>
<td>{install_code}</td>
</tr>
"""
code += """
</tbody>
</table>
"""
return code
def create_ui():
import modules.ui
with gr.Blocks(analytics_enabled=False) as ui:
with gr.Tabs(elem_id="tabs_extensions") as tabs:
with gr.TabItem("Installed"):
with gr.Row():
apply = gr.Button(value="Apply and restart UI", variant="primary")
check = gr.Button(value="Check for updates")
extensions_disabled_list = gr.Text(elem_id="extensions_disabled_list", visible=False).style(container=False)
extensions_update_list = gr.Text(elem_id="extensions_update_list", visible=False).style(container=False)
extensions_table = gr.HTML(lambda: extension_table())
apply.click(
fn=apply_and_restart,
_js="extensions_apply",
inputs=[extensions_disabled_list, extensions_update_list],
outputs=[],
)
check.click(
fn=check_updates,
_js="extensions_check",
inputs=[],
outputs=[extensions_table],
)
with gr.TabItem("Available"):
with gr.Row():
refresh_available_extensions_button = gr.Button(value="Load from:", variant="primary")
available_extensions_index = gr.Text(value="https://raw.githubusercontent.com/wiki/AUTOMATIC1111/stable-diffusion-webui/Extensions-index.md", label="Extension index URL").style(container=False)
extension_to_install = gr.Text(elem_id="extension_to_install", visible=False)
install_extension_button = gr.Button(elem_id="install_extension_button", visible=False)
install_result = gr.HTML()
available_extensions_table = gr.HTML()
refresh_available_extensions_button.click(
fn=modules.ui.wrap_gradio_call(refresh_available_extensions, extra_outputs=[gr.update(), gr.update()]),
inputs=[available_extensions_index],
outputs=[available_extensions_index, available_extensions_table, install_result],
)
install_extension_button.click(
fn=modules.ui.wrap_gradio_call(install_extension_from_index, extra_outputs=[gr.update(), gr.update()]),
inputs=[extension_to_install],
outputs=[available_extensions_table, extensions_table, install_result],
)
with gr.TabItem("Install from URL"):
install_url = gr.Text(label="URL for extension's git repository")
install_dirname = gr.Text(label="Local directory name", placeholder="Leave empty for auto")
install_button = gr.Button(value="Install", variant="primary")
install_result = gr.HTML(elem_id="extension_install_result")
install_button.click(
fn=modules.ui.wrap_gradio_call(install_extension_from_url, extra_outputs=[gr.update()]),
inputs=[install_dirname, install_url],
outputs=[extensions_table, install_result],
)
return ui

View File

@ -10,6 +10,7 @@ import modules.shared
from modules import modelloader, shared from modules import modelloader, shared
LANCZOS = (Image.Resampling.LANCZOS if hasattr(Image, 'Resampling') else Image.LANCZOS) LANCZOS = (Image.Resampling.LANCZOS if hasattr(Image, 'Resampling') else Image.LANCZOS)
NEAREST = (Image.Resampling.NEAREST if hasattr(Image, 'Resampling') else Image.NEAREST)
from modules.paths import models_path from modules.paths import models_path
@ -57,7 +58,7 @@ class Upscaler:
dest_w = img.width * scale dest_w = img.width * scale
dest_h = img.height * scale dest_h = img.height * scale
for i in range(3): for i in range(3):
if img.width >= dest_w and img.height >= dest_h: if img.width > dest_w and img.height > dest_h:
break break
img = self.do_upscale(img, selected_model) img = self.do_upscale(img, selected_model)
if img.width != dest_w or img.height != dest_h: if img.width != dest_w or img.height != dest_h:
@ -120,3 +121,17 @@ class UpscalerLanczos(Upscaler):
self.name = "Lanczos" self.name = "Lanczos"
self.scalers = [UpscalerData("Lanczos", None, self)] self.scalers = [UpscalerData("Lanczos", None, self)]
class UpscalerNearest(Upscaler):
scalers = []
def do_upscale(self, img, selected_model=None):
return img.resize((int(img.width * self.scale), int(img.height * self.scale)), resample=NEAREST)
def load_model(self, _):
pass
def __init__(self, dirname=None):
super().__init__(False)
self.name = "Nearest"
self.scalers = [UpscalerData("Nearest", None, self)]

View File

@ -4,7 +4,7 @@ fairscale==0.4.4
fonts fonts
font-roboto font-roboto
gfpgan gfpgan
gradio==3.5 gradio==3.8
invisible-watermark invisible-watermark
numpy numpy
omegaconf omegaconf
@ -12,7 +12,7 @@ opencv-python
requests requests
piexif piexif
Pillow Pillow
pytorch_lightning pytorch_lightning==1.7.7
realesrgan realesrgan
scikit-image>=0.19 scikit-image>=0.19
timm==0.4.12 timm==0.4.12
@ -26,3 +26,4 @@ torchdiffeq
kornia kornia
lark lark
inflection inflection
GitPython

View File

@ -2,7 +2,7 @@ transformers==4.19.2
diffusers==0.3.0 diffusers==0.3.0
basicsr==1.4.2 basicsr==1.4.2
gfpgan==1.3.8 gfpgan==1.3.8
gradio==3.5 gradio==3.8
numpy==1.23.3 numpy==1.23.3
Pillow==9.2.0 Pillow==9.2.0
realesrgan==0.3.0 realesrgan==0.3.0
@ -23,3 +23,4 @@ torchdiffeq==0.2.3
kornia==0.6.7 kornia==0.6.7
lark==1.1.2 lark==1.1.2
inflection==0.5.1 inflection==0.5.1
GitPython==3.1.27

View File

@ -166,8 +166,7 @@ class Script(scripts.Script):
if override_strength: if override_strength:
p.denoising_strength = 1.0 p.denoising_strength = 1.0
def sample_extra(conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength, prompts):
def sample_extra(conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength):
lat = (p.init_latent.cpu().numpy() * 10).astype(int) lat = (p.init_latent.cpu().numpy() * 10).astype(int)
same_params = self.cache is not None and self.cache.cfg_scale == cfg and self.cache.steps == st \ same_params = self.cache is not None and self.cache.cfg_scale == cfg and self.cache.steps == st \

View File

@ -96,6 +96,7 @@ class Script(scripts.Script):
def ui(self, is_img2img): def ui(self, is_img2img):
checkbox_iterate = gr.Checkbox(label="Iterate seed every line", value=False) checkbox_iterate = gr.Checkbox(label="Iterate seed every line", value=False)
checkbox_iterate_batch = gr.Checkbox(label="Use same random seed for all lines", value=False)
prompt_txt = gr.Textbox(label="List of prompt inputs", lines=1) prompt_txt = gr.Textbox(label="List of prompt inputs", lines=1)
file = gr.File(label="Upload prompt inputs", type='bytes') file = gr.File(label="Upload prompt inputs", type='bytes')
@ -106,9 +107,9 @@ class Script(scripts.Script):
# We don't shrink back to 1, because that causes the control to ignore [enter], and it may # We don't shrink back to 1, because that causes the control to ignore [enter], and it may
# be unclear to the user that shift-enter is needed. # be unclear to the user that shift-enter is needed.
prompt_txt.change(lambda tb: gr.update(lines=7) if ("\n" in tb) else gr.update(lines=2), inputs=[prompt_txt], outputs=[prompt_txt]) prompt_txt.change(lambda tb: gr.update(lines=7) if ("\n" in tb) else gr.update(lines=2), inputs=[prompt_txt], outputs=[prompt_txt])
return [checkbox_iterate, file, prompt_txt] return [checkbox_iterate, checkbox_iterate_batch, file, prompt_txt]
def run(self, p, checkbox_iterate, file, prompt_txt: str): def run(self, p, checkbox_iterate, checkbox_iterate_batch, file, prompt_txt: str):
lines = [x.strip() for x in prompt_txt.splitlines()] lines = [x.strip() for x in prompt_txt.splitlines()]
lines = [x for x in lines if len(x) > 0] lines = [x for x in lines if len(x) > 0]
@ -137,7 +138,7 @@ class Script(scripts.Script):
jobs.append(args) jobs.append(args)
print(f"Will process {len(lines)} lines in {job_count} jobs.") print(f"Will process {len(lines)} lines in {job_count} jobs.")
if (checkbox_iterate and p.seed == -1): if (checkbox_iterate or checkbox_iterate_batch) and p.seed == -1:
p.seed = int(random.randrange(4294967294)) p.seed = int(random.randrange(4294967294))
state.job_count = job_count state.job_count = job_count
@ -153,7 +154,7 @@ class Script(scripts.Script):
proc = process_images(copy_p) proc = process_images(copy_p)
images += proc.images images += proc.images
if (checkbox_iterate): if checkbox_iterate:
p.seed = p.seed + (p.batch_size * p.n_iter) p.seed = p.seed + (p.batch_size * p.n_iter)

View File

@ -260,6 +260,16 @@ input[type="range"]{
#txt2img_negative_prompt, #img2img_negative_prompt{ #txt2img_negative_prompt, #img2img_negative_prompt{
} }
/* gradio 3.8 adds opacity to progressbar which makes it blink; disable it here */
.transition.opacity-20 {
opacity: 1 !important;
}
/* more of gradio's garbage cleanup */
.min-h-\[4rem\] {
min-height: unset !important;
}
#txt2img_progressbar, #img2img_progressbar, #ti_progressbar{ #txt2img_progressbar, #img2img_progressbar, #ti_progressbar{
position: absolute; position: absolute;
z-index: 1000; z-index: 1000;
@ -491,7 +501,7 @@ input[type="range"]{
padding: 0; padding: 0;
} }
#refresh_sd_model_checkpoint, #refresh_sd_hypernetwork, #refresh_train_hypernetwork_name, #refresh_train_embedding_name, #refresh_localization{ #refresh_sd_model_checkpoint, #refresh_sd_vae, #refresh_sd_hypernetwork, #refresh_train_hypernetwork_name, #refresh_train_embedding_name, #refresh_localization{
max-width: 2.5em; max-width: 2.5em;
min-width: 2.5em; min-width: 2.5em;
height: 2.4em; height: 2.4em;
@ -530,6 +540,29 @@ img2maskimg, #img2maskimg > .h-60, #img2maskimg > .h-60 > div, #img2maskimg > .h
min-height: 480px !important; min-height: 480px !important;
} }
/* Extensions */
#tab_extensions table{
border-collapse: collapse;
}
#tab_extensions table td, #tab_extensions table th{
border: 1px solid #ccc;
padding: 0.25em 0.5em;
}
#tab_extensions table input[type="checkbox"]{
margin-right: 0.5em;
}
#tab_extensions button{
max-width: 16em;
}
#tab_extensions input[disabled="disabled"]{
opacity: 0.5;
}
/* The following handles localization for right-to-left (RTL) languages like Arabic. /* The following handles localization for right-to-left (RTL) languages like Arabic.
The rtl media type will only be activated by the logic in javascript/localization.js. The rtl media type will only be activated by the logic in javascript/localization.js.
If you change anything above, you need to make sure it is RTL compliant by just running If you change anything above, you need to make sure it is RTL compliant by just running

View File

@ -9,7 +9,7 @@ from fastapi.middleware.gzip import GZipMiddleware
from modules.paths import script_path from modules.paths import script_path
from modules import devices, sd_samplers, upscaler from modules import devices, sd_samplers, upscaler, extensions
import modules.codeformer_model as codeformer import modules.codeformer_model as codeformer
import modules.extras import modules.extras
import modules.face_restoration import modules.face_restoration
@ -21,8 +21,10 @@ import modules.paths
import modules.scripts import modules.scripts
import modules.sd_hijack import modules.sd_hijack
import modules.sd_models import modules.sd_models
import modules.sd_vae
import modules.shared as shared import modules.shared as shared
import modules.txt2img import modules.txt2img
import modules.script_callbacks
import modules.ui import modules.ui
from modules import devices from modules import devices
@ -60,6 +62,8 @@ def wrap_gradio_gpu_call(func, extra_outputs=None):
def initialize(): def initialize():
extensions.list_extensions()
if cmd_opts.ui_debug_mode: if cmd_opts.ui_debug_mode:
shared.sd_upscalers = upscaler.UpscalerLanczos().scalers shared.sd_upscalers = upscaler.UpscalerLanczos().scalers
modules.scripts.load_scripts() modules.scripts.load_scripts()
@ -74,8 +78,10 @@ def initialize():
modules.scripts.load_scripts() modules.scripts.load_scripts()
modules.sd_vae.refresh_vae_list()
modules.sd_models.load_model() modules.sd_models.load_model()
shared.opts.onchange("sd_model_checkpoint", wrap_queued_call(lambda: modules.sd_models.reload_model_weights(shared.sd_model))) shared.opts.onchange("sd_model_checkpoint", wrap_queued_call(lambda: modules.sd_models.reload_model_weights()))
shared.opts.onchange("sd_vae", wrap_queued_call(lambda: modules.sd_vae.reload_vae_weights()), call=False)
shared.opts.onchange("sd_hypernetwork", wrap_queued_call(lambda: modules.hypernetworks.hypernetwork.load_hypernetwork(shared.opts.sd_hypernetwork))) shared.opts.onchange("sd_hypernetwork", wrap_queued_call(lambda: modules.hypernetworks.hypernetwork.load_hypernetwork(shared.opts.sd_hypernetwork)))
shared.opts.onchange("sd_hypernetwork_strength", modules.hypernetworks.hypernetwork.apply_strength) shared.opts.onchange("sd_hypernetwork_strength", modules.hypernetworks.hypernetwork.apply_strength)
@ -92,15 +98,18 @@ def create_api(app):
api = Api(app, queue_lock) api = Api(app, queue_lock)
return api return api
def wait_on_server(demo=None): def wait_on_server(demo=None):
while 1: while 1:
time.sleep(0.5) time.sleep(0.5)
if demo and getattr(demo, 'do_restart', False): if shared.state.need_restart:
shared.state.need_restart = False
time.sleep(0.5) time.sleep(0.5)
demo.close() demo.close()
time.sleep(0.5) time.sleep(0.5)
break break
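The other half of the restart handshake comes from the ui.py hunk earlier in this diff: the Settings and Extensions tabs set the flag, and the loop above closes the gradio app so webui() can rebuild the UI. A condensed sketch, assuming the webui's shared module:

    from modules import shared

    def request_restart():
        shared.state.interrupt()
        shared.state.need_restart = True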
def api_only(): def api_only():
initialize() initialize()
@ -108,6 +117,8 @@ def api_only():
app.add_middleware(GZipMiddleware, minimum_size=1000) app.add_middleware(GZipMiddleware, minimum_size=1000)
api = create_api(app) api = create_api(app)
modules.script_callbacks.app_started_callback(None, app)
api.launch(server_name="0.0.0.0" if cmd_opts.listen else "127.0.0.1", port=cmd_opts.port if cmd_opts.port else 7861) api.launch(server_name="0.0.0.0" if cmd_opts.listen else "127.0.0.1", port=cmd_opts.port if cmd_opts.port else 7861)
@ -132,14 +143,18 @@ def webui():
app.add_middleware(GZipMiddleware, minimum_size=1000) app.add_middleware(GZipMiddleware, minimum_size=1000)
if (launch_api): if launch_api:
create_api(app) create_api(app)
modules.script_callbacks.app_started_callback(demo, app)
wait_on_server(demo) wait_on_server(demo)
sd_samplers.set_samplers() sd_samplers.set_samplers()
print('Reloading Custom Scripts') print('Reloading extensions')
extensions.list_extensions()
print('Reloading custom scripts')
modules.scripts.reload_scripts() modules.scripts.reload_scripts()
print('Reloading modules: modules.ui') print('Reloading modules: modules.ui')
importlib.reload(modules.ui) importlib.reload(modules.ui)
@ -148,8 +163,6 @@ def webui():
print('Restarting Gradio') print('Restarting Gradio')
task = []
if __name__ == "__main__": if __name__ == "__main__":
if cmd_opts.nowebui: if cmd_opts.nowebui:
api_only() api_only()