From 70931652a4289e28d83869b6d10cf11e80a70345 Mon Sep 17 00:00:00 2001
From: RnDMonkey
Date: Fri, 30 Sep 2022 18:02:46 -0700
Subject: [PATCH 001/172] [xy_grid] made -1 seed fixing apply to Var. seed too
---
scripts/xy_grid.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/scripts/xy_grid.py b/scripts/xy_grid.py
index 146663b0..9c078888 100644
--- a/scripts/xy_grid.py
+++ b/scripts/xy_grid.py
@@ -218,7 +218,7 @@ class Script(scripts.Script):
ys = process_axis(y_opt, y_values)
def fix_axis_seeds(axis_opt, axis_list):
- if axis_opt.label == 'Seed':
+ if axis_opt.label == 'Seed' or 'Var. seed':
return [int(random.randrange(4294967294)) if val is None or val == '' or val == -1 else val for val in axis_list]
else:
return axis_list
From cf141157e7b49b0b3a6e57dc7aa0d1345158b4c8 Mon Sep 17 00:00:00 2001
From: RnDMonkey
Date: Fri, 30 Sep 2022 22:02:29 -0700
Subject: [PATCH 002/172] Added X/Y plot parameters to extra_generation_params
---
scripts/xy_grid.py | 8 ++++++++
1 file changed, 8 insertions(+)
diff --git a/scripts/xy_grid.py b/scripts/xy_grid.py
index 9c078888..d9f8d55b 100644
--- a/scripts/xy_grid.py
+++ b/scripts/xy_grid.py
@@ -244,6 +244,14 @@ class Script(scripts.Script):
return process_images(pc)
+ if not x_opt.label == 'Nothing':
+ p.extra_generation_params["X/Y Plot X Type"] = x_opt.label
+ p.extra_generation_params["X Values"] = '{' + ", ".join([f'{x}' for x in xs]) + '}'
+
+ if not y_opt.label == 'Nothing':
+ p.extra_generation_params["X/Y Plot Y Type"] = y_opt.label
+ p.extra_generation_params["Y Values"] = '{' + ", ".join([f'{y}' for y in ys]) + '}'
+
processed = draw_xy_grid(
p,
xs=xs,
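
The new keys are emitted straight into the image's infotext as comma-separated `key: value` pairs, with each axis's values wrapped in braces so the whole list reads as a single value (the parser side is updated for this in the next patch). A minimal sketch of the formatting, assuming the usual `key: value` rendering of extra_generation_params:

    xs = [10, 20, 30]
    extra_generation_params = {
        "X/Y Plot X Type": "Steps",
        "X Values": "{" + ", ".join(f"{x}" for x in xs) + "}",
    }
    # Rendered into the infotext as comma-separated "key: value" pairs:
    print(", ".join(f"{k}: {v}" for k, v in extra_generation_params.items()))
    # X/Y Plot X Type: Steps, X Values: {10, 20, 30}
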
From eba0c29dbc3bad8c4e32f1fa3a03dc6f9caf1f5a Mon Sep 17 00:00:00 2001
From: RnDMonkey
Date: Sat, 1 Oct 2022 13:56:29 -0700
Subject: [PATCH 003/172] Updated xy_grid infotext formatting, parser regex
---
modules/generation_parameters_copypaste.py | 2 +-
scripts/xy_grid.py | 12 ++++++++----
2 files changed, 9 insertions(+), 5 deletions(-)
diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py
index ac1ba7f4..39d67d94 100644
--- a/modules/generation_parameters_copypaste.py
+++ b/modules/generation_parameters_copypaste.py
@@ -1,7 +1,7 @@
import re
import gradio as gr
-re_param_code = r"\s*([\w ]+):\s*([^,]+)(?:,|$)"
+re_param_code = r"\s*([\w ]+):\s*((?:{[^}]+})|(?:[^,]+))(?:,|$)"
re_param = re.compile(re_param_code)
re_params = re.compile(r"^(?:" + re_param_code + "){3,}$")
re_imagesize = re.compile(r"^(\d+)x(\d+)$")
diff --git a/scripts/xy_grid.py b/scripts/xy_grid.py
index d9f8d55b..f87c6c1f 100644
--- a/scripts/xy_grid.py
+++ b/scripts/xy_grid.py
@@ -245,12 +245,16 @@ class Script(scripts.Script):
return process_images(pc)
if not x_opt.label == 'Nothing':
- p.extra_generation_params["X/Y Plot X Type"] = x_opt.label
- p.extra_generation_params["X Values"] = '{' + ", ".join([f'{x}' for x in xs]) + '}'
+ p.extra_generation_params["XY Plot X Type"] = x_opt.label
+ p.extra_generation_params["X Values"] = '{' + x_values + '}'
+ if x_opt.label in ["Seed","Var. seed"] and not no_fixed_seeds:
+ p.extra_generation_params["Fixed X Values"] = '{' + ", ".join([str(x) for x in xs])+ '}'
if not y_opt.label == 'Nothing':
- p.extra_generation_params["X/Y Plot Y Type"] = y_opt.label
- p.extra_generation_params["Y Values"] = '{' + ", ".join([f'{y}' for y in ys]) + '}'
+ p.extra_generation_params["XY Plot Y Type"] = y_opt.label
+ p.extra_generation_params["Y Values"] = '{' + y_values + '}'
+ if y_opt.label in ["Seed","Var. seed"] and not no_fixed_seeds:
+ p.extra_generation_params["Fixed Y Values"] = '{' + ", ".join([str(y) for y in ys])+ '}'
processed = draw_xy_grid(
p,
From b99a4f769f11ed74df0344a23069d3858613fbef Mon Sep 17 00:00:00 2001
From: RnDMonkey
Date: Sat, 1 Oct 2022 14:26:12 -0700
Subject: [PATCH 004/172] fixed expression error in condition
---
scripts/xy_grid.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/scripts/xy_grid.py b/scripts/xy_grid.py
index f87c6c1f..f1f54d9c 100644
--- a/scripts/xy_grid.py
+++ b/scripts/xy_grid.py
@@ -218,7 +218,7 @@ class Script(scripts.Script):
ys = process_axis(y_opt, y_values)
def fix_axis_seeds(axis_opt, axis_list):
- if axis_opt.label == 'Seed' or 'Var. seed':
+ if axis_opt.label in ["Seed","Var. seed"]:
return [int(random.randrange(4294967294)) if val is None or val == '' or val == -1 else val for val in axis_list]
else:
return axis_list
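
The expression error fixed here is a truthiness slip: `axis_opt.label == 'Seed' or 'Var. seed'` parses as `(axis_opt.label == 'Seed') or ('Var. seed')`, and a non-empty string literal is always truthy, so the seed-randomization branch ran for every axis type. A minimal sketch of the pitfall and the corrected membership test:

    label = "Steps"
    # Buggy form: the non-empty string makes the whole expression truthy.
    print(label == "Seed" or "Var. seed")   # -> 'Var. seed', never False
    # Fixed form: test membership explicitly.
    print(label in ["Seed", "Var. seed"])   # -> False
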
From f6a97868e57e44fba6c4283769fedd30ee11cacf Mon Sep 17 00:00:00 2001
From: RnDMonkey
Date: Sat, 1 Oct 2022 14:36:09 -0700
Subject: [PATCH 005/172] fix to allow empty {} values
---
modules/generation_parameters_copypaste.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py
index 39d67d94..27d58dfd 100644
--- a/modules/generation_parameters_copypaste.py
+++ b/modules/generation_parameters_copypaste.py
@@ -1,7 +1,7 @@
import re
import gradio as gr
-re_param_code = r"\s*([\w ]+):\s*((?:{[^}]+})|(?:[^,]+))(?:,|$)"
+re_param_code = r"\s*([\w ]+):\s*((?:{[^}]*})|(?:[^,]+))(?:,|$)"
re_param = re.compile(re_param_code)
re_params = re.compile(r"^(?:" + re_param_code + "){3,}$")
re_imagesize = re.compile(r"^(\d+)x(\d+)$")
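
Relaxing `{[^}]+}` to `{[^}]*}` lets the parameter regex accept an empty `{}` as well as a populated brace group, while plain values still fall through to the comma-free alternative. A quick check of the final pattern:

    import re

    re_param_code = r"\s*([\w ]+):\s*((?:{[^}]*})|(?:[^,]+))(?:,|$)"
    re_param = re.compile(re_param_code)

    text = "Steps: 20, XY Plot X Values: {10, 20, 30}, XY Plot Y Values: {}"
    print(re_param.findall(text))
    # [('Steps', '20'), ('XY Plot X Values', '{10, 20, 30}'), ('XY Plot Y Values', '{}')]
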
From fe6e2362e8fa5d739de6997ab155a26686d20a49 Mon Sep 17 00:00:00 2001
From: RnDMonkey
Date: Sun, 2 Oct 2022 22:04:28 -0700
Subject: [PATCH 006/172] Update xy_grid.py
Changed XY Plot infotext value keys to not be so generic.
---
scripts/xy_grid.py | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/scripts/xy_grid.py b/scripts/xy_grid.py
index f1f54d9c..ae011a17 100644
--- a/scripts/xy_grid.py
+++ b/scripts/xy_grid.py
@@ -246,15 +246,15 @@ class Script(scripts.Script):
if not x_opt.label == 'Nothing':
p.extra_generation_params["XY Plot X Type"] = x_opt.label
- p.extra_generation_params["X Values"] = '{' + x_values + '}'
+ p.extra_generation_params["XY Plot X Values"] = '{' + x_values + '}'
if x_opt.label in ["Seed","Var. seed"] and not no_fixed_seeds:
- p.extra_generation_params["Fixed X Values"] = '{' + ", ".join([str(x) for x in xs])+ '}'
+ p.extra_generation_params["XY Plot Fixed X Values"] = '{' + ", ".join([str(x) for x in xs])+ '}'
if not y_opt.label == 'Nothing':
p.extra_generation_params["XY Plot Y Type"] = y_opt.label
- p.extra_generation_params["Y Values"] = '{' + y_values + '}'
+ p.extra_generation_params["XY Plot Y Values"] = '{' + y_values + '}'
if y_opt.label in ["Seed","Var. seed"] and not no_fixed_seeds:
- p.extra_generation_params["Fixed Y Values"] = '{' + ", ".join([str(y) for y in ys])+ '}'
+ p.extra_generation_params["XY Plot Fixed Y Values"] = '{' + ", ".join([str(y) for y in ys])+ '}'
processed = draw_xy_grid(
p,
From 14c1c2b9351f16d43ba4e6b6c9062edad44a6bec Mon Sep 17 00:00:00 2001
From: Alexandre Simard
Date: Wed, 19 Oct 2022 13:53:52 -0400
Subject: [PATCH 007/172] Show PB texts at same time and earlier
For big tasks (1000+ steps), waiting a minute before the ETA appears is too long, so this change also takes the number of steps completed into account when deciding whether to show the text.
---
modules/ui.py | 11 +++++++----
1 file changed, 7 insertions(+), 4 deletions(-)
diff --git a/modules/ui.py b/modules/ui.py
index a2dbd41e..0abd177a 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -261,14 +261,14 @@ def wrap_gradio_call(func, extra_outputs=None):
return f
-def calc_time_left(progress, threshold, label, force_display):
+def calc_time_left(progress, threshold, label, force_display, showTime):
if progress == 0:
return ""
else:
time_since_start = time.time() - shared.state.time_start
eta = (time_since_start/progress)
eta_relative = eta-time_since_start
- if (eta_relative > threshold and progress > 0.02) or force_display:
+ if (eta_relative > threshold and showTime) or force_display:
if eta_relative > 3600:
return label + time.strftime('%H:%M:%S', time.gmtime(eta_relative))
elif eta_relative > 60:
@@ -290,7 +290,10 @@ def check_progress_call(id_part):
if shared.state.sampling_steps > 0:
progress += 1 / shared.state.job_count * shared.state.sampling_step / shared.state.sampling_steps
- time_left = calc_time_left( progress, 1, " ETA: ", shared.state.time_left_force_display )
+ # Show progress percentage and time left at the same moment, and base it also on steps done
+ showPBText = progress >= 0.01 or shared.state.sampling_step >= 10
+
+ time_left = calc_time_left( progress, 1, " ETA: ", shared.state.time_left_force_display, showPBText )
if time_left != "":
shared.state.time_left_force_display = True
@@ -298,7 +301,7 @@ def check_progress_call(id_part):
progressbar = ""
if opts.show_progressbar:
- progressbar = f"""<div class='progressDiv'><div class='progress' style="overflow:visible;width:{progress * 100}%;white-space:nowrap;">{"&nbsp;" * 2 + str(int(progress*100))+"%" + time_left if progress > 0.01 else ""}</div></div>"""
+ progressbar = f"""<div class='progressDiv'><div class='progress' style="overflow:visible;width:{progress * 100}%;white-space:nowrap;">{"&nbsp;" * 2 + str(int(progress*100))+"%" + time_left if showPBText else ""}</div></div>"""
image = gr_show(False)
preview_visibility = gr_show(False)
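
The ETA projection divides elapsed time by the fraction of the job completed to estimate the total runtime, then subtracts the elapsed time to get the time remaining; the new flag shows the text once progress reaches 1% or ten sampling steps have finished, whichever comes first. A worked sketch of the arithmetic with illustrative numbers:

    time_since_start = 30.0   # seconds elapsed so far
    progress = 0.25           # fraction of the job done

    eta = time_since_start / progress        # projected total runtime: 120 s
    eta_relative = eta - time_since_start    # projected time remaining: 90 s
    print(eta, eta_relative)                 # 120.0 90.0
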
From 4fbdbddc18b21f712acae58bf41740d27023285f Mon Sep 17 00:00:00 2001
From: Alexandre Simard
Date: Wed, 19 Oct 2022 15:21:36 -0400
Subject: [PATCH 008/172] Remove pad spaces from progress bar text
---
javascript/progressbar.js | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/javascript/progressbar.js b/javascript/progressbar.js
index 7a05726e..24ab4795 100644
--- a/javascript/progressbar.js
+++ b/javascript/progressbar.js
@@ -10,7 +10,7 @@ function check_progressbar(id_part, id_progressbar, id_progressbar_span, id_skip
if(opts.show_progress_in_title && progressbar && progressbar.offsetParent){
if(progressbar.innerText){
- let newtitle = 'Stable Diffusion - ' + progressbar.innerText
+ let newtitle = 'Stable Diffusion - ' + progressbar.innerText.slice(2)
if(document.title != newtitle){
document.title = newtitle;
}
From c4b5ca5778340b21288d84dfb8fe1d5773c886a8 Mon Sep 17 00:00:00 2001
From: Yuta Hayashibe
Date: Thu, 27 Oct 2022 22:00:28 +0900
Subject: [PATCH 009/172] Truncate too long filename
---
modules/images.py | 16 ++++++++++++----
1 file changed, 12 insertions(+), 4 deletions(-)
diff --git a/modules/images.py b/modules/images.py
index 7870b5b7..42363ed3 100644
--- a/modules/images.py
+++ b/modules/images.py
@@ -416,6 +416,14 @@ def get_next_sequence_number(path, basename):
return result + 1
+def truncate_fullpath(full_path, encoding='utf-8'):
+ dir_name, full_name = os.path.split(full_path)
+ file_name, file_ext = os.path.splitext(full_name)
+ max_length = os.statvfs(dir_name).f_namemax
+ file_name_truncated = file_name.encode(encoding)[:max_length - len(file_ext)].decode(encoding, 'ignore')
+ return os.path.join(dir_name , file_name_truncated + file_ext)
+
+
def save_image(image, path, basename, seed=None, prompt=None, extension='png', info=None, short_filename=False, no_prompt=False, grid=False, pnginfo_section_name='parameters', p=None, existing_info=None, forced_filename=None, suffix="", save_to_dirs=None):
"""Save an image.
@@ -456,7 +464,7 @@ def save_image(image, path, basename, seed=None, prompt=None, extension='png', i
if save_to_dirs:
dirname = namegen.apply(opts.directories_filename_pattern or "[prompt_words]").lstrip(' ').rstrip('\\ /')
- path = os.path.join(path, dirname)
+ path = truncate_fullpath(os.path.join(path, dirname))
os.makedirs(path, exist_ok=True)
@@ -480,13 +488,13 @@ def save_image(image, path, basename, seed=None, prompt=None, extension='png', i
fullfn = None
for i in range(500):
fn = f"{basecount + i:05}" if basename == '' else f"{basename}-{basecount + i:04}"
- fullfn = os.path.join(path, f"{fn}{file_decoration}.{extension}")
+ fullfn = truncate_fullpath(os.path.join(path, f"{fn}{file_decoration}.{extension}"))
if not os.path.exists(fullfn):
break
else:
- fullfn = os.path.join(path, f"{file_decoration}.{extension}")
+ fullfn = truncate_fullpath(os.path.join(path, f"{file_decoration}.{extension}"))
else:
- fullfn = os.path.join(path, f"{forced_filename}.{extension}")
+ fullfn = truncate_fullpath(os.path.join(path, f"{forced_filename}.{extension}"))
pnginfo = existing_info or {}
if info is not None:
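
truncate_fullpath trims the file stem so the full name fits the filesystem's limit, queried via `os.statvfs(...).f_namemax` (POSIX only; os.statvfs is unavailable on Windows). The limit is a byte count, so the stem is encoded before slicing, and `decode(..., 'ignore')` drops any multi-byte character cut in half at the boundary. A minimal sketch of the byte-level trim, with an assumed 256-byte budget:

    name = "あ" * 300                          # 3 bytes per character in UTF-8
    max_bytes = 256                            # assumed budget after the extension
    cut = name.encode("utf-8")[:max_bytes]     # slice lands mid-character
    truncated = cut.decode("utf-8", "ignore")  # stray trailing bytes are dropped
    print(len(truncated), len(truncated.encode("utf-8")))  # 85 255
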
From 2a25729623717cc499e873752d9f4ebebd1e1078 Mon Sep 17 00:00:00 2001
From: Muhammad Rizqi Nur
Date: Fri, 28 Oct 2022 09:44:56 +0700
Subject: [PATCH 010/172] Gradient clipping in train tab
---
modules/hypernetworks/hypernetwork.py | 10 +++++++++-
modules/ui.py | 7 +++++++
2 files changed, 16 insertions(+), 1 deletion(-)
diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py
index 8113b35b..c5d60654 100644
--- a/modules/hypernetworks/hypernetwork.py
+++ b/modules/hypernetworks/hypernetwork.py
@@ -327,7 +327,7 @@ def report_statistics(loss_info:dict):
-def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log_directory, training_width, training_height, steps, create_image_every, save_hypernetwork_every, template_file, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height):
+def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log_directory, training_width, training_height, steps, clip_grad_mode, clip_grad_value, create_image_every, save_hypernetwork_every, template_file, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height):
# images allows training previews to have infotext. Importing it at the top causes a circular import problem.
from modules import images
@@ -384,6 +384,9 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log
if ititial_step > steps:
return hypernetwork, filename
+ clip_grad_mode_value = clip_grad_mode == "value"
+ clip_grad_mode_norm = clip_grad_mode == "norm"
+
scheduler = LearnRateScheduler(learn_rate, steps, ititial_step)
# if optimizer == "AdamW": or else Adam / AdamW / SGD, etc...
optimizer = torch.optim.AdamW(weights, lr=scheduler.learn_rate)
@@ -426,6 +429,11 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log
steps_without_grad = 0
assert steps_without_grad < 10, 'no gradient found for the trained weight after backward() for 10 steps in a row; this is a bug; training cannot continue'
+ if clip_grad_mode_value:
+ torch.nn.utils.clip_grad_value_(weights, clip_value=clip_grad_value)
+ elif clip_grad_mode_norm:
+ torch.nn.utils.clip_grad_norm_(weights, max_norm=clip_grad_value)
+
optimizer.step()
if torch.isnan(losses[hypernetwork.step % losses.shape[0]]):
diff --git a/modules/ui.py b/modules/ui.py
index 0a63e357..97de7da2 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -1313,6 +1313,9 @@ def create_ui(wrap_gradio_gpu_call):
training_width = gr.Slider(minimum=64, maximum=2048, step=64, label="Width", value=512)
training_height = gr.Slider(minimum=64, maximum=2048, step=64, label="Height", value=512)
steps = gr.Number(label='Max steps', value=100000, precision=0)
+ with gr.Row():
+ clip_grad_mode = gr.Dropdown(value="disabled", label="Gradient Clipping", choices=["disabled", "value", "norm"])
+ clip_grad_value = gr.Number(value=1.0, show_label=False)
create_image_every = gr.Number(label='Save an image to log directory every N steps, 0 to disable', value=500, precision=0)
save_embedding_every = gr.Number(label='Save a copy of embedding to log directory every N steps, 0 to disable', value=500, precision=0)
save_image_with_stored_embedding = gr.Checkbox(label='Save images with embedding in PNG chunks', value=True)
@@ -1406,6 +1409,8 @@ def create_ui(wrap_gradio_gpu_call):
training_width,
training_height,
steps,
+ clip_grad_mode,
+ clip_grad_value,
create_image_every,
save_embedding_every,
template_file,
@@ -1431,6 +1436,8 @@ def create_ui(wrap_gradio_gpu_call):
training_width,
training_height,
steps,
+ clip_grad_mode,
+ clip_grad_value,
create_image_every,
save_embedding_every,
template_file,
From a133042c669f666763f5da0f4440abdc839db653 Mon Sep 17 00:00:00 2001
From: Muhammad Rizqi Nur
Date: Fri, 28 Oct 2022 10:01:46 +0700
Subject: [PATCH 011/172] Forgot to remove this from train_embedding
---
modules/ui.py | 2 --
1 file changed, 2 deletions(-)
diff --git a/modules/ui.py b/modules/ui.py
index 97de7da2..ba5e92a7 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -1409,8 +1409,6 @@ def create_ui(wrap_gradio_gpu_call):
training_width,
training_height,
steps,
- clip_grad_mode,
- clip_grad_value,
create_image_every,
save_embedding_every,
template_file,
From 1618df41bad092e068c61bf510b1e20856821ad5 Mon Sep 17 00:00:00 2001
From: Muhammad Rizqi Nur
Date: Fri, 28 Oct 2022 10:31:27 +0700
Subject: [PATCH 012/172] Gradient clipping for textual embedding
---
modules/textual_inversion/textual_inversion.py | 11 ++++++++++-
modules/ui.py | 2 ++
2 files changed, 12 insertions(+), 1 deletion(-)
diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py
index ff002d3e..7bad73a6 100644
--- a/modules/textual_inversion/textual_inversion.py
+++ b/modules/textual_inversion/textual_inversion.py
@@ -206,7 +206,7 @@ def write_loss(log_directory, filename, step, epoch_len, values):
})
-def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_directory, training_width, training_height, steps, create_image_every, save_embedding_every, template_file, save_image_with_stored_embedding, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height):
+def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_directory, training_width, training_height, steps, clip_grad_mode, clip_grad_value, create_image_every, save_embedding_every, template_file, save_image_with_stored_embedding, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height):
assert embedding_name, 'embedding not selected'
shared.state.textinfo = "Initializing textual inversion training..."
@@ -256,6 +256,9 @@ def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_direc
if ititial_step > steps:
return embedding, filename
+ clip_grad_mode_value = clip_grad_mode == "value"
+ clip_grad_mode_norm = clip_grad_mode == "norm"
+
scheduler = LearnRateScheduler(learn_rate, steps, ititial_step)
optimizer = torch.optim.AdamW([embedding.vec], lr=scheduler.learn_rate)
@@ -280,6 +283,12 @@ def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_direc
optimizer.zero_grad()
loss.backward()
+
+ if clip_grad_mode_value:
+ torch.nn.utils.clip_grad_value_(embedding.vec, clip_value=clip_grad_value)
+ elif clip_grad_mode_norm:
+ torch.nn.utils.clip_grad_norm_(embedding.vec, max_norm=clip_grad_value)
+
optimizer.step()
diff --git a/modules/ui.py b/modules/ui.py
index ba5e92a7..97de7da2 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -1409,6 +1409,8 @@ def create_ui(wrap_gradio_gpu_call):
training_width,
training_height,
steps,
+ clip_grad_mode,
+ clip_grad_value,
create_image_every,
save_embedding_every,
template_file,
From 16451ca573220e49f2eaaab97580b6b91287c8c4 Mon Sep 17 00:00:00 2001
From: Muhammad Rizqi Nur
Date: Fri, 28 Oct 2022 17:16:23 +0700
Subject: [PATCH 013/172] Learning rate sched syntax support for grad clipping
---
modules/hypernetworks/hypernetwork.py | 13 ++++++++++---
modules/textual_inversion/learn_schedule.py | 11 ++++++++---
modules/textual_inversion/textual_inversion.py | 12 +++++++++---
modules/ui.py | 7 +++----
4 files changed, 30 insertions(+), 13 deletions(-)
diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py
index c5d60654..86532063 100644
--- a/modules/hypernetworks/hypernetwork.py
+++ b/modules/hypernetworks/hypernetwork.py
@@ -383,11 +383,15 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log
ititial_step = hypernetwork.step or 0
if ititial_step > steps:
return hypernetwork, filename
-
+
clip_grad_mode_value = clip_grad_mode == "value"
clip_grad_mode_norm = clip_grad_mode == "norm"
+ clip_grad_enabled = clip_grad_mode_value or clip_grad_mode_norm
+ if clip_grad_enabled:
+ clip_grad_sched = LearnRateScheduler(clip_grad_value, steps, ititial_step, verbose=False)
scheduler = LearnRateScheduler(learn_rate, steps, ititial_step)
+
# if optimizer == "AdamW": or else Adam / AdamW / SGD, etc...
optimizer = torch.optim.AdamW(weights, lr=scheduler.learn_rate)
@@ -407,6 +411,9 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log
if shared.state.interrupted:
break
+ if clip_grad_enabled:
+ clip_grad_sched.step(hypernetwork.step)
+
with torch.autocast("cuda"):
c = stack_conds([entry.cond for entry in entries]).to(devices.device)
# c = torch.vstack([entry.cond for entry in entries]).to(devices.device)
@@ -430,9 +437,9 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log
assert steps_without_grad < 10, 'no gradient found for the trained weight after backward() for 10 steps in a row; this is a bug; training cannot continue'
if clip_grad_mode_value:
- torch.nn.utils.clip_grad_value_(weights, clip_value=clip_grad_value)
+ torch.nn.utils.clip_grad_value_(weights, clip_value=clip_grad_sched.learn_rate)
elif clip_grad_mode_norm:
- torch.nn.utils.clip_grad_norm_(weights, max_norm=clip_grad_value)
+ torch.nn.utils.clip_grad_norm_(weights, max_norm=clip_grad_sched.learn_rate)
optimizer.step()
diff --git a/modules/textual_inversion/learn_schedule.py b/modules/textual_inversion/learn_schedule.py
index 2062726a..ffec3e1b 100644
--- a/modules/textual_inversion/learn_schedule.py
+++ b/modules/textual_inversion/learn_schedule.py
@@ -51,14 +51,19 @@ class LearnRateScheduler:
self.finished = False
- def apply(self, optimizer, step_number):
+ def step(self, step_number):
if step_number <= self.end_step:
- return
+ return False
try:
(self.learn_rate, self.end_step) = next(self.schedules)
- except Exception:
+ except StopIteration:
self.finished = True
+ return False
+ return True
+
+ def apply(self, optimizer, step_number):
+ if not self.step(step_number):
return
if self.verbose:
diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py
index 7bad73a6..6b00c6a1 100644
--- a/modules/textual_inversion/textual_inversion.py
+++ b/modules/textual_inversion/textual_inversion.py
@@ -255,9 +255,12 @@ def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_direc
ititial_step = embedding.step or 0
if ititial_step > steps:
return embedding, filename
-
+
clip_grad_mode_value = clip_grad_mode == "value"
clip_grad_mode_norm = clip_grad_mode == "norm"
+ clip_grad_enabled = clip_grad_mode_value or clip_grad_mode_norm
+ if clip_grad_enabled:
+ clip_grad_sched = LearnRateScheduler(clip_grad_value, steps, ititial_step, verbose=False)
scheduler = LearnRateScheduler(learn_rate, steps, ititial_step)
optimizer = torch.optim.AdamW([embedding.vec], lr=scheduler.learn_rate)
@@ -273,6 +276,9 @@ def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_direc
if shared.state.interrupted:
break
+ if clip_grad_enabled:
+ clip_grad_sched.step(embedding.step)
+
with torch.autocast("cuda"):
c = cond_model([entry.cond_text for entry in entries])
x = torch.stack([entry.latent for entry in entries]).to(devices.device)
@@ -285,9 +291,9 @@ def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_direc
loss.backward()
if clip_grad_mode_value:
- torch.nn.utils.clip_grad_value_(embedding.vec, clip_value=clip_grad_value)
+ torch.nn.utils.clip_grad_value_(embedding.vec, clip_value=clip_grad_sched.learn_rate)
elif clip_grad_mode_norm:
- torch.nn.utils.clip_grad_norm_(embedding.vec, max_norm=clip_grad_value)
+ torch.nn.utils.clip_grad_norm_(embedding.vec, max_norm=clip_grad_sched.learn_rate)
optimizer.step()
diff --git a/modules/ui.py b/modules/ui.py
index 97de7da2..47d16429 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -1305,7 +1305,9 @@ def create_ui(wrap_gradio_gpu_call):
with gr.Row():
embedding_learn_rate = gr.Textbox(label='Embedding Learning rate', placeholder="Embedding Learning rate", value="0.005")
hypernetwork_learn_rate = gr.Textbox(label='Hypernetwork Learning rate', placeholder="Hypernetwork Learning rate", value="0.00001")
-
+ with gr.Row():
+ clip_grad_mode = gr.Dropdown(value="disabled", label="Gradient Clipping", choices=["disabled", "value", "norm"])
+ clip_grad_value = gr.Textbox(placeholder="Gradient clip value", value="1.0", show_label=False)
batch_size = gr.Number(label='Batch size', value=1, precision=0)
dataset_directory = gr.Textbox(label='Dataset directory', placeholder="Path to directory with input images")
log_directory = gr.Textbox(label='Log directory', placeholder="Path to directory where to write outputs", value="textual_inversion")
@@ -1313,9 +1315,6 @@ def create_ui(wrap_gradio_gpu_call):
training_width = gr.Slider(minimum=64, maximum=2048, step=64, label="Width", value=512)
training_height = gr.Slider(minimum=64, maximum=2048, step=64, label="Height", value=512)
steps = gr.Number(label='Max steps', value=100000, precision=0)
- with gr.Row():
- clip_grad_mode = gr.Dropdown(value="disabled", label="Gradient Clipping", choices=["disabled", "value", "norm"])
- clip_grad_value = gr.Number(value=1.0, show_label=False)
create_image_every = gr.Number(label='Save an image to log directory every N steps, 0 to disable', value=500, precision=0)
save_embedding_every = gr.Number(label='Save a copy of embedding to log directory every N steps, 0 to disable', value=500, precision=0)
save_image_with_stored_embedding = gr.Checkbox(label='Save images with embedding in PNG chunks', value=True)
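
Switching clip_grad_value from a gr.Number to a gr.Textbox is what allows the clip value to reuse the learning-rate schedule syntax via LearnRateScheduler, so it can decay over training just like the learning rate. A minimal parser sketch, assuming the `value:until_step` form the learning-rate schedules use (the last entry runs to the end):

    def parse_schedule(spec, max_steps):
        # e.g. "0.1:500, 0.05:1000, 0.01" -> each value applies until its step bound
        for part in spec.split(","):
            if ":" in part:
                value, step = part.split(":")
                yield float(value), int(step)
            else:
                yield float(part), max_steps

    print(list(parse_schedule("0.1:500, 0.05:1000, 0.01", 10000)))
    # [(0.1, 500), (0.05, 1000), (0.01, 10000)]
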
From 840307f23738c38f7ac3ad636e53ccec66e71f8b Mon Sep 17 00:00:00 2001
From: Muhammad Rizqi Nur
Date: Mon, 31 Oct 2022 13:49:24 +0700
Subject: [PATCH 014/172] Change default clip grad value to 0.1
It still defaults to disabled.
Ref for value: https://github.com/danielalcalde/stable-diffusion-webui/commit/732b15820a9bde9f47e075a6209c3d47d47acb08
---
modules/ui.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/modules/ui.py b/modules/ui.py
index 98f9565f..364953aa 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -1256,7 +1256,7 @@ def create_ui(wrap_gradio_gpu_call):
hypernetwork_learn_rate = gr.Textbox(label='Hypernetwork Learning rate', placeholder="Hypernetwork Learning rate", value="0.00001")
with gr.Row():
clip_grad_mode = gr.Dropdown(value="disabled", label="Gradient Clipping", choices=["disabled", "value", "norm"])
- clip_grad_value = gr.Textbox(placeholder="Gradient clip value", value="1.0", show_label=False)
+ clip_grad_value = gr.Textbox(placeholder="Gradient clip value", value="0.1", show_label=False)
batch_size = gr.Number(label='Batch size', value=1, precision=0)
dataset_directory = gr.Textbox(label='Dataset directory', placeholder="Path to directory with input images")
log_directory = gr.Textbox(label='Log directory', placeholder="Path to directory where to write outputs", value="textual_inversion")
From 4123be632a98f70cda06e14c2f556f7ad38cd436 Mon Sep 17 00:00:00 2001
From: Muhammad Rizqi Nur
Date: Mon, 31 Oct 2022 13:53:22 +0700
Subject: [PATCH 015/172] Fix merge conflicts
---
modules/hypernetworks/hypernetwork.py | 17 ++++++-----------
1 file changed, 6 insertions(+), 11 deletions(-)
diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py
index 65a584bb..207808ee 100644
--- a/modules/hypernetworks/hypernetwork.py
+++ b/modules/hypernetworks/hypernetwork.py
@@ -373,6 +373,12 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log
scheduler = LearnRateScheduler(learn_rate, steps, ititial_step)
+ clip_grad_mode_value = clip_grad_mode == "value"
+ clip_grad_mode_norm = clip_grad_mode == "norm"
+ clip_grad_enabled = clip_grad_mode_value or clip_grad_mode_norm
+ if clip_grad_enabled:
+ clip_grad_sched = LearnRateScheduler(clip_grad_value, steps, ititial_step, verbose=False)
+
# dataset loading may take a while, so input validations and early returns should be done before this
shared.state.textinfo = f"Preparing dataset from {html.escape(data_root)}..."
with torch.autocast("cuda"):
@@ -389,21 +395,10 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log
previous_mean_loss = 0
print("Mean loss of {} elements".format(size))
- last_saved_file = ""
- last_saved_image = ""
- forced_filename = ""
-
ititial_step = hypernetwork.step or 0
if ititial_step > steps:
return hypernetwork, filename
- clip_grad_mode_value = clip_grad_mode == "value"
- clip_grad_mode_norm = clip_grad_mode == "norm"
- clip_grad_enabled = clip_grad_mode_value or clip_grad_mode_norm
- if clip_grad_enabled:
- clip_grad_sched = LearnRateScheduler(clip_grad_value, steps, ititial_step, verbose=False)
-
- scheduler = LearnRateScheduler(learn_rate, steps, ititial_step)
weights = hypernetwork.weights()
for weight in weights:
From d5ea878b2aa117588d85287cbd8983aa52177df5 Mon Sep 17 00:00:00 2001
From: Muhammad Rizqi Nur
Date: Mon, 31 Oct 2022 13:54:40 +0700
Subject: [PATCH 016/172] Fix merge conflicts
---
modules/hypernetworks/hypernetwork.py | 5 -----
1 file changed, 5 deletions(-)
diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py
index 207808ee..2df38c70 100644
--- a/modules/hypernetworks/hypernetwork.py
+++ b/modules/hypernetworks/hypernetwork.py
@@ -395,11 +395,6 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log
previous_mean_loss = 0
print("Mean loss of {} elements".format(size))
- ititial_step = hypernetwork.step or 0
- if ititial_step > steps:
- return hypernetwork, filename
-
-
weights = hypernetwork.weights()
for weight in weights:
weight.requires_grad = True
From cffc240a7327ae60671ff533469fc4ed4bf605de Mon Sep 17 00:00:00 2001
From: Nerogar
Date: Sun, 23 Oct 2022 14:05:25 +0200
Subject: [PATCH 017/172] fixed textual inversion training with inpainting
models
---
.../textual_inversion/textual_inversion.py | 27 ++++++++++++++++++-
1 file changed, 26 insertions(+), 1 deletion(-)
diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py
index 0aeb0459..2630c7c9 100644
--- a/modules/textual_inversion/textual_inversion.py
+++ b/modules/textual_inversion/textual_inversion.py
@@ -224,6 +224,26 @@ def validate_train_inputs(model_name, learn_rate, batch_size, data_root, templat
if save_model_every or create_image_every:
assert log_directory, "Log directory is empty"
+def create_dummy_mask(x, width=None, height=None):
+ if shared.sd_model.model.conditioning_key in {'hybrid', 'concat'}:
+
+ # The "masked-image" in this case will just be all zeros since the entire image is masked.
+ image_conditioning = torch.zeros(x.shape[0], 3, height, width, device=x.device)
+ image_conditioning = shared.sd_model.get_first_stage_encoding(shared.sd_model.encode_first_stage(image_conditioning))
+
+ # Add the fake full 1s mask to the first dimension.
+ image_conditioning = torch.nn.functional.pad(image_conditioning, (0, 0, 0, 0, 1, 0), value=1.0)
+ image_conditioning = image_conditioning.to(x.dtype)
+
+ else:
+ # Dummy zero conditioning if we're not using inpainting model.
+ # Still takes up a bit of memory, but no encoder call.
+ # Pretty sure we can just make this a 1x1 image since it's not going to be used besides its batch size.
+ image_conditioning = torch.zeros(x.shape[0], 5, 1, 1, dtype=x.dtype, device=x.device)
+
+ return image_conditioning
+
+
def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_directory, training_width, training_height, steps, create_image_every, save_embedding_every, template_file, save_image_with_stored_embedding, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height):
save_embedding_every = save_embedding_every or 0
create_image_every = create_image_every or 0
@@ -286,6 +306,7 @@ def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_direc
forced_filename = ""
embedding_yet_to_be_embedded = False
+ img_c = None
pbar = tqdm.tqdm(enumerate(ds), total=steps-ititial_step)
for i, entries in pbar:
embedding.step = i + ititial_step
@@ -299,8 +320,12 @@ def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_direc
with torch.autocast("cuda"):
c = cond_model([entry.cond_text for entry in entries])
+ if img_c is None:
+ img_c = create_dummy_mask(c, training_width, training_height)
+
x = torch.stack([entry.latent for entry in entries]).to(devices.device)
- loss = shared.sd_model(x, c)[0]
+ cond = {"c_concat": [img_c], "c_crossattn": [c]}
+ loss = shared.sd_model(x, cond)[0]
del x
losses[embedding.step % losses.shape[0]] = loss.item()
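
Inpainting checkpoints use 'hybrid'/'concat' conditioning: the UNet expects extra input channels holding a mask and a masked-image latent, passed through `c_concat` alongside the usual `c_crossattn` text conditioning. create_dummy_mask fakes these with an all-zeros masked image and an all-ones mask channel prepended via F.pad. A small sketch of that padding trick, with illustrative shapes:

    import torch

    image_conditioning = torch.zeros(2, 4, 8, 8)   # batch=2, 4 latent channels
    # The 6-tuple pads the last three dims as (W_l, W_r, H_t, H_b, C_front, C_back),
    # so this prepends one channel filled with 1.0 -- the "everything masked" mask.
    padded = torch.nn.functional.pad(image_conditioning, (0, 0, 0, 0, 1, 0), value=1.0)
    print(padded.shape)        # torch.Size([2, 5, 8, 8])
    print(padded[:, 0].min())  # tensor(1.)
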
From d624cb82a7c65a1ea04e4b6e23f0164a3ba25e25 Mon Sep 17 00:00:00 2001
From: Ikko Ashimine
Date: Thu, 3 Nov 2022 01:05:00 +0900
Subject: [PATCH 018/172] Fix typo in ui.js
interation -> interaction
---
javascript/ui.js | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/javascript/ui.js b/javascript/ui.js
index 7e116465..0308dce3 100644
--- a/javascript/ui.js
+++ b/javascript/ui.js
@@ -1,4 +1,4 @@
-// various functions for interation with ui.py not large enough to warrant putting them in separate files
+// various functions for interaction with ui.py not large enough to warrant putting them in separate files
function set_theme(theme){
gradioURL = window.location.href
From bb832d7725187f8a8ab44faa6ee1b38cb5f600aa Mon Sep 17 00:00:00 2001
From: Muhammad Rizqi Nur
Date: Sat, 5 Nov 2022 11:48:38 +0700
Subject: [PATCH 019/172] Simplify grad clip
---
modules/hypernetworks/hypernetwork.py | 16 +++++++---------
modules/textual_inversion/textual_inversion.py | 16 +++++++---------
2 files changed, 14 insertions(+), 18 deletions(-)
diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py
index f4c2668f..02b624e1 100644
--- a/modules/hypernetworks/hypernetwork.py
+++ b/modules/hypernetworks/hypernetwork.py
@@ -385,10 +385,10 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log
scheduler = LearnRateScheduler(learn_rate, steps, ititial_step)
- clip_grad_mode_value = clip_grad_mode == "value"
- clip_grad_mode_norm = clip_grad_mode == "norm"
- clip_grad_enabled = clip_grad_mode_value or clip_grad_mode_norm
- if clip_grad_enabled:
+ clip_grad = torch.nn.utils.clip_grad_value_ if clip_grad_mode == "value" else \
+ torch.nn.utils.clip_grad_norm_ if clip_grad_mode == "norm" else \
+ None
+ if clip_grad:
clip_grad_sched = LearnRateScheduler(clip_grad_value, steps, ititial_step, verbose=False)
# dataset loading may take a while, so input validations and early returns should be done before this
@@ -433,7 +433,7 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log
if shared.state.interrupted:
break
- if clip_grad_enabled:
+ if clip_grad:
clip_grad_sched.step(hypernetwork.step)
with torch.autocast("cuda"):
@@ -458,10 +458,8 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log
steps_without_grad = 0
assert steps_without_grad < 10, 'no gradient found for the trained weight after backward() for 10 steps in a row; this is a bug; training cannot continue'
- if clip_grad_mode_value:
- torch.nn.utils.clip_grad_value_(weights, clip_value=clip_grad_sched.learn_rate)
- elif clip_grad_mode_norm:
- torch.nn.utils.clip_grad_norm_(weights, max_norm=clip_grad_sched.learn_rate)
+ if clip_grad:
+ clip_grad(weights, clip_grad_sched.learn_rate)
optimizer.step()
diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py
index c567ec3f..687d97bb 100644
--- a/modules/textual_inversion/textual_inversion.py
+++ b/modules/textual_inversion/textual_inversion.py
@@ -269,10 +269,10 @@ def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_direc
scheduler = LearnRateScheduler(learn_rate, steps, ititial_step)
- clip_grad_mode_value = clip_grad_mode == "value"
- clip_grad_mode_norm = clip_grad_mode == "norm"
- clip_grad_enabled = clip_grad_mode_value or clip_grad_mode_norm
- if clip_grad_enabled:
+ clip_grad = torch.nn.utils.clip_grad_value_ if clip_grad_mode == "value" else \
+ torch.nn.utils.clip_grad_norm_ if clip_grad_mode == "norm" else \
+ None
+ if clip_grad:
clip_grad_sched = LearnRateScheduler(clip_grad_value, steps, ititial_step, verbose=False)
# dataset loading may take a while, so input validations and early returns should be done before this
shared.state.textinfo = f"Preparing dataset from {html.escape(data_root)}..."
@@ -302,7 +302,7 @@ def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_direc
if shared.state.interrupted:
break
- if clip_grad_enabled:
+ if clip_grad:
clip_grad_sched.step(embedding.step)
with torch.autocast("cuda"):
@@ -316,10 +316,8 @@ def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_direc
optimizer.zero_grad()
loss.backward()
- if clip_grad_mode_value:
- torch.nn.utils.clip_grad_value_(embedding.vec, clip_value=clip_grad_sched.learn_rate)
- elif clip_grad_mode_norm:
- torch.nn.utils.clip_grad_norm_(embedding.vec, max_norm=clip_grad_sched.learn_rate)
+ if clip_grad:
+ clip_grad(embedding.vec, clip_grad_sched.learn_rate)
optimizer.step()
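
The simplification works because both PyTorch clip functions share a positional shape: `clip_grad_value_(parameters, clip_value)` and `clip_grad_norm_(parameters, max_norm)`, so a single callable picked up front replaces the two mode flags and the duplicated call sites. A self-contained sketch of the pattern:

    import torch

    clip_grad_mode = "norm"  # "value", "norm", or "disabled"
    clip_grad = torch.nn.utils.clip_grad_value_ if clip_grad_mode == "value" else \
        torch.nn.utils.clip_grad_norm_ if clip_grad_mode == "norm" else \
        None

    w = torch.nn.Parameter(torch.randn(3))
    w.grad = torch.tensor([10.0, -10.0, 10.0])
    if clip_grad:
        clip_grad(w, 0.1)        # second argument is clip_value/max_norm
    print(w.grad.norm())         # <= 0.1 in "norm" mode
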
From 9fd457e21d6c809a69a1318f03d75f7b3e09b865 Mon Sep 17 00:00:00 2001
From: camenduru <54370274+camenduru@users.noreply.github.com>
Date: Thu, 15 Dec 2022 21:57:48 +0300
Subject: [PATCH 020/172] allow_credentials and allow_headers for api
from https://fastapi.tiangolo.com/tutorial/cors/
---
webui.py | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/webui.py b/webui.py
index c2d0c6be..13a4d14a 100644
--- a/webui.py
+++ b/webui.py
@@ -90,11 +90,11 @@ def initialize():
def setup_cors(app):
if cmd_opts.cors_allow_origins and cmd_opts.cors_allow_origins_regex:
- app.add_middleware(CORSMiddleware, allow_origins=cmd_opts.cors_allow_origins.split(','), allow_origin_regex=cmd_opts.cors_allow_origins_regex, allow_methods=['*'])
+ app.add_middleware(CORSMiddleware, allow_origins=cmd_opts.cors_allow_origins.split(','), allow_origin_regex=cmd_opts.cors_allow_origins_regex, allow_methods=['*'], allow_credentials=True, allow_headers=['*'])
elif cmd_opts.cors_allow_origins:
- app.add_middleware(CORSMiddleware, allow_origins=cmd_opts.cors_allow_origins.split(','), allow_methods=['*'])
+ app.add_middleware(CORSMiddleware, allow_origins=cmd_opts.cors_allow_origins.split(','), allow_methods=['*'], allow_credentials=True, allow_headers=['*'])
elif cmd_opts.cors_allow_origins_regex:
- app.add_middleware(CORSMiddleware, allow_origin_regex=cmd_opts.cors_allow_origins_regex, allow_methods=['*'])
+ app.add_middleware(CORSMiddleware, allow_origin_regex=cmd_opts.cors_allow_origins_regex, allow_methods=['*'], allow_credentials=True, allow_headers=['*'])
def create_api(app):
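
With credentials allowed, browsers may send cookies and authorization headers on cross-origin requests, and `allow_headers=['*']` accepts arbitrary request headers during preflight. A minimal sketch of the same middleware setup on a bare FastAPI app (the origin is illustrative):

    from fastapi import FastAPI
    from fastapi.middleware.cors import CORSMiddleware

    app = FastAPI()
    app.add_middleware(
        CORSMiddleware,
        allow_origins=["http://localhost:3000"],  # illustrative origin
        allow_methods=["*"],
        allow_credentials=True,   # permit cookies/auth headers cross-origin
        allow_headers=["*"],      # accept any header in the preflight
    )
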
From f23a822f1c9cb3bd2e8772c75af429e06515eaef Mon Sep 17 00:00:00 2001
From: Philpax
Date: Sat, 24 Dec 2022 20:45:16 +1100
Subject: [PATCH 021/172] feat(api): include job_timestamp in progress
---
modules/shared.py | 1 +
1 file changed, 1 insertion(+)
diff --git a/modules/shared.py b/modules/shared.py
index 8ea3b441..f356dbf7 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -171,6 +171,7 @@ class State:
"interrupted": self.skipped,
"job": self.job,
"job_count": self.job_count,
+ "job_timestamp": self.job_timestamp,
"job_no": self.job_no,
"sampling_step": self.sampling_step,
"sampling_steps": self.sampling_steps,
From fa931733f6acc94e058a1d3d4655846e33ae34be Mon Sep 17 00:00:00 2001
From: Philpax
Date: Sun, 25 Dec 2022 20:17:49 +1100
Subject: [PATCH 022/172] fix(api): assign sd_model after settings change
---
modules/api/api.py | 2 --
modules/processing.py | 6 ++++--
2 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/modules/api/api.py b/modules/api/api.py
index 1ceba75d..0a1a1905 100644
--- a/modules/api/api.py
+++ b/modules/api/api.py
@@ -121,7 +121,6 @@ class Api:
def text2imgapi(self, txt2imgreq: StableDiffusionTxt2ImgProcessingAPI):
populate = txt2imgreq.copy(update={ # Override __init__ params
- "sd_model": shared.sd_model,
"sampler_name": validate_sampler_name(txt2imgreq.sampler_name or txt2imgreq.sampler_index),
"do_not_save_samples": True,
"do_not_save_grid": True
@@ -153,7 +152,6 @@ class Api:
mask = decode_base64_to_image(mask)
populate = img2imgreq.copy(update={ # Override __init__ params
- "sd_model": shared.sd_model,
"sampler_name": validate_sampler_name(img2imgreq.sampler_name or img2imgreq.sampler_index),
"do_not_save_samples": True,
"do_not_save_grid": True,
diff --git a/modules/processing.py b/modules/processing.py
index 4a406084..0b270278 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -50,9 +50,9 @@ def apply_color_correction(correction, original_image):
correction,
channel_axis=2
), cv2.COLOR_LAB2RGB).astype("uint8"))
-
+
image = blendLayers(image, original_image, BlendType.LUMINOSITY)
-
+
return image
@@ -466,6 +466,8 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
if k == 'sd_model_checkpoint': sd_models.reload_model_weights() # make onchange call for changing SD model
if k == 'sd_vae': sd_vae.reload_vae_weights() # make onchange call for changing VAE
+ # Assign sd_model here to ensure that it reflects the model after any changes
+ p.sd_model = shared.sd_model
res = process_images_inner(p)
finally:
From 4df5009acb6832daef1ff5949404b5aadc8f8fa4 Mon Sep 17 00:00:00 2001
From: hentailord85ez <112723046+hentailord85ez@users.noreply.github.com>
Date: Mon, 26 Dec 2022 20:49:13 +0000
Subject: [PATCH 023/172] Update sd_samplers.py
---
modules/sd_samplers.py | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py
index 177b5338..f4473832 100644
--- a/modules/sd_samplers.py
+++ b/modules/sd_samplers.py
@@ -462,6 +462,9 @@ class KDiffusionSampler:
return extra_params_kwargs
def get_sigmas(self, p, steps):
+ disc = opts.always_discard_next_to_last_sigma or (self.config is not None and self.config.options.get('discard_next_to_last_sigma', False))
+ steps += 1 if disc else 0
+
if p.sampler_noise_scheduler_override:
sigmas = p.sampler_noise_scheduler_override(steps)
elif self.config is not None and self.config.options.get('scheduler', None) == 'karras':
@@ -469,7 +472,7 @@ class KDiffusionSampler:
else:
sigmas = self.model_wrap.get_sigmas(steps)
- if self.config is not None and self.config.options.get('discard_next_to_last_sigma', False):
+ if disc:
sigmas = torch.cat([sigmas[:-2], sigmas[-1:]])
return sigmas
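
Discarding the next-to-last sigma removes one denoising step from the schedule, so when the option is active one extra step is requested up front to keep the effective count the sampler runs. A small sketch of the tensor surgery on an illustrative schedule:

    import torch

    sigmas = torch.tensor([14.6, 9.9, 6.1, 3.5, 1.6, 0.7, 0.0])
    trimmed = torch.cat([sigmas[:-2], sigmas[-1:]])   # drop the 0.7 entry
    print(trimmed)  # tensor([14.6000, 9.9000, 6.1000, 3.5000, 1.6000, 0.0000])
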
From 03f486a2399df0a2b24c7aeea72e64f106a87297 Mon Sep 17 00:00:00 2001
From: hentailord85ez <112723046+hentailord85ez@users.noreply.github.com>
Date: Mon, 26 Dec 2022 20:49:33 +0000
Subject: [PATCH 024/172] Update shared.py
---
modules/shared.py | 1 +
1 file changed, 1 insertion(+)
diff --git a/modules/shared.py b/modules/shared.py
index d4ddeea0..5edb316c 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -418,6 +418,7 @@ options_templates.update(options_section(('sampler-params', "Sampler parameters"
's_tmin': OptionInfo(0.0, "sigma tmin", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
's_noise': OptionInfo(1.0, "sigma noise", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
'eta_noise_seed_delta': OptionInfo(0, "Eta noise seed delta", gr.Number, {"precision": 0}),
+ 'always_discard_next_to_last_sigma': OptionInfo(False, "Always discard next-to-last sigma"),
}))
options_templates.update(options_section((None, "Hidden options"), {
From 5ba04f9ec050a66e918571f07e8863f157f05b44 Mon Sep 17 00:00:00 2001
From: Nicolas Patry
Date: Wed, 21 Dec 2022 13:45:58 +0100
Subject: [PATCH 025/172] Attempting to solve slow loads for `safetensors`.
Fixes #5893
---
modules/sd_models.py | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/modules/sd_models.py b/modules/sd_models.py
index ecdd91c5..cd938656 100644
--- a/modules/sd_models.py
+++ b/modules/sd_models.py
@@ -168,7 +168,10 @@ def get_state_dict_from_checkpoint(pl_sd):
def read_state_dict(checkpoint_file, print_global_state=False, map_location=None):
_, extension = os.path.splitext(checkpoint_file)
if extension.lower() == ".safetensors":
- pl_sd = safetensors.torch.load_file(checkpoint_file, device=map_location or shared.weight_load_location)
+ device = map_location or shared.weight_load_location
+ if device is None:
+ device = "cuda:0" if torch.cuda.is_available() else "cpu"
+ pl_sd = safetensors.torch.load_file(checkpoint_file, device=device)
else:
pl_sd = torch.load(checkpoint_file, map_location=map_location or shared.weight_load_location)
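
When both map_location and shared.weight_load_location are unset, load_file previously received device=None; the patch resolves a concrete device first so safetensors can place tensors directly where they are needed. A minimal sketch of the fallback (the checkpoint path is hypothetical):

    import torch
    import safetensors.torch

    device = None  # e.g. shared.weight_load_location when unset
    if device is None:
        device = "cuda:0" if torch.cuda.is_available() else "cpu"
    state_dict = safetensors.torch.load_file("model.safetensors", device=device)
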
From 5a523d13050a5ede43c473767f29dfe2e391136a Mon Sep 17 00:00:00 2001
From: Nicolas Patry
Date: Tue, 27 Dec 2022 11:27:40 +0100
Subject: [PATCH 026/172] Version 0.2.7
Fixes Windows SAFETENSORS_FAST_GPU path.
---
requirements_versions.txt | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/requirements_versions.txt b/requirements_versions.txt
index c126c8c4..52e98818 100644
--- a/requirements_versions.txt
+++ b/requirements_versions.txt
@@ -26,5 +26,5 @@ lark==1.1.2
inflection==0.5.1
GitPython==3.1.27
torchsde==0.2.5
-safetensors==0.2.5
+safetensors==0.2.7
httpcore<=0.15
From a005fccddd5a37c57f1afe5234660b59b9a41508 Mon Sep 17 00:00:00 2001
From: me <25877290+Kryptortio@users.noreply.github.com>
Date: Sun, 1 Jan 2023 14:51:12 +0100
Subject: [PATCH 027/172] Add a lot more elem_id/HTML id, modified some that
were duplicates for seed section
---
modules/generation_parameters_copypaste.py | 2 +-
modules/ui.py | 252 ++++++++++-----------
style.css | 12 +-
3 files changed, 133 insertions(+), 133 deletions(-)
diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py
index 54b3372d..8e7f0df0 100644
--- a/modules/generation_parameters_copypaste.py
+++ b/modules/generation_parameters_copypaste.py
@@ -93,7 +93,7 @@ def integrate_settings_paste_fields(component_dict):
def create_buttons(tabs_list):
buttons = {}
for tab in tabs_list:
- buttons[tab] = gr.Button(f"Send to {tab}")
+ buttons[tab] = gr.Button(f"Send to {tab}", elem_id=f"{tab}_tab")
return buttons
diff --git a/modules/ui.py b/modules/ui.py
index 27da2c2c..7070ea15 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -272,17 +272,17 @@ def interrogate_deepbooru(image):
return gr_show(True) if prompt is None else prompt
-def create_seed_inputs():
+def create_seed_inputs(target_interface):
with gr.Row():
with gr.Box():
- with gr.Row(elem_id='seed_row'):
- seed = (gr.Textbox if cmd_opts.use_textbox_seed else gr.Number)(label='Seed', value=-1)
+ with gr.Row(elem_id=target_interface + '_seed_row'):
+ seed = (gr.Textbox if cmd_opts.use_textbox_seed else gr.Number)(label='Seed', value=-1, elem_id=target_interface + '_seed')
seed.style(container=False)
- random_seed = gr.Button(random_symbol, elem_id='random_seed')
- reuse_seed = gr.Button(reuse_symbol, elem_id='reuse_seed')
+ random_seed = gr.Button(random_symbol, elem_id=target_interface + '_random_seed')
+ reuse_seed = gr.Button(reuse_symbol, elem_id=target_interface + '_reuse_seed')
- with gr.Box(elem_id='subseed_show_box'):
- seed_checkbox = gr.Checkbox(label='Extra', elem_id='subseed_show', value=False)
+ with gr.Box(elem_id=target_interface + '_subseed_show_box'):
+ seed_checkbox = gr.Checkbox(label='Extra', elem_id=target_interface + '_subseed_show', value=False)
# Components to show/hide based on the 'Extra' checkbox
seed_extras = []
@@ -290,17 +290,17 @@ def create_seed_inputs():
with gr.Row(visible=False) as seed_extra_row_1:
seed_extras.append(seed_extra_row_1)
with gr.Box():
- with gr.Row(elem_id='subseed_row'):
- subseed = gr.Number(label='Variation seed', value=-1)
+ with gr.Row(elem_id=target_interface + '_subseed_row'):
+ subseed = gr.Number(label='Variation seed', value=-1, elem_id=target_interface + '_subseed')
subseed.style(container=False)
- random_subseed = gr.Button(random_symbol, elem_id='random_subseed')
- reuse_subseed = gr.Button(reuse_symbol, elem_id='reuse_subseed')
- subseed_strength = gr.Slider(label='Variation strength', value=0.0, minimum=0, maximum=1, step=0.01)
+ random_subseed = gr.Button(random_symbol, elem_id=target_interface + '_random_subseed')
+ reuse_subseed = gr.Button(reuse_symbol, elem_id=target_interface + '_reuse_subseed')
+ subseed_strength = gr.Slider(label='Variation strength', value=0.0, minimum=0, maximum=1, step=0.01, elem_id=target_interface + '_subseed_strength')
with gr.Row(visible=False) as seed_extra_row_2:
seed_extras.append(seed_extra_row_2)
- seed_resize_from_w = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize seed from width", value=0)
- seed_resize_from_h = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize seed from height", value=0)
+ seed_resize_from_w = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize seed from width", value=0, elem_id=target_interface + '_seed_resize_from_w')
+ seed_resize_from_h = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize seed from height", value=0, elem_id=target_interface + '_seed_resize_from_h')
random_seed.click(fn=lambda: -1, show_progress=False, inputs=[], outputs=[seed])
random_subseed.click(fn=lambda: -1, show_progress=False, inputs=[], outputs=[subseed])
@@ -678,28 +678,28 @@ def create_ui():
steps, sampler_index = create_sampler_and_steps_selection(samplers, "txt2img")
with gr.Group():
- width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512)
- height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512)
+ width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="txt2img_width")
+ height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="txt2img_height")
with gr.Row():
- restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1)
- tiling = gr.Checkbox(label='Tiling', value=False)
- enable_hr = gr.Checkbox(label='Highres. fix', value=False)
+ restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1, elem_id="txt2img_restore_faces")
+ tiling = gr.Checkbox(label='Tiling', value=False, elem_id="txt2img_tiling")
+ enable_hr = gr.Checkbox(label='Highres. fix', value=False, elem_id="txt2img_enable_hr")
with gr.Row(visible=False) as hr_options:
- firstphase_width = gr.Slider(minimum=0, maximum=1024, step=8, label="Firstpass width", value=0)
- firstphase_height = gr.Slider(minimum=0, maximum=1024, step=8, label="Firstpass height", value=0)
- denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.7)
+ firstphase_width = gr.Slider(minimum=0, maximum=1024, step=8, label="Firstpass width", value=0, elem_id="txt2img_firstphase_width")
+ firstphase_height = gr.Slider(minimum=0, maximum=1024, step=8, label="Firstpass height", value=0, elem_id="txt2img_firstphase_height")
+ denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.7, elem_id="txt2img_denoising_strength")
with gr.Row(equal_height=True):
- batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1)
- batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1)
+ batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="txt2img_batch_count")
+ batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="txt2img_batch_size")
- cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0)
+ cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0, elem_id="txt2img_cfg_scale")
- seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs()
+ seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs('txt2img')
- with gr.Group():
+ with gr.Group(elem_id="txt2img_script_container"):
custom_inputs = modules.scripts.scripts_txt2img.setup_ui()
txt2img_gallery, generation_info, html_info, html_log = create_output_panel("txt2img", opts.outdir_txt2img_samples)
@@ -821,10 +821,10 @@ def create_ui():
with gr.Column(variant='panel', elem_id="img2img_settings"):
with gr.Tabs(elem_id="mode_img2img") as tabs_img2img_mode:
- with gr.TabItem('img2img', id='img2img'):
+ with gr.TabItem('img2img', id='img2img', elem_id="img2img_img2img_tab"):
init_img = gr.Image(label="Image for img2img", elem_id="img2img_image", show_label=False, source="upload", interactive=True, type="pil", tool=cmd_opts.gradio_img2img_tool, image_mode="RGBA").style(height=480)
- with gr.TabItem('Inpaint', id='inpaint'):
+ with gr.TabItem('Inpaint', id='inpaint', elem_id="img2img_inpaint_tab"):
init_img_with_mask = gr.Image(label="Image for inpainting with mask", show_label=False, elem_id="img2maskimg", source="upload", interactive=True, type="pil", tool=cmd_opts.gradio_inpaint_tool, image_mode="RGBA").style(height=480)
init_img_with_mask_orig = gr.State(None)
@@ -843,24 +843,24 @@ def create_ui():
init_mask_inpaint = gr.Image(label="Mask", source="upload", interactive=True, type="pil", visible=False, elem_id="img_inpaint_mask")
with gr.Row():
- mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=4)
- mask_alpha = gr.Slider(label="Mask transparency", interactive=use_color_sketch, visible=use_color_sketch)
+ mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=4, elem_id="img2img_mask_blur")
+ mask_alpha = gr.Slider(label="Mask transparency", interactive=use_color_sketch, visible=use_color_sketch, elem_id="img2img_mask_alpha")
with gr.Row():
mask_mode = gr.Radio(label="Mask mode", show_label=False, choices=["Draw mask", "Upload mask"], type="index", value="Draw mask", elem_id="mask_mode")
- inpainting_mask_invert = gr.Radio(label='Masking mode', show_label=False, choices=['Inpaint masked', 'Inpaint not masked'], value='Inpaint masked', type="index")
+ inpainting_mask_invert = gr.Radio(label='Masking mode', show_label=False, choices=['Inpaint masked', 'Inpaint not masked'], value='Inpaint masked', type="index", elem_id="img2img_mask_mode")
- inpainting_fill = gr.Radio(label='Masked content', choices=['fill', 'original', 'latent noise', 'latent nothing'], value='original', type="index")
+ inpainting_fill = gr.Radio(label='Masked content', choices=['fill', 'original', 'latent noise', 'latent nothing'], value='original', type="index", elem_id="img2img_inpainting_fill")
with gr.Row():
- inpaint_full_res = gr.Checkbox(label='Inpaint at full resolution', value=False)
- inpaint_full_res_padding = gr.Slider(label='Inpaint at full resolution padding, pixels', minimum=0, maximum=256, step=4, value=32)
+ inpaint_full_res = gr.Checkbox(label='Inpaint at full resolution', value=False, elem_id="img2img_inpaint_full_res")
+ inpaint_full_res_padding = gr.Slider(label='Inpaint at full resolution padding, pixels', minimum=0, maximum=256, step=4, value=32, elem_id="img2img_inpaint_full_res_padding")
- with gr.TabItem('Batch img2img', id='batch'):
+ with gr.TabItem('Batch img2img', id='batch', elem_id="img2img_batch_tab"):
hidden = '
Disabled when launched with --hide-ui-dir-config.' if shared.cmd_opts.hide_ui_dir_config else ''
gr.HTML(f"Process images in a directory on the same machine where the server is running.
Use an empty output directory to save pictures normally instead of writing to the output directory.{hidden}
")
- img2img_batch_input_dir = gr.Textbox(label="Input directory", **shared.hide_dirs)
- img2img_batch_output_dir = gr.Textbox(label="Output directory", **shared.hide_dirs)
+ img2img_batch_input_dir = gr.Textbox(label="Input directory", **shared.hide_dirs, elem_id="img2img_batch_input_dir")
+ img2img_batch_output_dir = gr.Textbox(label="Output directory", **shared.hide_dirs, elem_id="img2img_batch_output_dir")
with gr.Row():
resize_mode = gr.Radio(label="Resize mode", elem_id="resize_mode", show_label=False, choices=["Just resize", "Crop and resize", "Resize and fill", "Just resize (latent upscale)"], type="index", value="Just resize")
@@ -872,20 +872,20 @@ def create_ui():
height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="img2img_height")
with gr.Row():
- restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1)
- tiling = gr.Checkbox(label='Tiling', value=False)
+ restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1, elem_id="img2img_restore_faces")
+ tiling = gr.Checkbox(label='Tiling', value=False, elem_id="img2img_tiling")
with gr.Row():
- batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1)
- batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1)
+ batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="img2img_batch_count")
+ batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="img2img_batch_size")
with gr.Group():
- cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0)
- denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.75)
+ cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0, elem_id="img2img_cfg_scale")
+ denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.75, elem_id="img2img_denoising_strength")
- seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs()
+ seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs('img2img')
- with gr.Group():
+ with gr.Group(elem_id="img2img_script_container"):
custom_inputs = modules.scripts.scripts_img2img.setup_ui()
img2img_gallery, generation_info, html_info, html_log = create_output_panel("img2img", opts.outdir_img2img_samples)
@@ -1032,45 +1032,45 @@ def create_ui():
with gr.Row().style(equal_height=False):
with gr.Column(variant='panel'):
with gr.Tabs(elem_id="mode_extras"):
- with gr.TabItem('Single Image'):
- extras_image = gr.Image(label="Source", source="upload", interactive=True, type="pil")
+ with gr.TabItem('Single Image', elem_id="extras_single_tab"):
+ extras_image = gr.Image(label="Source", source="upload", interactive=True, type="pil", elem_id="extras_image")
- with gr.TabItem('Batch Process'):
- image_batch = gr.File(label="Batch Process", file_count="multiple", interactive=True, type="file")
+ with gr.TabItem('Batch Process', elem_id="extras_batch_process_tab"):
+ image_batch = gr.File(label="Batch Process", file_count="multiple", interactive=True, type="file", elem_id="extras_image_batch")
- with gr.TabItem('Batch from Directory'):
- extras_batch_input_dir = gr.Textbox(label="Input directory", **shared.hide_dirs, placeholder="A directory on the same machine where the server is running.")
- extras_batch_output_dir = gr.Textbox(label="Output directory", **shared.hide_dirs, placeholder="Leave blank to save images to the default path.")
- show_extras_results = gr.Checkbox(label='Show result images', value=True)
+ with gr.TabItem('Batch from Directory', elem_id="extras_batch_directory_tab"):
+ extras_batch_input_dir = gr.Textbox(label="Input directory", **shared.hide_dirs, placeholder="A directory on the same machine where the server is running.", elem_id="extras_batch_input_dir")
+ extras_batch_output_dir = gr.Textbox(label="Output directory", **shared.hide_dirs, placeholder="Leave blank to save images to the default path.", elem_id="extras_batch_output_dir")
+ show_extras_results = gr.Checkbox(label='Show result images', value=True, elem_id="extras_show_extras_results")
submit = gr.Button('Generate', elem_id="extras_generate", variant='primary')
with gr.Tabs(elem_id="extras_resize_mode"):
- with gr.TabItem('Scale by'):
- upscaling_resize = gr.Slider(minimum=1.0, maximum=8.0, step=0.05, label="Resize", value=4)
- with gr.TabItem('Scale to'):
+ with gr.TabItem('Scale by', elem_id="extras_scale_by_tab"):
+ upscaling_resize = gr.Slider(minimum=1.0, maximum=8.0, step=0.05, label="Resize", value=4, elem_id="extras_upscaling_resize")
+ with gr.TabItem('Scale to', elem_id="extras_scale_to_tab"):
with gr.Group():
with gr.Row():
- upscaling_resize_w = gr.Number(label="Width", value=512, precision=0)
- upscaling_resize_h = gr.Number(label="Height", value=512, precision=0)
- upscaling_crop = gr.Checkbox(label='Crop to fit', value=True)
+ upscaling_resize_w = gr.Number(label="Width", value=512, precision=0, elem_id="extras_upscaling_resize_w")
+ upscaling_resize_h = gr.Number(label="Height", value=512, precision=0, elem_id="extras_upscaling_resize_h")
+ upscaling_crop = gr.Checkbox(label='Crop to fit', value=True, elem_id="extras_upscaling_crop")
with gr.Group():
extras_upscaler_1 = gr.Radio(label='Upscaler 1', elem_id="extras_upscaler_1", choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name, type="index")
with gr.Group():
extras_upscaler_2 = gr.Radio(label='Upscaler 2', elem_id="extras_upscaler_2", choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name, type="index")
- extras_upscaler_2_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="Upscaler 2 visibility", value=1)
+ extras_upscaler_2_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="Upscaler 2 visibility", value=1, elem_id="extras_upscaler_2_visibility")
with gr.Group():
- gfpgan_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="GFPGAN visibility", value=0, interactive=modules.gfpgan_model.have_gfpgan)
+ gfpgan_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="GFPGAN visibility", value=0, interactive=modules.gfpgan_model.have_gfpgan, elem_id="extras_gfpgan_visibility")
with gr.Group():
- codeformer_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="CodeFormer visibility", value=0, interactive=modules.codeformer_model.have_codeformer)
- codeformer_weight = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="CodeFormer weight (0 = maximum effect, 1 = minimum effect)", value=0, interactive=modules.codeformer_model.have_codeformer)
+ codeformer_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="CodeFormer visibility", value=0, interactive=modules.codeformer_model.have_codeformer, elem_id="extras_codeformer_visibility")
+ codeformer_weight = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="CodeFormer weight (0 = maximum effect, 1 = minimum effect)", value=0, interactive=modules.codeformer_model.have_codeformer, elem_id="extras_codeformer_weight")
with gr.Group():
- upscale_before_face_fix = gr.Checkbox(label='Upscale Before Restoring Faces', value=False)
+ upscale_before_face_fix = gr.Checkbox(label='Upscale Before Restoring Faces', value=False, elem_id="extras_upscale_before_face_fix")
result_images, html_info_x, html_info, html_log = create_output_panel("extras", opts.outdir_extras_samples)
@@ -1117,7 +1117,7 @@ def create_ui():
with gr.Column(variant='panel'):
html = gr.HTML()
- generation_info = gr.Textbox(visible=False)
+ generation_info = gr.Textbox(visible=False, elem_id="pnginfo_generation_info")
html2 = gr.HTML()
with gr.Row():
buttons = parameters_copypaste.create_buttons(["txt2img", "img2img", "inpaint", "extras"])
@@ -1144,13 +1144,13 @@ def create_ui():
tertiary_model_name = gr.Dropdown(modules.sd_models.checkpoint_tiles(), elem_id="modelmerger_tertiary_model_name", label="Tertiary model (C)")
create_refresh_button(tertiary_model_name, modules.sd_models.list_models, lambda: {"choices": modules.sd_models.checkpoint_tiles()}, "refresh_checkpoint_C")
- custom_name = gr.Textbox(label="Custom Name (Optional)")
- interp_amount = gr.Slider(minimum=0.0, maximum=1.0, step=0.05, label='Multiplier (M) - set to 0 to get model A', value=0.3)
- interp_method = gr.Radio(choices=["Weighted sum", "Add difference"], value="Weighted sum", label="Interpolation Method")
+ custom_name = gr.Textbox(label="Custom Name (Optional)", elem_id="modelmerger_custom_name")
+ interp_amount = gr.Slider(minimum=0.0, maximum=1.0, step=0.05, label='Multiplier (M) - set to 0 to get model A', value=0.3, elem_id="modelmerger_interp_amount")
+ interp_method = gr.Radio(choices=["Weighted sum", "Add difference"], value="Weighted sum", label="Interpolation Method", elem_id="modelmerger_interp_method")
with gr.Row():
- checkpoint_format = gr.Radio(choices=["ckpt", "safetensors"], value="ckpt", label="Checkpoint format")
- save_as_half = gr.Checkbox(value=False, label="Save as float16")
+ checkpoint_format = gr.Radio(choices=["ckpt", "safetensors"], value="ckpt", label="Checkpoint format", elem_id="modelmerger_checkpoint_format")
+ save_as_half = gr.Checkbox(value=False, label="Save as float16", elem_id="modelmerger_save_as_half")
modelmerger_merge = gr.Button(elem_id="modelmerger_merge", label="Merge", variant='primary')
@@ -1165,58 +1165,58 @@ def create_ui():
with gr.Tabs(elem_id="train_tabs"):
with gr.Tab(label="Create embedding"):
- new_embedding_name = gr.Textbox(label="Name")
- initialization_text = gr.Textbox(label="Initialization text", value="*")
- nvpt = gr.Slider(label="Number of vectors per token", minimum=1, maximum=75, step=1, value=1)
- overwrite_old_embedding = gr.Checkbox(value=False, label="Overwrite Old Embedding")
+ new_embedding_name = gr.Textbox(label="Name", elem_id="train_new_embedding_name")
+ initialization_text = gr.Textbox(label="Initialization text", value="*", elem_id="train_initialization_text")
+ nvpt = gr.Slider(label="Number of vectors per token", minimum=1, maximum=75, step=1, value=1, elem_id="train_nvpt")
+ overwrite_old_embedding = gr.Checkbox(value=False, label="Overwrite Old Embedding", elem_id="train_overwrite_old_embedding")
with gr.Row():
with gr.Column(scale=3):
gr.HTML(value="")
with gr.Column():
- create_embedding = gr.Button(value="Create embedding", variant='primary')
+ create_embedding = gr.Button(value="Create embedding", variant='primary', elem_id="train_create_embedding")
with gr.Tab(label="Create hypernetwork"):
- new_hypernetwork_name = gr.Textbox(label="Name")
- new_hypernetwork_sizes = gr.CheckboxGroup(label="Modules", value=["768", "320", "640", "1280"], choices=["768", "1024", "320", "640", "1280"])
- new_hypernetwork_layer_structure = gr.Textbox("1, 2, 1", label="Enter hypernetwork layer structure", placeholder="1st and last digit must be 1. ex:'1, 2, 1'")
- new_hypernetwork_activation_func = gr.Dropdown(value="linear", label="Select activation function of hypernetwork. Recommended : Swish / Linear(none)", choices=modules.hypernetworks.ui.keys)
- new_hypernetwork_initialization_option = gr.Dropdown(value = "Normal", label="Select Layer weights initialization. Recommended: Kaiming for relu-like, Xavier for sigmoid-like, Normal otherwise", choices=["Normal", "KaimingUniform", "KaimingNormal", "XavierUniform", "XavierNormal"])
- new_hypernetwork_add_layer_norm = gr.Checkbox(label="Add layer normalization")
- new_hypernetwork_use_dropout = gr.Checkbox(label="Use dropout")
- overwrite_old_hypernetwork = gr.Checkbox(value=False, label="Overwrite Old Hypernetwork")
+ new_hypernetwork_name = gr.Textbox(label="Name", elem_id="train_new_hypernetwork_name")
+ new_hypernetwork_sizes = gr.CheckboxGroup(label="Modules", value=["768", "320", "640", "1280"], choices=["768", "1024", "320", "640", "1280"], elem_id="train_new_hypernetwork_sizes")
+ new_hypernetwork_layer_structure = gr.Textbox("1, 2, 1", label="Enter hypernetwork layer structure", placeholder="1st and last digit must be 1. ex:'1, 2, 1'", elem_id="train_new_hypernetwork_layer_structure")
+ new_hypernetwork_activation_func = gr.Dropdown(value="linear", label="Select activation function of hypernetwork. Recommended : Swish / Linear(none)", choices=modules.hypernetworks.ui.keys, elem_id="train_new_hypernetwork_activation_func")
+ new_hypernetwork_initialization_option = gr.Dropdown(value = "Normal", label="Select Layer weights initialization. Recommended: Kaiming for relu-like, Xavier for sigmoid-like, Normal otherwise", choices=["Normal", "KaimingUniform", "KaimingNormal", "XavierUniform", "XavierNormal"], elem_id="train_new_hypernetwork_initialization_option")
+ new_hypernetwork_add_layer_norm = gr.Checkbox(label="Add layer normalization", elem_id="train_new_hypernetwork_add_layer_norm")
+ new_hypernetwork_use_dropout = gr.Checkbox(label="Use dropout", elem_id="train_new_hypernetwork_use_dropout")
+ overwrite_old_hypernetwork = gr.Checkbox(value=False, label="Overwrite Old Hypernetwork", elem_id="train_overwrite_old_hypernetwork")
with gr.Row():
with gr.Column(scale=3):
gr.HTML(value="")
with gr.Column():
- create_hypernetwork = gr.Button(value="Create hypernetwork", variant='primary')
+ create_hypernetwork = gr.Button(value="Create hypernetwork", variant='primary', elem_id="train_create_hypernetwork")
with gr.Tab(label="Preprocess images"):
- process_src = gr.Textbox(label='Source directory')
- process_dst = gr.Textbox(label='Destination directory')
- process_width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512)
- process_height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512)
- preprocess_txt_action = gr.Dropdown(label='Existing Caption txt Action', value="ignore", choices=["ignore", "copy", "prepend", "append"])
+ process_src = gr.Textbox(label='Source directory', elem_id="train_process_src")
+ process_dst = gr.Textbox(label='Destination directory', elem_id="train_process_dst")
+ process_width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="train_process_width")
+ process_height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="train_process_height")
+ preprocess_txt_action = gr.Dropdown(label='Existing Caption txt Action', value="ignore", choices=["ignore", "copy", "prepend", "append"], elem_id="train_preprocess_txt_action")
with gr.Row():
- process_flip = gr.Checkbox(label='Create flipped copies')
- process_split = gr.Checkbox(label='Split oversized images')
- process_focal_crop = gr.Checkbox(label='Auto focal point crop')
- process_caption = gr.Checkbox(label='Use BLIP for caption')
- process_caption_deepbooru = gr.Checkbox(label='Use deepbooru for caption', visible=True)
+ process_flip = gr.Checkbox(label='Create flipped copies', elem_id="train_process_flip")
+ process_split = gr.Checkbox(label='Split oversized images', elem_id="train_process_split")
+ process_focal_crop = gr.Checkbox(label='Auto focal point crop', elem_id="train_process_focal_crop")
+ process_caption = gr.Checkbox(label='Use BLIP for caption', elem_id="train_process_caption")
+ process_caption_deepbooru = gr.Checkbox(label='Use deepbooru for caption', visible=True, elem_id="train_process_caption_deepbooru")
with gr.Row(visible=False) as process_split_extra_row:
- process_split_threshold = gr.Slider(label='Split image threshold', value=0.5, minimum=0.0, maximum=1.0, step=0.05)
- process_overlap_ratio = gr.Slider(label='Split image overlap ratio', value=0.2, minimum=0.0, maximum=0.9, step=0.05)
+ process_split_threshold = gr.Slider(label='Split image threshold', value=0.5, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_split_threshold")
+ process_overlap_ratio = gr.Slider(label='Split image overlap ratio', value=0.2, minimum=0.0, maximum=0.9, step=0.05, elem_id="train_process_overlap_ratio")
with gr.Row(visible=False) as process_focal_crop_row:
- process_focal_crop_face_weight = gr.Slider(label='Focal point face weight', value=0.9, minimum=0.0, maximum=1.0, step=0.05)
- process_focal_crop_entropy_weight = gr.Slider(label='Focal point entropy weight', value=0.15, minimum=0.0, maximum=1.0, step=0.05)
- process_focal_crop_edges_weight = gr.Slider(label='Focal point edges weight', value=0.5, minimum=0.0, maximum=1.0, step=0.05)
- process_focal_crop_debug = gr.Checkbox(label='Create debug image')
+ process_focal_crop_face_weight = gr.Slider(label='Focal point face weight', value=0.9, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_focal_crop_face_weight")
+ process_focal_crop_entropy_weight = gr.Slider(label='Focal point entropy weight', value=0.15, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_focal_crop_entropy_weight")
+ process_focal_crop_edges_weight = gr.Slider(label='Focal point edges weight', value=0.5, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_focal_crop_edges_weight")
+ process_focal_crop_debug = gr.Checkbox(label='Create debug image', elem_id="train_process_focal_crop_debug")
with gr.Row():
with gr.Column(scale=3):
@@ -1224,8 +1224,8 @@ def create_ui():
with gr.Column():
with gr.Row():
- interrupt_preprocessing = gr.Button("Interrupt")
- run_preprocess = gr.Button(value="Preprocess", variant='primary')
+ interrupt_preprocessing = gr.Button("Interrupt", elem_id="train_interrupt_preprocessing")
+ run_preprocess = gr.Button(value="Preprocess", variant='primary', elem_id="train_run_preprocess")
process_split.change(
fn=lambda show: gr_show(show),
@@ -1248,31 +1248,31 @@ def create_ui():
train_hypernetwork_name = gr.Dropdown(label='Hypernetwork', elem_id="train_hypernetwork", choices=[x for x in shared.hypernetworks.keys()])
create_refresh_button(train_hypernetwork_name, shared.reload_hypernetworks, lambda: {"choices": sorted([x for x in shared.hypernetworks.keys()])}, "refresh_train_hypernetwork_name")
with gr.Row():
- embedding_learn_rate = gr.Textbox(label='Embedding Learning rate', placeholder="Embedding Learning rate", value="0.005")
- hypernetwork_learn_rate = gr.Textbox(label='Hypernetwork Learning rate', placeholder="Hypernetwork Learning rate", value="0.00001")
+ embedding_learn_rate = gr.Textbox(label='Embedding Learning rate', placeholder="Embedding Learning rate", value="0.005", elem_id="train_embedding_learn_rate")
+ hypernetwork_learn_rate = gr.Textbox(label='Hypernetwork Learning rate', placeholder="Hypernetwork Learning rate", value="0.00001", elem_id="train_hypernetwork_learn_rate")
- batch_size = gr.Number(label='Batch size', value=1, precision=0)
- gradient_step = gr.Number(label='Gradient accumulation steps', value=1, precision=0)
- dataset_directory = gr.Textbox(label='Dataset directory', placeholder="Path to directory with input images")
- log_directory = gr.Textbox(label='Log directory', placeholder="Path to directory where to write outputs", value="textual_inversion")
- template_file = gr.Textbox(label='Prompt template file', value=os.path.join(script_path, "textual_inversion_templates", "style_filewords.txt"))
- training_width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512)
- training_height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512)
- steps = gr.Number(label='Max steps', value=100000, precision=0)
- create_image_every = gr.Number(label='Save an image to log directory every N steps, 0 to disable', value=500, precision=0)
- save_embedding_every = gr.Number(label='Save a copy of embedding to log directory every N steps, 0 to disable', value=500, precision=0)
- save_image_with_stored_embedding = gr.Checkbox(label='Save images with embedding in PNG chunks', value=True)
- preview_from_txt2img = gr.Checkbox(label='Read parameters (prompt, etc...) from txt2img tab when making previews', value=False)
+ batch_size = gr.Number(label='Batch size', value=1, precision=0, elem_id="train_batch_size")
+ gradient_step = gr.Number(label='Gradient accumulation steps', value=1, precision=0, elem_id="train_gradient_step")
+ dataset_directory = gr.Textbox(label='Dataset directory', placeholder="Path to directory with input images", elem_id="train_dataset_directory")
+ log_directory = gr.Textbox(label='Log directory', placeholder="Path to directory where to write outputs", value="textual_inversion", elem_id="train_log_directory")
+ template_file = gr.Textbox(label='Prompt template file', value=os.path.join(script_path, "textual_inversion_templates", "style_filewords.txt"), elem_id="train_template_file")
+ training_width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="train_training_width")
+ training_height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="train_training_height")
+ steps = gr.Number(label='Max steps', value=100000, precision=0, elem_id="train_steps")
+ create_image_every = gr.Number(label='Save an image to log directory every N steps, 0 to disable', value=500, precision=0, elem_id="train_create_image_every")
+ save_embedding_every = gr.Number(label='Save a copy of embedding to log directory every N steps, 0 to disable', value=500, precision=0, elem_id="train_save_embedding_every")
+ save_image_with_stored_embedding = gr.Checkbox(label='Save images with embedding in PNG chunks', value=True, elem_id="train_save_image_with_stored_embedding")
+ preview_from_txt2img = gr.Checkbox(label='Read parameters (prompt, etc...) from txt2img tab when making previews', value=False, elem_id="train_preview_from_txt2img")
with gr.Row():
- shuffle_tags = gr.Checkbox(label="Shuffle tags by ',' when creating prompts.", value=False)
- tag_drop_out = gr.Slider(minimum=0, maximum=1, step=0.1, label="Drop out tags when creating prompts.", value=0)
+ shuffle_tags = gr.Checkbox(label="Shuffle tags by ',' when creating prompts.", value=False, elem_id="train_shuffle_tags")
+ tag_drop_out = gr.Slider(minimum=0, maximum=1, step=0.1, label="Drop out tags when creating prompts.", value=0, elem_id="train_tag_drop_out")
with gr.Row():
- latent_sampling_method = gr.Radio(label='Choose latent sampling method', value="once", choices=['once', 'deterministic', 'random'])
+ latent_sampling_method = gr.Radio(label='Choose latent sampling method', value="once", choices=['once', 'deterministic', 'random'], elem_id="train_latent_sampling_method")
with gr.Row():
- interrupt_training = gr.Button(value="Interrupt")
- train_hypernetwork = gr.Button(value="Train Hypernetwork", variant='primary')
- train_embedding = gr.Button(value="Train Embedding", variant='primary')
+ interrupt_training = gr.Button(value="Interrupt", elem_id="train_interrupt_training")
+ train_hypernetwork = gr.Button(value="Train Hypernetwork", variant='primary', elem_id="train_train_hypernetwork")
+ train_embedding = gr.Button(value="Train Embedding", variant='primary', elem_id="train_train_embedding")
params = script_callbacks.UiTrainTabParams(txt2img_preview_params)
@@ -1490,7 +1490,7 @@ def create_ui():
return gr.update(value=value), opts.dumpjson()
with gr.Blocks(analytics_enabled=False) as settings_interface:
- settings_submit = gr.Button(value="Apply settings", variant='primary')
+ settings_submit = gr.Button(value="Apply settings", variant='primary', elem_id="settings_submit")
result = gr.HTML()
settings_cols = 3
@@ -1541,8 +1541,8 @@ def create_ui():
download_localization = gr.Button(value='Download localization template', elem_id="download_localization")
with gr.Row():
- reload_script_bodies = gr.Button(value='Reload custom script bodies (No ui updates, No restart)', variant='secondary')
- restart_gradio = gr.Button(value='Restart Gradio and Refresh components (Custom Scripts, ui.py, js and css only)', variant='primary')
+ reload_script_bodies = gr.Button(value='Reload custom script bodies (No ui updates, No restart)', variant='secondary', elem_id="settings_reload_script_bodies")
+ restart_gradio = gr.Button(value='Restart Gradio and Refresh components (Custom Scripts, ui.py, js and css only)', variant='primary', elem_id="settings_restart_gradio")
request_notifications.click(
fn=lambda: None,
diff --git a/style.css b/style.css
index f168571e..924d4ae7 100644
--- a/style.css
+++ b/style.css
@@ -73,7 +73,7 @@
margin-right: auto;
}
-#random_seed, #random_subseed, #reuse_seed, #reuse_subseed, #open_folder{
+[id$=_random_seed], [id$=_random_subseed], [id$=_reuse_seed], [id$=_reuse_subseed], #open_folder{
min-width: auto;
flex-grow: 0;
padding-left: 0.25em;
@@ -84,27 +84,27 @@
display: none;
}
-#seed_row, #subseed_row{
+[id$=_seed_row], [id$=_subseed_row]{
gap: 0.5rem;
}
-#subseed_show_box{
+[id$=_subseed_show_box]{
min-width: auto;
flex-grow: 0;
}
-#subseed_show_box > div{
+[id$=_subseed_show_box] > div{
border: 0;
height: 100%;
}
-#subseed_show{
+[id$=_subseed_show]{
min-width: auto;
flex-grow: 0;
padding: 0;
}
-#subseed_show label{
+[id$=_subseed_show] label{
height: 100%;
}
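
The CSS change above pairs with the new per-tab element ids: create_seed_inputs now takes the calling tab's name and prefixes every widget id with it, so a suffix selector such as [id$=_random_seed] styles both txt2img_random_seed and img2img_random_seed with one rule. A minimal sketch of the prefix pattern (abridged; the real helper in modules/ui.py builds more widgets than shown here):

    import gradio as gr

    def create_seed_inputs(target_interface):
        # Sketch only: each widget id carries the tab prefix, so one CSS rule
        # like [id$=_random_seed] matches every tab's copy of the button.
        with gr.Row(elem_id=target_interface + "_seed_row"):
            seed = gr.Number(label="Seed", value=-1, elem_id=target_interface + "_seed")
            random_seed = gr.Button("Random", elem_id=target_interface + "_random_seed")
            reuse_seed = gr.Button("Reuse", elem_id=target_interface + "_reuse_seed")
        return seed, random_seed, reuse_seed
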
From 311354c0bb8930ea939d6aa6b3edd50c69301320 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Mon, 2 Jan 2023 00:38:09 +0300
Subject: [PATCH 028/172] fix the issue with training on SD2.0
---
modules/sd_models.py | 2 ++
modules/textual_inversion/textual_inversion.py | 3 +--
2 files changed, 3 insertions(+), 2 deletions(-)
diff --git a/modules/sd_models.py b/modules/sd_models.py
index ebd4dff7..bff8d6c9 100644
--- a/modules/sd_models.py
+++ b/modules/sd_models.py
@@ -228,6 +228,8 @@ def load_model_weights(model, checkpoint_info, vae_file="auto"):
model.sd_model_checkpoint = checkpoint_file
model.sd_checkpoint_info = checkpoint_info
+ model.logvar = model.logvar.to(devices.device) # fix for training
+
sd_vae.delete_base_vae()
sd_vae.clear_loaded_vae()
vae_file = sd_vae.resolve_vae(checkpoint_file, vae_file=vae_file)
diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py
index 66f40367..1e5722e7 100644
--- a/modules/textual_inversion/textual_inversion.py
+++ b/modules/textual_inversion/textual_inversion.py
@@ -282,7 +282,7 @@ def train_embedding(embedding_name, learn_rate, batch_size, gradient_step, data_
return embedding, filename
scheduler = LearnRateScheduler(learn_rate, steps, initial_step)
- # dataset loading may take a while, so input validations and early returns should be done before this
+ # dataset loading may take a while, so input validations and early returns should be done before this
shared.state.textinfo = f"Preparing dataset from {html.escape(data_root)}..."
old_parallel_processing_allowed = shared.parallel_processing_allowed
@@ -310,7 +310,6 @@ def train_embedding(embedding_name, learn_rate, batch_size, gradient_step, data_
loss_step = 0
_loss_step = 0 #internal
-
last_saved_file = ""
last_saved_image = ""
forced_filename = ""
From b5819d9bf1794071139c640b5f1e72c84a0e051a Mon Sep 17 00:00:00 2001
From: Philpax
Date: Mon, 2 Jan 2023 10:17:33 +1100
Subject: [PATCH 029/172] feat(api): add /sdapi/v1/embeddings
---
modules/api/api.py | 8 ++++++++
modules/api/models.py | 3 +++
2 files changed, 11 insertions(+)
diff --git a/modules/api/api.py b/modules/api/api.py
index 11daff0d..30bf3dac 100644
--- a/modules/api/api.py
+++ b/modules/api/api.py
@@ -100,6 +100,7 @@ class Api:
self.add_api_route("/sdapi/v1/prompt-styles", self.get_prompt_styles, methods=["GET"], response_model=List[PromptStyleItem])
self.add_api_route("/sdapi/v1/artist-categories", self.get_artists_categories, methods=["GET"], response_model=List[str])
self.add_api_route("/sdapi/v1/artists", self.get_artists, methods=["GET"], response_model=List[ArtistItem])
+ self.add_api_route("/sdapi/v1/embeddings", self.get_embeddings, methods=["GET"], response_model=EmbeddingsResponse)
self.add_api_route("/sdapi/v1/refresh-checkpoints", self.refresh_checkpoints, methods=["POST"])
self.add_api_route("/sdapi/v1/create/embedding", self.create_embedding, methods=["POST"], response_model=CreateResponse)
self.add_api_route("/sdapi/v1/create/hypernetwork", self.create_hypernetwork, methods=["POST"], response_model=CreateResponse)
@@ -327,6 +328,13 @@ class Api:
def get_artists(self):
return [{"name":x[0], "score":x[1], "category":x[2]} for x in shared.artist_db.artists]
+ def get_embeddings(self):
+ db = sd_hijack.model_hijack.embedding_db
+ return {
+ "loaded": sorted(db.word_embeddings.keys()),
+ "skipped": sorted(db.skipped_embeddings),
+ }
+
def refresh_checkpoints(self):
shared.refresh_checkpoints()
diff --git a/modules/api/models.py b/modules/api/models.py
index c446ce7a..a8472dc9 100644
--- a/modules/api/models.py
+++ b/modules/api/models.py
@@ -249,3 +249,6 @@ class ArtistItem(BaseModel):
score: float = Field(title="Score")
category: str = Field(title="Category")
+class EmbeddingsResponse(BaseModel):
+ loaded: List[str] = Field(title="Loaded", description="Embeddings loaded for the current model")
+ skipped: List[str] = Field(title="Skipped", description="Embeddings skipped for the current model (likely due to architecture incompatibility)")
\ No newline at end of file
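
At this point the endpoint returns two sorted lists of names. A hedged client-side example (host and port are the webui defaults; adjust as needed). Note that the very next patch changes both fields from name lists to per-embedding dicts:

    import requests

    resp = requests.get("http://127.0.0.1:7860/sdapi/v1/embeddings")
    resp.raise_for_status()
    data = resp.json()
    print("loaded:", ", ".join(data["loaded"]))
    print("skipped:", ", ".join(data["skipped"]))
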
From c65909ad16a1962129114c6251de092f49479b06 Mon Sep 17 00:00:00 2001
From: Philpax
Date: Mon, 2 Jan 2023 12:21:22 +1100
Subject: [PATCH 030/172] feat(api): return more data for embeddings
---
modules/api/api.py | 17 +++++++++++++++--
modules/api/models.py | 11 +++++++++--
modules/textual_inversion/textual_inversion.py | 8 ++++----
3 files changed, 28 insertions(+), 8 deletions(-)
diff --git a/modules/api/api.py b/modules/api/api.py
index 30bf3dac..9c670f00 100644
--- a/modules/api/api.py
+++ b/modules/api/api.py
@@ -330,9 +330,22 @@ class Api:
def get_embeddings(self):
db = sd_hijack.model_hijack.embedding_db
+
+ def convert_embedding(embedding):
+ return {
+ "step": embedding.step,
+ "sd_checkpoint": embedding.sd_checkpoint,
+ "sd_checkpoint_name": embedding.sd_checkpoint_name,
+ "shape": embedding.shape,
+ "vectors": embedding.vectors,
+ }
+
+ def convert_embeddings(embeddings):
+ return {embedding.name: convert_embedding(embedding) for embedding in embeddings.values()}
+
return {
- "loaded": sorted(db.word_embeddings.keys()),
- "skipped": sorted(db.skipped_embeddings),
+ "loaded": convert_embeddings(db.word_embeddings),
+ "skipped": convert_embeddings(db.skipped_embeddings),
}
def refresh_checkpoints(self):
diff --git a/modules/api/models.py b/modules/api/models.py
index a8472dc9..4a632c68 100644
--- a/modules/api/models.py
+++ b/modules/api/models.py
@@ -249,6 +249,13 @@ class ArtistItem(BaseModel):
score: float = Field(title="Score")
category: str = Field(title="Category")
+class EmbeddingItem(BaseModel):
+ step: Optional[int] = Field(title="Step", description="The number of steps that were used to train this embedding, if available")
+ sd_checkpoint: Optional[str] = Field(title="SD Checkpoint", description="The hash of the checkpoint this embedding was trained on, if available")
+ sd_checkpoint_name: Optional[str] = Field(title="SD Checkpoint Name", description="The name of the checkpoint this embedding was trained on, if available. Note that this is the name that was used by the trainer; for a stable identifier, use `sd_checkpoint` instead")
+ shape: int = Field(title="Shape", description="The length of each individual vector in the embedding")
+ vectors: int = Field(title="Vectors", description="The number of vectors in the embedding")
+
class EmbeddingsResponse(BaseModel):
- loaded: List[str] = Field(title="Loaded", description="Embeddings loaded for the current model")
- skipped: List[str] = Field(title="Skipped", description="Embeddings skipped for the current model (likely due to architecture incompatibility)")
\ No newline at end of file
+ loaded: Dict[str, EmbeddingItem] = Field(title="Loaded", description="Embeddings loaded for the current model")
+ skipped: Dict[str, EmbeddingItem] = Field(title="Skipped", description="Embeddings skipped for the current model (likely due to architecture incompatibility)")
\ No newline at end of file
diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py
index 1e5722e7..fd253477 100644
--- a/modules/textual_inversion/textual_inversion.py
+++ b/modules/textual_inversion/textual_inversion.py
@@ -59,7 +59,7 @@ class EmbeddingDatabase:
def __init__(self, embeddings_dir):
self.ids_lookup = {}
self.word_embeddings = {}
- self.skipped_embeddings = []
+ self.skipped_embeddings = {}
self.dir_mtime = None
self.embeddings_dir = embeddings_dir
self.expected_shape = -1
@@ -91,7 +91,7 @@ class EmbeddingDatabase:
self.dir_mtime = mt
self.ids_lookup.clear()
self.word_embeddings.clear()
- self.skipped_embeddings = []
+ self.skipped_embeddings.clear()
self.expected_shape = self.get_expected_shape()
def process_file(path, filename):
@@ -136,7 +136,7 @@ class EmbeddingDatabase:
if self.expected_shape == -1 or self.expected_shape == embedding.shape:
self.register_embedding(embedding, shared.sd_model)
else:
- self.skipped_embeddings.append(name)
+ self.skipped_embeddings[name] = embedding
for fn in os.listdir(self.embeddings_dir):
try:
@@ -153,7 +153,7 @@ class EmbeddingDatabase:
print(f"Textual inversion embeddings loaded({len(self.word_embeddings)}): {', '.join(self.word_embeddings.keys())}")
if len(self.skipped_embeddings) > 0:
- print(f"Textual inversion embeddings skipped({len(self.skipped_embeddings)}): {', '.join(self.skipped_embeddings)}")
+ print(f"Textual inversion embeddings skipped({len(self.skipped_embeddings)}): {', '.join(self.skipped_embeddings.keys())}")
def find_embedding_at_position(self, tokens, offset):
token = tokens[offset]
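
With skipped_embeddings now a dict of Embedding objects, both response fields become name-to-item mappings. A sketch of consuming the richer payload (same hypothetical host as before; step and checkpoint fields may be None for embeddings saved without metadata):

    import requests

    data = requests.get("http://127.0.0.1:7860/sdapi/v1/embeddings").json()
    for name, item in data["loaded"].items():
        print(f"{name}: {item['vectors']} vector(s) of width {item['shape']}, "
              f"step={item['step']}, checkpoint={item['sd_checkpoint_name']}")
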
From ef27a18b6b7cb1a8eebdc9b2e88d25baf2c2414d Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Mon, 2 Jan 2023 19:42:10 +0300
Subject: [PATCH 031/172] Hires fix rework
---
modules/generation_parameters_copypaste.py | 32 ++++++++++
modules/images.py | 24 ++++++--
modules/processing.py | 68 +++++++++-------------
modules/shared.py | 7 ++-
modules/txt2img.py | 6 +-
modules/ui.py | 15 +++--
scripts/xy_grid.py | 4 +-
7 files changed, 96 insertions(+), 60 deletions(-)
diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py
index 8e7f0df0..d6fa822b 100644
--- a/modules/generation_parameters_copypaste.py
+++ b/modules/generation_parameters_copypaste.py
@@ -1,5 +1,6 @@
import base64
import io
+import math
import os
import re
from pathlib import Path
@@ -164,6 +165,35 @@ def find_hypernetwork_key(hypernet_name, hypernet_hash=None):
return None
+def restore_old_hires_fix_params(res):
+ """for infotexts that specify old First pass size parameter, convert it into
+ width, height, and hr scale"""
+
+ firstpass_width = res.get('First pass size-1', None)
+ firstpass_height = res.get('First pass size-2', None)
+
+ if firstpass_width is None or firstpass_height is None:
+ return
+
+ firstpass_width, firstpass_height = int(firstpass_width), int(firstpass_height)
+ width = int(res.get("Size-1", 512))
+ height = int(res.get("Size-2", 512))
+
+ if firstpass_width == 0 or firstpass_height == 0:
+ # old algorithm for auto-calculating first pass size
+ desired_pixel_count = 512 * 512
+ actual_pixel_count = width * height
+ scale = math.sqrt(desired_pixel_count / actual_pixel_count)
+ firstpass_width = math.ceil(scale * width / 64) * 64
+ firstpass_height = math.ceil(scale * height / 64) * 64
+
+ hr_scale = width / firstpass_width if firstpass_width > 0 else height / firstpass_height
+
+ res['Size-1'] = firstpass_width
+ res['Size-2'] = firstpass_height
+ res['Hires upscale'] = hr_scale
+
+
def parse_generation_parameters(x: str):
"""parses generation parameters string, the one you see in text field under the picture in UI:
```
@@ -221,6 +251,8 @@ Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512, Model
hypernet_hash = res.get("Hypernet hash", None)
res["Hypernet"] = find_hypernetwork_key(hypernet_name, hypernet_hash)
+ restore_old_hires_fix_params(res)
+
return res
diff --git a/modules/images.py b/modules/images.py
index f84fd485..c3a5fc8b 100644
--- a/modules/images.py
+++ b/modules/images.py
@@ -230,16 +230,32 @@ def draw_prompt_matrix(im, width, height, all_prompts):
return draw_grid_annotations(im, width, height, hor_texts, ver_texts)
-def resize_image(resize_mode, im, width, height):
+def resize_image(resize_mode, im, width, height, upscaler_name=None):
+ """
+ Resizes an image with the specified resize_mode, width, and height.
+
+ Args:
+ resize_mode: The mode to use when resizing the image.
+ 0: Resize the image to the specified width and height.
+ 1: Resize the image to fill the specified width and height, maintaining the aspect ratio, and then center the image within the dimensions, cropping the excess.
+ 2: Resize the image to fit within the specified width and height, maintaining the aspect ratio, and then center the image within the dimensions, filling the empty space with data from the image.
+ im: The image to resize.
+ width: The width to resize the image to.
+ height: The height to resize the image to.
+ upscaler_name: The name of the upscaler to use. If not provided, defaults to opts.upscaler_for_img2img.
+ """
+
+ upscaler_name = upscaler_name or opts.upscaler_for_img2img
+
def resize(im, w, h):
- if opts.upscaler_for_img2img is None or opts.upscaler_for_img2img == "None" or im.mode == 'L':
+ if upscaler_name is None or upscaler_name == "None" or im.mode == 'L':
return im.resize((w, h), resample=LANCZOS)
scale = max(w / im.width, h / im.height)
if scale > 1.0:
- upscalers = [x for x in shared.sd_upscalers if x.name == opts.upscaler_for_img2img]
- assert len(upscalers) > 0, f"could not find upscaler named {opts.upscaler_for_img2img}"
+ upscalers = [x for x in shared.sd_upscalers if x.name == upscaler_name]
+ assert len(upscalers) > 0, f"could not find upscaler named {upscaler_name}"
upscaler = upscalers[0]
im = upscaler.scaler.upscale(im, scale, upscaler.data_path)
diff --git a/modules/processing.py b/modules/processing.py
index 42dc19ea..4654570c 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -658,14 +658,18 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
sampler = None
- def __init__(self, enable_hr: bool=False, denoising_strength: float=0.75, firstphase_width: int=0, firstphase_height: int=0, **kwargs):
+ def __init__(self, enable_hr: bool = False, denoising_strength: float = 0.75, firstphase_width: int = 0, firstphase_height: int = 0, hr_scale: float = 2.0, hr_upscaler: str = None, **kwargs):
super().__init__(**kwargs)
self.enable_hr = enable_hr
self.denoising_strength = denoising_strength
- self.firstphase_width = firstphase_width
- self.firstphase_height = firstphase_height
- self.truncate_x = 0
- self.truncate_y = 0
+ self.hr_scale = hr_scale
+ self.hr_upscaler = hr_upscaler
+
+ if firstphase_width != 0 or firstphase_height != 0:
+ print("firstphase_width/firstphase_height no longer supported; use hr_scale", file=sys.stderr)
+ self.hr_scale = self.width / firstphase_width
+ self.width = firstphase_width
+ self.height = firstphase_height
def init(self, all_prompts, all_seeds, all_subseeds):
if self.enable_hr:
@@ -674,47 +678,29 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
else:
state.job_count = state.job_count * 2
- self.extra_generation_params["First pass size"] = f"{self.firstphase_width}x{self.firstphase_height}"
-
- if self.firstphase_width == 0 or self.firstphase_height == 0:
- desired_pixel_count = 512 * 512
- actual_pixel_count = self.width * self.height
- scale = math.sqrt(desired_pixel_count / actual_pixel_count)
- self.firstphase_width = math.ceil(scale * self.width / 64) * 64
- self.firstphase_height = math.ceil(scale * self.height / 64) * 64
- firstphase_width_truncated = int(scale * self.width)
- firstphase_height_truncated = int(scale * self.height)
-
- else:
-
- width_ratio = self.width / self.firstphase_width
- height_ratio = self.height / self.firstphase_height
-
- if width_ratio > height_ratio:
- firstphase_width_truncated = self.firstphase_width
- firstphase_height_truncated = self.firstphase_width * self.height / self.width
- else:
- firstphase_width_truncated = self.firstphase_height * self.width / self.height
- firstphase_height_truncated = self.firstphase_height
-
- self.truncate_x = int(self.firstphase_width - firstphase_width_truncated) // opt_f
- self.truncate_y = int(self.firstphase_height - firstphase_height_truncated) // opt_f
+ self.extra_generation_params["Hires upscale"] = self.hr_scale
+ if self.hr_upscaler is not None:
+ self.extra_generation_params["Hires upscaler"] = self.hr_upscaler
def sample(self, conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength, prompts):
self.sampler = sd_samplers.create_sampler(self.sampler_name, self.sd_model)
+ latent_scale_mode = shared.latent_upscale_modes.get(self.hr_upscaler, None) if self.hr_upscaler is not None else shared.latent_upscale_default_mode
+ if self.enable_hr and latent_scale_mode is None:
+ assert len([x for x in shared.sd_upscalers if x.name == self.hr_upscaler]) > 0, f"could not find upscaler named {self.hr_upscaler}"
+
+ x = create_random_tensors([opt_C, self.height // opt_f, self.width // opt_f], seeds=seeds, subseeds=subseeds, subseed_strength=self.subseed_strength, seed_resize_from_h=self.seed_resize_from_h, seed_resize_from_w=self.seed_resize_from_w, p=self)
+ samples = self.sampler.sample(self, x, conditioning, unconditional_conditioning, image_conditioning=self.txt2img_image_conditioning(x))
+
if not self.enable_hr:
- x = create_random_tensors([opt_C, self.height // opt_f, self.width // opt_f], seeds=seeds, subseeds=subseeds, subseed_strength=self.subseed_strength, seed_resize_from_h=self.seed_resize_from_h, seed_resize_from_w=self.seed_resize_from_w, p=self)
- samples = self.sampler.sample(self, x, conditioning, unconditional_conditioning, image_conditioning=self.txt2img_image_conditioning(x))
return samples
- x = create_random_tensors([opt_C, self.firstphase_height // opt_f, self.firstphase_width // opt_f], seeds=seeds, subseeds=subseeds, subseed_strength=self.subseed_strength, seed_resize_from_h=self.seed_resize_from_h, seed_resize_from_w=self.seed_resize_from_w, p=self)
- samples = self.sampler.sample(self, x, conditioning, unconditional_conditioning, image_conditioning=self.txt2img_image_conditioning(x, self.firstphase_width, self.firstphase_height))
+ target_width = int(self.width * self.hr_scale)
+ target_height = int(self.height * self.hr_scale)
- samples = samples[:, :, self.truncate_y//2:samples.shape[2]-self.truncate_y//2, self.truncate_x//2:samples.shape[3]-self.truncate_x//2]
-
- """saves image before applying hires fix, if enabled in options; takes as an argument either an image or batch with latent space images"""
def save_intermediate(image, index):
+ """saves image before applying hires fix, if enabled in options; takes as an argument either an image or batch with latent space images"""
+
if not opts.save or self.do_not_save_samples or not opts.save_images_before_highres_fix:
return
@@ -723,11 +709,11 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
images.save_image(image, self.outpath_samples, "", seeds[index], prompts[index], opts.samples_format, suffix="-before-highres-fix")
- if opts.use_scale_latent_for_hires_fix:
+ if latent_scale_mode is not None:
for i in range(samples.shape[0]):
save_intermediate(samples, i)
- samples = torch.nn.functional.interpolate(samples, size=(self.height // opt_f, self.width // opt_f), mode="bilinear")
+ samples = torch.nn.functional.interpolate(samples, size=(target_height // opt_f, target_width // opt_f), mode=latent_scale_mode)
# Avoid making the inpainting conditioning unless necessary as
# this does need some extra compute to decode / encode the image again.
@@ -747,7 +733,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
save_intermediate(image, i)
- image = images.resize_image(0, image, self.width, self.height)
+ image = images.resize_image(0, image, target_width, target_height, upscaler_name=self.hr_upscaler)
image = np.array(image).astype(np.float32) / 255.0
image = np.moveaxis(image, 2, 0)
batch_images.append(image)
@@ -764,7 +750,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
self.sampler = sd_samplers.create_sampler(self.sampler_name, self.sd_model)
- noise = create_random_tensors(samples.shape[1:], seeds=seeds, subseeds=subseeds, subseed_strength=subseed_strength, seed_resize_from_h=self.seed_resize_from_h, seed_resize_from_w=self.seed_resize_from_w, p=self)
+ noise = create_random_tensors(samples.shape[1:], seeds=seeds, subseeds=subseeds, subseed_strength=subseed_strength, p=self)
# GC now before running the next img2img to prevent running out of memory
x = None
diff --git a/modules/shared.py b/modules/shared.py
index 7f430b93..b65559ee 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -327,7 +327,6 @@ options_templates.update(options_section(('upscaling', "Upscaling"), {
"ESRGAN_tile_overlap": OptionInfo(8, "Tile overlap, in pixels for ESRGAN upscalers. Low values = visible seam.", gr.Slider, {"minimum": 0, "maximum": 48, "step": 1}),
"realesrgan_enabled_models": OptionInfo(["R-ESRGAN 4x+", "R-ESRGAN 4x+ Anime6B"], "Select which Real-ESRGAN models to show in the web UI. (Requires restart)", gr.CheckboxGroup, lambda: {"choices": realesrgan_models_names()}),
"upscaler_for_img2img": OptionInfo(None, "Upscaler for img2img", gr.Dropdown, lambda: {"choices": [x.name for x in sd_upscalers]}),
- "use_scale_latent_for_hires_fix": OptionInfo(False, "Upscale latent space image when doing hires. fix"),
}))
options_templates.update(options_section(('face-restoration', "Face restoration"), {
@@ -545,6 +544,12 @@ opts = Options()
if os.path.exists(config_filename):
opts.load(config_filename)
+latent_upscale_default_mode = "Latent"
+latent_upscale_modes = {
+ "Latent": "bilinear",
+ "Latent (nearest)": "nearest",
+}
+
sd_upscalers = []
sd_model = None
diff --git a/modules/txt2img.py b/modules/txt2img.py
index 7f61e19a..e189a899 100644
--- a/modules/txt2img.py
+++ b/modules/txt2img.py
@@ -8,7 +8,7 @@ import modules.processing as processing
from modules.ui import plaintext_to_html
-def txt2img(prompt: str, negative_prompt: str, prompt_style: str, prompt_style2: str, steps: int, sampler_index: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, enable_hr: bool, denoising_strength: float, firstphase_width: int, firstphase_height: int, *args):
+def txt2img(prompt: str, negative_prompt: str, prompt_style: str, prompt_style2: str, steps: int, sampler_index: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, enable_hr: bool, denoising_strength: float, hr_scale: float, hr_upscaler: str, *args):
p = StableDiffusionProcessingTxt2Img(
sd_model=shared.sd_model,
outpath_samples=opts.outdir_samples or opts.outdir_txt2img_samples,
@@ -33,8 +33,8 @@ def txt2img(prompt: str, negative_prompt: str, prompt_style: str, prompt_style2:
tiling=tiling,
enable_hr=enable_hr,
denoising_strength=denoising_strength if enable_hr else None,
- firstphase_width=firstphase_width if enable_hr else None,
- firstphase_height=firstphase_height if enable_hr else None,
+ hr_scale=hr_scale,
+ hr_upscaler=hr_upscaler,
)
p.scripts = modules.scripts.scripts_txt2img
diff --git a/modules/ui.py b/modules/ui.py
index 7070ea15..27cd9ddd 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -684,11 +684,11 @@ def create_ui():
with gr.Row():
restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1, elem_id="txt2img_restore_faces")
tiling = gr.Checkbox(label='Tiling', value=False, elem_id="txt2img_tiling")
- enable_hr = gr.Checkbox(label='Highres. fix', value=False, elem_id="txt2img_enable_hr")
+ enable_hr = gr.Checkbox(label='Hires. fix', value=False, elem_id="txt2img_enable_hr")
with gr.Row(visible=False) as hr_options:
- firstphase_width = gr.Slider(minimum=0, maximum=1024, step=8, label="Firstpass width", value=0, elem_id="txt2img_firstphase_width")
- firstphase_height = gr.Slider(minimum=0, maximum=1024, step=8, label="Firstpass height", value=0, elem_id="txt2img_firstphase_height")
+ hr_upscaler = gr.Dropdown(label="Upscaler", elem_id="txt2img_hr_upscaler", choices=[*shared.latent_upscale_modes, *[x.name for x in shared.sd_upscalers]], value=shared.latent_upscale_default_mode)
+ hr_scale = gr.Slider(minimum=1.0, maximum=4.0, step=0.05, label="Upscale by", value=2.0, elem_id="txt2img_hr_scale")
denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.7, elem_id="txt2img_denoising_strength")
with gr.Row(equal_height=True):
@@ -729,8 +729,8 @@ def create_ui():
width,
enable_hr,
denoising_strength,
- firstphase_width,
- firstphase_height,
+ hr_scale,
+ hr_upscaler,
] + custom_inputs,
outputs=[
@@ -762,7 +762,6 @@ def create_ui():
outputs=[hr_options],
)
-
txt2img_paste_fields = [
(txt2img_prompt, "Prompt"),
(txt2img_negative_prompt, "Negative prompt"),
@@ -781,8 +780,8 @@ def create_ui():
(denoising_strength, "Denoising strength"),
(enable_hr, lambda d: "Denoising strength" in d),
(hr_options, lambda d: gr.Row.update(visible="Denoising strength" in d)),
- (firstphase_width, "First pass size-1"),
- (firstphase_height, "First pass size-2"),
+ (hr_scale, "Hires upscale"),
+ (hr_upscaler, "Hires upscaler"),
*modules.scripts.scripts_txt2img.infotext_fields
]
parameters_copypaste.add_paste_fields("txt2img", None, txt2img_paste_fields)
diff --git a/scripts/xy_grid.py b/scripts/xy_grid.py
index 3e0b2805..f92f9776 100644
--- a/scripts/xy_grid.py
+++ b/scripts/xy_grid.py
@@ -202,7 +202,7 @@ axis_options = [
AxisOption("Eta", float, apply_field("eta"), format_value_add_label, None),
AxisOption("Clip skip", int, apply_clip_skip, format_value_add_label, None),
AxisOption("Denoising", float, apply_field("denoising_strength"), format_value_add_label, None),
- AxisOption("Upscale latent space for hires.", str, apply_upscale_latent_space, format_value_add_label, None),
+ AxisOption("Hires upscaler", str, apply_field("hr_upscaler"), format_value_add_label, None),
AxisOption("Cond. Image Mask Weight", float, apply_field("inpainting_mask_weight"), format_value_add_label, None),
AxisOption("VAE", str, apply_vae, format_value_add_label, None),
AxisOption("Styles", str, apply_styles, format_value_add_label, None),
@@ -267,7 +267,6 @@ class SharedSettingsStackHelper(object):
self.CLIP_stop_at_last_layers = opts.CLIP_stop_at_last_layers
self.hypernetwork = opts.sd_hypernetwork
self.model = shared.sd_model
- self.use_scale_latent_for_hires_fix = opts.use_scale_latent_for_hires_fix
self.vae = opts.sd_vae
def __exit__(self, exc_type, exc_value, tb):
@@ -278,7 +277,6 @@ class SharedSettingsStackHelper(object):
hypernetwork.apply_strength()
opts.data["CLIP_stop_at_last_layers"] = self.CLIP_stop_at_last_layers
- opts.data["use_scale_latent_for_hires_fix"] = self.use_scale_latent_for_hires_fix
re_range = re.compile(r"\s*([+-]?\s*\d+)\s*-\s*([+-]?\s*\d+)(?:\s*\(([+-]\d+)\s*\))?\s*")
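
restore_old_hires_fix_params re-runs the legacy auto-first-pass arithmetic so old infotexts map onto the new hr_scale parameter. A worked example for a 768x1024 image whose infotext says "First pass size: 0x0":

    import math

    width, height = 768, 1024
    scale = math.sqrt((512 * 512) / (width * height))       # aim for roughly 512x512 pixels total
    firstpass_width = math.ceil(scale * width / 64) * 64    # round up to a multiple of 64
    firstpass_height = math.ceil(scale * height / 64) * 64
    hr_scale = width / firstpass_width

    print(firstpass_width, firstpass_height, hr_scale)      # 448 640 1.7142857142857142
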
From 4dbde228ff48dbb105241b1ed25c21ce3f87d182 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Mon, 2 Jan 2023 20:01:16 +0300
Subject: [PATCH 032/172] make it possible to use fractional values for SD
upscale.
---
modules/upscaler.py | 6 +++---
scripts/sd_upscale.py | 2 +-
2 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/modules/upscaler.py b/modules/upscaler.py
index c4e6e6bd..231680cb 100644
--- a/modules/upscaler.py
+++ b/modules/upscaler.py
@@ -53,10 +53,10 @@ class Upscaler:
def do_upscale(self, img: PIL.Image, selected_model: str):
return img
- def upscale(self, img: PIL.Image, scale: int, selected_model: str = None):
+ def upscale(self, img: PIL.Image, scale, selected_model: str = None):
self.scale = scale
- dest_w = img.width * scale
- dest_h = img.height * scale
+ dest_w = int(img.width * scale)
+ dest_h = int(img.height * scale)
for i in range(3):
shape = (img.width, img.height)
diff --git a/scripts/sd_upscale.py b/scripts/sd_upscale.py
index e8c80a6c..9739545c 100644
--- a/scripts/sd_upscale.py
+++ b/scripts/sd_upscale.py
@@ -19,7 +19,7 @@ class Script(scripts.Script):
def ui(self, is_img2img):
info = gr.HTML("Will upscale the image by the selected scale factor; use width and height sliders to set tile size
")
overlap = gr.Slider(minimum=0, maximum=256, step=16, label='Tile overlap', value=64)
- scale_factor = gr.Slider(minimum=1, maximum=4, step=1, label='Scale Factor', value=2)
+ scale_factor = gr.Slider(minimum=1.0, maximum=4.0, step=0.05, label='Scale Factor', value=2.0)
upscaler_index = gr.Radio(label='Upscaler', choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name, type="index")
return [info, overlap, upscaler_index, scale_factor]
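
Casting the destination size is what keeps fractional factors valid, since PIL sizes must be integers. A small sketch:

    from PIL import Image

    img = Image.new("RGB", (512, 512))
    scale = 1.25                          # now allowed by the 0.05-step slider
    dest_w = int(img.width * scale)       # 640; a float here would make resize() raise
    dest_h = int(img.height * scale)
    print(img.resize((dest_w, dest_h)).size)   # (640, 640)
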
From 84dd7e8e2495c4fc2997e97f8267aa831eb90d11 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Mon, 2 Jan 2023 20:30:02 +0300
Subject: [PATCH 033/172] error out with a readable message in checkpoint
merger for incompatible tensor shapes (ie when trying to merge SD1.5 with
SD2.0)
---
modules/extras.py | 2 ++
modules/ui.py | 2 +-
2 files changed, 3 insertions(+), 1 deletion(-)
diff --git a/modules/extras.py b/modules/extras.py
index 68939dea..5e270250 100644
--- a/modules/extras.py
+++ b/modules/extras.py
@@ -303,6 +303,8 @@ def run_modelmerger(primary_model_name, secondary_model_name, tertiary_model_nam
theta_0[key][:, 0:4, :, :] = theta_func2(a[:, 0:4, :, :], b, multiplier)
result_is_inpainting_model = True
else:
+ assert a.shape == b.shape, f'Incompatible shapes for layer {key}: A is {a.shape}, and B is {b.shape}'
+
theta_0[key] = theta_func2(a, b, multiplier)
if save_as_half:
diff --git a/modules/ui.py b/modules/ui.py
index 27cd9ddd..67a51888 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -1663,7 +1663,7 @@ def create_ui():
print("Error loading/saving model file:", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
modules.sd_models.list_models() # to remove the potentially missing models from the list
- return ["Error loading/saving model file. It doesn't exist or the name contains illegal characters"] + [gr.Dropdown.update(choices=modules.sd_models.checkpoint_tiles()) for _ in range(3)]
+ return [f"Error merging checkpoints: {e}"] + [gr.Dropdown.update(choices=modules.sd_models.checkpoint_tiles()) for _ in range(4)]
return results
modelmerger_merge.click(
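
The new assert turns a cryptic broadcasting error into a readable message. An illustrative repro with made-up shapes (SD1.x and SD2.x checkpoints differ in, for example, their cross-attention widths; the key name is hypothetical):

    import torch

    key, multiplier = "model.diffusion_model.attn.to_k.weight", 0.5   # illustrative name
    a = torch.randn(320, 768)    # layer as found in an SD1.x checkpoint
    b = torch.randn(320, 1024)   # same key in an SD2.x checkpoint

    try:
        assert a.shape == b.shape, f'Incompatible shapes for layer {key}: A is {a.shape}, and B is {b.shape}'
        theta = a * (1 - multiplier) + b * multiplier   # weighted-sum merge
    except AssertionError as e:
        print(e)   # readable failure instead of an opaque broadcast error
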
From 8d12a729b8b036cb765cf2d87576d5ae256135c8 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Mon, 2 Jan 2023 20:46:51 +0300
Subject: [PATCH 034/172] fix possible error with accessing nonexistent setting
---
modules/ui.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/modules/ui.py b/modules/ui.py
index 67a51888..9350a80f 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -491,7 +491,7 @@ def apply_setting(key, value):
return
valtype = type(opts.data_labels[key].default)
- oldval = opts.data[key]
+ oldval = opts.data.get(key, None)
opts.data[key] = valtype(value) if valtype != type(None) else value
if oldval != value and opts.data_labels[key].onchange is not None:
opts.data_labels[key].onchange()
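
dict.get with a default tolerates settings that were never written to config.json, where plain indexing raises KeyError. In miniature (the config contents are hypothetical):

    opts_data = {"sd_model_checkpoint": "model.ckpt"}   # hypothetical saved config

    # opts_data["CLIP_stop_at_last_layers"] would raise KeyError here;
    # .get() yields None, so the oldval comparison in apply_setting still works.
    oldval = opts_data.get("CLIP_stop_at_last_layers", None)
    print(oldval)   # None
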
From 251ecee6949c36e9df1d99a950b3e1af2b5fa2b6 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Mon, 2 Jan 2023 22:44:46 +0300
Subject: [PATCH 035/172] make "send to" buttons send actual dimension of the
sent image rather than fields
---
javascript/ui.js | 4 +-
modules/generation_parameters_copypaste.py | 60 +++++++++++++++-------
2 files changed, 43 insertions(+), 21 deletions(-)
diff --git a/javascript/ui.js b/javascript/ui.js
index 587dd782..d0c054d9 100644
--- a/javascript/ui.js
+++ b/javascript/ui.js
@@ -19,7 +19,7 @@ function selected_gallery_index(){
function extract_image_from_gallery(gallery){
if(gallery.length == 1){
- return gallery[0]
+ return [gallery[0]]
}
index = selected_gallery_index()
@@ -28,7 +28,7 @@ function extract_image_from_gallery(gallery){
return [null]
}
- return gallery[index];
+ return [gallery[index]];
}
function args_to_array(args){
diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py
index d6fa822b..ec60319a 100644
--- a/modules/generation_parameters_copypaste.py
+++ b/modules/generation_parameters_copypaste.py
@@ -103,35 +103,57 @@ def bind_buttons(buttons, send_image, send_generate_info):
bind_list.append([buttons, send_image, send_generate_info])
+def send_image_and_dimensions(x):
+ if isinstance(x, Image.Image):
+ img = x
+ else:
+ img = image_from_url_text(x)
+
+ if shared.opts.send_size and isinstance(img, Image.Image):
+ w = img.width
+ h = img.height
+ else:
+ w = gr.update()
+ h = gr.update()
+
+ return img, w, h
+
+
def run_bind():
- for buttons, send_image, send_generate_info in bind_list:
+ for buttons, source_image_component, send_generate_info in bind_list:
for tab in buttons:
button = buttons[tab]
- if send_image and paste_fields[tab]["init_img"]:
- if type(send_image) == gr.Gallery:
- button.click(
- fn=lambda x: image_from_url_text(x),
- _js="extract_image_from_gallery",
- inputs=[send_image],
- outputs=[paste_fields[tab]["init_img"]],
- )
- else:
- button.click(
- fn=lambda x: x,
- inputs=[send_image],
- outputs=[paste_fields[tab]["init_img"]],
- )
+ destination_image_component = paste_fields[tab]["init_img"]
+ fields = paste_fields[tab]["fields"]
- if send_generate_info and paste_fields[tab]["fields"] is not None:
+ destination_width_component = next(iter([field for field, name in fields if name == "Size-1"] if fields else []), None)
+ destination_height_component = next(iter([field for field, name in fields if name == "Size-2"] if fields else []), None)
+
+ if source_image_component and destination_image_component:
+ if isinstance(source_image_component, gr.Gallery):
+ func = send_image_and_dimensions if destination_width_component else image_from_url_text
+ jsfunc = "extract_image_from_gallery"
+ else:
+ func = send_image_and_dimensions if destination_width_component else lambda x: x
+ jsfunc = None
+
+ button.click(
+ fn=func,
+ _js=jsfunc,
+ inputs=[source_image_component],
+ outputs=[destination_image_component, destination_width_component, destination_height_component] if destination_width_component else [destination_image_component],
+ )
+
+ if send_generate_info and fields is not None:
if send_generate_info in paste_fields:
- paste_field_names = ['Prompt', 'Negative prompt', 'Steps', 'Face restoration'] + (['Size-1', 'Size-2'] if shared.opts.send_size else []) + (["Seed"] if shared.opts.send_seed else [])
+ paste_field_names = ['Prompt', 'Negative prompt', 'Steps', 'Face restoration'] + (["Seed"] if shared.opts.send_seed else [])
button.click(
fn=lambda *x: x,
inputs=[field for field, name in paste_fields[send_generate_info]["fields"] if name in paste_field_names],
- outputs=[field for field, name in paste_fields[tab]["fields"] if name in paste_field_names],
+ outputs=[field for field, name in fields if name in paste_field_names],
)
else:
- connect_paste(button, paste_fields[tab]["fields"], send_generate_info)
+ connect_paste(button, fields, send_generate_info)
button.click(
fn=None,
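The heart of the change: a "send to" button now routes the image through a helper that also reports the image's real width and height, and gr.update() with no arguments is a no-op update, so the destination sliders stay untouched when sending sizes is disabled or no PIL image is available. A simplified, hedged restatement of that helper (gradio and Pillow assumed installed; not the exact webui function):

import gradio as gr
from PIL import Image

def send_image_and_dimensions_sketch(img, send_size=True):
    # forward real dimensions only when we actually hold a PIL image
    if send_size and isinstance(img, Image.Image):
        return img, img.width, img.height
    # no-op updates leave the destination width/height sliders unchanged
    return img, gr.update(), gr.update()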
From 1d7a31def8b5f4c348e2dd07536ac56cb4350614 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Tue, 3 Jan 2023 06:21:53 +0300
Subject: [PATCH 036/172] make edit fields for sliders not get hidden by
slider's label when there's not enough space
---
style.css | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/style.css b/style.css
index 924d4ae7..77551dd7 100644
--- a/style.css
+++ b/style.css
@@ -509,7 +509,7 @@ canvas[key="mask"] {
position: absolute;
right: 0.5em;
top: -0.6em;
- z-index: 200;
+ z-index: 400;
width: 8em;
}
#quicksettings .gr-box > div > div > input.gr-text-input {
From 269f6e867651cadef40d2c939a79d13291280bcd Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Tue, 3 Jan 2023 07:20:20 +0300
Subject: [PATCH 037/172] change settings UI to use vertical tabs
---
modules/ui.py | 45 +++++++++++++++++----------------------------
style.css | 27 +++++++++++++++++++++++++++
2 files changed, 44 insertions(+), 28 deletions(-)
diff --git a/modules/ui.py b/modules/ui.py
index 9350a80f..f8c973ba 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -1489,41 +1489,34 @@ def create_ui():
return gr.update(value=value), opts.dumpjson()
with gr.Blocks(analytics_enabled=False) as settings_interface:
- settings_submit = gr.Button(value="Apply settings", variant='primary', elem_id="settings_submit")
- result = gr.HTML()
+ with gr.Row():
+ settings_submit = gr.Button(value="Apply settings", variant='primary', elem_id="settings_submit")
+ restart_gradio = gr.Button(value='Restart UI', variant='primary', elem_id="settings_restart_gradio")
- settings_cols = 3
- items_per_col = int(len(opts.data_labels) * 0.9 / settings_cols)
+ result = gr.HTML(elem_id="settings_result")
quicksettings_names = [x.strip() for x in opts.quicksettings.split(",")]
quicksettings_names = set(x for x in quicksettings_names if x != 'quicksettings')
quicksettings_list = []
- cols_displayed = 0
- items_displayed = 0
previous_section = None
- column = None
- with gr.Row(elem_id="settings").style(equal_height=False):
+ current_tab = None
+ with gr.Tabs(elem_id="settings"):
for i, (k, item) in enumerate(opts.data_labels.items()):
section_must_be_skipped = item.section[0] is None
if previous_section != item.section and not section_must_be_skipped:
- if cols_displayed < settings_cols and (items_displayed >= items_per_col or previous_section is None):
- if column is not None:
- column.__exit__()
+ elem_id, text = item.section
- column = gr.Column(variant='panel')
- column.__enter__()
+ if current_tab is not None:
+ current_tab.__exit__()
- items_displayed = 0
- cols_displayed += 1
+ current_tab = gr.TabItem(elem_id="settings_{}".format(elem_id), label=text)
+ current_tab.__enter__()
previous_section = item.section
- elem_id, text = item.section
- gr.HTML(elem_id="settings_header_text_{}".format(elem_id), value='<h1 class="gr-button-lg">{}</h1>'.format(text))
-
if k in quicksettings_names and not shared.cmd_opts.freeze_settings:
quicksettings_list.append((i, k, item))
components.append(dummy_component)
@@ -1533,15 +1526,14 @@ def create_ui():
component = create_setting_component(k)
component_dict[k] = component
components.append(component)
- items_displayed += 1
- with gr.Row():
- request_notifications = gr.Button(value='Request browser notifications', elem_id="request_notifications")
- download_localization = gr.Button(value='Download localization template', elem_id="download_localization")
+ if current_tab is not None:
+ current_tab.__exit__()
- with gr.Row():
- reload_script_bodies = gr.Button(value='Reload custom script bodies (No ui updates, No restart)', variant='secondary', elem_id="settings_reload_script_bodies")
- restart_gradio = gr.Button(value='Restart Gradio and Refresh components (Custom Scripts, ui.py, js and css only)', variant='primary', elem_id="settings_restart_gradio")
+ with gr.TabItem("Actions"):
+ request_notifications = gr.Button(value='Request browser notifications', elem_id="request_notifications")
+ download_localization = gr.Button(value='Download localization template', elem_id="download_localization")
+ reload_script_bodies = gr.Button(value='Reload custom script bodies (No ui updates, No restart)', variant='secondary', elem_id="settings_reload_script_bodies")
request_notifications.click(
fn=lambda: None,
@@ -1578,9 +1570,6 @@ def create_ui():
outputs=[],
)
- if column is not None:
- column.__exit__()
-
interfaces = [
(txt2img_interface, "txt2img", "txt2img"),
(img2img_interface, "img2img", "img2img"),
diff --git a/style.css b/style.css
index 77551dd7..7df4d960 100644
--- a/style.css
+++ b/style.css
@@ -241,6 +241,33 @@ fieldset span.text-gray-500, .gr-block.gr-box span.text-gray-500, label.block s
z-index: 200;
}
+#settings{
+ display: block;
+}
+
+#settings > div{
+ border: none;
+ margin-left: 10em;
+}
+
+#settings > div.flex-wrap{
+ float: left;
+ display: block;
+ margin-left: 0;
+ width: 10em;
+}
+
+#settings > div.flex-wrap button{
+ display: block;
+ border: none;
+ text-align: left;
+}
+
+#settings_result{
+ height: 1.4em;
+ margin: 0 1.2em;
+}
+
input[type="range"]{
margin: 0.5em 0 -0.3em 0;
}
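Gradio layout blocks are context managers, so the loop above opens and closes TabItems by calling __enter__/__exit__ directly rather than nesting with statements, which would not work for a tab count known only at runtime. A minimal sketch of the same pattern (section names invented for illustration):

import gradio as gr

sections = [("ui", "User interface"), ("saving", "Saving images")]

with gr.Blocks() as demo:
    current_tab = None
    with gr.Tabs(elem_id="settings"):
        for elem_id, text in sections:
            if current_tab is not None:
                current_tab.__exit__()  # close the previous tab
            current_tab = gr.TabItem(label=text, elem_id="settings_{}".format(elem_id))
            current_tab.__enter__()     # components created below land inside this tab
            gr.Textbox(label="A setting in " + text)
        if current_tab is not None:
            current_tab.__exit__()      # close the last tab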
From 18c03cdeac6272734b0c09afd3fbe47d1372dd07 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Tue, 3 Jan 2023 09:04:29 +0300
Subject: [PATCH 038/172] styling rework to make things more compact
---
modules/ui.py | 127 ++++++++++++++++++++-------------------
modules/ui_components.py | 7 +++
style.css | 35 ++++++-----
3 files changed, 92 insertions(+), 77 deletions(-)
diff --git a/modules/ui.py b/modules/ui.py
index f8c973ba..f787b518 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -19,7 +19,8 @@ import numpy as np
from PIL import Image, PngImagePlugin
from modules.call_queue import wrap_gradio_gpu_call, wrap_queued_call, wrap_gradio_call
-from modules import sd_hijack, sd_models, localization, script_callbacks, ui_extensions, deepbooru, ui_components
+from modules import sd_hijack, sd_models, localization, script_callbacks, ui_extensions, deepbooru
+from modules.ui_components import FormRow, FormGroup, ToolButton
from modules.paths import script_path
from modules.shared import opts, cmd_opts, restricted_opts
@@ -273,31 +274,27 @@ def interrogate_deepbooru(image):
def create_seed_inputs(target_interface):
- with gr.Row():
- with gr.Box():
- with gr.Row(elem_id=target_interface + '_seed_row'):
- seed = (gr.Textbox if cmd_opts.use_textbox_seed else gr.Number)(label='Seed', value=-1, elem_id=target_interface + '_seed')
- seed.style(container=False)
- random_seed = gr.Button(random_symbol, elem_id=target_interface + '_random_seed')
- reuse_seed = gr.Button(reuse_symbol, elem_id=target_interface + '_reuse_seed')
+ with FormRow(elem_id=target_interface + '_seed_row'):
+ seed = (gr.Textbox if cmd_opts.use_textbox_seed else gr.Number)(label='Seed', value=-1, elem_id=target_interface + '_seed')
+ seed.style(container=False)
+ random_seed = gr.Button(random_symbol, elem_id=target_interface + '_random_seed')
+ reuse_seed = gr.Button(reuse_symbol, elem_id=target_interface + '_reuse_seed')
- with gr.Box(elem_id=target_interface + '_subseed_show_box'):
+ with gr.Group(elem_id=target_interface + '_subseed_show_box'):
seed_checkbox = gr.Checkbox(label='Extra', elem_id=target_interface + '_subseed_show', value=False)
# Components to show/hide based on the 'Extra' checkbox
seed_extras = []
- with gr.Row(visible=False) as seed_extra_row_1:
+ with FormRow(visible=False, elem_id=target_interface + '_subseed_row') as seed_extra_row_1:
seed_extras.append(seed_extra_row_1)
- with gr.Box():
- with gr.Row(elem_id=target_interface + '_subseed_row'):
- subseed = gr.Number(label='Variation seed', value=-1, elem_id=target_interface + '_subseed')
- subseed.style(container=False)
- random_subseed = gr.Button(random_symbol, elem_id=target_interface + '_random_subseed')
- reuse_subseed = gr.Button(reuse_symbol, elem_id=target_interface + '_reuse_subseed')
+ subseed = gr.Number(label='Variation seed', value=-1, elem_id=target_interface + '_subseed')
+ subseed.style(container=False)
+ random_subseed = gr.Button(random_symbol, elem_id=target_interface + '_random_subseed')
+ reuse_subseed = gr.Button(reuse_symbol, elem_id=target_interface + '_reuse_subseed')
subseed_strength = gr.Slider(label='Variation strength', value=0.0, minimum=0, maximum=1, step=0.01, elem_id=target_interface + '_subseed_strength')
- with gr.Row(visible=False) as seed_extra_row_2:
+ with FormRow(visible=False) as seed_extra_row_2:
seed_extras.append(seed_extra_row_2)
seed_resize_from_w = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize seed from width", value=0, elem_id=target_interface + '_seed_resize_from_w')
seed_resize_from_h = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize seed from height", value=0, elem_id=target_interface + '_seed_resize_from_h')
@@ -523,7 +520,7 @@ def create_refresh_button(refresh_component, refresh_method, refreshed_args, ele
return gr.update(**(args or {}))
- refresh_button = ui_components.ToolButton(value=refresh_symbol, elem_id=elem_id)
+ refresh_button = ToolButton(value=refresh_symbol, elem_id=elem_id)
refresh_button.click(
fn=refresh,
inputs=[],
@@ -636,11 +633,11 @@ Requested path was: {f}
def create_sampler_and_steps_selection(choices, tabname):
if opts.samplers_in_dropdown:
- with gr.Row(elem_id=f"sampler_selection_{tabname}"):
+ with FormRow(elem_id=f"sampler_selection_{tabname}"):
sampler_index = gr.Dropdown(label='Sampling method', elem_id=f"{tabname}_sampling", choices=[x.name for x in choices], value=choices[0].name, type="index")
steps = gr.Slider(minimum=1, maximum=150, step=1, elem_id=f"{tabname}_steps", label="Sampling Steps", value=20)
else:
- with gr.Group(elem_id=f"sampler_selection_{tabname}"):
+ with FormGroup(elem_id=f"sampler_selection_{tabname}"):
steps = gr.Slider(minimum=1, maximum=150, step=1, elem_id=f"{tabname}_steps", label="Sampling Steps", value=20)
sampler_index = gr.Radio(label='Sampling method', elem_id=f"{tabname}_sampling", choices=[x.name for x in choices], value=choices[0].name, type="index")
@@ -677,29 +674,29 @@ def create_ui():
with gr.Column(variant='panel', elem_id="txt2img_settings"):
steps, sampler_index = create_sampler_and_steps_selection(samplers, "txt2img")
- with gr.Group():
- width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="txt2img_width")
- height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="txt2img_height")
-
- with gr.Row():
- restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1, elem_id="txt2img_restore_faces")
- tiling = gr.Checkbox(label='Tiling', value=False, elem_id="txt2img_tiling")
- enable_hr = gr.Checkbox(label='Hires. fix', value=False, elem_id="txt2img_enable_hr")
-
- with gr.Row(visible=False) as hr_options:
- hr_upscaler = gr.Dropdown(label="Upscaler", elem_id="txt2img_hr_upscaler", choices=[*shared.latent_upscale_modes, *[x.name for x in shared.sd_upscalers]], value=shared.latent_upscale_default_mode)
- hr_scale = gr.Slider(minimum=1.0, maximum=4.0, step=0.05, label="Upscale by", value=2.0, elem_id="txt2img_hr_scale")
- denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.7, elem_id="txt2img_denoising_strength")
-
- with gr.Row(equal_height=True):
- batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="txt2img_batch_count")
- batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="txt2img_batch_size")
+ with FormRow():
+ with gr.Column(elem_id="txt2img_column_size", scale=4):
+ width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="txt2img_width")
+ height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="txt2img_height")
+ with gr.Column(elem_id="txt2img_column_batch"):
+ batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="txt2img_batch_count")
+ batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="txt2img_batch_size")
cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0, elem_id="txt2img_cfg_scale")
seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs('txt2img')
- with gr.Group(elem_id="txt2img_script_container"):
+ with FormRow(elem_id="txt2img_checkboxes"):
+ restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1, elem_id="txt2img_restore_faces")
+ tiling = gr.Checkbox(label='Tiling', value=False, elem_id="txt2img_tiling")
+ enable_hr = gr.Checkbox(label='Hires. fix', value=False, elem_id="txt2img_enable_hr")
+
+ with FormRow(visible=False) as hr_options:
+ hr_upscaler = gr.Dropdown(label="Upscaler", elem_id="txt2img_hr_upscaler", choices=[*shared.latent_upscale_modes, *[x.name for x in shared.sd_upscalers]], value=shared.latent_upscale_default_mode)
+ hr_scale = gr.Slider(minimum=1.0, maximum=4.0, step=0.05, label="Upscale by", value=2.0, elem_id="txt2img_hr_scale")
+ denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.7, elem_id="txt2img_denoising_strength")
+
+ with FormGroup(elem_id="txt2img_script_container"):
custom_inputs = modules.scripts.scripts_txt2img.setup_ui()
txt2img_gallery, generation_info, html_info, html_log = create_output_panel("txt2img", opts.outdir_txt2img_samples)
@@ -816,7 +813,7 @@ def create_ui():
img2img_preview = gr.Image(elem_id='img2img_preview', visible=False)
setup_progressbar(progressbar, img2img_preview, 'img2img')
- with gr.Row().style(equal_height=False):
+ with FormRow().style(equal_height=False):
with gr.Column(variant='panel', elem_id="img2img_settings"):
with gr.Tabs(elem_id="mode_img2img") as tabs_img2img_mode:
@@ -841,19 +838,23 @@ def create_ui():
init_img_inpaint = gr.Image(label="Image for img2img", show_label=False, source="upload", interactive=True, type="pil", visible=False, elem_id="img_inpaint_base")
init_mask_inpaint = gr.Image(label="Mask", source="upload", interactive=True, type="pil", visible=False, elem_id="img_inpaint_mask")
- with gr.Row():
+ with FormRow():
mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=4, elem_id="img2img_mask_blur")
mask_alpha = gr.Slider(label="Mask transparency", interactive=use_color_sketch, visible=use_color_sketch, elem_id="img2img_mask_alpha")
- with gr.Row():
- mask_mode = gr.Radio(label="Mask mode", show_label=False, choices=["Draw mask", "Upload mask"], type="index", value="Draw mask", elem_id="mask_mode")
- inpainting_mask_invert = gr.Radio(label='Masking mode', show_label=False, choices=['Inpaint masked', 'Inpaint not masked'], value='Inpaint masked', type="index", elem_id="img2img_mask_mode")
+ with FormRow():
+ mask_mode = gr.Radio(label="Mask source", choices=["Draw mask", "Upload mask"], type="index", value="Draw mask", elem_id="mask_mode")
+ inpainting_mask_invert = gr.Radio(label='Mask mode', choices=['Inpaint masked', 'Inpaint not masked'], value='Inpaint masked', type="index", elem_id="img2img_mask_mode")
- inpainting_fill = gr.Radio(label='Masked content', choices=['fill', 'original', 'latent noise', 'latent nothing'], value='original', type="index", elem_id="img2img_inpainting_fill")
+ with FormRow():
+ inpainting_fill = gr.Radio(label='Masked content', choices=['fill', 'original', 'latent noise', 'latent nothing'], value='original', type="index", elem_id="img2img_inpainting_fill")
- with gr.Row():
- inpaint_full_res = gr.Checkbox(label='Inpaint at full resolution', value=False, elem_id="img2img_inpaint_full_res")
- inpaint_full_res_padding = gr.Slider(label='Inpaint at full resolution padding, pixels', minimum=0, maximum=256, step=4, value=32, elem_id="img2img_inpaint_full_res_padding")
+ with FormRow():
+ with gr.Column():
+ inpaint_full_res = gr.Radio(label="Inpaint area", choices=["Whole picture", "Only masked"], type="index", value="Whole picture", elem_id="img2img_inpaint_full_res")
+
+ with gr.Column(scale=4):
+ inpaint_full_res_padding = gr.Slider(label='Only masked padding, pixels', minimum=0, maximum=256, step=4, value=32, elem_id="img2img_inpaint_full_res_padding")
with gr.TabItem('Batch img2img', id='batch', elem_id="img2img_batch_tab"):
hidden = '<br>Disabled when launched with --hide-ui-dir-config.' if shared.cmd_opts.hide_ui_dir_config else ''
@@ -861,30 +862,30 @@ def create_ui():
img2img_batch_input_dir = gr.Textbox(label="Input directory", **shared.hide_dirs, elem_id="img2img_batch_input_dir")
img2img_batch_output_dir = gr.Textbox(label="Output directory", **shared.hide_dirs, elem_id="img2img_batch_output_dir")
- with gr.Row():
- resize_mode = gr.Radio(label="Resize mode", elem_id="resize_mode", show_label=False, choices=["Just resize", "Crop and resize", "Resize and fill", "Just resize (latent upscale)"], type="index", value="Just resize")
+ with FormRow():
+ resize_mode = gr.Radio(label="Resize mode", elem_id="resize_mode", choices=["Just resize", "Crop and resize", "Resize and fill", "Just resize (latent upscale)"], type="index", value="Just resize")
steps, sampler_index = create_sampler_and_steps_selection(samplers_for_img2img, "img2img")
- with gr.Group():
- width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="img2img_width")
- height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="img2img_height")
+ with FormRow():
+ with gr.Column(elem_id="img2img_column_size", scale=4):
+ width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="img2img_width")
+ height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="img2img_height")
+ with gr.Column(elem_id="img2img_column_batch"):
+ batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="img2img_batch_count")
+ batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="img2img_batch_size")
- with gr.Row():
- restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1, elem_id="img2img_restore_faces")
- tiling = gr.Checkbox(label='Tiling', value=False, elem_id="img2img_tiling")
-
- with gr.Row():
- batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="img2img_batch_count")
- batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="img2img_batch_size")
-
- with gr.Group():
+ with FormGroup():
cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0, elem_id="img2img_cfg_scale")
denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.75, elem_id="img2img_denoising_strength")
seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs('img2img')
- with gr.Group(elem_id="img2img_script_container"):
+ with FormRow(elem_id="img2img_checkboxes"):
+ restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1, elem_id="img2img_restore_faces")
+ tiling = gr.Checkbox(label='Tiling', value=False, elem_id="img2img_tiling")
+
+ with FormGroup(elem_id="img2img_script_container"):
custom_inputs = modules.scripts.scripts_img2img.setup_ui()
img2img_gallery, generation_info, html_info, html_log = create_output_panel("img2img", opts.outdir_img2img_samples)
@@ -1444,7 +1445,7 @@ def create_ui():
res = comp(label=info.label, value=fun(), elem_id=elem_id, **(args or {}))
create_refresh_button(res, info.refresh, info.component_args, "refresh_" + key)
else:
- with ui_components.FormRow():
+ with FormRow():
res = comp(label=info.label, value=fun(), elem_id=elem_id, **(args or {}))
create_refresh_button(res, info.refresh, info.component_args, "refresh_" + key)
else:
diff --git a/modules/ui_components.py b/modules/ui_components.py
index d0519d2d..91eb0e3d 100644
--- a/modules/ui_components.py
+++ b/modules/ui_components.py
@@ -16,3 +16,10 @@ class FormRow(gr.Row, gr.components.FormComponent):
def get_block_name(self):
return "row"
+
+
+class FormGroup(gr.Group, gr.components.FormComponent):
+ """Same as gr.Row but fits inside gradio forms"""
+
+ def get_block_name(self):
+ return "group"
diff --git a/style.css b/style.css
index 7df4d960..86a265f6 100644
--- a/style.css
+++ b/style.css
@@ -74,7 +74,8 @@
}
[id$=_random_seed], [id$=_random_subseed], [id$=_reuse_seed], [id$=_reuse_subseed], #open_folder{
- min-width: auto;
+ min-width: 2.3em;
+ height: 2.5em;
flex-grow: 0;
padding-left: 0.25em;
padding-right: 0.25em;
@@ -86,6 +87,7 @@
[id$=_seed_row], [id$=_subseed_row]{
gap: 0.5rem;
+ padding: 0.6em;
}
[id$=_subseed_show_box]{
@@ -206,24 +208,24 @@ button{
fieldset span.text-gray-500, .gr-block.gr-box span.text-gray-500, label.block span{
position: absolute;
- top: -0.6em;
+ top: -0.5em;
line-height: 1.2em;
- padding: 0 0.5em;
- margin: 0;
+ padding: 0;
+ margin: 0 0.5em;
background-color: white;
- border-top: 1px solid #eee;
- border-left: 1px solid #eee;
- border-right: 1px solid #eee;
+ box-shadow: 0 0 5px 5px white;
z-index: 300;
}
.dark fieldset span.text-gray-500, .dark .gr-block.gr-box span.text-gray-500, .dark label.block span{
background-color: rgb(31, 41, 55);
- border-top: 1px solid rgb(55 65 81);
- border-left: 1px solid rgb(55 65 81);
- border-right: 1px solid rgb(55 65 81);
+ box-shadow: 0 0 5px 5px rgb(31, 41, 55);
+}
+
+#txt2img_column_batch, #img2img_column_batch{
+ min-width: min(13.5em, 100%) !important;
}
#settings fieldset span.text-gray-500, #settings .gr-block.gr-box span.text-gray-500, #settings label.block span{
@@ -232,10 +234,6 @@ fieldset span.text-gray-500, .gr-block.gr-box span.text-gray-500, label.block s
margin-right: 8em;
}
-.gr-panel div.flex-col div.justify-between label span{
- margin: 0;
-}
-
#settings .gr-panel div.flex-col div.justify-between div{
position: relative;
z-index: 200;
@@ -609,6 +607,15 @@ img2maskimg, #img2maskimg > .h-60, #img2maskimg > .h-60 > div, #img2maskimg > .h
}
+#img2img_settings > div.gr-form, #txt2img_settings > div.gr-form {
+ padding-top: 0.9em;
+}
+
+#img2img_settings div.gr-form .gr-form, #txt2img_settings div.gr-form .gr-form{
+ border: none;
+ padding-bottom: 0.5em;
+}
+
/* The following handles localization for right-to-left (RTL) languages like Arabic.
The rtl media type will only be activated by the logic in javascript/localization.js.
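FormRow and FormGroup inherit from both a gradio layout block and gr.components.FormComponent, so gradio's form-grouping logic accepts them as form members and renders their children inside one shared panel instead of separate boxes. A hedged usage sketch (assumes the webui's modules package is on the import path):

import gradio as gr
from modules.ui_components import FormRow, FormGroup

with gr.Blocks() as demo:
    with FormGroup():
        with FormRow():  # both sliders share a single form row
            width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512)
            height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512)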
From 2bc86712ec16cada01a2353f1d978c1aabc84dbb Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Tue, 3 Jan 2023 09:13:35 +0300
Subject: [PATCH 039/172] make quicksettings UI elements appear in same order
as they are listed in the setting
---
modules/ui.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/modules/ui.py b/modules/ui.py
index f787b518..d7b911da 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -1497,7 +1497,7 @@ def create_ui():
result = gr.HTML(elem_id="settings_result")
quicksettings_names = [x.strip() for x in opts.quicksettings.split(",")]
- quicksettings_names = set(x for x in quicksettings_names if x != 'quicksettings')
+ quicksettings_names = {x: i for i, x in enumerate(quicksettings_names) if x != 'quicksettings'}
quicksettings_list = []
@@ -1604,7 +1604,7 @@ def create_ui():
with gr.Blocks(css=css, analytics_enabled=False, title="Stable Diffusion") as demo:
with gr.Row(elem_id="quicksettings"):
- for i, k, item in quicksettings_list:
+ for i, k, item in sorted(quicksettings_list, key=lambda x: quicksettings_names.get(x[1], x[0])):
component = create_setting_component(k, is_quicksettings=True)
component_dict[k] = component
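The quicksettings names become a name-to-position dict, and the collected components are re-sorted by that position, falling back to a component's original index for names missing from the option. A standalone sketch of the sort key (names and indices illustrative):

names = ["sd_vae", "sd_model_checkpoint"]            # user's quicksettings order
quicksettings_names = {x: i for i, x in enumerate(names)}

# (original_index, setting_key) pairs as collected while building the UI
collected = [(0, "sd_model_checkpoint"), (3, "CLIP_stop_at_last_layers"), (7, "sd_vae")]

resorted = sorted(collected, key=lambda x: quicksettings_names.get(x[1], x[0]))
print([k for _, k in resorted])
# ['sd_vae', 'sd_model_checkpoint', 'CLIP_stop_at_last_layers']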
From 9d4eff097deff6153c4023f158bd9fbd4f3e88b3 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Tue, 3 Jan 2023 10:01:06 +0300
Subject: [PATCH 040/172] add a button to show all setting pages
---
javascript/ui.js | 11 +++++++++++
modules/ui.py | 2 ++
2 files changed, 13 insertions(+)
diff --git a/javascript/ui.js b/javascript/ui.js
index d0c054d9..34406f3f 100644
--- a/javascript/ui.js
+++ b/javascript/ui.js
@@ -188,6 +188,17 @@ onUiUpdate(function(){
img2img_textarea = gradioApp().querySelector("#img2img_prompt > label > textarea");
img2img_textarea?.addEventListener("input", () => update_token_counter("img2img_token_button"));
}
+
+ show_all_pages = gradioApp().getElementById('settings_show_all_pages')
+ settings_tabs = gradioApp().querySelector('#settings div')
+ if(show_all_pages && settings_tabs){
+ settings_tabs.appendChild(show_all_pages)
+ show_all_pages.onclick = function(){
+ gradioApp().querySelectorAll('#settings > div').forEach(function(elem){
+ elem.style.display = "block";
+ })
+ }
+ }
})
let txt2img_textarea, img2img_textarea = undefined;
diff --git a/modules/ui.py b/modules/ui.py
index d7b911da..2c92c422 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -1536,6 +1536,8 @@ def create_ui():
download_localization = gr.Button(value='Download localization template', elem_id="download_localization")
reload_script_bodies = gr.Button(value='Reload custom script bodies (No ui updates, No restart)', variant='secondary', elem_id="settings_reload_script_bodies")
+ gr.Button(value="Show all pages", elem_id="settings_show_all_pages")
+
request_notifications.click(
fn=lambda: None,
inputs=[],
From a1cf55a9d1c82f8e56c00d549bca5c8fa069f412 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Tue, 3 Jan 2023 10:39:21 +0300
Subject: [PATCH 041/172] add option to reorder items in main UI
---
modules/shared.py | 13 ++++++
modules/ui.py | 112 +++++++++++++++++++++++++++++++---------------
2 files changed, 88 insertions(+), 37 deletions(-)
diff --git a/modules/shared.py b/modules/shared.py
index b65559ee..23657a93 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -109,6 +109,17 @@ restricted_opts = {
"outdir_save",
}
+ui_reorder_categories = [
+ "sampler",
+ "dimensions",
+ "cfg",
+ "seed",
+ "checkboxes",
+ "hires_fix",
+ "batch",
+ "scripts",
+]
+
cmd_opts.disable_extension_access = (cmd_opts.share or cmd_opts.listen or cmd_opts.server_name) and not cmd_opts.enable_insecure_extension_access
devices.device, devices.device_interrogate, devices.device_gfpgan, devices.device_esrgan, devices.device_codeformer = \
@@ -410,7 +421,9 @@ options_templates.update(options_section(('ui', "User interface"), {
"js_modal_lightbox_initially_zoomed": OptionInfo(True, "Show images zoomed in by default in full page image viewer"),
"show_progress_in_title": OptionInfo(True, "Show generation progress in window title."),
"samplers_in_dropdown": OptionInfo(True, "Use dropdown for sampler selection instead of radio group"),
+ "dimensions_and_batch_together": OptionInfo(True, "Show Witdth/Height and Batch sliders in same row"),
'quicksettings': OptionInfo("sd_model_checkpoint", "Quicksettings list"),
+ 'ui_reorder': OptionInfo(", ".join(ui_reorder_categories), "txt2img/img2img UI item order"),
'localization': OptionInfo("None", "Localization (requires restart)", gr.Dropdown, lambda: {"choices": ["None"] + list(localization.localizations.keys())}, refresh=lambda: localization.list_localizations(cmd_opts.localizations_dir)),
}))
diff --git a/modules/ui.py b/modules/ui.py
index 2c92c422..f2e7c0d6 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -644,6 +644,13 @@ def create_sampler_and_steps_selection(choices, tabname):
return steps, sampler_index
+def ordered_ui_categories():
+ user_order = {x.strip(): i for i, x in enumerate(shared.opts.ui_reorder.split(","))}
+
+ for i, category in sorted(enumerate(shared.ui_reorder_categories), key=lambda x: user_order.get(x[1], x[0] + 1000)):
+ yield category
+
+
def create_ui():
import modules.img2img
import modules.txt2img
@@ -672,32 +679,48 @@ def create_ui():
with gr.Row().style(equal_height=False):
with gr.Column(variant='panel', elem_id="txt2img_settings"):
- steps, sampler_index = create_sampler_and_steps_selection(samplers, "txt2img")
+ for category in ordered_ui_categories():
+ if category == "sampler":
+ steps, sampler_index = create_sampler_and_steps_selection(samplers, "txt2img")
- with FormRow():
- with gr.Column(elem_id="txt2img_column_size", scale=4):
- width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="txt2img_width")
- height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="txt2img_height")
- with gr.Column(elem_id="txt2img_column_batch"):
- batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="txt2img_batch_count")
- batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="txt2img_batch_size")
+ elif category == "dimensions":
+ with FormRow():
+ with gr.Column(elem_id="txt2img_column_size", scale=4):
+ width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="txt2img_width")
+ height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="txt2img_height")
- cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0, elem_id="txt2img_cfg_scale")
+ if opts.dimensions_and_batch_together:
+ with gr.Column(elem_id="txt2img_column_batch"):
+ batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="txt2img_batch_count")
+ batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="txt2img_batch_size")
- seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs('txt2img')
+ elif category == "cfg":
+ cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0, elem_id="txt2img_cfg_scale")
- with FormRow(elem_id="txt2img_checkboxes"):
- restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1, elem_id="txt2img_restore_faces")
- tiling = gr.Checkbox(label='Tiling', value=False, elem_id="txt2img_tiling")
- enable_hr = gr.Checkbox(label='Hires. fix', value=False, elem_id="txt2img_enable_hr")
+ elif category == "seed":
+ seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs('txt2img')
- with FormRow(visible=False) as hr_options:
- hr_upscaler = gr.Dropdown(label="Upscaler", elem_id="txt2img_hr_upscaler", choices=[*shared.latent_upscale_modes, *[x.name for x in shared.sd_upscalers]], value=shared.latent_upscale_default_mode)
- hr_scale = gr.Slider(minimum=1.0, maximum=4.0, step=0.05, label="Upscale by", value=2.0, elem_id="txt2img_hr_scale")
- denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.7, elem_id="txt2img_denoising_strength")
+ elif category == "checkboxes":
+ with FormRow(elem_id="txt2img_checkboxes"):
+ restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1, elem_id="txt2img_restore_faces")
+ tiling = gr.Checkbox(label='Tiling', value=False, elem_id="txt2img_tiling")
+ enable_hr = gr.Checkbox(label='Hires. fix', value=False, elem_id="txt2img_enable_hr")
- with FormGroup(elem_id="txt2img_script_container"):
- custom_inputs = modules.scripts.scripts_txt2img.setup_ui()
+ elif category == "hires_fix":
+ with FormRow(visible=False, elem_id="txt2img_hires_fix") as hr_options:
+ hr_upscaler = gr.Dropdown(label="Upscaler", elem_id="txt2img_hr_upscaler", choices=[*shared.latent_upscale_modes, *[x.name for x in shared.sd_upscalers]], value=shared.latent_upscale_default_mode)
+ hr_scale = gr.Slider(minimum=1.0, maximum=4.0, step=0.05, label="Upscale by", value=2.0, elem_id="txt2img_hr_scale")
+ denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.7, elem_id="txt2img_denoising_strength")
+
+ elif category == "batch":
+ if not opts.dimensions_and_batch_together:
+ with FormRow(elem_id="txt2img_column_batch"):
+ batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="txt2img_batch_count")
+ batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="txt2img_batch_size")
+
+ elif category == "scripts":
+ with FormGroup(elem_id="txt2img_script_container"):
+ custom_inputs = modules.scripts.scripts_txt2img.setup_ui()
txt2img_gallery, generation_info, html_info, html_log = create_output_panel("txt2img", opts.outdir_txt2img_samples)
parameters_copypaste.bind_buttons({"txt2img": txt2img_paste}, None, txt2img_prompt)
@@ -865,28 +888,43 @@ def create_ui():
with FormRow():
resize_mode = gr.Radio(label="Resize mode", elem_id="resize_mode", choices=["Just resize", "Crop and resize", "Resize and fill", "Just resize (latent upscale)"], type="index", value="Just resize")
- steps, sampler_index = create_sampler_and_steps_selection(samplers_for_img2img, "img2img")
+ for category in ordered_ui_categories():
+ if category == "sampler":
+ steps, sampler_index = create_sampler_and_steps_selection(samplers_for_img2img, "img2img")
- with FormRow():
- with gr.Column(elem_id="img2img_column_size", scale=4):
- width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="img2img_width")
- height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="img2img_height")
- with gr.Column(elem_id="img2img_column_batch"):
- batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="img2img_batch_count")
- batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="img2img_batch_size")
+ elif category == "dimensions":
+ with FormRow():
+ with gr.Column(elem_id="img2img_column_size", scale=4):
+ width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="img2img_width")
+ height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="img2img_height")
- with FormGroup():
- cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0, elem_id="img2img_cfg_scale")
- denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.75, elem_id="img2img_denoising_strength")
+ if opts.dimensions_and_batch_together:
+ with gr.Column(elem_id="img2img_column_batch"):
+ batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="img2img_batch_count")
+ batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="img2img_batch_size")
- seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs('img2img')
+ elif category == "cfg":
+ with FormGroup():
+ cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0, elem_id="img2img_cfg_scale")
+ denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.75, elem_id="img2img_denoising_strength")
- with FormRow(elem_id="img2img_checkboxes"):
- restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1, elem_id="img2img_restore_faces")
- tiling = gr.Checkbox(label='Tiling', value=False, elem_id="img2img_tiling")
+ elif category == "seed":
+ seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs('img2img')
- with FormGroup(elem_id="img2img_script_container"):
- custom_inputs = modules.scripts.scripts_img2img.setup_ui()
+ elif category == "checkboxes":
+ with FormRow(elem_id="img2img_checkboxes"):
+ restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1, elem_id="img2img_restore_faces")
+ tiling = gr.Checkbox(label='Tiling', value=False, elem_id="img2img_tiling")
+
+ elif category == "batch":
+ if not opts.dimensions_and_batch_together:
+ with FormRow(elem_id="img2img_column_batch"):
+ batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="img2img_batch_count")
+ batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="img2img_batch_size")
+
+ elif category == "scripts":
+ with FormGroup(elem_id="img2img_script_container"):
+ custom_inputs = modules.scripts.scripts_img2img.setup_ui()
img2img_gallery, generation_info, html_info, html_log = create_output_panel("img2img", opts.outdir_img2img_samples)
parameters_copypaste.bind_buttons({"img2img": img2img_paste}, None, img2img_prompt)
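ordered_ui_categories() yields the fixed category list re-sorted by the user's comma-separated ui_reorder option; categories the user leaves out keep their built-in relative order but sort after everything mentioned, which is what the +1000 offset on the fallback key achieves. A self-contained sketch:

ui_reorder_categories = ["sampler", "dimensions", "cfg", "seed",
                         "checkboxes", "hires_fix", "batch", "scripts"]

def ordered_ui_categories(ui_reorder="seed, sampler"):
    user_order = {x.strip(): i for i, x in enumerate(ui_reorder.split(","))}
    for i, category in sorted(enumerate(ui_reorder_categories),
                              key=lambda x: user_order.get(x[1], x[0] + 1000)):
        yield category

print(list(ordered_ui_categories()))
# ['seed', 'sampler', 'dimensions', 'cfg', 'checkboxes', 'hires_fix', 'batch', 'scripts']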
From fda1ed184381fdf8aa81be4f64e77787f3fac1b2 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Tue, 3 Jan 2023 12:01:32 +0300
Subject: [PATCH 042/172] some minor improvements for dark mode UI
---
style.css | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/style.css b/style.css
index 86a265f6..7296ce91 100644
--- a/style.css
+++ b/style.css
@@ -208,20 +208,20 @@ button{
fieldset span.text-gray-500, .gr-block.gr-box span.text-gray-500, label.block span{
position: absolute;
- top: -0.5em;
+ top: -0.7em;
line-height: 1.2em;
padding: 0;
margin: 0 0.5em;
background-color: white;
- box-shadow: 0 0 5px 5px white;
+ box-shadow: 6px 0 6px 0px white, -6px 0 6px 0px white;
z-index: 300;
}
.dark fieldset span.text-gray-500, .dark .gr-block.gr-box span.text-gray-500, .dark label.block span{
background-color: rgb(31, 41, 55);
- box-shadow: 0 0 5px 5px rgb(31, 41, 55);
+ box-shadow: 6px 0 6px 0px rgb(31, 41, 55), -6px 0 6px 0px rgb(31, 41, 55);
}
#txt2img_column_batch, #img2img_column_batch{
From 9a3b0ee960b0c61c4f60e3081ae6f2098533d393 Mon Sep 17 00:00:00 2001
From: hithereai <121192995+hithereai@users.noreply.github.com>
Date: Tue, 3 Jan 2023 11:22:06 +0200
Subject: [PATCH 043/172] update req.txt
The old 'opencv-python' package is very limiting in terms of optical flow, so I propose switching to 'opencv-contrib-python', which exposes more cv2.optflow methods.
These are needed for optical flow work in auto1111 and its extensions, and an extension cannot install the package itself: only a single opencv package can be installed for optical flow to work properly, so the main requirement has to change.
---
requirements.txt | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/requirements.txt b/requirements.txt
index e2c3876b..4f09385f 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -9,7 +9,7 @@ gradio==3.15.0
invisible-watermark
numpy
omegaconf
-opencv-python
+opencv-contrib-python
requests
piexif
Pillow
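For context, the cv2.optflow namespace this commit message refers to ships only in the contrib build. A hedged sketch of the kind of call it unlocks (frame file names are placeholders):

import cv2

prev = cv2.cvtColor(cv2.imread("frame0.png"), cv2.COLOR_BGR2GRAY)
curr = cv2.cvtColor(cv2.imread("frame1.png"), cv2.COLOR_BGR2GRAY)

tvl1 = cv2.optflow.createOptFlow_DualTVL1()  # AttributeError under plain opencv-python
flow = tvl1.calc(prev, curr, None)           # dense HxWx2 float32 flow field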
From c0ee1488702d5a6ae35fbf7e0422f9f685394920 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Tue, 3 Jan 2023 14:18:48 +0300
Subject: [PATCH 044/172] add support for running with gradio 3.9 installed
---
modules/generation_parameters_copypaste.py | 4 ++--
modules/ui_tempdir.py | 23 ++++++++++++++++++++--
2 files changed, 23 insertions(+), 4 deletions(-)
diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py
index ec60319a..d94f11a3 100644
--- a/modules/generation_parameters_copypaste.py
+++ b/modules/generation_parameters_copypaste.py
@@ -7,7 +7,7 @@ from pathlib import Path
import gradio as gr
from modules.shared import script_path
-from modules import shared
+from modules import shared, ui_tempdir
import tempfile
from PIL import Image
@@ -39,7 +39,7 @@ def quote(text):
def image_from_url_text(filedata):
if type(filedata) == dict and filedata["is_file"]:
filename = filedata["name"]
- is_in_right_dir = any([filename in fileset for fileset in shared.demo.temp_file_sets])
+ is_in_right_dir = ui_tempdir.check_tmp_file(shared.demo, filename)
assert is_in_right_dir, 'trying to open image file outside of allowed directories'
return Image.open(filename)
diff --git a/modules/ui_tempdir.py b/modules/ui_tempdir.py
index 363d449d..21945235 100644
--- a/modules/ui_tempdir.py
+++ b/modules/ui_tempdir.py
@@ -1,6 +1,7 @@
import os
import tempfile
from collections import namedtuple
+from pathlib import Path
import gradio as gr
@@ -12,10 +13,28 @@ from modules import shared
Savedfile = namedtuple("Savedfile", ["name"])
+def register_tmp_file(gradio, filename):
+ if hasattr(gradio, 'temp_file_sets'): # gradio 3.15
+ gradio.temp_file_sets[0] = gradio.temp_file_sets[0] | {os.path.abspath(filename)}
+
+ if hasattr(gradio, 'temp_dirs'): # gradio 3.9
+ gradio.temp_dirs = gradio.temp_dirs | {os.path.abspath(os.path.dirname(filename))}
+
+
+def check_tmp_file(gradio, filename):
+ if hasattr(gradio, 'temp_file_sets'):
+ return any([filename in fileset for fileset in gradio.temp_file_sets])
+
+ if hasattr(gradio, 'temp_dirs'):
+ return any(Path(temp_dir).resolve() in Path(filename).resolve().parents for temp_dir in gradio.temp_dirs)
+
+ return False
+
+
def save_pil_to_file(pil_image, dir=None):
already_saved_as = getattr(pil_image, 'already_saved_as', None)
if already_saved_as and os.path.isfile(already_saved_as):
- shared.demo.temp_file_sets[0] = shared.demo.temp_file_sets[0] | {os.path.abspath(already_saved_as)}
+ register_tmp_file(shared.demo, already_saved_as)
file_obj = Savedfile(already_saved_as)
return file_obj
@@ -45,7 +64,7 @@ def on_tmpdir_changed():
os.makedirs(shared.opts.temp_dir, exist_ok=True)
- shared.demo.temp_file_sets[0] = shared.demo.temp_file_sets[0] | {os.path.abspath(shared.opts.temp_dir)}
+ register_tmp_file(shared.demo, os.path.join(shared.opts.temp_dir, "x"))
def cleanup_tmpdr():
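Both helpers rely on hasattr feature detection instead of comparing version strings: gradio 3.15 tracks allowed files in Blocks.temp_file_sets, while 3.9 tracks allowed directories in Blocks.temp_dirs. A condensed sketch of the registration side (attribute names as in the patch):

import os

def register_tmp_file_sketch(gradio_blocks, filename):
    path = os.path.abspath(filename)
    if hasattr(gradio_blocks, "temp_file_sets"):  # gradio 3.15: sets of file paths
        gradio_blocks.temp_file_sets[0] = gradio_blocks.temp_file_sets[0] | {path}
    if hasattr(gradio_blocks, "temp_dirs"):       # gradio 3.9: set of directories
        gradio_blocks.temp_dirs = gradio_blocks.temp_dirs | {os.path.dirname(path)}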
From bddebe09edeb6a18f2c06986d5658a7be3a563ea Mon Sep 17 00:00:00 2001
From: Shondoit
Date: Tue, 3 Jan 2023 10:26:37 +0100
Subject: [PATCH 045/172] Save Optimizer next to TI embedding
Also add a check to load only .PT and .BIN files as embeddings (since we add .optim files in the same directory).
---
modules/shared.py | 2 +-
.../textual_inversion/textual_inversion.py | 40 +++++++++++++++----
2 files changed, 33 insertions(+), 9 deletions(-)
diff --git a/modules/shared.py b/modules/shared.py
index 23657a93..c541d18c 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -355,7 +355,7 @@ options_templates.update(options_section(('system', "System"), {
options_templates.update(options_section(('training', "Training"), {
"unload_models_when_training": OptionInfo(False, "Move VAE and CLIP to RAM when training if possible. Saves VRAM."),
"pin_memory": OptionInfo(False, "Turn on pin_memory for DataLoader. Makes training slightly faster but can increase memory usage."),
- "save_optimizer_state": OptionInfo(False, "Saves Optimizer state as separate *.optim file. Training can be resumed with HN itself and matching optim file."),
+ "save_optimizer_state": OptionInfo(False, "Saves Optimizer state as separate *.optim file. Training of embedding or HN can be resumed with the matching optim file."),
"dataset_filename_word_regex": OptionInfo("", "Filename word regex"),
"dataset_filename_join_string": OptionInfo(" ", "Filename join string"),
"training_image_repeats_per_epoch": OptionInfo(1, "Number of repeats for a single input image per epoch; used only for displaying epoch number", gr.Number, {"precision": 0}),
diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py
index fd253477..16176e90 100644
--- a/modules/textual_inversion/textual_inversion.py
+++ b/modules/textual_inversion/textual_inversion.py
@@ -28,6 +28,7 @@ class Embedding:
self.cached_checksum = None
self.sd_checkpoint = None
self.sd_checkpoint_name = None
+ self.optimizer_state_dict = None
def save(self, filename):
embedding_data = {
@@ -41,6 +42,13 @@ class Embedding:
torch.save(embedding_data, filename)
+ if shared.opts.save_optimizer_state and self.optimizer_state_dict is not None:
+ optimizer_saved_dict = {
+ 'hash': self.checksum(),
+ 'optimizer_state_dict': self.optimizer_state_dict,
+ }
+ torch.save(optimizer_saved_dict, filename + '.optim')
+
def checksum(self):
if self.cached_checksum is not None:
return self.cached_checksum
@@ -95,9 +103,10 @@ class EmbeddingDatabase:
self.expected_shape = self.get_expected_shape()
def process_file(path, filename):
- name = os.path.splitext(filename)[0]
+ name, ext = os.path.splitext(filename)
+ ext = ext.upper()
- if os.path.splitext(filename.upper())[-1] in ['.PNG', '.WEBP', '.JXL', '.AVIF']:
+ if ext in ['.PNG', '.WEBP', '.JXL', '.AVIF']:
embed_image = Image.open(path)
if hasattr(embed_image, 'text') and 'sd-ti-embedding' in embed_image.text:
data = embedding_from_b64(embed_image.text['sd-ti-embedding'])
@@ -105,8 +114,10 @@ class EmbeddingDatabase:
else:
data = extract_image_data_embed(embed_image)
name = data.get('name', name)
- else:
+ elif ext in ['.BIN', '.PT']:
data = torch.load(path, map_location="cpu")
+ else:
+ return
# textual inversion embeddings
if 'string_to_param' in data:
@@ -300,6 +311,20 @@ def train_embedding(embedding_name, learn_rate, batch_size, gradient_step, data_
embedding.vec.requires_grad = True
optimizer = torch.optim.AdamW([embedding.vec], lr=scheduler.learn_rate, weight_decay=0.0)
+ if shared.opts.save_optimizer_state:
+ optimizer_state_dict = None
+ if os.path.exists(filename + '.optim'):
+ optimizer_saved_dict = torch.load(filename + '.optim', map_location='cpu')
+ if embedding.checksum() == optimizer_saved_dict.get('hash', None):
+ optimizer_state_dict = optimizer_saved_dict.get('optimizer_state_dict', None)
+
+ if optimizer_state_dict is not None:
+ optimizer.load_state_dict(optimizer_state_dict)
+ print("Loaded existing optimizer from checkpoint")
+ else:
+ print("No saved optimizer exists in checkpoint")
+
+
scaler = torch.cuda.amp.GradScaler()
batch_size = ds.batch_size
@@ -366,9 +391,7 @@ def train_embedding(embedding_name, learn_rate, batch_size, gradient_step, data_
# Before saving, change name to match current checkpoint.
embedding_name_every = f'{embedding_name}-{steps_done}'
last_saved_file = os.path.join(embedding_dir, f'{embedding_name_every}.pt')
- #if shared.opts.save_optimizer_state:
- #embedding.optimizer_state_dict = optimizer.state_dict()
- save_embedding(embedding, checkpoint, embedding_name_every, last_saved_file, remove_cached_checksum=True)
+ save_embedding(embedding, optimizer, checkpoint, embedding_name_every, last_saved_file, remove_cached_checksum=True)
embedding_yet_to_be_embedded = True
write_loss(log_directory, "textual_inversion_loss.csv", embedding.step, steps_per_epoch, {
@@ -458,7 +481,7 @@ Last saved image: {html.escape(last_saved_image)}
"""
filename = os.path.join(shared.cmd_opts.embeddings_dir, f'{embedding_name}.pt')
- save_embedding(embedding, checkpoint, embedding_name, filename, remove_cached_checksum=True)
+ save_embedding(embedding, optimizer, checkpoint, embedding_name, filename, remove_cached_checksum=True)
except Exception:
print(traceback.format_exc(), file=sys.stderr)
pass
@@ -470,7 +493,7 @@ Last saved image: {html.escape(last_saved_image)}
return embedding, filename
-def save_embedding(embedding, checkpoint, embedding_name, filename, remove_cached_checksum=True):
+def save_embedding(embedding, optimizer, checkpoint, embedding_name, filename, remove_cached_checksum=True):
old_embedding_name = embedding.name
old_sd_checkpoint = embedding.sd_checkpoint if hasattr(embedding, "sd_checkpoint") else None
old_sd_checkpoint_name = embedding.sd_checkpoint_name if hasattr(embedding, "sd_checkpoint_name") else None
@@ -481,6 +504,7 @@ def save_embedding(embedding, checkpoint, embedding_name, filename, remove_cache
if remove_cached_checksum:
embedding.cached_checksum = None
embedding.name = embedding_name
+ embedding.optimizer_state_dict = optimizer.state_dict()
embedding.save(filename)
except:
embedding.sd_checkpoint = old_sd_checkpoint
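The .optim file is keyed to the embedding through its checksum, so stale optimizer state is never loaded after the embedding itself changes. A hedged sketch of that save/resume handshake (simplified from the patch; torch assumed installed):

import os
import torch

def save_optim(optimizer, checksum, filename):
    torch.save({"hash": checksum,
                "optimizer_state_dict": optimizer.state_dict()},
               filename + ".optim")

def try_resume_optim(optimizer, checksum, filename):
    if not os.path.exists(filename + ".optim"):
        return False                                # nothing to resume from
    saved = torch.load(filename + ".optim", map_location="cpu")
    if saved.get("hash", None) != checksum:
        return False                                # embedding changed; state is stale
    optimizer.load_state_dict(saved["optimizer_state_dict"])
    return True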
From e9fb9bb0c25f59109a816fc53c385bed58965c24 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Tue, 3 Jan 2023 17:40:20 +0300
Subject: [PATCH 046/172] fix hires fix not working in API when user does not
specify upscaler
---
modules/processing.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/modules/processing.py b/modules/processing.py
index 4654570c..a172af0b 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -685,7 +685,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
def sample(self, conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength, prompts):
self.sampler = sd_samplers.create_sampler(self.sampler_name, self.sd_model)
- latent_scale_mode = shared.latent_upscale_modes.get(self.hr_upscaler, None) if self.hr_upscaler is not None else shared.latent_upscale_default_mode
+ latent_scale_mode = shared.latent_upscale_modes.get(self.hr_upscaler, None) if self.hr_upscaler is not None else shared.latent_upscale_modes.get(shared.latent_upscale_default_mode, "nearest")
if self.enable_hr and latent_scale_mode is None:
assert len([x for x in shared.sd_upscalers if x.name == self.hr_upscaler]) > 0, f"could not find upscaler named {self.hr_upscaler}"
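The old expression returned the default mode's display name (a dict key) instead of its entry when hr_upscaler was unset, which is exactly the case for API callers who omit the field; the fix resolves the default through the same lookup table. A standalone sketch of the resolution order (assuming, as the "nearest" fallback suggests, that latent_upscale_modes maps display names to interpolation settings):

latent_upscale_modes = {"Latent": "bilinear", "Latent (nearest)": "nearest"}
latent_upscale_default_mode = "Latent"

def resolve_latent_mode(hr_upscaler):
    if hr_upscaler is not None:
        # a real (non-latent) upscaler name resolves to None here
        return latent_upscale_modes.get(hr_upscaler, None)
    # old code returned the key "Latent" itself; the fix resolves it properly
    return latent_upscale_modes.get(latent_upscale_default_mode, "nearest")

assert resolve_latent_mode(None) == "bilinear"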
From aaa4c2aacbb6523077334093c81bd475d757f7a1 Mon Sep 17 00:00:00 2001
From: Vladimir Mandic
Date: Tue, 3 Jan 2023 09:45:16 -0500
Subject: [PATCH 047/172] add api logging
---
modules/api/api.py | 24 +++++++++++++++++++++++-
modules/shared.py | 1 +
2 files changed, 24 insertions(+), 1 deletion(-)
diff --git a/modules/api/api.py b/modules/api/api.py
index 9c670f00..53135470 100644
--- a/modules/api/api.py
+++ b/modules/api/api.py
@@ -1,11 +1,12 @@
import base64
import io
import time
+import datetime
import uvicorn
from threading import Lock
from io import BytesIO
from gradio.processing_utils import decode_base64_to_file
-from fastapi import APIRouter, Depends, FastAPI, HTTPException
+from fastapi import APIRouter, Depends, FastAPI, HTTPException, Request, Response
from fastapi.security import HTTPBasic, HTTPBasicCredentials
from secrets import compare_digest
@@ -67,6 +68,26 @@ def encode_pil_to_base64(image):
bytes_data = output_bytes.getvalue()
return base64.b64encode(bytes_data)
+def init_api_middleware(app: FastAPI):
+ @app.middleware("http")
+ async def log_and_time(req: Request, call_next):
+ ts = time.time()
+ res: Response = await call_next(req)
+ duration = str(round(time.time() - ts, 4))
+ res.headers["X-Process-Time"] = duration
+ if shared.cmd_opts.api_log:
+ print('API {t} {code} {prot}/{ver} {method} {p} {cli} {duration}'.format(
+ t = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f"),
+ code = res.status_code,
+ ver = req.scope.get('http_version', '0.0'),
+ cli = req.scope.get('client', ('0:0.0.0', 0))[0],
+ prot = req.scope.get('scheme', 'err'),
+ method = req.scope.get('method', 'err'),
+ p = req.scope.get('path', 'err'),
+ duration = duration,
+ ))
+ return res
+
class Api:
def __init__(self, app: FastAPI, queue_lock: Lock):
@@ -78,6 +99,7 @@ class Api:
self.router = APIRouter()
self.app = app
+ init_api_middleware(self.app)
self.queue_lock = queue_lock
self.add_api_route("/sdapi/v1/txt2img", self.text2imgapi, methods=["POST"], response_model=TextToImageResponse)
self.add_api_route("/sdapi/v1/img2img", self.img2imgapi, methods=["POST"], response_model=ImageToImageResponse)
diff --git a/modules/shared.py b/modules/shared.py
index 23657a93..2a03d716 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -82,6 +82,7 @@ parser.add_argument('--vae-path', type=str, help='Path to Variational Autoencode
parser.add_argument("--disable-safe-unpickle", action='store_true', help="disable checking pytorch models for malicious code", default=False)
parser.add_argument("--api", action='store_true', help="use api=True to launch the API together with the webui (use --nowebui instead for only the API)")
parser.add_argument("--api-auth", type=str, help='Set authentication for API like "username:password"; or comma-delimit multiple like "u1:p1,u2:p2,u3:p3"', default=None)
+parser.add_argument("--api-log", action='store_true', help="use api-log=True to enable logging of all API requests")
parser.add_argument("--nowebui", action='store_true', help="use api=True to launch the API instead of the webui")
parser.add_argument("--ui-debug-mode", action='store_true', help="Don't load model to quickly launch UI")
parser.add_argument("--device-id", type=str, help="Select the default CUDA device to use (export CUDA_VISIBLE_DEVICES=0,1,etc might be needed before)", default=None)
From 1d9dc48efda2e8da6d13fc62e65500198a9b041c Mon Sep 17 00:00:00 2001
From: Vladimir Mandic
Date: Tue, 3 Jan 2023 10:21:51 -0500
Subject: [PATCH 048/172] init job and add info to model merge
---
modules/extras.py | 14 ++++++++++++--
1 file changed, 12 insertions(+), 2 deletions(-)
diff --git a/modules/extras.py b/modules/extras.py
index 5e270250..7e222313 100644
--- a/modules/extras.py
+++ b/modules/extras.py
@@ -242,6 +242,9 @@ def run_pnginfo(image):
def run_modelmerger(primary_model_name, secondary_model_name, tertiary_model_name, interp_method, multiplier, save_as_half, custom_name, checkpoint_format):
+ shared.state.begin()
+ shared.state.job = 'model-merge'
+
def weighted_sum(theta0, theta1, alpha):
return ((1 - alpha) * theta0) + (alpha * theta1)
@@ -263,8 +266,11 @@ def run_modelmerger(primary_model_name, secondary_model_name, tertiary_model_nam
theta_func1, theta_func2 = theta_funcs[interp_method]
if theta_func1 and not tertiary_model_info:
+ shared.state.textinfo = "Failed: Interpolation method requires a tertiary model."
+ shared.state.end()
return ["Failed: Interpolation method requires a tertiary model."] + [gr.Dropdown.update(choices=sd_models.checkpoint_tiles()) for _ in range(4)]
+ shared.state.textinfo = f"Loading {secondary_model_info.filename}..."
print(f"Loading {secondary_model_info.filename}...")
theta_1 = sd_models.read_state_dict(secondary_model_info.filename, map_location='cpu')
@@ -281,6 +287,7 @@ def run_modelmerger(primary_model_name, secondary_model_name, tertiary_model_nam
theta_1[key] = torch.zeros_like(theta_1[key])
del theta_2
+ shared.state.textinfo = f"Loading {primary_model_info.filename}..."
print(f"Loading {primary_model_info.filename}...")
theta_0 = sd_models.read_state_dict(primary_model_info.filename, map_location='cpu')
@@ -291,6 +298,7 @@ def run_modelmerger(primary_model_name, secondary_model_name, tertiary_model_nam
a = theta_0[key]
b = theta_1[key]
+ shared.state.textinfo = f'Merging layer {key}'
# this enables merging an inpainting model (A) with another one (B);
# where normal model would have 4 channels, for latenst space, inpainting model would
# have another 4 channels for unmasked picture's latent space, plus one channel for mask, for a total of 9
@@ -303,8 +311,6 @@ def run_modelmerger(primary_model_name, secondary_model_name, tertiary_model_nam
theta_0[key][:, 0:4, :, :] = theta_func2(a[:, 0:4, :, :], b, multiplier)
result_is_inpainting_model = True
else:
- assert a.shape == b.shape, f'Incompatible shapes for layer {key}: A is {a.shape}, and B is {b.shape}'
-
theta_0[key] = theta_func2(a, b, multiplier)
if save_as_half:
@@ -332,6 +338,7 @@ def run_modelmerger(primary_model_name, secondary_model_name, tertiary_model_nam
output_modelname = os.path.join(ckpt_dir, filename)
+ shared.state.textinfo = f"Saving to {output_modelname}..."
print(f"Saving to {output_modelname}...")
_, extension = os.path.splitext(output_modelname)
@@ -343,4 +350,7 @@ def run_modelmerger(primary_model_name, secondary_model_name, tertiary_model_nam
sd_models.list_models()
print("Checkpoint saved.")
+ shared.state.textinfo = "Checkpoint saved to " + output_modelname
+ shared.state.end()
+
return ["Checkpoint saved to " + output_modelname] + [gr.Dropdown.update(choices=sd_models.checkpoint_tiles()) for _ in range(4)]
From 192ddc04d6de0d780f73aa5fbaa8c66cd4642e1c Mon Sep 17 00:00:00 2001
From: Vladimir Mandic
Date: Tue, 3 Jan 2023 10:34:51 -0500
Subject: [PATCH 049/172] add job info to modules
---
modules/extras.py | 17 +++++++++++++----
modules/hypernetworks/hypernetwork.py | 1 +
modules/textual_inversion/preprocess.py | 1 +
modules/textual_inversion/textual_inversion.py | 1 +
4 files changed, 16 insertions(+), 4 deletions(-)
diff --git a/modules/extras.py b/modules/extras.py
index 7e222313..d665440a 100644
--- a/modules/extras.py
+++ b/modules/extras.py
@@ -58,6 +58,9 @@ cached_images: LruCache = LruCache(max_size=5)
def run_extras(extras_mode, resize_mode, image, image_folder, input_dir, output_dir, show_extras_results, gfpgan_visibility, codeformer_visibility, codeformer_weight, upscaling_resize, upscaling_resize_w, upscaling_resize_h, upscaling_crop, extras_upscaler_1, extras_upscaler_2, extras_upscaler_2_visibility, upscale_first: bool, save_output: bool = True):
devices.torch_gc()
+ shared.state.begin()
+ shared.state.job = 'extras'
+
imageArr = []
# Also keep track of original file names
imageNameArr = []
@@ -94,6 +97,7 @@ def run_extras(extras_mode, resize_mode, image, image_folder, input_dir, output_
# Extra operation definitions
def run_gfpgan(image: Image.Image, info: str) -> Tuple[Image.Image, str]:
+ shared.state.job = 'extras-gfpgan'
restored_img = modules.gfpgan_model.gfpgan_fix_faces(np.array(image, dtype=np.uint8))
res = Image.fromarray(restored_img)
@@ -104,6 +108,7 @@ def run_extras(extras_mode, resize_mode, image, image_folder, input_dir, output_
return (res, info)
def run_codeformer(image: Image.Image, info: str) -> Tuple[Image.Image, str]:
+ shared.state.job = 'extras-codeformer'
restored_img = modules.codeformer_model.codeformer.restore(np.array(image, dtype=np.uint8), w=codeformer_weight)
res = Image.fromarray(restored_img)
@@ -114,6 +119,7 @@ def run_extras(extras_mode, resize_mode, image, image_folder, input_dir, output_
return (res, info)
def upscale(image, scaler_index, resize, mode, resize_w, resize_h, crop):
+ shared.state.job = 'extras-upscale'
upscaler = shared.sd_upscalers[scaler_index]
res = upscaler.scaler.upscale(image, resize, upscaler.data_path)
if mode == 1 and crop:
@@ -180,6 +186,9 @@ def run_extras(extras_mode, resize_mode, image, image_folder, input_dir, output_
for image, image_name in zip(imageArr, imageNameArr):
if image is None:
return outputs, "Please select an input image.", ''
+
+ shared.state.textinfo = f'Processing image {image_name}'
+
existing_pnginfo = image.info or {}
image = image.convert("RGB")
@@ -193,6 +202,10 @@ def run_extras(extras_mode, resize_mode, image, image_folder, input_dir, output_
else:
basename = ''
+ if opts.enable_pnginfo: # append info before save
+ image.info = existing_pnginfo
+ image.info["extras"] = info
+
if save_output:
# Add upscaler name as a suffix.
suffix = f"-{shared.sd_upscalers[extras_upscaler_1].name}" if shared.opts.use_upscaler_name_as_suffix else ""
@@ -203,10 +216,6 @@ def run_extras(extras_mode, resize_mode, image, image_folder, input_dir, output_
images.save_image(image, path=outpath, basename=basename, seed=None, prompt=None, extension=opts.samples_format, info=info, short_filename=True,
no_prompt=True, grid=False, pnginfo_section_name="extras", existing_info=existing_pnginfo, forced_filename=None, suffix=suffix)
- if opts.enable_pnginfo:
- image.info = existing_pnginfo
- image.info["extras"] = info
-
if extras_mode != 2 or show_extras_results :
outputs.append(image)
diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py
index 109e8078..450fecac 100644
--- a/modules/hypernetworks/hypernetwork.py
+++ b/modules/hypernetworks/hypernetwork.py
@@ -417,6 +417,7 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, gradient_step,
shared.loaded_hypernetwork = Hypernetwork()
shared.loaded_hypernetwork.load(path)
+ shared.state.job = "train-hypernetwork"
shared.state.textinfo = "Initializing hypernetwork training..."
shared.state.job_count = steps
diff --git a/modules/textual_inversion/preprocess.py b/modules/textual_inversion/preprocess.py
index 56b9b2eb..feb876c6 100644
--- a/modules/textual_inversion/preprocess.py
+++ b/modules/textual_inversion/preprocess.py
@@ -124,6 +124,7 @@ def preprocess_work(process_src, process_dst, process_width, process_height, pre
files = listfiles(src)
+ shared.state.job = "preprocess"
shared.state.textinfo = "Preprocessing..."
shared.state.job_count = len(files)
diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py
index fd253477..2c1251d6 100644
--- a/modules/textual_inversion/textual_inversion.py
+++ b/modules/textual_inversion/textual_inversion.py
@@ -245,6 +245,7 @@ def train_embedding(embedding_name, learn_rate, batch_size, gradient_step, data_
create_image_every = create_image_every or 0
validate_train_inputs(embedding_name, learn_rate, batch_size, gradient_step, data_root, template_file, steps, save_embedding_every, create_image_every, log_directory, name="embedding")
+ shared.state.job = "train-embedding"
shared.state.textinfo = "Initializing textual inversion training..."
shared.state.job_count = steps
From 2d5a5076bb2a0c05cc27d75a1bcadab7f32a46d0 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Tue, 3 Jan 2023 18:38:21 +0300
Subject: [PATCH 050/172] Make it so that upscalers are not repeated when
restarting UI.
---
modules/modelloader.py | 20 ++++++++++++++++++++
webui.py | 14 +++++++-------
2 files changed, 27 insertions(+), 7 deletions(-)
diff --git a/modules/modelloader.py b/modules/modelloader.py
index e647f6fa..6a1a7ac8 100644
--- a/modules/modelloader.py
+++ b/modules/modelloader.py
@@ -123,6 +123,23 @@ def move_files(src_path: str, dest_path: str, ext_filter: str = None):
pass
+builtin_upscaler_classes = []
+forbidden_upscaler_classes = set()
+
+
+def list_builtin_upscalers():
+ load_upscalers()
+
+ builtin_upscaler_classes.clear()
+ builtin_upscaler_classes.extend(Upscaler.__subclasses__())
+
+
+def forbid_loaded_nonbuiltin_upscalers():
+ for cls in Upscaler.__subclasses__():
+ if cls not in builtin_upscaler_classes:
+ forbidden_upscaler_classes.add(cls)
+
+
def load_upscalers():
# We can only do this 'magic' method to dynamically load upscalers if they are referenced,
# so we'll try to import any _model.py files before looking in __subclasses__
@@ -139,6 +156,9 @@ def load_upscalers():
datas = []
commandline_options = vars(shared.cmd_opts)
for cls in Upscaler.__subclasses__():
+ if cls in forbidden_upscaler_classes:
+ continue
+
name = cls.__name__
cmd_name = f"{name.lower().replace('upscaler', '')}_models_path"
scaler = cls(commandline_options.get(cmd_name, None))
diff --git a/webui.py b/webui.py
index 3aee8792..c7d55a97 100644
--- a/webui.py
+++ b/webui.py
@@ -1,4 +1,5 @@
import os
+import sys
import threading
import time
import importlib
@@ -55,8 +56,8 @@ def initialize():
gfpgan.setup_model(cmd_opts.gfpgan_models_path)
shared.face_restorers.append(modules.face_restoration.FaceRestoration())
+ modelloader.list_builtin_upscalers()
modules.scripts.load_scripts()
-
modelloader.load_upscalers()
modules.sd_vae.refresh_vae_list()
@@ -169,23 +170,22 @@ def webui():
modules.script_callbacks.app_started_callback(shared.demo, app)
wait_on_server(shared.demo)
+ print('Restarting UI...')
sd_samplers.set_samplers()
- print('Reloading extensions')
extensions.list_extensions()
localization.list_localizations(cmd_opts.localizations_dir)
- print('Reloading custom scripts')
+ modelloader.forbid_loaded_nonbuiltin_upscalers()
modules.scripts.reload_scripts()
modelloader.load_upscalers()
- print('Reloading modules: modules.ui')
- importlib.reload(modules.ui)
- print('Refreshing Model List')
+ for module in [module for name, module in sys.modules.items() if name.startswith("modules.ui")]:
+ importlib.reload(module)
+
modules.sd_models.list_models()
- print('Restarting Gradio')
if __name__ == "__main__":
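
Editor's note: the de-duplication relies on Python's `type.__subclasses__()`: every `Upscaler` subclass ever defined stays registered on the base class, so reloading scripts on a UI restart would enumerate both the stale and the fresh class objects. A toy reproduction of the snapshot-and-forbid pattern (class names here are invented):

```python
# Toy version of the pattern from modules/modelloader.py: snapshot the
# built-in subclasses at startup, then forbid any stale non-builtin
# class objects left over from a previous script load.
class Upscaler:
    pass

builtin_classes = []
forbidden_classes = set()

def list_builtin():
    builtin_classes.clear()
    builtin_classes.extend(Upscaler.__subclasses__())

def forbid_nonbuiltin():
    for cls in Upscaler.__subclasses__():
        if cls not in builtin_classes:
            forbidden_classes.add(cls)

def load_upscalers():
    return [cls() for cls in Upscaler.__subclasses__()
            if cls not in forbidden_classes]

class Lanczos(Upscaler): pass        # a built-in, defined before the snapshot
list_builtin()

class ExtScaler(Upscaler): pass      # stale class from a previous script load
forbid_nonbuiltin()                  # ExtScaler is now skipped
print([type(u).__name__ for u in load_upscalers()])  # ['Lanczos']
```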
From 8f96f9289981a66741ba770d14f3d27ce335a0fb Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Tue, 3 Jan 2023 18:39:14 +0300
Subject: [PATCH 051/172] call script callbacks for reloaded model after
loading embeddings
---
modules/sd_models.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/modules/sd_models.py b/modules/sd_models.py
index bff8d6c9..b98b05fc 100644
--- a/modules/sd_models.py
+++ b/modules/sd_models.py
@@ -324,12 +324,12 @@ def load_model(checkpoint_info=None):
sd_model.eval()
shared.sd_model = sd_model
+ sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings(force_reload=True) # Reload embeddings after model load as they may or may not fit the model
+
script_callbacks.model_loaded_callback(sd_model)
print("Model loaded.")
- sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings(force_reload = True) # Reload embeddings after model load as they may or may not fit the model
-
return sd_model
From cec209981ee988536c2521297baf9bc1b256005f Mon Sep 17 00:00:00 2001
From: Vladimir Mandic
Date: Tue, 3 Jan 2023 10:58:52 -0500
Subject: [PATCH 052/172] log only sdapi
---
modules/api/api.py | 9 +++++----
1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/modules/api/api.py b/modules/api/api.py
index 53135470..78751c57 100644
--- a/modules/api/api.py
+++ b/modules/api/api.py
@@ -68,22 +68,23 @@ def encode_pil_to_base64(image):
bytes_data = output_bytes.getvalue()
return base64.b64encode(bytes_data)
-def init_api_middleware(app: FastAPI):
+def api_middleware(app: FastAPI):
@app.middleware("http")
async def log_and_time(req: Request, call_next):
ts = time.time()
res: Response = await call_next(req)
duration = str(round(time.time() - ts, 4))
res.headers["X-Process-Time"] = duration
- if shared.cmd_opts.api_log:
- print('API {t} {code} {prot}/{ver} {method} {p} {cli} {duration}'.format(
+ endpoint = req.scope.get('path', 'err')
+ if shared.cmd_opts.api_log and endpoint.startswith('/sdapi'):
+ print('API {t} {code} {prot}/{ver} {method} {endpoint} {cli} {duration}'.format(
t = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f"),
code = res.status_code,
ver = req.scope.get('http_version', '0.0'),
cli = req.scope.get('client', ('0:0.0.0', 0))[0],
prot = req.scope.get('scheme', 'err'),
method = req.scope.get('method', 'err'),
- p = req.scope.get('path', 'err'),
+ endpoint = endpoint,
duration = duration,
))
return res
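
Editor's note: the rename from `init_api_middleware` to `api_middleware` aside, the behavioral change is the `/sdapi` prefix filter. A self-contained sketch of the same middleware on a throwaway FastAPI app (field selection mirrors the patch; the log format is abbreviated):

```python
# Minimal FastAPI timing middleware that only logs /sdapi requests,
# mirroring the filter added in this patch. The app is an example,
# not webui's.
import datetime
import time

from fastapi import FastAPI, Request, Response

app = FastAPI()

@app.middleware("http")
async def log_and_time(req: Request, call_next):
    ts = time.time()
    res: Response = await call_next(req)
    duration = str(round(time.time() - ts, 4))
    res.headers["X-Process-Time"] = duration
    endpoint = req.scope.get('path', 'err')
    if endpoint.startswith('/sdapi'):        # skip UI/static traffic
        print(f"API {datetime.datetime.now():%Y-%m-%d %H:%M:%S.%f} "
              f"{res.status_code} {req.method} {endpoint} {duration}")
    return res
```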
From d8d206c1685d1e7027d4af82ed18d106f41d1cc4 Mon Sep 17 00:00:00 2001
From: Vladimir Mandic
Date: Tue, 3 Jan 2023 11:01:04 -0500
Subject: [PATCH 053/172] add state to interrogate
---
modules/interrogate.py | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/modules/interrogate.py b/modules/interrogate.py
index 6f761c5a..738d8ff7 100644
--- a/modules/interrogate.py
+++ b/modules/interrogate.py
@@ -136,7 +136,8 @@ class InterrogateModels:
def interrogate(self, pil_image):
res = ""
-
+ shared.state.begin()
+ shared.state.job = 'interrogate'
try:
if shared.cmd_opts.lowvram or shared.cmd_opts.medvram:
@@ -177,5 +178,6 @@ class InterrogateModels:
res += ""
self.unload()
+ shared.state.end()
return res
From 82cfc227d735c140447d5b8dca29a71ee9bde127 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Tue, 3 Jan 2023 20:23:17 +0300
Subject: [PATCH 054/172] added licenses screen to settings, added footer,
 removed unused inpainting code
---
README.md | 2 +
html/footer.html | 9 +
html/licenses.html | 392 ++++++++++++++++++++++++++++++++
modules/sd_hijack_inpainting.py | 232 -------------------
modules/ui.py | 15 +-
style.css | 11 +
6 files changed, 427 insertions(+), 234 deletions(-)
create mode 100644 html/footer.html
create mode 100644 html/licenses.html
diff --git a/README.md b/README.md
index 556000fb..88250a6b 100644
--- a/README.md
+++ b/README.md
@@ -127,6 +127,8 @@ Here's how to add code to this repo: [Contributing](https://github.com/AUTOMATIC
The documentation was moved from this README over to the project's [wiki](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki).
## Credits
+Licenses for borrowed code can be found in `Settings -> Licenses` screen, and also in `html/licenses.html` file.
+
- Stable Diffusion - https://github.com/CompVis/stable-diffusion, https://github.com/CompVis/taming-transformers
- k-diffusion - https://github.com/crowsonkb/k-diffusion.git
- GFPGAN - https://github.com/TencentARC/GFPGAN.git
diff --git a/html/footer.html b/html/footer.html
new file mode 100644
index 00000000..a8f2adf7
--- /dev/null
+++ b/html/footer.html
@@ -0,0 +1,9 @@
+
diff --git a/html/licenses.html b/html/licenses.html
new file mode 100644
index 00000000..9eeaa072
--- /dev/null
+++ b/html/licenses.html
@@ -0,0 +1,392 @@
+
+
+
+Parts of CodeFormer code had to be copied to be compatible with GFPGAN.
+
+S-Lab License 1.0
+
+Copyright 2022 S-Lab
+
+Redistribution and use for non-commercial purpose in source and
+binary forms, with or without modification, are permitted provided
+that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+3. Neither the name of the copyright holder nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+In the event that redistribution and/or use for commercial purpose in
+source or binary forms, with or without modification is required,
+please contact the contributor(s) of the work.
+
+
+
+
+Code for architecture and reading models copied.
+
+MIT License
+
+Copyright (c) 2021 victorca25
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+
+
+Some code is copied to support ESRGAN models.
+
+BSD 3-Clause License
+
+Copyright (c) 2021, Xintao Wang
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+3. Neither the name of the copyright holder nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+
+Some code for compatibility with OSX is taken from lstein's repository.
+
+MIT License
+
+Copyright (c) 2022 InvokeAI Team
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+
+
+Code added by contributors, most likely copied from this repository.
+
+MIT License
+
+Copyright (c) 2022 Machine Vision and Learning Group, LMU Munich
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+
+
+Some small amounts of code borrowed and reworked.
+
+MIT License
+
+Copyright (c) 2022 pharmapsychotic
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+
+
+Code added by contributors, most likely copied from this repository.
+
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [2021] [SwinIR Authors]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+
diff --git a/modules/sd_hijack_inpainting.py b/modules/sd_hijack_inpainting.py
index 06b75772..3c214a35 100644
--- a/modules/sd_hijack_inpainting.py
+++ b/modules/sd_hijack_inpainting.py
@@ -12,191 +12,6 @@ from ldm.models.diffusion.ddpm import LatentDiffusion
from ldm.models.diffusion.plms import PLMSSampler
from ldm.models.diffusion.ddim import DDIMSampler, noise_like
-# =================================================================================================
-# Monkey patch DDIMSampler methods from RunwayML repo directly.
-# Adapted from:
-# https://github.com/runwayml/stable-diffusion/blob/main/ldm/models/diffusion/ddim.py
-# =================================================================================================
-@torch.no_grad()
-def sample_ddim(self,
- S,
- batch_size,
- shape,
- conditioning=None,
- callback=None,
- normals_sequence=None,
- img_callback=None,
- quantize_x0=False,
- eta=0.,
- mask=None,
- x0=None,
- temperature=1.,
- noise_dropout=0.,
- score_corrector=None,
- corrector_kwargs=None,
- verbose=True,
- x_T=None,
- log_every_t=100,
- unconditional_guidance_scale=1.,
- unconditional_conditioning=None,
- # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ...
- **kwargs
- ):
- if conditioning is not None:
- if isinstance(conditioning, dict):
- ctmp = conditioning[list(conditioning.keys())[0]]
- while isinstance(ctmp, list):
- ctmp = ctmp[0]
- cbs = ctmp.shape[0]
- if cbs != batch_size:
- print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}")
- else:
- if conditioning.shape[0] != batch_size:
- print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}")
-
- self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)
- # sampling
- C, H, W = shape
- size = (batch_size, C, H, W)
- print(f'Data shape for DDIM sampling is {size}, eta {eta}')
-
- samples, intermediates = self.ddim_sampling(conditioning, size,
- callback=callback,
- img_callback=img_callback,
- quantize_denoised=quantize_x0,
- mask=mask, x0=x0,
- ddim_use_original_steps=False,
- noise_dropout=noise_dropout,
- temperature=temperature,
- score_corrector=score_corrector,
- corrector_kwargs=corrector_kwargs,
- x_T=x_T,
- log_every_t=log_every_t,
- unconditional_guidance_scale=unconditional_guidance_scale,
- unconditional_conditioning=unconditional_conditioning,
- )
- return samples, intermediates
-
-@torch.no_grad()
-def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,
- temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,
- unconditional_guidance_scale=1., unconditional_conditioning=None):
- b, *_, device = *x.shape, x.device
-
- if unconditional_conditioning is None or unconditional_guidance_scale == 1.:
- e_t = self.model.apply_model(x, t, c)
- else:
- x_in = torch.cat([x] * 2)
- t_in = torch.cat([t] * 2)
- if isinstance(c, dict):
- assert isinstance(unconditional_conditioning, dict)
- c_in = dict()
- for k in c:
- if isinstance(c[k], list):
- c_in[k] = [
- torch.cat([unconditional_conditioning[k][i], c[k][i]])
- for i in range(len(c[k]))
- ]
- else:
- c_in[k] = torch.cat([unconditional_conditioning[k], c[k]])
- else:
- c_in = torch.cat([unconditional_conditioning, c])
- e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)
- e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)
-
- if score_corrector is not None:
- assert self.model.parameterization == "eps"
- e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)
-
- alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas
- alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev
- sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas
- sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas
- # select parameters corresponding to the currently considered timestep
- a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)
- a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)
- sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)
- sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)
-
- # current prediction for x_0
- pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()
- if quantize_denoised:
- pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)
- # direction pointing to x_t
- dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t
- noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature
- if noise_dropout > 0.:
- noise = torch.nn.functional.dropout(noise, p=noise_dropout)
- x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise
- return x_prev, pred_x0
-
-
-# =================================================================================================
-# Monkey patch PLMSSampler methods.
-# This one was not actually patched correctly in the RunwayML repo, but we can replicate the changes.
-# Adapted from:
-# https://github.com/CompVis/stable-diffusion/blob/main/ldm/models/diffusion/plms.py
-# =================================================================================================
-@torch.no_grad()
-def sample_plms(self,
- S,
- batch_size,
- shape,
- conditioning=None,
- callback=None,
- normals_sequence=None,
- img_callback=None,
- quantize_x0=False,
- eta=0.,
- mask=None,
- x0=None,
- temperature=1.,
- noise_dropout=0.,
- score_corrector=None,
- corrector_kwargs=None,
- verbose=True,
- x_T=None,
- log_every_t=100,
- unconditional_guidance_scale=1.,
- unconditional_conditioning=None,
- # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ...
- **kwargs
- ):
- if conditioning is not None:
- if isinstance(conditioning, dict):
- ctmp = conditioning[list(conditioning.keys())[0]]
- while isinstance(ctmp, list):
- ctmp = ctmp[0]
- cbs = ctmp.shape[0]
- if cbs != batch_size:
- print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}")
- else:
- if conditioning.shape[0] != batch_size:
- print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}")
-
- self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)
- # sampling
- C, H, W = shape
- size = (batch_size, C, H, W)
- # print(f'Data shape for PLMS sampling is {size}') # remove unnecessary message
-
- samples, intermediates = self.plms_sampling(conditioning, size,
- callback=callback,
- img_callback=img_callback,
- quantize_denoised=quantize_x0,
- mask=mask, x0=x0,
- ddim_use_original_steps=False,
- noise_dropout=noise_dropout,
- temperature=temperature,
- score_corrector=score_corrector,
- corrector_kwargs=corrector_kwargs,
- x_T=x_T,
- log_every_t=log_every_t,
- unconditional_guidance_scale=unconditional_guidance_scale,
- unconditional_conditioning=unconditional_conditioning,
- )
- return samples, intermediates
-
@torch.no_grad()
def p_sample_plms(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,
@@ -280,44 +95,6 @@ def p_sample_plms(self, x, c, t, index, repeat_noise=False, use_original_steps=F
return x_prev, pred_x0, e_t
-# =================================================================================================
-# Monkey patch LatentInpaintDiffusion to load the checkpoint with a proper config.
-# Adapted from:
-# https://github.com/runwayml/stable-diffusion/blob/main/ldm/models/diffusion/ddpm.py
-# =================================================================================================
-
-@torch.no_grad()
-def get_unconditional_conditioning(self, batch_size, null_label=None):
- if null_label is not None:
- xc = null_label
- if isinstance(xc, ListConfig):
- xc = list(xc)
- if isinstance(xc, dict) or isinstance(xc, list):
- c = self.get_learned_conditioning(xc)
- else:
- if hasattr(xc, "to"):
- xc = xc.to(self.device)
- c = self.get_learned_conditioning(xc)
- else:
- # todo: get null label from cond_stage_model
- raise NotImplementedError()
- c = repeat(c, "1 ... -> b ...", b=batch_size).to(self.device)
- return c
-
-
-class LatentInpaintDiffusion(LatentDiffusion):
- def __init__(
- self,
- concat_keys=("mask", "masked_image"),
- masked_image_key="masked_image",
- *args,
- **kwargs,
- ):
- super().__init__(*args, **kwargs)
- self.masked_image_key = masked_image_key
- assert self.masked_image_key in concat_keys
- self.concat_keys = concat_keys
-
def should_hijack_inpainting(checkpoint_info):
ckpt_basename = os.path.basename(checkpoint_info.filename).lower()
@@ -326,15 +103,6 @@ def should_hijack_inpainting(checkpoint_info):
def do_inpainting_hijack():
- # most of this stuff seems to no longer be needed because it is already included into SD2.0
# p_sample_plms is needed because PLMS can't work with dicts as conditionings
- # this file should be cleaned up later if everything turns out to work fine
-
- # ldm.models.diffusion.ddpm.get_unconditional_conditioning = get_unconditional_conditioning
- # ldm.models.diffusion.ddpm.LatentInpaintDiffusion = LatentInpaintDiffusion
-
- # ldm.models.diffusion.ddim.DDIMSampler.p_sample_ddim = p_sample_ddim
- # ldm.models.diffusion.ddim.DDIMSampler.sample = sample_ddim
ldm.models.diffusion.plms.PLMSSampler.p_sample_plms = p_sample_plms
- # ldm.models.diffusion.plms.PLMSSampler.sample = sample_plms
diff --git a/modules/ui.py b/modules/ui.py
index f2e7c0d6..d941cb5f 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -1529,8 +1529,10 @@ def create_ui():
with gr.Blocks(analytics_enabled=False) as settings_interface:
with gr.Row():
- settings_submit = gr.Button(value="Apply settings", variant='primary', elem_id="settings_submit")
- restart_gradio = gr.Button(value='Restart UI', variant='primary', elem_id="settings_restart_gradio")
+ with gr.Column(scale=6):
+ settings_submit = gr.Button(value="Apply settings", variant='primary', elem_id="settings_submit")
+ with gr.Column():
+ restart_gradio = gr.Button(value='Reload UI', variant='primary', elem_id="settings_restart_gradio")
result = gr.HTML(elem_id="settings_result")
@@ -1574,6 +1576,11 @@ def create_ui():
download_localization = gr.Button(value='Download localization template', elem_id="download_localization")
reload_script_bodies = gr.Button(value='Reload custom script bodies (No ui updates, No restart)', variant='secondary', elem_id="settings_reload_script_bodies")
+ if os.path.exists("html/licenses.html"):
+ with open("html/licenses.html", encoding="utf8") as file:
+ with gr.TabItem("Licenses"):
+ gr.HTML(file.read(), elem_id="licenses")
+
gr.Button(value="Show all pages", elem_id="settings_show_all_pages")
request_notifications.click(
@@ -1659,6 +1666,10 @@ def create_ui():
if os.path.exists(os.path.join(script_path, "notification.mp3")):
audio_notification = gr.Audio(interactive=False, value=os.path.join(script_path, "notification.mp3"), elem_id="audio_notification", visible=False)
+ if os.path.exists("html/footer.html"):
+ with open("html/footer.html", encoding="utf8") as file:
+ gr.HTML(file.read(), elem_id="footer")
+
text_settings = gr.Textbox(elem_id="settings_json", value=lambda: opts.dumpjson(), visible=False)
settings_submit.click(
fn=wrap_gradio_call(run_settings, extra_outputs=[gr.update()]),
diff --git a/style.css b/style.css
index 7296ce91..2116ec3c 100644
--- a/style.css
+++ b/style.css
@@ -616,6 +616,17 @@ img2maskimg, #img2maskimg > .h-60, #img2maskimg > .h-60 > div, #img2maskimg > .h
padding-bottom: 0.5em;
}
+footer {
+ display: none !important;
+}
+
+#footer{
+ text-align: center;
+}
+
+#footer div{
+ display: inline-block;
+}
/* The following handles localization for right-to-left (RTL) languages like Arabic.
The rtl media type will only be activated by the logic in javascript/localization.js.
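
Editor's note: the UI side of this patch is a small conditional-include pattern: if an HTML fragment exists on disk, read it and render it with `gr.HTML`. A sketch under the same file paths (the explicit `gr.Tabs()` wrapper stands in for the surrounding settings tabs, which the hunk elides):

```python
# Sketch of the conditional HTML includes added to modules/ui.py.
import os
import gradio as gr

with gr.Blocks() as demo:
    with gr.Tabs():
        if os.path.exists("html/licenses.html"):
            with open("html/licenses.html", encoding="utf8") as file:
                with gr.TabItem("Licenses"):
                    gr.HTML(file.read(), elem_id="licenses")

    if os.path.exists("html/footer.html"):
        with open("html/footer.html", encoding="utf8") as file:
            gr.HTML(file.read(), elem_id="footer")
```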
From 7c89f3718f9f078113833a88a86f02d3205855b4 Mon Sep 17 00:00:00 2001
From: MMaker
Date: Tue, 3 Jan 2023 12:46:48 -0500
Subject: [PATCH 055/172] Add image paste fallback
Fixes Firefox pasting support
(and possibly other browsers)
---
javascript/dragdrop.js | 10 +++++++++-
1 file changed, 9 insertions(+), 1 deletion(-)
diff --git a/javascript/dragdrop.js b/javascript/dragdrop.js
index 3ed1cb3c..fe008924 100644
--- a/javascript/dragdrop.js
+++ b/javascript/dragdrop.js
@@ -9,11 +9,19 @@ function dropReplaceImage( imgWrap, files ) {
return;
}
+ const tmpFile = files[0];
+
imgWrap.querySelector('.modify-upload button + button, .touch-none + div button + button')?.click();
const callback = () => {
const fileInput = imgWrap.querySelector('input[type="file"]');
if ( fileInput ) {
- fileInput.files = files;
+ if ( files.length === 0 ) {
+ files = new DataTransfer();
+ files.items.add(tmpFile);
+ fileInput.files = files.files;
+ } else {
+ fileInput.files = files;
+ }
fileInput.dispatchEvent(new Event('change'));
}
};
From 3e22e294135ed0327ce9d9738655ff03c53df3c0 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Tue, 3 Jan 2023 21:49:24 +0300
Subject: [PATCH 056/172] fix broken send to extras button
---
modules/generation_parameters_copypaste.py | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py
index d94f11a3..4baf4d9a 100644
--- a/modules/generation_parameters_copypaste.py
+++ b/modules/generation_parameters_copypaste.py
@@ -37,7 +37,10 @@ def quote(text):
def image_from_url_text(filedata):
- if type(filedata) == dict and filedata["is_file"]:
+ if type(filedata) == list and len(filedata) > 0 and type(filedata[0]) == dict and filedata[0].get("is_file", False):
+ filedata = filedata[0]
+
+ if type(filedata) == dict and filedata.get("is_file", False):
filename = filedata["name"]
is_in_right_dir = ui_tempdir.check_tmp_file(shared.demo, filename)
assert is_in_right_dir, 'trying to open image file outside of allowed directories'
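
Editor's note: the fix is defensive unwrapping: depending on the gradio component, `filedata` can arrive as a dict or as a one-element list of dicts, and `.get("is_file", False)` replaces the direct key access that raised on the extras tab. The shape of the logic, isolated (values are illustrative):

```python
# Isolated form of the filedata handling from image_from_url_text().
def extract_filename(filedata):
    if isinstance(filedata, list) and len(filedata) > 0 and \
            isinstance(filedata[0], dict) and filedata[0].get("is_file", False):
        filedata = filedata[0]                # unwrap single-element list
    if isinstance(filedata, dict) and filedata.get("is_file", False):
        return filedata["name"]               # temp file path from gradio
    return None

print(extract_filename([{"name": "tmp/grid.png", "is_file": True}]))  # tmp/grid.png
```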
From 917b5bd8d0cd47c9dc241c1852ccd440a8c61668 Mon Sep 17 00:00:00 2001
From: Max Weber
Date: Tue, 3 Jan 2023 18:19:56 -0700
Subject: [PATCH 057/172] ui: save dropdown sampling method to the ui-config
---
modules/ui.py | 1 +
1 file changed, 1 insertion(+)
diff --git a/modules/ui.py b/modules/ui.py
index d941cb5f..bfc93634 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -635,6 +635,7 @@ def create_sampler_and_steps_selection(choices, tabname):
if opts.samplers_in_dropdown:
with FormRow(elem_id=f"sampler_selection_{tabname}"):
sampler_index = gr.Dropdown(label='Sampling method', elem_id=f"{tabname}_sampling", choices=[x.name for x in choices], value=choices[0].name, type="index")
+ sampler_index.save_to_config = True
steps = gr.Slider(minimum=1, maximum=150, step=1, elem_id=f"{tabname}_steps", label="Sampling Steps", value=20)
else:
with FormGroup(elem_id=f"sampler_selection_{tabname}"):
From 4fc81542077af73610279ad7b6b26e38718a0f81 Mon Sep 17 00:00:00 2001
From: Gerschel
Date: Tue, 3 Jan 2023 23:25:34 -0800
Subject: [PATCH 058/172] better targeting; class tabs was auto-assigned
I moved a preset manager into quicksettings, and this function
was targeting my component instead of the tabs. The class `tabs`
is auto-assigned, while the element id #tabs is not; selecting by
id allows a tabbed component to live in the quicksettings.
---
script.js | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/script.js b/script.js
index 9748ec90..0e117d06 100644
--- a/script.js
+++ b/script.js
@@ -4,7 +4,7 @@ function gradioApp() {
}
function get_uiCurrentTab() {
- return gradioApp().querySelector('.tabs button:not(.border-transparent)')
+ return gradioApp().querySelector('#tabs button:not(.border-transparent)')
}
function get_uiCurrentTabContent() {
From e5b7ee910e7bb88f08e8876b5732cb034c6fe529 Mon Sep 17 00:00:00 2001
From: MMaker
Date: Wed, 4 Jan 2023 04:22:01 -0500
Subject: [PATCH 059/172] fix: Save full res of intermediate step
---
modules/processing.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/modules/processing.py b/modules/processing.py
index a172af0b..93e75ba6 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -705,7 +705,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
return
if not isinstance(image, Image.Image):
- image = sd_samplers.sample_to_image(image, index)
+ image = sd_samplers.sample_to_image(image, index, approximation=0)
images.save_image(image, self.outpath_samples, "", seeds[index], prompts[index], opts.samples_format, suffix="-before-highres-fix")
From 02d7abf5141431b9a3a8a189bb3136c71abd5e79 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Wed, 4 Jan 2023 12:35:07 +0300
Subject: [PATCH 060/172] helpful error message when trying to load 2.0 without
 config; failing to load model weights from settings won't break generation for
 the currently loaded model anymore
---
modules/errors.py | 25 +++++++++++++++++++++++--
modules/sd_models.py | 24 +++++++++++++++++-------
modules/shared.py | 9 +++++++--
webui.py | 12 ++++++++++--
4 files changed, 57 insertions(+), 13 deletions(-)
diff --git a/modules/errors.py b/modules/errors.py
index 372dc51a..a668c014 100644
--- a/modules/errors.py
+++ b/modules/errors.py
@@ -2,9 +2,30 @@ import sys
import traceback
+def print_error_explanation(message):
+ lines = message.strip().split("\n")
+ max_len = max([len(x) for x in lines])
+
+ print('=' * max_len, file=sys.stderr)
+ for line in lines:
+ print(line, file=sys.stderr)
+ print('=' * max_len, file=sys.stderr)
+
+
+def display(e: Exception, task):
+ print(f"{task or 'error'}: {type(e).__name__}", file=sys.stderr)
+ print(traceback.format_exc(), file=sys.stderr)
+
+ message = str(e)
+ if "copying a param with shape torch.Size([640, 1024]) from checkpoint, the shape in current model is torch.Size([640, 768])" in message:
+ print_error_explanation("""
+The most likely cause of this is that you are trying to load a Stable Diffusion 2.0 model without specifying its config file.
+See https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features#stable-diffusion-20 for how to solve this.
+ """)
+
+
def run(code, task):
try:
code()
except Exception as e:
- print(f"{task}: {type(e).__name__}", file=sys.stderr)
- print(traceback.format_exc(), file=sys.stderr)
+ display(e, task)
diff --git a/modules/sd_models.py b/modules/sd_models.py
index b98b05fc..6846b74a 100644
--- a/modules/sd_models.py
+++ b/modules/sd_models.py
@@ -278,6 +278,7 @@ def enable_midas_autodownload():
midas.api.load_model = load_model_wrapper
+
def load_model(checkpoint_info=None):
from modules import lowvram, sd_hijack
checkpoint_info = checkpoint_info or select_checkpoint()
@@ -312,6 +313,7 @@ def load_model(checkpoint_info=None):
sd_config.model.params.unet_config.params.use_fp16 = False
sd_model = instantiate_from_config(sd_config.model)
+
load_model_weights(sd_model, checkpoint_info)
if shared.cmd_opts.lowvram or shared.cmd_opts.medvram:
@@ -336,10 +338,12 @@ def load_model(checkpoint_info=None):
def reload_model_weights(sd_model=None, info=None):
from modules import lowvram, devices, sd_hijack
checkpoint_info = info or select_checkpoint()
-
+
if not sd_model:
sd_model = shared.sd_model
+ current_checkpoint_info = sd_model.sd_checkpoint_info
+
if sd_model.sd_model_checkpoint == checkpoint_info.filename:
return
@@ -356,13 +360,19 @@ def reload_model_weights(sd_model=None, info=None):
sd_hijack.model_hijack.undo_hijack(sd_model)
- load_model_weights(sd_model, checkpoint_info)
+ try:
+ load_model_weights(sd_model, checkpoint_info)
+ except Exception as e:
+ print("Failed to load checkpoint, restoring previous")
+ load_model_weights(sd_model, current_checkpoint_info)
+ raise
+ finally:
+ sd_hijack.model_hijack.hijack(sd_model)
+ script_callbacks.model_loaded_callback(sd_model)
- sd_hijack.model_hijack.hijack(sd_model)
- script_callbacks.model_loaded_callback(sd_model)
-
- if not shared.cmd_opts.lowvram and not shared.cmd_opts.medvram:
- sd_model.to(devices.device)
+ if not shared.cmd_opts.lowvram and not shared.cmd_opts.medvram:
+ sd_model.to(devices.device)
print("Weights loaded.")
+
return sd_model
diff --git a/modules/shared.py b/modules/shared.py
index 23657a93..7588c47b 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -14,7 +14,7 @@ import modules.interrogate
import modules.memmon
import modules.styles
import modules.devices as devices
-from modules import localization, sd_vae, extensions, script_loading
+from modules import localization, sd_vae, extensions, script_loading, errors
from modules.paths import models_path, script_path, sd_path
@@ -494,7 +494,12 @@ class Options:
return False
if self.data_labels[key].onchange is not None:
- self.data_labels[key].onchange()
+ try:
+ self.data_labels[key].onchange()
+ except Exception as e:
+ errors.display(e, f"changing setting {key} to {value}")
+ setattr(self, key, oldval)
+ return False
return True
diff --git a/webui.py b/webui.py
index c7d55a97..13375e71 100644
--- a/webui.py
+++ b/webui.py
@@ -9,7 +9,7 @@ from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from fastapi.middleware.gzip import GZipMiddleware
-from modules import import_hook
+from modules import import_hook, errors
from modules.call_queue import wrap_queued_call, queue_lock, wrap_gradio_gpu_call
from modules.paths import script_path
@@ -61,7 +61,15 @@ def initialize():
modelloader.load_upscalers()
modules.sd_vae.refresh_vae_list()
- modules.sd_models.load_model()
+
+ try:
+ modules.sd_models.load_model()
+ except Exception as e:
+ errors.display(e, "loading stable diffusion model")
+ print("", file=sys.stderr)
+ print("Stable diffusion model failed to load, exiting", file=sys.stderr)
+ exit(1)
+
shared.opts.onchange("sd_model_checkpoint", wrap_queued_call(lambda: modules.sd_models.reload_model_weights()))
shared.opts.onchange("sd_vae", wrap_queued_call(lambda: modules.sd_vae.reload_vae_weights()), call=False)
shared.opts.onchange("sd_vae_as_default", wrap_queued_call(lambda: modules.sd_vae.reload_vae_weights()), call=False)
From 8d8a05a3bbb50fdfeab51679a919d2487bd97976 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Wed, 4 Jan 2023 12:47:42 +0300
Subject: [PATCH 061/172] find configs for models at runtime rather than when
starting
---
modules/sd_hijack_inpainting.py | 5 ++++-
modules/sd_models.py | 31 ++++++++++++++++++-------------
2 files changed, 22 insertions(+), 14 deletions(-)
diff --git a/modules/sd_hijack_inpainting.py b/modules/sd_hijack_inpainting.py
index 3c214a35..31d2c898 100644
--- a/modules/sd_hijack_inpainting.py
+++ b/modules/sd_hijack_inpainting.py
@@ -97,8 +97,11 @@ def p_sample_plms(self, x, c, t, index, repeat_noise=False, use_original_steps=F
def should_hijack_inpainting(checkpoint_info):
+ from modules import sd_models
+
ckpt_basename = os.path.basename(checkpoint_info.filename).lower()
- cfg_basename = os.path.basename(checkpoint_info.config).lower()
+ cfg_basename = os.path.basename(sd_models.find_checkpoint_config(checkpoint_info)).lower()
+
return "inpainting" in ckpt_basename and not "inpainting" in cfg_basename
diff --git a/modules/sd_models.py b/modules/sd_models.py
index 6846b74a..6dca4ddf 100644
--- a/modules/sd_models.py
+++ b/modules/sd_models.py
@@ -20,7 +20,7 @@ from modules.sd_hijack_inpainting import do_inpainting_hijack, should_hijack_inp
model_dir = "Stable-diffusion"
model_path = os.path.abspath(os.path.join(models_path, model_dir))
-CheckpointInfo = namedtuple("CheckpointInfo", ['filename', 'title', 'hash', 'model_name', 'config'])
+CheckpointInfo = namedtuple("CheckpointInfo", ['filename', 'title', 'hash', 'model_name'])
checkpoints_list = {}
checkpoints_loaded = collections.OrderedDict()
@@ -48,6 +48,14 @@ def checkpoint_tiles():
return sorted([x.title for x in checkpoints_list.values()], key = alphanumeric_key)
+def find_checkpoint_config(info):
+ config = os.path.splitext(info.filename)[0] + ".yaml"
+ if os.path.exists(config):
+ return config
+
+ return shared.cmd_opts.config
+
+
def list_models():
checkpoints_list.clear()
model_list = modelloader.load_models(model_path=model_path, command_path=shared.cmd_opts.ckpt_dir, ext_filter=[".ckpt", ".safetensors"])
@@ -73,7 +81,7 @@ def list_models():
if os.path.exists(cmd_ckpt):
h = model_hash(cmd_ckpt)
title, short_model_name = modeltitle(cmd_ckpt, h)
- checkpoints_list[title] = CheckpointInfo(cmd_ckpt, title, h, short_model_name, shared.cmd_opts.config)
+ checkpoints_list[title] = CheckpointInfo(cmd_ckpt, title, h, short_model_name)
shared.opts.data['sd_model_checkpoint'] = title
elif cmd_ckpt is not None and cmd_ckpt != shared.default_sd_model_file:
print(f"Checkpoint in --ckpt argument not found (Possible it was moved to {model_path}: {cmd_ckpt}", file=sys.stderr)
@@ -81,12 +89,7 @@ def list_models():
h = model_hash(filename)
title, short_model_name = modeltitle(filename, h)
- basename, _ = os.path.splitext(filename)
- config = basename + ".yaml"
- if not os.path.exists(config):
- config = shared.cmd_opts.config
-
- checkpoints_list[title] = CheckpointInfo(filename, title, h, short_model_name, config)
+ checkpoints_list[title] = CheckpointInfo(filename, title, h, short_model_name)
def get_closet_checkpoint_match(searchString):
@@ -282,9 +285,10 @@ def enable_midas_autodownload():
def load_model(checkpoint_info=None):
from modules import lowvram, sd_hijack
checkpoint_info = checkpoint_info or select_checkpoint()
+ checkpoint_config = find_checkpoint_config(checkpoint_info)
- if checkpoint_info.config != shared.cmd_opts.config:
- print(f"Loading config from: {checkpoint_info.config}")
+ if checkpoint_config != shared.cmd_opts.config:
+ print(f"Loading config from: {checkpoint_config}")
if shared.sd_model:
sd_hijack.model_hijack.undo_hijack(shared.sd_model)
@@ -292,7 +296,7 @@ def load_model(checkpoint_info=None):
gc.collect()
devices.torch_gc()
- sd_config = OmegaConf.load(checkpoint_info.config)
+ sd_config = OmegaConf.load(checkpoint_config)
if should_hijack_inpainting(checkpoint_info):
# Hardcoded config for now...
@@ -302,7 +306,7 @@ def load_model(checkpoint_info=None):
sd_config.model.params.finetune_keys = None
# Create a "fake" config with a different name so that we know to unload it when switching models.
- checkpoint_info = checkpoint_info._replace(config=checkpoint_info.config.replace(".yaml", "-inpainting.yaml"))
+ checkpoint_info = checkpoint_info._replace(config=checkpoint_config.replace(".yaml", "-inpainting.yaml"))
if not hasattr(sd_config.model.params, "use_ema"):
sd_config.model.params.use_ema = False
@@ -343,11 +347,12 @@ def reload_model_weights(sd_model=None, info=None):
sd_model = shared.sd_model
current_checkpoint_info = sd_model.sd_checkpoint_info
+ checkpoint_config = find_checkpoint_config(current_checkpoint_info)
if sd_model.sd_model_checkpoint == checkpoint_info.filename:
return
- if sd_model.sd_checkpoint_info.config != checkpoint_info.config or should_hijack_inpainting(checkpoint_info) != should_hijack_inpainting(sd_model.sd_checkpoint_info):
+ if checkpoint_config != find_checkpoint_config(checkpoint_info) or should_hijack_inpainting(checkpoint_info) != should_hijack_inpainting(sd_model.sd_checkpoint_info):
del sd_model
checkpoints_loaded.clear()
load_model(checkpoint_info)
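The new lookup is small enough to demonstrate on its own: a .yaml sitting next to the checkpoint file wins, otherwise the global --config default is used. A minimal stand-alone sketch of the same logic (DEFAULT_CONFIG stands in for shared.cmd_opts.config; the paths are illustrative):

import os

DEFAULT_CONFIG = "configs/v1-inference.yaml"  # stand-in for shared.cmd_opts.config

def find_checkpoint_config(checkpoint_filename):
    # prefer a sidecar .yaml next to the checkpoint
    config = os.path.splitext(checkpoint_filename)[0] + ".yaml"
    if os.path.exists(config):
        return config
    # otherwise fall back to the global default config
    return DEFAULT_CONFIG

print(find_checkpoint_config("models/Stable-diffusion/model.ckpt"))

Deferring this lookup to call time is what lets a user drop a config next to an already-listed model without restarting the UI.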
From 96cf15bedecbed97ef9b70b8413d543a9aee5adf Mon Sep 17 00:00:00 2001
From: MMaker
Date: Wed, 4 Jan 2023 05:12:06 -0500
Subject: [PATCH 062/172] Add new latent upscale modes
---
modules/shared.py | 7 +++++--
1 file changed, 5 insertions(+), 2 deletions(-)
diff --git a/modules/shared.py b/modules/shared.py
index 7588c47b..a10f69a9 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -564,8 +564,11 @@ if os.path.exists(config_filename):
latent_upscale_default_mode = "Latent"
latent_upscale_modes = {
- "Latent": "bilinear",
- "Latent (nearest)": "nearest",
+ "Latent": {"mode": "bilinear", "antialias": False},
+ "Latent (antialiased)": {"mode": "bilinear", "antialias": True},
+ "Latent (bicubic)": {"mode": "bicubic", "antialias": False},
+ "Latent (bicubic, antialiased)": {"mode": "bicubic", "antialias": True},
+ "Latent (nearest)": {"mode": "nearest", "antialias": False},
}
sd_upscalers = []
From 15fd0b8bc4734ea85bca1acfb12b51465ab9817d Mon Sep 17 00:00:00 2001
From: MMaker
Date: Wed, 4 Jan 2023 05:12:54 -0500
Subject: [PATCH 063/172] Update processing.py
---
modules/processing.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/modules/processing.py b/modules/processing.py
index a172af0b..7c72b56a 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -713,7 +713,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
for i in range(samples.shape[0]):
save_intermediate(samples, i)
- samples = torch.nn.functional.interpolate(samples, size=(target_height // opt_f, target_width // opt_f), mode=latent_scale_mode)
+ samples = torch.nn.functional.interpolate(samples, size=(target_height // opt_f, target_width // opt_f), mode=latent_scale_mode["mode"], antialias=latent_scale_mode["antialias"])
# Avoid making the inpainting conditioning unless necessary as
# this does need some extra compute to decode / encode the image again.
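Taken together with the previous patch, every dropdown entry now carries both an interpolation mode and an antialias flag, and both are passed through to torch.nn.functional.interpolate (the antialias keyword exists for bilinear/bicubic since torch 1.11). A minimal sketch of the lookup-and-resize step on a dummy latent; the dict below is a subset of the one defined in shared.py:

import torch
import torch.nn.functional as F

latent_upscale_modes = {
    "Latent": {"mode": "bilinear", "antialias": False},
    "Latent (bicubic)": {"mode": "bicubic", "antialias": False},
}

samples = torch.randn(1, 4, 64, 64)      # latent for a 512x512 image (opt_f = 8)
chosen = latent_upscale_modes["Latent (bicubic)"]
upscaled = F.interpolate(samples, size=(128, 128),
                         mode=chosen["mode"], antialias=chosen["antialias"])
print(upscaled.shape)                    # torch.Size([1, 4, 128, 128])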
From 4ec6470a1a2d9430b91266426f995e48f59564e1 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Wed, 4 Jan 2023 13:26:23 +0300
Subject: [PATCH 064/172] fix checkpoint list API
---
modules/api/api.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/modules/api/api.py b/modules/api/api.py
index 9c670f00..2b1f180c 100644
--- a/modules/api/api.py
+++ b/modules/api/api.py
@@ -18,7 +18,7 @@ from modules.textual_inversion.textual_inversion import create_embedding, train_
from modules.textual_inversion.preprocess import preprocess
from modules.hypernetworks.hypernetwork import create_hypernetwork, train_hypernetwork
from PIL import PngImagePlugin,Image
-from modules.sd_models import checkpoints_list
+from modules.sd_models import checkpoints_list, find_checkpoint_config
from modules.realesrgan_model import get_realesrgan_models
from modules import devices
from typing import List
@@ -303,7 +303,7 @@ class Api:
return upscalers
def get_sd_models(self):
- return [{"title":x.title, "model_name":x.model_name, "hash":x.hash, "filename": x.filename, "config": x.config} for x in checkpoints_list.values()]
+ return [{"title":x.title, "model_name":x.model_name, "hash":x.hash, "filename": x.filename, "config": find_checkpoint_config(x)} for x in checkpoints_list.values()]
def get_hypernetworks(self):
return [{"name": name, "path": shared.hypernetworks[name]} for name in shared.hypernetworks]
From b2151b934fe0a3613570c6abd7615d3788fd1c8f Mon Sep 17 00:00:00 2001
From: MMaker
Date: Wed, 4 Jan 2023 05:36:18 -0500
Subject: [PATCH 065/172] Rename bicubic antialiased option
The comma was causing the value in PNG info to be quoted, which caused the upscaler dropdown option to be blank when sending to the UI
---
modules/shared.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/modules/shared.py b/modules/shared.py
index a10f69a9..c1b20081 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -567,7 +567,7 @@ latent_upscale_modes = {
"Latent": {"mode": "bilinear", "antialias": False},
"Latent (antialiased)": {"mode": "bilinear", "antialias": True},
"Latent (bicubic)": {"mode": "bicubic", "antialias": False},
- "Latent (bicubic, antialiased)": {"mode": "bicubic", "antialias": True},
+ "Latent (bicubic antialiased)": {"mode": "bicubic", "antialias": True},
"Latent (nearest)": {"mode": "nearest", "antialias": False},
}
From 3bd737767b071878ea980e94b8705f603bcf545e Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Wed, 4 Jan 2023 14:20:32 +0300
Subject: [PATCH 066/172] disable broken API logging
---
modules/api/api.py | 1 -
1 file changed, 1 deletion(-)
diff --git a/modules/api/api.py b/modules/api/api.py
index a6c1d6ed..6267afdc 100644
--- a/modules/api/api.py
+++ b/modules/api/api.py
@@ -100,7 +100,6 @@ class Api:
self.router = APIRouter()
self.app = app
- init_api_middleware(self.app)
self.queue_lock = queue_lock
self.add_api_route("/sdapi/v1/txt2img", self.text2imgapi, methods=["POST"], response_model=TextToImageResponse)
self.add_api_route("/sdapi/v1/img2img", self.img2imgapi, methods=["POST"], response_model=ImageToImageResponse)
From 0cd6399b8b1699b8b7acad6f0ad2988111fe618e Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Wed, 4 Jan 2023 14:29:13 +0300
Subject: [PATCH 067/172] fix broken inpainting model
---
modules/sd_models.py | 3 ---
1 file changed, 3 deletions(-)
diff --git a/modules/sd_models.py b/modules/sd_models.py
index 6dca4ddf..a568823d 100644
--- a/modules/sd_models.py
+++ b/modules/sd_models.py
@@ -305,9 +305,6 @@ def load_model(checkpoint_info=None):
sd_config.model.params.unet_config.params.in_channels = 9
sd_config.model.params.finetune_keys = None
- # Create a "fake" config with a different name so that we know to unload it when switching models.
- checkpoint_info = checkpoint_info._replace(config=checkpoint_config.replace(".yaml", "-inpainting.yaml"))
-
if not hasattr(sd_config.model.params, "use_ema"):
sd_config.model.params.use_ema = False
From 11b8160a086c434d5baf4971edda46e6d2126800 Mon Sep 17 00:00:00 2001
From: Vladimir Mandic
Date: Wed, 4 Jan 2023 06:36:57 -0500
Subject: [PATCH 068/172] fix typo
---
modules/api/api.py | 1 +
1 file changed, 1 insertion(+)
diff --git a/modules/api/api.py b/modules/api/api.py
index 6267afdc..48a70a44 100644
--- a/modules/api/api.py
+++ b/modules/api/api.py
@@ -101,6 +101,7 @@ class Api:
self.router = APIRouter()
self.app = app
self.queue_lock = queue_lock
+ api_middleware(self.app)
self.add_api_route("/sdapi/v1/txt2img", self.text2imgapi, methods=["POST"], response_model=TextToImageResponse)
self.add_api_route("/sdapi/v1/img2img", self.img2imgapi, methods=["POST"], response_model=ImageToImageResponse)
self.add_api_route("/sdapi/v1/extra-single-image", self.extras_single_image_api, methods=["POST"], response_model=ExtrasSingleImageResponse)
From 642142556d8ecdea9beb86d7618b628b1803ab98 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Wed, 4 Jan 2023 15:09:53 +0300
Subject: [PATCH 069/172] use commandline-supplied cuda device name instead of
 cuda:0 for the safetensors PR that doesn't fix anything
---
modules/sd_models.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/modules/sd_models.py b/modules/sd_models.py
index ee918f24..76a89e88 100644
--- a/modules/sd_models.py
+++ b/modules/sd_models.py
@@ -173,7 +173,7 @@ def read_state_dict(checkpoint_file, print_global_state=False, map_location=None
if extension.lower() == ".safetensors":
device = map_location or shared.weight_load_location
if device is None:
- device = "cuda:0" if torch.cuda.is_available() else "cpu"
+ device = devices.get_cuda_device_string() if torch.cuda.is_available() else "cpu"
pl_sd = safetensors.torch.load_file(checkpoint_file, device=device)
else:
pl_sd = torch.load(checkpoint_file, map_location=map_location or shared.weight_load_location)
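devices.get_cuda_device_string() exists so the --device-id commandline flag is honored rather than hardcoding the first GPU; safetensors then loads the weights directly onto that device. A stand-alone approximation of the helper (the real one lives in modules/devices.py and reads shared.cmd_opts):

import torch

def get_cuda_device_string(device_id=None):
    # honor an explicit device id if one was supplied, else plain "cuda"
    if device_id is not None:
        return f"cuda:{device_id}"
    return "cuda"

device = get_cuda_device_string(1) if torch.cuda.is_available() else "cpu"
print(device)  # "cuda:1" on a multi-GPU machine, "cpu" without CUDA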
From 21ee77db314ede7ccbb18787962347c09a4df0c7 Mon Sep 17 00:00:00 2001
From: Vladimir Mandic
Date: Wed, 4 Jan 2023 08:04:38 -0500
Subject: [PATCH 070/172] add cross-attention info
---
modules/sd_hijack.py | 12 +++++++++++-
1 file changed, 11 insertions(+), 1 deletion(-)
diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py
index edcbaf52..fa2cd4bb 100644
--- a/modules/sd_hijack.py
+++ b/modules/sd_hijack.py
@@ -35,26 +35,35 @@ def apply_optimizations():
ldm.modules.diffusionmodules.model.nonlinearity = silu
ldm.modules.diffusionmodules.openaimodel.th = sd_hijack_unet.th
+
+ optimization_method = None
if cmd_opts.force_enable_xformers or (cmd_opts.xformers and shared.xformers_available and torch.version.cuda and (6, 0) <= torch.cuda.get_device_capability(shared.device) <= (9, 0)):
print("Applying xformers cross attention optimization.")
ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.xformers_attention_forward
ldm.modules.diffusionmodules.model.AttnBlock.forward = sd_hijack_optimizations.xformers_attnblock_forward
+ optimization_method = 'xformers'
elif cmd_opts.opt_split_attention_v1:
print("Applying v1 cross attention optimization.")
ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.split_cross_attention_forward_v1
+ optimization_method = 'V1'
elif not cmd_opts.disable_opt_split_attention and (cmd_opts.opt_split_attention_invokeai or not torch.cuda.is_available()):
if not invokeAI_mps_available and shared.device.type == 'mps':
print("The InvokeAI cross attention optimization for MPS requires the psutil package which is not installed.")
print("Applying v1 cross attention optimization.")
ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.split_cross_attention_forward_v1
+ optimization_method = 'V1'
else:
print("Applying cross attention optimization (InvokeAI).")
ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.split_cross_attention_forward_invokeAI
+ optimization_method = 'InvokeAI'
elif not cmd_opts.disable_opt_split_attention and (cmd_opts.opt_split_attention or torch.cuda.is_available()):
print("Applying cross attention optimization (Doggettx).")
ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.split_cross_attention_forward
ldm.modules.diffusionmodules.model.AttnBlock.forward = sd_hijack_optimizations.cross_attention_attnblock_forward
+ optimization_method = 'Doggettx'
+
+ return optimization_method
def undo_optimizations():
@@ -75,6 +84,7 @@ class StableDiffusionModelHijack:
layers = None
circular_enabled = False
clip = None
+ optimization_method = None
embedding_db = modules.textual_inversion.textual_inversion.EmbeddingDatabase(cmd_opts.embeddings_dir)
@@ -94,7 +104,7 @@ class StableDiffusionModelHijack:
m.cond_stage_model.model.token_embedding = EmbeddingsWithFixes(m.cond_stage_model.model.token_embedding, self)
m.cond_stage_model = sd_hijack_open_clip.FrozenOpenCLIPEmbedderWithCustomWords(m.cond_stage_model, self)
- apply_optimizations()
+ self.optimization_method = apply_optimizations()
self.clip = m.cond_stage_model
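apply_optimizations() now returns a label for whichever cross-attention implementation it installed, and the hijack stores it so it can be surfaced later (e.g. in the UI). Stripped of the monkey-patching, the control flow is a chain of guards that each yield a name; a compact sketch under that simplification:

def pick_optimization(xformers_ok=False, force_v1=False, invokeai_ok=False, cuda_ok=True):
    """Return the label of the cross-attention optimization that would be applied."""
    if xformers_ok:
        return 'xformers'
    if force_v1:
        return 'V1'
    if invokeai_ok:
        return 'InvokeAI'
    if cuda_ok:
        return 'Doggettx'
    return None  # no optimization applied

method = pick_optimization(xformers_ok=True)
print(f"Applying {method} cross attention optimization.")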
From 1cfd8aec4ae5a6ca1afd67b44cb4ef6dd14d8c34 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Wed, 4 Jan 2023 16:05:42 +0300
Subject: [PATCH 071/172] make it possible to work with
opts.show_progress_every_n_steps = -1 with medvram
---
modules/shared.py | 6 ++++--
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/modules/shared.py b/modules/shared.py
index 4fcc6edd..54a6ba23 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -214,12 +214,13 @@ class State:
"""sets self.current_image from self.current_latent if enough sampling steps have been made after the last call to this"""
def set_current_image(self):
+ if not parallel_processing_allowed:
+ return
+
if self.sampling_step - self.current_image_sampling_step >= opts.show_progress_every_n_steps and opts.show_progress_every_n_steps > 0:
self.do_set_current_image()
def do_set_current_image(self):
- if not parallel_processing_allowed:
- return
if self.current_latent is None:
return
@@ -231,6 +232,7 @@ class State:
self.current_image_sampling_step = self.sampling_step
+
state = State()
artist_db = modules.artists.ArtistsDatabase(os.path.join(script_path, 'artists.csv'))
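Moving the parallel_processing_allowed guard out of do_set_current_image() and into set_current_image() is the actual fix: the periodic preview path stays disabled under --medvram, but code that calls do_set_current_image() directly keeps working, which is what show_progress_every_n_steps = -1 relies on. A stubbed-out sketch of the split:

class State:
    parallel_processing_allowed = False       # e.g. running with --medvram
    sampling_step = 50
    current_image_sampling_step = 0
    show_progress_every_n_steps = -1
    current_latent = object()                 # stand-in for a real latent tensor

    def set_current_image(self):
        # periodic path: skipped entirely when parallelism is off
        if not self.parallel_processing_allowed:
            return
        if self.sampling_step - self.current_image_sampling_step >= self.show_progress_every_n_steps and self.show_progress_every_n_steps > 0:
            self.do_set_current_image()

    def do_set_current_image(self):
        # direct path: now safe to call even with medvram
        if self.current_latent is None:
            return
        print("decoding preview image from the current latent")

State().do_set_current_image()  # runs regardless of parallel_processing_allowed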
From 79c682ad4f2d982b26fa1a15044582d1005134f9 Mon Sep 17 00:00:00 2001
From: Vladimir Mandic
Date: Wed, 4 Jan 2023 08:20:42 -0500
Subject: [PATCH 072/172] fix jpeg
---
modules/extras.py | 2 --
modules/images.py | 2 ++
requirements_versions.txt | 2 +-
3 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/modules/extras.py b/modules/extras.py
index d665440a..7407bfe3 100644
--- a/modules/extras.py
+++ b/modules/extras.py
@@ -19,8 +19,6 @@ from modules.shared import opts
import modules.gfpgan_model
from modules.ui import plaintext_to_html
import modules.codeformer_model
-import piexif
-import piexif.helper
import gradio as gr
import safetensors.torch
diff --git a/modules/images.py b/modules/images.py
index c3a5fc8b..a73be3fa 100644
--- a/modules/images.py
+++ b/modules/images.py
@@ -22,6 +22,8 @@ from modules.shared import opts, cmd_opts
LANCZOS = (Image.Resampling.LANCZOS if hasattr(Image, 'Resampling') else Image.LANCZOS)
+Image.init() # initialize once all known file format handlers
+
def image_grid(imgs, batch_size=1, rows=None):
if rows is None:
diff --git a/requirements_versions.txt b/requirements_versions.txt
index 975102d9..7ae118cb 100644
--- a/requirements_versions.txt
+++ b/requirements_versions.txt
@@ -5,7 +5,7 @@ basicsr==1.4.2
gfpgan==1.3.8
gradio==3.15.0
numpy==1.23.3
-Pillow==9.2.0
+Pillow==9.3.0
realesrgan==0.3.0
torch
omegaconf==2.2.3
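The root cause here appears to be Pillow's lazy plugin registration: without it, a save-time lookup can miss the JPEG handler. Image.init() eagerly registers every known file format handler, and the piexif-based EXIF path is dropped in favor of what Pillow 9.3.0 provides. The effect of init() is easy to observe (Image.SAVE is Pillow's registry of save handlers):

from PIL import Image

print("JPEG" in Image.SAVE)  # may be False before any image has been handled
Image.init()                 # force-register all known file format handlers
print("JPEG" in Image.SAVE)  # True once the plugins are loaded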
From 4d66bf2c0d27702cc83b9cc57ebb1f359d18d938 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Wed, 4 Jan 2023 17:24:46 +0300
Subject: [PATCH 073/172] add infotext to "-before-highres-fix" images
---
modules/processing.py | 6 +++++-
1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/modules/processing.py b/modules/processing.py
index fd7c7015..c03e77e7 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -136,6 +136,7 @@ class StableDiffusionProcessing():
self.all_negative_prompts = None
self.all_seeds = None
self.all_subseeds = None
+ self.iteration = 0
def txt2img_image_conditioning(self, x, width=None, height=None):
if self.sampler.conditioning_key not in {'hybrid', 'concat'}:
@@ -544,6 +545,8 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
state.job_count = p.n_iter
for n in range(p.n_iter):
+ p.iteration = n
+
if state.skipped:
state.skipped = False
@@ -707,7 +710,8 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
if not isinstance(image, Image.Image):
image = sd_samplers.sample_to_image(image, index, approximation=0)
- images.save_image(image, self.outpath_samples, "", seeds[index], prompts[index], opts.samples_format, suffix="-before-highres-fix")
+ info = create_infotext(self, self.all_prompts, self.all_seeds, self.all_subseeds, [], iteration=self.iteration, position_in_batch=index)
+ images.save_image(image, self.outpath_samples, "", seeds[index], prompts[index], opts.samples_format, info=info, suffix="-before-highres-fix")
if latent_scale_mode is not None:
for i in range(samples.shape[0]):
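Tracking p.iteration lets the intermediate save build the same parameters text as the final image, because create_infotext() picks the seed and prompt by flat index: position_in_batch plus iteration times batch size. A worked example of that index math:

batch_size = 2
all_seeds = [100, 101, 102, 103]

iteration, position_in_batch = 1, 1   # second batch, second image in it
index = position_in_batch + iteration * batch_size
print(index, all_seeds[index])        # 3 103 -> written into the "-before-highres-fix" infotext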
From 184e670126f5fc50ba56fa0fedcf0cf60e45ed7e Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Wed, 4 Jan 2023 17:45:01 +0300
Subject: [PATCH 074/172] fix the merge
---
modules/textual_inversion/textual_inversion.py | 14 +++++---------
1 file changed, 5 insertions(+), 9 deletions(-)
diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py
index 5421a758..8731ea5d 100644
--- a/modules/textual_inversion/textual_inversion.py
+++ b/modules/textual_inversion/textual_inversion.py
@@ -251,6 +251,7 @@ def validate_train_inputs(model_name, learn_rate, batch_size, gradient_step, dat
if save_model_every or create_image_every:
assert log_directory, "Log directory is empty"
+
def create_dummy_mask(x, width=None, height=None):
if shared.sd_model.model.conditioning_key in {'hybrid', 'concat'}:
@@ -380,17 +381,12 @@ def train_embedding(embedding_name, learn_rate, batch_size, gradient_step, data_
break
with devices.autocast():
- # c = stack_conds(batch.cond).to(devices.device)
- # mask = torch.tensor(batch.emb_index).to(devices.device, non_blocking=pin_memory)
- # print(mask)
- # c[:, 1:1+embedding.vec.shape[0]] = embedding.vec.to(devices.device, non_blocking=pin_memory)
-
-
- if img_c is None:
- img_c = create_dummy_mask(c, training_width, training_height)
-
x = batch.latent_sample.to(devices.device, non_blocking=pin_memory)
c = shared.sd_model.cond_stage_model(batch.cond_text)
+
+ if img_c is None:
+ img_c = create_dummy_mask(c, training_width, training_height)
+
cond = {"c_concat": [img_c], "c_crossattn": [c]}
loss = shared.sd_model(x, cond)[0] / gradient_step
del x
From 590c5ae016ae494f4873ca20079b30684ea3060c Mon Sep 17 00:00:00 2001
From: Vladimir Mandic
Date: Wed, 4 Jan 2023 09:48:54 -0500
Subject: [PATCH 075/172] update pillow
---
modules/images.py | 2 --
requirements_versions.txt | 2 +-
2 files changed, 1 insertion(+), 3 deletions(-)
diff --git a/modules/images.py b/modules/images.py
index a73be3fa..c3a5fc8b 100644
--- a/modules/images.py
+++ b/modules/images.py
@@ -22,8 +22,6 @@ from modules.shared import opts, cmd_opts
LANCZOS = (Image.Resampling.LANCZOS if hasattr(Image, 'Resampling') else Image.LANCZOS)
-Image.init() # initialize once all known file format handlers
-
def image_grid(imgs, batch_size=1, rows=None):
if rows is None:
diff --git a/requirements_versions.txt b/requirements_versions.txt
index 7ae118cb..d2899292 100644
--- a/requirements_versions.txt
+++ b/requirements_versions.txt
@@ -5,7 +5,7 @@ basicsr==1.4.2
gfpgan==1.3.8
gradio==3.15.0
numpy==1.23.3
-Pillow==9.3.0
+Pillow==9.4.0
realesrgan==0.3.0
torch
omegaconf==2.2.3
From 525cea924562afd676f55470095268a0f6fca59e Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Wed, 4 Jan 2023 17:58:07 +0300
Subject: [PATCH 076/172] use shared function from processing for creating
dummy mask when training inpainting model
---
modules/processing.py | 39 ++++++++++---------
.../textual_inversion/textual_inversion.py | 33 +++++-----------
2 files changed, 29 insertions(+), 43 deletions(-)
diff --git a/modules/processing.py b/modules/processing.py
index c03e77e7..c7264aff 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -76,6 +76,24 @@ def apply_overlay(image, paste_loc, index, overlays):
return image
+def txt2img_image_conditioning(sd_model, x, width, height):
+ if sd_model.model.conditioning_key not in {'hybrid', 'concat'}:
+ # Dummy zero conditioning if we're not using inpainting model.
+ # Still takes up a bit of memory, but no encoder call.
+ # Pretty sure we can just make this a 1x1 image since its not going to be used besides its batch size.
+ return x.new_zeros(x.shape[0], 5, 1, 1, dtype=x.dtype, device=x.device)
+
+ # The "masked-image" in this case will just be all zeros since the entire image is masked.
+ image_conditioning = torch.zeros(x.shape[0], 3, height, width, device=x.device)
+ image_conditioning = sd_model.get_first_stage_encoding(sd_model.encode_first_stage(image_conditioning))
+
+ # Add the fake full 1s mask to the first dimension.
+ image_conditioning = torch.nn.functional.pad(image_conditioning, (0, 0, 0, 0, 1, 0), value=1.0)
+ image_conditioning = image_conditioning.to(x.dtype)
+
+ return image_conditioning
+
+
class StableDiffusionProcessing():
"""
The first set of paramaters: sd_models -> do_not_reload_embeddings represent the minimum required to create a StableDiffusionProcessing
@@ -139,26 +157,9 @@ class StableDiffusionProcessing():
self.iteration = 0
def txt2img_image_conditioning(self, x, width=None, height=None):
- if self.sampler.conditioning_key not in {'hybrid', 'concat'}:
- # Dummy zero conditioning if we're not using inpainting model.
- # Still takes up a bit of memory, but no encoder call.
- # Pretty sure we can just make this a 1x1 image since its not going to be used besides its batch size.
- return x.new_zeros(x.shape[0], 5, 1, 1)
+ self.is_using_inpainting_conditioning = self.sd_model.model.conditioning_key in {'hybrid', 'concat'}
- self.is_using_inpainting_conditioning = True
-
- height = height or self.height
- width = width or self.width
-
- # The "masked-image" in this case will just be all zeros since the entire image is masked.
- image_conditioning = torch.zeros(x.shape[0], 3, height, width, device=x.device)
- image_conditioning = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(image_conditioning))
-
- # Add the fake full 1s mask to the first dimension.
- image_conditioning = torch.nn.functional.pad(image_conditioning, (0, 0, 0, 0, 1, 0), value=1.0)
- image_conditioning = image_conditioning.to(x.dtype)
-
- return image_conditioning
+ return txt2img_image_conditioning(self.sd_model, x, width or self.width, height or self.height)
def depth2img_image_conditioning(self, source_image):
# Use the AddMiDaS helper to Format our source image to suit the MiDaS model
diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py
index 8731ea5d..2250e41b 100644
--- a/modules/textual_inversion/textual_inversion.py
+++ b/modules/textual_inversion/textual_inversion.py
@@ -252,26 +252,6 @@ def validate_train_inputs(model_name, learn_rate, batch_size, gradient_step, dat
assert log_directory, "Log directory is empty"
-def create_dummy_mask(x, width=None, height=None):
- if shared.sd_model.model.conditioning_key in {'hybrid', 'concat'}:
-
- # The "masked-image" in this case will just be all zeros since the entire image is masked.
- image_conditioning = torch.zeros(x.shape[0], 3, height, width, device=x.device)
- image_conditioning = shared.sd_model.get_first_stage_encoding(shared.sd_model.encode_first_stage(image_conditioning))
-
- # Add the fake full 1s mask to the first dimension.
- image_conditioning = torch.nn.functional.pad(image_conditioning, (0, 0, 0, 0, 1, 0), value=1.0)
- image_conditioning = image_conditioning.to(x.dtype)
-
- else:
- # Dummy zero conditioning if we're not using inpainting model.
- # Still takes up a bit of memory, but no encoder call.
- # Pretty sure we can just make this a 1x1 image since its not going to be used besides its batch size.
- image_conditioning = torch.zeros(x.shape[0], 5, 1, 1, dtype=x.dtype, device=x.device)
-
- return image_conditioning
-
-
def train_embedding(embedding_name, learn_rate, batch_size, gradient_step, data_root, log_directory, training_width, training_height, steps, shuffle_tags, tag_drop_out, latent_sampling_method, create_image_every, save_embedding_every, template_file, save_image_with_stored_embedding, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height):
save_embedding_every = save_embedding_every or 0
create_image_every = create_image_every or 0
@@ -346,7 +326,6 @@ def train_embedding(embedding_name, learn_rate, batch_size, gradient_step, data_
else:
print("No saved optimizer exists in checkpoint")
-
scaler = torch.cuda.amp.GradScaler()
batch_size = ds.batch_size
@@ -362,7 +341,9 @@ def train_embedding(embedding_name, learn_rate, batch_size, gradient_step, data_
forced_filename = ""
embedding_yet_to_be_embedded = False
+ is_training_inpainting_model = shared.sd_model.model.conditioning_key in {'hybrid', 'concat'}
img_c = None
+
pbar = tqdm.tqdm(total=steps - initial_step)
try:
for i in range((steps-initial_step) * gradient_step):
@@ -384,10 +365,14 @@ def train_embedding(embedding_name, learn_rate, batch_size, gradient_step, data_
x = batch.latent_sample.to(devices.device, non_blocking=pin_memory)
c = shared.sd_model.cond_stage_model(batch.cond_text)
- if img_c is None:
- img_c = create_dummy_mask(c, training_width, training_height)
+ if is_training_inpainting_model:
+ if img_c is None:
+ img_c = processing.txt2img_image_conditioning(shared.sd_model, c, training_width, training_height)
+
+ cond = {"c_concat": [img_c], "c_crossattn": [c]}
+ else:
+ cond = c
- cond = {"c_concat": [img_c], "c_crossattn": [c]}
loss = shared.sd_model(x, cond)[0] / gradient_step
del x
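The shared helper keeps the two cases explicit: non-inpainting models get a cheap 5-channel dummy tensor, while inpainting models get an encoded all-zeros image with an all-ones mask padded onto the front of the channel dimension. A shape-level sketch of the inpainting branch, with a stub standing in for the first-stage encoder and the usual factor-8 latent downscaling assumed:

import torch
import torch.nn.functional as F

x = torch.randn(2, 4, 64, 64)          # latent batch for 512x512 images
width = height = 512

def encode_first_stage_stub(img):
    # stand-in for sd_model.get_first_stage_encoding(sd_model.encode_first_stage(img))
    return torch.randn(img.shape[0], 4, img.shape[2] // 8, img.shape[3] // 8)

# the "masked image" is all zeros because the entire image counts as masked
image_conditioning = torch.zeros(x.shape[0], 3, height, width)
image_conditioning = encode_first_stage_stub(image_conditioning)

# pad a full mask of ones onto the channel dimension: 4 channels -> 5
image_conditioning = F.pad(image_conditioning, (0, 0, 0, 0, 1, 0), value=1.0)
print(image_conditioning.shape)        # torch.Size([2, 5, 64, 64])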
From a8eb9e3bf814f72293e474c11e9ff0098859a942 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Wed, 4 Jan 2023 18:20:38 +0300
Subject: [PATCH 077/172] Revert "Merge pull request #3791 from
shirayu/fix/filename"
This reverts commit eed58279e7cb0e873ebd88a29609f9bab0f1f3af, reversing
changes made to 4ae960b01c6711c66985479f14809dc7fa549fc2.
---
modules/images.py | 16 ++++------------
1 file changed, 4 insertions(+), 12 deletions(-)
diff --git a/modules/images.py b/modules/images.py
index 2967fa9a..c3a5fc8b 100644
--- a/modules/images.py
+++ b/modules/images.py
@@ -447,14 +447,6 @@ def get_next_sequence_number(path, basename):
return result + 1
-def truncate_fullpath(full_path, encoding='utf-8'):
- dir_name, full_name = os.path.split(full_path)
- file_name, file_ext = os.path.splitext(full_name)
- max_length = os.statvfs(dir_name).f_namemax
- file_name_truncated = file_name.encode(encoding)[:max_length - len(file_ext)].decode(encoding, 'ignore')
- return os.path.join(dir_name , file_name_truncated + file_ext)
-
-
def save_image(image, path, basename, seed=None, prompt=None, extension='png', info=None, short_filename=False, no_prompt=False, grid=False, pnginfo_section_name='parameters', p=None, existing_info=None, forced_filename=None, suffix="", save_to_dirs=None):
"""Save an image.
@@ -495,7 +487,7 @@ def save_image(image, path, basename, seed=None, prompt=None, extension='png', i
if save_to_dirs:
dirname = namegen.apply(opts.directories_filename_pattern or "[prompt_words]").lstrip(' ').rstrip('\\ /')
- path = truncate_fullpath(os.path.join(path, dirname))
+ path = os.path.join(path, dirname)
os.makedirs(path, exist_ok=True)
@@ -519,13 +511,13 @@ def save_image(image, path, basename, seed=None, prompt=None, extension='png', i
fullfn = None
for i in range(500):
fn = f"{basecount + i:05}" if basename == '' else f"{basename}-{basecount + i:04}"
- fullfn = truncate_fullpath(os.path.join(path, f"{fn}{file_decoration}.{extension}"))
+ fullfn = os.path.join(path, f"{fn}{file_decoration}.{extension}")
if not os.path.exists(fullfn):
break
else:
- fullfn = truncate_fullpath(os.path.join(path, f"{file_decoration}.{extension}"))
+ fullfn = os.path.join(path, f"{file_decoration}.{extension}")
else:
- fullfn = truncate_fullpath(os.path.join(path, f"{forced_filename}.{extension}"))
+ fullfn = os.path.join(path, f"{forced_filename}.{extension}")
pnginfo = existing_info or {}
if info is not None:
From 3dae545a03f5102ba5d9c3f27bb6241824c5a916 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Wed, 4 Jan 2023 18:42:51 +0300
Subject: [PATCH 078/172] rename weirdly named variables from #3176
---
modules/ui.py | 12 +++++-------
1 file changed, 5 insertions(+), 7 deletions(-)
diff --git a/modules/ui.py b/modules/ui.py
index e4859020..184af7ad 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -162,16 +162,14 @@ def save_files(js_data, images, do_make_zip, index):
return gr.File.update(value=fullfns, visible=True), plaintext_to_html(f"Saved: {filenames[0]}")
-
-
-def calc_time_left(progress, threshold, label, force_display, showTime):
+def calc_time_left(progress, threshold, label, force_display, show_eta):
if progress == 0:
return ""
else:
time_since_start = time.time() - shared.state.time_start
eta = (time_since_start/progress)
eta_relative = eta-time_since_start
- if (eta_relative > threshold and showTime) or force_display:
+ if (eta_relative > threshold and show_eta) or force_display:
if eta_relative > 3600:
return label + time.strftime('%H:%M:%S', time.gmtime(eta_relative))
elif eta_relative > 60:
@@ -194,9 +192,9 @@ def check_progress_call(id_part):
progress += 1 / shared.state.job_count * shared.state.sampling_step / shared.state.sampling_steps
# Show progress percentage and time left at the same moment, and base it also on steps done
- showPBText = progress >= 0.01 or shared.state.sampling_step >= 10
+ show_eta = progress >= 0.01 or shared.state.sampling_step >= 10
- time_left = calc_time_left( progress, 1, " ETA: ", shared.state.time_left_force_display, showPBText )
+ time_left = calc_time_left(progress, 1, " ETA: ", shared.state.time_left_force_display, show_eta)
if time_left != "":
shared.state.time_left_force_display = True
@@ -204,7 +202,7 @@ def check_progress_call(id_part):
progressbar = ""
if opts.show_progressbar:
- progressbar = f"""{" " * 2 + str(int(progress*100))+"%" + time_left if showPBText else ""}
"""
+ progressbar = f"""{" " * 2 + str(int(progress*100))+"%" + time_left if show_eta else ""}
"""
image = gr_show(False)
preview_visibility = gr_show(False)
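Renames aside, the ETA arithmetic deserves a line of explanation: with progress as a fraction in (0, 1], time_since_start / progress projects the total run time, and subtracting the elapsed time gives the remaining time that is compared against the display threshold. A worked example:

import time

time_start = time.time() - 30     # pretend the job started 30 seconds ago
progress = 0.25                   # a quarter of the work done

time_since_start = time.time() - time_start
eta = time_since_start / progress             # ~120 s projected total
eta_relative = eta - time_since_start         # ~90 s still to go
print(time.strftime('%M:%S', time.gmtime(eta_relative)))  # ~01:30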
From 097a90b88bb92878cf435c513b4757b5b82ae299 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Wed, 4 Jan 2023 19:19:11 +0300
Subject: [PATCH 079/172] add XY plot parameters to grid image and do not add
them to individual images
---
modules/processing.py | 2 +-
scripts/xy_grid.py | 38 ++++++++++++++++++++++++--------------
2 files changed, 25 insertions(+), 15 deletions(-)
diff --git a/modules/processing.py b/modules/processing.py
index c7264aff..47712159 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -422,7 +422,7 @@ def fix_seed(p):
p.subseed = get_fixed_seed(p.subseed)
-def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments, iteration=0, position_in_batch=0):
+def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iteration=0, position_in_batch=0):
index = position_in_batch + iteration * p.batch_size
clip_skip = getattr(p, 'clip_skip', opts.CLIP_stop_at_last_layers)
diff --git a/scripts/xy_grid.py b/scripts/xy_grid.py
index 59907f0b..78ff12c5 100644
--- a/scripts/xy_grid.py
+++ b/scripts/xy_grid.py
@@ -10,7 +10,7 @@ import numpy as np
import modules.scripts as scripts
import gradio as gr
-from modules import images, paths, sd_samplers
+from modules import images, paths, sd_samplers, processing
from modules.hypernetworks import hypernetwork
from modules.processing import process_images, Processed, StableDiffusionProcessingTxt2Img
from modules.shared import opts, cmd_opts, state
@@ -285,6 +285,7 @@ re_range_float = re.compile(r"\s*([+-]?\s*\d+(?:.\d*)?)\s*-\s*([+-]?\s*\d+(?:.\d
re_range_count = re.compile(r"\s*([+-]?\s*\d+)\s*-\s*([+-]?\s*\d+)(?:\s*\[(\d+)\s*\])?\s*")
re_range_count_float = re.compile(r"\s*([+-]?\s*\d+(?:.\d*)?)\s*-\s*([+-]?\s*\d+(?:.\d*)?)(?:\s*\[(\d+(?:.\d*)?)\s*\])?\s*")
+
class Script(scripts.Script):
def title(self):
return "X/Y plot"
@@ -381,7 +382,7 @@ class Script(scripts.Script):
ys = process_axis(y_opt, y_values)
def fix_axis_seeds(axis_opt, axis_list):
- if axis_opt.label in ['Seed','Var. seed']:
+ if axis_opt.label in ['Seed', 'Var. seed']:
return [int(random.randrange(4294967294)) if val is None or val == '' or val == -1 else val for val in axis_list]
else:
return axis_list
@@ -403,24 +404,33 @@ class Script(scripts.Script):
print(f"X/Y plot will create {len(xs) * len(ys) * p.n_iter} images on a {len(xs)}x{len(ys)} grid. (Total steps to process: {total_steps * p.n_iter})")
shared.total_tqdm.updateTotal(total_steps * p.n_iter)
+ grid_infotext = [None]
+
def cell(x, y):
pc = copy(p)
x_opt.apply(pc, x, xs)
y_opt.apply(pc, y, ys)
- return process_images(pc)
+ res = process_images(pc)
- if not x_opt.label == 'Nothing':
- p.extra_generation_params["XY Plot X Type"] = x_opt.label
- p.extra_generation_params["XY Plot X Values"] = '{' + x_values + '}'
- if x_opt.label in ["Seed","Var. seed"] and not no_fixed_seeds:
- p.extra_generation_params["XY Plot Fixed X Values"] = '{' + ", ".join([str(x) for x in xs])+ '}'
+ if grid_infotext[0] is None:
+ pc.extra_generation_params = copy(pc.extra_generation_params)
- if not y_opt.label == 'Nothing':
- p.extra_generation_params["XY Plot Y Type"] = y_opt.label
- p.extra_generation_params["XY Plot Y Values"] = '{' + y_values + '}'
- if y_opt.label in ["Seed","Var. seed"] and not no_fixed_seeds:
- p.extra_generation_params["XY Plot Fixed Y Values"] = '{' + ", ".join([str(y) for y in ys])+ '}'
+ if x_opt.label != 'Nothing':
+ pc.extra_generation_params["X Type"] = x_opt.label
+ pc.extra_generation_params["X Values"] = x_values
+ if x_opt.label in ["Seed", "Var. seed"] and not no_fixed_seeds:
+ pc.extra_generation_params["Fixed X Values"] = ", ".join([str(x) for x in xs])
+
+ if y_opt.label != 'Nothing':
+ pc.extra_generation_params["Y Type"] = y_opt.label
+ pc.extra_generation_params["Y Values"] = y_values
+ if y_opt.label in ["Seed", "Var. seed"] and not no_fixed_seeds:
+ pc.extra_generation_params["Fixed Y Values"] = ", ".join([str(y) for y in ys])
+
+ grid_infotext[0] = processing.create_infotext(pc, pc.all_prompts, pc.all_seeds, pc.all_subseeds)
+
+ return res
with SharedSettingsStackHelper():
processed = draw_xy_grid(
@@ -435,6 +445,6 @@ class Script(scripts.Script):
)
if opts.grid_save:
- images.save_image(processed.images[0], p.outpath_grids, "xy_grid", extension=opts.grid_format, prompt=p.prompt, seed=processed.seed, grid=True, p=p)
+ images.save_image(processed.images[0], p.outpath_grids, "xy_grid", info=grid_infotext[0], extension=opts.grid_format, prompt=p.prompt, seed=processed.seed, grid=True, p=p)
return processed
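The one-element grid_infotext list is the usual pre-nonlocal Python idiom for letting a nested function write to its enclosing scope: the first cell() call snapshots a fully populated infotext, later cells leave it untouched, and only the grid image is saved with it. A stripped-down sketch of the pattern:

grid_infotext = [None]   # mutable cell shared with the closure

def cell(x, y):
    result = f"image for ({x}, {y})"
    if grid_infotext[0] is None:
        # first cell only: record the plot parameters once, for the grid image
        grid_infotext[0] = f"X Values: {x}, Y Values: {y}"
    return result

for x in (1, 2):
    for y in (3, 4):
        cell(x, y)

print(grid_infotext[0])  # parameters captured from the very first cell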
From 24d4a0841d3cc0e5908b098f65a9caa3fa889af8 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Wed, 4 Jan 2023 20:10:40 +0300
Subject: [PATCH 080/172] train tab visual updates; allow setting train tab
 values from ui-config.json
---
modules/ui.py | 35 +++++++++++++++++++++--------------
style.css | 2 +-
2 files changed, 22 insertions(+), 15 deletions(-)
diff --git a/modules/ui.py b/modules/ui.py
index 72e7b7d2..44f4f3a4 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -1281,42 +1281,48 @@ def create_ui():
with gr.Tab(label="Train"):
gr.HTML(value="Train an embedding or Hypernetwork; you must specify a directory with a set of 1:1 ratio images [wiki]
")
- with gr.Row():
+ with FormRow():
train_embedding_name = gr.Dropdown(label='Embedding', elem_id="train_embedding", choices=sorted(sd_hijack.model_hijack.embedding_db.word_embeddings.keys()))
create_refresh_button(train_embedding_name, sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings, lambda: {"choices": sorted(sd_hijack.model_hijack.embedding_db.word_embeddings.keys())}, "refresh_train_embedding_name")
- with gr.Row():
+
train_hypernetwork_name = gr.Dropdown(label='Hypernetwork', elem_id="train_hypernetwork", choices=[x for x in shared.hypernetworks.keys()])
create_refresh_button(train_hypernetwork_name, shared.reload_hypernetworks, lambda: {"choices": sorted([x for x in shared.hypernetworks.keys()])}, "refresh_train_hypernetwork_name")
- with gr.Row():
+
+ with FormRow():
embedding_learn_rate = gr.Textbox(label='Embedding Learning rate', placeholder="Embedding Learning rate", value="0.005", elem_id="train_embedding_learn_rate")
hypernetwork_learn_rate = gr.Textbox(label='Hypernetwork Learning rate', placeholder="Hypernetwork Learning rate", value="0.00001", elem_id="train_hypernetwork_learn_rate")
- with gr.Row():
+ with FormRow():
clip_grad_mode = gr.Dropdown(value="disabled", label="Gradient Clipping", choices=["disabled", "value", "norm"])
clip_grad_value = gr.Textbox(placeholder="Gradient clip value", value="0.1", show_label=False)
- batch_size = gr.Number(label='Batch size', value=1, precision=0, elem_id="train_batch_size")
- gradient_step = gr.Number(label='Gradient accumulation steps', value=1, precision=0, elem_id="train_gradient_step")
+ with FormRow():
+ batch_size = gr.Number(label='Batch size', value=1, precision=0, elem_id="train_batch_size")
+ gradient_step = gr.Number(label='Gradient accumulation steps', value=1, precision=0, elem_id="train_gradient_step")
+
dataset_directory = gr.Textbox(label='Dataset directory', placeholder="Path to directory with input images", elem_id="train_dataset_directory")
log_directory = gr.Textbox(label='Log directory', placeholder="Path to directory where to write outputs", value="textual_inversion", elem_id="train_log_directory")
template_file = gr.Textbox(label='Prompt template file', value=os.path.join(script_path, "textual_inversion_templates", "style_filewords.txt"), elem_id="train_template_file")
training_width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="train_training_width")
training_height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="train_training_height")
steps = gr.Number(label='Max steps', value=100000, precision=0, elem_id="train_steps")
- create_image_every = gr.Number(label='Save an image to log directory every N steps, 0 to disable', value=500, precision=0, elem_id="train_create_image_every")
- save_embedding_every = gr.Number(label='Save a copy of embedding to log directory every N steps, 0 to disable', value=500, precision=0, elem_id="train_save_embedding_every")
+
+ with FormRow():
+ create_image_every = gr.Number(label='Save an image to log directory every N steps, 0 to disable', value=500, precision=0, elem_id="train_create_image_every")
+ save_embedding_every = gr.Number(label='Save a copy of embedding to log directory every N steps, 0 to disable', value=500, precision=0, elem_id="train_save_embedding_every")
+
save_image_with_stored_embedding = gr.Checkbox(label='Save images with embedding in PNG chunks', value=True, elem_id="train_save_image_with_stored_embedding")
preview_from_txt2img = gr.Checkbox(label='Read parameters (prompt, etc...) from txt2img tab when making previews', value=False, elem_id="train_preview_from_txt2img")
- with gr.Row():
- shuffle_tags = gr.Checkbox(label="Shuffle tags by ',' when creating prompts.", value=False, elem_id="train_shuffle_tags")
- tag_drop_out = gr.Slider(minimum=0, maximum=1, step=0.1, label="Drop out tags when creating prompts.", value=0, elem_id="train_tag_drop_out")
- with gr.Row():
- latent_sampling_method = gr.Radio(label='Choose latent sampling method', value="once", choices=['once', 'deterministic', 'random'], elem_id="train_latent_sampling_method")
+
+ shuffle_tags = gr.Checkbox(label="Shuffle tags by ',' when creating prompts.", value=False, elem_id="train_shuffle_tags")
+ tag_drop_out = gr.Slider(minimum=0, maximum=1, step=0.1, label="Drop out tags when creating prompts.", value=0, elem_id="train_tag_drop_out")
+
+ latent_sampling_method = gr.Radio(label='Choose latent sampling method', value="once", choices=['once', 'deterministic', 'random'], elem_id="train_latent_sampling_method")
with gr.Row():
+ train_embedding = gr.Button(value="Train Embedding", variant='primary', elem_id="train_train_embedding")
interrupt_training = gr.Button(value="Interrupt", elem_id="train_interrupt_training")
train_hypernetwork = gr.Button(value="Train Hypernetwork", variant='primary', elem_id="train_train_hypernetwork")
- train_embedding = gr.Button(value="Train Embedding", variant='primary', elem_id="train_train_embedding")
params = script_callbacks.UiTrainTabParams(txt2img_preview_params)
@@ -1803,6 +1809,7 @@ def create_ui():
visit(img2img_interface, loadsave, "img2img")
visit(extras_interface, loadsave, "extras")
visit(modelmerger_interface, loadsave, "modelmerger")
+ visit(train_interface, loadsave, "train")
if not error_loading and (not os.path.exists(ui_config_file) or settings_count != len(ui_settings)):
with open(ui_config_file, "w", encoding="utf8") as file:
diff --git a/style.css b/style.css
index 2116ec3c..09ee540b 100644
--- a/style.css
+++ b/style.css
@@ -611,7 +611,7 @@ img2maskimg, #img2maskimg > .h-60, #img2maskimg > .h-60 > div, #img2maskimg > .h
padding-top: 0.9em;
}
-#img2img_settings div.gr-form .gr-form, #txt2img_settings div.gr-form .gr-form{
+#img2img_settings div.gr-form .gr-form, #txt2img_settings div.gr-form .gr-form, #train_tabs div.gr-form .gr-form{
border: none;
padding-bottom: 0.5em;
}
From 81490780949fffed77493b4bd741e96ec737fe27 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Wed, 4 Jan 2023 22:04:40 +0300
Subject: [PATCH 081/172] added the option to specify a target resolution (with
 possible truncation) for hires fix; also hires sampling steps
---
javascript/hints.js | 11 +++--
modules/generation_parameters_copypaste.py | 9 ++--
modules/processing.py | 51 +++++++++++++++++++---
modules/txt2img.py | 5 ++-
modules/ui.py | 24 +++++++---
5 files changed, 81 insertions(+), 19 deletions(-)
diff --git a/javascript/hints.js b/javascript/hints.js
index 63e17e05..dda66e09 100644
--- a/javascript/hints.js
+++ b/javascript/hints.js
@@ -81,9 +81,6 @@ titles = {
"vram": "Torch active: Peak amount of VRAM used by Torch during generation, excluding cached data.\nTorch reserved: Peak amount of VRAM allocated by Torch, including all active and cached data.\nSys VRAM: Peak amount of VRAM allocation across all applications / total GPU VRAM (peak utilization%).",
- "Highres. fix": "Use a two step process to partially create an image at smaller resolution, upscale, and then improve details in it without changing composition",
- "Scale latent": "Uscale the image in latent space. Alternative is to produce the full image from latent representation, upscale that, and then move it back to latent space.",
-
"Eta noise seed delta": "If this values is non-zero, it will be added to seed and used to initialize RNG for noises when using samplers with Eta. You can use this to produce even more variation of images, or you can use this to match images of other software if you know what you are doing.",
"Do not add watermark to images": "If this option is enabled, watermark will not be added to created images. Warning: if you do not add watermark, you may be behaving in an unethical manner.",
@@ -100,7 +97,13 @@ titles = {
"Clip skip": "Early stopping parameter for CLIP model; 1 is stop at last layer as usual, 2 is stop at penultimate layer, etc.",
"Approx NN": "Cheap neural network approximation. Very fast compared to VAE, but produces pictures with 4 times smaller horizontal/vertical resoluton and lower quality.",
- "Approx cheap": "Very cheap approximation. Very fast compared to VAE, but produces pictures with 8 times smaller horizontal/vertical resoluton and extremely low quality."
+ "Approx cheap": "Very cheap approximation. Very fast compared to VAE, but produces pictures with 8 times smaller horizontal/vertical resoluton and extremely low quality.",
+
+ "Hires. fix": "Use a two step process to partially create an image at smaller resolution, upscale, and then improve details in it without changing composition",
+ "Hires steps": "Number of sampling steps for upscaled picture. If 0, uses same as for original.",
+ "Upscale by": "Adjusts the size of the image by multiplying the original width and height by the selected value. Ignored if either Resize width to or Resize height to are non-zero.",
+ "Resize width to": "Resizes image to this width. If 0, width is inferred from either of two nearby sliders.",
+ "Resize height to": "Resizes image to this height. If 0, height is inferred from either of two nearby sliders."
}
diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py
index 4baf4d9a..12a9de3d 100644
--- a/modules/generation_parameters_copypaste.py
+++ b/modules/generation_parameters_copypaste.py
@@ -212,11 +212,10 @@ def restore_old_hires_fix_params(res):
firstpass_width = math.ceil(scale * width / 64) * 64
firstpass_height = math.ceil(scale * height / 64) * 64
- hr_scale = width / firstpass_width if firstpass_width > 0 else height / firstpass_height
-
res['Size-1'] = firstpass_width
res['Size-2'] = firstpass_height
- res['Hires upscale'] = hr_scale
+ res['Hires resize-1'] = width
+ res['Hires resize-2'] = height
def parse_generation_parameters(x: str):
@@ -276,6 +275,10 @@ Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512, Model
hypernet_hash = res.get("Hypernet hash", None)
res["Hypernet"] = find_hypernetwork_key(hypernet_name, hypernet_hash)
+ if "Hires resize-1" not in res:
+ res["Hires resize-1"] = 0
+ res["Hires resize-2"] = 0
+
restore_old_hires_fix_params(res)
return res
diff --git a/modules/processing.py b/modules/processing.py
index 47712159..9cad05f2 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -662,12 +662,17 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
sampler = None
- def __init__(self, enable_hr: bool = False, denoising_strength: float = 0.75, firstphase_width: int = 0, firstphase_height: int = 0, hr_scale: float = 2.0, hr_upscaler: str = None, **kwargs):
+ def __init__(self, enable_hr: bool = False, denoising_strength: float = 0.75, firstphase_width: int = 0, firstphase_height: int = 0, hr_scale: float = 2.0, hr_upscaler: str = None, hr_second_pass_steps: int = 0, hr_resize_x: int = 0, hr_resize_y: int = 0, **kwargs):
super().__init__(**kwargs)
self.enable_hr = enable_hr
self.denoising_strength = denoising_strength
self.hr_scale = hr_scale
self.hr_upscaler = hr_upscaler
+ self.hr_second_pass_steps = hr_second_pass_steps
+ self.hr_resize_x = hr_resize_x
+ self.hr_resize_y = hr_resize_y
+ self.hr_upscale_to_x = hr_resize_x
+ self.hr_upscale_to_y = hr_resize_y
if firstphase_width != 0 or firstphase_height != 0:
print("firstphase_width/firstphase_height no longer supported; use hr_scale", file=sys.stderr)
@@ -675,6 +680,9 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
self.width = firstphase_width
self.height = firstphase_height
+ self.truncate_x = 0
+ self.truncate_y = 0
+
def init(self, all_prompts, all_seeds, all_subseeds):
if self.enable_hr:
if state.job_count == -1:
@@ -682,7 +690,38 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
else:
state.job_count = state.job_count * 2
- self.extra_generation_params["Hires upscale"] = self.hr_scale
+ if self.hr_resize_x == 0 and self.hr_resize_y == 0:
+ self.extra_generation_params["Hires upscale"] = self.hr_scale
+ self.hr_upscale_to_x = int(self.width * self.hr_scale)
+ self.hr_upscale_to_y = int(self.height * self.hr_scale)
+ else:
+ self.extra_generation_params["Hires resize"] = f"{self.hr_resize_x}x{self.hr_resize_y}"
+
+ if self.hr_resize_y == 0:
+ self.hr_upscale_to_x = self.hr_resize_x
+ self.hr_upscale_to_y = self.hr_resize_x * self.height // self.width
+ elif self.hr_resize_x == 0:
+ self.hr_upscale_to_x = self.hr_resize_y * self.width // self.height
+ self.hr_upscale_to_y = self.hr_resize_y
+ else:
+ target_w = self.hr_resize_x
+ target_h = self.hr_resize_y
+ src_ratio = self.width / self.height
+ dst_ratio = self.hr_resize_x / self.hr_resize_y
+
+ if src_ratio < dst_ratio:
+ self.hr_upscale_to_x = self.hr_resize_x
+ self.hr_upscale_to_y = self.hr_resize_x * self.height // self.width
+ else:
+ self.hr_upscale_to_x = self.hr_resize_y * self.width // self.height
+ self.hr_upscale_to_y = self.hr_resize_y
+
+ self.truncate_x = (self.hr_upscale_to_x - target_w) // opt_f
+ self.truncate_y = (self.hr_upscale_to_y - target_h) // opt_f
+
+ if self.hr_second_pass_steps:
+ self.extra_generation_params["Hires steps"] = self.hr_second_pass_steps
+
if self.hr_upscaler is not None:
self.extra_generation_params["Hires upscaler"] = self.hr_upscaler
@@ -699,8 +738,8 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
if not self.enable_hr:
return samples
- target_width = int(self.width * self.hr_scale)
- target_height = int(self.height * self.hr_scale)
+ target_width = self.hr_upscale_to_x
+ target_height = self.hr_upscale_to_y
def save_intermediate(image, index):
"""saves image before applying hires fix, if enabled in options; takes as an argument either an image or batch with latent space images"""
@@ -755,13 +794,15 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
self.sampler = sd_samplers.create_sampler(self.sampler_name, self.sd_model)
+ samples = samples[:, :, self.truncate_y//2:samples.shape[2]-(self.truncate_y+1)//2, self.truncate_x//2:samples.shape[3]-(self.truncate_x+1)//2]
+
noise = create_random_tensors(samples.shape[1:], seeds=seeds, subseeds=subseeds, subseed_strength=subseed_strength, p=self)
# GC now before running the next img2img to prevent running out of memory
x = None
devices.torch_gc()
- samples = self.sampler.sample_img2img(self, samples, noise, conditioning, unconditional_conditioning, steps=self.steps, image_conditioning=image_conditioning)
+ samples = self.sampler.sample_img2img(self, samples, noise, conditioning, unconditional_conditioning, steps=self.hr_second_pass_steps or self.steps, image_conditioning=image_conditioning)
return samples
diff --git a/modules/txt2img.py b/modules/txt2img.py
index e189a899..38b5f591 100644
--- a/modules/txt2img.py
+++ b/modules/txt2img.py
@@ -8,7 +8,7 @@ import modules.processing as processing
from modules.ui import plaintext_to_html
-def txt2img(prompt: str, negative_prompt: str, prompt_style: str, prompt_style2: str, steps: int, sampler_index: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, enable_hr: bool, denoising_strength: float, hr_scale: float, hr_upscaler: str, *args):
+def txt2img(prompt: str, negative_prompt: str, prompt_style: str, prompt_style2: str, steps: int, sampler_index: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, enable_hr: bool, denoising_strength: float, hr_scale: float, hr_upscaler: str, hr_second_pass_steps: int, hr_resize_x: int, hr_resize_y: int, *args):
p = StableDiffusionProcessingTxt2Img(
sd_model=shared.sd_model,
outpath_samples=opts.outdir_samples or opts.outdir_txt2img_samples,
@@ -35,6 +35,9 @@ def txt2img(prompt: str, negative_prompt: str, prompt_style: str, prompt_style2:
denoising_strength=denoising_strength if enable_hr else None,
hr_scale=hr_scale,
hr_upscaler=hr_upscaler,
+ hr_second_pass_steps=hr_second_pass_steps,
+ hr_resize_x=hr_resize_x,
+ hr_resize_y=hr_resize_y,
)
p.scripts = modules.scripts.scripts_txt2img
diff --git a/modules/ui.py b/modules/ui.py
index 44f4f3a4..04091e67 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -637,10 +637,10 @@ def create_sampler_and_steps_selection(choices, tabname):
with FormRow(elem_id=f"sampler_selection_{tabname}"):
sampler_index = gr.Dropdown(label='Sampling method', elem_id=f"{tabname}_sampling", choices=[x.name for x in choices], value=choices[0].name, type="index")
sampler_index.save_to_config = True
- steps = gr.Slider(minimum=1, maximum=150, step=1, elem_id=f"{tabname}_steps", label="Sampling Steps", value=20)
+ steps = gr.Slider(minimum=1, maximum=150, step=1, elem_id=f"{tabname}_steps", label="Sampling steps", value=20)
else:
with FormGroup(elem_id=f"sampler_selection_{tabname}"):
- steps = gr.Slider(minimum=1, maximum=150, step=1, elem_id=f"{tabname}_steps", label="Sampling Steps", value=20)
+ steps = gr.Slider(minimum=1, maximum=150, step=1, elem_id=f"{tabname}_steps", label="Sampling steps", value=20)
sampler_index = gr.Radio(label='Sampling method', elem_id=f"{tabname}_sampling", choices=[x.name for x in choices], value=choices[0].name, type="index")
return steps, sampler_index
@@ -709,10 +709,16 @@ def create_ui():
enable_hr = gr.Checkbox(label='Hires. fix', value=False, elem_id="txt2img_enable_hr")
elif category == "hires_fix":
- with FormRow(visible=False, elem_id="txt2img_hires_fix") as hr_options:
- hr_upscaler = gr.Dropdown(label="Upscaler", elem_id="txt2img_hr_upscaler", choices=[*shared.latent_upscale_modes, *[x.name for x in shared.sd_upscalers]], value=shared.latent_upscale_default_mode)
- hr_scale = gr.Slider(minimum=1.0, maximum=4.0, step=0.05, label="Upscale by", value=2.0, elem_id="txt2img_hr_scale")
- denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.7, elem_id="txt2img_denoising_strength")
+ with FormGroup(visible=False, elem_id="txt2img_hires_fix") as hr_options:
+ with FormRow(elem_id="txt2img_hires_fix_row1"):
+ hr_upscaler = gr.Dropdown(label="Upscaler", elem_id="txt2img_hr_upscaler", choices=[*shared.latent_upscale_modes, *[x.name for x in shared.sd_upscalers]], value=shared.latent_upscale_default_mode)
+ hr_second_pass_steps = gr.Slider(minimum=0, maximum=150, step=1, label='Hires steps', value=0, elem_id="txt2img_hires_steps")
+ denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.7, elem_id="txt2img_denoising_strength")
+
+ with FormRow(elem_id="txt2img_hires_fix_row2"):
+ hr_scale = gr.Slider(minimum=1.0, maximum=4.0, step=0.05, label="Upscale by", value=2.0, elem_id="txt2img_hr_scale")
+ hr_resize_x = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize width to", value=0, elem_id="txt2img_hr_resize_x")
+ hr_resize_y = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize height to", value=0, elem_id="txt2img_hr_resize_y")
elif category == "batch":
if not opts.dimensions_and_batch_together:
@@ -753,6 +759,9 @@ def create_ui():
denoising_strength,
hr_scale,
hr_upscaler,
+ hr_second_pass_steps,
+ hr_resize_x,
+ hr_resize_y,
] + custom_inputs,
outputs=[
@@ -804,6 +813,9 @@ def create_ui():
(hr_options, lambda d: gr.Row.update(visible="Denoising strength" in d)),
(hr_scale, "Hires upscale"),
(hr_upscaler, "Hires upscaler"),
+ (hr_second_pass_steps, "Hires steps"),
+ (hr_resize_x, "Hires resize-1"),
+ (hr_resize_y, "Hires resize-2"),
*modules.scripts.scripts_txt2img.infotext_fields
]
parameters_copypaste.add_paste_fields("txt2img", None, txt2img_paste_fields)
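The resize logic covers the requested rectangle by aspect ratio and then crops the overshoot in latent space, which is why the truncation amounts are divided by opt_f (8 pixels per latent cell). A worked example for a 512x512 first pass resized to 1024x768, following the arithmetic in the patch:

opt_f = 8
width, height = 512, 512                 # first-pass size
hr_resize_x, hr_resize_y = 1024, 768     # requested target

src_ratio = width / height               # 1.0
dst_ratio = hr_resize_x / hr_resize_y    # ~1.333

if src_ratio < dst_ratio:
    hr_upscale_to_x = hr_resize_x
    hr_upscale_to_y = hr_resize_x * height // width
else:
    hr_upscale_to_x = hr_resize_y * width // height
    hr_upscale_to_y = hr_resize_y

truncate_x = (hr_upscale_to_x - hr_resize_x) // opt_f
truncate_y = (hr_upscale_to_y - hr_resize_y) // opt_f

print(hr_upscale_to_x, hr_upscale_to_y)  # 1024 1024 (upscale wide enough to cover)
print(truncate_x, truncate_y)            # 0 32 -> 16 latent rows cropped top and bottom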
From 1288a3bb7d21064e5bd0af7158a3840886027c51 Mon Sep 17 00:00:00 2001
From: Suffocate <70031311+lolsuffocate@users.noreply.github.com>
Date: Wed, 4 Jan 2023 20:36:30 +0000
Subject: [PATCH 082/172] Use the read_info_from_image function directly
---
modules/api/api.py | 16 ++++++++++++----
modules/api/models.py | 5 +++--
2 files changed, 15 insertions(+), 6 deletions(-)
diff --git a/modules/api/api.py b/modules/api/api.py
index 48a70a44..2103709b 100644
--- a/modules/api/api.py
+++ b/modules/api/api.py
@@ -11,10 +11,10 @@ from fastapi.security import HTTPBasic, HTTPBasicCredentials
from secrets import compare_digest
import modules.shared as shared
-from modules import sd_samplers, deepbooru, sd_hijack
+from modules import sd_samplers, deepbooru, sd_hijack, images
from modules.api.models import *
from modules.processing import StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img, process_images
-from modules.extras import run_extras, run_pnginfo
+from modules.extras import run_extras
from modules.textual_inversion.textual_inversion import create_embedding, train_embedding
from modules.textual_inversion.preprocess import preprocess
from modules.hypernetworks.hypernetwork import create_hypernetwork, train_hypernetwork
@@ -233,9 +233,17 @@ class Api:
if(not req.image.strip()):
return PNGInfoResponse(info="")
- result = run_pnginfo(decode_base64_to_image(req.image.strip()))
+ image = decode_base64_to_image(req.image.strip())
+ if image is None:
+ return PNGInfoResponse(info="")
- return PNGInfoResponse(info=result[1])
+ geninfo, items = images.read_info_from_image(image)
+ if geninfo is None:
+ geninfo = ""
+
+ items = {**{'parameters': geninfo}, **items}
+
+ return PNGInfoResponse(info=geninfo, items=items)
def progressapi(self, req: ProgressRequest = Depends()):
# copy from check_progress_call of ui.py
diff --git a/modules/api/models.py b/modules/api/models.py
index 4a632c68..d8198a27 100644
--- a/modules/api/models.py
+++ b/modules/api/models.py
@@ -157,7 +157,8 @@ class PNGInfoRequest(BaseModel):
image: str = Field(title="Image", description="The base64 encoded PNG image")
class PNGInfoResponse(BaseModel):
- info: str = Field(title="Image info", description="A string with all the info the image had")
+ info: str = Field(title="Image info", description="A string with the parameters used to generate the image")
+ items: dict = Field(title="Items", description="An object containing all the info the image had")
class ProgressRequest(BaseModel):
skip_current_image: bool = Field(default=False, title="Skip current image", description="Skip current image serialization")
@@ -258,4 +259,4 @@ class EmbeddingItem(BaseModel):
class EmbeddingsResponse(BaseModel):
loaded: Dict[str, EmbeddingItem] = Field(title="Loaded", description="Embeddings loaded for the current model")
- skipped: Dict[str, EmbeddingItem] = Field(title="Skipped", description="Embeddings skipped for the current model (likely due to architecture incompatibility)")
\ No newline at end of file
+ skipped: Dict[str, EmbeddingItem] = Field(title="Skipped", description="Embeddings skipped for the current model (likely due to architecture incompatibility)")
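With this change the pnginfo endpoint returns both the flat parameters string and an items dict. A client-side sketch, assuming the webui API is reachable on localhost:7860 and that the route is /sdapi/v1/png-info as registered in modules/api:

    # Hedged client sketch: POST a base64-encoded PNG and read back both
    # the parameters string and the full metadata dict.
    import base64
    import requests

    with open("sample.png", "rb") as f:
        encoded = base64.b64encode(f.read()).decode()

    resp = requests.post("http://127.0.0.1:7860/sdapi/v1/png-info", json={"image": encoded})
    data = resp.json()
    print(data["info"])   # generation parameters string (empty if none found)
    print(data["items"])  # all metadata, including the 'parameters' entry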
From bc43293c640aef65df3136de9e5bd8b7e79eb3e0 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Wed, 4 Jan 2023 23:56:43 +0300
Subject: [PATCH 083/172] fix incorrect display/calculation for number of steps
for hires fix in progress bars
---
modules/processing.py | 9 ++++++---
modules/sd_samplers.py | 5 +++--
modules/shared.py | 4 +++-
3 files changed, 12 insertions(+), 6 deletions(-)
diff --git a/modules/processing.py b/modules/processing.py
index 9cad05f2..f28e7212 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -685,10 +685,13 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
def init(self, all_prompts, all_seeds, all_subseeds):
if self.enable_hr:
- if state.job_count == -1:
- state.job_count = self.n_iter * 2
- else:
+ if not state.processing_has_refined_job_count:
+ if state.job_count == -1:
+ state.job_count = self.n_iter
+
+ shared.total_tqdm.updateTotal((self.steps + (self.hr_second_pass_steps or self.steps)) * state.job_count)
state.job_count = state.job_count * 2
+ state.processing_has_refined_job_count = True
if self.hr_resize_x == 0 and self.hr_resize_y == 0:
self.extra_generation_params["Hires upscale"] = self.hr_scale
diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py
index e904d860..3851a77f 100644
--- a/modules/sd_samplers.py
+++ b/modules/sd_samplers.py
@@ -97,8 +97,9 @@ sampler_extra_params = {
def setup_img2img_steps(p, steps=None):
if opts.img2img_fix_steps or steps is not None:
- steps = int((steps or p.steps) / min(p.denoising_strength, 0.999)) if p.denoising_strength > 0 else 0
- t_enc = p.steps - 1
+ requested_steps = (steps or p.steps)
+ steps = int(requested_steps / min(p.denoising_strength, 0.999)) if p.denoising_strength > 0 else 0
+ t_enc = requested_steps - 1
else:
steps = p.steps
t_enc = int(min(p.denoising_strength, 0.999) * steps)
diff --git a/modules/shared.py b/modules/shared.py
index 54a6ba23..04c545ee 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -153,6 +153,7 @@ class State:
job = ""
job_no = 0
job_count = 0
+ processing_has_refined_job_count = False
job_timestamp = '0'
sampling_step = 0
sampling_steps = 0
@@ -194,6 +195,7 @@ class State:
def begin(self):
self.sampling_step = 0
self.job_count = -1
+ self.processing_has_refined_job_count = False
self.job_no = 0
self.job_timestamp = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
self.current_latent = None
@@ -608,7 +610,7 @@ class TotalTQDM:
return
if self._tqdm is None:
self.reset()
- self._tqdm.total=new_total
+ self._tqdm.total = new_total
def clear(self):
if self._tqdm is not None:
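The refined total for the progress bar is first-pass steps plus second-pass steps, where a 'Hires steps' value of 0 falls back to the first-pass count, multiplied by the number of jobs. A worked sketch of that formula:

    # Sketch of the updateTotal() computation above: hr_second_pass_steps == 0
    # means the second pass reuses the first-pass step count.
    def total_progress_steps(steps, hr_second_pass_steps, job_count):
        return (steps + (hr_second_pass_steps or steps)) * job_count

    assert total_progress_steps(20, 0, 2) == 80   # 2 jobs, 20 + 20 steps each
    assert total_progress_steps(20, 15, 2) == 70  # 2 jobs, 20 + 15 steps each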
From 5851bc839b6f639cda59e84eb1ee8c706986633d Mon Sep 17 00:00:00 2001
From: me <25877290+Kryptortio@users.noreply.github.com>
Date: Wed, 4 Jan 2023 22:03:32 +0100
Subject: [PATCH 084/172] Add element ids for script components and a few more
in ui.py
---
modules/ui.py | 16 ++++++++--------
scripts/custom_code.py | 4 +++-
scripts/img2imgalt.py | 22 ++++++++++++----------
scripts/loopback.py | 6 ++++--
scripts/outpainting_mk_2.py | 12 +++++++-----
scripts/poor_mans_outpainting.py | 10 ++++++----
scripts/prompt_matrix.py | 6 ++++--
scripts/prompts_from_file.py | 10 ++++++----
scripts/sd_upscale.py | 8 +++++---
scripts/xy_grid.py | 15 ++++++++-------
10 files changed, 63 insertions(+), 46 deletions(-)
diff --git a/modules/ui.py b/modules/ui.py
index 04091e67..bb64fe20 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -560,7 +560,7 @@ Requested path was: {f}
generation_info = None
with gr.Column():
with gr.Row(elem_id=f"image_buttons_{tabname}"):
- open_folder_button = gr.Button(folder_symbol, elem_id="hidden_element" if shared.cmd_opts.hide_ui_dir_config else 'open_folder')
+ open_folder_button = gr.Button(folder_symbol, elem_id="hidden_element" if shared.cmd_opts.hide_ui_dir_config else f'open_folder_{tabname}')
if tabname != "extras":
save = gr.Button('Save', elem_id=f'save_{tabname}')
@@ -576,13 +576,13 @@ Requested path was: {f}
if tabname != "extras":
with gr.Row():
- download_files = gr.File(None, file_count="multiple", interactive=False, show_label=False, visible=False)
+ download_files = gr.File(None, file_count="multiple", interactive=False, show_label=False, visible=False, elem_id=f'download_files_{tabname}')
with gr.Group():
- html_info = gr.HTML()
- html_log = gr.HTML()
+ html_info = gr.HTML(elem_id=f'html_info_{tabname}')
+ html_log = gr.HTML(elem_id=f'html_log_{tabname}')
- generation_info = gr.Textbox(visible=False)
+ generation_info = gr.Textbox(visible=False, elem_id=f'generation_info_{tabname}')
if tabname == 'txt2img' or tabname == 'img2img':
generation_info_button = gr.Button(visible=False, elem_id=f"{tabname}_generation_info_button")
generation_info_button.click(
@@ -624,9 +624,9 @@ Requested path was: {f}
)
else:
- html_info_x = gr.HTML()
- html_info = gr.HTML()
- html_log = gr.HTML()
+ html_info_x = gr.HTML(elem_id=f'html_info_x_{tabname}')
+ html_info = gr.HTML(elem_id=f'html_info_{tabname}')
+ html_log = gr.HTML(elem_id=f'html_log_{tabname}')
parameters_copypaste.bind_buttons(buttons, result_gallery, "txt2img" if tabname == "txt2img" else None)
return result_gallery, generation_info if tabname != "extras" else html_info_x, html_info, html_log
diff --git a/scripts/custom_code.py b/scripts/custom_code.py
index 22e7b77a..841fed97 100644
--- a/scripts/custom_code.py
+++ b/scripts/custom_code.py
@@ -14,7 +14,9 @@ class Script(scripts.Script):
return cmd_opts.allow_code
def ui(self, is_img2img):
- code = gr.Textbox(label="Python code", lines=1)
+ elem_prefix = ('i2i' if is_img2img else 't2i') + '_script_custom_code_'
+
+ code = gr.Textbox(label="Python code", lines=1, elem_id=elem_prefix + "code")
return [code]
diff --git a/scripts/img2imgalt.py b/scripts/img2imgalt.py
index 1229f61b..cddd46e7 100644
--- a/scripts/img2imgalt.py
+++ b/scripts/img2imgalt.py
@@ -126,24 +126,26 @@ class Script(scripts.Script):
return is_img2img
def ui(self, is_img2img):
+ elem_prefix = ('i2i' if is_img2img else 't2i') + '_script_i2i_alternative_test_'
+
info = gr.Markdown('''
* `CFG Scale` should be 2 or lower.
''')
- override_sampler = gr.Checkbox(label="Override `Sampling method` to Euler?(this method is built for it)", value=True)
+ override_sampler = gr.Checkbox(label="Override `Sampling method` to Euler?(this method is built for it)", value=True, elem_id=elem_prefix + "override_sampler")
- override_prompt = gr.Checkbox(label="Override `prompt` to the same value as `original prompt`?(and `negative prompt`)", value=True)
- original_prompt = gr.Textbox(label="Original prompt", lines=1)
- original_negative_prompt = gr.Textbox(label="Original negative prompt", lines=1)
+ override_prompt = gr.Checkbox(label="Override `prompt` to the same value as `original prompt`?(and `negative prompt`)", value=True, elem_id=elem_prefix + "override_prompt")
+ original_prompt = gr.Textbox(label="Original prompt", lines=1, elem_id=elem_prefix + "original_prompt")
+ original_negative_prompt = gr.Textbox(label="Original negative prompt", lines=1, elem_id=elem_prefix + "original_negative_prompt")
- override_steps = gr.Checkbox(label="Override `Sampling Steps` to the same value as `Decode steps`?", value=True)
- st = gr.Slider(label="Decode steps", minimum=1, maximum=150, step=1, value=50)
+ override_steps = gr.Checkbox(label="Override `Sampling Steps` to the same value as `Decode steps`?", value=True, elem_id=elem_prefix + "override_steps")
+ st = gr.Slider(label="Decode steps", minimum=1, maximum=150, step=1, value=50, elem_id=elem_prefix + "st")
- override_strength = gr.Checkbox(label="Override `Denoising strength` to 1?", value=True)
+ override_strength = gr.Checkbox(label="Override `Denoising strength` to 1?", value=True, elem_id=elem_prefix + "override_strength")
- cfg = gr.Slider(label="Decode CFG scale", minimum=0.0, maximum=15.0, step=0.1, value=1.0)
- randomness = gr.Slider(label="Randomness", minimum=0.0, maximum=1.0, step=0.01, value=0.0)
- sigma_adjustment = gr.Checkbox(label="Sigma adjustment for finding noise for image", value=False)
+ cfg = gr.Slider(label="Decode CFG scale", minimum=0.0, maximum=15.0, step=0.1, value=1.0, elem_id=elem_prefix + "cfg")
+ randomness = gr.Slider(label="Randomness", minimum=0.0, maximum=1.0, step=0.01, value=0.0, elem_id=elem_prefix + "randomness")
+ sigma_adjustment = gr.Checkbox(label="Sigma adjustment for finding noise for image", value=False, elem_id=elem_prefix + "sigma_adjustment")
return [
info,
diff --git a/scripts/loopback.py b/scripts/loopback.py
index d8c68af8..5c1265a0 100644
--- a/scripts/loopback.py
+++ b/scripts/loopback.py
@@ -17,8 +17,10 @@ class Script(scripts.Script):
return is_img2img
def ui(self, is_img2img):
- loops = gr.Slider(minimum=1, maximum=32, step=1, label='Loops', value=4)
- denoising_strength_change_factor = gr.Slider(minimum=0.9, maximum=1.1, step=0.01, label='Denoising strength change factor', value=1)
+ elem_prefix = ('i2i' if is_img2img else 't2i') + '_script_loopback_'
+
+ loops = gr.Slider(minimum=1, maximum=32, step=1, label='Loops', value=4, elem_id=elem_prefix + "loops")
+ denoising_strength_change_factor = gr.Slider(minimum=0.9, maximum=1.1, step=0.01, label='Denoising strength change factor', value=1, elem_id=elem_prefix + "denoising_strength_change_factor")
return [loops, denoising_strength_change_factor]
diff --git a/scripts/outpainting_mk_2.py b/scripts/outpainting_mk_2.py
index cf71cb92..760cce64 100644
--- a/scripts/outpainting_mk_2.py
+++ b/scripts/outpainting_mk_2.py
@@ -129,13 +129,15 @@ class Script(scripts.Script):
if not is_img2img:
return None
+ elem_prefix = ('i2i' if is_img2img else 't2i') + '_script_outpainting_mk_2_'
+
info = gr.HTML("Recommended settings: Sampling Steps: 80-100, Sampler: Euler a, Denoising strength: 0.8
")
- pixels = gr.Slider(label="Pixels to expand", minimum=8, maximum=256, step=8, value=128)
- mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=8)
- direction = gr.CheckboxGroup(label="Outpainting direction", choices=['left', 'right', 'up', 'down'], value=['left', 'right', 'up', 'down'])
- noise_q = gr.Slider(label="Fall-off exponent (lower=higher detail)", minimum=0.0, maximum=4.0, step=0.01, value=1.0)
- color_variation = gr.Slider(label="Color variation", minimum=0.0, maximum=1.0, step=0.01, value=0.05)
+ pixels = gr.Slider(label="Pixels to expand", minimum=8, maximum=256, step=8, value=128, elem_id=elem_prefix + "pixels")
+ mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=8, elem_id=elem_prefix + "mask_blur")
+ direction = gr.CheckboxGroup(label="Outpainting direction", choices=['left', 'right', 'up', 'down'], value=['left', 'right', 'up', 'down'], elem_id=elem_prefix + "direction")
+ noise_q = gr.Slider(label="Fall-off exponent (lower=higher detail)", minimum=0.0, maximum=4.0, step=0.01, value=1.0, elem_id=elem_prefix + "noise_q")
+ color_variation = gr.Slider(label="Color variation", minimum=0.0, maximum=1.0, step=0.01, value=0.05, elem_id=elem_prefix + "color_variation")
return [info, pixels, mask_blur, direction, noise_q, color_variation]
diff --git a/scripts/poor_mans_outpainting.py b/scripts/poor_mans_outpainting.py
index ea45beb0..6bcdcc02 100644
--- a/scripts/poor_mans_outpainting.py
+++ b/scripts/poor_mans_outpainting.py
@@ -21,10 +21,12 @@ class Script(scripts.Script):
if not is_img2img:
return None
- pixels = gr.Slider(label="Pixels to expand", minimum=8, maximum=256, step=8, value=128)
- mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=4)
- inpainting_fill = gr.Radio(label='Masked content', choices=['fill', 'original', 'latent noise', 'latent nothing'], value='fill', type="index")
- direction = gr.CheckboxGroup(label="Outpainting direction", choices=['left', 'right', 'up', 'down'], value=['left', 'right', 'up', 'down'])
+ elem_prefix = ('i2i' if is_img2img else 't2i') + '_script_poor_mans_outpainting_'
+
+ pixels = gr.Slider(label="Pixels to expand", minimum=8, maximum=256, step=8, value=128, elem_id=elem_prefix + "pixels")
+ mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=4, elem_id=elem_prefix + "mask_blur")
+ inpainting_fill = gr.Radio(label='Masked content', choices=['fill', 'original', 'latent noise', 'latent nothing'], value='fill', type="index", elem_id=elem_prefix + "inpainting_fill")
+ direction = gr.CheckboxGroup(label="Outpainting direction", choices=['left', 'right', 'up', 'down'], value=['left', 'right', 'up', 'down'], elem_id=elem_prefix + "direction")
return [pixels, mask_blur, inpainting_fill, direction]
diff --git a/scripts/prompt_matrix.py b/scripts/prompt_matrix.py
index 4c79eaef..59172315 100644
--- a/scripts/prompt_matrix.py
+++ b/scripts/prompt_matrix.py
@@ -45,8 +45,10 @@ class Script(scripts.Script):
return "Prompt matrix"
def ui(self, is_img2img):
- put_at_start = gr.Checkbox(label='Put variable parts at start of prompt', value=False)
- different_seeds = gr.Checkbox(label='Use different seed for each picture', value=False)
+ elem_prefix = ('i2i' if is_img2img else 't2i') + '_script_prompt_matrix_'
+
+ put_at_start = gr.Checkbox(label='Put variable parts at start of prompt', value=False, elem_id=elem_prefix + "put_at_start")
+ different_seeds = gr.Checkbox(label='Use different seed for each picture', value=False, elem_id=elem_prefix + "different_seeds")
return [put_at_start, different_seeds]
diff --git a/scripts/prompts_from_file.py b/scripts/prompts_from_file.py
index e8386ed2..fc8ddd8a 100644
--- a/scripts/prompts_from_file.py
+++ b/scripts/prompts_from_file.py
@@ -112,11 +112,13 @@ class Script(scripts.Script):
return "Prompts from file or textbox"
def ui(self, is_img2img):
- checkbox_iterate = gr.Checkbox(label="Iterate seed every line", value=False)
- checkbox_iterate_batch = gr.Checkbox(label="Use same random seed for all lines", value=False)
+ elem_prefix = ('i2i' if is_img2img else 't2i') + '_script_prompt_from_file_'
+
+ checkbox_iterate = gr.Checkbox(label="Iterate seed every line", value=False, elem_id=elem_prefix + "checkbox_iterate")
+ checkbox_iterate_batch = gr.Checkbox(label="Use same random seed for all lines", value=False, elem_id=elem_prefix + "checkbox_iterate_batch")
- prompt_txt = gr.Textbox(label="List of prompt inputs", lines=1)
- file = gr.File(label="Upload prompt inputs", type='bytes')
+ prompt_txt = gr.Textbox(label="List of prompt inputs", lines=1, elem_id=elem_prefix + "prompt_txt")
+ file = gr.File(label="Upload prompt inputs", type='bytes', elem_id=elem_prefix + "file")
file.change(fn=load_prompt_file, inputs=[file], outputs=[file, prompt_txt, prompt_txt])
diff --git a/scripts/sd_upscale.py b/scripts/sd_upscale.py
index 9739545c..9f483a67 100644
--- a/scripts/sd_upscale.py
+++ b/scripts/sd_upscale.py
@@ -17,10 +17,12 @@ class Script(scripts.Script):
return is_img2img
def ui(self, is_img2img):
+ elem_prefix = ('i2i' if is_img2img else 't2i') + '_script_sd_upscale_'
+
info = gr.HTML("Will upscale the image by the selected scale factor; use width and height sliders to set tile size
")
- overlap = gr.Slider(minimum=0, maximum=256, step=16, label='Tile overlap', value=64)
- scale_factor = gr.Slider(minimum=1.0, maximum=4.0, step=0.05, label='Scale Factor', value=2.0)
- upscaler_index = gr.Radio(label='Upscaler', choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name, type="index")
+ overlap = gr.Slider(minimum=0, maximum=256, step=16, label='Tile overlap', value=64, elem_id=elem_prefix + "overlap")
+ scale_factor = gr.Slider(minimum=1.0, maximum=4.0, step=0.05, label='Scale Factor', value=2.0, elem_id=elem_prefix + "scale_factor")
+ upscaler_index = gr.Radio(label='Upscaler', choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name, type="index", elem_id=elem_prefix + "upscaler_index")
return [info, overlap, upscaler_index, scale_factor]
diff --git a/scripts/xy_grid.py b/scripts/xy_grid.py
index 78ff12c5..90226ccd 100644
--- a/scripts/xy_grid.py
+++ b/scripts/xy_grid.py
@@ -292,18 +292,19 @@ class Script(scripts.Script):
def ui(self, is_img2img):
current_axis_options = [x for x in axis_options if type(x) == AxisOption or type(x) == AxisOptionImg2Img and is_img2img]
+ elem_prefix = ('i2i' if is_img2img else 't2i') + '_script_xy_grid_'
with gr.Row():
- x_type = gr.Dropdown(label="X type", choices=[x.label for x in current_axis_options], value=current_axis_options[1].label, type="index", elem_id="x_type")
- x_values = gr.Textbox(label="X values", lines=1)
+ x_type = gr.Dropdown(label="X type", choices=[x.label for x in current_axis_options], value=current_axis_options[1].label, type="index", elem_id=elem_prefix + "x_type")
+ x_values = gr.Textbox(label="X values", lines=1, elem_id=elem_prefix + "x_values")
with gr.Row():
- y_type = gr.Dropdown(label="Y type", choices=[x.label for x in current_axis_options], value=current_axis_options[0].label, type="index", elem_id="y_type")
- y_values = gr.Textbox(label="Y values", lines=1)
+ y_type = gr.Dropdown(label="Y type", choices=[x.label for x in current_axis_options], value=current_axis_options[0].label, type="index", elem_id=elem_prefix + "y_type")
+ y_values = gr.Textbox(label="Y values", lines=1, elem_id=elem_prefix + "y_values")
- draw_legend = gr.Checkbox(label='Draw legend', value=True)
- include_lone_images = gr.Checkbox(label='Include Separate Images', value=False)
- no_fixed_seeds = gr.Checkbox(label='Keep -1 for seeds', value=False)
+ draw_legend = gr.Checkbox(label='Draw legend', value=True, elem_id=elem_prefix + "draw_legend")
+ include_lone_images = gr.Checkbox(label='Include Separate Images', value=False, elem_id=elem_prefix + "include_lone_images")
+ no_fixed_seeds = gr.Checkbox(label='Keep -1 for seeds', value=False, elem_id=elem_prefix + "no_fixed_seeds")
return [x_type, x_values, y_type, y_values, draw_legend, include_lone_images, no_fixed_seeds]
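Each script now derives its component ids from a per-tab prefix. A standalone sketch of the naming scheme this patch applies; make_elem_id is a hypothetical helper, but the scheme itself is taken from the diff:

    # Sketch of the id scheme: tab short name + '_script_' + script name + item.
    def make_elem_id(is_img2img, script_name, item_id):
        return ('i2i' if is_img2img else 't2i') + '_script_' + script_name + '_' + item_id

    assert make_elem_id(True, 'loopback', 'loops') == 'i2i_script_loopback_loops'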
From b663ee2cff6831354e1b5326800c8d1bf300cafe Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Thu, 5 Jan 2023 00:36:10 +0300
Subject: [PATCH 085/172] fix fullscreen view showing wrong image on firefox
---
javascript/imageviewer.js | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/javascript/imageviewer.js b/javascript/imageviewer.js
index 67916536..97f56c07 100644
--- a/javascript/imageviewer.js
+++ b/javascript/imageviewer.js
@@ -148,7 +148,7 @@ function showGalleryImage() {
if(e && e.parentElement.tagName == 'DIV'){
e.style.cursor='pointer'
e.style.userSelect='none'
- e.addEventListener('click', function (evt) {
+ e.addEventListener('mousedown', function (evt) {
if(!opts.js_modal_lightbox) return;
modalZoomSet(gradioApp().getElementById('modalImage'), opts.js_modal_lightbox_initially_zoomed)
showModal(evt)
From 99b67cff0b48c4a1ad6e14d9cc591b11db6e293c Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Thu, 5 Jan 2023 01:25:52 +0300
Subject: [PATCH 086/172] make hires fix do nothing if the user chooses the
 second pass resolution to be the same as the first pass resolution
---
modules/processing.py | 25 +++++++++++++++++--------
1 file changed, 17 insertions(+), 8 deletions(-)
diff --git a/modules/processing.py b/modules/processing.py
index f28e7212..7e853287 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -683,16 +683,9 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
self.truncate_x = 0
self.truncate_y = 0
+
def init(self, all_prompts, all_seeds, all_subseeds):
if self.enable_hr:
- if not state.processing_has_refined_job_count:
- if state.job_count == -1:
- state.job_count = self.n_iter
-
- shared.total_tqdm.updateTotal((self.steps + (self.hr_second_pass_steps or self.steps)) * state.job_count)
- state.job_count = state.job_count * 2
- state.processing_has_refined_job_count = True
-
if self.hr_resize_x == 0 and self.hr_resize_y == 0:
self.extra_generation_params["Hires upscale"] = self.hr_scale
self.hr_upscale_to_x = int(self.width * self.hr_scale)
@@ -722,6 +715,22 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
self.truncate_x = (self.hr_upscale_to_x - target_w) // opt_f
self.truncate_y = (self.hr_upscale_to_y - target_h) // opt_f
+ # special case: the user has chosen to do nothing
+ if self.hr_upscale_to_x == self.width and self.hr_upscale_to_y == self.height:
+ self.enable_hr = False
+ self.denoising_strength = None
+ self.extra_generation_params.pop("Hires upscale", None)
+ self.extra_generation_params.pop("Hires resize", None)
+ return
+
+ if not state.processing_has_refined_job_count:
+ if state.job_count == -1:
+ state.job_count = self.n_iter
+
+ shared.total_tqdm.updateTotal((self.steps + (self.hr_second_pass_steps or self.steps)) * state.job_count)
+ state.job_count = state.job_count * 2
+ state.processing_has_refined_job_count = True
+
if self.hr_second_pass_steps:
self.extra_generation_params["Hires steps"] = self.hr_second_pass_steps
From 066390eb5683945a6e094a817584ada6b1f7118e Mon Sep 17 00:00:00 2001
From: Wes Roberts
Date: Wed, 4 Jan 2023 17:58:16 -0500
Subject: [PATCH 087/172] Fixes webui.sh to exec LAUNCH_SCRIPT
---
webui.sh | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/webui.sh b/webui.sh
index 04ecbf76..c4d6521d 100755
--- a/webui.sh
+++ b/webui.sh
@@ -160,10 +160,10 @@ then
printf "\n%s\n" "${delimiter}"
printf "Accelerating launch.py..."
printf "\n%s\n" "${delimiter}"
- accelerate launch --num_cpu_threads_per_process=6 "${LAUNCH_SCRIPT}" "$@"
+ exec accelerate launch --num_cpu_threads_per_process=6 "${LAUNCH_SCRIPT}" "$@"
else
printf "\n%s\n" "${delimiter}"
printf "Launching launch.py..."
printf "\n%s\n" "${delimiter}"
- "${python_cmd}" "${LAUNCH_SCRIPT}" "$@"
+ exec "${python_cmd}" "${LAUNCH_SCRIPT}" "$@"
fi
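Prefixing the launch commands with exec makes the shell replace itself with the launched process, so signals such as SIGTERM reach launch.py directly instead of stopping at a lingering wrapper shell. The same semantics in Python, purely for illustration:

    # Illustration of exec semantics: os.execvp replaces the current process
    # image rather than forking a child; it only returns on failure.
    import os

    def launch(python_cmd, script, args):
        os.execvp(python_cmd, [python_cmd, script] + list(args))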
From 5f4fa942b8ec3ed3b15a352903489d6f9e6eb46e Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Thu, 5 Jan 2023 02:38:52 +0300
Subject: [PATCH 088/172] do not show full window image preview when right
mouse button is used
---
javascript/imageviewer.js | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/javascript/imageviewer.js b/javascript/imageviewer.js
index 97f56c07..b7bc2fe1 100644
--- a/javascript/imageviewer.js
+++ b/javascript/imageviewer.js
@@ -149,7 +149,7 @@ function showGalleryImage() {
e.style.cursor='pointer'
e.style.userSelect='none'
e.addEventListener('mousedown', function (evt) {
- if(!opts.js_modal_lightbox) return;
+ if(!opts.js_modal_lightbox || evt.button != 0) return;
modalZoomSet(gradioApp().getElementById('modalImage'), opts.js_modal_lightbox_initially_zoomed)
showModal(evt)
}, true);
From 2e30997450835ed8f80ab5e8f02f7d4c7f26dd3f Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Thu, 5 Jan 2023 10:21:17 +0300
Subject: [PATCH 089/172] move sd_model assignment to the place where we change
the sd_model
---
modules/processing.py | 14 +++++++++-----
1 file changed, 9 insertions(+), 5 deletions(-)
diff --git a/modules/processing.py b/modules/processing.py
index a12bd9e8..61e97077 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -466,12 +466,16 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
try:
for k, v in p.override_settings.items():
setattr(opts, k, v)
- if k == 'sd_hypernetwork': shared.reload_hypernetworks() # make onchange call for changing hypernet
- if k == 'sd_model_checkpoint': sd_models.reload_model_weights() # make onchange call for changing SD model
- if k == 'sd_vae': sd_vae.reload_vae_weights() # make onchange call for changing VAE
+ if k == 'sd_hypernetwork':
+ shared.reload_hypernetworks() # make onchange call for changing hypernet
+
+ if k == 'sd_model_checkpoint':
+ sd_models.reload_model_weights() # make onchange call for changing SD model
+ p.sd_model = shared.sd_model
+
+ if k == 'sd_vae':
+ sd_vae.reload_vae_weights() # make onchange call for changing VAE
- # Assign sd_model here to ensure that it reflects the model after any changes
- p.sd_model = shared.sd_model
res = process_images_inner(p)
finally:
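The point of the move is that p.sd_model is refreshed only when the checkpoint override actually reloads the model, rather than unconditionally after the loop. The same logic restructured as a dispatch table; this is a sketch, not the webui's actual code, though the module names come from the diff:

    # Sketch: map each override key that needs an "onchange" side effect to
    # the function that applies it (module names as imported in the diff).
    from modules import shared, sd_models, sd_vae

    ONCHANGE_HANDLERS = {
        'sd_hypernetwork': shared.reload_hypernetworks,
        'sd_model_checkpoint': sd_models.reload_model_weights,
        'sd_vae': sd_vae.reload_vae_weights,
    }

    def apply_overrides(p, overrides, opts):
        for k, v in overrides.items():
            setattr(opts, k, v)
            if k in ONCHANGE_HANDLERS:
                ONCHANGE_HANDLERS[k]()
            if k == 'sd_model_checkpoint':
                p.sd_model = shared.sd_model  # refresh only after a real reload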
From c3109fa18a5a105eea5e343875b540939884f304 Mon Sep 17 00:00:00 2001
From: me <25877290+Kryptortio@users.noreply.github.com>
Date: Thu, 5 Jan 2023 08:27:09 +0100
Subject: [PATCH 090/172] Adjusted prefix from i2i/t2i to txt2img and img2img,
 and removed the prefixes from img2img-exclusive scripts
---
scripts/custom_code.py | 2 +-
scripts/img2imgalt.py | 2 +-
scripts/loopback.py | 2 +-
scripts/outpainting_mk_2.py | 2 +-
scripts/poor_mans_outpainting.py | 2 +-
scripts/prompt_matrix.py | 2 +-
scripts/prompts_from_file.py | 2 +-
scripts/sd_upscale.py | 2 +-
scripts/xy_grid.py | 2 +-
9 files changed, 9 insertions(+), 9 deletions(-)
diff --git a/scripts/custom_code.py b/scripts/custom_code.py
index 841fed97..b3bbee03 100644
--- a/scripts/custom_code.py
+++ b/scripts/custom_code.py
@@ -14,7 +14,7 @@ class Script(scripts.Script):
return cmd_opts.allow_code
def ui(self, is_img2img):
- elem_prefix = ('i2i' if is_img2img else 't2i') + '_script_custom_code_'
+ elem_prefix = ('img2img' if is_img2img else 'txt2txt') + '_script_custom_code_'
code = gr.Textbox(label="Python code", lines=1, elem_id=elem_prefix + "code")
diff --git a/scripts/img2imgalt.py b/scripts/img2imgalt.py
index cddd46e7..c062dd24 100644
--- a/scripts/img2imgalt.py
+++ b/scripts/img2imgalt.py
@@ -126,7 +126,7 @@ class Script(scripts.Script):
return is_img2img
def ui(self, is_img2img):
- elem_prefix = ('i2i' if is_img2img else 't2i') + '_script_i2i_alternative_test_'
+ elem_prefix = 'script_i2i_alternative_test_'
info = gr.Markdown('''
* `CFG Scale` should be 2 or lower.
diff --git a/scripts/loopback.py b/scripts/loopback.py
index 5c1265a0..93eda1eb 100644
--- a/scripts/loopback.py
+++ b/scripts/loopback.py
@@ -17,7 +17,7 @@ class Script(scripts.Script):
return is_img2img
def ui(self, is_img2img):
- elem_prefix = ('i2i' if is_img2img else 't2i') + '_script_loopback_'
+ elem_prefix = 'script_loopback_'
loops = gr.Slider(minimum=1, maximum=32, step=1, label='Loops', value=4, elem_id=elem_prefix + "loops")
denoising_strength_change_factor = gr.Slider(minimum=0.9, maximum=1.1, step=0.01, label='Denoising strength change factor', value=1, elem_id=elem_prefix + "denoising_strength_change_factor")
diff --git a/scripts/outpainting_mk_2.py b/scripts/outpainting_mk_2.py
index 760cce64..c37bc238 100644
--- a/scripts/outpainting_mk_2.py
+++ b/scripts/outpainting_mk_2.py
@@ -129,7 +129,7 @@ class Script(scripts.Script):
if not is_img2img:
return None
- elem_prefix = ('i2i' if is_img2img else 't2i') + '_script_outpainting_mk_2_'
+ elem_prefix = 'script_outpainting_mk_2_'
info = gr.HTML("Recommended settings: Sampling Steps: 80-100, Sampler: Euler a, Denoising strength: 0.8
")
diff --git a/scripts/poor_mans_outpainting.py b/scripts/poor_mans_outpainting.py
index 6bcdcc02..784ee422 100644
--- a/scripts/poor_mans_outpainting.py
+++ b/scripts/poor_mans_outpainting.py
@@ -21,7 +21,7 @@ class Script(scripts.Script):
if not is_img2img:
return None
- elem_prefix = ('i2i' if is_img2img else 't2i') + '_script_poor_mans_outpainting_'
+ elem_prefix = 'script_poor_mans_outpainting_'
pixels = gr.Slider(label="Pixels to expand", minimum=8, maximum=256, step=8, value=128, elem_id=elem_prefix + "pixels")
mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=4, elem_id=elem_prefix + "mask_blur")
diff --git a/scripts/prompt_matrix.py b/scripts/prompt_matrix.py
index 59172315..f610c334 100644
--- a/scripts/prompt_matrix.py
+++ b/scripts/prompt_matrix.py
@@ -45,7 +45,7 @@ class Script(scripts.Script):
return "Prompt matrix"
def ui(self, is_img2img):
- elem_prefix = ('i2i' if is_img2img else 't2i') + '_script_prompt_matrix_'
+ elem_prefix = ('img2img' if is_img2img else 'txt2txt') + '_script_prompt_matrix_'
put_at_start = gr.Checkbox(label='Put variable parts at start of prompt', value=False, elem_id=elem_prefix + "put_at_start")
different_seeds = gr.Checkbox(label='Use different seed for each picture', value=False, elem_id=elem_prefix + "different_seeds")
diff --git a/scripts/prompts_from_file.py b/scripts/prompts_from_file.py
index fc8ddd8a..c6a0b709 100644
--- a/scripts/prompts_from_file.py
+++ b/scripts/prompts_from_file.py
@@ -112,7 +112,7 @@ class Script(scripts.Script):
return "Prompts from file or textbox"
def ui(self, is_img2img):
- elem_prefix = ('i2i' if is_img2img else 't2i') + '_script_prompt_from_file_'
+ elem_prefix = ('img2img' if is_img2img else 'txt2txt') + '_script_prompt_from_file_'
checkbox_iterate = gr.Checkbox(label="Iterate seed every line", value=False, elem_id=elem_prefix + "checkbox_iterate")
checkbox_iterate_batch = gr.Checkbox(label="Use same random seed for all lines", value=False, elem_id=elem_prefix + "checkbox_iterate_batch")
diff --git a/scripts/sd_upscale.py b/scripts/sd_upscale.py
index 9f483a67..2aeeb106 100644
--- a/scripts/sd_upscale.py
+++ b/scripts/sd_upscale.py
@@ -17,7 +17,7 @@ class Script(scripts.Script):
return is_img2img
def ui(self, is_img2img):
- elem_prefix = ('i2i' if is_img2img else 't2i') + '_script_sd_upscale_'
+ elem_prefix = 'script_sd_upscale_'
info = gr.HTML("Will upscale the image by the selected scale factor; use width and height sliders to set tile size
")
overlap = gr.Slider(minimum=0, maximum=256, step=16, label='Tile overlap', value=64, elem_id=elem_prefix + "overlap")
diff --git a/scripts/xy_grid.py b/scripts/xy_grid.py
index 90226ccd..8c9cfb9b 100644
--- a/scripts/xy_grid.py
+++ b/scripts/xy_grid.py
@@ -292,7 +292,7 @@ class Script(scripts.Script):
def ui(self, is_img2img):
current_axis_options = [x for x in axis_options if type(x) == AxisOption or type(x) == AxisOptionImg2Img and is_img2img]
- elem_prefix = ('i2i' if is_img2img else 't2i') + '_script_xy_grid_'
+ elem_prefix = ('img2img' if is_img2img else 'txt2txt') + '_script_xy_grid_'
with gr.Row():
x_type = gr.Dropdown(label="X type", choices=[x.label for x in current_axis_options], value=current_axis_options[1].label, type="index", elem_id=elem_prefix + "x_type")
From 42fcc79bd31e5e5485f1cf115ad505cc623d0ac9 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Thu, 5 Jan 2023 10:43:21 +0300
Subject: [PATCH 091/172] add Discard penultimate sigma to infotext
---
modules/sd_samplers.py | 10 +++++++---
1 file changed, 7 insertions(+), 3 deletions(-)
diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py
index 31b255a3..01221b89 100644
--- a/modules/sd_samplers.py
+++ b/modules/sd_samplers.py
@@ -463,8 +463,12 @@ class KDiffusionSampler:
return extra_params_kwargs
def get_sigmas(self, p, steps):
- disc = opts.always_discard_next_to_last_sigma or (self.config is not None and self.config.options.get('discard_next_to_last_sigma', False))
- steps += 1 if disc else 0
+ discard_next_to_last_sigma = self.config is not None and self.config.options.get('discard_next_to_last_sigma', False)
+ if opts.always_discard_next_to_last_sigma and not discard_next_to_last_sigma:
+ discard_next_to_last_sigma = True
+ p.extra_generation_params["Discard penultimate sigma"] = True
+
+ steps += 1 if discard_next_to_last_sigma else 0
if p.sampler_noise_scheduler_override:
sigmas = p.sampler_noise_scheduler_override(steps)
@@ -475,7 +479,7 @@ class KDiffusionSampler:
else:
sigmas = self.model_wrap.get_sigmas(steps)
- if disc:
+ if discard_next_to_last_sigma:
sigmas = torch.cat([sigmas[:-2], sigmas[-1:]])
return sigmas
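Discarding the penultimate sigma drops the next-to-last noise level while keeping the final one, which is why a step is added beforehand to keep the effective count unchanged. A small numeric illustration of the torch.cat expression above:

    # Numeric illustration: remove the next-to-last sigma, keep the last.
    import torch

    sigmas = torch.tensor([14.6, 7.9, 3.5, 1.1, 0.3, 0.0])
    discarded = torch.cat([sigmas[:-2], sigmas[-1:]])
    # discarded == tensor([14.6000, 7.9000, 3.5000, 1.1000, 0.0000])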
From f185baeb28f348e4ec97cd7070ed219b5f74a48e Mon Sep 17 00:00:00 2001
From: me <25877290+Kryptortio@users.noreply.github.com>
Date: Thu, 5 Jan 2023 09:29:07 +0100
Subject: [PATCH 092/172] Refactor elem_prefix into an elem_id function
---
scripts/custom_code.py | 9 ++++++---
scripts/img2imgalt.py | 30 +++++++++++++++++-------------
scripts/loopback.py | 15 ++++++++++-----
scripts/outpainting_mk_2.py | 18 +++++++++++-------
scripts/poor_mans_outpainting.py | 17 ++++++++++-------
scripts/prompt_matrix.py | 14 +++++++++-----
scripts/prompts_from_file.py | 18 +++++++++++-------
scripts/sd_upscale.py | 16 ++++++++++------
scripts/xy_grid.py | 20 ++++++++++++--------
9 files changed, 96 insertions(+), 61 deletions(-)
diff --git a/scripts/custom_code.py b/scripts/custom_code.py
index b3bbee03..9ce1f650 100644
--- a/scripts/custom_code.py
+++ b/scripts/custom_code.py
@@ -3,20 +3,23 @@ import gradio as gr
from modules.processing import Processed
from modules.shared import opts, cmd_opts, state
+import re
class Script(scripts.Script):
def title(self):
return "Custom code"
+ def elem_id(self, item_id):
+ gen_elem_id = ('img2img' if self.is_img2img else 'txt2txt') + '_script_' + re.sub(r'\s', '_', self.title().lower()) + '_' + item_id
+ gen_elem_id = re.sub(r'[^a-z_0-9]', '', gen_elem_id)
+ return gen_elem_id
def show(self, is_img2img):
return cmd_opts.allow_code
def ui(self, is_img2img):
- elem_prefix = ('img2img' if is_img2img else 'txt2txt') + '_script_custom_code_'
-
- code = gr.Textbox(label="Python code", lines=1, elem_id=elem_prefix + "code")
+ code = gr.Textbox(label="Python code", lines=1, elem_id=self.elem_id("code"))
return [code]
diff --git a/scripts/img2imgalt.py b/scripts/img2imgalt.py
index c062dd24..7555e874 100644
--- a/scripts/img2imgalt.py
+++ b/scripts/img2imgalt.py
@@ -16,6 +16,7 @@ import k_diffusion as K
from PIL import Image
from torch import autocast
from einops import rearrange, repeat
+import re
def find_noise_for_image(p, cond, uncond, cfg_scale, steps):
@@ -122,30 +123,33 @@ class Script(scripts.Script):
def title(self):
return "img2img alternative test"
+ def elem_id(self, item_id):
+ gen_elem_id = ('img2img' if self.is_img2img else 'txt2txt') + '_script_' + re.sub(r'\s', '_', self.title().lower()) + '_' + item_id
+ gen_elem_id = re.sub(r'[^a-z_0-9]', '', gen_elem_id)
+ return gen_elem_id
+
def show(self, is_img2img):
return is_img2img
- def ui(self, is_img2img):
- elem_prefix = 'script_i2i_alternative_test_'
-
+ def ui(self, is_img2img):
info = gr.Markdown('''
* `CFG Scale` should be 2 or lower.
''')
- override_sampler = gr.Checkbox(label="Override `Sampling method` to Euler?(this method is built for it)", value=True, elem_id=elem_prefix + "override_sampler")
+ override_sampler = gr.Checkbox(label="Override `Sampling method` to Euler?(this method is built for it)", value=True, elem_id=self.elem_id("override_sampler"))
- override_prompt = gr.Checkbox(label="Override `prompt` to the same value as `original prompt`?(and `negative prompt`)", value=True, elem_id=elem_prefix + "override_prompt")
- original_prompt = gr.Textbox(label="Original prompt", lines=1, elem_id=elem_prefix + "original_prompt")
- original_negative_prompt = gr.Textbox(label="Original negative prompt", lines=1, elem_id=elem_prefix + "original_negative_prompt")
+ override_prompt = gr.Checkbox(label="Override `prompt` to the same value as `original prompt`?(and `negative prompt`)", value=True, elem_id=self.elem_id("override_prompt"))
+ original_prompt = gr.Textbox(label="Original prompt", lines=1, elem_id=self.elem_id("original_prompt"))
+ original_negative_prompt = gr.Textbox(label="Original negative prompt", lines=1, elem_id=self.elem_id("original_negative_prompt"))
- override_steps = gr.Checkbox(label="Override `Sampling Steps` to the same value as `Decode steps`?", value=True, elem_id=elem_prefix + "override_steps")
- st = gr.Slider(label="Decode steps", minimum=1, maximum=150, step=1, value=50, elem_id=elem_prefix + "st")
+ override_steps = gr.Checkbox(label="Override `Sampling Steps` to the same value as `Decode steps`?", value=True, elem_id=self.elem_id("override_steps"))
+ st = gr.Slider(label="Decode steps", minimum=1, maximum=150, step=1, value=50, elem_id=self.elem_id("st"))
- override_strength = gr.Checkbox(label="Override `Denoising strength` to 1?", value=True, elem_id=elem_prefix + "override_strength")
+ override_strength = gr.Checkbox(label="Override `Denoising strength` to 1?", value=True, elem_id=self.elem_id("override_strength"))
- cfg = gr.Slider(label="Decode CFG scale", minimum=0.0, maximum=15.0, step=0.1, value=1.0, elem_id=elem_prefix + "cfg")
- randomness = gr.Slider(label="Randomness", minimum=0.0, maximum=1.0, step=0.01, value=0.0, elem_id=elem_prefix + "randomness")
- sigma_adjustment = gr.Checkbox(label="Sigma adjustment for finding noise for image", value=False, elem_id=elem_prefix + "sigma_adjustment")
+ cfg = gr.Slider(label="Decode CFG scale", minimum=0.0, maximum=15.0, step=0.1, value=1.0, elem_id=self.elem_id("cfg"))
+ randomness = gr.Slider(label="Randomness", minimum=0.0, maximum=1.0, step=0.01, value=0.0, elem_id=self.elem_id("randomness"))
+ sigma_adjustment = gr.Checkbox(label="Sigma adjustment for finding noise for image", value=False, elem_id=self.elem_id("sigma_adjustment"))
return [
info,
diff --git a/scripts/loopback.py b/scripts/loopback.py
index 93eda1eb..4df7b73f 100644
--- a/scripts/loopback.py
+++ b/scripts/loopback.py
@@ -8,19 +8,24 @@ from modules import processing, shared, sd_samplers, images
from modules.processing import Processed
from modules.sd_samplers import samplers
from modules.shared import opts, cmd_opts, state
+import re
+
class Script(scripts.Script):
def title(self):
return "Loopback"
+ def elem_id(self, item_id):
+ gen_elem_id = ('img2img' if self.is_img2img else 'txt2txt') + '_script_' + re.sub(r'\s', '_', self.title().lower()) + '_' + item_id
+ gen_elem_id = re.sub(r'[^a-z_0-9]', '', gen_elem_id)
+ return gen_elem_id
+
def show(self, is_img2img):
return is_img2img
- def ui(self, is_img2img):
- elem_prefix = 'script_loopback_'
-
- loops = gr.Slider(minimum=1, maximum=32, step=1, label='Loops', value=4, elem_id=elem_prefix + "loops")
- denoising_strength_change_factor = gr.Slider(minimum=0.9, maximum=1.1, step=0.01, label='Denoising strength change factor', value=1, elem_id=elem_prefix + "denoising_strength_change_factor")
+ def ui(self, is_img2img):
+ loops = gr.Slider(minimum=1, maximum=32, step=1, label='Loops', value=4, elem_id=self.elem_id("loops"))
+ denoising_strength_change_factor = gr.Slider(minimum=0.9, maximum=1.1, step=0.01, label='Denoising strength change factor', value=1, elem_id=self.elem_id("denoising_strength_change_factor"))
return [loops, denoising_strength_change_factor]
diff --git a/scripts/outpainting_mk_2.py b/scripts/outpainting_mk_2.py
index c37bc238..b4a0dc73 100644
--- a/scripts/outpainting_mk_2.py
+++ b/scripts/outpainting_mk_2.py
@@ -10,6 +10,7 @@ from PIL import Image, ImageDraw
from modules import images, processing, devices
from modules.processing import Processed, process_images
from modules.shared import opts, cmd_opts, state
+import re
# this function is taken from https://github.com/parlance-zz/g-diffuser-bot
@@ -122,6 +123,11 @@ class Script(scripts.Script):
def title(self):
return "Outpainting mk2"
+ def elem_id(self, item_id):
+ gen_elem_id = ('img2img' if self.is_img2img else 'txt2txt') + '_script_' + re.sub(r'\s', '_', self.title().lower()) + '_' + item_id
+ gen_elem_id = re.sub(r'[^a-z_0-9]', '', gen_elem_id)
+ return gen_elem_id
+
def show(self, is_img2img):
return is_img2img
@@ -129,15 +135,13 @@ class Script(scripts.Script):
if not is_img2img:
return None
- elem_prefix = 'script_outpainting_mk_2_'
-
info = gr.HTML("Recommended settings: Sampling Steps: 80-100, Sampler: Euler a, Denoising strength: 0.8
")
- pixels = gr.Slider(label="Pixels to expand", minimum=8, maximum=256, step=8, value=128, elem_id=elem_prefix + "pixels")
- mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=8, elem_id=elem_prefix + "mask_blur")
- direction = gr.CheckboxGroup(label="Outpainting direction", choices=['left', 'right', 'up', 'down'], value=['left', 'right', 'up', 'down'], elem_id=elem_prefix + "direction")
- noise_q = gr.Slider(label="Fall-off exponent (lower=higher detail)", minimum=0.0, maximum=4.0, step=0.01, value=1.0, elem_id=elem_prefix + "noise_q")
- color_variation = gr.Slider(label="Color variation", minimum=0.0, maximum=1.0, step=0.01, value=0.05, elem_id=elem_prefix + "color_variation")
+ pixels = gr.Slider(label="Pixels to expand", minimum=8, maximum=256, step=8, value=128, elem_id=self.elem_id("pixels"))
+ mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=8, elem_id=self.elem_id("mask_blur"))
+ direction = gr.CheckboxGroup(label="Outpainting direction", choices=['left', 'right', 'up', 'down'], value=['left', 'right', 'up', 'down'], elem_id=self.elem_id("direction"))
+ noise_q = gr.Slider(label="Fall-off exponent (lower=higher detail)", minimum=0.0, maximum=4.0, step=0.01, value=1.0, elem_id=self.elem_id("noise_q"))
+ color_variation = gr.Slider(label="Color variation", minimum=0.0, maximum=1.0, step=0.01, value=0.05, elem_id=self.elem_id("color_variation"))
return [info, pixels, mask_blur, direction, noise_q, color_variation]
diff --git a/scripts/poor_mans_outpainting.py b/scripts/poor_mans_outpainting.py
index 784ee422..1c7dc467 100644
--- a/scripts/poor_mans_outpainting.py
+++ b/scripts/poor_mans_outpainting.py
@@ -7,26 +7,29 @@ from PIL import Image, ImageDraw
from modules import images, processing, devices
from modules.processing import Processed, process_images
from modules.shared import opts, cmd_opts, state
-
+import re
class Script(scripts.Script):
def title(self):
return "Poor man's outpainting"
+ def elem_id(self, item_id):
+ gen_elem_id = ('img2img' if self.is_img2img else 'txt2txt') + '_script_' + re.sub(r'\s', '_', self.title().lower()) + '_' + item_id
+ gen_elem_id = re.sub(r'[^a-z_0-9]', '', gen_elem_id)
+ return gen_elem_id
+
def show(self, is_img2img):
return is_img2img
def ui(self, is_img2img):
if not is_img2img:
return None
-
- elem_prefix = 'script_poor_mans_outpainting_'
- pixels = gr.Slider(label="Pixels to expand", minimum=8, maximum=256, step=8, value=128, elem_id=elem_prefix + "pixels")
- mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=4, elem_id=elem_prefix + "mask_blur")
- inpainting_fill = gr.Radio(label='Masked content', choices=['fill', 'original', 'latent noise', 'latent nothing'], value='fill', type="index", elem_id=elem_prefix + "inpainting_fill")
- direction = gr.CheckboxGroup(label="Outpainting direction", choices=['left', 'right', 'up', 'down'], value=['left', 'right', 'up', 'down'], elem_id=elem_prefix + "direction")
+ pixels = gr.Slider(label="Pixels to expand", minimum=8, maximum=256, step=8, value=128, elem_id=self.elem_id("pixels"))
+ mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=4, elem_id=self.elem_id("mask_blur"))
+ inpainting_fill = gr.Radio(label='Masked content', choices=['fill', 'original', 'latent noise', 'latent nothing'], value='fill', type="index", elem_id=self.elem_id("inpainting_fill"))
+ direction = gr.CheckboxGroup(label="Outpainting direction", choices=['left', 'right', 'up', 'down'], value=['left', 'right', 'up', 'down'], elem_id=self.elem_id("direction"))
return [pixels, mask_blur, inpainting_fill, direction]
diff --git a/scripts/prompt_matrix.py b/scripts/prompt_matrix.py
index f610c334..278d2e68 100644
--- a/scripts/prompt_matrix.py
+++ b/scripts/prompt_matrix.py
@@ -10,6 +10,7 @@ from modules import images
from modules.processing import process_images, Processed
from modules.shared import opts, cmd_opts, state
import modules.sd_samplers
+import re
def draw_xy_grid(xs, ys, x_label, y_label, cell):
@@ -44,11 +45,14 @@ class Script(scripts.Script):
def title(self):
return "Prompt matrix"
- def ui(self, is_img2img):
- elem_prefix = ('img2img' if is_img2img else 'txt2txt') + '_script_prompt_matrix_'
-
- put_at_start = gr.Checkbox(label='Put variable parts at start of prompt', value=False, elem_id=elem_prefix + "put_at_start")
- different_seeds = gr.Checkbox(label='Use different seed for each picture', value=False, elem_id=elem_prefix + "different_seeds")
+ def elem_id(self, item_id):
+ gen_elem_id = ('img2img' if self.is_img2img else 'txt2txt') + '_script_' + re.sub(r'\s', '_', self.title().lower()) + '_' + item_id
+ gen_elem_id = re.sub(r'[^a-z_0-9]', '', gen_elem_id)
+ return gen_elem_id
+
+ def ui(self, is_img2img):
+ put_at_start = gr.Checkbox(label='Put variable parts at start of prompt', value=False, elem_id=self.elem_id("put_at_start"))
+ different_seeds = gr.Checkbox(label='Use different seed for each picture', value=False, elem_id=self.elem_id("different_seeds"))
return [put_at_start, different_seeds]
diff --git a/scripts/prompts_from_file.py b/scripts/prompts_from_file.py
index c6a0b709..5c84c3e9 100644
--- a/scripts/prompts_from_file.py
+++ b/scripts/prompts_from_file.py
@@ -13,6 +13,7 @@ from modules import sd_samplers
from modules.processing import Processed, process_images
from PIL import Image
from modules.shared import opts, cmd_opts, state
+import re
def process_string_tag(tag):
@@ -111,14 +112,17 @@ class Script(scripts.Script):
def title(self):
return "Prompts from file or textbox"
- def ui(self, is_img2img):
- elem_prefix = ('img2img' if is_img2img else 'txt2txt') + '_script_prompt_from_file_'
-
- checkbox_iterate = gr.Checkbox(label="Iterate seed every line", value=False, elem_id=elem_prefix + "checkbox_iterate")
- checkbox_iterate_batch = gr.Checkbox(label="Use same random seed for all lines", value=False, elem_id=elem_prefix + "checkbox_iterate_batch")
+ def elem_id(self, item_id):
+ gen_elem_id = ('img2img' if self.is_img2img else 'txt2txt') + '_script_' + re.sub(r'\s', '_', self.title().lower()) + '_' + item_id
+ gen_elem_id = re.sub(r'[^a-z_0-9]', '', gen_elem_id)
+ return gen_elem_id
- prompt_txt = gr.Textbox(label="List of prompt inputs", lines=1, elem_id=elem_prefix + "prompt_txt")
- file = gr.File(label="Upload prompt inputs", type='bytes', elem_id=elem_prefix + "file")
+ def ui(self, is_img2img):
+ checkbox_iterate = gr.Checkbox(label="Iterate seed every line", value=False, elem_id=self.elem_id("checkbox_iterate"))
+ checkbox_iterate_batch = gr.Checkbox(label="Use same random seed for all lines", value=False, elem_id=self.elem_id("checkbox_iterate_batch"))
+
+ prompt_txt = gr.Textbox(label="List of prompt inputs", lines=1, elem_id=self.elem_id("prompt_txt"))
+ file = gr.File(label="Upload prompt inputs", type='bytes', elem_id=self.elem_id("file"))
file.change(fn=load_prompt_file, inputs=[file], outputs=[file, prompt_txt, prompt_txt])
diff --git a/scripts/sd_upscale.py b/scripts/sd_upscale.py
index 2aeeb106..247e755b 100644
--- a/scripts/sd_upscale.py
+++ b/scripts/sd_upscale.py
@@ -7,22 +7,26 @@ from PIL import Image
from modules import processing, shared, sd_samplers, images, devices
from modules.processing import Processed
from modules.shared import opts, cmd_opts, state
+import re
class Script(scripts.Script):
def title(self):
return "SD upscale"
+ def elem_id(self, item_id):
+ gen_elem_id = ('img2img' if self.is_img2img else 'txt2txt') + '_script_' + re.sub(r'\s', '_', self.title().lower()) + '_' + item_id
+ gen_elem_id = re.sub(r'[^a-z_0-9]', '', gen_elem_id)
+ return gen_elem_id
+
def show(self, is_img2img):
return is_img2img
- def ui(self, is_img2img):
- elem_prefix = 'script_sd_upscale_'
-
+ def ui(self, is_img2img):
info = gr.HTML("Will upscale the image by the selected scale factor; use width and height sliders to set tile size
")
- overlap = gr.Slider(minimum=0, maximum=256, step=16, label='Tile overlap', value=64, elem_id=elem_prefix + "overlap")
- scale_factor = gr.Slider(minimum=1.0, maximum=4.0, step=0.05, label='Scale Factor', value=2.0, elem_id=elem_prefix + "scale_factor")
- upscaler_index = gr.Radio(label='Upscaler', choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name, type="index", elem_id=elem_prefix + "upscaler_index")
+ overlap = gr.Slider(minimum=0, maximum=256, step=16, label='Tile overlap', value=64, elem_id=self.elem_id("overlap"))
+ scale_factor = gr.Slider(minimum=1.0, maximum=4.0, step=0.05, label='Scale Factor', value=2.0, elem_id=self.elem_id("scale_factor"))
+ upscaler_index = gr.Radio(label='Upscaler', choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name, type="index", elem_id=self.elem_id("upscaler_index"))
return [info, overlap, upscaler_index, scale_factor]
diff --git a/scripts/xy_grid.py b/scripts/xy_grid.py
index 8c9cfb9b..b277a439 100644
--- a/scripts/xy_grid.py
+++ b/scripts/xy_grid.py
@@ -290,21 +290,25 @@ class Script(scripts.Script):
def title(self):
return "X/Y plot"
+ def elem_id(self, item_id):
+ gen_elem_id = ('img2img' if self.is_img2img else 'txt2txt') + '_script_' + re.sub(r'\s', '_', self.title().lower()) + '_' + item_id
+ gen_elem_id = re.sub(r'[^a-z_0-9]', '', gen_elem_id)
+ return gen_elem_id
+
def ui(self, is_img2img):
current_axis_options = [x for x in axis_options if type(x) == AxisOption or type(x) == AxisOptionImg2Img and is_img2img]
- elem_prefix = ('img2img' if is_img2img else 'txt2txt') + '_script_xy_grid_'
with gr.Row():
- x_type = gr.Dropdown(label="X type", choices=[x.label for x in current_axis_options], value=current_axis_options[1].label, type="index", elem_id=elem_prefix + "x_type")
- x_values = gr.Textbox(label="X values", lines=1, elem_id=elem_prefix + "x_values")
+ x_type = gr.Dropdown(label="X type", choices=[x.label for x in current_axis_options], value=current_axis_options[1].label, type="index", elem_id=self.elem_id("x_type"))
+ x_values = gr.Textbox(label="X values", lines=1, elem_id=self.elem_id("x_values"))
with gr.Row():
- y_type = gr.Dropdown(label="Y type", choices=[x.label for x in current_axis_options], value=current_axis_options[0].label, type="index", elem_id=elem_prefix + "y_type")
- y_values = gr.Textbox(label="Y values", lines=1, elem_id=elem_prefix + "y_values")
+ y_type = gr.Dropdown(label="Y type", choices=[x.label for x in current_axis_options], value=current_axis_options[0].label, type="index", elem_id=self.elem_id("y_type"))
+ y_values = gr.Textbox(label="Y values", lines=1, elem_id=self.elem_id("y_values"))
- draw_legend = gr.Checkbox(label='Draw legend', value=True, elem_id=elem_prefix + "draw_legend")
- include_lone_images = gr.Checkbox(label='Include Separate Images', value=False, elem_id=elem_prefix + "include_lone_images")
- no_fixed_seeds = gr.Checkbox(label='Keep -1 for seeds', value=False, elem_id=elem_prefix + "no_fixed_seeds")
+ draw_legend = gr.Checkbox(label='Draw legend', value=True, elem_id=self.elem_id("draw_legend"))
+ include_lone_images = gr.Checkbox(label='Include Separate Images', value=False, elem_id=self.elem_id("include_lone_images"))
+ no_fixed_seeds = gr.Checkbox(label='Keep -1 for seeds', value=False, elem_id=self.elem_id("no_fixed_seeds"))
return [x_type, x_values, y_type, y_values, draw_legend, include_lone_images, no_fixed_seeds]
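The helper duplicated into every script builds an id from the tab, the normalized script title, and the item name. A standalone version with the same regexes, reproducing the 'txt2txt' prefix string exactly as it appears in the patch:

    # Standalone version of the duplicated elem_id helper (same regexes).
    import re

    def elem_id(is_img2img, title, item_id):
        gen = ('img2img' if is_img2img else 'txt2txt') + '_script_' + \
              re.sub(r'\s', '_', title.lower()) + '_' + item_id
        return re.sub(r'[^a-z_0-9]', '', gen)

    assert elem_id(False, "X/Y plot", "x_type") == 'txt2txt_script_xy_plot_x_type'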
From 997461d3dd86f51c06ea0c2eff17ce8b8b48c0af Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Thu, 5 Jan 2023 11:57:01 +0300
Subject: [PATCH 093/172] add footer with versions
---
html/footer.html | 4 ++++
launch.py | 20 ++++++++++++++++----
modules/ui.py | 31 ++++++++++++++++++++++++++++++-
style.css | 5 +++++
4 files changed, 55 insertions(+), 5 deletions(-)
diff --git a/html/footer.html b/html/footer.html
index a8f2adf7..bad87ff6 100644
--- a/html/footer.html
+++ b/html/footer.html
@@ -7,3 +7,7 @@
•
Reload UI
+
+<div class="versions">
+{versions}
+</div>
diff --git a/launch.py b/launch.py
index af0d418b..49b91b1f 100644
--- a/launch.py
+++ b/launch.py
@@ -13,6 +13,21 @@ dir_extensions = "extensions"
python = sys.executable
git = os.environ.get('GIT', "git")
index_url = os.environ.get('INDEX_URL', "")
+stored_commit_hash = None
+
+
+def commit_hash():
+ global stored_commit_hash
+
+ if stored_commit_hash is not None:
+ return stored_commit_hash
+
+ try:
+ stored_commit_hash = run(f"{git} rev-parse HEAD").strip()
+ except Exception:
+ stored_commit_hash = ""
+
+ return stored_commit_hash
def extract_arg(args, name):
@@ -194,10 +209,7 @@ def prepare_environment():
xformers = '--xformers' in sys.argv
ngrok = '--ngrok' in sys.argv
- try:
- commit = run(f"{git} rev-parse HEAD").strip()
- except Exception:
- commit = ""
+ commit = commit_hash()
print(f"Python {sys.version}")
print(f"Commit hash: {commit}")
diff --git a/modules/ui.py b/modules/ui.py
index bb64fe20..81d96c5b 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -1696,7 +1696,9 @@ def create_ui():
if os.path.exists("html/footer.html"):
with open("html/footer.html", encoding="utf8") as file:
- gr.HTML(file.read(), elem_id="footer")
+ footer = file.read()
+ footer = footer.format(versions=versions_html())
+ gr.HTML(footer, elem_id="footer")
text_settings = gr.Textbox(elem_id="settings_json", value=lambda: opts.dumpjson(), visible=False)
settings_submit.click(
@@ -1857,3 +1859,30 @@ def reload_javascript():
if not hasattr(shared, 'GradioTemplateResponseOriginal'):
shared.GradioTemplateResponseOriginal = gradio.routes.templates.TemplateResponse
+
+
+def versions_html():
+ import torch
+ import launch
+
+ python_version = ".".join([str(x) for x in sys.version_info[0:3]])
+ commit = launch.commit_hash()
+ short_commit = commit[0:8]
+
+ if shared.xformers_available:
+ import xformers
+ xformers_version = xformers.__version__
+ else:
+ xformers_version = "N/A"
+
+ return f"""
+python: {python_version}
+ •
+torch: {torch.__version__}
+ •
+xformers: {xformers_version}
+ •
+gradio: {gr.__version__}
+ •
+commit: {short_commit}
+"""
diff --git a/style.css b/style.css
index 09ee540b..ee74d79e 100644
--- a/style.css
+++ b/style.css
@@ -628,6 +628,11 @@ footer {
display: inline-block;
}
+#footer .versions{
+ font-size: 85%;
+ opacity: 0.85;
+}
+
/* The following handles localization for right-to-left (RTL) languages like Arabic.
The rtl media type will only be activated by the logic in javascript/localization.js.
If you change anything above, you need to make sure it is RTL compliant by just running
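commit_hash() memoizes the git call in a module-level global so that launch logging and the new footer do not shell out twice. An equivalent sketch using functools.lru_cache and subprocess directly, in place of launch.py's run helper:

    # Equivalent memoization sketch: cache the hash after the first call.
    import functools
    import subprocess

    @functools.lru_cache(maxsize=None)
    def commit_hash(git="git"):
        try:
            return subprocess.check_output([git, "rev-parse", "HEAD"], text=True).strip()
        except Exception:
            return ""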
From f8d0cf6a6ec4911559cfecb9a9d1d46b547b38e8 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Thu, 5 Jan 2023 12:08:11 +0300
Subject: [PATCH 094/172] rework #6329 to remove duplicate code and prevent tab
 names from showing in ids for scripts that only exist on one tab
---
modules/scripts.py | 10 ++++++++++
scripts/custom_code.py | 6 ------
scripts/img2imgalt.py | 6 ------
scripts/loopback.py | 6 ------
scripts/outpainting_mk_2.py | 6 ------
scripts/poor_mans_outpainting.py | 6 ------
scripts/prompt_matrix.py | 6 ------
scripts/prompts_from_file.py | 6 ------
scripts/sd_upscale.py | 6 ------
scripts/xy_grid.py | 5 -----
10 files changed, 10 insertions(+), 53 deletions(-)
diff --git a/modules/scripts.py b/modules/scripts.py
index 722f8685..0c44f191 100644
--- a/modules/scripts.py
+++ b/modules/scripts.py
@@ -1,4 +1,5 @@
import os
+import re
import sys
import traceback
from collections import namedtuple
@@ -128,6 +129,15 @@ class Script:
"""unused"""
return ""
+ def elem_id(self, item_id):
+ """helper function to generate id for a HTML element, constructs final id out of script name, tab and user-supplied item_id"""
+
+ need_tabname = self.show(True) == self.show(False)
+ tabname = ('img2img' if self.is_img2img else 'txt2txt') + "_" if need_tabname else ""
+ title = re.sub(r'[^a-z_0-9]', '', re.sub(r'\s', '_', self.title().lower()))
+
+ return f'script_{tabname}{title}_{item_id}'
+
current_basedir = paths.script_path
diff --git a/scripts/custom_code.py b/scripts/custom_code.py
index 9ce1f650..d29113e6 100644
--- a/scripts/custom_code.py
+++ b/scripts/custom_code.py
@@ -3,18 +3,12 @@ import gradio as gr
from modules.processing import Processed
from modules.shared import opts, cmd_opts, state
-import re
class Script(scripts.Script):
def title(self):
return "Custom code"
- def elem_id(self, item_id):
- gen_elem_id = ('img2img' if self.is_img2img else 'txt2txt') + '_script_' + re.sub(r'\s', '_', self.title().lower()) + '_' + item_id
- gen_elem_id = re.sub(r'[^a-z_0-9]', '', gen_elem_id)
- return gen_elem_id
-
def show(self, is_img2img):
return cmd_opts.allow_code
diff --git a/scripts/img2imgalt.py b/scripts/img2imgalt.py
index 7555e874..cbdfc6b3 100644
--- a/scripts/img2imgalt.py
+++ b/scripts/img2imgalt.py
@@ -16,7 +16,6 @@ import k_diffusion as K
from PIL import Image
from torch import autocast
from einops import rearrange, repeat
-import re
def find_noise_for_image(p, cond, uncond, cfg_scale, steps):
@@ -123,11 +122,6 @@ class Script(scripts.Script):
def title(self):
return "img2img alternative test"
- def elem_id(self, item_id):
- gen_elem_id = ('img2img' if self.is_img2img else 'txt2txt') + '_script_' + re.sub(r'\s', '_', self.title().lower()) + '_' + item_id
- gen_elem_id = re.sub(r'[^a-z_0-9]', '', gen_elem_id)
- return gen_elem_id
-
def show(self, is_img2img):
return is_img2img
diff --git a/scripts/loopback.py b/scripts/loopback.py
index 4df7b73f..1dab9476 100644
--- a/scripts/loopback.py
+++ b/scripts/loopback.py
@@ -8,18 +8,12 @@ from modules import processing, shared, sd_samplers, images
from modules.processing import Processed
from modules.sd_samplers import samplers
from modules.shared import opts, cmd_opts, state
-import re
class Script(scripts.Script):
def title(self):
return "Loopback"
- def elem_id(self, item_id):
- gen_elem_id = ('img2img' if self.is_img2img else 'txt2txt') + '_script_' + re.sub(r'\s', '_', self.title().lower()) + '_' + item_id
- gen_elem_id = re.sub(r'[^a-z_0-9]', '', gen_elem_id)
- return gen_elem_id
-
def show(self, is_img2img):
return is_img2img
diff --git a/scripts/outpainting_mk_2.py b/scripts/outpainting_mk_2.py
index b4a0dc73..0906da6a 100644
--- a/scripts/outpainting_mk_2.py
+++ b/scripts/outpainting_mk_2.py
@@ -10,7 +10,6 @@ from PIL import Image, ImageDraw
from modules import images, processing, devices
from modules.processing import Processed, process_images
from modules.shared import opts, cmd_opts, state
-import re
# this function is taken from https://github.com/parlance-zz/g-diffuser-bot
@@ -123,11 +122,6 @@ class Script(scripts.Script):
def title(self):
return "Outpainting mk2"
- def elem_id(self, item_id):
- gen_elem_id = ('img2img' if self.is_img2img else 'txt2txt') + '_script_' + re.sub(r'\s', '_', self.title().lower()) + '_' + item_id
- gen_elem_id = re.sub(r'[^a-z_0-9]', '', gen_elem_id)
- return gen_elem_id
-
def show(self, is_img2img):
return is_img2img
diff --git a/scripts/poor_mans_outpainting.py b/scripts/poor_mans_outpainting.py
index 1c7dc467..d8feda00 100644
--- a/scripts/poor_mans_outpainting.py
+++ b/scripts/poor_mans_outpainting.py
@@ -7,18 +7,12 @@ from PIL import Image, ImageDraw
from modules import images, processing, devices
from modules.processing import Processed, process_images
from modules.shared import opts, cmd_opts, state
-import re
class Script(scripts.Script):
def title(self):
return "Poor man's outpainting"
- def elem_id(self, item_id):
- gen_elem_id = ('img2img' if self.is_img2img else 'txt2txt') + '_script_' + re.sub(r'\s', '_', self.title().lower()) + '_' + item_id
- gen_elem_id = re.sub(r'[^a-z_0-9]', '', gen_elem_id)
- return gen_elem_id
-
def show(self, is_img2img):
return is_img2img
diff --git a/scripts/prompt_matrix.py b/scripts/prompt_matrix.py
index 278d2e68..dd95e588 100644
--- a/scripts/prompt_matrix.py
+++ b/scripts/prompt_matrix.py
@@ -10,7 +10,6 @@ from modules import images
from modules.processing import process_images, Processed
from modules.shared import opts, cmd_opts, state
import modules.sd_samplers
-import re
def draw_xy_grid(xs, ys, x_label, y_label, cell):
@@ -45,11 +44,6 @@ class Script(scripts.Script):
def title(self):
return "Prompt matrix"
- def elem_id(self, item_id):
- gen_elem_id = ('img2img' if self.is_img2img else 'txt2txt') + '_script_' + re.sub(r'\s', '_', self.title().lower()) + '_' + item_id
- gen_elem_id = re.sub(r'[^a-z_0-9]', '', gen_elem_id)
- return gen_elem_id
-
def ui(self, is_img2img):
put_at_start = gr.Checkbox(label='Put variable parts at start of prompt', value=False, elem_id=self.elem_id("put_at_start"))
different_seeds = gr.Checkbox(label='Use different seed for each picture', value=False, elem_id=self.elem_id("different_seeds"))
diff --git a/scripts/prompts_from_file.py b/scripts/prompts_from_file.py
index 5c84c3e9..2751f98a 100644
--- a/scripts/prompts_from_file.py
+++ b/scripts/prompts_from_file.py
@@ -13,7 +13,6 @@ from modules import sd_samplers
from modules.processing import Processed, process_images
from PIL import Image
from modules.shared import opts, cmd_opts, state
-import re
def process_string_tag(tag):
@@ -112,11 +111,6 @@ class Script(scripts.Script):
def title(self):
return "Prompts from file or textbox"
- def elem_id(self, item_id):
- gen_elem_id = ('img2img' if self.is_img2img else 'txt2txt') + '_script_' + re.sub(r'\s', '_', self.title().lower()) + '_' + item_id
- gen_elem_id = re.sub(r'[^a-z_0-9]', '', gen_elem_id)
- return gen_elem_id
-
def ui(self, is_img2img):
checkbox_iterate = gr.Checkbox(label="Iterate seed every line", value=False, elem_id=self.elem_id("checkbox_iterate"))
checkbox_iterate_batch = gr.Checkbox(label="Use same random seed for all lines", value=False, elem_id=self.elem_id("checkbox_iterate_batch"))
diff --git a/scripts/sd_upscale.py b/scripts/sd_upscale.py
index 247e755b..9b8ffd85 100644
--- a/scripts/sd_upscale.py
+++ b/scripts/sd_upscale.py
@@ -7,18 +7,12 @@ from PIL import Image
from modules import processing, shared, sd_samplers, images, devices
from modules.processing import Processed
from modules.shared import opts, cmd_opts, state
-import re
class Script(scripts.Script):
def title(self):
return "SD upscale"
- def elem_id(self, item_id):
- gen_elem_id = ('img2img' if self.is_img2img else 'txt2txt') + '_script_' + re.sub(r'\s', '_', self.title().lower()) + '_' + item_id
- gen_elem_id = re.sub(r'[^a-z_0-9]', '', gen_elem_id)
- return gen_elem_id
-
def show(self, is_img2img):
return is_img2img
diff --git a/scripts/xy_grid.py b/scripts/xy_grid.py
index b277a439..f04d9b7e 100644
--- a/scripts/xy_grid.py
+++ b/scripts/xy_grid.py
@@ -290,11 +290,6 @@ class Script(scripts.Script):
def title(self):
return "X/Y plot"
- def elem_id(self, item_id):
- gen_elem_id = ('img2img' if self.is_img2img else 'txt2txt') + '_script_' + re.sub(r'\s', '_', self.title().lower()) + '_' + item_id
- gen_elem_id = re.sub(r'[^a-z_0-9]', '', gen_elem_id)
- return gen_elem_id
-
def ui(self, is_img2img):
current_axis_options = [x for x in axis_options if type(x) == AxisOption or type(x) == AxisOptionImg2Img and is_img2img]
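For illustration, a standalone sketch of what the new Script.elem_id helper produces. The tab prefix is included only when show() returns the same value for both tabs, i.e. the script is available on txt2img and img2img alike (sample values hypothetical):

    import re

    def elem_id_sketch(title, item_id, is_img2img, need_tabname):
        # tab prefix only when the script exists on both tabs;
        # 'txt2txt' mirrors the spelling used in the patch
        tabname = (('img2img' if is_img2img else 'txt2txt') + '_') if need_tabname else ''
        # lowercase the title, turn whitespace into underscores, drop everything else
        slug = re.sub(r'[^a-z_0-9]', '', re.sub(r'\s', '_', title.lower()))
        return f'script_{tabname}{slug}_{item_id}'

    print(elem_id_sketch('X/Y plot', 'x_type', False, True))
    # script_txt2txt_xy_plot_x_type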
From eea8fc40e16664ddc8a9aec77206da704a35dde0 Mon Sep 17 00:00:00 2001
From: timntorres
Date: Thu, 5 Jan 2023 07:24:22 -0800
Subject: [PATCH 095/172] Add option to save ti settings to file.
---
modules/shared.py | 1 +
.../textual_inversion/textual_inversion.py | 30 +++++++++++++++++--
2 files changed, 28 insertions(+), 3 deletions(-)
diff --git a/modules/shared.py b/modules/shared.py
index e0f44c6d..933cd738 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -362,6 +362,7 @@ options_templates.update(options_section(('training', "Training"), {
"unload_models_when_training": OptionInfo(False, "Move VAE and CLIP to RAM when training if possible. Saves VRAM."),
"pin_memory": OptionInfo(False, "Turn on pin_memory for DataLoader. Makes training slightly faster but can increase memory usage."),
"save_optimizer_state": OptionInfo(False, "Saves Optimizer state as separate *.optim file. Training of embedding or HN can be resumed with the matching optim file."),
+ "save_train_settings_to_txt": OptionInfo(False, "Save textual inversion and hypernet settings to a text file when training starts."),
"dataset_filename_word_regex": OptionInfo("", "Filename word regex"),
"dataset_filename_join_string": OptionInfo(" ", "Filename join string"),
"training_image_repeats_per_epoch": OptionInfo(1, "Number of repeats for a single input image per epoch; used only for displaying epoch number", gr.Number, {"precision": 0}),
diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py
index 71e07bcc..2bed2ecb 100644
--- a/modules/textual_inversion/textual_inversion.py
+++ b/modules/textual_inversion/textual_inversion.py
@@ -1,6 +1,7 @@
import os
import sys
import traceback
+import inspect
import torch
import tqdm
@@ -229,6 +230,28 @@ def write_loss(log_directory, filename, step, epoch_len, values):
**values,
})
+def save_settings_to_file(initial_step, num_of_dataset_images, embedding_name, vectors_per_token, learn_rate, batch_size, data_root, log_directory, training_width, training_height, steps, create_image_every, save_embedding_every, template_file, save_image_with_stored_embedding, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height):
+ checkpoint = sd_models.select_checkpoint()
+ model_name = checkpoint.model_name
+ model_hash = '[{}]'.format(checkpoint.hash)
+
+ # Get a list of the argument names.
+ arg_names = inspect.getfullargspec(save_settings_to_file).args
+
+ # Create a list of the argument names to include in the settings string.
+ names = arg_names[:16] # Include all arguments up until the preview-related ones.
+ if preview_from_txt2img:
+ names.extend(arg_names[16:]) # Include all remaining arguments if `preview_from_txt2img` is True.
+
+ # Build the settings string.
+ settings_str = "datetime : " + datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") + "\n"
+ for name in names:
+ value = locals()[name]
+ settings_str += f"{name}: {value}\n"
+
+ with open(os.path.join(log_directory, 'settings.txt'), "a+") as fout:
+ fout.write(settings_str + "\n\n")
+
def validate_train_inputs(model_name, learn_rate, batch_size, gradient_step, data_root, template_file, steps, save_model_every, create_image_every, log_directory, name="embedding"):
assert model_name, f"{name} not selected"
assert learn_rate, "Learning rate is empty or 0"
@@ -292,13 +315,13 @@ def train_embedding(embedding_name, learn_rate, batch_size, gradient_step, data_
if initial_step >= steps:
shared.state.textinfo = "Model has already been trained beyond specified max steps"
return embedding, filename
+
scheduler = LearnRateScheduler(learn_rate, steps, initial_step)
-
clip_grad = torch.nn.utils.clip_grad_value_ if clip_grad_mode == "value" else \
torch.nn.utils.clip_grad_norm_ if clip_grad_mode == "norm" else \
None
if clip_grad:
- clip_grad_sched = LearnRateScheduler(clip_grad_value, steps, ititial_step, verbose=False)
+ clip_grad_sched = LearnRateScheduler(clip_grad_value, steps, initial_step, verbose=False)
# dataset loading may take a while, so input validations and early returns should be done before this
shared.state.textinfo = f"Preparing dataset from {html.escape(data_root)}..."
old_parallel_processing_allowed = shared.parallel_processing_allowed
@@ -306,7 +329,8 @@ def train_embedding(embedding_name, learn_rate, batch_size, gradient_step, data_
pin_memory = shared.opts.pin_memory
ds = modules.textual_inversion.dataset.PersonalizedBase(data_root=data_root, width=training_width, height=training_height, repeats=shared.opts.training_image_repeats_per_epoch, placeholder_token=embedding_name, model=shared.sd_model, cond_model=shared.sd_model.cond_stage_model, device=devices.device, template_file=template_file, batch_size=batch_size, gradient_step=gradient_step, shuffle_tags=shuffle_tags, tag_drop_out=tag_drop_out, latent_sampling_method=latent_sampling_method)
-
+ if shared.opts.save_train_settings_to_txt:
+ save_settings_to_file(initial_step , len(ds) , embedding_name, len(embedding.vec) , learn_rate, batch_size, data_root, log_directory, training_width, training_height, steps, create_image_every, save_embedding_every, template_file, save_image_with_stored_embedding, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height)
latent_sampling_method = ds.latent_sampling_method
dl = modules.textual_inversion.dataset.PersonalizedDataLoader(ds, latent_sampling_method=latent_sampling_method, batch_size=ds.batch_size, pin_memory=pin_memory)
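The settings writer above leans on introspection: inspect.getfullargspec lists the function's own parameter names, and locals() maps each name back to the value it was called with, so the settings file needs no hand-maintained field list. A minimal sketch of that pattern with hypothetical parameters:

    import inspect

    def dump_args_sketch(learn_rate, batch_size, steps):
        arg_names = inspect.getfullargspec(dump_args_sketch).args
        values = locals()
        return "\n".join(f"{name}: {values[name]}" for name in arg_names)

    print(dump_args_sketch(0.005, 1, 100))
    # learn_rate: 0.005
    # batch_size: 1
    # steps: 100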
From 19a81ac2871ec900fc8b7955bbc2554b6c5ac6b1 Mon Sep 17 00:00:00 2001
From: cat
Date: Thu, 5 Jan 2023 20:17:39 +0500
Subject: [PATCH 096/172] hires-fix: add "nearest-exact" latent upscale mode.
---
modules/shared.py | 1 +
1 file changed, 1 insertion(+)
diff --git a/modules/shared.py b/modules/shared.py
index e0f44c6d..b7a3ce5c 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -576,6 +576,7 @@ latent_upscale_modes = {
"Latent (bicubic)": {"mode": "bicubic", "antialias": False},
"Latent (bicubic antialiased)": {"mode": "bicubic", "antialias": True},
"Latent (nearest)": {"mode": "nearest", "antialias": False},
+ "Latent (nearest-exact)": {"mode": "nearest-exact", "antialias": False},
}
sd_upscalers = []
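The new mode maps straight onto torch.nn.functional.interpolate, whose "nearest-exact" kernel (available since PyTorch 1.11) avoids the coordinate-rounding mismatch of legacy "nearest". A quick sketch of a latent upscale using this mode, with a hypothetical latent tensor:

    import torch
    import torch.nn.functional as F

    latent = torch.randn(1, 4, 64, 64)  # hypothetical latent for a 512x512 image
    upscaled = F.interpolate(latent, scale_factor=2, mode="nearest-exact", antialias=False)
    print(upscaled.shape)  # torch.Size([1, 4, 128, 128])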
From b85c2b5cf4a6809bc871718cf4680d49c3e95e94 Mon Sep 17 00:00:00 2001
From: timntorres
Date: Thu, 5 Jan 2023 08:14:38 -0800
Subject: [PATCH 097/172] Clean up ti, add same behavior to hypernetwork.
---
modules/hypernetworks/hypernetwork.py | 31 ++++++++++++++++++-
modules/shared.py | 2 +-
.../textual_inversion/textual_inversion.py | 14 ++++++---
3 files changed, 40 insertions(+), 7 deletions(-)
diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py
index 6a9b1398..d5985263 100644
--- a/modules/hypernetworks/hypernetwork.py
+++ b/modules/hypernetworks/hypernetwork.py
@@ -401,7 +401,33 @@ def create_hypernetwork(name, enable_sizes, overwrite_old, layer_structure=None,
hypernet.save(fn)
shared.reload_hypernetworks()
+# Note: textual_inversion.py has a nearly identical function of the same name.
+def save_settings_to_file(initial_step, num_of_dataset_images, hypernetwork_name, layer_structure, activation_func, weight_init, add_layer_norm, use_dropout, learn_rate, batch_size, data_root, log_directory, training_width, training_height, steps, create_image_every, save_hypernetwork_every, template_file, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height):
+ checkpoint = sd_models.select_checkpoint()
+ model_name = checkpoint.model_name
+ model_hash = '[{}]'.format(checkpoint.hash)
+ # Starting index of preview-related arguments.
+ border_index = 19
+ # Get a list of the argument names, excluding default argument.
+ sig = inspect.signature(save_settings_to_file)
+ arg_names = [p.name for p in sig.parameters.values() if p.default == p.empty]
+
+ # Create a list of the argument names to include in the settings string.
+ names = arg_names[:border_index] # Include all arguments up until the preview-related ones.
+
+ # Include preview-related arguments if applicable.
+ if preview_from_txt2img:
+ names.extend(arg_names[border_index:])
+
+ # Build the settings string.
+ settings_str = "datetime : " + datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") + "\n"
+ for name in names:
+ value = locals()[name]
+ settings_str += f"{name}: {value}\n"
+
+ with open(os.path.join(log_directory, 'settings.txt'), "a+") as fout:
+ fout.write(settings_str + "\n\n")
def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, gradient_step, data_root, log_directory, training_width, training_height, steps, clip_grad_mode, clip_grad_value, shuffle_tags, tag_drop_out, latent_sampling_method, create_image_every, save_hypernetwork_every, template_file, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height):
# images allows training previews to have infotext. Importing it at the top causes a circular import problem.
@@ -457,7 +483,10 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, gradient_step,
pin_memory = shared.opts.pin_memory
ds = modules.textual_inversion.dataset.PersonalizedBase(data_root=data_root, width=training_width, height=training_height, repeats=shared.opts.training_image_repeats_per_epoch, placeholder_token=hypernetwork_name, model=shared.sd_model, cond_model=shared.sd_model.cond_stage_model, device=devices.device, template_file=template_file, include_cond=True, batch_size=batch_size, gradient_step=gradient_step, shuffle_tags=shuffle_tags, tag_drop_out=tag_drop_out, latent_sampling_method=latent_sampling_method)
-
+
+ if shared.opts.save_training_settings_to_txt:
+ save_settings_to_file(initial_step, len(ds), hypernetwork_name, hypernetwork.layer_structure, hypernetwork.activation_func, hypernetwork.weight_init, hypernetwork.add_layer_norm, hypernetwork.use_dropout, learn_rate, batch_size, data_root, log_directory, training_width, training_height, steps, create_image_every, save_hypernetwork_every, template_file, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height)
+
latent_sampling_method = ds.latent_sampling_method
dl = modules.textual_inversion.dataset.PersonalizedDataLoader(ds, latent_sampling_method=latent_sampling_method, batch_size=ds.batch_size, pin_memory=pin_memory)
diff --git a/modules/shared.py b/modules/shared.py
index 933cd738..10231a75 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -362,7 +362,7 @@ options_templates.update(options_section(('training', "Training"), {
"unload_models_when_training": OptionInfo(False, "Move VAE and CLIP to RAM when training if possible. Saves VRAM."),
"pin_memory": OptionInfo(False, "Turn on pin_memory for DataLoader. Makes training slightly faster but can increase memory usage."),
"save_optimizer_state": OptionInfo(False, "Saves Optimizer state as separate *.optim file. Training of embedding or HN can be resumed with the matching optim file."),
- "save_train_settings_to_txt": OptionInfo(False, "Save textual inversion and hypernet settings to a text file when training starts."),
+ "save_training_settings_to_txt": OptionInfo(False, "Save textual inversion and hypernet settings to a text file whenever training starts."),
"dataset_filename_word_regex": OptionInfo("", "Filename word regex"),
"dataset_filename_join_string": OptionInfo(" ", "Filename join string"),
"training_image_repeats_per_epoch": OptionInfo(1, "Number of repeats for a single input image per epoch; used only for displaying epoch number", gr.Number, {"precision": 0}),
diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py
index 2bed2ecb..68648550 100644
--- a/modules/textual_inversion/textual_inversion.py
+++ b/modules/textual_inversion/textual_inversion.py
@@ -230,18 +230,20 @@ def write_loss(log_directory, filename, step, epoch_len, values):
**values,
})
+# Note: hypernetwork.py has a nearly identical function of the same name.
def save_settings_to_file(initial_step, num_of_dataset_images, embedding_name, vectors_per_token, learn_rate, batch_size, data_root, log_directory, training_width, training_height, steps, create_image_every, save_embedding_every, template_file, save_image_with_stored_embedding, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height):
checkpoint = sd_models.select_checkpoint()
model_name = checkpoint.model_name
model_hash = '[{}]'.format(checkpoint.hash)
-
+ # Starting index of preview-related arguments.
+ border_index = 16
# Get a list of the argument names.
arg_names = inspect.getfullargspec(save_settings_to_file).args
# Create a list of the argument names to include in the settings string.
- names = arg_names[:16] # Include all arguments up until the preview-related ones.
+ names = arg_names[:border_index] # Include all arguments up until the preview-related ones.
if preview_from_txt2img:
- names.extend(arg_names[16:]) # Include all remaining arguments if `preview_from_txt2img` is True.
+ names.extend(arg_names[border_index:]) # Include all remaining arguments if `preview_from_txt2img` is True.
# Build the settings string.
settings_str = "datetime : " + datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") + "\n"
@@ -329,8 +331,10 @@ def train_embedding(embedding_name, learn_rate, batch_size, gradient_step, data_
pin_memory = shared.opts.pin_memory
ds = modules.textual_inversion.dataset.PersonalizedBase(data_root=data_root, width=training_width, height=training_height, repeats=shared.opts.training_image_repeats_per_epoch, placeholder_token=embedding_name, model=shared.sd_model, cond_model=shared.sd_model.cond_stage_model, device=devices.device, template_file=template_file, batch_size=batch_size, gradient_step=gradient_step, shuffle_tags=shuffle_tags, tag_drop_out=tag_drop_out, latent_sampling_method=latent_sampling_method)
- if shared.opts.save_train_settings_to_txt:
- save_settings_to_file(initial_step , len(ds) , embedding_name, len(embedding.vec) , learn_rate, batch_size, data_root, log_directory, training_width, training_height, steps, create_image_every, save_embedding_every, template_file, save_image_with_stored_embedding, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height)
+
+ if shared.opts.save_training_settings_to_txt:
+ save_settings_to_file(initial_step, len(ds), embedding_name, len(embedding.vec), learn_rate, batch_size, data_root, log_directory, training_width, training_height, steps, create_image_every, save_embedding_every, template_file, save_image_with_stored_embedding, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height)
+
latent_sampling_method = ds.latent_sampling_method
dl = modules.textual_inversion.dataset.PersonalizedDataLoader(ds, latent_sampling_method=latent_sampling_method, batch_size=ds.batch_size, pin_memory=pin_memory)
From b6bab2f052b32c0ffebe6aecc1819ccf20cf8c5d Mon Sep 17 00:00:00 2001
From: timntorres
Date: Thu, 5 Jan 2023 09:14:56 -0800
Subject: [PATCH 098/172] Include model in log file. Exclude directory.
---
modules/hypernetworks/hypernetwork.py | 28 +++++++------------
.../textual_inversion/textual_inversion.py | 22 ++++++---------
2 files changed, 19 insertions(+), 31 deletions(-)
diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py
index d5985263..3237c37a 100644
--- a/modules/hypernetworks/hypernetwork.py
+++ b/modules/hypernetworks/hypernetwork.py
@@ -402,30 +402,22 @@ def create_hypernetwork(name, enable_sizes, overwrite_old, layer_structure=None,
shared.reload_hypernetworks()
# Note: textual_inversion.py has a nearly identical function of the same name.
-def save_settings_to_file(initial_step, num_of_dataset_images, hypernetwork_name, layer_structure, activation_func, weight_init, add_layer_norm, use_dropout, learn_rate, batch_size, data_root, log_directory, training_width, training_height, steps, create_image_every, save_hypernetwork_every, template_file, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height):
- checkpoint = sd_models.select_checkpoint()
- model_name = checkpoint.model_name
- model_hash = '[{}]'.format(checkpoint.hash)
+def save_settings_to_file(model_name, model_hash, initial_step, num_of_dataset_images, hypernetwork_name, layer_structure, activation_func, weight_init, add_layer_norm, use_dropout, learn_rate, batch_size, data_root, log_directory, training_width, training_height, steps, create_image_every, save_hypernetwork_every, template_file, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height):
# Starting index of preview-related arguments.
- border_index = 19
-
- # Get a list of the argument names, excluding default argument.
- sig = inspect.signature(save_settings_to_file)
- arg_names = [p.name for p in sig.parameters.values() if p.default == p.empty]
-
+ border_index = 21
+ # Get a list of the argument names.
+ arg_names = inspect.getfullargspec(save_settings_to_file).args
# Create a list of the argument names to include in the settings string.
names = arg_names[:border_index] # Include all arguments up until the preview-related ones.
-
- # Include preview-related arguments if applicable.
if preview_from_txt2img:
- names.extend(arg_names[border_index:])
-
+ names.extend(arg_names[border_index:]) # Include preview-related arguments if applicable.
# Build the settings string.
settings_str = "datetime : " + datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") + "\n"
for name in names:
- value = locals()[name]
- settings_str += f"{name}: {value}\n"
-
+ if name != 'log_directory': # It's useless and redundant to save log_directory.
+ value = locals()[name]
+ settings_str += f"{name}: {value}\n"
+ # Create or append to the file.
with open(os.path.join(log_directory, 'settings.txt'), "a+") as fout:
fout.write(settings_str + "\n\n")
@@ -485,7 +477,7 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, gradient_step,
ds = modules.textual_inversion.dataset.PersonalizedBase(data_root=data_root, width=training_width, height=training_height, repeats=shared.opts.training_image_repeats_per_epoch, placeholder_token=hypernetwork_name, model=shared.sd_model, cond_model=shared.sd_model.cond_stage_model, device=devices.device, template_file=template_file, include_cond=True, batch_size=batch_size, gradient_step=gradient_step, shuffle_tags=shuffle_tags, tag_drop_out=tag_drop_out, latent_sampling_method=latent_sampling_method)
if shared.opts.save_training_settings_to_txt:
- save_settings_to_file(initial_step, len(ds), hypernetwork_name, hypernetwork.layer_structure, hypernetwork.activation_func, hypernetwork.weight_init, hypernetwork.add_layer_norm, hypernetwork.use_dropout, learn_rate, batch_size, data_root, log_directory, training_width, training_height, steps, create_image_every, save_hypernetwork_every, template_file, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height)
+ save_settings_to_file(checkpoint.model_name, '[{}]'.format(checkpoint.hash), initial_step, len(ds), hypernetwork_name, hypernetwork.layer_structure, hypernetwork.activation_func, hypernetwork.weight_init, hypernetwork.add_layer_norm, hypernetwork.use_dropout, learn_rate, batch_size, data_root, log_directory, training_width, training_height, steps, create_image_every, save_hypernetwork_every, template_file, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height)
latent_sampling_method = ds.latent_sampling_method
diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py
index 68648550..ce7e4f5d 100644
--- a/modules/textual_inversion/textual_inversion.py
+++ b/modules/textual_inversion/textual_inversion.py
@@ -231,26 +231,22 @@ def write_loss(log_directory, filename, step, epoch_len, values):
})
# Note: hypernetwork.py has a nearly identical function of the same name.
-def save_settings_to_file(initial_step, num_of_dataset_images, embedding_name, vectors_per_token, learn_rate, batch_size, data_root, log_directory, training_width, training_height, steps, create_image_every, save_embedding_every, template_file, save_image_with_stored_embedding, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height):
- checkpoint = sd_models.select_checkpoint()
- model_name = checkpoint.model_name
- model_hash = '[{}]'.format(checkpoint.hash)
+def save_settings_to_file(model_name, model_hash, initial_step, num_of_dataset_images, embedding_name, vectors_per_token, learn_rate, batch_size, data_root, log_directory, training_width, training_height, steps, create_image_every, save_embedding_every, template_file, save_image_with_stored_embedding, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height):
# Starting index of preview-related arguments.
- border_index = 16
+ border_index = 18
# Get a list of the argument names.
- arg_names = inspect.getfullargspec(save_settings_to_file).args
-
+ arg_names = inspect.getfullargspec(save_settings_to_file).args
# Create a list of the argument names to include in the settings string.
names = arg_names[:border_index] # Include all arguments up until the preview-related ones.
if preview_from_txt2img:
- names.extend(arg_names[border_index:]) # Include all remaining arguments if `preview_from_txt2img` is True.
-
+ names.extend(arg_names[border_index:]) # Include preview-related arguments if applicable.
# Build the settings string.
settings_str = "datetime : " + datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") + "\n"
for name in names:
- value = locals()[name]
- settings_str += f"{name}: {value}\n"
-
+ if name != 'log_directory': # It's useless and redundant to save log_directory.
+ value = locals()[name]
+ settings_str += f"{name}: {value}\n"
+ # Create or append to the file.
with open(os.path.join(log_directory, 'settings.txt'), "a+") as fout:
fout.write(settings_str + "\n\n")
@@ -333,7 +329,7 @@ def train_embedding(embedding_name, learn_rate, batch_size, gradient_step, data_
ds = modules.textual_inversion.dataset.PersonalizedBase(data_root=data_root, width=training_width, height=training_height, repeats=shared.opts.training_image_repeats_per_epoch, placeholder_token=embedding_name, model=shared.sd_model, cond_model=shared.sd_model.cond_stage_model, device=devices.device, template_file=template_file, batch_size=batch_size, gradient_step=gradient_step, shuffle_tags=shuffle_tags, tag_drop_out=tag_drop_out, latent_sampling_method=latent_sampling_method)
if shared.opts.save_training_settings_to_txt:
- save_settings_to_file(initial_step, len(ds), embedding_name, len(embedding.vec), learn_rate, batch_size, data_root, log_directory, training_width, training_height, steps, create_image_every, save_embedding_every, template_file, save_image_with_stored_embedding, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height)
+ save_settings_to_file(checkpoint.model_name, '[{}]'.format(checkpoint.hash), initial_step, len(ds), embedding_name, len(embedding.vec), learn_rate, batch_size, data_root, log_directory, training_width, training_height, steps, create_image_every, save_embedding_every, template_file, save_image_with_stored_embedding, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height)
latent_sampling_method = ds.latent_sampling_method
From fda04e620d529031e2134520e74756d0efa30464 Mon Sep 17 00:00:00 2001
From: Kuma <36082288+KumiIT@users.noreply.github.com>
Date: Thu, 5 Jan 2023 18:44:19 +0100
Subject: [PATCH 099/172] typo in TI
---
modules/textual_inversion/textual_inversion.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py
index 71e07bcc..24b43045 100644
--- a/modules/textual_inversion/textual_inversion.py
+++ b/modules/textual_inversion/textual_inversion.py
@@ -298,7 +298,7 @@ def train_embedding(embedding_name, learn_rate, batch_size, gradient_step, data_
torch.nn.utils.clip_grad_norm_ if clip_grad_mode == "norm" else \
None
if clip_grad:
- clip_grad_sched = LearnRateScheduler(clip_grad_value, steps, ititial_step, verbose=False)
+ clip_grad_sched = LearnRateScheduler(clip_grad_value, steps, initial_step, verbose=False)
# dataset loading may take a while, so input validations and early returns should be done before this
shared.state.textinfo = f"Preparing dataset from {html.escape(data_root)}..."
old_parallel_processing_allowed = shared.parallel_processing_allowed
From 847f869c67c7108e3e792fc193331d0e6acca29c Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Thu, 5 Jan 2023 21:00:52 +0300
Subject: [PATCH 100/172] experimental optimization
---
modules/processing.py | 28 +++++++++++++++++++++++++---
1 file changed, 25 insertions(+), 3 deletions(-)
diff --git a/modules/processing.py b/modules/processing.py
index 61e97077..a408d622 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -544,6 +544,29 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
infotexts = []
output_images = []
+ cached_uc = [None, None]
+ cached_c = [None, None]
+
+ def get_conds_with_caching(function, required_prompts, steps, cache):
+ """
+ Returns the result of calling function(shared.sd_model, required_prompts, steps)
+ using a cache to store the result if the same arguments have been used before.
+
+ cache is an array containing two elements. The first element is a tuple
+ representing the previously used arguments, or None if no arguments
+ have been used before. The second element is where the previously
+ computed result is stored.
+ """
+
+ if cache[0] is not None and (required_prompts, steps) == cache[0]:
+ return cache[1]
+
+ with devices.autocast():
+ cache[1] = function(shared.sd_model, required_prompts, steps)
+
+ cache[0] = (required_prompts, steps)
+ return cache[1]
+
with torch.no_grad(), p.sd_model.ema_scope():
with devices.autocast():
p.init(p.all_prompts, p.all_seeds, p.all_subseeds)
@@ -571,9 +594,8 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
if p.scripts is not None:
p.scripts.process_batch(p, batch_number=n, prompts=prompts, seeds=seeds, subseeds=subseeds)
- with devices.autocast():
- uc = prompt_parser.get_learned_conditioning(shared.sd_model, negative_prompts, p.steps)
- c = prompt_parser.get_multicond_learned_conditioning(shared.sd_model, prompts, p.steps)
+ uc = get_conds_with_caching(prompt_parser.get_learned_conditioning, negative_prompts, p.steps, cached_uc)
+ c = get_conds_with_caching(prompt_parser.get_multicond_learned_conditioning, prompts, p.steps, cached_c)
if len(model_hijack.comments) > 0:
for comment in model_hijack.comments:
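The cache here is deliberately a single slot: consecutive batches usually repeat the same prompts and step count, so remembering only the last (arguments, result) pair is enough to skip recomputing the conditionings. A standalone sketch of the same memoization shape, using a hypothetical function:

    def get_with_caching(function, key, cache):
        # cache is a two-element list: [last_key_or_None, last_result]
        if cache[0] is not None and cache[0] == key:
            return cache[1]
        cache[1] = function(*key)
        cache[0] = key
        return cache[1]

    cache = [None, None]
    square = lambda x: x * x
    print(get_with_caching(square, (3,), cache))  # computed: 9
    print(get_with_caching(square, (3,), cache))  # served from cache: 9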
From 81133d4168ae0bae9bf8bf1a1d4983319a589112 Mon Sep 17 00:00:00 2001
From: Faber
Date: Fri, 6 Jan 2023 03:38:37 +0700
Subject: [PATCH 101/172] allow loading embeddings from subdirectories
---
.../textual_inversion/textual_inversion.py | 21 ++++++++++---------
1 file changed, 11 insertions(+), 10 deletions(-)
diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py
index 24b43045..0a059044 100644
--- a/modules/textual_inversion/textual_inversion.py
+++ b/modules/textual_inversion/textual_inversion.py
@@ -149,19 +149,20 @@ class EmbeddingDatabase:
else:
self.skipped_embeddings[name] = embedding
- for fn in os.listdir(self.embeddings_dir):
- try:
- fullfn = os.path.join(self.embeddings_dir, fn)
+ for root, dirs, fns in os.walk(self.embeddings_dir):
+ for fn in fns:
+ try:
+ fullfn = os.path.join(root, fn)
- if os.stat(fullfn).st_size == 0:
+ if os.stat(fullfn).st_size == 0:
+ continue
+
+ process_file(fullfn, fn)
+ except Exception:
+ print(f"Error loading embedding {fn}:", file=sys.stderr)
+ print(traceback.format_exc(), file=sys.stderr)
continue
- process_file(fullfn, fn)
- except Exception:
- print(f"Error loading embedding {fn}:", file=sys.stderr)
- print(traceback.format_exc(), file=sys.stderr)
- continue
-
print(f"Textual inversion embeddings loaded({len(self.word_embeddings)}): {', '.join(self.word_embeddings.keys())}")
if len(self.skipped_embeddings) > 0:
print(f"Textual inversion embeddings skipped({len(self.skipped_embeddings)}): {', '.join(self.skipped_embeddings.keys())}")
From b5253f0dab529707f1fe2e11211a10ce2f264617 Mon Sep 17 00:00:00 2001
From: noodleanon <122053346+noodleanon@users.noreply.github.com>
Date: Thu, 5 Jan 2023 21:21:48 +0000
Subject: [PATCH 102/172] allow img2img api to run scripts
---
modules/api/api.py | 27 ++++++++++++++++++++++++---
modules/api/models.py | 2 +-
modules/processing.py | 4 ++--
3 files changed, 27 insertions(+), 6 deletions(-)
diff --git a/modules/api/api.py b/modules/api/api.py
index 2103709b..aa62a42e 100644
--- a/modules/api/api.py
+++ b/modules/api/api.py
@@ -11,7 +11,7 @@ from fastapi.security import HTTPBasic, HTTPBasicCredentials
from secrets import compare_digest
import modules.shared as shared
-from modules import sd_samplers, deepbooru, sd_hijack, images
+from modules import sd_samplers, deepbooru, sd_hijack, images, scripts, ui
from modules.api.models import *
from modules.processing import StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img, process_images
from modules.extras import run_extras
@@ -28,8 +28,13 @@ def upscaler_to_index(name: str):
try:
return [x.name.lower() for x in shared.sd_upscalers].index(name.lower())
except:
- raise HTTPException(status_code=400, detail=f"Invalid upscaler, needs to be on of these: {' , '.join([x.name for x in sd_upscalers])}")
+ raise HTTPException(status_code=400, detail=f"Invalid upscaler, needs to be one of these: {' , '.join([x.name for x in sd_upscalers])}")
+def script_name_to_index(name, scripts):
+ try:
+ return [script.title().lower() for script in scripts].index(name.lower())
+ except:
+ raise HTTPException(status_code=422, detail=f"Script '{name}' not found")
def validate_sampler_name(name):
config = sd_samplers.all_samplers_map.get(name, None)
@@ -170,6 +175,14 @@ class Api:
if init_images is None:
raise HTTPException(status_code=404, detail="Init image not found")
+ if img2imgreq.script_name is not None:
+ if scripts.scripts_img2img.scripts == []:
+ scripts.scripts_img2img.initialize_scripts(True)
+ ui.create_ui()
+
+ script_idx = script_name_to_index(img2imgreq.script_name, scripts.scripts_img2img.selectable_scripts)
+ script = scripts.scripts_img2img.selectable_scripts[script_idx]
+
mask = img2imgreq.mask
if mask:
mask = decode_base64_to_image(mask)
@@ -186,13 +199,21 @@ class Api:
args = vars(populate)
args.pop('include_init_images', None) # this is meant to be done by "exclude": True in model, but it's for a reason that I cannot determine.
+ args.pop('script_name', None)
with self.queue_lock:
p = StableDiffusionProcessingImg2Img(sd_model=shared.sd_model, **args)
p.init_images = [decode_base64_to_image(x) for x in init_images]
shared.state.begin()
- processed = process_images(p)
+ if 'script' in locals():
+ p.outpath_grids = opts.outdir_img2img_grids
+ p.outpath_samples = opts.outdir_img2img_samples
+ p.script_args = [script_idx + 1] + [None] * (script.args_from - 1) + p.script_args
+ processed = scripts.scripts_img2img.run(p, *p.script_args)
+ else:
+ processed = process_images(p)
+
shared.state.end()
b64images = list(map(encode_pil_to_base64, processed.images))
diff --git a/modules/api/models.py b/modules/api/models.py
index d8198a27..862477e7 100644
--- a/modules/api/models.py
+++ b/modules/api/models.py
@@ -106,7 +106,7 @@ StableDiffusionTxt2ImgProcessingAPI = PydanticModelGenerator(
StableDiffusionImg2ImgProcessingAPI = PydanticModelGenerator(
"StableDiffusionProcessingImg2Img",
StableDiffusionProcessingImg2Img,
- [{"key": "sampler_index", "type": str, "default": "Euler"}, {"key": "init_images", "type": list, "default": None}, {"key": "denoising_strength", "type": float, "default": 0.75}, {"key": "mask", "type": str, "default": None}, {"key": "include_init_images", "type": bool, "default": False, "exclude" : True}]
+ [{"key": "sampler_index", "type": str, "default": "Euler"}, {"key": "init_images", "type": list, "default": None}, {"key": "denoising_strength", "type": float, "default": 0.75}, {"key": "mask", "type": str, "default": None}, {"key": "include_init_images", "type": bool, "default": False, "exclude" : True}, {"key": "script_name", "type": str, "default": None}, {"key": "script_args", "type": list, "default": []}]
).generate_model()
class TextToImageResponse(BaseModel):
diff --git a/modules/processing.py b/modules/processing.py
index a408d622..d5ac7eb1 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -98,7 +98,7 @@ class StableDiffusionProcessing():
"""
The first set of paramaters: sd_models -> do_not_reload_embeddings represent the minimum required to create a StableDiffusionProcessing
"""
- def __init__(self, sd_model=None, outpath_samples=None, outpath_grids=None, prompt: str = "", styles: List[str] = None, seed: int = -1, subseed: int = -1, subseed_strength: float = 0, seed_resize_from_h: int = -1, seed_resize_from_w: int = -1, seed_enable_extras: bool = True, sampler_name: str = None, batch_size: int = 1, n_iter: int = 1, steps: int = 50, cfg_scale: float = 7.0, width: int = 512, height: int = 512, restore_faces: bool = False, tiling: bool = False, do_not_save_samples: bool = False, do_not_save_grid: bool = False, extra_generation_params: Dict[Any, Any] = None, overlay_images: Any = None, negative_prompt: str = None, eta: float = None, do_not_reload_embeddings: bool = False, denoising_strength: float = 0, ddim_discretize: str = None, s_churn: float = 0.0, s_tmax: float = None, s_tmin: float = 0.0, s_noise: float = 1.0, override_settings: Dict[str, Any] = None, override_settings_restore_afterwards: bool = True, sampler_index: int = None):
+ def __init__(self, sd_model=None, outpath_samples=None, outpath_grids=None, prompt: str = "", styles: List[str] = None, seed: int = -1, subseed: int = -1, subseed_strength: float = 0, seed_resize_from_h: int = -1, seed_resize_from_w: int = -1, seed_enable_extras: bool = True, sampler_name: str = None, batch_size: int = 1, n_iter: int = 1, steps: int = 50, cfg_scale: float = 7.0, width: int = 512, height: int = 512, restore_faces: bool = False, tiling: bool = False, do_not_save_samples: bool = False, do_not_save_grid: bool = False, extra_generation_params: Dict[Any, Any] = None, overlay_images: Any = None, negative_prompt: str = None, eta: float = None, do_not_reload_embeddings: bool = False, denoising_strength: float = 0, ddim_discretize: str = None, s_churn: float = 0.0, s_tmax: float = None, s_tmin: float = 0.0, s_noise: float = 1.0, override_settings: Dict[str, Any] = None, override_settings_restore_afterwards: bool = True, sampler_index: int = None, script_args: list = None):
if sampler_index is not None:
print("sampler_index argument for StableDiffusionProcessing does not do anything; use sampler_name", file=sys.stderr)
@@ -149,7 +149,7 @@ class StableDiffusionProcessing():
self.seed_resize_from_w = 0
self.scripts = None
- self.script_args = None
+ self.script_args = script_args
self.all_prompts = None
self.all_negative_prompts = None
self.all_seeds = None
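The script_args list the API builds is positional: index 0 selects the script in the UI dropdown (hence script_idx + 1, since slot 0 of the dropdown is "None"), and a script's own controls start at script.args_from, so the slots before it are padded with None. A sketch of the layout with hypothetical numbers:

    script_idx = 2                   # hypothetical position among selectable scripts
    args_from = 5                    # hypothetical start index of this script's controls
    user_args = [64, 8, "Lanczos"]   # hypothetical values for the script's UI inputs

    script_args = [script_idx + 1] + [None] * (args_from - 1) + user_args
    print(script_args)  # [3, None, None, None, None, 64, 8, 'Lanczos']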
From eadd1bf06adbd7263875640a6446d3b0184d1561 Mon Sep 17 00:00:00 2001
From: noodleanon <122053346+noodleanon@users.noreply.github.com>
Date: Thu, 5 Jan 2023 21:22:04 +0000
Subject: [PATCH 103/172] allow sdupscale to accept upscaler name
---
scripts/sd_upscale.py | 2 ++
1 file changed, 2 insertions(+)
diff --git a/scripts/sd_upscale.py b/scripts/sd_upscale.py
index 9b8ffd85..332d76d9 100644
--- a/scripts/sd_upscale.py
+++ b/scripts/sd_upscale.py
@@ -25,6 +25,8 @@ class Script(scripts.Script):
return [info, overlap, upscaler_index, scale_factor]
def run(self, p, _, overlap, upscaler_index, scale_factor):
+ if isinstance(upscaler_index, str):
+ upscaler_index = [x.name.lower() for x in shared.sd_upscalers].index(upscaler_index.lower())
processing.fix_seed(p)
upscaler = shared.sd_upscalers[upscaler_index]
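The name-to-index normalization is a case-insensitive lookup over the registered upscaler names; a tiny sketch with hypothetical entries:

    upscaler_names = ["None", "Lanczos", "ESRGAN 4x"]  # hypothetical registry
    choice = "lanczos"
    index = [n.lower() for n in upscaler_names].index(choice.lower())
    print(index)  # 1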
From 8111b5569d07c7ac3b695e28171aede728b4ae56 Mon Sep 17 00:00:00 2001
From: brkirch
Date: Tue, 3 Jan 2023 20:43:05 -0500
Subject: [PATCH 104/172] Add support for PyTorch nightly and local builds
---
modules/devices.py | 28 +++++++++++++++++++++++-----
webui.py | 7 ++++++-
2 files changed, 29 insertions(+), 6 deletions(-)
diff --git a/modules/devices.py b/modules/devices.py
index 800510b7..caeb0276 100644
--- a/modules/devices.py
+++ b/modules/devices.py
@@ -133,8 +133,26 @@ def numpy_fix(self, *args, **kwargs):
return orig_tensor_numpy(self, *args, **kwargs)
-# PyTorch 1.13 doesn't need these fixes but unfortunately is slower and has regressions that prevent training from working
-if has_mps() and version.parse(torch.__version__) < version.parse("1.13"):
- torch.Tensor.to = tensor_to_fix
- torch.nn.functional.layer_norm = layer_norm_fix
- torch.Tensor.numpy = numpy_fix
+# MPS workaround for https://github.com/pytorch/pytorch/issues/89784
+orig_cumsum = torch.cumsum
+orig_Tensor_cumsum = torch.Tensor.cumsum
+def cumsum_fix(input, cumsum_func, *args, **kwargs):
+ if input.device.type == 'mps':
+ output_dtype = kwargs.get('dtype', input.dtype)
+ if any(output_dtype == broken_dtype for broken_dtype in [torch.bool, torch.int8, torch.int16, torch.int64]):
+ return cumsum_func(input.cpu(), *args, **kwargs).to(input.device)
+ return cumsum_func(input, *args, **kwargs)
+
+
+if has_mps():
+ if version.parse(torch.__version__) < version.parse("1.13"):
+ # PyTorch 1.13 doesn't need these fixes but unfortunately is slower and has regressions that prevent training from working
+ torch.Tensor.to = tensor_to_fix
+ torch.nn.functional.layer_norm = layer_norm_fix
+ torch.Tensor.numpy = numpy_fix
+ elif version.parse(torch.__version__) > version.parse("1.13.1"):
+ if not torch.Tensor([1,2]).to(torch.device("mps")).equal(torch.Tensor([1,1]).to(torch.device("mps")).cumsum(0, dtype=torch.int16)):
+ torch.cumsum = lambda input, *args, **kwargs: ( cumsum_fix(input, orig_cumsum, *args, **kwargs) )
+ torch.Tensor.cumsum = lambda self, *args, **kwargs: ( cumsum_fix(self, orig_Tensor_cumsum, *args, **kwargs) )
+ orig_narrow = torch.narrow
+ torch.narrow = lambda *args, **kwargs: ( orig_narrow(*args, **kwargs).clone() )
diff --git a/webui.py b/webui.py
index 13375e71..ddfaea95 100644
--- a/webui.py
+++ b/webui.py
@@ -4,7 +4,7 @@ import threading
import time
import importlib
import signal
-import threading
+import re
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from fastapi.middleware.gzip import GZipMiddleware
@@ -13,6 +13,11 @@ from modules import import_hook, errors
from modules.call_queue import wrap_queued_call, queue_lock, wrap_gradio_gpu_call
from modules.paths import script_path
+import torch
+# Truncate version number of nightly/local build of PyTorch to not cause exceptions with CodeFormer or Safetensors
+if ".dev" in torch.__version__ or "+git" in torch.__version__:
+ torch.__version__ = re.search(r'[\d.]+', torch.__version__).group(0)
+
from modules import shared, devices, sd_samplers, upscaler, extensions, localization, ui_tempdir
import modules.codeformer_model as codeformer
import modules.extras
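The version-truncation guard exists because nightly and local PyTorch builds carry suffixes such as ".dev20230105" or "+git<hash>" that strict version parsers reject; the regex keeps only the leading run of digits and dots. A sketch of its behavior on hypothetical version strings:

    import re

    for raw in ("1.14.0a0+git1234", "1.13.1"):
        print(re.search(r'[\d.]+', raw).group(0))
    # 1.14.0
    # 1.13.1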
From d61a5aa4f623f6630670241aca8fc5c2a6381769 Mon Sep 17 00:00:00 2001
From: acncagua
Date: Fri, 6 Jan 2023 10:58:22 +0900
Subject: [PATCH 105/172] Add files via upload
---
modules/ui.py | 2 ++
1 file changed, 2 insertions(+)
diff --git a/modules/ui.py b/modules/ui.py
index 81d96c5b..030f0685 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -550,6 +550,8 @@ Requested path was: {f}
os.startfile(path)
elif platform.system() == "Darwin":
sp.Popen(["open", path])
+ elif "microsoft-standard-WSL2" in platform.uname().release:
+ sp.Popen(["wsl-open", path])
else:
sp.Popen(["xdg-open", path])
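The WSL2 branch keys off the kernel release string, which under WSL2 typically looks like "5.15.90.1-microsoft-standard-WSL2" (wsl-open itself is an external helper that must be installed separately). A minimal detection sketch:

    import platform

    if "microsoft-standard-WSL2" in platform.uname().release:
        print("running under WSL2; opening paths via wsl-open")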
From d782a95967c9eea753df3333cd1954b6ec73eba0 Mon Sep 17 00:00:00 2001
From: brkirch
Date: Tue, 27 Dec 2022 08:50:55 -0500
Subject: [PATCH 106/172] Add Birch-san's sub-quadratic attention
implementation
---
README.md | 1 +
modules/sd_hijack.py | 15 +--
modules/sd_hijack_optimizations.py | 124 ++++++++++++++----
modules/shared.py | 4 +
modules/sub_quadratic_attention.py | 201 +++++++++++++++++++++++++++++
requirements.txt | 2 +-
6 files changed, 312 insertions(+), 35 deletions(-)
create mode 100644 modules/sub_quadratic_attention.py
diff --git a/README.md b/README.md
index 556000fb..1913caf3 100644
--- a/README.md
+++ b/README.md
@@ -139,6 +139,7 @@ The documentation was moved from this README over to the project's [wiki](https:
- Ideas for optimizations - https://github.com/basujindal/stable-diffusion
- Cross Attention layer optimization - Doggettx - https://github.com/Doggettx/stable-diffusion, original idea for prompt editing.
- Cross Attention layer optimization - InvokeAI, lstein - https://github.com/invoke-ai/InvokeAI (originally http://github.com/lstein/stable-diffusion)
+- Sub-quadratic Cross Attention layer optimization - Alex Birch (https://github.com/Birch-san), Amin Rezaei (https://github.com/AminRezaei0x443)
- Textual Inversion - Rinon Gal - https://github.com/rinongal/textual_inversion (we're not using his code, but we are using his ideas).
- Idea for SD upscale - https://github.com/jquesnelle/txt2imghd
- Noise generation for outpainting mk2 - https://github.com/parlance-zz/g-diffuser-bot
diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py
index 690a9ec2..019a6f3f 100644
--- a/modules/sd_hijack.py
+++ b/modules/sd_hijack.py
@@ -7,8 +7,6 @@ from modules.hypernetworks import hypernetwork
from modules.shared import cmd_opts
from modules import sd_hijack_clip, sd_hijack_open_clip, sd_hijack_unet
-from modules.sd_hijack_optimizations import invokeAI_mps_available
-
import ldm.modules.attention
import ldm.modules.diffusionmodules.model
import ldm.modules.diffusionmodules.openaimodel
@@ -40,17 +38,16 @@ def apply_optimizations():
print("Applying xformers cross attention optimization.")
ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.xformers_attention_forward
ldm.modules.diffusionmodules.model.AttnBlock.forward = sd_hijack_optimizations.xformers_attnblock_forward
+ elif cmd_opts.opt_sub_quad_attention:
+ print("Applying sub-quadratic cross attention optimization.")
+ ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.sub_quad_attention_forward
+ ldm.modules.diffusionmodules.model.AttnBlock.forward = sd_hijack_optimizations.sub_quad_attnblock_forward
elif cmd_opts.opt_split_attention_v1:
print("Applying v1 cross attention optimization.")
ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.split_cross_attention_forward_v1
elif not cmd_opts.disable_opt_split_attention and (cmd_opts.opt_split_attention_invokeai or not torch.cuda.is_available()):
- if not invokeAI_mps_available and shared.device.type == 'mps':
- print("The InvokeAI cross attention optimization for MPS requires the psutil package which is not installed.")
- print("Applying v1 cross attention optimization.")
- ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.split_cross_attention_forward_v1
- else:
- print("Applying cross attention optimization (InvokeAI).")
- ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.split_cross_attention_forward_invokeAI
+ print("Applying cross attention optimization (InvokeAI).")
+ ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.split_cross_attention_forward_invokeAI
elif not cmd_opts.disable_opt_split_attention and (cmd_opts.opt_split_attention or torch.cuda.is_available()):
print("Applying cross attention optimization (Doggettx).")
ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.split_cross_attention_forward
diff --git a/modules/sd_hijack_optimizations.py b/modules/sd_hijack_optimizations.py
index 02c87f40..f5c153e8 100644
--- a/modules/sd_hijack_optimizations.py
+++ b/modules/sd_hijack_optimizations.py
@@ -1,7 +1,7 @@
import math
import sys
import traceback
-import importlib
+import psutil
import torch
from torch import einsum
@@ -12,6 +12,8 @@ from einops import rearrange
from modules import shared
from modules.hypernetworks import hypernetwork
+from .sub_quadratic_attention import efficient_dot_product_attention
+
if shared.cmd_opts.xformers or shared.cmd_opts.force_enable_xformers:
try:
@@ -22,6 +24,19 @@ if shared.cmd_opts.xformers or shared.cmd_opts.force_enable_xformers:
print(traceback.format_exc(), file=sys.stderr)
+def get_available_vram():
+ if shared.device.type == 'cuda':
+ stats = torch.cuda.memory_stats(shared.device)
+ mem_active = stats['active_bytes.all.current']
+ mem_reserved = stats['reserved_bytes.all.current']
+ mem_free_cuda, _ = torch.cuda.mem_get_info(torch.cuda.current_device())
+ mem_free_torch = mem_reserved - mem_active
+ mem_free_total = mem_free_cuda + mem_free_torch
+ return mem_free_total
+ else:
+ return psutil.virtual_memory().available
+
+
# see https://github.com/basujindal/stable-diffusion/pull/117 for discussion
def split_cross_attention_forward_v1(self, x, context=None, mask=None):
h = self.heads
@@ -76,12 +91,7 @@ def split_cross_attention_forward(self, x, context=None, mask=None):
r1 = torch.zeros(q.shape[0], q.shape[1], v.shape[2], device=q.device, dtype=q.dtype)
- stats = torch.cuda.memory_stats(q.device)
- mem_active = stats['active_bytes.all.current']
- mem_reserved = stats['reserved_bytes.all.current']
- mem_free_cuda, _ = torch.cuda.mem_get_info(torch.cuda.current_device())
- mem_free_torch = mem_reserved - mem_active
- mem_free_total = mem_free_cuda + mem_free_torch
+ mem_free_total = get_available_vram()
gb = 1024 ** 3
tensor_size = q.shape[0] * q.shape[1] * k.shape[1] * q.element_size()
@@ -118,19 +128,8 @@ def split_cross_attention_forward(self, x, context=None, mask=None):
return self.to_out(r2)
-def check_for_psutil():
- try:
- spec = importlib.util.find_spec('psutil')
- return spec is not None
- except ModuleNotFoundError:
- return False
-
-invokeAI_mps_available = check_for_psutil()
-
# -- Taken from https://github.com/invoke-ai/InvokeAI and modified --
-if invokeAI_mps_available:
- import psutil
- mem_total_gb = psutil.virtual_memory().total // (1 << 30)
+mem_total_gb = psutil.virtual_memory().total // (1 << 30)
def einsum_op_compvis(q, k, v):
s = einsum('b i d, b j d -> b i j', q, k)
@@ -215,6 +214,70 @@ def split_cross_attention_forward_invokeAI(self, x, context=None, mask=None):
# -- End of code from https://github.com/invoke-ai/InvokeAI --
+
+# Based on Birch-san's modified implementation of sub-quadratic attention from https://github.com/Birch-san/diffusers/pull/1
+def sub_quad_attention_forward(self, x, context=None, mask=None):
+ assert mask is None, "attention-mask not currently implemented for SubQuadraticCrossAttnProcessor."
+
+ h = self.heads
+
+ q = self.to_q(x)
+ context = default(context, x)
+
+ context_k, context_v = hypernetwork.apply_hypernetwork(shared.loaded_hypernetwork, context)
+ k = self.to_k(context_k)
+ v = self.to_v(context_v)
+ del context, context_k, context_v, x
+
+ q = q.unflatten(-1, (h, -1)).transpose(1,2).flatten(end_dim=1)
+ k = k.unflatten(-1, (h, -1)).transpose(1,2).flatten(end_dim=1)
+ v = v.unflatten(-1, (h, -1)).transpose(1,2).flatten(end_dim=1)
+
+ x = sub_quad_attention(q, k, v, q_chunk_size=shared.cmd_opts.sub_quad_q_chunk_size, kv_chunk_size=shared.cmd_opts.sub_quad_kv_chunk_size, chunk_threshold_bytes=shared.cmd_opts.sub_quad_chunk_threshold, use_checkpoint=self.training)
+
+ x = x.unflatten(0, (-1, h)).transpose(1,2).flatten(start_dim=2)
+
+ out_proj, dropout = self.to_out
+ x = out_proj(x)
+ x = dropout(x)
+
+ return x
+
+def sub_quad_attention(q, k, v, q_chunk_size=1024, kv_chunk_size=None, kv_chunk_size_min=None, chunk_threshold_bytes=None, use_checkpoint=True):
+ bytes_per_token = torch.finfo(q.dtype).bits//8
+ batch_x_heads, q_tokens, _ = q.shape
+ _, k_tokens, _ = k.shape
+ qk_matmul_size_bytes = batch_x_heads * bytes_per_token * q_tokens * k_tokens
+
+ available_vram = int(get_available_vram() * 0.9) if q.device.type == 'mps' else int(get_available_vram() * 0.7)
+
+ if chunk_threshold_bytes is None:
+ chunk_threshold_bytes = available_vram
+ elif chunk_threshold_bytes == 0:
+ chunk_threshold_bytes = None
+
+ if kv_chunk_size_min is None:
+ kv_chunk_size_min = chunk_threshold_bytes // (batch_x_heads * bytes_per_token * (k.shape[2] + v.shape[2]))
+ elif kv_chunk_size_min == 0:
+ kv_chunk_size_min = None
+
+ if chunk_threshold_bytes is not None and qk_matmul_size_bytes <= chunk_threshold_bytes:
+ # the big matmul fits into our memory limit; do everything in 1 chunk,
+ # i.e. send it down the unchunked fast-path
+ query_chunk_size = q_tokens
+ kv_chunk_size = k_tokens
+
+ return efficient_dot_product_attention(
+ q,
+ k,
+ v,
+ query_chunk_size=q_chunk_size,
+ kv_chunk_size=kv_chunk_size,
+ kv_chunk_size_min = kv_chunk_size_min,
+ use_checkpoint=use_checkpoint,
+ )
+
+
def xformers_attention_forward(self, x, context=None, mask=None):
h = self.heads
q_in = self.to_q(x)
@@ -252,12 +315,7 @@ def cross_attention_attnblock_forward(self, x):
h_ = torch.zeros_like(k, device=q.device)
- stats = torch.cuda.memory_stats(q.device)
- mem_active = stats['active_bytes.all.current']
- mem_reserved = stats['reserved_bytes.all.current']
- mem_free_cuda, _ = torch.cuda.mem_get_info(torch.cuda.current_device())
- mem_free_torch = mem_reserved - mem_active
- mem_free_total = mem_free_cuda + mem_free_torch
+ mem_free_total = get_available_vram()
tensor_size = q.shape[0] * q.shape[1] * k.shape[2] * q.element_size()
mem_required = tensor_size * 2.5
@@ -312,3 +370,19 @@ def xformers_attnblock_forward(self, x):
return x + out
except NotImplementedError:
return cross_attention_attnblock_forward(self, x)
+
+def sub_quad_attnblock_forward(self, x):
+ h_ = x
+ h_ = self.norm(h_)
+ q = self.q(h_)
+ k = self.k(h_)
+ v = self.v(h_)
+ b, c, h, w = q.shape
+ q, k, v = map(lambda t: rearrange(t, 'b c h w -> b (h w) c'), (q, k, v))
+ q = q.contiguous()
+ k = k.contiguous()
+ v = v.contiguous()
+ out = sub_quad_attention(q, k, v, q_chunk_size=shared.cmd_opts.sub_quad_q_chunk_size, kv_chunk_size=shared.cmd_opts.sub_quad_kv_chunk_size, chunk_threshold_bytes=shared.cmd_opts.sub_quad_chunk_threshold, use_checkpoint=self.training)
+ out = rearrange(out, 'b (h w) c -> b c h w', h=h)
+ out = self.proj_out(out)
+ return x + out
diff --git a/modules/shared.py b/modules/shared.py
index d4ddeea0..487a7792 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -56,6 +56,10 @@ parser.add_argument("--xformers", action='store_true', help="enable xformers for
parser.add_argument("--force-enable-xformers", action='store_true', help="enable xformers for cross attention layers regardless of whether the checking code thinks you can run it; do not make bug reports if this fails to work")
parser.add_argument("--deepdanbooru", action='store_true', help="does not do anything")
parser.add_argument("--opt-split-attention", action='store_true', help="force-enables Doggettx's cross-attention layer optimization. By default, it's on for torch cuda.")
+parser.add_argument("--opt-sub-quad-attention", action='store_true', help="enable memory efficient sub-quadratic cross-attention layer optimization")
+parser.add_argument("--sub-quad-q-chunk-size", type=int, help="query chunk size for the sub-quadratic cross-attention layer optimization to use", default=1024)
+parser.add_argument("--sub-quad-kv-chunk-size", type=int, help="kv chunk size for the sub-quadratic cross-attention layer optimization to use", default=None)
+parser.add_argument("--sub-quad-chunk-threshold", type=int, help="the size threshold in bytes for the sub-quadratic cross-attention layer optimization to use chunking", default=None)
parser.add_argument("--opt-split-attention-invokeai", action='store_true', help="force-enables InvokeAI's cross-attention layer optimization. By default, it's on when cuda is unavailable.")
parser.add_argument("--opt-split-attention-v1", action='store_true', help="enable older version of split attention optimization that does not consume all the VRAM it can find")
parser.add_argument("--disable-opt-split-attention", action='store_true', help="force-disables cross-attention layer optimization")
diff --git a/modules/sub_quadratic_attention.py b/modules/sub_quadratic_attention.py
new file mode 100644
index 00000000..b11dc1c7
--- /dev/null
+++ b/modules/sub_quadratic_attention.py
@@ -0,0 +1,201 @@
+# original source:
+# https://github.com/AminRezaei0x443/memory-efficient-attention/blob/1bc0d9e6ac5f82ea43a375135c4e1d3896ee1694/memory_efficient_attention/attention_torch.py
+# license:
+# unspecified
+# credit:
+# Amin Rezaei (original author)
+# Alex Birch (optimized algorithm for 3D tensors, at the expense of removing bias, masking and callbacks)
+# implementation of:
+# "Self-attention Does Not Need O(n2) Memory":
+# https://arxiv.org/abs/2112.05682v2
+
+from functools import partial
+import torch
+from torch import Tensor
+from torch.utils.checkpoint import checkpoint
+import math
+from typing import Optional, NamedTuple, Protocol, List
+
+def dynamic_slice(
+ x: Tensor,
+ starts: List[int],
+ sizes: List[int],
+) -> Tensor:
+ slicing = [slice(start, start + size) for start, size in zip(starts, sizes)]
+ return x[slicing]
+
+class AttnChunk(NamedTuple):
+ exp_values: Tensor
+ exp_weights_sum: Tensor
+ max_score: Tensor
+
+class SummarizeChunk(Protocol):
+ @staticmethod
+ def __call__(
+ query: Tensor,
+ key: Tensor,
+ value: Tensor,
+ ) -> AttnChunk: ...
+
+class ComputeQueryChunkAttn(Protocol):
+ @staticmethod
+ def __call__(
+ query: Tensor,
+ key: Tensor,
+ value: Tensor,
+ ) -> Tensor: ...
+
+def _summarize_chunk(
+ query: Tensor,
+ key: Tensor,
+ value: Tensor,
+ scale: float,
+) -> AttnChunk:
+ attn_weights = torch.baddbmm(
+ torch.empty(1, 1, 1, device=query.device, dtype=query.dtype),
+ query,
+ key.transpose(1,2),
+ alpha=scale,
+ beta=0,
+ )
+ max_score, _ = torch.max(attn_weights, -1, keepdim=True)
+ max_score = max_score.detach()
+ exp_weights = torch.exp(attn_weights - max_score)
+ exp_values = torch.bmm(exp_weights, value)
+ max_score = max_score.squeeze(-1)
+ return AttnChunk(exp_values, exp_weights.sum(dim=-1), max_score)
+
+def _query_chunk_attention(
+ query: Tensor,
+ key: Tensor,
+ value: Tensor,
+ summarize_chunk: SummarizeChunk,
+ kv_chunk_size: int,
+) -> Tensor:
+ batch_x_heads, k_tokens, k_channels_per_head = key.shape
+ _, _, v_channels_per_head = value.shape
+
+ def chunk_scanner(chunk_idx: int) -> AttnChunk:
+ key_chunk = dynamic_slice(
+ key,
+ (0, chunk_idx, 0),
+ (batch_x_heads, kv_chunk_size, k_channels_per_head)
+ )
+ value_chunk = dynamic_slice(
+ value,
+ (0, chunk_idx, 0),
+ (batch_x_heads, kv_chunk_size, v_channels_per_head)
+ )
+ return summarize_chunk(query, key_chunk, value_chunk)
+
+ chunks: List[AttnChunk] = [
+ chunk_scanner(chunk) for chunk in torch.arange(0, k_tokens, kv_chunk_size)
+ ]
+ acc_chunk = AttnChunk(*map(torch.stack, zip(*chunks)))
+ chunk_values, chunk_weights, chunk_max = acc_chunk
+
+ global_max, _ = torch.max(chunk_max, 0, keepdim=True)
+ max_diffs = torch.exp(chunk_max - global_max)
+ chunk_values *= torch.unsqueeze(max_diffs, -1)
+ chunk_weights *= max_diffs
+
+ all_values = chunk_values.sum(dim=0)
+ all_weights = torch.unsqueeze(chunk_weights, -1).sum(dim=0)
+ return all_values / all_weights
+
+# TODO: refactor CrossAttention#get_attention_scores to share code with this
+def _get_attention_scores_no_kv_chunking(
+ query: Tensor,
+ key: Tensor,
+ value: Tensor,
+ scale: float,
+) -> Tensor:
+ attn_scores = torch.baddbmm(
+ torch.empty(1, 1, 1, device=query.device, dtype=query.dtype),
+ query,
+ key.transpose(1,2),
+ alpha=scale,
+ beta=0,
+ )
+ attn_probs = attn_scores.softmax(dim=-1)
+ del attn_scores
+ hidden_states_slice = torch.bmm(attn_probs, value)
+ return hidden_states_slice
+
+class ScannedChunk(NamedTuple):
+ chunk_idx: int
+ attn_chunk: AttnChunk
+
+def efficient_dot_product_attention(
+ query: Tensor,
+ key: Tensor,
+ value: Tensor,
+ query_chunk_size=1024,
+ kv_chunk_size: Optional[int] = None,
+ kv_chunk_size_min: Optional[int] = None,
+ use_checkpoint=True,
+):
+ """Computes efficient dot-product attention given query, key, and value.
+ This is efficient version of attention presented in
+ https://arxiv.org/abs/2112.05682v2 which comes with O(sqrt(n)) memory requirements.
+ Args:
+ query: queries for calculating attention with shape of
+ `[batch * num_heads, tokens, channels_per_head]`.
+ key: keys for calculating attention with shape of
+ `[batch * num_heads, tokens, channels_per_head]`.
+ value: values to be used in attention with shape of
+ `[batch * num_heads, tokens, channels_per_head]`.
+ query_chunk_size: int: query chunks size
+ kv_chunk_size: Optional[int]: key/value chunks size. if None: defaults to sqrt(key_tokens)
+ kv_chunk_size_min: Optional[int]: key/value minimum chunk size. only considered when kv_chunk_size is None. changes `sqrt(key_tokens)` into `max(sqrt(key_tokens), kv_chunk_size_min)`, to ensure our chunk sizes don't get too small (smaller chunks = more chunks = less concurrent work done).
+ use_checkpoint: bool: whether to use checkpointing (recommended True for training, False for inference)
+ Returns:
+ Output of shape `[batch * num_heads, query_tokens, channels_per_head]`.
+ """
+ batch_x_heads, q_tokens, q_channels_per_head = query.shape
+ _, k_tokens, _ = key.shape
+ scale = q_channels_per_head ** -0.5
+
+ kv_chunk_size = min(kv_chunk_size or int(math.sqrt(k_tokens)), k_tokens)
+ if kv_chunk_size_min is not None:
+ kv_chunk_size = max(kv_chunk_size, kv_chunk_size_min)
+
+ def get_query_chunk(chunk_idx: int) -> Tensor:
+ return dynamic_slice(
+ query,
+ (0, chunk_idx, 0),
+ (batch_x_heads, min(query_chunk_size, q_tokens), q_channels_per_head)
+ )
+
+ summarize_chunk: SummarizeChunk = partial(_summarize_chunk, scale=scale)
+ summarize_chunk: SummarizeChunk = partial(checkpoint, summarize_chunk) if use_checkpoint else summarize_chunk
+ compute_query_chunk_attn: ComputeQueryChunkAttn = partial(
+ _get_attention_scores_no_kv_chunking,
+ scale=scale
+ ) if k_tokens <= kv_chunk_size else (
+ # fast-path for when there's just 1 key-value chunk per query chunk (this is just sliced attention btw)
+ partial(
+ _query_chunk_attention,
+ kv_chunk_size=kv_chunk_size,
+ summarize_chunk=summarize_chunk,
+ )
+ )
+
+ if q_tokens <= query_chunk_size:
+ # fast-path for when there's just 1 query chunk
+ return compute_query_chunk_attn(
+ query=query,
+ key=key,
+ value=value,
+ )
+
+ # TODO: maybe we should use torch.empty_like(query) to allocate storage in-advance,
+ # and pass slices to be mutated, instead of torch.cat()ing the returned slices
+ res = torch.cat([
+ compute_query_chunk_attn(
+ query=get_query_chunk(i * query_chunk_size),
+ key=key,
+ value=value,
+ ) for i in range(math.ceil(q_tokens / query_chunk_size))
+ ], dim=1)
+ return res
diff --git a/requirements.txt b/requirements.txt
index 5bed694e..0dbea322 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -30,4 +30,4 @@ inflection
GitPython
torchsde
safetensors
-psutil; sys_platform == 'darwin'
+psutil
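
For orientation, a minimal usage sketch of the new module (not part of the patch): tensor sizes below are arbitrary made-up values, and shapes follow the docstring's [batch * num_heads, tokens, channels_per_head] convention.

import torch
from modules.sub_quadratic_attention import efficient_dot_product_attention

batch_x_heads, q_tokens, k_tokens, channels = 16, 4096, 4096, 40
q = torch.randn(batch_x_heads, q_tokens, channels)
k = torch.randn(batch_x_heads, k_tokens, channels)
v = torch.randn(batch_x_heads, k_tokens, channels)

out = efficient_dot_product_attention(
    q, k, v,
    query_chunk_size=1024,  # 4096 query tokens are processed as 4 chunks
    kv_chunk_size=None,     # defaults to sqrt(4096) = 64 key/value tokens per chunk
    use_checkpoint=False,   # inference; checkpointing only pays off during training
)
assert out.shape == (batch_x_heads, q_tokens, channels)

With kv_chunk_size=None the memory for attention weights scales with sqrt(k_tokens) per query chunk rather than with the full q_tokens * k_tokens matmul, which is the point of the optimization.
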
From b119815333026164f2bd7d1ca71f3e4f7a9afd0d Mon Sep 17 00:00:00 2001
From: brkirch
Date: Thu, 5 Jan 2023 04:37:17 -0500
Subject: [PATCH 107/172] Use narrow instead of dynamic_slice
---
modules/sub_quadratic_attention.py | 34 +++++++++++++++++-------------
1 file changed, 19 insertions(+), 15 deletions(-)
diff --git a/modules/sub_quadratic_attention.py b/modules/sub_quadratic_attention.py
index b11dc1c7..95924d24 100644
--- a/modules/sub_quadratic_attention.py
+++ b/modules/sub_quadratic_attention.py
@@ -5,6 +5,7 @@
# credit:
# Amin Rezaei (original author)
# Alex Birch (optimized algorithm for 3D tensors, at the expense of removing bias, masking and callbacks)
+# brkirch (modified to use torch.narrow instead of dynamic_slice implementation)
# implementation of:
# "Self-attention Does Not Need O(n2) Memory":
# https://arxiv.org/abs/2112.05682v2
@@ -16,13 +17,13 @@ from torch.utils.checkpoint import checkpoint
import math
from typing import Optional, NamedTuple, Protocol, List
-def dynamic_slice(
- x: Tensor,
- starts: List[int],
- sizes: List[int],
+def narrow_trunc(
+ input: Tensor,
+ dim: int,
+ start: int,
+ length: int
) -> Tensor:
- slicing = [slice(start, start + size) for start, size in zip(starts, sizes)]
- return x[slicing]
+ return torch.narrow(input, dim, start, length if input.shape[dim] >= start + length else input.shape[dim] - start)
class AttnChunk(NamedTuple):
exp_values: Tensor
@@ -76,15 +77,17 @@ def _query_chunk_attention(
_, _, v_channels_per_head = value.shape
def chunk_scanner(chunk_idx: int) -> AttnChunk:
- key_chunk = dynamic_slice(
+ key_chunk = narrow_trunc(
key,
- (0, chunk_idx, 0),
- (batch_x_heads, kv_chunk_size, k_channels_per_head)
+ 1,
+ chunk_idx,
+ kv_chunk_size
)
- value_chunk = dynamic_slice(
+ value_chunk = narrow_trunc(
value,
- (0, chunk_idx, 0),
- (batch_x_heads, kv_chunk_size, v_channels_per_head)
+ 1,
+ chunk_idx,
+ kv_chunk_size
)
return summarize_chunk(query, key_chunk, value_chunk)
@@ -161,10 +164,11 @@ def efficient_dot_product_attention(
kv_chunk_size = max(kv_chunk_size, kv_chunk_size_min)
def get_query_chunk(chunk_idx: int) -> Tensor:
- return dynamic_slice(
+ return narrow_trunc(
query,
- (0, chunk_idx, 0),
- (batch_x_heads, min(query_chunk_size, q_tokens), q_channels_per_head)
+ 1,
+ chunk_idx,
+ min(query_chunk_size, q_tokens)
)
summarize_chunk: SummarizeChunk = partial(_summarize_chunk, scale=scale)
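
A quick sketch (not part of the patch) of the behavioral detail the helper preserves: plain torch.narrow rejects windows that run past the end of a dimension, while the old slice-based dynamic_slice silently clamped them, which matters for the final key/value chunk when the token count is not a multiple of kv_chunk_size.

import torch

def narrow_trunc(input, dim, start, length):
    # same logic as the helper introduced above
    return torch.narrow(input, dim, start, length if input.shape[dim] >= start + length else input.shape[dim] - start)

x = torch.arange(10).unsqueeze(0)      # shape [1, 10]
print(narrow_trunc(x, 1, 8, 4).shape)  # torch.Size([1, 2]): clamped, like a Python slice
# plain torch.narrow(x, 1, 8, 4) would raise: start + length exceeds dimension size
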
From 683287d87f6401083a8d63eedc00ca7410214ca1 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Fri, 6 Jan 2023 08:52:06 +0300
Subject: [PATCH 108/172] rework saving training params to file #6372
---
modules/hypernetworks/hypernetwork.py | 28 +++++--------------
modules/shared.py | 2 +-
modules/textual_inversion/logging.py | 24 ++++++++++++++++
.../textual_inversion/textual_inversion.py | 23 ++-------------
4 files changed, 35 insertions(+), 42 deletions(-)
create mode 100644 modules/textual_inversion/logging.py
diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py
index 3237c37a..b0cfbe71 100644
--- a/modules/hypernetworks/hypernetwork.py
+++ b/modules/hypernetworks/hypernetwork.py
@@ -13,7 +13,7 @@ import tqdm
from einops import rearrange, repeat
from ldm.util import default
from modules import devices, processing, sd_models, shared, sd_samplers
-from modules.textual_inversion import textual_inversion
+from modules.textual_inversion import textual_inversion, logging
from modules.textual_inversion.learn_schedule import LearnRateScheduler
from torch import einsum
from torch.nn.init import normal_, xavier_normal_, xavier_uniform_, kaiming_normal_, kaiming_uniform_, zeros_
@@ -401,25 +401,7 @@ def create_hypernetwork(name, enable_sizes, overwrite_old, layer_structure=None,
hypernet.save(fn)
shared.reload_hypernetworks()
-# Note: textual_inversion.py has a nearly identical function of the same name.
-def save_settings_to_file(model_name, model_hash, initial_step, num_of_dataset_images, hypernetwork_name, layer_structure, activation_func, weight_init, add_layer_norm, use_dropout, learn_rate, batch_size, data_root, log_directory, training_width, training_height, steps, create_image_every, save_hypernetwork_every, template_file, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height):
- # Starting index of preview-related arguments.
- border_index = 21
- # Get a list of the argument names.
- arg_names = inspect.getfullargspec(save_settings_to_file).args
- # Create a list of the argument names to include in the settings string.
- names = arg_names[:border_index] # Include all arguments up until the preview-related ones.
- if preview_from_txt2img:
- names.extend(arg_names[border_index:]) # Include preview-related arguments if applicable.
- # Build the settings string.
- settings_str = "datetime : " + datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") + "\n"
- for name in names:
- if name != 'log_directory': # It's useless and redundant to save log_directory.
- value = locals()[name]
- settings_str += f"{name}: {value}\n"
- # Create or append to the file.
- with open(os.path.join(log_directory, 'settings.txt'), "a+") as fout:
- fout.write(settings_str + "\n\n")
+
def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, gradient_step, data_root, log_directory, training_width, training_height, steps, clip_grad_mode, clip_grad_value, shuffle_tags, tag_drop_out, latent_sampling_method, create_image_every, save_hypernetwork_every, template_file, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height):
# images allows training previews to have infotext. Importing it at the top causes a circular import problem.
@@ -477,7 +459,11 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, gradient_step,
ds = modules.textual_inversion.dataset.PersonalizedBase(data_root=data_root, width=training_width, height=training_height, repeats=shared.opts.training_image_repeats_per_epoch, placeholder_token=hypernetwork_name, model=shared.sd_model, cond_model=shared.sd_model.cond_stage_model, device=devices.device, template_file=template_file, include_cond=True, batch_size=batch_size, gradient_step=gradient_step, shuffle_tags=shuffle_tags, tag_drop_out=tag_drop_out, latent_sampling_method=latent_sampling_method)
if shared.opts.save_training_settings_to_txt:
- save_settings_to_file(checkpoint.model_name, '[{}]'.format(checkpoint.hash), initial_step, len(ds), hypernetwork_name, hypernetwork.layer_structure, hypernetwork.activation_func, hypernetwork.weight_init, hypernetwork.add_layer_norm, hypernetwork.use_dropout, learn_rate, batch_size, data_root, log_directory, training_width, training_height, steps, create_image_every, save_hypernetwork_every, template_file, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height)
+ saved_params = dict(
+ model_name=checkpoint.model_name, model_hash=checkpoint.hash, num_of_dataset_images=len(ds),
+ **{field: getattr(hypernetwork, field) for field in ['layer_structure', 'activation_func', 'weight_init', 'add_layer_norm', 'use_dropout', ]}
+ )
+ logging.save_settings_to_file(log_directory, {**saved_params, **locals()})
latent_sampling_method = ds.latent_sampling_method
diff --git a/modules/shared.py b/modules/shared.py
index f0e10b35..57e489d0 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -362,7 +362,7 @@ options_templates.update(options_section(('training', "Training"), {
"unload_models_when_training": OptionInfo(False, "Move VAE and CLIP to RAM when training if possible. Saves VRAM."),
"pin_memory": OptionInfo(False, "Turn on pin_memory for DataLoader. Makes training slightly faster but can increase memory usage."),
"save_optimizer_state": OptionInfo(False, "Saves Optimizer state as separate *.optim file. Training of embedding or HN can be resumed with the matching optim file."),
- "save_training_settings_to_txt": OptionInfo(False, "Save textual inversion and hypernet settings to a text file whenever training starts."),
+ "save_training_settings_to_txt": OptionInfo(True, "Save textual inversion and hypernet settings to a text file whenever training starts."),
"dataset_filename_word_regex": OptionInfo("", "Filename word regex"),
"dataset_filename_join_string": OptionInfo(" ", "Filename join string"),
"training_image_repeats_per_epoch": OptionInfo(1, "Number of repeats for a single input image per epoch; used only for displaying epoch number", gr.Number, {"precision": 0}),
diff --git a/modules/textual_inversion/logging.py b/modules/textual_inversion/logging.py
new file mode 100644
index 00000000..8b1981d5
--- /dev/null
+++ b/modules/textual_inversion/logging.py
@@ -0,0 +1,24 @@
+import datetime
+import json
+import os
+
+saved_params_shared = {"model_name", "model_hash", "initial_step", "num_of_dataset_images", "learn_rate", "batch_size", "data_root", "log_directory", "training_width", "training_height", "steps", "create_image_every", "template_file"}
+saved_params_ti = {"embedding_name", "num_vectors_per_token", "save_embedding_every", "save_image_with_stored_embedding"}
+saved_params_hypernet = {"hypernetwork_name", "layer_structure", "activation_func", "weight_init", "add_layer_norm", "use_dropout", "save_hypernetwork_every"}
+saved_params_all = saved_params_shared | saved_params_ti | saved_params_hypernet
+saved_params_previews = {"preview_prompt", "preview_negative_prompt", "preview_steps", "preview_sampler_index", "preview_cfg_scale", "preview_seed", "preview_width", "preview_height"}
+
+
+def save_settings_to_file(log_directory, all_params):
+ now = datetime.datetime.now()
+ params = {"datetime": now.strftime("%Y-%m-%d %H:%M:%S")}
+
+ keys = saved_params_all
+ if all_params.get('preview_from_txt2img'):
+ keys = keys | saved_params_previews
+
+ params.update({k: v for k, v in all_params.items() if k in keys})
+
+ filename = f'settings-{now.strftime("%Y-%m-%d-%H-%M-%S")}.json'
+ with open(os.path.join(log_directory, filename), "w") as file:
+ json.dump(params, file, indent=4)
diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py
index e9cf432f..f9f5e8cd 100644
--- a/modules/textual_inversion/textual_inversion.py
+++ b/modules/textual_inversion/textual_inversion.py
@@ -18,6 +18,8 @@ from modules.textual_inversion.learn_schedule import LearnRateScheduler
from modules.textual_inversion.image_embedding import (embedding_to_b64, embedding_from_b64,
insert_image_data_embed, extract_image_data_embed,
caption_image_overlay)
+from modules.textual_inversion.logging import save_settings_to_file
+
class Embedding:
def __init__(self, vec, name, step=None):
@@ -231,25 +233,6 @@ def write_loss(log_directory, filename, step, epoch_len, values):
**values,
})
-# Note: hypernetwork.py has a nearly identical function of the same name.
-def save_settings_to_file(model_name, model_hash, initial_step, num_of_dataset_images, embedding_name, vectors_per_token, learn_rate, batch_size, data_root, log_directory, training_width, training_height, steps, create_image_every, save_embedding_every, template_file, save_image_with_stored_embedding, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height):
- # Starting index of preview-related arguments.
- border_index = 18
- # Get a list of the argument names.
- arg_names = inspect.getfullargspec(save_settings_to_file).args
- # Create a list of the argument names to include in the settings string.
- names = arg_names[:border_index] # Include all arguments up until the preview-related ones.
- if preview_from_txt2img:
- names.extend(arg_names[border_index:]) # Include preview-related arguments if applicable.
- # Build the settings string.
- settings_str = "datetime : " + datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") + "\n"
- for name in names:
- if name != 'log_directory': # It's useless and redundant to save log_directory.
- value = locals()[name]
- settings_str += f"{name}: {value}\n"
- # Create or append to the file.
- with open(os.path.join(log_directory, 'settings.txt'), "a+") as fout:
- fout.write(settings_str + "\n\n")
def validate_train_inputs(model_name, learn_rate, batch_size, gradient_step, data_root, template_file, steps, save_model_every, create_image_every, log_directory, name="embedding"):
assert model_name, f"{name} not selected"
@@ -330,7 +313,7 @@ def train_embedding(embedding_name, learn_rate, batch_size, gradient_step, data_
ds = modules.textual_inversion.dataset.PersonalizedBase(data_root=data_root, width=training_width, height=training_height, repeats=shared.opts.training_image_repeats_per_epoch, placeholder_token=embedding_name, model=shared.sd_model, cond_model=shared.sd_model.cond_stage_model, device=devices.device, template_file=template_file, batch_size=batch_size, gradient_step=gradient_step, shuffle_tags=shuffle_tags, tag_drop_out=tag_drop_out, latent_sampling_method=latent_sampling_method)
if shared.opts.save_training_settings_to_txt:
- save_settings_to_file(checkpoint.model_name, '[{}]'.format(checkpoint.hash), initial_step, len(ds), embedding_name, len(embedding.vec), learn_rate, batch_size, data_root, log_directory, training_width, training_height, steps, create_image_every, save_embedding_every, template_file, save_image_with_stored_embedding, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height)
+ save_settings_to_file(log_directory, {**dict(model_name=checkpoint.model_name, model_hash=checkpoint.hash, num_of_dataset_images=len(ds), num_vectors_per_token=len(embedding.vec)), **locals()})
latent_sampling_method = ds.latent_sampling_method
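
A hedged sketch of the new API (all values below are made up): the training functions now pass their locals() plus a few explicit entries, and save_settings_to_file filters them against the whitelists in modules/textual_inversion/logging.py before writing a timestamped JSON file.

import tempfile
from modules.textual_inversion import logging

log_dir = tempfile.mkdtemp()  # stand-in for the real training log directory
logging.save_settings_to_file(log_dir, {
    "model_name": "v1-5-pruned-emaonly",  # hypothetical values throughout
    "model_hash": "abcd1234",
    "learn_rate": 0.005,
    "batch_size": 1,
    "steps": 10000,
    "preview_from_txt2img": False,  # False, so the preview_* keys stay excluded
    "pin_memory": True,             # not in any whitelist, silently dropped
})
# writes settings-YYYY-MM-DD-HH-MM-SS.json containing only the whitelisted keys
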
From b95a4c0ce5ab9c414e0494193bfff665f45e9e65 Mon Sep 17 00:00:00 2001
From: brkirch
Date: Fri, 6 Jan 2023 01:01:51 -0500
Subject: [PATCH 109/172] Change sub-quad chunk threshold to use percentage
---
modules/sd_hijack_optimizations.py | 18 +++++++++---------
modules/shared.py | 2 +-
2 files changed, 10 insertions(+), 10 deletions(-)
diff --git a/modules/sd_hijack_optimizations.py b/modules/sd_hijack_optimizations.py
index f5c153e8..b416e9ac 100644
--- a/modules/sd_hijack_optimizations.py
+++ b/modules/sd_hijack_optimizations.py
@@ -233,7 +233,7 @@ def sub_quad_attention_forward(self, x, context=None, mask=None):
k = k.unflatten(-1, (h, -1)).transpose(1,2).flatten(end_dim=1)
v = v.unflatten(-1, (h, -1)).transpose(1,2).flatten(end_dim=1)
- x = sub_quad_attention(q, k, v, q_chunk_size=shared.cmd_opts.sub_quad_q_chunk_size, kv_chunk_size=shared.cmd_opts.sub_quad_kv_chunk_size, chunk_threshold_bytes=shared.cmd_opts.sub_quad_chunk_threshold, use_checkpoint=self.training)
+ x = sub_quad_attention(q, k, v, q_chunk_size=shared.cmd_opts.sub_quad_q_chunk_size, kv_chunk_size=shared.cmd_opts.sub_quad_kv_chunk_size, chunk_threshold=shared.cmd_opts.sub_quad_chunk_threshold, use_checkpoint=self.training)
x = x.unflatten(0, (-1, h)).transpose(1,2).flatten(start_dim=2)
@@ -243,20 +243,20 @@ def sub_quad_attention_forward(self, x, context=None, mask=None):
return x
-def sub_quad_attention(q, k, v, q_chunk_size=1024, kv_chunk_size=None, kv_chunk_size_min=None, chunk_threshold_bytes=None, use_checkpoint=True):
+def sub_quad_attention(q, k, v, q_chunk_size=1024, kv_chunk_size=None, kv_chunk_size_min=None, chunk_threshold=None, use_checkpoint=True):
bytes_per_token = torch.finfo(q.dtype).bits//8
batch_x_heads, q_tokens, _ = q.shape
_, k_tokens, _ = k.shape
qk_matmul_size_bytes = batch_x_heads * bytes_per_token * q_tokens * k_tokens
- available_vram = int(get_available_vram() * 0.9) if q.device.type == 'mps' else int(get_available_vram() * 0.7)
-
- if chunk_threshold_bytes is None:
- chunk_threshold_bytes = available_vram
- elif chunk_threshold_bytes == 0:
+ if chunk_threshold is None:
+ chunk_threshold_bytes = int(get_available_vram() * 0.9) if q.device.type == 'mps' else int(get_available_vram() * 0.7)
+ elif chunk_threshold == 0:
chunk_threshold_bytes = None
+ else:
+ chunk_threshold_bytes = int(0.01 * chunk_threshold * get_available_vram())
- if kv_chunk_size_min is None:
+ if kv_chunk_size_min is None and chunk_threshold_bytes is not None:
kv_chunk_size_min = chunk_threshold_bytes // (batch_x_heads * bytes_per_token * (k.shape[2] + v.shape[2]))
elif kv_chunk_size_min == 0:
kv_chunk_size_min = None
@@ -382,7 +382,7 @@ def sub_quad_attnblock_forward(self, x):
q = q.contiguous()
k = k.contiguous()
v = v.contiguous()
- out = sub_quad_attention(q, k, v, q_chunk_size=shared.cmd_opts.sub_quad_q_chunk_size, kv_chunk_size=shared.cmd_opts.sub_quad_kv_chunk_size, chunk_threshold_bytes=shared.cmd_opts.sub_quad_chunk_threshold, use_checkpoint=self.training)
+ out = sub_quad_attention(q, k, v, q_chunk_size=shared.cmd_opts.sub_quad_q_chunk_size, kv_chunk_size=shared.cmd_opts.sub_quad_kv_chunk_size, chunk_threshold=shared.cmd_opts.sub_quad_chunk_threshold, use_checkpoint=self.training)
out = rearrange(out, 'b (h w) c -> b c h w', h=h)
out = self.proj_out(out)
return x + out
diff --git a/modules/shared.py b/modules/shared.py
index cb1dc312..d7a81db1 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -59,7 +59,7 @@ parser.add_argument("--opt-split-attention", action='store_true', help="force-en
parser.add_argument("--opt-sub-quad-attention", action='store_true', help="enable memory efficient sub-quadratic cross-attention layer optimization")
parser.add_argument("--sub-quad-q-chunk-size", type=int, help="query chunk size for the sub-quadratic cross-attention layer optimization to use", default=1024)
parser.add_argument("--sub-quad-kv-chunk-size", type=int, help="kv chunk size for the sub-quadratic cross-attention layer optimization to use", default=None)
-parser.add_argument("--sub-quad-chunk-threshold", type=int, help="the size threshold in bytes for the sub-quadratic cross-attention layer optimization to use chunking", default=None)
+parser.add_argument("--sub-quad-chunk-threshold", type=int, help="the percentage of VRAM threshold for the sub-quadratic cross-attention layer optimization to use chunking", default=None)
parser.add_argument("--opt-split-attention-invokeai", action='store_true', help="force-enables InvokeAI's cross-attention layer optimization. By default, it's on when cuda is unavailable.")
parser.add_argument("--opt-split-attention-v1", action='store_true', help="enable older version of split attention optimization that does not consume all the VRAM it can find")
parser.add_argument("--disable-opt-split-attention", action='store_true', help="force-disables cross-attention layer optimization")
From 5deb2a19ccea57a50252e8fcb07b4d17c6599def Mon Sep 17 00:00:00 2001
From: brkirch
Date: Fri, 6 Jan 2023 01:33:15 -0500
Subject: [PATCH 110/172] Allow Doggettx's cross attention opt without CUDA
---
modules/sd_hijack.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py
index ef25dadb..bd101e5b 100644
--- a/modules/sd_hijack.py
+++ b/modules/sd_hijack.py
@@ -50,7 +50,7 @@ def apply_optimizations():
print("Applying v1 cross attention optimization.")
ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.split_cross_attention_forward_v1
optimization_method = 'V1'
- elif not cmd_opts.disable_opt_split_attention and (cmd_opts.opt_split_attention_invokeai or not torch.cuda.is_available()):
+ elif not cmd_opts.disable_opt_split_attention and (cmd_opts.opt_split_attention_invokeai or not cmd_opts.opt_split_attention and not torch.cuda.is_available()):
print("Applying cross attention optimization (InvokeAI).")
ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.split_cross_attention_forward_invokeAI
optimization_method = 'InvokeAI'
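
The changed condition is easier to read as a predicate; this sketch (not part of the patch) spells out the cases the one-line change affects.

def picks_invokeai(disable_opt, invokeai_flag, split_flag, cuda_available):
    # mirrors: not disable_opt and (invokeai_flag or not split_flag and not cuda_available)
    return not disable_opt and (invokeai_flag or not split_flag and not cuda_available)

print(picks_invokeai(False, False, False, False))  # True: still the default on CPU/MPS
print(picks_invokeai(False, False, True, False))   # False: --opt-split-attention now wins even without CUDA
print(picks_invokeai(False, True, False, True))    # True: explicitly requested
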
From c9bded39ee05bd0507ccd27d2b674d86d6c0c8e8 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Fri, 6 Jan 2023 12:32:44 +0300
Subject: [PATCH 111/172] sort extensions by date and add an option to sort by
other columns
---
modules/ui_extensions.py | 44 +++++++++++++++++++++++++++++-----------
style.css | 11 +++++++++-
2 files changed, 42 insertions(+), 13 deletions(-)
diff --git a/modules/ui_extensions.py b/modules/ui_extensions.py
index eec9586f..742e745e 100644
--- a/modules/ui_extensions.py
+++ b/modules/ui_extensions.py
@@ -162,15 +162,15 @@ def install_extension_from_url(dirname, url):
shutil.rmtree(tmpdir, True)
-def install_extension_from_index(url, hide_tags):
+def install_extension_from_index(url, hide_tags, sort_column):
ext_table, message = install_extension_from_url(None, url)
- code, _ = refresh_available_extensions_from_data(hide_tags)
+ code, _ = refresh_available_extensions_from_data(hide_tags, sort_column)
return code, ext_table, message
-def refresh_available_extensions(url, hide_tags):
+def refresh_available_extensions(url, hide_tags, sort_column):
global available_extensions
import urllib.request
@@ -179,18 +179,28 @@ def refresh_available_extensions(url, hide_tags):
available_extensions = json.loads(text)
- code, tags = refresh_available_extensions_from_data(hide_tags)
+ code, tags = refresh_available_extensions_from_data(hide_tags, sort_column)
return url, code, gr.CheckboxGroup.update(choices=tags), ''
-def refresh_available_extensions_for_tags(hide_tags):
- code, _ = refresh_available_extensions_from_data(hide_tags)
+def refresh_available_extensions_for_tags(hide_tags, sort_column):
+ code, _ = refresh_available_extensions_from_data(hide_tags, sort_column)
return code, ''
-def refresh_available_extensions_from_data(hide_tags):
+sort_ordering = [
+ # (reverse, order_by_function)
+ (True, lambda x: x.get('added', 'z')),
+ (False, lambda x: x.get('added', 'z')),
+ (False, lambda x: x.get('name', 'z')),
+ (True, lambda x: x.get('name', 'z')),
+ (False, lambda x: 'z'),
+]
+
+
+def refresh_available_extensions_from_data(hide_tags, sort_column):
extlist = available_extensions["extensions"]
installed_extension_urls = {normalize_git_url(extension.remote): extension.name for extension in extensions.extensions}
@@ -210,8 +220,11 @@ def refresh_available_extensions_from_data(hide_tags):
"""
- for ext in extlist:
+ sort_reverse, sort_function = sort_ordering[sort_column if 0 <= sort_column < len(sort_ordering) else 0]
+
+ for ext in sorted(extlist, key=sort_function, reverse=sort_reverse):
name = ext.get("name", "noname")
+ added = ext.get('added', 'unknown')
url = ext.get("url", None)
description = ext.get("description", "")
extension_tags = ext.get("tags", [])
@@ -233,7 +246,7 @@ def refresh_available_extensions_from_data(hide_tags):
code += f"""
<td>{html.escape(name)} {tags_text}</td>
- <td>{html.escape(description)}</td>
+ <td>{html.escape(description)}<p class="info"><span class="date_added">Added: {html.escape(added)}</span></p></td>
<td>{install_code}</td>
@@ -291,25 +304,32 @@ def create_ui():
with gr.Row():
hide_tags = gr.CheckboxGroup(value=["ads", "localization", "installed"], label="Hide extensions with tags", choices=["script", "ads", "localization", "installed"])
+ sort_column = gr.Radio(value="newest first", label="Order", choices=["newest first", "oldest first", "a-z", "z-a", "internal order", ], type="index")
install_result = gr.HTML()
available_extensions_table = gr.HTML()
refresh_available_extensions_button.click(
fn=modules.ui.wrap_gradio_call(refresh_available_extensions, extra_outputs=[gr.update(), gr.update(), gr.update()]),
- inputs=[available_extensions_index, hide_tags],
+ inputs=[available_extensions_index, hide_tags, sort_column],
outputs=[available_extensions_index, available_extensions_table, hide_tags, install_result],
)
install_extension_button.click(
fn=modules.ui.wrap_gradio_call(install_extension_from_index, extra_outputs=[gr.update(), gr.update()]),
- inputs=[extension_to_install, hide_tags],
+ inputs=[extension_to_install, hide_tags, sort_column],
outputs=[available_extensions_table, extensions_table, install_result],
)
hide_tags.change(
fn=modules.ui.wrap_gradio_call(refresh_available_extensions_for_tags, extra_outputs=[gr.update()]),
- inputs=[hide_tags],
+ inputs=[hide_tags, sort_column],
+ outputs=[available_extensions_table, install_result]
+ )
+
+ sort_column.change(
+ fn=modules.ui.wrap_gradio_call(refresh_available_extensions_for_tags, extra_outputs=[gr.update()]),
+ inputs=[hide_tags, sort_column],
outputs=[available_extensions_table, install_result]
)
diff --git a/style.css b/style.css
index ee74d79e..f1b23b53 100644
--- a/style.css
+++ b/style.css
@@ -555,7 +555,7 @@ img2maskimg, #img2maskimg > .h-60, #img2maskimg > .h-60 > div, #img2maskimg > .h
/* Extensions */
-#tab_extensions table{
+#tab_extensions table{
border-collapse: collapse;
}
@@ -581,6 +581,15 @@ img2maskimg, #img2maskimg > .h-60, #img2maskimg > .h-60 > div, #img2maskimg > .h
font-size: 95%;
}
+#available_extensions .info{
+ margin: 0;
+}
+
+#available_extensions .date_added{
+ opacity: 0.85;
+ font-size: 90%;
+}
+
#image_buttons_txt2img button, #image_buttons_img2img button, #image_buttons_extras button{
min-width: auto;
padding-left: 0.5em;
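
For clarity, a self-contained sketch of how the new Order radio drives sorting (the extension entries here are made up): the Radio uses type="index", so its value indexes straight into sort_ordering.

sort_ordering = [  # (reverse, order_by_function), same table as above
    (True,  lambda x: x.get('added', 'z')),  # 0: newest first
    (False, lambda x: x.get('added', 'z')),  # 1: oldest first
    (False, lambda x: x.get('name', 'z')),   # 2: a-z
    (True,  lambda x: x.get('name', 'z')),   # 3: z-a
    (False, lambda x: 'z'),                  # 4: internal order (constant key, stable sort)
]

extlist = [
    {"name": "wildcards"},  # no 'added' date
    {"name": "dreambooth", "added": "2022-11-01"},
    {"name": "aesthetic-gradients", "added": "2022-10-15"},
]

sort_reverse, sort_function = sort_ordering[1]  # "oldest first"
print([e["name"] for e in sorted(extlist, key=sort_function, reverse=sort_reverse)])
# ['aesthetic-gradients', 'dreambooth', 'wildcards']: the 'z' default sorts undated entries last
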
From 65ed4421e609dda3112f236c13e4db14caa71364 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Fri, 6 Jan 2023 13:55:50 +0300
Subject: [PATCH 112/172] add callback for when the script is unloaded
---
modules/script_callbacks.py | 18 +++++++++++++++++-
webui.py | 2 ++
2 files changed, 19 insertions(+), 1 deletion(-)
diff --git a/modules/script_callbacks.py b/modules/script_callbacks.py
index de69fd9f..608c5300 100644
--- a/modules/script_callbacks.py
+++ b/modules/script_callbacks.py
@@ -71,6 +71,7 @@ callback_map = dict(
callbacks_before_component=[],
callbacks_after_component=[],
callbacks_image_grid=[],
+ callbacks_script_unloaded=[],
)
@@ -171,6 +172,14 @@ def image_grid_callback(params: ImageGridLoopParams):
report_exception(c, 'image_grid')
+def script_unloaded_callback():
+ for c in reversed(callback_map['callbacks_script_unloaded']):
+ try:
+ c.callback()
+ except Exception:
+ report_exception(c, 'script_unloaded')
+
+
def add_callback(callbacks, fun):
stack = [x for x in inspect.stack() if x.filename != __file__]
filename = stack[0].filename if len(stack) > 0 else 'unknown file'
@@ -202,7 +211,7 @@ def on_app_started(callback):
def on_model_loaded(callback):
"""register a function to be called when the stable diffusion model is created; the model is
- passed as an argument"""
+ passed as an argument; this function is also called when the script is reloaded. """
add_callback(callback_map['callbacks_model_loaded'], callback)
@@ -279,3 +288,10 @@ def on_image_grid(callback):
- params: ImageGridLoopParams - parameters to be used for grid creation. Can be modified.
"""
add_callback(callback_map['callbacks_image_grid'], callback)
+
+
+def on_script_unloaded(callback):
+ """register a function to be called before the script is unloaded. Any hooks/hijacks/monkeying about that
+ the script did should be reverted here"""
+
+ add_callback(callback_map['callbacks_script_unloaded'], callback)
diff --git a/webui.py b/webui.py
index ff6eb6eb..733a06b5 100644
--- a/webui.py
+++ b/webui.py
@@ -187,12 +187,14 @@ def webui():
sd_samplers.set_samplers()
+ modules.script_callbacks.script_unloaded_callback()
extensions.list_extensions()
localization.list_localizations(cmd_opts.localizations_dir)
modelloader.forbid_loaded_nonbuiltin_upscalers()
modules.scripts.reload_scripts()
+ modules.script_callbacks.model_loaded_callback(shared.sd_model)
modelloader.load_upscalers()
for module in [module for name, module in sys.modules.items() if name.startswith("modules.ui")]:
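
A minimal sketch of how an extension script would use the new hook (undo_my_hijack is a hypothetical helper, not part of the patch):

from modules import script_callbacks

def undo_my_hijack():
    # revert any monkey-patched functions, event hooks, etc. installed by the script
    pass

script_callbacks.on_script_unloaded(undo_my_hijack)
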
From 848605fb654a55ee6947335d7df6e13366606fad Mon Sep 17 00:00:00 2001
From: brkirch
Date: Fri, 6 Jan 2023 06:58:49 -0500
Subject: [PATCH 113/172] Update README.md
---
README.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/README.md b/README.md
index c1944d33..fea6cb35 100644
--- a/README.md
+++ b/README.md
@@ -141,7 +141,7 @@ Licenses for borrowed code can be found in `Settings -> Licenses` screen, and al
- Ideas for optimizations - https://github.com/basujindal/stable-diffusion
- Cross Attention layer optimization - Doggettx - https://github.com/Doggettx/stable-diffusion, original idea for prompt editing.
- Cross Attention layer optimization - InvokeAI, lstein - https://github.com/invoke-ai/InvokeAI (originally http://github.com/lstein/stable-diffusion)
-- Sub-quadratic Cross Attention layer optimization - Alex Birch (https://github.com/Birch-san), Amin Rezaei (https://github.com/AminRezaei0x443)
+- Sub-quadratic Cross Attention layer optimization - Alex Birch (https://github.com/Birch-san/diffusers/pull/1), Amin Rezaei (https://github.com/AminRezaei0x443/memory-efficient-attention)
- Textual Inversion - Rinon Gal - https://github.com/rinongal/textual_inversion (we're not using his code, but we are using his ideas).
- Idea for SD upscale - https://github.com/jquesnelle/txt2imghd
- Noise generation for outpainting mk2 - https://github.com/parlance-zz/g-diffuser-bot
From 5e6566324bba20554bcc04f3dda798e560397f38 Mon Sep 17 00:00:00 2001
From: brkirch
Date: Fri, 6 Jan 2023 07:06:26 -0500
Subject: [PATCH 114/172] Always end version number with a digit
---
webui.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/webui.py b/webui.py
index 733a06b5..8737e593 100644
--- a/webui.py
+++ b/webui.py
@@ -16,7 +16,7 @@ from modules.paths import script_path
import torch
# Truncate version number of nightly/local build of PyTorch to not cause exceptions with CodeFormer or Safetensors
if ".dev" in torch.__version__ or "+git" in torch.__version__:
- torch.__version__ = re.search(r'[\d.]+', torch.__version__).group(0)
+ torch.__version__ = re.search(r'[\d.]+[\d]', torch.__version__).group(0)
from modules import shared, devices, sd_samplers, upscaler, extensions, localization, ui_tempdir
import modules.codeformer_model as codeformer
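
Why the trailing [\d] matters, shown on hypothetical nightly version strings: the old pattern could end on a dot, which downstream version parsing chokes on.

import re

for ver in ["1.14.0.dev20230106+cpu", "2.0.0.dev20230105+git1234abc"]:
    old = re.search(r'[\d.]+', ver).group(0)      # "1.14.0." - may keep a trailing dot
    new = re.search(r'[\d.]+[\d]', ver).group(0)  # "1.14.0" - always ends with a digit
    print(old, new)
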
From 3246a2d6b898da6a98fe9df4dc67944635a41bd3 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Fri, 6 Jan 2023 16:03:43 +0300
Subject: [PATCH 115/172] remove restriction for saving dropdowns to
ui-config.json
---
modules/scripts.py | 1 -
modules/ui.py | 10 ++--------
2 files changed, 2 insertions(+), 9 deletions(-)
diff --git a/modules/scripts.py b/modules/scripts.py
index 0c44f191..35164093 100644
--- a/modules/scripts.py
+++ b/modules/scripts.py
@@ -290,7 +290,6 @@ class ScriptRunner:
script.group = group
dropdown = gr.Dropdown(label="Script", elem_id="script_list", choices=["None"] + self.titles, value="None", type="index")
- dropdown.save_to_config = True
inputs[0] = dropdown
for script in self.selectable_scripts:
diff --git a/modules/ui.py b/modules/ui.py
index 030f0685..b79d24ee 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -435,11 +435,9 @@ def create_toprow(is_img2img):
with gr.Row():
with gr.Column(scale=1, elem_id="style_pos_col"):
prompt_style = gr.Dropdown(label="Style 1", elem_id=f"{id_part}_style_index", choices=[k for k, v in shared.prompt_styles.styles.items()], value=next(iter(shared.prompt_styles.styles.keys())))
- prompt_style.save_to_config = True
with gr.Column(scale=1, elem_id="style_neg_col"):
prompt_style2 = gr.Dropdown(label="Style 2", elem_id=f"{id_part}_style2_index", choices=[k for k, v in shared.prompt_styles.styles.items()], value=next(iter(shared.prompt_styles.styles.keys())))
- prompt_style2.save_to_config = True
return prompt, prompt_style, negative_prompt, prompt_style2, submit, button_interrogate, button_deepbooru, prompt_style_apply, save_style, paste, token_counter, token_button
@@ -638,7 +636,6 @@ def create_sampler_and_steps_selection(choices, tabname):
if opts.samplers_in_dropdown:
with FormRow(elem_id=f"sampler_selection_{tabname}"):
sampler_index = gr.Dropdown(label='Sampling method', elem_id=f"{tabname}_sampling", choices=[x.name for x in choices], value=choices[0].name, type="index")
- sampler_index.save_to_config = True
steps = gr.Slider(minimum=1, maximum=150, step=1, elem_id=f"{tabname}_steps", label="Sampling steps", value=20)
else:
with FormGroup(elem_id=f"sampler_selection_{tabname}"):
@@ -1794,7 +1791,7 @@ def create_ui():
if init_field is not None:
init_field(saved_value)
- if type(x) in [gr.Slider, gr.Radio, gr.Checkbox, gr.Textbox, gr.Number] and x.visible:
+ if type(x) in [gr.Slider, gr.Radio, gr.Checkbox, gr.Textbox, gr.Number, gr.Dropdown] and x.visible:
apply_field(x, 'visible')
if type(x) == gr.Slider:
@@ -1815,11 +1812,8 @@ def create_ui():
if type(x) == gr.Number:
apply_field(x, 'value')
- # Since there are many dropdowns that shouldn't be saved,
- # we only mark dropdowns that should be saved.
- if type(x) == gr.Dropdown and getattr(x, 'save_to_config', False):
+ if type(x) == gr.Dropdown:
apply_field(x, 'value', lambda val: val in x.choices, getattr(x, 'init_field', None))
- apply_field(x, 'visible')
visit(txt2img_interface, loadsave, "txt2img")
visit(img2img_interface, loadsave, "img2img")
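
The practical effect on dropdowns, in a simplified sketch (names are stand-ins, not the actual ui.py variables): a saved value is restored only if it is still among the component's choices, which is exactly what the lambda passed to apply_field checks.

saved_value = "DPM++ 2M Karras"  # hypothetical value read from ui-config.json
choices = ["Euler a", "Euler", "DPM++ 2M Karras"]

if saved_value in choices:        # the condition handed to apply_field
    dropdown_value = saved_value  # restore the saved selection
    print(dropdown_value)
# otherwise the saved value is ignored and the dropdown keeps its default
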
From 50194de93ffc9db763d9b08fcc9c3bde1aa86151 Mon Sep 17 00:00:00 2001
From: Kuma <36082288+KumiIT@users.noreply.github.com>
Date: Fri, 6 Jan 2023 16:12:45 +0100
Subject: [PATCH 116/172] typo UI fixes #6391
---
modules/shared.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/modules/shared.py b/modules/shared.py
index 57e489d0..865c3c07 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -430,7 +430,7 @@ options_templates.update(options_section(('ui', "User interface"), {
"samplers_in_dropdown": OptionInfo(True, "Use dropdown for sampler selection instead of radio group"),
"dimensions_and_batch_together": OptionInfo(True, "Show Witdth/Height and Batch sliders in same row"),
'quicksettings': OptionInfo("sd_model_checkpoint", "Quicksettings list"),
- 'ui_reorder': OptionInfo(", ".join(ui_reorder_categories), "txt2img/ing2img UI item order"),
+ 'ui_reorder': OptionInfo(", ".join(ui_reorder_categories), "txt2img/img2img UI item order"),
'localization': OptionInfo("None", "Localization (requires restart)", gr.Dropdown, lambda: {"choices": ["None"] + list(localization.localizations.keys())}, refresh=lambda: localization.list_localizations(cmd_opts.localizations_dir)),
}))
From 3992ecbe6e46a465062508c677964534e7397f72 Mon Sep 17 00:00:00 2001
From: Mitchell Boot <47387831+Mitchell1711@users.noreply.github.com>
Date: Fri, 6 Jan 2023 18:02:46 +0100
Subject: [PATCH 117/172] Added UI elements
Added a new row to hires fix that shows the new resolution after scaling
---
modules/ui.py | 12 ++++++++++++
1 file changed, 12 insertions(+)
diff --git a/modules/ui.py b/modules/ui.py
index b79d24ee..20f7d2a2 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -255,6 +255,12 @@ def add_style(name: str, prompt: str, negative_prompt: str):
return [gr.Dropdown.update(visible=True, choices=list(shared.prompt_styles.styles)) for _ in range(4)]
+def calc_resolution_hires(x, y, scale):
+ #final res can only be a multiple of 8
+ scaled_x = int(x * scale // 8) * 8
+ scaled_y = int(y * scale // 8) * 8
+
+ return "Upscaled Resolution: "+str(scaled_x)+"x"+str(scaled_y)+"
"
def apply_styles(prompt, prompt_neg, style1_name, style2_name):
prompt = shared.prompt_styles.apply_styles_to_prompt(prompt, [style1_name, style2_name])
@@ -718,6 +724,12 @@ def create_ui():
hr_scale = gr.Slider(minimum=1.0, maximum=4.0, step=0.05, label="Upscale by", value=2.0, elem_id="txt2img_hr_scale")
hr_resize_x = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize width to", value=0, elem_id="txt2img_hr_resize_x")
hr_resize_y = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize height to", value=0, elem_id="txt2img_hr_resize_y")
+
+ with FormRow(elem_id="txt2img_hires_fix_row3"):
+ hr_final_resolution = gr.HTML(value=calc_resolution_hires(width.value, height.value, hr_scale.value), elem_id="txtimg_hr_finalres")
+ hr_scale.change(fn=calc_resolution_hires, inputs=[width, height, hr_scale], outputs=hr_final_resolution, show_progress=False)
+ width.change(fn=calc_resolution_hires, inputs=[width, height, hr_scale], outputs=hr_final_resolution, show_progress=False)
+ height.change(fn=calc_resolution_hires, inputs=[width, height, hr_scale], outputs=hr_final_resolution, show_progress=False)
elif category == "batch":
if not opts.dimensions_and_batch_together:
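
The rounding rule in calc_resolution_hires, isolated for clarity (not part of the patch); it floors each scaled dimension to a multiple of 8:

def rounded_hires(x, y, scale):
    # same arithmetic as calc_resolution_hires, returning numbers instead of HTML
    return int(x * scale // 8) * 8, int(y * scale // 8) * 8

print(rounded_hires(512, 512, 2.0))   # (1024, 1024)
print(rounded_hires(512, 768, 1.55))  # (792, 1184): 793.6 and 1190.4 floored to multiples of 8
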
From 991368c8d54404d8e13d4c6e76a0f32644e65ad4 Mon Sep 17 00:00:00 2001
From: Mitchell Boot <47387831+Mitchell1711@users.noreply.github.com>
Date: Fri, 6 Jan 2023 18:24:29 +0100
Subject: [PATCH 118/172] remove camelcase
---
modules/ui.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/modules/ui.py b/modules/ui.py
index 20f7d2a2..6fc8b7d7 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -260,7 +260,7 @@ def calc_resolution_hires(x, y, scale):
scaled_x = int(x * scale // 8) * 8
scaled_y = int(y * scale // 8) * 8
- return "Upscaled Resolution: "+str(scaled_x)+"x"+str(scaled_y)+"
"
+ return "Upscaled resolution: "+str(scaled_x)+"x"+str(scaled_y)+"
"
def apply_styles(prompt, prompt_neg, style1_name, style2_name):
prompt = shared.prompt_styles.apply_styles_to_prompt(prompt, [style1_name, style2_name])
From c18add68ef7d2de3617cbbaff864b0c74cfdf6c0 Mon Sep 17 00:00:00 2001
From: brkirch
Date: Fri, 6 Jan 2023 16:42:47 -0500
Subject: [PATCH 119/172] Added license
---
html/licenses.html | 29 ++++++++++++++++++++++++++++-
modules/sd_hijack_optimizations.py | 1 +
modules/sub_quadratic_attention.py | 2 +-
3 files changed, 30 insertions(+), 2 deletions(-)
diff --git a/html/licenses.html b/html/licenses.html
index 9eeaa072..570630eb 100644
--- a/html/licenses.html
+++ b/html/licenses.html
@@ -184,7 +184,7 @@ SOFTWARE.
-Code added by contirubtors, most likely copied from this repository.
+Code added by contributors, most likely copied from this repository.
Apache License
@@ -390,3 +390,30 @@ SOFTWARE.
limitations under the License.
+
+The sub-quadratic cross attention optimization uses modified code from the Memory Efficient Attention package that Alex Birch optimized for 3D tensors. This license is updated to reflect that.
+
+MIT License
+
+Copyright (c) 2023 Alex Birch
+Copyright (c) 2023 Amin Rezaei
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+
diff --git a/modules/sd_hijack_optimizations.py b/modules/sd_hijack_optimizations.py
index b416e9ac..cdc63ed7 100644
--- a/modules/sd_hijack_optimizations.py
+++ b/modules/sd_hijack_optimizations.py
@@ -216,6 +216,7 @@ def split_cross_attention_forward_invokeAI(self, x, context=None, mask=None):
# Based on Birch-san's modified implementation of sub-quadratic attention from https://github.com/Birch-san/diffusers/pull/1
+# The sub_quad_attention_forward function is under the MIT License listed under Memory Efficient Attention in the Licenses section of the web UI interface
def sub_quad_attention_forward(self, x, context=None, mask=None):
assert mask is None, "attention-mask not currently implemented for SubQuadraticCrossAttnProcessor."
diff --git a/modules/sub_quadratic_attention.py b/modules/sub_quadratic_attention.py
index 95924d24..fea7aaac 100644
--- a/modules/sub_quadratic_attention.py
+++ b/modules/sub_quadratic_attention.py
@@ -1,7 +1,7 @@
# original source:
# https://github.com/AminRezaei0x443/memory-efficient-attention/blob/1bc0d9e6ac5f82ea43a375135c4e1d3896ee1694/memory_efficient_attention/attention_torch.py
# license:
-# unspecified
+# MIT License (see Memory Efficient Attention under the Licenses section in the web UI interface for the full license)
# credit:
# Amin Rezaei (original author)
# Alex Birch (optimized algorithm for 3D tensors, at the expense of removing bias, masking and callbacks)
From 82c1f10b144f733460feead0bdc37a861489dc57 Mon Sep 17 00:00:00 2001
From: Dean Hopkins
Date: Fri, 6 Jan 2023 22:00:12 +0000
Subject: [PATCH 120/172] increase upscale api validation limit
---
modules/api/models.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/modules/api/models.py b/modules/api/models.py
index f77951fc..22b88c59 100644
--- a/modules/api/models.py
+++ b/modules/api/models.py
@@ -125,7 +125,7 @@ class ExtrasBaseRequest(BaseModel):
gfpgan_visibility: float = Field(default=0, title="GFPGAN Visibility", ge=0, le=1, allow_inf_nan=False, description="Sets the visibility of GFPGAN, values should be between 0 and 1.")
codeformer_visibility: float = Field(default=0, title="CodeFormer Visibility", ge=0, le=1, allow_inf_nan=False, description="Sets the visibility of CodeFormer, values should be between 0 and 1.")
codeformer_weight: float = Field(default=0, title="CodeFormer Weight", ge=0, le=1, allow_inf_nan=False, description="Sets the weight of CodeFormer, values should be between 0 and 1.")
- upscaling_resize: float = Field(default=2, title="Upscaling Factor", ge=1, le=4, description="By how much to upscale the image, only used when resize_mode=0.")
+ upscaling_resize: float = Field(default=2, title="Upscaling Factor", ge=1, le=8, description="By how much to upscale the image, only used when resize_mode=0.")
upscaling_resize_w: int = Field(default=512, title="Target Width", ge=1, description="Target width for the upscaler to hit. Only used when resize_mode=1.")
upscaling_resize_h: int = Field(default=512, title="Target Height", ge=1, description="Target height for the upscaler to hit. Only used when resize_mode=1.")
upscaling_crop: bool = Field(default=True, title="Crop to fit", description="Should the upscaler crop the image to fit in the chosen size?")
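
A self-contained sketch of what the widened bound means at the API boundary (the model below is a minimal stand-in, not the full ExtrasBaseRequest):

from pydantic import BaseModel, Field, ValidationError

class Req(BaseModel):  # stand-in for ExtrasBaseRequest
    upscaling_resize: float = Field(default=2, ge=1, le=8)

Req(upscaling_resize=8)       # accepted after this patch (the limit was 4)
try:
    Req(upscaling_resize=12)  # still out of range
except ValidationError as e:
    print(e.errors()[0]["msg"])
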
From 79e39fae6110c20a3ee6255e2841c877f65e8cbd Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Sat, 7 Jan 2023 01:45:28 +0300
Subject: [PATCH 121/172] CLIP hijack rework
---
modules/sd_hijack.py | 6 +-
modules/sd_hijack_clip.py | 328 +++++++++---------
modules/sd_hijack_clip_old.py | 81 +++++
.../textual_inversion/textual_inversion.py | 1 -
modules/ui.py | 2 +-
5 files changed, 246 insertions(+), 172 deletions(-)
create mode 100644 modules/sd_hijack_clip_old.py
diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py
index fa2cd4bb..71cc145a 100644
--- a/modules/sd_hijack.py
+++ b/modules/sd_hijack.py
@@ -150,10 +150,10 @@ class StableDiffusionModelHijack:
def clear_comments(self):
self.comments = []
- def tokenize(self, text):
- _, remade_batch_tokens, _, _, _, token_count = self.clip.process_text([text])
+ def get_prompt_lengths(self, text):
+ _, token_count = self.clip.process_texts([text])
- return remade_batch_tokens[0], token_count, sd_hijack_clip.get_target_prompt_token_count(token_count)
+ return token_count, self.clip.get_target_prompt_token_count(token_count)
class EmbeddingsWithFixes(torch.nn.Module):
diff --git a/modules/sd_hijack_clip.py b/modules/sd_hijack_clip.py
index ca92b142..ac3020d7 100644
--- a/modules/sd_hijack_clip.py
+++ b/modules/sd_hijack_clip.py
@@ -1,12 +1,28 @@
import math
+from collections import namedtuple
import torch
from modules import prompt_parser, devices
from modules.shared import opts
-def get_target_prompt_token_count(token_count):
- return math.ceil(max(token_count, 1) / 75) * 75
+
+class PromptChunk:
+ """
+ This object contains token ids, weight (multipliers:1.4) and textual inversion embedding info for a chunk of prompt.
+ If a prompt is short, it is represented by one PromptChunk, otherwise, multiple are necessary.
+ Each PromptChunk contains an exact number of tokens - 77, which includes one start token and one end token,
+ so just 75 tokens from the prompt.
+ """
+
+ def __init__(self):
+ self.tokens = []
+ self.multipliers = []
+ self.fixes = []
+
+
+PromptChunkFix = namedtuple('PromptChunkFix', ['offset', 'embedding'])
+"""This is a marker showing that textual inversion embedding's vectors have to placed at offset in the prompt chunk"""
class FrozenCLIPEmbedderWithCustomWordsBase(torch.nn.Module):
@@ -14,17 +30,49 @@ class FrozenCLIPEmbedderWithCustomWordsBase(torch.nn.Module):
super().__init__()
self.wrapped = wrapped
self.hijack = hijack
+ self.chunk_length = 75
+
+ def empty_chunk(self):
+ """creates an empty PromptChunk and returns it"""
+
+ chunk = PromptChunk()
+ chunk.tokens = [self.id_start] + [self.id_end] * (self.chunk_length + 1)
+ chunk.multipliers = [1.0] * (self.chunk_length + 2)
+ return chunk
+
+ def get_target_prompt_token_count(self, token_count):
+ """returns the maximum number of tokens a prompt of a known length can have before it requires one more PromptChunk to be represented"""
+
+ return math.ceil(max(token_count, 1) / self.chunk_length) * self.chunk_length
def tokenize(self, texts):
+ """Converts a batch of texts into a batch of token ids"""
+
raise NotImplementedError
def encode_with_transformers(self, tokens):
+ """
+ converts a batch of token ids (in python lists) into a single tensor with numeric representation of those tokens;
+ All python lists with tokens are assumed to have the same length, usually 77.
+ if input is a list with B elements and each element has T tokens, expected output shape is (B, T, C), where C depends on
+ the model - it can be 768 or 1024
+ """
+
raise NotImplementedError
def encode_embedding_init_text(self, init_text, nvpt):
+ """Converts text into a tensor with this text's tokens' embeddings. Note that those are embeddings before they are passed through
+ transformers. nvpt is used as a maximum length in tokens. If text produces fewer tokens than nvpt, only that many are returned."""
+
raise NotImplementedError
- def tokenize_line(self, line, used_custom_terms, hijack_comments):
+ def tokenize_line(self, line):
+ """
+ this transforms a single prompt into a list of PromptChunk objects - as many as needed to
+ represent the prompt.
+ Returns the list and the total number of tokens in the prompt.
+ """
+
if opts.enable_emphasis:
parsed = prompt_parser.parse_prompt_attention(line)
else:
@@ -32,205 +80,152 @@ class FrozenCLIPEmbedderWithCustomWordsBase(torch.nn.Module):
tokenized = self.tokenize([text for text, _ in parsed])
- fixes = []
- remade_tokens = []
- multipliers = []
+ chunks = []
+ chunk = PromptChunk()
+ token_count = 0
last_comma = -1
- for tokens, (text, weight) in zip(tokenized, parsed):
- i = 0
- while i < len(tokens):
- token = tokens[i]
+ def next_chunk():
+ """puts current chunk into the list of results and produces the next one - empty"""
+ nonlocal token_count
+ nonlocal last_comma
+ nonlocal chunk
- embedding, embedding_length_in_tokens = self.hijack.embedding_db.find_embedding_at_position(tokens, i)
+ token_count += len(chunk.tokens)
+ to_add = self.chunk_length - len(chunk.tokens)
+ if to_add > 0:
+ chunk.tokens += [self.id_end] * to_add
+ chunk.multipliers += [1.0] * to_add
+
+ chunk.tokens = [self.id_start] + chunk.tokens + [self.id_end]
+ chunk.multipliers = [1.0] + chunk.multipliers + [1.0]
+
+ last_comma = -1
+ chunks.append(chunk)
+ chunk = PromptChunk()
+
+ for tokens, (text, weight) in zip(tokenized, parsed):
+ position = 0
+ while position < len(tokens):
+ token = tokens[position]
if token == self.comma_token:
- last_comma = len(remade_tokens)
- elif opts.comma_padding_backtrack != 0 and max(len(remade_tokens), 1) % 75 == 0 and last_comma != -1 and len(remade_tokens) - last_comma <= opts.comma_padding_backtrack:
- last_comma += 1
- reloc_tokens = remade_tokens[last_comma:]
- reloc_mults = multipliers[last_comma:]
+ last_comma = len(chunk.tokens)
- remade_tokens = remade_tokens[:last_comma]
- length = len(remade_tokens)
+ # this is when we are at the end of the allotted 75 tokens for the current chunk, and the current token is not a comma. opts.comma_padding_backtrack
+ # is a setting that specifies that is there is a comma nearby, the text after comma should be moved out of this chunk and into the next.
+ elif opts.comma_padding_backtrack != 0 and len(chunk.tokens) == self.chunk_length and last_comma != -1 and len(chunk.tokens) - last_comma <= opts.comma_padding_backtrack:
+ break_location = last_comma + 1
- rem = int(math.ceil(length / 75)) * 75 - length
- remade_tokens += [self.id_end] * rem + reloc_tokens
- multipliers = multipliers[:last_comma] + [1.0] * rem + reloc_mults
+ reloc_tokens = chunk.tokens[break_location:]
+ reloc_mults = chunk.multipliers[break_location:]
+ chunk.tokens = chunk.tokens[:break_location]
+ chunk.multipliers = chunk.multipliers[:break_location]
+
+ next_chunk()
+ chunk.tokens = reloc_tokens
+ chunk.multipliers = reloc_mults
+
+ if len(chunk.tokens) == self.chunk_length:
+ next_chunk()
+
+ embedding, embedding_length_in_tokens = self.hijack.embedding_db.find_embedding_at_position(tokens, position)
if embedding is None:
- remade_tokens.append(token)
- multipliers.append(weight)
- i += 1
- else:
- emb_len = int(embedding.vec.shape[0])
- iteration = len(remade_tokens) // 75
- if (len(remade_tokens) + emb_len) // 75 != iteration:
- rem = (75 * (iteration + 1) - len(remade_tokens))
- remade_tokens += [self.id_end] * rem
- multipliers += [1.0] * rem
- iteration += 1
- fixes.append((iteration, (len(remade_tokens) % 75, embedding)))
- remade_tokens += [0] * emb_len
- multipliers += [weight] * emb_len
- used_custom_terms.append((embedding.name, embedding.checksum()))
- i += embedding_length_in_tokens
+ chunk.tokens.append(token)
+ chunk.multipliers.append(weight)
+ position += 1
+ continue
- token_count = len(remade_tokens)
- prompt_target_length = get_target_prompt_token_count(token_count)
- tokens_to_add = prompt_target_length - len(remade_tokens)
+ emb_len = int(embedding.vec.shape[0])
+ if len(chunk.tokens) + emb_len > self.chunk_length:
+ next_chunk()
- remade_tokens = remade_tokens + [self.id_end] * tokens_to_add
- multipliers = multipliers + [1.0] * tokens_to_add
+ chunk.fixes.append(PromptChunkFix(len(chunk.tokens), embedding))
- return remade_tokens, fixes, multipliers, token_count
+ chunk.tokens += [0] * emb_len
+ chunk.multipliers += [weight] * emb_len
+ position += embedding_length_in_tokens
+
+ if len(chunk.tokens) > 0:
+ next_chunk()
+
+ return chunks, token_count
+
+ def process_texts(self, texts):
+ """
+ Accepts a list of texts and calls tokenize_line() on each, with caching. Returns the list of results and the maximum
+ length, in tokens, of all texts.
+ """
- def process_text(self, texts):
- used_custom_terms = []
- remade_batch_tokens = []
- hijack_comments = []
- hijack_fixes = []
token_count = 0
cache = {}
- batch_multipliers = []
+ batch_chunks = []
for line in texts:
if line in cache:
- remade_tokens, fixes, multipliers = cache[line]
+ chunks = cache[line]
else:
- remade_tokens, fixes, multipliers, current_token_count = self.tokenize_line(line, used_custom_terms, hijack_comments)
+ chunks, current_token_count = self.tokenize_line(line)
token_count = max(current_token_count, token_count)
- cache[line] = (remade_tokens, fixes, multipliers)
+ cache[line] = chunks
- remade_batch_tokens.append(remade_tokens)
- hijack_fixes.append(fixes)
- batch_multipliers.append(multipliers)
+ batch_chunks.append(chunks)
- return batch_multipliers, remade_batch_tokens, used_custom_terms, hijack_comments, hijack_fixes, token_count
+ return batch_chunks, token_count
- def process_text_old(self, texts):
- id_start = self.id_start
- id_end = self.id_end
- maxlen = self.wrapped.max_length # you get to stay at 77
- used_custom_terms = []
- remade_batch_tokens = []
- hijack_comments = []
- hijack_fixes = []
- token_count = 0
+ def forward(self, texts):
+ """
+ Accepts an array of texts; Passes texts through transformers network to create a tensor with numerical representation of those texts.
+ Returns a tensor with shape of (B, T, C), where B is length of the array; T is length, in tokens, of texts (including padding) - T will
+ be a multiple of 77; and C is dimensionality of each token - for SD1 it's 768, and for SD2 it's 1024.
+ An example shape returned by this function can be: (2, 77, 768).
+ Webui usually sends just one text at a time through this function - the only time when texts is an array with more than one element
+ is when you do prompt editing: "a picture of a [cat:dog:0.4] eating ice cream"
+ """
- cache = {}
- batch_tokens = self.tokenize(texts)
- batch_multipliers = []
- for tokens in batch_tokens:
- tuple_tokens = tuple(tokens)
+ if opts.use_old_emphasis_implementation:
+ import modules.sd_hijack_clip_old
+ return modules.sd_hijack_clip_old.forward_old(self, texts)
- if tuple_tokens in cache:
- remade_tokens, fixes, multipliers = cache[tuple_tokens]
- else:
- fixes = []
- remade_tokens = []
- multipliers = []
- mult = 1.0
+ batch_chunks, token_count = self.process_texts(texts)
- i = 0
- while i < len(tokens):
- token = tokens[i]
+ used_embeddings = {}
+ chunk_count = max([len(x) for x in batch_chunks])
- embedding, embedding_length_in_tokens = self.hijack.embedding_db.find_embedding_at_position(tokens, i)
+ zs = []
+ for i in range(chunk_count):
+ batch_chunk = [chunks[i] if i < len(chunks) else self.empty_chunk() for chunks in batch_chunks]
- mult_change = self.token_mults.get(token) if opts.enable_emphasis else None
- if mult_change is not None:
- mult *= mult_change
- i += 1
- elif embedding is None:
- remade_tokens.append(token)
- multipliers.append(mult)
- i += 1
- else:
- emb_len = int(embedding.vec.shape[0])
- fixes.append((len(remade_tokens), embedding))
- remade_tokens += [0] * emb_len
- multipliers += [mult] * emb_len
- used_custom_terms.append((embedding.name, embedding.checksum()))
- i += embedding_length_in_tokens
+ tokens = [x.tokens for x in batch_chunk]
+ multipliers = [x.multipliers for x in batch_chunk]
+ self.hijack.fixes = [x.fixes for x in batch_chunk]
- if len(remade_tokens) > maxlen - 2:
- vocab = {v: k for k, v in self.wrapped.tokenizer.get_vocab().items()}
- ovf = remade_tokens[maxlen - 2:]
- overflowing_words = [vocab.get(int(x), "") for x in ovf]
- overflowing_text = self.wrapped.tokenizer.convert_tokens_to_string(''.join(overflowing_words))
- hijack_comments.append(f"Warning: too many input tokens; some ({len(overflowing_words)}) have been truncated:\n{overflowing_text}\n")
+ for fixes in self.hijack.fixes:
+ for position, embedding in fixes:
+ used_embeddings[embedding.name] = embedding
- token_count = len(remade_tokens)
- remade_tokens = remade_tokens + [id_end] * (maxlen - 2 - len(remade_tokens))
- remade_tokens = [id_start] + remade_tokens[0:maxlen - 2] + [id_end]
- cache[tuple_tokens] = (remade_tokens, fixes, multipliers)
+ z = self.process_tokens(tokens, multipliers)
+ zs.append(z)
- multipliers = multipliers + [1.0] * (maxlen - 2 - len(multipliers))
- multipliers = [1.0] + multipliers[0:maxlen - 2] + [1.0]
+ if len(used_embeddings) > 0:
+ embeddings_list = ", ".join([f'{name} [{embedding.checksum()}]' for name, embedding in used_embeddings.items()])
+ self.hijack.comments.append(f"Used embeddings: {embeddings_list}")
- remade_batch_tokens.append(remade_tokens)
- hijack_fixes.append(fixes)
- batch_multipliers.append(multipliers)
- return batch_multipliers, remade_batch_tokens, used_custom_terms, hijack_comments, hijack_fixes, token_count
-
- def forward(self, text):
- use_old = opts.use_old_emphasis_implementation
- if use_old:
- batch_multipliers, remade_batch_tokens, used_custom_terms, hijack_comments, hijack_fixes, token_count = self.process_text_old(text)
- else:
- batch_multipliers, remade_batch_tokens, used_custom_terms, hijack_comments, hijack_fixes, token_count = self.process_text(text)
-
- self.hijack.comments += hijack_comments
-
- if len(used_custom_terms) > 0:
- self.hijack.comments.append("Used embeddings: " + ", ".join([f'{word} [{checksum}]' for word, checksum in used_custom_terms]))
-
- if use_old:
- self.hijack.fixes = hijack_fixes
- return self.process_tokens(remade_batch_tokens, batch_multipliers)
-
- z = None
- i = 0
- while max(map(len, remade_batch_tokens)) != 0:
- rem_tokens = [x[75:] for x in remade_batch_tokens]
- rem_multipliers = [x[75:] for x in batch_multipliers]
-
- self.hijack.fixes = []
- for unfiltered in hijack_fixes:
- fixes = []
- for fix in unfiltered:
- if fix[0] == i:
- fixes.append(fix[1])
- self.hijack.fixes.append(fixes)
-
- tokens = []
- multipliers = []
- for j in range(len(remade_batch_tokens)):
- if len(remade_batch_tokens[j]) > 0:
- tokens.append(remade_batch_tokens[j][:75])
- multipliers.append(batch_multipliers[j][:75])
- else:
- tokens.append([self.id_end] * 75)
- multipliers.append([1.0] * 75)
-
- z1 = self.process_tokens(tokens, multipliers)
- z = z1 if z is None else torch.cat((z, z1), axis=-2)
-
- remade_batch_tokens = rem_tokens
- batch_multipliers = rem_multipliers
- i += 1
-
- return z
+ return torch.hstack(zs)
def process_tokens(self, remade_batch_tokens, batch_multipliers):
- if not opts.use_old_emphasis_implementation:
- remade_batch_tokens = [[self.id_start] + x[:75] + [self.id_end] for x in remade_batch_tokens]
- batch_multipliers = [[1.0] + x[:75] + [1.0] for x in batch_multipliers]
-
+ """
+ sends a single prompt chunk to be encoded by the transformers neural network.
+ remade_batch_tokens is a batch of tokens - a list, where every element is a list of tokens; usually
+ there are exactly 77 tokens in the list. batch_multipliers is the same but for multipliers instead of tokens.
+ Multipliers are used to give more or less weight to the outputs of the transformers network. Each multiplier
+ corresponds to one token.
+ """
tokens = torch.asarray(remade_batch_tokens).to(devices.device)
+ # this is for SD2: SD1 uses the same token for padding and end of text, while SD2 uses different ones.
if self.id_end != self.id_pad:
for batch_pos in range(len(remade_batch_tokens)):
index = remade_batch_tokens[batch_pos].index(self.id_end)
@@ -239,8 +234,7 @@ class FrozenCLIPEmbedderWithCustomWordsBase(torch.nn.Module):
z = self.encode_with_transformers(tokens)
# restoring original mean is likely not correct, but it seems to work well to prevent artifacts that happen otherwise
- batch_multipliers_of_same_length = [x + [1.0] * (75 - len(x)) for x in batch_multipliers]
- batch_multipliers = torch.asarray(batch_multipliers_of_same_length).to(devices.device)
+ batch_multipliers = torch.asarray(batch_multipliers).to(devices.device)
original_mean = z.mean()
z *= batch_multipliers.reshape(batch_multipliers.shape + (1,)).expand(z.shape)
new_mean = z.mean()
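The rewritten tokenize_line() above replaces the old ad-hoc 75-token arithmetic with fixed-size PromptChunk objects. As a minimal standalone sketch (not the webui code itself; the CLIP start/end token ids 49406/49407 and the helper name chunk_tokens are assumptions for illustration), the core chunking scheme looks like this:

def chunk_tokens(tokens, chunk_length=75, id_start=49406, id_end=49407):
    """Split a flat token list into chunk_length-sized pieces, pad each to
    chunk_length with end-of-text tokens, and wrap with start/end (77 total)."""
    chunks = []
    for i in range(0, max(len(tokens), 1), chunk_length):
        body = tokens[i:i + chunk_length]
        body += [id_end] * (chunk_length - len(body))  # pad short chunks
        chunks.append([id_start] + body + [id_end])
    return chunks

chunks = chunk_tokens(list(range(100)))
assert len(chunks) == 2 and all(len(c) == 77 for c in chunks)

The real implementation additionally tracks per-token multipliers, embedding fixes, and the comma backtracking discussed further below.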
diff --git a/modules/sd_hijack_clip_old.py b/modules/sd_hijack_clip_old.py
new file mode 100644
index 00000000..6d9fbbe6
--- /dev/null
+++ b/modules/sd_hijack_clip_old.py
@@ -0,0 +1,81 @@
+from modules import sd_hijack_clip
+from modules import shared
+
+
+def process_text_old(self: sd_hijack_clip.FrozenCLIPEmbedderWithCustomWordsBase, texts):
+ id_start = self.id_start
+ id_end = self.id_end
+ maxlen = self.wrapped.max_length # you get to stay at 77
+ used_custom_terms = []
+ remade_batch_tokens = []
+ hijack_comments = []
+ hijack_fixes = []
+ token_count = 0
+
+ cache = {}
+ batch_tokens = self.tokenize(texts)
+ batch_multipliers = []
+ for tokens in batch_tokens:
+ tuple_tokens = tuple(tokens)
+
+ if tuple_tokens in cache:
+ remade_tokens, fixes, multipliers = cache[tuple_tokens]
+ else:
+ fixes = []
+ remade_tokens = []
+ multipliers = []
+ mult = 1.0
+
+ i = 0
+ while i < len(tokens):
+ token = tokens[i]
+
+ embedding, embedding_length_in_tokens = self.hijack.embedding_db.find_embedding_at_position(tokens, i)
+
+ mult_change = self.token_mults.get(token) if shared.opts.enable_emphasis else None
+ if mult_change is not None:
+ mult *= mult_change
+ i += 1
+ elif embedding is None:
+ remade_tokens.append(token)
+ multipliers.append(mult)
+ i += 1
+ else:
+ emb_len = int(embedding.vec.shape[0])
+ fixes.append((len(remade_tokens), embedding))
+ remade_tokens += [0] * emb_len
+ multipliers += [mult] * emb_len
+ used_custom_terms.append((embedding.name, embedding.checksum()))
+ i += embedding_length_in_tokens
+
+ if len(remade_tokens) > maxlen - 2:
+ vocab = {v: k for k, v in self.wrapped.tokenizer.get_vocab().items()}
+ ovf = remade_tokens[maxlen - 2:]
+ overflowing_words = [vocab.get(int(x), "") for x in ovf]
+ overflowing_text = self.wrapped.tokenizer.convert_tokens_to_string(''.join(overflowing_words))
+ hijack_comments.append(f"Warning: too many input tokens; some ({len(overflowing_words)}) have been truncated:\n{overflowing_text}\n")
+
+ token_count = len(remade_tokens)
+ remade_tokens = remade_tokens + [id_end] * (maxlen - 2 - len(remade_tokens))
+ remade_tokens = [id_start] + remade_tokens[0:maxlen - 2] + [id_end]
+ cache[tuple_tokens] = (remade_tokens, fixes, multipliers)
+
+ multipliers = multipliers + [1.0] * (maxlen - 2 - len(multipliers))
+ multipliers = [1.0] + multipliers[0:maxlen - 2] + [1.0]
+
+ remade_batch_tokens.append(remade_tokens)
+ hijack_fixes.append(fixes)
+ batch_multipliers.append(multipliers)
+ return batch_multipliers, remade_batch_tokens, used_custom_terms, hijack_comments, hijack_fixes, token_count
+
+
+def forward_old(self: sd_hijack_clip.FrozenCLIPEmbedderWithCustomWordsBase, texts):
+ batch_multipliers, remade_batch_tokens, used_custom_terms, hijack_comments, hijack_fixes, token_count = process_text_old(self, texts)
+
+ self.hijack.comments += hijack_comments
+
+ if len(used_custom_terms) > 0:
+ self.hijack.comments.append("Used embeddings: " + ", ".join([f'{word} [{checksum}]' for word, checksum in used_custom_terms]))
+
+ self.hijack.fixes = hijack_fixes
+ return self.process_tokens(remade_batch_tokens, batch_multipliers)
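forward_old() above preserves the previous single-pass behaviour, while the new forward() encodes each 77-token chunk separately and joins the per-chunk outputs along the token axis with torch.hstack. A small toy illustration (random tensors stand in for transformer outputs; shapes follow the docstring above):

import torch

# two prompts, two chunks each; 77 tokens per chunk, 768 channels as in SD1
zs = [torch.randn(2, 77, 768) for _ in range(2)]

z = torch.hstack(zs)  # for 3D tensors this concatenates along dim=1, the token axis
print(z.shape)        # torch.Size([2, 154, 768]) - a multiple of 77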
diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py
index f9f5e8cd..45882ed6 100644
--- a/modules/textual_inversion/textual_inversion.py
+++ b/modules/textual_inversion/textual_inversion.py
@@ -79,7 +79,6 @@ class EmbeddingDatabase:
self.word_embeddings[embedding.name] = embedding
- # TODO changing between clip and open clip changes tokenization, which will cause embeddings to stop working
ids = model.cond_stage_model.tokenize([embedding.name])[0]
first_id = ids[0]
diff --git a/modules/ui.py b/modules/ui.py
index b79d24ee..5d2f5bad 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -368,7 +368,7 @@ def update_token_counter(text, steps):
flat_prompts = reduce(lambda list1, list2: list1+list2, prompt_schedules)
prompts = [prompt_text for step, prompt_text in flat_prompts]
- tokens, token_count, max_length = max([model_hijack.tokenize(prompt) for prompt in prompts], key=lambda args: args[1])
+ token_count, max_length = max([model_hijack.get_prompt_lengths(prompt) for prompt in prompts], key=lambda args: args[0])
style_class = ' class="red"' if (token_count > max_length) else ""
return f"{token_count}/{max_length}"
From f94cfc563bbedd923d5e95563a5e8d93c8516ac3 Mon Sep 17 00:00:00 2001
From: Mitchell Boot <47387831+Mitchell1711@users.noreply.github.com>
Date: Sat, 7 Jan 2023 01:15:22 +0100
Subject: [PATCH 122/172] Changed HTML to textbox instead
Using HTML caused an issue where the row would expand for a frame when changing the sliders because of the loading animation. This solution also doesn't use any additional HTML padding.
---
modules/ui.py | 7 +++++--
1 file changed, 5 insertions(+), 2 deletions(-)
diff --git a/modules/ui.py b/modules/ui.py
index 6fc8b7d7..6ea1b5d7 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -260,7 +260,7 @@ def calc_resolution_hires(x, y, scale):
scaled_x = int(x * scale // 8) * 8
scaled_y = int(y * scale // 8) * 8
- return "Upscaled resolution: "+str(scaled_x)+"x"+str(scaled_y)+"
"
+ return str(scaled_x)+"x"+str(scaled_y)
def apply_styles(prompt, prompt_neg, style1_name, style2_name):
prompt = shared.prompt_styles.apply_styles_to_prompt(prompt, [style1_name, style2_name])
@@ -726,7 +726,10 @@ def create_ui():
hr_resize_y = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize height to", value=0, elem_id="txt2img_hr_resize_y")
with FormRow(elem_id="txt2img_hires_fix_row3"):
- hr_final_resolution = gr.HTML(value=calc_resolution_hires(width.value, height.value, hr_scale.value), elem_id="txtimg_hr_finalres")
+ hr_final_resolution = gr.Textbox(value=calc_resolution_hires(width.value, height.value, hr_scale.value),
+ elem_id="txtimg_hr_finalres",
+ label="Upscaled resolution",
+ interactive=False)
hr_scale.change(fn=calc_resolution_hires, inputs=[width, height, hr_scale], outputs=hr_final_resolution, show_progress=False)
width.change(fn=calc_resolution_hires, inputs=[width, height, hr_scale], outputs=hr_final_resolution, show_progress=False)
height.change(fn=calc_resolution_hires, inputs=[width, height, hr_scale], outputs=hr_final_resolution, show_progress=False)
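For reference, the resolution string shown in that textbox comes from scaling each dimension and snapping it down to a multiple of 8, a constraint imposed by the latent space; for example:

def round_dim(x, scale):
    # int(x * scale // 8) * 8: scale first, then snap down to a multiple of 8
    return int(x * scale // 8) * 8

print(round_dim(512, 1.1))  # 563.2 -> 560
print(round_dim(512, 2.0))  # 1024, already a multiple of 8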
From 08066676a47b560235d4c085dd3cfcb470b80997 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Sat, 7 Jan 2023 07:22:07 +0300
Subject: [PATCH 123/172] make it not break on empty inputs; thank you tarded,
we are
---
modules/sd_hijack_clip.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/modules/sd_hijack_clip.py b/modules/sd_hijack_clip.py
index ac3020d7..16aef76a 100644
--- a/modules/sd_hijack_clip.py
+++ b/modules/sd_hijack_clip.py
@@ -147,7 +147,7 @@ class FrozenCLIPEmbedderWithCustomWordsBase(torch.nn.Module):
chunk.multipliers += [weight] * emb_len
position += embedding_length_in_tokens
- if len(chunk.tokens) > 0:
+ if len(chunk.tokens) > 0 or len(chunks) == 0:
next_chunk()
return chunks, token_count
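Without the added `or len(chunks) == 0` guard, an empty prompt produces zero chunks and there is nothing to encode; the guard guarantees at least one (fully padded) chunk. A toy check of the same invariant:

def chunk_count(token_count, chunk_length=75):
    chunks = (token_count + chunk_length - 1) // chunk_length
    return max(chunks, 1)  # the fix: even an empty prompt yields one chunk

assert chunk_count(0) == 1
assert chunk_count(76) == 2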
From 1740c33547b62f692834c95914a2b295d51684c7 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Sat, 7 Jan 2023 07:48:44 +0300
Subject: [PATCH 124/172] more comments
---
modules/sd_hijack_clip.py | 21 ++++++++++++++++-----
1 file changed, 16 insertions(+), 5 deletions(-)
diff --git a/modules/sd_hijack_clip.py b/modules/sd_hijack_clip.py
index 16aef76a..5520c9b2 100644
--- a/modules/sd_hijack_clip.py
+++ b/modules/sd_hijack_clip.py
@@ -3,7 +3,7 @@ from collections import namedtuple
import torch
-from modules import prompt_parser, devices
+from modules import prompt_parser, devices, sd_hijack
from modules.shared import opts
@@ -22,14 +22,24 @@ class PromptChunk:
PromptChunkFix = namedtuple('PromptChunkFix', ['offset', 'embedding'])
-"""This is a marker showing that textual inversion embedding's vectors have to placed at offset in the prompt chunk"""
+"""An object of this type is a marker showing that textual inversion embedding's vectors have to placed at offset in the prompt
+chunk. Thos objects are found in PromptChunk.fixes and, are placed into FrozenCLIPEmbedderWithCustomWordsBase.hijack.fixes, and finally
+are applied by sd_hijack.EmbeddingsWithFixes's forward function."""
class FrozenCLIPEmbedderWithCustomWordsBase(torch.nn.Module):
+ """A pytorch module that is a wrapper for FrozenCLIPEmbedder module. it enhances FrozenCLIPEmbedder, making it possible to
+ have unlimited prompt length and assign weights to tokens in prompt.
+ """
+
def __init__(self, wrapped, hijack):
super().__init__()
+
self.wrapped = wrapped
- self.hijack = hijack
+ """Original FrozenCLIPEmbedder module; can also be FrozenOpenCLIPEmbedder or xlmr.BertSeriesModelWithTransformation,
+ depending on model."""
+
+ self.hijack: sd_hijack.StableDiffusionModelHijack = hijack
self.chunk_length = 75
def empty_chunk(self):
@@ -55,7 +65,8 @@ class FrozenCLIPEmbedderWithCustomWordsBase(torch.nn.Module):
converts a batch of token ids (in python lists) into a single tensor with numeric representation of those tokens;
All python lists with tokens are assumed to have the same length, usually 77.
if the input is a list with B elements and each element has T tokens, the expected output shape is (B, T, C), where C depends on
- the model - can be 768 or 1024
+ the model - can be 768 or 1024.
+ Among other things, this call will read self.hijack.fixes, apply it to its inputs, and clear it (setting it to None).
"""
raise NotImplementedError
@@ -113,7 +124,7 @@ class FrozenCLIPEmbedderWithCustomWordsBase(torch.nn.Module):
last_comma = len(chunk.tokens)
# this is when we are at the end of the allotted 75 tokens for the current chunk, and the current token is not a comma. opts.comma_padding_backtrack
- # is a setting that specifies that is there is a comma nearby, the text after comma should be moved out of this chunk and into the next.
+ # is a setting that specifies that if there is a comma nearby, the text after the comma should be moved out of this chunk and into the next.
elif opts.comma_padding_backtrack != 0 and len(chunk.tokens) == self.chunk_length and last_comma != -1 and len(chunk.tokens) - last_comma <= opts.comma_padding_backtrack:
break_location = last_comma + 1
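To make the backtracking concrete: when a chunk fills up mid-clause, everything after the last comma is carried into the next chunk, provided the comma sits within comma_padding_backtrack tokens of the break. A simplified sketch of that split (not the webui code; it operates on strings rather than token ids for readability):

def split_at_comma(tokens, backtrack):
    """Return (current_chunk, carried_over) for a full chunk, moving the tail
    after the last comma into the next chunk if the comma is close enough."""
    last_comma = max((i for i, t in enumerate(tokens) if t == ","), default=-1)
    if backtrack and last_comma != -1 and len(tokens) - last_comma <= backtrack:
        return tokens[:last_comma + 1], tokens[last_comma + 1:]
    return tokens, []

print(split_at_comma(["a", "photo", "of", ",", "a", "cat"], backtrack=20))
# (['a', 'photo', 'of', ','], ['a', 'cat']) - "a cat" stays intact in the next chunk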
From de9738044571877450d1038e18f1ecce93d24af3 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Sat, 7 Jan 2023 08:53:53 +0300
Subject: [PATCH 125/172] this breaks on default config because width, height,
hr_scale are None at that point.
---
modules/ui.py | 14 ++++++--------
1 file changed, 6 insertions(+), 8 deletions(-)
diff --git a/modules/ui.py b/modules/ui.py
index f946382d..a18b9007 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -725,14 +725,8 @@ def create_ui():
hr_resize_x = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize width to", value=0, elem_id="txt2img_hr_resize_x")
hr_resize_y = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize height to", value=0, elem_id="txt2img_hr_resize_y")
- with FormRow(elem_id="txt2img_hires_fix_row3"):
- hr_final_resolution = gr.Textbox(value=calc_resolution_hires(width.value, height.value, hr_scale.value),
- elem_id="txtimg_hr_finalres",
- label="Upscaled resolution",
- interactive=False)
- hr_scale.change(fn=calc_resolution_hires, inputs=[width, height, hr_scale], outputs=hr_final_resolution, show_progress=False)
- width.change(fn=calc_resolution_hires, inputs=[width, height, hr_scale], outputs=hr_final_resolution, show_progress=False)
- height.change(fn=calc_resolution_hires, inputs=[width, height, hr_scale], outputs=hr_final_resolution, show_progress=False)
+ with FormRow(elem_id="txt2img_hires_fix_row3"):
+ hr_final_resolution = gr.Textbox(value="", elem_id="txtimg_hr_finalres", label="Upscaled resolution", interactive=False)
elif category == "batch":
if not opts.dimensions_and_batch_together:
@@ -744,6 +738,10 @@ def create_ui():
with FormGroup(elem_id="txt2img_script_container"):
custom_inputs = modules.scripts.scripts_txt2img.setup_ui()
+ hr_scale.change(fn=calc_resolution_hires, inputs=[width, height, hr_scale], outputs=hr_final_resolution, show_progress=False)
+ width.change(fn=calc_resolution_hires, inputs=[width, height, hr_scale], outputs=hr_final_resolution, show_progress=False)
+ height.change(fn=calc_resolution_hires, inputs=[width, height, hr_scale], outputs=hr_final_resolution, show_progress=False)
+
txt2img_gallery, generation_info, html_info, html_log = create_output_panel("txt2img", opts.outdir_txt2img_samples)
parameters_copypaste.bind_buttons({"txt2img": txt2img_paste}, None, txt2img_prompt)
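The underlying issue is ordering: at default-config creation time the sliders' values are not yet populated, so the preview callback cannot run during UI construction. The general Gradio pattern (a minimal sketch, assuming gradio 3.x as used by the webui at this point) is to create every component first and bind events afterwards:

import gradio as gr

def preview(w, h, scale):
    return f"{int(w * scale // 8) * 8}x{int(h * scale // 8) * 8}"

with gr.Blocks() as demo:
    width = gr.Slider(64, 2048, value=512, step=8, label="Width")
    height = gr.Slider(64, 2048, value=512, step=8, label="Height")
    scale = gr.Slider(1.0, 4.0, value=2.0, step=0.05, label="Upscale by")
    out = gr.Textbox(label="Upscaled resolution", interactive=False)

    # bind only after every component exists with a concrete value
    for comp in (width, height, scale):
        comp.change(fn=preview, inputs=[width, height, scale], outputs=out)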
From 1a5b86ad65fd738eadea1ad72f4abad3a4aabf17 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Sat, 7 Jan 2023 09:56:37 +0300
Subject: [PATCH 126/172] rework hires fix preview for #6437: move it to where
it takes less space, make it actually account for all relevant sliders and
calculate dimensions correctly
---
modules/processing.py | 1 -
modules/ui.py | 40 +++++++++++++++++++++++++++-------------
modules/ui_components.py | 8 ++++++++
style.css | 17 +++++++++++++++++
4 files changed, 52 insertions(+), 14 deletions(-)
diff --git a/modules/processing.py b/modules/processing.py
index a408d622..82157bc9 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -711,7 +711,6 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
self.truncate_x = 0
self.truncate_y = 0
-
def init(self, all_prompts, all_seeds, all_subseeds):
if self.enable_hr:
if self.hr_resize_x == 0 and self.hr_resize_y == 0:
diff --git a/modules/ui.py b/modules/ui.py
index a18b9007..6c765262 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -20,7 +20,7 @@ from PIL import Image, PngImagePlugin
from modules.call_queue import wrap_gradio_gpu_call, wrap_queued_call, wrap_gradio_call
from modules import sd_hijack, sd_models, localization, script_callbacks, ui_extensions, deepbooru
-from modules.ui_components import FormRow, FormGroup, ToolButton
+from modules.ui_components import FormRow, FormGroup, ToolButton, FormHTML
from modules.paths import script_path
from modules.shared import opts, cmd_opts, restricted_opts
@@ -255,12 +255,20 @@ def add_style(name: str, prompt: str, negative_prompt: str):
return [gr.Dropdown.update(visible=True, choices=list(shared.prompt_styles.styles)) for _ in range(4)]
-def calc_resolution_hires(x, y, scale):
- #final res can only be a multiple of 8
- scaled_x = int(x * scale // 8) * 8
- scaled_y = int(y * scale // 8) * 8
-
- return str(scaled_x)+"x"+str(scaled_y)
+
+def calc_resolution_hires(enable, width, height, hr_scale, hr_resize_x, hr_resize_y):
+ from modules import processing, devices
+
+ if not enable:
+ return ""
+
+ p = processing.StableDiffusionProcessingTxt2Img(width=width, height=height, enable_hr=True, hr_scale=hr_scale, hr_resize_x=hr_resize_x, hr_resize_y=hr_resize_y)
+
+ with devices.autocast():
+ p.init([""], [0], [0])
+
+ return f"resize to: {p.hr_upscale_to_x}x{p.hr_upscale_to_y}"
+
def apply_styles(prompt, prompt_neg, style1_name, style2_name):
prompt = shared.prompt_styles.apply_styles_to_prompt(prompt, [style1_name, style2_name])
@@ -712,6 +720,7 @@ def create_ui():
restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1, elem_id="txt2img_restore_faces")
tiling = gr.Checkbox(label='Tiling', value=False, elem_id="txt2img_tiling")
enable_hr = gr.Checkbox(label='Hires. fix', value=False, elem_id="txt2img_enable_hr")
+ hr_final_resolution = FormHTML(value="", elem_id="txtimg_hr_finalres", label="Upscaled resolution", interactive=False)
elif category == "hires_fix":
with FormGroup(visible=False, elem_id="txt2img_hires_fix") as hr_options:
@@ -724,9 +733,6 @@ def create_ui():
hr_scale = gr.Slider(minimum=1.0, maximum=4.0, step=0.05, label="Upscale by", value=2.0, elem_id="txt2img_hr_scale")
hr_resize_x = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize width to", value=0, elem_id="txt2img_hr_resize_x")
hr_resize_y = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize height to", value=0, elem_id="txt2img_hr_resize_y")
-
- with FormRow(elem_id="txt2img_hires_fix_row3"):
- hr_final_resolution = gr.Textbox(value="", elem_id="txtimg_hr_finalres", label="Upscaled resolution", interactive=False)
elif category == "batch":
if not opts.dimensions_and_batch_together:
@@ -738,9 +744,16 @@ def create_ui():
with FormGroup(elem_id="txt2img_script_container"):
custom_inputs = modules.scripts.scripts_txt2img.setup_ui()
- hr_scale.change(fn=calc_resolution_hires, inputs=[width, height, hr_scale], outputs=hr_final_resolution, show_progress=False)
- width.change(fn=calc_resolution_hires, inputs=[width, height, hr_scale], outputs=hr_final_resolution, show_progress=False)
- height.change(fn=calc_resolution_hires, inputs=[width, height, hr_scale], outputs=hr_final_resolution, show_progress=False)
+ hr_resolution_preview_inputs = [enable_hr, width, height, hr_scale, hr_resize_x, hr_resize_y]
+ hr_resolution_preview_args = dict(
+ fn=calc_resolution_hires,
+ inputs=hr_resolution_preview_inputs,
+ outputs=[hr_final_resolution],
+ show_progress=False
+ )
+
+ for input in hr_resolution_preview_inputs:
+ input.change(**hr_resolution_preview_args)
txt2img_gallery, generation_info, html_info, html_log = create_output_panel("txt2img", opts.outdir_txt2img_samples)
parameters_copypaste.bind_buttons({"txt2img": txt2img_paste}, None, txt2img_prompt)
@@ -803,6 +816,7 @@ def create_ui():
fn=lambda x: gr_show(x),
inputs=[enable_hr],
outputs=[hr_options],
+ show_progress=False,
)
txt2img_paste_fields = [
diff --git a/modules/ui_components.py b/modules/ui_components.py
index 91eb0e3d..cac001dc 100644
--- a/modules/ui_components.py
+++ b/modules/ui_components.py
@@ -23,3 +23,11 @@ class FormGroup(gr.Group, gr.components.FormComponent):
def get_block_name(self):
return "group"
+
+
+class FormHTML(gr.HTML, gr.components.FormComponent):
+ """Same as gr.HTML but fits inside gradio forms"""
+
+ def get_block_name(self):
+ return "html"
+
diff --git a/style.css b/style.css
index f1b23b53..76721756 100644
--- a/style.css
+++ b/style.css
@@ -642,6 +642,23 @@ footer {
opacity: 0.85;
}
+#txtimg_hr_finalres{
+ min-height: 0 !important;
+ padding: .625rem .75rem;
+ margin-left: -0.75em;
+}
+
+#txtimg_hr_finalres .resolution{
+ font-weight: bold;
+}
+
+#txt2img_checkboxes > div > div{
+ flex: 0;
+ white-space: nowrap;
+ min-width: auto;
+}
+
/* The following handles localization for right-to-left (RTL) languages like Arabic.
The rtl media type will only be activated by the logic in javascript/localization.js.
If you change anything above, you need to make sure it is RTL compliant by just running
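calc_resolution_hires() above delegates the real math to StableDiffusionProcessingTxt2Img.init(). In rough outline (a deliberately simplified sketch, not the exact webui logic, which also computes truncation to preserve aspect ratio), the target dimensions come either from the scale factor or from the explicit resize fields:

def hr_target(width, height, hr_scale, hr_resize_x, hr_resize_y):
    if hr_resize_x == 0 and hr_resize_y == 0:
        # no explicit target: apply the upscale factor
        return int(width * hr_scale), int(height * hr_scale)
    if hr_resize_y == 0:
        # one side given: derive the other from the aspect ratio
        return hr_resize_x, hr_resize_x * height // width
    if hr_resize_x == 0:
        return hr_resize_y * width // height, hr_resize_y
    return hr_resize_x, hr_resize_y

print(hr_target(512, 512, 2.0, 0, 0))     # (1024, 1024)
print(hr_target(512, 768, 2.0, 1024, 0))  # (1024, 1536)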
From a36e2744e2b18a2582247bc5b95bfa0339dfa629 Mon Sep 17 00:00:00 2001
From: Taithrah
Date: Sat, 7 Jan 2023 04:09:02 -0500
Subject: [PATCH 127/172] Update hints.js
Small touch up to hints
---
javascript/hints.js | 14 +++++++-------
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/javascript/hints.js b/javascript/hints.js
index dda66e09..73ab4a26 100644
--- a/javascript/hints.js
+++ b/javascript/hints.js
@@ -4,7 +4,7 @@ titles = {
"Sampling steps": "How many times to improve the generated image iteratively; higher values take longer; very low values can produce bad results",
"Sampling method": "Which algorithm to use to produce the image",
"GFPGAN": "Restore low quality faces using GFPGAN neural network",
- "Euler a": "Euler Ancestral - very creative, each can get a completely different picture depending on step count, setting steps to higher than 30-40 does not help",
+ "Euler a": "Euler Ancestral - very creative, each can get a completely different picture depending on step count, setting steps higher than 30-40 does not help",
"DDIM": "Denoising Diffusion Implicit Models - best at inpainting",
"DPM adaptive": "Ignores step count - uses a number of steps determined by the CFG and resolution",
@@ -12,8 +12,8 @@ titles = {
"Batch size": "How many image to create in a single batch",
"CFG Scale": "Classifier Free Guidance Scale - how strongly the image should conform to prompt - lower values produce more creative results",
"Seed": "A value that determines the output of random number generator - if you create an image with same parameters and seed as another image, you'll get the same result",
- "\u{1f3b2}\ufe0f": "Set seed to -1, which will cause a new random number to be used every time",
- "\u267b\ufe0f": "Reuse seed from last generation, mostly useful if it was randomed",
+ "\u{1f3b2}\ufe0f": "Set seed to -1 will set a new random number every time.",
+ "\u267b\ufe0f": "Reuse seed from last generation, most useful if it was randomized.",
"\u{1f3a8}": "Add a random artist to the prompt.",
"\u2199\ufe0f": "Read generation parameters from prompt or last generation if prompt is empty into user interface.",
"\u{1f4c2}": "Open images output directory",
@@ -74,7 +74,7 @@ titles = {
"Style 1": "Style to apply; styles have components for both positive and negative prompts and apply to both",
"Style 2": "Style to apply; styles have components for both positive and negative prompts and apply to both",
"Apply style": "Insert selected styles into prompt fields",
- "Create style": "Save current prompts as a style. If you add the token {prompt} to the text, the style use that as placeholder for your prompt when you use the style in the future.",
+ "Create style": "Save current prompts as a style. If you add the token {prompt} to the text, the style uses that as a placeholder for your prompt when you use the style in the future.",
"Checkpoint name": "Loads weights from checkpoint before making images. You can either use hash or a part of filename (as seen in settings) for checkpoint name. Recommended to use with Y axis for less switching.",
"Inpainting conditioning mask strength": "Only applies to inpainting models. Determines how strongly to mask off the original image for inpainting and img2img. 1.0 means fully masked, which is the default behaviour. 0.0 means a fully unmasked conditioning. Lower values will help preserve the overall composition of the image, but will struggle with large changes.",
@@ -92,12 +92,12 @@ titles = {
"Weighted sum": "Result = A * (1 - M) + B * M",
"Add difference": "Result = A + (B - C) * M",
- "Learning rate": "how fast should the training go. Low values will take longer to train, high values may fail to converge (not generate accurate results) and/or may break the embedding (This has happened if you see Loss: nan in the training info textbox. If this happens, you need to manually restore your embedding from an older not-broken backup).\n\nYou can set a single numeric value, or multiple learning rates using the syntax:\n\n rate_1:max_steps_1, rate_2:max_steps_2, ...\n\nEG: 0.005:100, 1e-3:1000, 1e-5\n\nWill train with rate of 0.005 for first 100 steps, then 1e-3 until 1000 steps, then 1e-5 for all remaining steps.",
+ "Learning rate": "How fast should training go. Low values will take longer to train, high values may fail to converge (not generate accurate results) and/or may break the embedding (This has happened if you see Loss: nan in the training info textbox. If this happens, you need to manually restore your embedding from an older not-broken backup).\n\nYou can set a single numeric value, or multiple learning rates using the syntax:\n\n rate_1:max_steps_1, rate_2:max_steps_2, ...\n\nEG: 0.005:100, 1e-3:1000, 1e-5\n\nWill train with rate of 0.005 for first 100 steps, then 1e-3 until 1000 steps, then 1e-5 for all remaining steps.",
"Clip skip": "Early stopping parameter for CLIP model; 1 is stop at last layer as usual, 2 is stop at penultimate layer, etc.",
- "Approx NN": "Cheap neural network approximation. Very fast compared to VAE, but produces pictures with 4 times smaller horizontal/vertical resoluton and lower quality.",
- "Approx cheap": "Very cheap approximation. Very fast compared to VAE, but produces pictures with 8 times smaller horizontal/vertical resoluton and extremely low quality.",
+ "Approx NN": "Cheap neural network approximation. Very fast compared to VAE, but produces pictures with 4 times smaller horizontal/vertical resolution and lower quality.",
+ "Approx cheap": "Very cheap approximation. Very fast compared to VAE, but produces pictures with 8 times smaller horizontal/vertical resolution and extremely low quality.",
"Hires. fix": "Use a two step process to partially create an image at smaller resolution, upscale, and then improve details in it without changing composition",
"Hires steps": "Number of sampling steps for upscaled picture. If 0, uses same as for original.",
From 0fc1848e40dbd46c93753a2937403e1139ecd366 Mon Sep 17 00:00:00 2001
From: Aarni Koskela
Date: Sat, 7 Jan 2023 11:25:41 +0200
Subject: [PATCH 128/172] CI: Use native actions/setup-python caching
---
.github/workflows/on_pull_request.yaml | 13 +++++--------
1 file changed, 5 insertions(+), 8 deletions(-)
diff --git a/.github/workflows/on_pull_request.yaml b/.github/workflows/on_pull_request.yaml
index b097d180..a168be5b 100644
--- a/.github/workflows/on_pull_request.yaml
+++ b/.github/workflows/on_pull_request.yaml
@@ -19,22 +19,19 @@ jobs:
- name: Checkout Code
uses: actions/checkout@v3
- name: Set up Python 3.10
- uses: actions/setup-python@v3
+ uses: actions/setup-python@v4
with:
python-version: 3.10.6
- - uses: actions/cache@v2
- with:
- path: ~/.cache/pip
- key: ${{ runner.os }}-pip-${{ hashFiles('**/requirements.txt') }}
- restore-keys: |
- ${{ runner.os }}-pip-
+ cache: pip
+ cache-dependency-path: |
+ **/requirements*txt
- name: Install PyLint
run: |
python -m pip install --upgrade pip
pip install pylint
# This lets PyLint check to see if it can resolve imports
- name: Install dependencies
- run : |
+ run: |
export COMMANDLINE_ARGS="--skip-torch-cuda-test --exit"
python launch.py
- name: Analysing the code with pylint
From a77873974b97618351791ea3015639be7d9f98d1 Mon Sep 17 00:00:00 2001
From: Aarni Koskela
Date: Sat, 7 Jan 2023 11:34:02 +0200
Subject: [PATCH 129/172] ... also for tests.
---
.github/workflows/run_tests.yaml | 8 +++-----
1 file changed, 3 insertions(+), 5 deletions(-)
diff --git a/.github/workflows/run_tests.yaml b/.github/workflows/run_tests.yaml
index 49dc92bd..ecb9012a 100644
--- a/.github/workflows/run_tests.yaml
+++ b/.github/workflows/run_tests.yaml
@@ -14,11 +14,9 @@ jobs:
uses: actions/setup-python@v4
with:
python-version: 3.10.6
- - uses: actions/cache@v3
- with:
- path: ~/.cache/pip
- key: ${{ runner.os }}-pip-${{ hashFiles('**/requirements.txt') }}
- restore-keys: ${{ runner.os }}-pip-
+ cache: pip
+ cache-dependency-path: |
+ **/requirements*txt
- name: Run tests
run: python launch.py --tests basic_features --no-half --disable-opt-split-attention --use-cpu all --skip-torch-cuda-test
- name: Upload main app stdout-stderr
From fdfce4711076c2ebac1089bac8169d043eb7978f Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Sat, 7 Jan 2023 13:29:47 +0300
Subject: [PATCH 130/172] add "from" resolution for hires fix to be less
confusing.
---
modules/ui.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/modules/ui.py b/modules/ui.py
index 6c765262..99483130 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -267,7 +267,7 @@ def calc_resolution_hires(enable, width, height, hr_scale, hr_resize_x, hr_resiz
with devices.autocast():
p.init([""], [0], [0])
- return f"resize to: {p.hr_upscale_to_x}x{p.hr_upscale_to_y}"
+ return f"resize: from {width}x{height} to {p.hr_upscale_to_x}x{p.hr_upscale_to_y}"
def apply_styles(prompt, prompt_neg, style1_name, style2_name):
From 151233399c4b79934bdbb7c12a97eeb6499572fb Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Sat, 7 Jan 2023 13:30:06 +0300
Subject: [PATCH 131/172] new screenshot
---
README.md | 9 +++------
screenshot.png | Bin 525075 -> 420577 bytes
txt2img_Screenshot.png | Bin 337094 -> 0 bytes
3 files changed, 3 insertions(+), 6 deletions(-)
delete mode 100644 txt2img_Screenshot.png
diff --git a/README.md b/README.md
index fea6cb35..d783fdf0 100644
--- a/README.md
+++ b/README.md
@@ -1,9 +1,7 @@
# Stable Diffusion web UI
A browser interface based on Gradio library for Stable Diffusion.
-
-
-Check the [custom scripts](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Custom-Scripts) wiki page for extra scripts developed by users.
+
## Features
[Detailed feature showcase with images](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features):
@@ -97,9 +95,8 @@ Alternatively, use online services (like Google Colab):
1. Install [Python 3.10.6](https://www.python.org/downloads/windows/), checking "Add Python to PATH"
2. Install [git](https://git-scm.com/download/win).
3. Download the stable-diffusion-webui repository, for example by running `git clone https://github.com/AUTOMATIC1111/stable-diffusion-webui.git`.
-4. Place `model.ckpt` in the `models` directory (see [dependencies](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Dependencies) for where to get it).
-5. _*(Optional)*_ Place `GFPGANv1.4.pth` in the base directory, alongside `webui.py` (see [dependencies](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Dependencies) for where to get it).
-6. Run `webui-user.bat` from Windows Explorer as normal, non-administrator, user.
+4. Place stable diffusion checkpoint (`model.ckpt`) in the `models/Stable-diffusion` directory (see [dependencies](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Dependencies) for where to get it).
+5. Run `webui-user.bat` from Windows Explorer as normal, non-administrator, user.
### Automatic Installation on Linux
1. Install the dependencies:
diff --git a/screenshot.png b/screenshot.png
index 86c3209fe3a3b92e5afa584e9e6dcd0b3dcf2ecf..47a1be4ec43e315f3e47139b10b0f9a8045904f3 100644
GIT binary patch
literal 420577
[... base85-encoded binary image data omitted ...]
z9XFT++$~%^#`n&v?YS))S=Q=~4Zo*-a%5i0DVFJ{nNZ)fJNo>PJng+qLTy^LseTB>
z{tM#JeRxgbXCZ_7w7c)weV8R1$ldW+8?*P3zY_lF^vCNX6Wuu5UyxzawHu_9zCRj2-tk~zB
zl@~2H#JP73-ViH-A@aE3&Clr;=_Z>Jg^J>@Y>ZduC-KIe_3)jSDL4AT82ydAB*DkW
zK3KCL8JPdk!+`#5ouIzyyz^B1>y?Vi`P;y>OSQr%6RryJ(ptUXWa6_y~Hx=
zW-Q}r6CPvemQ6#@mOK#Jzc!IdryeaX26U;Qz#q?JWHG#H1eXk^hxw^pFckBs0Kg3v
z%mY-w=+pf^fDtQ~#R&z#r=`|AuwEp-w2|?^ivM8`$w3rvqd8B{eEnTN@~@QEfhl~=$19ij
za>GkeU$?mK2cM`vmMk6-rkO%=roUvK-bHyI0#C)qsBCL6Bk}_>ydWYU484`#mJ(Egt
z74{BEMP1s7yrAF^!Fa}69kvzbcn(QeXmAv`L=W21%HSe{xH96%QqRBBMX9I
zFb%LeT|EP$)nsEEx;ZI5J)Ajoqs8=h(y^JnbE1u${Ka|?@ml(f!OBw5|QBt4pF*
z%|Q6Kb1u}SZg^B`6Px|<%)t~Q(0VHVF}q1OtEW|**KneY9>sQ`6GHK@=t6>%v=imQ
zE~CnVSh9-GHveJ;%7*#OwT%b&rfCcUrSToUo@`s9l(>3CV#6vkn{RYJS=g9FZFeqG
z^^m{t2@tq!7@{BS=@Gua7qDl@e4?3jyXO($qK$
zDuN@&{O9CSE3VdsS*Ohu+10QSCzReCl@JVzeCno-Dlf}41ggG4vfI6|FTjjZO{
zTcg;q=utCeKl;4)>D1{
zBG8pCzpEUJ+ZCQuCSq#Abx-Iee|rK^%G1&3Y$YZz^cMhQm{6)``F8yRb;dE+Oa8{f
z{CX+|(;OHO#Zztx{WseBbtICAT0IEail$pg%D?*bFl0yu-uh=5SJiZE{`?MwCi5J8!
zK{|wJsjrIZ=OLN;)wPDI_Y%HJ`t$J`WPdS^Xo=^ctEcA2S(p9Qzc7W(L)*YK9)<=u
zIRBq7B6B^%cHU+fLpR?-WJHhpdi#4tR`><4eeKp=kFA7b8o#BEbJ8w+#4y_V_ucZW
zs6R)j<}8dCoJ^ONKre=B!~jHlH`w)$eZAyrnmpK7wycc<(a;@LHIR2T4qHk{jp5kozvaww{4XZQ$}cN_gT
z`mG->AbyQ8Ae-VsAKOKeI8vFo7?9yMoS$g^)Uh6=pEPz^>l_j
zg!dCwfQ7pcgFy)Y3tfY0=14&bU8EjDXB?zNz%-$=SvNA_0Oi((33WA~G719A3C5tX
zKqK#JMCpAZBtr+}R_agnKgtB)?4EsuV}piF8~^Lyc2h%pdVCZvDq*_0=P;P;5fj^f
zb4UDHy2=df_3HsL00vNsSw|-y%m9#dj*8oQSv%-K0PB+qry&aRzgrAb%=%)Cu_L<4
zEhaS?hDo(XXB;vEz^T}$_LAFef1qQbQ3hycY$SEA`woLz%PTnPrM3674Ojf{)6;!u
zS=q2o*qfFLJgZl``hbZQ=ieBMiJjnefEaAUSM%9pI`^Dlu=<6CNRjt)0^?9+27?lZ
z9fJmFc|6}x2S8XDT$}uNS_+$Lz0DtQ{th4$HD>~~+fm;O1;cl_U1BE0R`-#)J|C~J?G9%RBeB>9T7zY!ZV1LKK
zN>6JV@+^J)>3(6^vC!yqcK%6eVGJHynY`M}MC!hJmq=Sm6rk4{nmFoEpGFgXH-3ijYokQk3#
zQUS=~qyILI755YF
z>gBjvr%-cKVjkTeGf!~3LaYL|cB-;#`1b|;-ILb<0<}1Ae2cb&y~i*7qGOD4X!c10
zyOlS!`iU9Kziyljp0h|cI@OdwJnss<#<>BtsJIbbRp$(+(#r^YeySczQ?wjD+8?R)
zad(#Z{d%E2ag#mMEB7YNx5lDs()26HRJGkWRy(vKeyv-l?U
z+_R1sDbnP2aKA*WY}%?YaT}e{g8}I)m#Cq?b$B2LpdVp!-Em1dyf1sJa*?pWv@h)M
zYg=a6g?)WJEpk3m`=0+
zO}>Tx0`NPVHfWy$p$BosKSI9%1!$&`jNEh>Z1=V%>wpkx?1*M^TPXTv%3yZPI(`@^
z1?4ZB*#GUP*au^w|IogmN{TRI!D%pTgJSe$(71D88pOW4ve89%UmeFgR<>ynkVl9@
zfeFPjc7oY#t^K~5r!v8*0CW*35hf1KSv*iC^$gqO*Ehj*5oX=cZP#-
zsb!_wx+1MP8zJ_+9IlDE#R1cw%i`AHZY%Ljw!#jHn_Hx0s|qqj7tEl(91S86=M)Vx~;6WL(0
z>l)QR0YHb8#ruA0pboiOiwCYjC;m*2xdk-Q#Fq0?W1)`)I^;XVaiX1q4!dE?rG)4nH6>x$g
zfnfjgTUpfrSBfoY3($L@On_{+ZX^n*Hvp_W4K4xzv86v93K0+zgZ{d2SH*oq4ho-l
z{~SEEMS_Zw((A1BU0_sAHyq<3y@TTNPF&@gR5UneR_Uuzrgg4<9k-$lO8(&7gGqkH
zkiYyx#Vt8W(>oSh5E=M#fSp7J5K7o-S7F`SSA-bjC^7iT2XV&efD-X%P0fg!O;6>>
zh!PUvsff>ls2u(1%SY^UBHJADDFT7f!*&}E@i0lii#4T@$TJmF&pJheL2AP!5~qNP
z;s0$6D=J}ZuCZvC;lKT%V}vo_ugH>vLjs?aVlVkez3##Kb5zKlsKCO|j~qf?9IW$`
ziEJ0KB*eZb0q36~8+S_}`$t7ghewA1yN1>+4cPlzy@JWP62Liw4jwu{PJ>noV4t<9
zUu66NGj~nNQzpbBoj^tbRb(66%ysIV*i^4uB6XD*fcdd+WP~^Y^T*`2dADPdl|WPk
zLQG(KL0H)gr=f8OQlZ-pb)^1qloBdR#D9MTX7%L&rb&zfxa(|?y@QwJhglkm4v&@g
zpM|ciNWZPwvITV%peRYR&5AP00{3-7mFDO|>#dmshCv|y5~SFF3la<<8MO)?C_1Yr
zI|)6Mb`&ryUQ2z_s;9#v#Q!BfFc#q-f5gq?T7B&I$|BN<3xV?=R>6A$<^-b!QOmFI
zBl8Li3*)$gajL;_RA!)y=3UVeqf(iisl?0ncGI)u5aFaBw+g(9dyyfB7_Jh#p7D>3
z{AD{#hhg#eu~0DKe_NVc4>k-kD_bKP&{qA=zZAzX@kV-ikQM8wE3xsYs&LemmYzBH
zH>=DSO)h7u&2Es@R8{JNJd(#xBVP7J8{7b~8Nh<%lOaa(_vw}g^tEjuwC#mBqH@;A
z2JZ?&+1{S4K}9~AX2BwsTXzD>gR)#D4
z=2*3jjW~EVz$IIH^=AQ-?V@TPJ=T3(wj&?fZVnD|yXVyzXvkZ6xaD+nCGKW#Ybd9)
zJbbk29=h4cgMwm}6(<|GQe!411>ndU|9$@-jBY&*Fz7EUK^sJ=5~hh$fk`koz^`F?
z>u$e_rnbv`n
zTAKT>XiS>0+%JEN5qen6egAA+(NrO`@yhor9w4xo+SX5PGIXi
zs{6A)6yhqe`*B)In7bF@V@KD*fLvvU`r9A)QvL-Pgv{kG{^THgME~dKuvGG21I`gX
z77Co&+-)FIp1?(==<1d0-p)|{9j+i)EPtL&~xwp@_LG2I~6S>JvXJAbp9oy
zs#kBwgv9f{iS~_>th`!|<*KaGuFE7{HQ&)XxYafU=mNC%u;QNu$A8um`+mK@NfL~@`TYrIF^2w`z|
z=c-?U?PuJ=*6Ht_EEbu2(wJs6P_g*NGq446osT$S+&v3z`0df|@)z!5>mGT70#oHR
z%de%WS3i3al{Q~QefkrjNC}cI$}+Xf#>mps#UDNq6pv{}^?|j16%0)v`!t
z#U4EV;X_;^`!y+3_4UmjGA=h6hGda7Jg?^J_adx-G5VUJyw*Tjuq+o|;n*D`x_UKp
z`kXREAMb}8SW6@Ae{Q_@y-;;n|D4SHeb%294v*1|vjXA;-ag$)h?Rm70P9ECnaI`Y}!^>{cu-P%G2uhqWOL-`m;apg
zVqXHgamfE9#Uo@VQ9UjMgF;iNDQSf7&=PVB=LI@hmg|W+z}JMt
zMEuN@mjns14c5F51Y8iOD;3Y&aYE)DrfXz-nOP`CV_l_mp4eJg;=2hAleBRKsl(&o
zcL$3iy7WCj`Fi+~@Wq$2+E0Mn
z!?%Tub#}qm!7v}Z1I-|A-r%@ywEsz`F5wxCP=!X&$cz{*CmSTk3*-j
zit6jN!m)^tsw8B5#)UbHrkD>U2Wy<~w@4}x|Er`a@|tu~l<}SH3$I{qhju@?H~w(}
zhaDa2OJ$B%5*Bj|Q?K7wG7_-MIqq#Qls3UP&J=C*!EB3P|MKe}8c%fIfH
zr~Kx9yERHjroy3Q%{=qBGMZG;-Ac=@Dx(9~=GGmq#|p=8_bToQzFA)V={e-t^Tu2)
z;^h2$8S6}6st~7S*{h$yE?abU%R6ua*U+SRui4Yrk43YNTD?+SnJAX&!j>6w?`$r_
zvJU3+O#qR#8GjZu_p&n__0RN*2weDMNHFPbR$b;h?UBsD7uK}f6kMcP^Xgz#Zn9SI
z-gt>_I5Gj(>!qCj$wou^>gEPSl25xt{jdU&Y&1=9Ph~XN)AX_@TFPEAYh6M`JtlaR
zjHD~uB{7$}hewMdkEyqF{gO>bUb0c|io*8Ge}4O;DtI1!{sTfWY)I{d1X4FQ(qr++
zN9_&}`zK&-MrrqSPY)S<#tt#$#*vPmo?S^thgeEn9PW4EAS=b+YQ-B%KJ=z4@lO5RP{0*l84fV;B
z0tiIy-Duqz&xa2Lv4s}WsgYZwPWziNg%R;fLWYv;s5GKcKGC^$HeZ6~<_#}9cZi;B
zHmB=dnR`eZTvg<*i~GQH)~$JC`u-j-K!~}O%xnL96b~+SRto9%15@G2*K*TXfz7#z
zeF2rx;5A)|0N3v1d2kE_cE`NZAPE5zZ&>lGrg&XD%2o3nMerY%7aOL`ycXRM$pGKi
zNLhY;22PV4YS4-p{9MEUHXY#Z{|*Pp@{E+SNT){TmyVZ>o*;|!c=NWhbLWe;vv^Oi
z8X7fp+|*D*RCk|l^BGgs2luJSj!@{Gd_*N0bhu5Dvi#liHE_7^As9(&8Y&X6|VWfoE(ElyQY@|5iSe)GU5iHYx!s_Jz=wP^IYRs56d`CGM;+C{CdlCHj%_je#)1
z_N2(~08^S50PDqLO#Rwvc?9p(qRLa(pE=XqXi1164PzboKTcA%lkpYS30@(X#BEPYJN6B=&4{hX<3CNx4a~mm#!~7Q(KPVh%~C*53b};gOY*
zZ^N4^aKu1*g;K&n+5Mbv_|ir&n+!WCAU9bv@QJ>cx~RPs$5O
z$-c-#zXy9F8xo%5(&NI5;+{}pJmX3}$$yUE|Arosrw@_pNX>yoQON(t-&GbA6adH)
zc|AJSsMt5@L7oJsj2T5t{I7`l1Yj9fdS3WB%~FWU8d#~jo&V{n_|-N$0-3GEF>ISt
z*OUm5+-Eu3>gs`oTLK0IN=4U()A@y3`iX4YVxsBK8YAjKDu5sK_jxl>`D!RjqXqS9
zKMeiEhKa308Yx6$KJ%nc!ya>G+-7MN38hHuef9;07t4vLayK2}Y!j+`_;M%h6#U$4H0jW4fu^B(6&@x~x|ctZ;;
z2nG}HUmX@Y+m5IUxsQK3sV3{i4-p2AVfw*VjlXYUe=n=yN_qzwwA@*5;GK3?*@HIP
znc%}iqOO_;(f*0Aw=|pJ^=bM$4?{TPIVp(rbBo${M9x?|Qv5N3A?z*uPR}2VXj2hmC4Z;KZ^!^YG!!4G+s^nB88^zsrr!<=S;Lg
zTPlJwXy(W^J&V4*H*5hnSds)9VD~jYNtdwJm8i>GrIUT$a(byv(KI49f|YOE%VD5_
zOg8FU4}tv-#tzf=8vdHvrhl+dlc@XNOGjmfmbw;{T`AO-p#(I
zKdvGvS_*c?z4xv9xuQHEA#d6CGJL;(p^*WBVAa@$&7QTR?+s4pJ@*dwuCpJM&qF8P
zjGNw1&%u%|>#DI4;~G1z*~{_he3Df(AL19SqHipPN*xCYR%1RaSBkaWIG^4YEgxqt
zKC+(sYOmETA$uhCb^VZ$#|jx{Q1fL$JEk=iwXP&Vs$_rPj>6E;&@o7m7%w+u#(E~d
zIWKsq>&F}V;?oP0E!*kf_A98ne%T;CWqv3$)Bww@(92l(_jUFhLNse#ag5{~wThRq
z&8Rvf63QMgWyST!WUG}eyrd<68FXy<^^op>)yj^qV@A+Wg^0_I-Lp{;V>CBgUv`=>
zB&iIW{{X5WmI{#XoGsRYnCmOAFHv&KSUu5Hh#a))RjNsXbBbES$nj{~BaaGLO!o;x
zVpEk0P&Gw5rNNpLwLs*HW4&$*a}9g0#ixNsWmL4ssgq#|W&-+gy$*w<&NYb(boQ(o
zo?frdvA5_e*IEe|a_(K+TS}_E{Ofrr7#XRCH!!*KV})i(H~a*}@&&R5XLI0syn$D<
z*S!$~{Ea3ACv`uF_X!*J_Y{orSK4vyyq4+9EGoE5iVG>tg!ISpi^Z#7ne258wfl8~
z871jMPVPYoKZ|fOZ5!qhZ#oQ&FHKfx^_b9ctw=-|XhbekK?{#;Puo71ha{aa39)UI
zcvOG>RG>tn`dX&Ck)&?kcQE(JTU;(oW7mGQV6cx(v!aB@g}1VBP&vTHZi$VvzfKK(
zJSiVub7Zt^>lK_6Q*!WO%7C3CJm;WWdfW#ck$$zD{?1bCBlJv$4p|+4=R(IPO~oAw
zkB+U_-Cwyv@xl#5_=-y{>0&;GJHMI_Im0bECRsA5BRc9--&ry${1S_C%+tr$BKgJ~
zs?Hfo`FSB>DOk;%a~8_-@RK3U9kk_%b(v5p(Nse9
zJ2K-4C=Hou4&cr0K1)JzUP?vqesJGU;vERXIy?Gz)kWR7Eat5|O1PxvBKVX5jW3%k
zU+tHD*Z}sy_~)?37>kZ86cY{<@+-|K2ihz9q9x+`6Tf(>v5ak)z9=QApeg#om04DZ
zQoR}bYFffQL4I-0`S^Q`twQb-LiH4HR=aLZIk+I<*^qA2f-y#8}|X*NIEFxiAY!t-}j2Jd;^Q
zln;A7waht5lvh%vja`R1s>2-IZ%Tj6l;o42pe`{rj`_wH6)d@k{S~Q@@5xF|
zgr(ue-9GILw4mi0?;|6{Qp5^samLy
z0A7C%jC>bxlcy`E0YY|vDK`MVhFM8WbBy7}PGEa4W}URGtF&R78UUvWg@?Kk_$j$Uj36u)*J(Jy)AD|=#XXivtuV&`*FO4)hyX1*s}1pjWBJMo$cOP
zV!3pU3&VUmW{Gq%|83WxB4Pxk9kNksV1$%nK+VE%r8NL!ayzfS!v*)5ywLMr{Od_=
za$T+J+@w53{Q!~qo
zvI?SqWo`qaw{D)yu{tQaS&+xrDM=y9`^?A{7*10LwKlA|
zrn&=bRuSf4=Qg)x<|XOn0VMDnu~WD%*@{W`-K8qgWMA?Y$5^@T*mh5LuM0N5mn*L(
z_Nj2W>4-*Sx8M)OqUX&zRtlooSj>;!EZ5iI=qH*J3a)91Y3IHxPR81wDcyhnQ`_g~
zph)h@^=t$1NXC!nj62*bYLZsg>zL#e@jPtax+3U7YHP=}zuMMq`UWQ?J`i1_v6;$l7~o$c8#mLTxhek`jw25KhWPMv
z2h*d+{?C2dKn1NSEOz2Z)L)NJ_L1@7XRH(1&jB3~@W`9a560prKueDOnS7(u>!&jp
ztgn*J0=+{TE}ome<*}6sb?HKSwjp7M$a(bcsH9ao`&V4AB%han+;yf_?TSb}6FT6t
zx##qCo-X_J)%t7MgbFE61{}X2+_SmEP@dP;JOh9WeMZi`8b>V2x#8TJOVP6uQA|v(
zF^jC{cUYNJi(*8B=j=0!Ns^BIouQF^lvwigbe4_6fv2(6`mlD7N)JVESj>0|YNeiW
z9F-3?uvN*hbkrja<>Uwu%wP{VA0C%SmZxRbHI9?#`z1OzA*2nGjjKGlhkuPtsDlWTvP$az558v3
zl8=p(o_-2j_3cGTR41SQ-NTY%S9^j)CiG6t5KHHjQ?1qvX8tvBAjGb1Ii;^`Rbo5Q5yS&Rj93c=^=PV3$MZ)2+NQ*OMb#rz@n4Z+AT@~*HgUuQz8p*v*a&Hl*AKw
zjOR5apMSJp+}oV#z_tE1uNIW}+T&C{#}&g_c=P3pum^AfrQ^1B8PdMH_`Y7)e2g_O
z_lmi7M3C#qwYGHPgV2@d&D_<%>dBe_izrLd!&K9tS)zC}S{iVxYRvy1dvD#>_O^A8
zwxtwWthhr91&X^%Htqx|QY^SjvErn-d$AxXZUu@J52bif9CFg^GY+vqHJmwPxttAbO=G(Nk
zg9r~f$?H8U$aCyC%-|03&>?^isI$CpT^zwEesnYlwY_)fqOYCV$qVs3su}V3E`C35
zMhaXaw^eqz{Vc6>XPkeL9nGj;Flb<*U&2Iay+6+yLw*p--(%i@^VKk@vdi4!bbQoY
z$7R~j4^;o-gm%1OK+j4ElogQr9Uf?O)B?%X;T&jYv6tv_NRdk7U7o+lya!C#`ZT=
z>SKI=f3^zFjLC$d9!9#hbEfEJ3GGI`?-~gri)*z=5sBrprJEAQ_OtmSopWeJk|){AOys${&9Ru<@9dvxY*<3MBTYXd?QNQt>RYg4
zly^<9E1vRgP>7Hz=!&bnb;m*96eF@k+2guEc*L{ZhJmd8hVwk@WAe9`%|SA}lb4!n
zyNx~d4@u&l&m;Dy-q4)B>-2c-M#~#Swg;+;+yB@4IY{*^R^+1m?fOpOXuAFB>fPI<
zLJ{4R8cO}l6{{ZY>~Sw4LxD~Bdhw^)wt9aL@%LY}HQ!>d&;WroSwcG)2)~Y#Yoxmg
z|0egod{%($j88LaB8q7GZX*lCTFPUoNh!0ZnK)Zuz3o{*teeevQe$!jz}5T+OuCTT
zr$2X17XQs{w|1a|)WA35on;QLlRlL>Ys)awbR?12V&>QV8q$33<>?pv()sp12#ybX
zq+@DKtWn`fz?fd_L8>MdEL0`!I
zE}C+4&LSVyXu)|0+g!U31Z=FD~_88r>OXN*LBRbZ@yWF?J
z7&Dy{lOOEf5pFGa^ac=!02O_ak-1d$3}+8iqW-VCN9KhD7F}qiiIz~;fKE)*Mk$9c
zGU3rH_mnXE+1r_nI+-X(9@MTw_ie_n+|$#qhIG3iwVXUP?9ES{ql
zA->O@BGfdZVgbL-mb`!aM9VuPi*_^7-mP4$dp(9IEaatE$yY_T46xs(W1a-Q2&j!!FvtNi>#NNIam~Y_^DesJjq6
zN)_8GNSJGxzjiRg#Jy#79yLRf#)m7pn5@*5u3=evwF!ZiMx}ESqGZ`cV+;vXmv#FR
z(2t#BL`400ZQ{ssVbPIL@3n06ziW>FDkCEWQO|$NB{~y;hlyJqoKd`h#PWPNv|Y{q
zmd{!~f!#=hh`Wcn;O2eDyVVaebu}BQVvcMfSl-xIS7bqp6|nHB0*HzI`q;qmdWhOz
z?ASdG!|gA~>gc$8rr8~>;kfKwqLWOi%imZ(QUg`c3n@Sixwd^#YvJKDpYg{~4W|o{
zvBb3xCiyYo_T~bST4%z#S(Xu9*wh-%i9u|teoYev(wMO8q)s_ynHvpQcV_^cAC^9NP$TciX&s`N$8)#DkRG`893`#!~
zB|(x`h4(Rze%C(p9H;F_&S1Bqm^_~Pl3T)mlJv&iiUnaHkyCXh5xvRNQ~`Gnng2JY&zur$%Cgb
z<(dp;yG!Ee@YY5h=f`u!Z9X5O5FF9~?SlO5DHmD}uU~TK)#-?;~^h*YHhCGLH%ij+zzNq4w;bz9Fo9I7oBny(R#>wzJ$vU5j{A4^mZ(floQl?IQvNFvYmph3`Ht7{t<2cmB`MGAEliQAD_%@!W{jA9
z`m$`SMM%$IG*^`8yo`HQr!R6D&|j5rWw20ott{U(@!g>D=xe*V>U&(ui}(Kv3dy7Vy=#hn
ztiLta!7czQDmFr3GbXbfFAHiXPwZNm2zNP_h-LGz_{oAvDU6ycd)^7SDKtJ;ig&bz
z4-KV%*^p~N5;Tzj0Fq0!@w=#>G5e`{BI+?_FKrCD
zl&h{=0Y4B|u|MXjyO$BK$2c79__-E?)oh4;|E1%@3^+JC2M*P&)ybwCJKQeA!rC6
z;ZW
zvBy*T!=CPe)>LTrprXp`|A0D-p*6I>A*cjvE`5$&86Jciu$j}Fsw*0ddZpl4gDl~G
z+%!ACc1OECODZPeccEJJX~1ysbA7w`Sy8h4>cnVZ6qW^Wfjrj}6b$>%{OsIQ}%d*N2;?T4L7H``K#&KN$
zFO_37Y3$15Y(6T<(f*?-a#wE1Z4LRPY@9WgCk~Q3XwA~&$2VK~56dRO87Z)M_8Y%r
zB0uf`0z!}U=SY3K`+w*q7MhOt3z7)C2t5;oM{@?V0^bVsj^jMK5
zAS6V>(1{~DkLpA~bSCb#f)^v1qGGx0{HUTx}NESgn@}%q}OD0;Cgp-nNl@eRV&|l~Iovi(P
zKbm~3MnO_?vdQufVzT|;%}m$DEdCEDoEdop(kqkrKd3ed