Merge branch 'master' of https://github.com/yfszzx/stable-diffusion-webui-plus
Commit 2a3e7ed872

.github/workflows/on_pull_request.yaml
@@ -22,6 +22,12 @@ jobs:
         uses: actions/setup-python@v3
         with:
           python-version: 3.10.6
+      - uses: actions/cache@v2
+        with:
+          path: ~/.cache/pip
+          key: ${{ runner.os }}-pip-${{ hashFiles('**/requirements.txt') }}
+          restore-keys: |
+            ${{ runner.os }}-pip-
       - name: Install PyLint
         run: |
           python -m pip install --upgrade pip

@@ -31,8 +31,8 @@ function imageMaskResize() {
 
     wrapper.style.width = `${wW}px`;
    wrapper.style.height = `${wH}px`;
-    wrapper.style.left = `${(w-wW)/2}px`;
-    wrapper.style.top = `${(h-wH)/2}px`;
+    wrapper.style.left = `0px`;
+    wrapper.style.top = `0px`;
 
     canvases.forEach( c => {
         c.style.width = c.style.height = '';

@@ -42,4 +42,4 @@ function imageMaskResize() {
     });
 }
 
-onUiUpdate(() => imageMaskResize());
+onUiUpdate(() => imageMaskResize());

@@ -31,7 +31,7 @@ function updateOnBackgroundChange() {
             }
         })
 
-        if (modalImage.src != currentButton.children[0].src) {
+        if (currentButton?.children?.length > 0 && modalImage.src != currentButton.children[0].src) {
             modalImage.src = currentButton.children[0].src;
             if (modalImage.style.display === 'none') {
                 modal.style.setProperty('background-image', `url(${modalImage.src})`)

@@ -175,11 +175,14 @@ def run_pnginfo(image):
 
 
 def run_modelmerger(primary_model_name, secondary_model_name, teritary_model_name, interp_method, multiplier, save_as_half, custom_name):
-    def weighted_sum(theta0, theta1, theta2, alpha):
+    def weighted_sum(theta0, theta1, alpha):
         return ((1 - alpha) * theta0) + (alpha * theta1)
 
-    def add_difference(theta0, theta1, theta2, alpha):
-        return theta0 + (theta1 - theta2) * alpha
+    def get_difference(theta1, theta2):
+        return theta1 - theta2
+
+    def add_difference(theta0, theta1_2_diff, alpha):
+        return theta0 + (alpha * theta1_2_diff)
 
     primary_model_info = sd_models.checkpoints_list[primary_model_name]
     secondary_model_info = sd_models.checkpoints_list[secondary_model_name]

@@ -198,23 +201,28 @@ def run_modelmerger(primary_model_name, secondary_model_name, teritary_model_name, interp_method, multiplier, save_as_half, custom_name):
         teritary_model = torch.load(teritary_model_info.filename, map_location='cpu')
         theta_2 = sd_models.get_state_dict_from_checkpoint(teritary_model)
     else:
         teritary_model = None
         theta_2 = None
 
     theta_funcs = {
-        "Weighted sum": weighted_sum,
-        "Add difference": add_difference,
+        "Weighted sum": (None, weighted_sum),
+        "Add difference": (get_difference, add_difference),
     }
-    theta_func = theta_funcs[interp_method]
+    theta_func1, theta_func2 = theta_funcs[interp_method]
 
     print(f"Merging...")
 
+    if theta_func1:
+        for key in tqdm.tqdm(theta_1.keys()):
+            if 'model' in key:
+                t2 = theta_2.get(key, torch.zeros_like(theta_1[key]))
+                theta_1[key] = theta_func1(theta_1[key], t2)
+    del theta_2, teritary_model
+
     for key in tqdm.tqdm(theta_0.keys()):
         if 'model' in key and key in theta_1:
-            t2 = (theta_2 or {}).get(key)
-            if t2 is None:
-                t2 = torch.zeros_like(theta_0[key])
-
-            theta_0[key] = theta_func(theta_0[key], theta_1[key], t2, multiplier)
+            theta_0[key] = theta_func2(theta_0[key], theta_1[key], multiplier)
 
             if save_as_half:
                 theta_0[key] = theta_0[key].half()

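To make the new dispatch concrete: each merge mode now maps to a pair of functions, an optional first-pass function that folds the tertiary model into theta_1, and a second-pass function that blends theta_1 into theta_0. Below is a minimal sketch of that pattern using plain floats instead of checkpoint tensors; the merge() driver and the toy dictionaries are illustrative, not part of the repository.

def weighted_sum(theta0, theta1, alpha):
    return ((1 - alpha) * theta0) + (alpha * theta1)

def get_difference(theta1, theta2):
    return theta1 - theta2

def add_difference(theta0, theta1_2_diff, alpha):
    return theta0 + (alpha * theta1_2_diff)

theta_funcs = {
    "Weighted sum": (None, weighted_sum),                # no preprocessing pass
    "Add difference": (get_difference, add_difference),  # fold C into B first
}

def merge(theta_0, theta_1, theta_2, interp_method, multiplier):
    theta_func1, theta_func2 = theta_funcs[interp_method]
    if theta_func1:
        # first pass: replace theta_1 with (theta_1 - theta_2)
        theta_1 = {k: theta_func1(v, theta_2.get(k, 0.0)) for k, v in theta_1.items()}
    # second pass: blend theta_1 into theta_0
    return {k: theta_func2(v, theta_1[k], multiplier) if k in theta_1 else v
            for k, v in theta_0.items()}

# example: "Add difference" adds (B - C) * multiplier on top of A
print(merge({"w": 1.0}, {"w": 3.0}, {"w": 2.0}, "Add difference", 0.5))  # {'w': 1.5}
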
@@ -123,7 +123,7 @@ class InterrogateModels:
 
         return caption[0]
 
-    def interrogate(self, pil_image, include_ranks=False):
+    def interrogate(self, pil_image):
        res = None
 
         try:

@@ -156,10 +156,10 @@ class InterrogateModels:
             for name, topn, items in self.categories:
                 matches = self.rank(image_features, items, top_count=topn)
                 for match, score in matches:
-                    if include_ranks:
-                        res += ", " + match
+                    if shared.opts.interrogate_return_ranks:
+                        res += f", ({match}:{score/100:.3f})"
                     else:
-                        res += f", ({match}:{score})"
+                        res += ", " + match
 
         except Exception:
             print(f"Error interrogating", file=sys.stderr)

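The per-call include_ranks argument is replaced by a shared option, and the score is now normalized to a 0-1 value with three decimals. A hedged sketch of the resulting caption assembly is below; the opts stand-in and the matches list are made up for illustration.

from types import SimpleNamespace

opts = SimpleNamespace(interrogate_return_ranks=True)  # stand-in for shared.opts

def append_matches(res, matches):
    for match, score in matches:
        if opts.interrogate_return_ranks:
            res += f", ({match}:{score/100:.3f})"
        else:
            res += ", " + match
    return res

print(append_matches("a photo of a dog", [("golden retriever", 87.5), ("park", 42.0)]))
# a photo of a dog, (golden retriever:0.875), (park:0.420)
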
@@ -58,6 +58,9 @@ def load_scripts(basedir):
     for filename in sorted(os.listdir(basedir)):
         path = os.path.join(basedir, filename)
 
+        if os.path.splitext(path)[1].lower() != '.py':
+            continue
+
         if not os.path.isfile(path):
             continue
 

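The loader now skips anything in the scripts folder that is not a .py file before it tries to import it. A minimal sketch of that filter, with the directory name assumed for the example:

import os

def list_script_files(basedir):
    result = []
    for filename in sorted(os.listdir(basedir)):
        path = os.path.join(basedir, filename)

        if os.path.splitext(path)[1].lower() != '.py':
            continue  # e.g. README.md, stray .txt notes, __pycache__

        if not os.path.isfile(path):
            continue  # skip subdirectories

        result.append(path)
    return result

# print(list_script_files("scripts"))
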
@ -77,6 +77,16 @@ parser.add_argument("--disable-safe-unpickle", action='store_true', help="disabl
|
|||
|
||||
|
||||
cmd_opts = parser.parse_args()
|
||||
restricted_opts = [
|
||||
"samples_filename_pattern",
|
||||
"outdir_samples",
|
||||
"outdir_txt2img_samples",
|
||||
"outdir_img2img_samples",
|
||||
"outdir_extras_samples",
|
||||
"outdir_grids",
|
||||
"outdir_txt2img_grids",
|
||||
"outdir_save",
|
||||
]
|
||||
|
||||
devices.device, devices.device_interrogate, devices.device_gfpgan, devices.device_bsrgan, devices.device_esrgan, devices.device_scunet, devices.device_codeformer = \
|
||||
(devices.cpu if any(y in cmd_opts.use_cpu for y in [x, 'all']) else devices.get_optimal_device() for x in ['sd', 'interrogate', 'gfpgan', 'bsrgan', 'esrgan', 'scunet', 'codeformer'])
|
||||
|
|
|
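The device assignment shown as context in this hunk unpacks a generator expression into one name per component: a component runs on CPU if its name, or 'all', was passed via --use-cpu, otherwise it gets the optimal device. A reduced sketch of that idiom, with stand-ins for cmd_opts and the devices helper:

def get_optimal_device():
    return "cuda"  # stand-in; the real helper probes torch

use_cpu = ["interrogate"]  # stand-in for cmd_opts.use_cpu

device_sd, device_interrogate, device_gfpgan = (
    "cpu" if any(y in use_cpu for y in [x, 'all']) else get_optimal_device()
    for x in ['sd', 'interrogate', 'gfpgan']
)

print(device_sd, device_interrogate, device_gfpgan)  # cuda cpu cuda
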
@@ -137,6 +137,7 @@ class EmbeddingDatabase:
                 continue
 
         print(f"Loaded a total of {len(self.word_embeddings)} textual inversion embeddings.")
+        print("Embeddings:", ', '.join(self.word_embeddings.keys()))
 
     def find_embedding_at_position(self, tokens, offset):
         token = tokens[offset]

@@ -25,7 +25,7 @@ import gradio.routes
 
 from modules import sd_hijack, sd_models
 from modules.paths import script_path
-from modules.shared import opts, cmd_opts
+from modules.shared import opts, cmd_opts, restricted_opts
 if cmd_opts.deepdanbooru:
     from modules.deepbooru import get_deepbooru_tags
 import modules.shared as shared

@@ -1430,6 +1430,9 @@ Requested path was: {f}
         if comp_args and isinstance(comp_args, dict) and comp_args.get('visible') is False:
             continue
 
+        if cmd_opts.hide_ui_dir_config and key in restricted_opts:
+            continue
+
         oldval = opts.data.get(key, None)
         opts.data[key] = value
 

@@ -1447,6 +1450,9 @@ Requested path was: {f}
         if not opts.same_type(value, opts.data_labels[key].default):
             return gr.update(visible=True), opts.dumpjson()
 
+        if cmd_opts.hide_ui_dir_config and key in restricted_opts:
+            return gr.update(value=oldval), opts.dumpjson()
+
         oldval = opts.data.get(key, None)
         opts.data[key] = value
 

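Both ui.py hunks add the same guard: when the server runs with --hide-ui-dir-config, any settings key listed in restricted_opts is refused instead of being written into opts.data. A reduced sketch of that guard, with simple stand-ins for the real opts and cmd_opts objects:

restricted_opts = ["outdir_samples", "outdir_save", "samples_filename_pattern"]

class FakeCmdOpts:
    hide_ui_dir_config = True

cmd_opts = FakeCmdOpts()
opts_data = {"outdir_samples": "outputs", "show_progressbar": True}

def set_setting(key, value):
    if cmd_opts.hide_ui_dir_config and key in restricted_opts:
        return False  # refuse to change directory-related settings
    opts_data[key] = value
    return True

print(set_setting("outdir_samples", "/tmp/elsewhere"))  # False, value unchanged
print(set_setting("show_progressbar", False))           # True
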
@@ -233,6 +233,21 @@ def draw_xy_grid(p, xs, ys, x_labels, y_labels, cell, draw_legend, include_lone_images):
     return processed_result
 
 
+class SharedSettingsStackHelper(object):
+    def __enter__(self):
+        self.CLIP_stop_at_last_layers = opts.CLIP_stop_at_last_layers
+        self.hypernetwork = opts.sd_hypernetwork
+        self.model = shared.sd_model
+
+    def __exit__(self, exc_type, exc_value, tb):
+        modules.sd_models.reload_model_weights(self.model)
+
+        hypernetwork.load_hypernetwork(self.hypernetwork)
+        hypernetwork.apply_strength()
+
+        opts.data["CLIP_stop_at_last_layers"] = self.CLIP_stop_at_last_layers
+
+
 re_range = re.compile(r"\s*([+-]?\s*\d+)\s*-\s*([+-]?\s*\d+)(?:\s*\(([+-]\d+)\s*\))?\s*")
 re_range_float = re.compile(r"\s*([+-]?\s*\d+(?:.\d*)?)\s*-\s*([+-]?\s*\d+(?:.\d*)?)(?:\s*\(([+-]\d+(?:.\d*)?)\s*\))?\s*")
 

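SharedSettingsStackHelper is a context manager: __enter__ records the checkpoint, hypernetwork, and CLIP-skip settings before the grid runs, and __exit__ restores them afterwards even if an axis changed them or the run raised. A hedged sketch of that save/restore pattern, with a plain dictionary standing in for modules.shared.opts:

opts = {"CLIP_stop_at_last_layers": 1, "sd_hypernetwork": "None"}

class SettingsSnapshot:
    def __enter__(self):
        self.saved = dict(opts)  # snapshot current settings
        return self

    def __exit__(self, exc_type, exc_value, tb):
        opts.update(self.saved)  # restore, even if the body raised

with SettingsSnapshot():
    opts["CLIP_stop_at_last_layers"] = 2   # an axis changed a setting
    opts["sd_hypernetwork"] = "anime_3"

print(opts)  # {'CLIP_stop_at_last_layers': 1, 'sd_hypernetwork': 'None'}
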
@@ -267,9 +282,6 @@ class Script(scripts.Script):
         if not opts.return_grid:
             p.batch_size = 1
 
-
-        CLIP_stop_at_last_layers = opts.CLIP_stop_at_last_layers
-
         def process_axis(opt, vals):
             if opt.label == 'Nothing':
                 return [0]

@@ -367,27 +379,19 @@ class Script(scripts.Script):
 
             return process_images(pc)
 
-        processed = draw_xy_grid(
-            p,
-            xs=xs,
-            ys=ys,
-            x_labels=[x_opt.format_value(p, x_opt, x) for x in xs],
-            y_labels=[y_opt.format_value(p, y_opt, y) for y in ys],
-            cell=cell,
-            draw_legend=draw_legend,
-            include_lone_images=include_lone_images
-        )
+        with SharedSettingsStackHelper():
+            processed = draw_xy_grid(
+                p,
+                xs=xs,
+                ys=ys,
+                x_labels=[x_opt.format_value(p, x_opt, x) for x in xs],
+                y_labels=[y_opt.format_value(p, y_opt, y) for y in ys],
+                cell=cell,
+                draw_legend=draw_legend,
+                include_lone_images=include_lone_images
+            )
 
         if opts.grid_save:
             images.save_image(processed.images[0], p.outpath_grids, "xy_grid", prompt=p.prompt, seed=processed.seed, grid=True, p=p)
 
-        # restore checkpoint in case it was changed by axes
-        modules.sd_models.reload_model_weights(shared.sd_model)
-
-        hypernetwork.load_hypernetwork(opts.sd_hypernetwork)
-        hypernetwork.apply_strength()
-
-        opts.data["CLIP_stop_at_last_layers"] = CLIP_stop_at_last_layers
-
         return processed