made 'reuse seed' button give you the seed/subseed of the currently selected picture rather than the first
parent 7ae3dc2866
commit 7539f04e28
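In short: Processed now carries the per-image all_prompts, all_seeds and all_subseeds lists (plus index_of_first_image to account for a grid occupying gallery slot 0) and serializes them into the generation info JSON, while the reuse buttons pass selected_gallery_index() through a hidden dummy component so the clicked image's entry can be looked up. A minimal sketch of the enriched payload, with made-up values for illustration only:

import json

# Hypothetical values; the real dict is assembled by Processed.js()
gen_info = {
    "seed": 1000,                      # first seed, kept for backwards compatibility
    "all_seeds": [1000, 1001, 1002, 1003],
    "subseed": 20,
    "all_subseeds": [20, 21, 22, 23],
    "subseed_strength": 0.6,
    "index_of_first_image": 1,         # 1 when a grid is returned as the first gallery item
}
generation_info_text = json.dumps(gen_info)  # what the hidden generation_info textbox receives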
@@ -83,7 +83,7 @@ class StableDiffusionProcessing:
 
 
 class Processed:
-    def __init__(self, p: StableDiffusionProcessing, images_list, seed, info, subseed=None):
+    def __init__(self, p: StableDiffusionProcessing, images_list, seed=-1, info="", subseed=None, all_prompts=None, all_seeds=None, all_subseeds=None, index_of_first_image=0):
         self.images = images_list
         self.prompt = p.prompt
         self.negative_prompt = p.negative_prompt
@@ -93,26 +93,62 @@ class Processed:
         self.info = info
         self.width = p.width
         self.height = p.height
+        self.sampler_index = p.sampler_index
         self.sampler = samplers[p.sampler_index].name
         self.cfg_scale = p.cfg_scale
         self.steps = p.steps
+        self.batch_size = p.batch_size
+        self.restore_faces = p.restore_faces
+        self.face_restoration_model = opts.face_restoration_model if p.restore_faces else None
+        self.sd_model_hash = shared.sd_model.sd_model_hash
+        self.seed_resize_from_w = p.seed_resize_from_w
+        self.seed_resize_from_h = p.seed_resize_from_h
+        self.denoising_strength = getattr(p, 'denoising_strength', None)
+        self.extra_generation_params = p.extra_generation_params
+        self.index_of_first_image = index_of_first_image
+
+        self.prompt = self.prompt if type(self.prompt) != list else self.prompt[0]
+        self.negative_prompt = self.negative_prompt if type(self.negative_prompt) != list else self.negative_prompt[0]
+        self.seed = int(self.seed if type(self.seed) != list else self.seed[0])
+        self.subseed = int(self.subseed if type(self.subseed) != list else self.subseed[0]) if self.subseed is not None else -1
+
+        self.all_prompts = all_prompts or [self.prompt]
+        self.all_seeds = all_seeds or [self.seed]
+        self.all_subseeds = all_subseeds or [self.subseed]
 
     def js(self):
         obj = {
-            "prompt": self.prompt if type(self.prompt) != list else self.prompt[0],
-            "negative_prompt": self.negative_prompt if type(self.negative_prompt) != list else self.negative_prompt[0],
-            "seed": int(self.seed if type(self.seed) != list else self.seed[0]),
-            "subseed": int(self.subseed if type(self.subseed) != list else self.subseed[0]) if self.subseed is not None else -1,
+            "prompt": self.prompt,
+            "all_prompts": self.all_prompts,
+            "negative_prompt": self.negative_prompt,
+            "seed": self.seed,
+            "all_seeds": self.all_seeds,
+            "subseed": self.subseed,
+            "all_subseeds": self.all_subseeds,
             "subseed_strength": self.subseed_strength,
             "width": self.width,
             "height": self.height,
+            "sampler_index": self.sampler_index,
             "sampler": self.sampler,
             "cfg_scale": self.cfg_scale,
             "steps": self.steps,
+            "batch_size": self.batch_size,
+            "restore_faces": self.restore_faces,
+            "face_restoration_model": self.face_restoration_model,
+            "sd_model_hash": self.sd_model_hash,
+            "seed_resize_from_w": self.seed_resize_from_w,
+            "seed_resize_from_h": self.seed_resize_from_h,
+            "denoising_strength": self.denoising_strength,
+            "extra_generation_params": self.extra_generation_params,
+            "index_of_first_image": self.index_of_first_image,
         }
 
         return json.dumps(obj)
 
+    def infotext(self, p: StableDiffusionProcessing, index):
+        return create_infotext(p, self.all_prompts, self.all_seeds, self.all_subseeds, comments=[], position_in_batch=index % self.batch_size, iteration=index // self.batch_size)
+
 
 # from https://discuss.pytorch.org/t/help-regarding-slerp-function-for-generative-model-sampling/32475/3
 def slerp(val, low, high):
     low_norm = low/torch.norm(low, dim=1, keepdim=True)
@@ -156,11 +192,9 @@ def create_random_tensors(shape, seeds, subseeds=None, subseed_strength=0.0, see
         noise = devices.randn(seed, noise_shape)
 
         if subnoise is not None:
-            #noise = subnoise * subseed_strength + noise * (1 - subseed_strength)
            noise = slerp(subseed_strength, noise, subnoise)
 
         if noise_shape != shape:
-            #noise = torch.nn.functional.interpolate(noise.unsqueeze(1), size=shape[1:], mode="bilinear").squeeze()
            x = devices.randn(seed, shape)
            dx = (shape[2] - noise_shape[2]) // 2
            dy = (shape[1] - noise_shape[1]) // 2
@@ -194,6 +228,35 @@ def fix_seed(p):
     p.subseed = int(random.randrange(4294967294)) if p.subseed is None or p.subseed == -1 else p.subseed
 
 
+def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments, iteration=0, position_in_batch=0):
+    index = position_in_batch + iteration * p.batch_size
+
+    generation_params = {
+        "Steps": p.steps,
+        "Sampler": samplers[p.sampler_index].name,
+        "CFG scale": p.cfg_scale,
+        "Seed": all_seeds[index],
+        "Face restoration": (opts.face_restoration_model if p.restore_faces else None),
+        "Size": f"{p.width}x{p.height}",
+        "Model hash": getattr(p, 'sd_model_hash', None if not opts.add_model_hash_to_info or not shared.sd_model.sd_model_hash else shared.sd_model.sd_model_hash),
+        "Batch size": (None if p.batch_size < 2 else p.batch_size),
+        "Batch pos": (None if p.batch_size < 2 else position_in_batch),
+        "Variation seed": (None if p.subseed_strength == 0 else all_subseeds[index]),
+        "Variation seed strength": (None if p.subseed_strength == 0 else p.subseed_strength),
+        "Seed resize from": (None if p.seed_resize_from_w == 0 or p.seed_resize_from_h == 0 else f"{p.seed_resize_from_w}x{p.seed_resize_from_h}"),
+        "Denoising strength": getattr(p, 'denoising_strength', None),
+    }
+
+    if p.extra_generation_params is not None:
+        generation_params.update(p.extra_generation_params)
+
+    generation_params_text = ", ".join([k if k == v else f'{k}: {v}' for k, v in generation_params.items() if v is not None])
+
+    negative_prompt_text = "\nNegative prompt: " + p.negative_prompt if p.negative_prompt else ""
+
+    return f"{all_prompts[index]}{negative_prompt_text}\n{generation_params_text}".strip() + "".join(["\n\n" + x for x in comments])
+
+
 def process_images(p: StableDiffusionProcessing) -> Processed:
     """this is the main loop that both txt2img and img2img use; it calls func_init once inside all the scopes and func_sample once per batch"""
 
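Note: create_infotext drops every None-valued entry and joins the rest as "key: value" pairs beneath the prompt and optional negative prompt. A small sketch of that join, using hypothetical parameter values:

# Hypothetical example of the text create_infotext assembles (values made up).
generation_params = {"Steps": 20, "Sampler": "Euler a", "CFG scale": 7.0, "Seed": 1000,
                     "Size": "512x512", "Batch size": None, "Variation seed": None}
generation_params_text = ", ".join(
    [k if k == v else f'{k}: {v}' for k, v in generation_params.items() if v is not None])
# -> "Steps: 20, Sampler: Euler a, CFG scale: 7.0, Seed: 1000, Size: 512x512"
print("a photo of a cat\nNegative prompt: blurry\n" + generation_params_text)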
@@ -231,32 +294,7 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
     all_subseeds = [int(p.subseed + x) for x in range(len(all_prompts))]
 
     def infotext(iteration=0, position_in_batch=0):
-        index = position_in_batch + iteration * p.batch_size
-
-        generation_params = {
-            "Steps": p.steps,
-            "Sampler": samplers[p.sampler_index].name,
-            "CFG scale": p.cfg_scale,
-            "Seed": all_seeds[index],
-            "Face restoration": (opts.face_restoration_model if p.restore_faces else None),
-            "Size": f"{p.width}x{p.height}",
-            "Model hash": (None if not opts.add_model_hash_to_info or not shared.sd_model.sd_model_hash else shared.sd_model.sd_model_hash),
-            "Batch size": (None if p.batch_size < 2 else p.batch_size),
-            "Batch pos": (None if p.batch_size < 2 else position_in_batch),
-            "Variation seed": (None if p.subseed_strength == 0 else all_subseeds[index]),
-            "Variation seed strength": (None if p.subseed_strength == 0 else p.subseed_strength),
-            "Seed resize from": (None if p.seed_resize_from_w == 0 or p.seed_resize_from_h == 0 else f"{p.seed_resize_from_w}x{p.seed_resize_from_h}"),
-            "Denoising strength": getattr(p, 'denoising_strength', None),
-        }
-
-        if p.extra_generation_params is not None:
-            generation_params.update(p.extra_generation_params)
-
-        generation_params_text = ", ".join([k if k == v else f'{k}: {v}' for k, v in generation_params.items() if v is not None])
-
-        negative_prompt_text = "\nNegative prompt: " + p.negative_prompt if p.negative_prompt else ""
-
-        return f"{all_prompts[index]}{negative_prompt_text}\n{generation_params_text}".strip() + "".join(["\n\n" + x for x in comments])
+        return create_infotext(p, all_prompts, all_seeds, all_subseeds, comments, iteration, position_in_batch)
 
     if os.path.exists(cmd_opts.embeddings_dir):
         model_hijack.load_textual_inversion_embeddings(cmd_opts.embeddings_dir, p.sd_model)
@@ -350,18 +388,20 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
 
         p.color_corrections = None
 
+        index_of_first_image = 0
         unwanted_grid_because_of_img_count = len(output_images) < 2 and opts.grid_only_if_multiple
         if (opts.return_grid or opts.grid_save) and not p.do_not_save_grid and not unwanted_grid_because_of_img_count:
             grid = images.image_grid(output_images, p.batch_size)
 
             if opts.return_grid:
                 output_images.insert(0, grid)
+                index_of_first_image = 1
 
             if opts.grid_save:
                 images.save_image(grid, p.outpath_grids, "grid", all_seeds[0], all_prompts[0], opts.grid_format, info=infotext(), short_filename=not opts.grid_extended_filename, p=p)
 
     devices.torch_gc()
-    return Processed(p, output_images, all_seeds[0], infotext(), subseed=all_subseeds[0])
+    return Processed(p, output_images, all_seeds[0], infotext(), subseed=all_subseeds[0], all_prompts=all_prompts, all_seeds=all_seeds, all_subseeds=all_subseeds, index_of_first_image=index_of_first_image)
 
 
 class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
@@ -297,53 +297,39 @@ def create_seed_inputs():
     return seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w
 
 
-def connect_reuse_seed(seed: gr.Number, reuse_seed: gr.Button, generation_info: gr.Textbox):
-    """ Connects a 'reuse seed' button's click event so that it copies last used
-    seed value from generation info the to the seed."""
-    def copy_seed(gen_info_string: str):
-        try:
-            gen_info = json.loads(gen_info_string)
-            return gen_info.get('seed', -1)
-        except json.decoder.JSONDecodeError as e:
-            if gen_info_string != '':
-                print("Error parsing JSON generation info:", file=sys.stderr)
-                print(gen_info_string, file=sys.stderr)
-        return -1
-
-    reuse_seed.click(
-        fn=copy_seed,
-        show_progress=False,
-        inputs=[generation_info],
-        outputs=[seed]
-    )
-
-
-def connect_reuse_subseed(seed: gr.Number, reuse_seed: gr.Button, generation_info: gr.Textbox):
-    """ Connects a 'reuse subseed' button's click event so that it copies last used
-    subseed value from generation info the to the subseed. If subseed strength
+def connect_reuse_seed(seed: gr.Number, reuse_seed: gr.Button, generation_info: gr.Textbox, dummy_component, is_subseed):
+    """ Connects a 'reuse (sub)seed' button's click event so that it copies last used
+    (sub)seed value from generation info the to the seed field. If copying subseed and subseed strength
     was 0, i.e. no variation seed was used, it copies the normal seed value instead."""
-    def copy_seed(gen_info_string: str):
+    def copy_seed(gen_info_string: str, index):
+        res = -1
+
         try:
             gen_info = json.loads(gen_info_string)
-            subseed_strength = gen_info.get('subseed_strength', 0)
-            if subseed_strength > 0:
-                return gen_info.get('subseed', -1)
+            index -= gen_info.get('index_of_first_image', 0)
+            if is_subseed and gen_info.get('subseed_strength', 0) > 0:
+                all_subseeds = gen_info.get('all_subseeds', [-1])
+                res = all_subseeds[index if 0 <= index < len(all_subseeds) else 0]
             else:
-                return gen_info.get('seed', -1)
+                all_seeds = gen_info.get('all_seeds', [-1])
+                res = all_seeds[index if 0 <= index < len(all_seeds) else 0]
+
         except json.decoder.JSONDecodeError as e:
             if gen_info_string != '':
                 print("Error parsing JSON generation info:", file=sys.stderr)
                 print(gen_info_string, file=sys.stderr)
-        return -1
+
+        return [res, gr_show(False)]
 
     reuse_seed.click(
         fn=copy_seed,
+        _js="(x, y) => [x, selected_gallery_index()]",
         show_progress=False,
-        inputs=[generation_info],
-        outputs=[seed]
+        inputs=[generation_info, dummy_component],
+        outputs=[seed, dummy_component]
    )
 
 
 def create_toprow(is_img2img):
     with gr.Row(elem_id="toprow"):
         with gr.Column(scale=4):
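The core of the fix is the lookup in copy_seed: the selected gallery index is shifted down by index_of_first_image (so a grid thumbnail still maps to the first image) and then used to index all_seeds or all_subseeds, falling back to entry 0 when out of range. A stripped-down sketch of that lookup outside of Gradio, with hypothetical inputs:

# Simplified illustration of the new lookup; gen_info would come from json.loads(generation_info).
def pick_seed(gen_info, index, is_subseed):
    index -= gen_info.get('index_of_first_image', 0)   # skip the grid entry if present
    if is_subseed and gen_info.get('subseed_strength', 0) > 0:
        values = gen_info.get('all_subseeds', [-1])
    else:
        values = gen_info.get('all_seeds', [-1])
    return values[index if 0 <= index < len(values) else 0]

gen_info = {"all_seeds": [1000, 1001, 1002], "all_subseeds": [20, 21, 22],
            "subseed_strength": 0.0, "index_of_first_image": 1}
print(pick_seed(gen_info, index=2, is_subseed=False))  # second image in the gallery -> 1001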
@@ -399,6 +385,7 @@ def setup_progressbar(progressbar, preview):
 def create_ui(txt2img, img2img, run_extras, run_pnginfo):
     with gr.Blocks(analytics_enabled=False) as txt2img_interface:
         txt2img_prompt, roll, txt2img_prompt_style, txt2img_negative_prompt, txt2img_prompt_style2, submit, _, txt2img_prompt_style_apply, txt2img_save_style = create_toprow(is_img2img=False)
+        dummy_component = gr.Label(visible=False)
 
         with gr.Row().style(equal_height=False):
             with gr.Column(variant='panel'):
@@ -445,8 +432,8 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo):
                     html_info = gr.HTML()
                     generation_info = gr.Textbox(visible=False)
 
-        connect_reuse_seed(seed, reuse_seed, generation_info)
-        connect_reuse_subseed(subseed, reuse_subseed, generation_info)
+        connect_reuse_seed(seed, reuse_seed, generation_info, dummy_component, is_subseed=False)
+        connect_reuse_seed(subseed, reuse_subseed, generation_info, dummy_component, is_subseed=True)
 
         txt2img_args = dict(
             fn=txt2img,
@@ -487,11 +474,11 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo):
 
         save.click(
             fn=wrap_gradio_call(save_files),
-            _js = "(x, y, z) => [x, y, selected_gallery_index()]",
+            _js="(x, y, z) => [x, y, selected_gallery_index()]",
             inputs=[
                 generation_info,
                 txt2img_gallery,
-                html_info
+                html_info,
             ],
             outputs=[
                 html_info,
@@ -583,8 +570,8 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo):
                     html_info = gr.HTML()
                     generation_info = gr.Textbox(visible=False)
 
-        connect_reuse_seed(seed, reuse_seed, generation_info)
-        connect_reuse_subseed(subseed, reuse_subseed, generation_info)
+        connect_reuse_seed(seed, reuse_seed, generation_info, dummy_component, is_subseed=False)
+        connect_reuse_seed(subseed, reuse_subseed, generation_info, dummy_component, is_subseed=True)
 
         def apply_mode(mode, uploadmask):
             is_classic = mode == 0
@@ -723,7 +710,6 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo):
     prompts = [(txt2img_prompt, txt2img_negative_prompt), (img2img_prompt, img2img_negative_prompt)]
     style_dropdowns = [(txt2img_prompt_style, txt2img_prompt_style2), (img2img_prompt_style, img2img_prompt_style2)]
 
-    dummy_component = gr.Label(visible=False)
     for button, (prompt, negative_prompt) in zip([txt2img_save_style, img2img_save_style], prompts):
         button.click(
             fn=add_style,