diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md
index bbcbbe7d..eda42fa7 100644
--- a/.github/ISSUE_TEMPLATE/feature_request.md
+++ b/.github/ISSUE_TEMPLATE/feature_request.md
@@ -2,7 +2,7 @@
 name: Feature request
 about: Suggest an idea for this project
 title: ''
-labels: ''
+labels: 'suggestion'
 assignees: ''
 
 ---
diff --git a/javascript/contextMenus.js b/javascript/contextMenus.js
index 7852793c..7636c4b3 100644
--- a/javascript/contextMenus.js
+++ b/javascript/contextMenus.js
@@ -16,7 +16,7 @@ contextMenuInit = function(){
         oldMenu.remove()
     }
 
-    let tabButton = gradioApp().querySelector('button')
+    let tabButton = uiCurrentTab
     let baseStyle = window.getComputedStyle(tabButton)
 
     const contextMenu = document.createElement('nav')
@@ -123,44 +123,53 @@ contextMenuInit = function(){
     return [appendContextMenuOption, removeContextMenuOption, addContextMenuEventListener]
 }
 
-initResponse = contextMenuInit()
-appendContextMenuOption = initResponse[0]
-removeContextMenuOption = initResponse[1]
-addContextMenuEventListener = initResponse[2]
+initResponse = contextMenuInit();
+appendContextMenuOption = initResponse[0];
+removeContextMenuOption = initResponse[1];
+addContextMenuEventListener = initResponse[2];
 
-
-//Start example Context Menu Items
-generateOnRepeatId = appendContextMenuOption('#txt2img_generate','Generate forever',function(){
-    let genbutton = gradioApp().querySelector('#txt2img_generate');
-    let interruptbutton = gradioApp().querySelector('#txt2img_interrupt');
-    if(!interruptbutton.offsetParent){
-        genbutton.click();
-    }
-    clearInterval(window.generateOnRepeatInterval)
-    window.generateOnRepeatInterval = setInterval(function(){
+(function(){
+    //Start example Context Menu Items
+    let generateOnRepeat = function(genbuttonid,interruptbuttonid){
+        let genbutton = gradioApp().querySelector(genbuttonid);
+        let interruptbutton = gradioApp().querySelector(interruptbuttonid);
         if(!interruptbutton.offsetParent){
             genbutton.click();
         }
-    },
-    500)}
-)
-
-cancelGenerateForever = function(){
-    clearInterval(window.generateOnRepeatInterval)
-}
-
-appendContextMenuOption('#txt2img_interrupt','Cancel generate forever',cancelGenerateForever)
-appendContextMenuOption('#txt2img_generate', 'Cancel generate forever',cancelGenerateForever)
-
-
-appendContextMenuOption('#roll','Roll three',
-    function(){
-        let rollbutton = gradioApp().querySelector('#roll');
-        setTimeout(function(){rollbutton.click()},100)
-        setTimeout(function(){rollbutton.click()},200)
-        setTimeout(function(){rollbutton.click()},300)
+        clearInterval(window.generateOnRepeatInterval)
+        window.generateOnRepeatInterval = setInterval(function(){
+            if(!interruptbutton.offsetParent){
+                genbutton.click();
+            }
+        },
+        500)
     }
-)
+
+    appendContextMenuOption('#txt2img_generate','Generate forever',function(){
+        generateOnRepeat('#txt2img_generate','#txt2img_interrupt');
+    })
+    appendContextMenuOption('#img2img_generate','Generate forever',function(){
+        generateOnRepeat('#img2img_generate','#img2img_interrupt');
+    })
+
+    let cancelGenerateForever = function(){
+        clearInterval(window.generateOnRepeatInterval)
+    }
+
+    appendContextMenuOption('#txt2img_interrupt','Cancel generate forever',cancelGenerateForever)
+    appendContextMenuOption('#txt2img_generate', 'Cancel generate forever',cancelGenerateForever)
+    appendContextMenuOption('#img2img_interrupt','Cancel generate forever',cancelGenerateForever)
+    appendContextMenuOption('#img2img_generate', 'Cancel generate forever',cancelGenerateForever)
+
+    appendContextMenuOption('#roll','Roll three',
+        function(){
+            let rollbutton = get_uiCurrentTabContent().querySelector('#roll');
+            setTimeout(function(){rollbutton.click()},100)
+            setTimeout(function(){rollbutton.click()},200)
+            setTimeout(function(){rollbutton.click()},300)
+        }
+    )
+})();
 //End example Context Menu Items
 
 onUiUpdate(function(){
diff --git a/launch.py b/launch.py
index e1000f55..16627a03 100644
--- a/launch.py
+++ b/launch.py
@@ -104,6 +104,7 @@ def prepare_enviroment():
     args, skip_torch_cuda_test = extract_arg(args, '--skip-torch-cuda-test')
     xformers = '--xformers' in args
     deepdanbooru = '--deepdanbooru' in args
+    ngrok = '--ngrok' in args
 
     try:
         commit = run(f"{git} rev-parse HEAD").strip()
@@ -134,6 +135,9 @@ def prepare_enviroment():
     if not is_installed("deepdanbooru") and deepdanbooru:
         run_pip("install git+https://github.com/KichangKim/DeepDanbooru.git@edf73df4cdaeea2cf00e9ac08bd8a9026b7a7b26#egg=deepdanbooru[tensorflow] tensorflow==2.10.0 tensorflow-io==0.27.0", "deepdanbooru")
 
+    if not is_installed("pyngrok") and ngrok:
+        run_pip("install pyngrok", "ngrok")
+
     os.makedirs(dir_repos, exist_ok=True)
 
     git_clone("https://github.com/CompVis/stable-diffusion.git", repo_dir('stable-diffusion'), "Stable Diffusion", stable_diffusion_commit_hash)
diff --git a/modules/hypernetwork/hypernetwork.py b/modules/hypernetworks/hypernetwork.py
similarity index 100%
rename from modules/hypernetwork/hypernetwork.py
rename to modules/hypernetworks/hypernetwork.py
diff --git a/modules/hypernetwork/ui.py b/modules/hypernetworks/ui.py
similarity index 79%
rename from modules/hypernetwork/ui.py
rename to modules/hypernetworks/ui.py
index f6d1d0a3..e7540f41 100644
--- a/modules/hypernetwork/ui.py
+++ b/modules/hypernetworks/ui.py
@@ -6,14 +6,14 @@ import gradio as gr
 import modules.textual_inversion.textual_inversion
 import modules.textual_inversion.preprocess
 from modules import sd_hijack, shared
-from modules.hypernetwork import hypernetwork
+from modules.hypernetworks import hypernetwork
 
 
 def create_hypernetwork(name):
     fn = os.path.join(shared.cmd_opts.hypernetwork_dir, f"{name}.pt")
     assert not os.path.exists(fn), f"file {fn} already exists"
 
-    hypernet = modules.hypernetwork.hypernetwork.Hypernetwork(name=name)
+    hypernet = modules.hypernetworks.hypernetwork.Hypernetwork(name=name)
     hypernet.save(fn)
 
     shared.reload_hypernetworks()
@@ -28,7 +28,7 @@ def train_hypernetwork(*args):
     try:
         sd_hijack.undo_optimizations()
 
-        hypernetwork, filename = modules.hypernetwork.hypernetwork.train_hypernetwork(*args)
+        hypernetwork, filename = modules.hypernetworks.hypernetwork.train_hypernetwork(*args)
 
         res = f"""
 Training {'interrupted' if shared.state.interrupted else 'finished'} at {hypernetwork.step} steps.
diff --git a/modules/ngrok.py b/modules/ngrok.py
new file mode 100644
index 00000000..7d03a6df
--- /dev/null
+++ b/modules/ngrok.py
@@ -0,0 +1,15 @@
+from pyngrok import ngrok, conf, exception
+
+
+def connect(token, port):
+    if token == None:
+        token = 'None'
+    conf.get_default().auth_token = token
+    try:
+        public_url = ngrok.connect(port).public_url
+    except exception.PyngrokNgrokError:
+        print(f'Invalid ngrok authtoken, ngrok connection aborted.\n'
+              f'Your token: {token}, get the right one on https://dashboard.ngrok.com/get-started/your-authtoken')
+    else:
+        print(f'ngrok connected to localhost:{port}! URL: {public_url}\n'
+              'You can use this link after the launch is complete.')
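
For context on the new modules/ngrok.py helper above: it is a thin wrapper around pyngrok, driven by the new --ngrok flag installed in launch.py and wired up in modules/ui.py later in this diff. Below is a minimal standalone sketch of the same pyngrok calls, assuming pyngrok is installed and the UI listens on gradio's default port 7860; the token string is a placeholder, not a real credential.

    from pyngrok import conf, ngrok

    conf.get_default().auth_token = "<your-ngrok-authtoken>"  # placeholder; get yours from the ngrok dashboard
    tunnel = ngrok.connect(7860)                              # open an HTTP tunnel to localhost:7860
    print(tunnel.public_url)                                  # the public URL to share once the UI is up
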
diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py
index f873049a..f07ec041 100644
--- a/modules/sd_hijack.py
+++ b/modules/sd_hijack.py
@@ -37,7 +37,7 @@ def apply_optimizations():
 
 
 def undo_optimizations():
-    from modules.hypernetwork import hypernetwork
+    from modules.hypernetworks import hypernetwork
 
     ldm.modules.attention.CrossAttention.forward = hypernetwork.attention_CrossAttention_forward
     ldm.modules.diffusionmodules.model.nonlinearity = diffusionmodules_model_nonlinearity
diff --git a/modules/sd_hijack_optimizations.py b/modules/sd_hijack_optimizations.py
index 27e571fc..3349b9c3 100644
--- a/modules/sd_hijack_optimizations.py
+++ b/modules/sd_hijack_optimizations.py
@@ -9,7 +9,7 @@ from ldm.util import default
 from einops import rearrange
 
 from modules import shared
-from modules.hypernetwork import hypernetwork
+from modules.hypernetworks import hypernetwork
 
 
 if shared.cmd_opts.xformers or shared.cmd_opts.force_enable_xformers:
diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py
index d168b938..20309e06 100644
--- a/modules/sd_samplers.py
+++ b/modules/sd_samplers.py
@@ -57,7 +57,7 @@ def set_samplers():
     global samplers, samplers_for_img2img
 
     hidden = set(opts.hide_samplers)
-    hidden_img2img = set(opts.hide_samplers + ['PLMS', 'DPM fast', 'DPM adaptive'])
+    hidden_img2img = set(opts.hide_samplers + ['PLMS'])
 
     samplers = [x for x in all_samplers if x.name not in hidden]
     samplers_for_img2img = [x for x in all_samplers if x.name not in hidden_img2img]
@@ -365,16 +365,26 @@ class KDiffusionSampler:
         else:
             sigmas = self.model_wrap.get_sigmas(steps)
 
-        noise = noise * sigmas[steps - t_enc - 1]
-        xi = x + noise
-
-        extra_params_kwargs = self.initialize(p)
-
         sigma_sched = sigmas[steps - t_enc - 1:]
+        xi = x + noise * sigma_sched[0]
+
+        extra_params_kwargs = self.initialize(p)
+        if 'sigma_min' in inspect.signature(self.func).parameters:
+            ## last sigma is zero which isn't allowed by DPM Fast & Adaptive so taking value before last
+            extra_params_kwargs['sigma_min'] = sigma_sched[-2]
+        if 'sigma_max' in inspect.signature(self.func).parameters:
+            extra_params_kwargs['sigma_max'] = sigma_sched[0]
+        if 'n' in inspect.signature(self.func).parameters:
+            extra_params_kwargs['n'] = len(sigma_sched) - 1
+        if 'sigma_sched' in inspect.signature(self.func).parameters:
+            extra_params_kwargs['sigma_sched'] = sigma_sched
+        if 'sigmas' in inspect.signature(self.func).parameters:
+            extra_params_kwargs['sigmas'] = sigma_sched
 
         self.model_wrap_cfg.init_latent = x
 
-        return self.func(self.model_wrap_cfg, xi, sigma_sched, extra_args={'cond': conditioning, 'uncond': unconditional_conditioning, 'cond_scale': p.cfg_scale}, disable=False, callback=self.callback_state, **extra_params_kwargs)
+        return self.func(self.model_wrap_cfg, xi, extra_args={'cond': conditioning, 'uncond': unconditional_conditioning, 'cond_scale': p.cfg_scale}, disable=False, callback=self.callback_state, **extra_params_kwargs)
+
 
     def sample(self, p, x, conditioning, unconditional_conditioning, steps=None):
         steps = steps or p.steps
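
The sd_samplers.py hunk above works by inspecting each sampler function's signature and forwarding only the keyword arguments it declares, which is what lets DPM fast/adaptive share the img2img path with the other k-diffusion samplers. A small self-contained sketch of that dispatch pattern follows; the two sampler functions are stand-ins for illustration, not the real k-diffusion API.

    import inspect

    def call_with_supported_kwargs(func, *args, **candidate_kwargs):
        # Forward only the keyword arguments that func actually declares,
        # mirroring the sigma_min / sigma_max / n / sigma_sched / sigmas checks above.
        accepted = inspect.signature(func).parameters
        kwargs = {k: v for k, v in candidate_kwargs.items() if k in accepted}
        return func(*args, **kwargs)

    def sampler_with_sigmas(x, sigmas):                   # stand-in sampler
        return x, sigmas

    def sampler_with_range(x, sigma_min, sigma_max, n):   # stand-in sampler
        return x, sigma_min, sigma_max, n

    common = dict(sigmas=[14.6, 7.0, 0.03], sigma_min=0.03, sigma_max=14.6, n=3)
    print(call_with_supported_kwargs(sampler_with_sigmas, 1.0, **common))
    print(call_with_supported_kwargs(sampler_with_range, 1.0, **common))
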
diff --git a/modules/shared.py b/modules/shared.py
index 8753015e..1dc2ccf2 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -14,7 +14,7 @@ import modules.sd_models
 import modules.styles
 import modules.devices as devices
 from modules import sd_samplers
-from modules.hypernetwork import hypernetwork
+from modules.hypernetworks import hypernetwork
 from modules.paths import models_path, script_path, sd_path
 
 sd_model_file = os.path.join(script_path, 'model.ckpt')
@@ -38,6 +38,7 @@ parser.add_argument("--always-batch-cond-uncond", action='store_true', help="dis
 parser.add_argument("--unload-gfpgan", action='store_true', help="does not do anything.")
 parser.add_argument("--precision", type=str, help="evaluate at this precision", choices=["full", "autocast"], default="autocast")
 parser.add_argument("--share", action='store_true', help="use share=True for gradio and make the UI accessible through their site (doesn't work for me but you might have better luck)")
+parser.add_argument("--ngrok", type=str, help="ngrok authtoken, alternative to gradio --share", default=None)
 parser.add_argument("--codeformer-models-path", type=str, help="Path to directory with codeformer model file(s).", default=os.path.join(models_path, 'Codeformer'))
 parser.add_argument("--gfpgan-models-path", type=str, help="Path to directory with GFPGAN model file(s).", default=os.path.join(models_path, 'GFPGAN'))
 parser.add_argument("--esrgan-models-path", type=str, help="Path to directory with ESRGAN model file(s).", default=os.path.join(models_path, 'ESRGAN'))
diff --git a/modules/ui.py b/modules/ui.py
index 948f303b..fc7c56bf 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -52,6 +52,11 @@ if not cmd_opts.share and not cmd_opts.listen:
     gradio.utils.version_check = lambda: None
     gradio.utils.get_local_ip_address = lambda: '127.0.0.1'
 
+if cmd_opts.ngrok != None:
+    import modules.ngrok as ngrok
+    print('ngrok authtoken detected, trying to connect...')
+    ngrok.connect(cmd_opts.ngrok, cmd_opts.port if cmd_opts.port != None else 7860)
+
 
 def gr_show(visible=True):
     return {"visible": visible, "__type__": "update"}
@@ -430,7 +435,10 @@ def create_toprow(is_img2img):
 
         with gr.Row():
             with gr.Column(scale=8):
-                negative_prompt = gr.Textbox(label="Negative prompt", elem_id="negative_prompt", show_label=False, placeholder="Negative prompt", lines=2)
+                with gr.Row():
+                    negative_prompt = gr.Textbox(label="Negative prompt", elem_id="negative_prompt", show_label=False, placeholder="Negative prompt", lines=2)
+                    with gr.Column(scale=1, elem_id="roll_col"):
+                        sh = gr.Button(elem_id="sh", visible=True)
 
             with gr.Column(scale=1, elem_id="style_neg_col"):
                 prompt_style2 = gr.Dropdown(label="Style 2", elem_id=f"{id_part}_style2_index", choices=[k for k, v in shared.prompt_styles.styles.items()], value=next(iter(shared.prompt_styles.styles.keys())), visible=len(shared.prompt_styles.styles) > 1)
@@ -550,16 +558,15 @@ def create_ui(wrap_gradio_gpu_call):
                     button_id = "hidden_element" if shared.cmd_opts.hide_ui_dir_config else 'open_folder'
                     open_txt2img_folder = gr.Button(folder_symbol, elem_id=button_id)
 
-                    with gr.Row():
-                        do_make_zip = gr.Checkbox(label="Make Zip when Save?", value=False)
+                with gr.Row():
+                    do_make_zip = gr.Checkbox(label="Make Zip when Save?", value=False)
 
-                    with gr.Row():
-                        download_files = gr.File(None, file_count="multiple", interactive=False, show_label=False, visible=False)
+                with gr.Row():
+                    download_files = gr.File(None, file_count="multiple", interactive=False, show_label=False, visible=False)
 
 
                 with gr.Group():
                     html_info = gr.HTML()
                     generation_info = gr.Textbox(visible=False)
-
                     connect_reuse_seed(seed, reuse_seed, generation_info, dummy_component, is_subseed=False)
                     connect_reuse_seed(subseed, reuse_subseed, generation_info, dummy_component, is_subseed=True)
@@ -740,17 +747,16 @@ def create_ui(wrap_gradio_gpu_call):
 
                     button_id = "hidden_element" if shared.cmd_opts.hide_ui_dir_config else 'open_folder'
                     open_img2img_folder = gr.Button(folder_symbol, elem_id=button_id)
 
-                    with gr.Row():
-                        do_make_zip = gr.Checkbox(label="Make Zip when Save?", value=False)
+                with gr.Row():
+                    do_make_zip = gr.Checkbox(label="Make Zip when Save?", value=False)
 
-                    with gr.Row():
-                        download_files = gr.File(None, file_count="multiple", interactive=False, show_label=False, visible=False)
+                with gr.Row():
+                    download_files = gr.File(None, file_count="multiple", interactive=False, show_label=False, visible=False)
 
 
                 with gr.Group():
                     html_info = gr.HTML()
                     generation_info = gr.Textbox(visible=False)
-
                     connect_reuse_seed(seed, reuse_seed, generation_info, dummy_component, is_subseed=False)
                     connect_reuse_seed(subseed, reuse_subseed, generation_info, dummy_component, is_subseed=True)
@@ -1106,7 +1112,7 @@ def create_ui(wrap_gradio_gpu_call):
         )
 
         create_hypernetwork.click(
-            fn=modules.hypernetwork.ui.create_hypernetwork,
+            fn=modules.hypernetworks.ui.create_hypernetwork,
             inputs=[
                 new_hypernetwork_name,
             ],
@@ -1159,7 +1165,7 @@ def create_ui(wrap_gradio_gpu_call):
         )
 
         train_hypernetwork.click(
-            fn=wrap_gradio_gpu_call(modules.hypernetworks.ui.train_hypernetwork, extra_outputs=[gr.update()]),
+            fn=wrap_gradio_gpu_call(modules.hypernetworks.ui.train_hypernetwork, extra_outputs=[gr.update()]),
             _js="start_training_textual_inversion",
             inputs=[
                 train_hypernetwork_name,
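
The create_toprow hunk above nests a one-column row beside the negative-prompt textbox so the small `sh` button sits next to it; the button is then visually hidden by the `#sh` rule added to style.css below (opacity 0%, default cursor). A minimal standalone gradio sketch of that nesting is shown here for illustration only; it is not the web UI's actual create_toprow, and the elem_id values simply mirror the hunk.

    import gradio as gr

    with gr.Blocks() as demo:
        with gr.Row():
            with gr.Column(scale=8):
                with gr.Row():
                    # full-width textbox with a narrow button column beside it
                    negative_prompt = gr.Textbox(show_label=False, placeholder="Negative prompt", lines=2)
                    with gr.Column(scale=1, elem_id="roll_col"):
                        sh = gr.Button(elem_id="sh", visible=True)

    # demo.launch()  # uncomment to preview the layout locally
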
diff --git a/scripts/xy_grid.py b/scripts/xy_grid.py
index 16918c99..cddb192a 100644
--- a/scripts/xy_grid.py
+++ b/scripts/xy_grid.py
@@ -11,7 +11,7 @@ import modules.scripts as scripts
 import gradio as gr
 
 from modules import images
-from modules.hypernetwork import hypernetwork
+from modules.hypernetworks import hypernetwork
 from modules.processing import process_images, Processed, get_correct_sampler
 from modules.shared import opts, cmd_opts, state
 import modules.shared as shared
diff --git a/style.css b/style.css
index fa03863e..e53b8446 100644
--- a/style.css
+++ b/style.css
@@ -2,6 +2,27 @@
     max-width: 100%;
 }
 
+#txt2img_token_counter {
+  height: 0px;
+}
+
+#img2img_token_counter {
+  height: 0px;
+}
+
+#sh{
+    min-width: 2em;
+    min-height: 2em;
+    max-width: 2em;
+    max-height: 2em;
+    flex-grow: 0;
+    padding-left: 0.25em;
+    padding-right: 0.25em;
+    margin: 0.1em 0;
+    opacity: 0%;
+    cursor: default;
+}
+
 .output-html p {margin: 0 0.5em;}
 
 .row > *,
diff --git a/webui.py b/webui.py
index ba2156c8..338f58e1 100644
--- a/webui.py
+++ b/webui.py
@@ -29,7 +29,7 @@ from modules import devices
 from modules import modelloader
 from modules.paths import script_path
 from modules.shared import cmd_opts
-import modules.hypernetwork.hypernetwork
+import modules.hypernetworks.hypernetwork
 
 modelloader.cleanup_models()
 modules.sd_models.setup_model()
@@ -83,7 +83,7 @@ modules.scripts.load_scripts(os.path.join(script_path, "scripts"))
 
 shared.sd_model = modules.sd_models.load_model()
 shared.opts.onchange("sd_model_checkpoint", wrap_queued_call(lambda: modules.sd_models.reload_model_weights(shared.sd_model)))
-shared.opts.onchange("sd_hypernetwork", wrap_queued_call(lambda: modules.hypernetwork.hypernetwork.load_hypernetwork(shared.opts.sd_hypernetwork)))
+shared.opts.onchange("sd_hypernetwork", wrap_queued_call(lambda: modules.hypernetworks.hypernetwork.load_hypernetwork(shared.opts.sd_hypernetwork)))
 
 
 def webui():