do not let user choose his own prompt token count limit

parent 00117a07ef
commit 4999eb2ef9

README.md

@@ -65,6 +65,7 @@ Check the [custom scripts](https://github.com/AUTOMATIC1111/stable-diffusion-web
 - [Composable-Diffusion](https://energy-based-model.github.io/Compositional-Visual-Generation-with-Composable-Diffusion-Models/), a way to use multiple prompts at once
   - separate prompts using uppercase `AND`
   - also supports weights for prompts: `a cat :1.2 AND a dog AND a penguin :2.2`
+- No token limit for prompts (original stable diffusion lets you use up to 75 tokens)

 ## Installation and Running
 Make sure the required [dependencies](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Dependencies) are met and follow the instructions available for both [NVidia](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Install-and-Run-on-NVidia-GPUs) (recommended) and [AMD](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Install-and-Run-on-AMD-GPUs) GPUs.

modules/processing.py

@@ -123,7 +123,6 @@ class Processed:
         self.index_of_first_image = index_of_first_image
         self.styles = p.styles
         self.job_timestamp = state.job_timestamp
-        self.max_prompt_tokens = opts.max_prompt_tokens

         self.eta = p.eta
         self.ddim_discretize = p.ddim_discretize
@@ -171,7 +170,6 @@ class Processed:
             "infotexts": self.infotexts,
             "styles": self.styles,
             "job_timestamp": self.job_timestamp,
-            "max_prompt_tokens": self.max_prompt_tokens,
         }

         return json.dumps(obj)
@@ -269,8 +267,6 @@ def fix_seed(p):
 def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments, iteration=0, position_in_batch=0):
     index = position_in_batch + iteration * p.batch_size

-    max_tokens = getattr(p, 'max_prompt_tokens', opts.max_prompt_tokens)
-
     generation_params = {
         "Steps": p.steps,
         "Sampler": sd_samplers.samplers[p.sampler_index].name,
@@ -286,7 +282,6 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments, iteration
         "Seed resize from": (None if p.seed_resize_from_w == 0 or p.seed_resize_from_h == 0 else f"{p.seed_resize_from_w}x{p.seed_resize_from_h}"),
         "Denoising strength": getattr(p, 'denoising_strength', None),
         "Eta": (None if p.sampler is None or p.sampler.eta == p.sampler.default_eta else p.sampler.eta),
-        "Max tokens": (None if max_tokens == shared.vanilla_max_prompt_tokens else max_tokens)
     }

     generation_params.update(p.extra_generation_params)
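
With the hunks above, create_infotext no longer computes max_tokens and no longer emits a "Max tokens" entry, and Processed no longer records it. The sketch below is illustrative only (render_params is a hypothetical stand-in, assuming None-valued entries are skipped when the parameter string is built, as the surrounding code does for other optional entries): an entry set to None and an entry that is simply absent produce the same infotext line, which is why the key can be dropped outright.

    # Hypothetical helper, not from processing.py: flattens a generation_params
    # dict into an infotext-style string, skipping None values.
    def render_params(generation_params):
        return ", ".join(f"{k}: {v}" for k, v in generation_params.items() if v is not None)

    params = {"Steps": 20, "Sampler": "Euler a", "Max tokens": None}
    print(render_params(params))  # -> Steps: 20, Sampler: Euler a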

modules/sd_hijack.py

@@ -36,6 +36,13 @@ def undo_optimizations():
     ldm.modules.diffusionmodules.model.AttnBlock.forward = diffusionmodules_model_AttnBlock_forward


+def get_target_prompt_token_count(token_count):
+    if token_count < 75:
+        return 75
+
+    return math.ceil(token_count / 10) * 10
+
+
 class StableDiffusionModelHijack:
     fixes = None
     comments = []
@@ -84,7 +91,7 @@ class StableDiffusionModelHijack:
     def tokenize(self, text):
         max_length = opts.max_prompt_tokens - 2
         _, remade_batch_tokens, _, _, _, token_count = self.clip.process_text([text])
-        return remade_batch_tokens[0], token_count, max_length
+        return remade_batch_tokens[0], token_count, get_target_prompt_token_count(token_count)


 class FrozenCLIPEmbedderWithCustomWords(torch.nn.Module):
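
The helper added above replaces the fixed, user-configurable limit with a target derived from the prompt itself: anything under 75 tokens keeps the usual 75-token budget, and longer prompts are rounded up to the next multiple of 10. A minimal standalone sketch of that behaviour; the function body is copied from the hunk above, and the sample counts are illustrative, not from the commit:

    import math

    def get_target_prompt_token_count(token_count):
        if token_count < 75:
            return 75

        return math.ceil(token_count / 10) * 10

    # Prompts under 75 tokens keep the classic budget; longer ones grow in steps of 10.
    for count in (10, 75, 76, 150, 231):
        print(count, "->", get_target_prompt_token_count(count))
    # 10 -> 75, 75 -> 80, 76 -> 80, 150 -> 150, 231 -> 240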
@@ -114,7 +121,6 @@ class FrozenCLIPEmbedderWithCustomWords(torch.nn.Module):
     def tokenize_line(self, line, used_custom_terms, hijack_comments):
         id_start = self.wrapped.tokenizer.bos_token_id
         id_end = self.wrapped.tokenizer.eos_token_id
-        maxlen = opts.max_prompt_tokens

         if opts.enable_emphasis:
             parsed = prompt_parser.parse_prompt_attention(line)
@@ -146,19 +152,12 @@ class FrozenCLIPEmbedderWithCustomWords(torch.nn.Module):
                     used_custom_terms.append((embedding.name, embedding.checksum()))
                     i += embedding_length_in_tokens

-        if len(remade_tokens) > maxlen - 2:
-            vocab = {v: k for k, v in self.wrapped.tokenizer.get_vocab().items()}
-            ovf = remade_tokens[maxlen - 2:]
-            overflowing_words = [vocab.get(int(x), "") for x in ovf]
-            overflowing_text = self.wrapped.tokenizer.convert_tokens_to_string(''.join(overflowing_words))
-            hijack_comments.append(f"Warning: too many input tokens; some ({len(overflowing_words)}) have been truncated:\n{overflowing_text}\n")
-
         token_count = len(remade_tokens)
-        remade_tokens = remade_tokens + [id_end] * (maxlen - 2 - len(remade_tokens))
-        remade_tokens = [id_start] + remade_tokens[0:maxlen - 2] + [id_end]
+        prompt_target_length = get_target_prompt_token_count(token_count)
+        tokens_to_add = prompt_target_length - len(remade_tokens) + 1

-        multipliers = multipliers + [1.0] * (maxlen - 2 - len(multipliers))
-        multipliers = [1.0] + multipliers[0:maxlen - 2] + [1.0]
+        remade_tokens = [id_start] + remade_tokens + [id_end] * tokens_to_add
+        multipliers = [1.0] + multipliers + [1.0] * tokens_to_add

         return remade_tokens, fixes, multipliers, token_count

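
With the hunk above, tokenize_line no longer truncates the prompt to maxlen - 2 (and no longer emits the truncation warning); instead it pads the token list out to the dynamic target, so the returned sequence is id_start, the prompt tokens, and at least one id_end, for a total of prompt_target_length + 2 entries, with the multipliers list padded to the same length using 1.0. A small standalone sketch of that arithmetic (not code from the commit; pad_like_tokenize_line is a hypothetical name, and the defaults are CLIP's usual begin/end token ids):

    import math

    def get_target_prompt_token_count(token_count):
        return 75 if token_count < 75 else math.ceil(token_count / 10) * 10

    def pad_like_tokenize_line(prompt_tokens, id_start=49406, id_end=49407):
        # Mirrors the new padding: no truncation, pad with id_end up to the
        # rounded target, then put the start marker in front.
        token_count = len(prompt_tokens)
        tokens_to_add = get_target_prompt_token_count(token_count) - token_count + 1
        return [id_start] + prompt_tokens + [id_end] * tokens_to_add

    padded = pad_like_tokenize_line(list(range(93)))
    print(len(padded))  # 102: a 93-token prompt is padded to 100 tokens plus the start/end markers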

modules/shared.py

@@ -123,8 +123,6 @@ interrogator = modules.interrogate.InterrogateModels("interrogate")

 face_restorers = []

-vanilla_max_prompt_tokens = 77
-

 def realesrgan_models_names():
     import modules.realesrgan_model
@@ -225,7 +223,6 @@ options_templates.update(options_section(('sd', "Stable Diffusion"), {
     "use_old_emphasis_implementation": OptionInfo(False, "Use old emphasis implementation. Can be useful to reproduce old seeds."),
     "enable_batch_seeds": OptionInfo(True, "Make K-diffusion samplers produce same images in a batch as when making a single image"),
     "filter_nsfw": OptionInfo(False, "Filter NSFW content"),
-    "max_prompt_tokens": OptionInfo(vanilla_max_prompt_tokens, f"Max prompt token count. Two tokens are reserved for for start and end. Default is {vanilla_max_prompt_tokens}. Setting this to a different value will result in different pictures for same seed.", gr.Number, {"precision": 0}),
     "random_artist_categories": OptionInfo([], "Allowed categories for random artists selection when using the Roll button", gr.CheckboxGroup, {"choices": artist_db.categories()}),
 }))
