import contextlib
import os
import re
import sys
import traceback
from collections import namedtuple

import torch
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode

import modules.shared as shared
from modules import devices, paths, lowvram


blip_image_eval_size = 384
blip_model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_caption_capfilt_large.pth'
clip_model_name = 'ViT-L/14'

Category = namedtuple("Category", ["name", "topn", "items"])

re_topn = re.compile(r"\.top(\d+)\.")
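
# Category files are plain text lists, one candidate per line. A filename that
# contains ".topN." (for example, a hypothetical "flavors.top3.txt") matches
# re_topn and contributes its N best-ranked entries to the prompt; any other
# file contributes only its single best match.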


class InterrogateModels:
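    """BLIP captioning plus CLIP ranking, as used by the webui's interrogate feature.

    BLIP produces a natural-language caption for the image; CLIP then ranks the
    built-in artist list and the entries of each category file against the image,
    and the best matches are appended to the caption to build a prompt.
    """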
    blip_model = None
    clip_model = None
    clip_preprocess = None
    categories = None
    dtype = None

    def __init__(self, content_dir):
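        """Collect one Category per text file found in content_dir.

        A missing directory is tolerated and simply results in an empty
        category list.
        """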
        self.categories = []

        if os.path.exists(content_dir):
            for filename in os.listdir(content_dir):
                m = re_topn.search(filename)
                topn = 1 if m is None else int(m.group(1))

                with open(os.path.join(content_dir, filename), "r", encoding="utf8") as file:
                    lines = [x.strip() for x in file.readlines()]

                self.categories.append(Category(name=filename, topn=topn, items=lines))

    def load_blip_model(self):
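        """Build the BLIP caption model from blip_model_url and return it in eval mode."""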
        import models.blip

        blip_model = models.blip.blip_decoder(pretrained=blip_model_url, image_size=blip_image_eval_size, vit='base', med_config=os.path.join(paths.paths["BLIP"], "configs", "med_config.json"))
        blip_model.eval()

        return blip_model

    def load_clip_model(self):
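        """Load the ViT-L/14 CLIP model and its preprocess transform onto the interrogate device."""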
        import clip

        model, preprocess = clip.load(clip_model_name)
        model.eval()
        model = model.to(devices.device_interrogate)

        return model, preprocess

    def load(self):
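        """Make sure both models are loaded, half precision unless --no-half, and on the interrogate device."""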
        if self.blip_model is None:
            self.blip_model = self.load_blip_model()
            if not shared.cmd_opts.no_half:
                self.blip_model = self.blip_model.half()

        self.blip_model = self.blip_model.to(devices.device_interrogate)

        if self.clip_model is None:
            self.clip_model, self.clip_preprocess = self.load_clip_model()
            if not shared.cmd_opts.no_half:
                self.clip_model = self.clip_model.half()

        self.clip_model = self.clip_model.to(devices.device_interrogate)

        self.dtype = next(self.clip_model.parameters()).dtype

    def send_clip_to_ram(self):
        if not shared.opts.interrogate_keep_models_in_memory:
            if self.clip_model is not None:
                self.clip_model = self.clip_model.to(devices.cpu)

    def send_blip_to_ram(self):
        if not shared.opts.interrogate_keep_models_in_memory:
            if self.blip_model is not None:
                self.blip_model = self.blip_model.to(devices.cpu)

    def unload(self):
        self.send_clip_to_ram()
        self.send_blip_to_ram()

        devices.torch_gc()

    def rank(self, image_features, text_array, top_count=1):
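        """Rank text_array entries by CLIP similarity to image_features.

        Candidates are tokenized, encoded and L2-normalised, a softmax over the
        scaled cosine similarities is averaged across the rows of
        image_features, and the top_count entries are returned as
        (text, score-in-percent) pairs.
        """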
        import clip

        if shared.opts.interrogate_clip_dict_limit != 0:
            text_array = text_array[0:int(shared.opts.interrogate_clip_dict_limit)]

        top_count = min(top_count, len(text_array))
        text_tokens = clip.tokenize([text for text in text_array], truncate=True).to(devices.device_interrogate)
        text_features = self.clip_model.encode_text(text_tokens).type(self.dtype)
        text_features /= text_features.norm(dim=-1, keepdim=True)

        similarity = torch.zeros((1, len(text_array))).to(devices.device_interrogate)
        for i in range(image_features.shape[0]):
            similarity += (100.0 * image_features[i].unsqueeze(0) @ text_features.T).softmax(dim=-1)
        similarity /= image_features.shape[0]

        top_probs, top_labels = similarity.cpu().topk(top_count, dim=-1)
        return [(text_array[top_labels[0][i].numpy()], (top_probs[0][i].numpy()*100)) for i in range(top_count)]

    def generate_caption(self, pil_image):
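        """Caption pil_image with BLIP beam search, using the beam/length limits from settings."""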
        gpu_image = transforms.Compose([
            transforms.Resize((blip_image_eval_size, blip_image_eval_size), interpolation=InterpolationMode.BICUBIC),
            transforms.ToTensor(),
            transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711))
        ])(pil_image).unsqueeze(0).type(self.dtype).to(devices.device_interrogate)

        with torch.no_grad():
            caption = self.blip_model.generate(gpu_image, sample=False, num_beams=shared.opts.interrogate_clip_num_beams, min_length=shared.opts.interrogate_clip_min_length, max_length=shared.opts.interrogate_clip_max_length)

        return caption[0]

    def interrogate(self, pil_image, include_ranks=False):
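        """Build a prompt for pil_image: a BLIP caption followed by CLIP-ranked category matches.

        With include_ranks=True each match is appended as ", (match:score)",
        which makes it easier to feed the result back into txt2img/img2img
        with explicit weights.
        """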
        res = ""  # prompt text is appended to below, including in the error handler

        try:
            if shared.cmd_opts.lowvram or shared.cmd_opts.medvram:
                lowvram.send_everything_to_cpu()
                devices.torch_gc()

            self.load()

            caption = self.generate_caption(pil_image)
            self.send_blip_to_ram()
            devices.torch_gc()

            res = caption

            clip_image = self.clip_preprocess(pil_image).unsqueeze(0).type(self.dtype).to(devices.device_interrogate)

            precision_scope = torch.autocast if shared.cmd_opts.precision == "autocast" else contextlib.nullcontext
            with torch.no_grad(), precision_scope("cuda"):
                image_features = self.clip_model.encode_image(clip_image).type(self.dtype)

                image_features /= image_features.norm(dim=-1, keepdim=True)

                if shared.opts.interrogate_use_builtin_artists:
                    artist = self.rank(image_features, ["by " + artist.name for artist in shared.artist_db.artists])[0]

                    res += ", " + artist[0]

                for name, topn, items in self.categories:
                    matches = self.rank(image_features, items, top_count=topn)

                    for match, score in matches:
                        if include_ranks:
                            res += f", ({match}:{score})"
                        else:
                            res += ", " + match

        except Exception:
            print("Error interrogating", file=sys.stderr)
            print(traceback.format_exc(), file=sys.stderr)
            res += "<error>"

        self.unload()

        return res
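
# A minimal usage sketch (hypothetical: the content directory name and image
# path are placeholders; in the webui a shared InterrogateModels instance is
# constructed elsewhere and invoked from the UI):
#
#   from PIL import Image
#
#   interrogator = InterrogateModels("interrogate")
#   image = Image.open("example.png").convert("RGB")
#   prompt = interrogator.interrogate(image, include_ranks=True)
#   print(prompt)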