From c92f2ff1965c28a170a3238bae4c5c383dea3567 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Sat, 10 Sep 2022 12:06:19 +0300
Subject: [PATCH] Update to cross attention from
 https://github.com/Doggettx/stable-diffusion #219

---
 README.md            |  3 ++-
 modules/sd_hijack.py | 47 ++++++++++++++++++++++++++++++++++----------
 2 files changed, 39 insertions(+), 11 deletions(-)

diff --git a/README.md b/README.md
index 51cc0ee2..7c730480 100644
--- a/README.md
+++ b/README.md
@@ -279,7 +279,8 @@ After that follow the instructions in the `Manual instructions` section starting
 - k-diffusion - https://github.com/crowsonkb/k-diffusion.git
 - GFPGAN - https://github.com/TencentARC/GFPGAN.git
 - ESRGAN - https://github.com/xinntao/ESRGAN
-- Ideas for optimizations and some code (from users) - https://github.com/basujindal/stable-diffusion
+- Ideas for optimizations - https://github.com/basujindal/stable-diffusion
+- Cross Attention layer optimization - https://github.com/Doggettx/stable-diffusion
 - Idea for SD upscale - https://github.com/jquesnelle/txt2imghd
 - Initial Gradio script - posted on 4chan by an Anonymous user. Thank you Anonymous user.
 - (You)

diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py
index db9952a5..60bc6671 100644
--- a/modules/sd_hijack.py
+++ b/modules/sd_hijack.py
@@ -1,3 +1,4 @@
+import math
 import os
 import sys
 import traceback
@@ -12,30 +13,56 @@ from einops import rearrange
 
 import ldm.modules.attention
 
-# see https://github.com/basujindal/stable-diffusion/pull/117 for discussion
+# taken from https://github.com/Doggettx/stable-diffusion
 def split_cross_attention_forward(self, x, context=None, mask=None):
     h = self.heads
 
-    q = self.to_q(x)
+    q_in = self.to_q(x)
     context = default(context, x)
-    k = self.to_k(context)
-    v = self.to_v(context)
+    k_in = self.to_k(context)
+    v_in = self.to_v(context)
     del context, x
 
-    q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v))
+    q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q_in, k_in, v_in))
+    del q_in, k_in, v_in
 
     r1 = torch.zeros(q.shape[0], q.shape[1], v.shape[2], device=q.device)
-    for i in range(0, q.shape[0], 2):
-        end = i + 2
-        s1 = einsum('b i d, b j d -> b i j', q[i:end], k[i:end])
-        s1 *= self.scale
+
+    stats = torch.cuda.memory_stats(q.device)
+    mem_active = stats['active_bytes.all.current']
+    mem_reserved = stats['reserved_bytes.all.current']
+    mem_free_cuda, _ = torch.cuda.mem_get_info(torch.cuda.current_device())
+    mem_free_torch = mem_reserved - mem_active
+    mem_free_total = mem_free_cuda + mem_free_torch
+
+    gb = 1024 ** 3
+    tensor_size = q.shape[0] * q.shape[1] * k.shape[1] * 4
+    mem_required = tensor_size * 2.5
+    steps = 1
+
+    if mem_required > mem_free_total:
+        steps = 2 ** (math.ceil(math.log(mem_required / mem_free_total, 2)))
+        # print(f"Expected tensor size:{tensor_size/gb:0.1f}GB, cuda free:{mem_free_cuda/gb:0.1f}GB "
+        #       f"torch free:{mem_free_torch/gb:0.1f} total:{mem_free_total/gb:0.1f} steps:{steps}")
+
+    if steps > 64:
+        max_res = math.floor(math.sqrt(math.sqrt(mem_free_total / 2.5)) / 8) * 64
+        raise RuntimeError(f'Not enough memory, use lower resolution (max approx. {max_res}x{max_res}). ' +
+                           f'Need: {mem_required / 64 / gb:0.1f}GB free, Have:{mem_free_total / gb:0.1f}GB free')
+
+    slice_size = q.shape[1] // steps if (q.shape[1] % steps) == 0 else q.shape[1]
+    for i in range(0, q.shape[1], slice_size):
+        end = i + slice_size
+        s1 = einsum('b i d, b j d -> b i j', q[:, i:end], k) * self.scale
 
         s2 = s1.softmax(dim=-1)
         del s1
 
-        r1[i:end] = einsum('b i j, b j d -> b i d', s2, v[i:end])
+        r1[:, i:end] = einsum('b i j, b j d -> b i d', s2, v)
         del s2
+
+    del q, k, v
 
     r2 = rearrange(r1, '(b h) n d -> b n (h d)', h=h)
     del r1
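
Note on the technique (commentary, not part of the patch): instead of computing
the full (batch*heads, q_tokens, k_tokens) similarity matrix in one shot, the
new forward slices the query dimension into a power-of-two number of steps
chosen from free CUDA memory. Slicing queries is exact because the softmax runs
over the key dimension (dim=-1), so every query row still sees all keys. Below
is a minimal CPU sketch of the same idea under assumed shapes and a made-up
free-memory budget; attention_slice_steps and sliced_attention are illustrative
names, not functions from the patch.

    import math
    import torch

    def attention_slice_steps(batch_heads, q_tokens, k_tokens, mem_free_total):
        # Mirrors the patch's heuristic: fp32 similarity matrix (4 bytes per
        # element) times a 2.5x safety factor for softmax temporaries,
        # rounded up to the next power of two.
        mem_required = batch_heads * q_tokens * k_tokens * 4 * 2.5
        if mem_required <= mem_free_total:
            return 1
        return 2 ** math.ceil(math.log2(mem_required / mem_free_total))

    def sliced_attention(q, k, v, scale, steps):
        # q, k, v: (batch*heads, tokens, dim), already rearranged as in the patch.
        r1 = torch.zeros(q.shape[0], q.shape[1], v.shape[2], dtype=q.dtype)
        slice_size = q.shape[1] // steps if (q.shape[1] % steps) == 0 else q.shape[1]
        for i in range(0, q.shape[1], slice_size):
            end = i + slice_size
            # Similarity of one query slice against ALL keys, then softmax and
            # weighted sum of values; each output row is computed exactly.
            s1 = torch.einsum('b i d, b j d -> b i j', q[:, i:end], k) * scale
            r1[:, i:end] = torch.einsum('b i j, b j d -> b i d', s1.softmax(dim=-1), v)
            del s1
        return r1

    # Sliced and unsliced attention agree:
    q, k, v = (torch.randn(8, 64, 40) for _ in range(3))
    scale = 40 ** -0.5
    steps = attention_slice_steps(8, 64, 64, mem_free_total=100_000)  # -> 4
    full = torch.einsum('b i j, b j d -> b i d',
                        (torch.einsum('b i d, b j d -> b i j', q, k) * scale).softmax(dim=-1), v)
    assert torch.allclose(sliced_attention(q, k, v, scale, steps), full, atol=1e-5)

One quirk of the patch worth noting: when q.shape[1] is not divisible by steps,
the slice_size fallback disables slicing entirely instead of using a ragged
final slice, so the memory savings silently vanish for such token counts.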