From 3b51d239ac9201228c6032fc109111e347e8e6b0 Mon Sep 17 00:00:00 2001
From: cluder <1590330+cluder@users.noreply.github.com>
Date: Wed, 9 Nov 2022 04:54:21 +0100
Subject: [PATCH 1/2] - do not use ckpt cache if disabled - cache model after
 it has been loaded from file

---
 modules/sd_models.py | 27 +++++++++++++++++----------
 1 file changed, 17 insertions(+), 10 deletions(-)

diff --git a/modules/sd_models.py b/modules/sd_models.py
index 34c57bfa..720c2a96 100644
--- a/modules/sd_models.py
+++ b/modules/sd_models.py
@@ -163,13 +163,21 @@ def load_model_weights(model, checkpoint_info, vae_file="auto"):
     checkpoint_file = checkpoint_info.filename
     sd_model_hash = checkpoint_info.hash
 
-    if shared.opts.sd_checkpoint_cache > 0 and hasattr(model, "sd_checkpoint_info"):
+    cache_enabled = shared.opts.sd_checkpoint_cache > 0
+
+    if cache_enabled:
         sd_vae.restore_base_vae(model)
-        checkpoints_loaded[model.sd_checkpoint_info] = model.state_dict().copy()
 
     vae_file = sd_vae.resolve_vae(checkpoint_file, vae_file=vae_file)
 
-    if checkpoint_info not in checkpoints_loaded:
+    if cache_enabled and checkpoint_info in checkpoints_loaded:
+        # use checkpoint cache
+        vae_name = sd_vae.get_filename(vae_file) if vae_file else None
+        vae_message = f" with {vae_name} VAE" if vae_name else ""
+        print(f"Loading weights [{sd_model_hash}]{vae_message} from cache")
+        model.load_state_dict(checkpoints_loaded[checkpoint_info])
+    else:
+        # load from file
         print(f"Loading weights [{sd_model_hash}] from {checkpoint_file}")
 
         pl_sd = torch.load(checkpoint_file, map_location=shared.weight_load_location)
@@ -180,6 +188,10 @@ def load_model_weights(model, checkpoint_info, vae_file="auto"):
         del pl_sd
         model.load_state_dict(sd, strict=False)
         del sd
+
+        if cache_enabled:
+            # cache newly loaded model
+            checkpoints_loaded[checkpoint_info] = model.state_dict().copy()
 
         if shared.cmd_opts.opt_channelslast:
             model.to(memory_format=torch.channels_last)
@@ -199,13 +211,8 @@ def load_model_weights(model, checkpoint_info, vae_file="auto"):
 
         model.first_stage_model.to(devices.dtype_vae)
 
-    else:
-        vae_name = sd_vae.get_filename(vae_file) if vae_file else None
-        vae_message = f" with {vae_name} VAE" if vae_name else ""
-        print(f"Loading weights [{sd_model_hash}]{vae_message} from cache")
-        model.load_state_dict(checkpoints_loaded[checkpoint_info])
-
-    if shared.opts.sd_checkpoint_cache > 0:
+    # clean up cache if limit is reached
+    if cache_enabled:
         while len(checkpoints_loaded) > shared.opts.sd_checkpoint_cache:
             checkpoints_loaded.popitem(last=False) # LRU
 

From eebf49592ad2c0933e58b06a098b92e48d47e4fe Mon Sep 17 00:00:00 2001
From: cluder <1590330+cluder@users.noreply.github.com>
Date: Wed, 9 Nov 2022 07:17:09 +0100
Subject: [PATCH 2/2] restore #4035 behavior - if checkpoint cache is set to
 1, keep 2 models in cache (current +1 more)

---
 modules/sd_models.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/modules/sd_models.py b/modules/sd_models.py
index 720c2a96..80addf03 100644
--- a/modules/sd_models.py
+++ b/modules/sd_models.py
@@ -213,7 +213,7 @@ def load_model_weights(model, checkpoint_info, vae_file="auto"):
 
     # clean up cache if limit is reached
     if cache_enabled:
-        while len(checkpoints_loaded) > shared.opts.sd_checkpoint_cache:
+        while len(checkpoints_loaded) > shared.opts.sd_checkpoint_cache + 1: # we need to count the current model
             checkpoints_loaded.popitem(last=False) # LRU
 
     model.sd_model_hash = sd_model_hash
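
Note on the resulting cache policy: the behavior these two patches produce can be
sketched in isolation. The following is a minimal, hypothetical Python sketch, not
the webui code itself; `sd_checkpoint_cache`, `checkpoints_loaded`, and
`load_from_file` stand in for `shared.opts.sd_checkpoint_cache`, the module-level
`checkpoints_loaded` OrderedDict in modules/sd_models.py, and the torch.load()
path inside `load_model_weights`.

    from collections import OrderedDict

    sd_checkpoint_cache = 1             # user setting: extra models to keep cached
    checkpoints_loaded = OrderedDict()  # insertion order doubles as eviction order

    def load_weights(checkpoint_info, load_from_file):
        cache_enabled = sd_checkpoint_cache > 0

        if cache_enabled and checkpoint_info in checkpoints_loaded:
            # cache hit: reuse the stored state dict instead of touching disk
            state_dict = checkpoints_loaded[checkpoint_info]
        else:
            # cache miss: load from file, then cache the freshly loaded copy
            state_dict = load_from_file(checkpoint_info)
            if cache_enabled:
                checkpoints_loaded[checkpoint_info] = state_dict

        # the "+ 1" from PATCH 2/2 counts the currently loaded model, so a
        # cache setting of 1 keeps two state dicts before evicting the oldest
        if cache_enabled:
            while len(checkpoints_loaded) > sd_checkpoint_cache + 1:
                checkpoints_loaded.popitem(last=False)  # drop the oldest entry

        return state_dict

With `sd_checkpoint_cache = 1`, loading checkpoints A, then B, then C leaves B and
C cached and evicts A, matching the #4035 behavior the second patch restores
(current model plus one more).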