Merge pull request #4514 from cluder/4448_fix_ckpt_cache
#4448 fix checkpoint cache usage
commit e666220ee4
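Summary of the change, as read from the diff below: the repeated `shared.opts.sd_checkpoint_cache > 0` checks are replaced with a single `cache_enabled` flag, the cache is consulted only when that flag is set, a checkpoint is cached immediately after being loaded from file (instead of snapshotting the previous model on the next switch), and eviction keeps `sd_checkpoint_cache + 1` entries so the currently loaded model is counted.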
@@ -163,13 +163,21 @@ def load_model_weights(model, checkpoint_info, vae_file="auto"):
     checkpoint_file = checkpoint_info.filename
     sd_model_hash = checkpoint_info.hash
 
-    if shared.opts.sd_checkpoint_cache > 0 and hasattr(model, "sd_checkpoint_info"):
+    cache_enabled = shared.opts.sd_checkpoint_cache > 0
+
+    if cache_enabled:
         sd_vae.restore_base_vae(model)
-        checkpoints_loaded[model.sd_checkpoint_info] = model.state_dict().copy()
 
     vae_file = sd_vae.resolve_vae(checkpoint_file, vae_file=vae_file)
 
-    if checkpoint_info not in checkpoints_loaded:
+    if cache_enabled and checkpoint_info in checkpoints_loaded:
+        # use checkpoint cache
+        vae_name = sd_vae.get_filename(vae_file) if vae_file else None
+        vae_message = f" with {vae_name} VAE" if vae_name else ""
+        print(f"Loading weights [{sd_model_hash}]{vae_message} from cache")
+        model.load_state_dict(checkpoints_loaded[checkpoint_info])
+    else:
+        # load from file
         print(f"Loading weights [{sd_model_hash}] from {checkpoint_file}")
 
         pl_sd = torch.load(checkpoint_file, map_location=shared.weight_load_location)
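Stripped of the webui specifics, the new branch structure in this hunk is a gated-cache lookup. A minimal standalone sketch, assuming `checkpoints_loaded` is an `OrderedDict` as in `modules/sd_models.py`; `load_weights`, `key`, and `load_from_disk` are hypothetical stand-ins, not webui API:

from collections import OrderedDict

checkpoints_loaded = OrderedDict()  # checkpoint key -> cached state dict

def load_weights(key, cache_enabled, load_from_disk):
    if cache_enabled and key in checkpoints_loaded:
        # use checkpoint cache
        print(f"Loading weights [{key}] from cache")
        return checkpoints_loaded[key]
    # load from file
    print(f"Loading weights [{key}] from disk")
    state = load_from_disk(key)
    if cache_enabled:
        # cache newly loaded model
        checkpoints_loaded[key] = state
    return state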
@@ -180,6 +188,10 @@ def load_model_weights(model, checkpoint_info, vae_file="auto"):
         del pl_sd
         model.load_state_dict(sd, strict=False)
         del sd
 
+        if cache_enabled:
+            # cache newly loaded model
+            checkpoints_loaded[checkpoint_info] = model.state_dict().copy()
+
         if shared.cmd_opts.opt_channelslast:
             model.to(memory_format=torch.channels_last)
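A detail worth keeping in mind when reading the cache write above: `state_dict().copy()` duplicates only the mapping, not the tensors, so the cached values still share storage with the live parameters. A quick standalone illustration in plain PyTorch (the `Linear` module is just an example):

import torch

lin = torch.nn.Linear(2, 2)
cached = lin.state_dict().copy()

# dict.copy() is shallow: the cached entries are detached views backed by
# the same storage as the module's parameters.
assert cached["weight"].data_ptr() == lin.weight.data_ptr()

with torch.no_grad():
    lin.weight.fill_(0.0)
print(cached["weight"])  # all zeros -- in-place updates show through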
@@ -199,14 +211,9 @@ def load_model_weights(model, checkpoint_info, vae_file="auto"):
 
         model.first_stage_model.to(devices.dtype_vae)
 
-    else:
-        vae_name = sd_vae.get_filename(vae_file) if vae_file else None
-        vae_message = f" with {vae_name} VAE" if vae_name else ""
-        print(f"Loading weights [{sd_model_hash}]{vae_message} from cache")
-        model.load_state_dict(checkpoints_loaded[checkpoint_info])
-
-    if shared.opts.sd_checkpoint_cache > 0:
-        while len(checkpoints_loaded) > shared.opts.sd_checkpoint_cache:
+    # clean up cache if limit is reached
+    if cache_enabled:
+        while len(checkpoints_loaded) > shared.opts.sd_checkpoint_cache + 1: # we need to count the current model
             checkpoints_loaded.popitem(last=False)  # LRU
 
     model.sd_model_hash = sd_model_hash
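The eviction loop relies on `checkpoints_loaded` being a `collections.OrderedDict` (which it is in `modules/sd_models.py`); `popitem(last=False)` drops the oldest insertion. A minimal sketch of the new policy, where `cache_state_dict` and the string keys are illustrative only:

from collections import OrderedDict

checkpoints_loaded = OrderedDict()
sd_checkpoint_cache = 2  # stands in for shared.opts.sd_checkpoint_cache

def cache_state_dict(key, state_dict):
    checkpoints_loaded[key] = state_dict
    # keep sd_checkpoint_cache + 1 entries: the extra one is the model
    # that is currently loaded, matching the new while-condition above
    while len(checkpoints_loaded) > sd_checkpoint_cache + 1:
        checkpoints_loaded.popitem(last=False)  # evict oldest entry

for name in ["A", "B", "C", "D"]:
    cache_state_dict(name, {})

print(list(checkpoints_loaded))  # ['B', 'C', 'D'] -- 'A' was evicted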