expose VRAM easily

mrq 2023-03-09 00:38:31 +07:00
parent 3dd5cad324
commit 6410df569b
1 changed file with 7 additions and 7 deletions

@@ -47,19 +47,19 @@ def get_device(verbose=False):
 	return torch.device(name)
 
-def get_device_batch_size():
+def get_device_vram( name=get_device_name() ):
 	available = 1
-	name = get_device_name()
-	if name == "dml":
-		# there's nothing publically accessible in the DML API that exposes this
-		# there's a method to get currently used RAM statistics... as tiles
-		available = 1
-	elif name == "cuda":
+
+	if name == "cuda":
 		_, available = torch.cuda.mem_get_info()
 	elif name == "cpu":
 		available = psutil.virtual_memory()[4]
+
+	return available
 
+def get_device_batch_size(name=None):
+	available = get_device_vram(name)
 	vram = available / (1024 ** 3)
 
 	# I'll need to rework this better
 	# simply adding more tiers clearly is not a good way to go about it
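For context, a minimal sketch of what these two helpers look like after this commit. The actual batch-size tiers sit below the hunk and aren't visible in the diff, so the tier values here are hypothetical placeholders; the get_device_name() fallback is an assumption (the real one also knows about DirectML), and the None check in get_device_batch_size() is made explicit here, whereas the commit passes name straight through to get_device_vram(), which would bypass its default and leave available at 1.

import psutil
import torch

# assumption: stand-in for the repo's get_device_name(); only
# distinguishes CUDA from CPU, no "dml" handling
def get_device_name():
	return "cuda" if torch.cuda.is_available() else "cpu"

# note: the default argument is evaluated once, at definition time
def get_device_vram( name=get_device_name() ):
	available = 1

	if name == "cuda":
		# torch.cuda.mem_get_info() returns (free, total) in bytes,
		# so "available" here is actually total VRAM, not free VRAM
		_, available = torch.cuda.mem_get_info()
	elif name == "cpu":
		# index 4 of psutil.virtual_memory() is the `free` field
		available = psutil.virtual_memory()[4]

	return available

def get_device_batch_size(name=None):
	if name is None:
		name = get_device_name()

	vram = get_device_vram(name) / (1024 ** 3)  # bytes -> GiB

	# hypothetical tiers standing in for the ones below the hunk
	if vram > 14:
		return 16
	if vram > 8:
		return 8
	if vram > 4:
		return 4
	return 1

Splitting the VRAM probe out of the tier logic is presumably what the commit title means by "expose VRAM easily": callers can now query get_device_vram() directly instead of only getting a batch size back.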