Allow TF32 in CUDA for increased performance #279
This commit is contained in:
parent
11e648f6c7
commit
b70b51cc72
|
@@ -1,6 +1,8 @@
|
|||
import torch
|
||||
|
||||
# has_mps is only available in nightly pytorch (for now), `getattr` for compatibility
|
||||
from modules import errors
|
||||
|
||||
# `torch.has_mps` only exists in nightly PyTorch (for now); `getattr` keeps
# this working on stable releases, defaulting to False when absent.
has_mps = getattr(torch, 'has_mps', False)

# Plain CPU device handle, used as a fallback target elsewhere in the module.
cpu = torch.device("cpu")
|
||||
|
@@ -20,3 +22,12 @@ def torch_gc():
|
|||
if torch.cuda.is_available():
|
||||
torch.cuda.empty_cache()
|
||||
torch.cuda.ipc_collect()
|
||||
|
||||
|
||||
def enable_tf32():
    """Turn on TF32 math for CUDA matmul and cuDNN kernels.

    No-op when no CUDA device is available. TF32 trades a small amount of
    float32 precision for a large throughput gain on Ampere-and-newer GPUs.
    """
    if not torch.cuda.is_available():
        return

    torch.backends.cuda.matmul.allow_tf32 = True
    torch.backends.cudnn.allow_tf32 = True
|
||||
|
||||
|
||||
# Best-effort at import time: errors.run reports any failure to stderr
# instead of letting it abort module import.
errors.run(enable_tf32, "Enabling TF32")
|
||||
|
|
10
modules/errors.py
Normal file
10
modules/errors.py
Normal file
|
@@ -0,0 +1,10 @@
|
|||
import sys
|
||||
import traceback
|
||||
|
||||
|
||||
def run(code, task):
    """Call *code* (a zero-argument callable), reporting failures to stderr.

    Best-effort wrapper: any exception raised by *code* is printed — a
    one-line summary prefixed with *task*, followed by the full traceback —
    and is NOT re-raised. Always returns None.
    """
    try:
        code()
    except Exception as e:
        # Include the exception message, not just its type, so the first
        # (most visible) stderr line is enough to diagnose simple failures.
        print(f"{task}: {type(e).__name__}: {e}", file=sys.stderr)
        print(traceback.format_exc(), file=sys.stderr)
|
Loading…
Reference in New Issue
Block a user