# vall_e/utils/trainer.py
"""
# https://github.com/enhuiz/pytorch-training-utilities
"""
import humanize
import json
import logging
import numpy as np
import random
import selectors
import sys
import torch
import os

from functools import cache
from torch.distributed import broadcast_object_list
from torch.utils.data import DataLoader
from tqdm import tqdm
from typing import Protocol

from ..config import cfg
from .distributed import (
    init_distributed,
    distributed_initialized,
    world_size,
    global_leader_only,
    global_rank,
    is_global_leader,
    is_local_leader,
    local_leader_only,
)
from ..engines import Engine, Engines, TrainFeeder, default_feeder, load_engines
from .utils import to_device, do_gc, truncate_json
from ..utils import wrapper as ml
from ..data import get_phone_symmap  # should decouple from this trainer script

_logger = logging.getLogger(__name__)

_command: str

class EvalFn(Protocol):
    def __call__(self, *, engines: Engines):
        ...

class Logger(Protocol):
    def __call__(self, *, data: dict):
        ...

@cache
def _get_stdin_selector():
    selector = selectors.DefaultSelector()
    selector.register(fileobj=sys.stdin, events=selectors.EVENT_READ)
    return selector

if os.name == "nt":
    import msvcrt
    _buffer = []

def _non_blocking_input():
    global _command
    global _buffer

    l = [""]

    def _windows():
        global _buffer
        if msvcrt.kbhit():
            s: str = msvcrt.getch().decode('utf-8')
            if s == '\r':
                s = "".join(_buffer)
                _buffer = []
                return s
            _buffer.append(s)
        return ""

    def _linux():
        s = ""
        selector = _get_stdin_selector()
        events = selector.select(timeout=0)
        for key, _ in events:
            s: str = key.fileobj.readline().strip()
        return s

    if is_global_leader():
        s = _windows() if os.name == 'nt' else _linux()

        if s != "":
            _logger.info(f'Get stdin "{s}".')

        l[0] = s

    if world_size() > 1:
        broadcast_object_list(l, src=0)

    _command = l[0]
    return _command

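# Runtime command cheat-sheet (a summary of what train() below parses; kept
# here as documentation only):
#
#   quit                   stop training (saving/exporting first per cfg.trainer.*_on_quit)
#   save / export          checkpoint the engines / export weights with the phone symmap
#   eval / eval_quit       run the evaluation callback (and optionally quit)
#   lr <rate>              set the learning rate, e.g. "lr 1.0e-5"
#   time [to <iter>]       estimate remaining wall-clock time to <iter> (default: cfg.trainer.iterations)
#   event / event show     list scheduled events; "event clear" drops them
#   <command>@<iteration>  schedule any command to fire at a given global step, e.g. "save@10000"
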
def _make_infinite_epochs(dl):
    while True:
        if dl.dataset.index() == 0:
            _logger.info("New epoch starts.")
        #yield from tqdm(dl, "Epoch progress", dynamic_ncols=True, disable=not is_global_leader())
        # this number may jump from the dataloader sampling before the actual training step happens
        yield from tqdm(dl, "Epoch progress", dynamic_ncols=True, disable=not is_global_leader(), initial=dl.dataset.index())

@local_leader_only(default=None)
def logger(data):
    return _logger.info(json.dumps(data, default=str))

def seed(seed):
    # Set up random seeds, after fork()
    random.seed(seed + global_rank())
    np.random.seed(seed + global_rank())
    torch.manual_seed(seed + global_rank())

def train(
    train_dl: DataLoader,
    train_feeder: TrainFeeder = default_feeder,
    eval_fn: EvalFn = lambda *, engines: ...,
    logger: Logger = logger,
):
    engines = load_engines()

    # validate that there's at least one model set to train
    found = False
    for name, engine in engines.items():
        if engine.training:
            found = True
            break

    if not found:
        raise Exception('Training, but no model loaded set to train...')

    """
    if is_local_leader():
        cfg.dump()
        _logger.info(cfg)
    """

    events = []

    eval_fn = global_leader_only(eval_fn)

    # Pre-loop command
    command = _non_blocking_input()

    if command in ["eval", "eval_quit"]:
        eval_fn(engines=engines)

    if command in ["quit", "eval_quit"]:
        engines.quit()
        return

    last_save_step = engines.global_step
    last_eval_step = 0

    """
    if cfg.distributed:
        train_dl.sampler.set_epoch(int(engines.global_samples / len(train_dl.dataset.paths)))
    """

    # Training loop
    for batch in _make_infinite_epochs(train_dl):
        if engines.global_step >= cfg.trainer.iterations:
            break

        #batch = to_device(batch, torch.cuda.current_device())
        stats = engines.step(batch=batch, feeder=train_feeder)
        stats['epoch'] = engines.global_samples / (len(train_dl.dataset.paths) * world_size())

        elapsed_time = stats.get("elapsed_time", 0)
        try:
            metrics = json.dumps(stats)
        except Exception as e:
            metrics = str(stats)

        _logger.info(f"Training Metrics: {truncate_json(metrics)}.")

        command = _non_blocking_input()

        if "@" in command:
            what, when = command.split("@")
            try:
                events.append((what, int(when)))
                _logger.info(f"Event {command} registered.")
            except Exception as e:
                _logger.error(e)
            command = ""

        # Commands are the current command plus the triggered (i.e. iteration >= trigger point) events
        events = [e for e in events if e[1] >= engines.global_step]
        commands = [command] + [e[0] for e in events if e[1] == engines.global_step]

        for command in commands:
            if command in ["event show", "event"]:
                msg = "Events:\n" + "\n".join(["@".join(map(str, e)) for e in events])
                _logger.info(msg)

            if command == "event clear":
                events.clear()

            if "time" in command:
                target_iter = cfg.trainer.iterations
                if " to " in command:
                    try:
                        target_iter = int(command.split(" to ")[-1])
                    except Exception as e:
                        _logger.error(e)

                remaining_iters = target_iter - engines.global_step + 1
                remaining_time = int(remaining_iters * elapsed_time)
                _logger.info(humanize.precisedelta(remaining_time))

            if "lr" in command:
                rate = float(command.split(" ")[-1])
                try:
                    engines.set_lr(rate)
                    _logger.info(f"Updating LR to: {rate}")
                except Exception as e:
                    _logger.warning(f"Failed to set LR rate to: {rate}, {str(e)}")

            if "export" in command:
                train_dl.dataset.save_state_dict()
                engines.save_checkpoint()
                last_save_step = engines.global_step

                if is_global_leader():
                    engines.export(userdata={"symmap": get_phone_symmap()})

            save_ckpt_every = cfg.trainer.save_frequency or cfg.evaluation.frequency

            saving_commands = ["save"]
            export_commands = ["export"]

            if cfg.trainer.save_on_quit:
                saving_commands.append("quit")

            if cfg.trainer.export_on_quit:
                export_commands.append("quit")

            if cfg.trainer.export_on_save:
                export_commands.append("save")

            if engines.global_step != last_save_step:
                if engines.global_step % save_ckpt_every == 0 or command in saving_commands:
                    train_dl.dataset.save_state_dict()
                    engines.save_checkpoint()
                    last_save_step = engines.global_step

                    if command in export_commands and is_global_leader():
                        engines.export(userdata={"symmap": get_phone_symmap()})

            if engines.global_step != last_eval_step:
                if engines.global_step % cfg.evaluation.frequency == 0 or command in ["eval"]:
                    last_eval_step = engines.global_step
                    eval_fn(engines=engines)

            if command in ["quit"]:
                engines.quit()
                return
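
# --------------------------------------------------------------------------- #
# Usage sketch (illustrative only, kept commented out): how an entry point
# might drive this loop. `create_train_dataloader`, `my_train_feeder`, and
# `my_eval_fn` below are hypothetical stand-ins, not part of this module; the
# feeder signature is assumed to follow the TrainFeeder protocol imported above.
#
#   from vall_e.utils import trainer
#
#   def my_train_feeder(*, engine, batch):  # assumed TrainFeeder signature
#       # run the batch through the engine's model and return its loss/stats
#       ...
#
#   def my_eval_fn(*, engines):
#       # run validation or sampling against the current engines
#       ...
#
#   train_dl = create_train_dataloader()  # hypothetical DataLoader factory
#   trainer.train(train_dl=train_dl, train_feeder=my_train_feeder, eval_fn=my_eval_fn)
# --------------------------------------------------------------------------- #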