forked from mrq/ai-voice-cloning
fixed notebooks, provided paperspace notebook
parent b4098dca73
commit 83b5125854
@@ -51,10 +51,10 @@
 "\n",
 "!python -m pip install --upgrade pip\n",
 "!pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu116\n",
-"!python -m pip install -r ./dlas/requirements.txt\n",
-"!python -m pip install -r ./tortoise-tts/requirements.txt\n",
 "!python -m pip install -r ./requirements.txt\n",
+"!python -m pip install -r ./tortoise-tts/requirements.txt\n",
 "!python -m pip install -e ./tortoise-tts/\n",
+"!python -m pip install -r ./dlas/requirements.txt\n",
 "\n",
 "!rm ./tortoise-tts/{main,webui}.py"
 ]
@@ -152,7 +152,7 @@
 "\n",
 "args = utils.setup_args()\n",
 "ui = webui.setup_gradio()\n",
-"# Be very, very sure to check \"Defer TTS Load\" in Settings, then restart, before you start training\n",
+"# Be very, very sure to check \"Do Not Load TTS On Startup\" in Settings after all the models download, then restart, before you start training\n",
 "# You'll crash the runtime if you don't\n",
 "if not args.defer_tts_load:\n",
 "\tutils.setup_tortoise()\n",
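The reworded comment tracks the renamed setting in the web UI; the cell itself still keys off `args.defer_tts_load`. A minimal sketch of the guard this cell relies on, with the flag name taken from the diff and the argument wiring and loader body stubbed in as assumptions:

```python
# Sketch only: the real setup_args()/setup_tortoise() live in src/utils.py.
import argparse

def setup_args():
    parser = argparse.ArgumentParser()
    # assumed wiring: the "Do Not Load TTS On Startup" checkbox persists into this flag
    parser.add_argument("--defer-tts-load", action="store_true", default=False)
    return parser.parse_args(args=[])  # ignore the notebook kernel's own argv

def setup_tortoise():
    print("loading TTS models...")  # placeholder for the real model load

args = setup_args()
if not args.defer_tts_load:
    # skipped once the setting is checked, leaving the GPU free for training
    setup_tortoise()
```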
notebook_paperspace.ipynb (new executable file, 132 lines)
@@ -0,0 +1,132 @@
+{
+"cells": [
+{
+"cell_type": "markdown",
+"metadata": {
+"id": "ni41hmE03DL6"
+},
+"source": [
+"## Initialization"
+]
+},
+{
+"cell_type": "code",
+"metadata": {
+"id": "FtsMKKfH18iM"
+},
+"source": [
+"!sudo apt update\n",
+"!sudo apt-get install python3.9-venv -y\n",
+"%cd /notebooks/\n",
+"!git clone https://git.ecker.tech/mrq/ai-voice-cloning/\n",
+"!ln -s ./ai-voice-cloning/models/ ./\n",
+"%cd ai-voice-cloning\n",
+"!./setup-cuda.sh\n",
+"#!./update.sh"
+]
+},
+{
+"cell_type": "markdown",
+"metadata": {
+"id": "IzrGt5IcHlAD"
+},
+"source": [
+"# Update Repos"
+]
+},
+{
+"cell_type": "code",
+"metadata": {
+"id": "3DktoOXSHmtw"
+},
+"source": [
+"# for my debugging purposes\n",
+"%cd /notebooks/ai-voice-cloning/\n",
+"!sudo apt update\n",
+"!sudo apt-get install python3.9-venv -y\n",
+"!mkdir -p ~/.cache\n",
+"!ln -s /notebooks/ai-voice-cloning/models/voicefixer ~/.cache/.\n",
+"!./update-force.sh\n",
+"#!git pull\n",
+"#!git submodule update --remote"
+]
+},
+{
+"cell_type": "markdown",
+"metadata": {
+"id": "o1gkfw3B3JSk"
+},
+"source": [
+"## Running"
+]
+},
+{
+"cell_type": "code",
+"metadata": {
+"id": "c_EQZLTA19c7"
+},
+"source": [
+"%cd /notebooks/ai-voice-cloning\n",
+"\n",
+"!export TORTOISE_MODELS_DIR=/notebooks/ai-voice-cloning/models/tortoise/\n",
+"!export TRANSFORMERS_CACHE=/notebooks/ai-voice-cloning/models/transformers/\n",
+"\n",
+"!./start.sh --share"
+]
+},
+{
+"cell_type": "markdown",
+"metadata": {
+"id": "2AnVQxEJx47p"
+},
+"source": [
+"## Exporting"
+]
+},
+{
+"cell_type": "code",
+"metadata": {
+"id": "YOACiDCXx72G"
+},
+"source": [
+"%cd /notebooks/ai-voice-cloning\n",
+"!apt install -y p7zip-full\n",
+"from datetime import datetime\n",
+"timestamp = datetime.now().strftime('%m-%d-%Y_%H:%M:%S')\n",
+"!mkdir -p \"../{timestamp}/results\"\n",
+"!mv ./results/* \"../{timestamp}/results/.\"\n",
+"!mv ./training/* \"../{timestamp}/training/.\"\n",
+"!7z a -t7z -m0=lzma2 -mx=9 -mfb=64 -md=32m -ms=on \"../{timestamp}.7z\" \"../{timestamp}/\"\n",
+"!ls ~/\n",
+"!echo \"Finished zipping, archive is available at {timestamp}.7z\""
+]
+}
+],
+"metadata": {
+"accelerator": "GPU",
+"colab": {
+"private_outputs": true,
+"provenance": []
+},
+"gpuClass": "standard",
+"kernelspec": {
+"display_name": "Python 3 (ipykernel)",
+"language": "python",
+"name": "python3"
+},
+"language_info": {
+"codemirror_mode": {
+"name": "ipython",
+"version": 3
+},
+"file_extension": ".py",
+"mimetype": "text/x-python",
+"name": "python",
+"nbconvert_exporter": "python",
+"pygments_lexer": "ipython3",
+"version": "3.9.13"
+}
+},
+"nbformat": 4,
+"nbformat_minor": 4
+}
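A caveat on the Running cell above: each `!` line in a notebook runs in its own subshell, so variables set with `!export` are not guaranteed to be visible to the later `!./start.sh --share` invocation. A hedged alternative, using the same paths as the cell (whether start.sh re-exports these itself is not shown in this diff), is to set them on the kernel process so child processes inherit them:

```python
import os

# Setting these on the kernel process means every subsequent `!` subprocess
# inherits them, unlike an `!export ...` line, which dies with its own subshell.
os.environ["TORTOISE_MODELS_DIR"] = "/notebooks/ai-voice-cloning/models/tortoise/"
os.environ["TRANSFORMERS_CACHE"] = "/notebooks/ai-voice-cloning/models/transformers/"

# then, in the same or a later cell:
# !./start.sh --share
```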
src/utils.py (23 lines changed)
@@ -697,23 +697,22 @@ class TrainingState():
 				logs = [logs[-1]]
 
 			for log in logs:
-				try:
-					ea = event_accumulator.EventAccumulator(log, size_guidance={event_accumulator.SCALARS: 0})
-					ea.Reload()
+				ea = event_accumulator.EventAccumulator(log, size_guidance={event_accumulator.SCALARS: 0})
+				ea.Reload()
 
-					for key in keys:
+				for key in keys:
+					try:
 						scalar = ea.Scalars(key)
 						for s in scalar:
 							if update and s.step <= self.last_info_check_at:
 								continue
 							highest_step = max( highest_step, s.step )
 							self.statistics.append( { "step": s.step, "value": s.value, "type": key } )
 
 							if key == 'loss_gpt_total':
 								self.losses.append( { "step": s.step, "value": s.value, "type": key } )
-
-				except Exception as e:
-					pass
+					except Exception as e:
+						pass
 
 		else:
 			logs = sorted([f'{self.dataset_dir}/{d}' for d in os.listdir(self.dataset_dir) if d[-4:] == ".log" ])
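The change above narrows the `try`/`except` from wrapping the whole event-file read to wrapping each key individually, so a scalar tag missing from one log no longer discards every other key's statistics for that log. A minimal standalone sketch of the same pattern (the function name and key list are illustrative; only `loss_gpt_total` appears in the diff):

```python
from tensorboard.backend.event_processing import event_accumulator

def read_scalars(log_path, keys):
    """Collect scalar events per key, skipping keys absent from this log."""
    ea = event_accumulator.EventAccumulator(
        log_path, size_guidance={event_accumulator.SCALARS: 0})
    ea.Reload()

    statistics = []
    for key in keys:
        try:
            # Scalars() raises KeyError if the tag was never written to this
            # event file; catching it here skips only this key, not the log.
            for s in ea.Scalars(key):
                statistics.append({"step": s.step, "value": s.value, "type": key})
        except KeyError:
            continue
    return statistics

# e.g. read_scalars(path_to_event_file, ['loss_gpt_total'])
```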