Colab notebook (part 1)

This commit is contained in:
mrq 2023-02-10 15:58:56 +00:00
parent efa556b793
commit 3d6ac3afaa
5 changed files with 36 additions and 214 deletions

27
main.py Executable file
View File

@@ -0,0 +1,27 @@
import webui as mrq

if __name__ == "__main__":
    # Launched directly: parse CLI args, then either hand off to uvicorn
    # (when a non-root listen path is requested) or run gradio standalone.
    mrq.args = mrq.setup_args()
    if mrq.args.listen_path is not None and mrq.args.listen_path != "/":
        # Serve under a sub-path: re-enter this module through uvicorn,
        # which imports it with __name__ == "main" (handled below).
        import uvicorn
        # BUG FIX: original wrote `if not None`, which is always truthy, so
        # the 8000 fallback was unreachable and port=None could be passed on.
        uvicorn.run(
            "main:app",
            host=mrq.args.listen_host,
            port=mrq.args.listen_port if mrq.args.listen_port is not None else 8000,
        )
    else:
        # Plain gradio launch; load the (slow) tortoise model only after the
        # UI is up so the page becomes reachable quickly.
        mrq.webui = mrq.setup_gradio()
        mrq.webui.launch(
            share=mrq.args.share,
            prevent_thread_lock=True,
            server_name=mrq.args.listen_host,
            server_port=mrq.args.listen_port,
        )
        mrq.tts = mrq.setup_tortoise()
        mrq.webui.block_thread()
elif __name__ == "main":
    # Imported by uvicorn ("main:app"): mount the gradio UI on a FastAPI app
    # at the configured sub-path.
    from fastapi import FastAPI
    import gradio as gr
    import sys

    # Strip CLI args so argument parsing inside the server stack does not
    # choke on uvicorn's own flags.
    sys.argv = [sys.argv[0]]

    app = FastAPI()
    mrq.args = mrq.setup_args()
    mrq.webui = mrq.setup_gradio()
    app = gr.mount_gradio_app(app, mrq.webui, path=mrq.args.listen_path)
    mrq.tts = mrq.setup_tortoise()

View File

@@ -1,4 +1,4 @@
call .\tortoise-venv\Scripts\activate.bat call .\tortoise-venv\Scripts\activate.bat
python app.py python main.py
deactivate deactivate
pause pause

View File

@@ -1,3 +1,3 @@
source ./tortoise-venv/bin/activate source ./tortoise-venv/bin/activate
python ./app.py python3 ./main.py
deactivate deactivate

186
tortoise_tts.ipynb Normal file → Executable file
View File

@@ -1,185 +1 @@
{ {"nbformat":4,"nbformat_minor":0,"metadata":{"colab":{"private_outputs":true,"provenance":[]},"kernelspec":{"name":"python3","display_name":"Python 3"},"language_info":{"name":"python"},"accelerator":"GPU","gpuClass":"standard"},"cells":[{"cell_type":"markdown","source":["## Initialization"],"metadata":{"id":"ni41hmE03DL6"}},{"cell_type":"code","execution_count":null,"metadata":{"id":"FtsMKKfH18iM"},"outputs":[],"source":["!git clone https://git.ecker.tech/mrq/tortoise-tts/\n","%cd tortoise-tts\n","!python -m pip install --upgrade pip\n","!pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu116\n","!python -m pip install -r ./requirements.txt\n","!pip install Pillow==9.0.0 # errors out only when importing\n","!python setup.py install"]},{"cell_type":"markdown","source":["## Running"],"metadata":{"id":"o1gkfw3B3JSk"}},{"cell_type":"code","source":["import webui as mrq\n","\n","mrq.args = mrq.setup_args()\n","mrq.webui = mrq.setup_gradio()\n","mrq.webui.launch(share=True, prevent_thread_lock=True)\n","mrq.tts = mrq.setup_tortoise()\n","mrq.webui.block_thread()"],"metadata":{"id":"c_EQZLTA19c7"},"execution_count":null,"outputs":[]}]}
"nbformat": 4,
"nbformat_minor": 0,
"metadata": {
"colab": {
"name": "tortoise-tts.ipynb",
"provenance": [],
"collapsed_sections": []
},
"kernelspec": {
"name": "python3",
"display_name": "Python 3"
},
"language_info": {
"name": "python"
},
"accelerator": "GPU"
},
"cells": [
{
"cell_type": "markdown",
"source": [
"Welcome to Tortoise! 🐢🐢🐢🐢\n",
"\n",
"Before you begin, I **strongly** recommend you turn on a GPU runtime.\n",
"\n",
"There's a reason this is called \"Tortoise\" - this model takes up to a minute to perform inference for a single sentence on a GPU. Expect waits on the order of hours on a CPU."
],
"metadata": {
"id": "_pIZ3ZXNp7cf"
}
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "JrK20I32grP6"
},
"outputs": [],
"source": [
"!git clone https://github.com/neonbjb/tortoise-tts.git\n",
"%cd tortoise-tts\n",
"!pip3 install -r requirements.txt\n",
"!python3 setup.py install"
]
},
{
"cell_type": "code",
"source": [
"# Imports used through the rest of the notebook.\n",
"import torch\n",
"import torchaudio\n",
"import torch.nn as nn\n",
"import torch.nn.functional as F\n",
"\n",
"import IPython\n",
"\n",
"from tortoise.api import TextToSpeech\n",
"from tortoise.utils.audio import load_audio, load_voice, load_voices\n",
"\n",
"# This will download all the models used by Tortoise from the HF hub.\n",
"tts = TextToSpeech()"
],
"metadata": {
"id": "Gen09NM4hONQ"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# This is the text that will be spoken.\n",
"text = \"Joining two modalities results in a surprising increase in generalization! What would happen if we combined them all?\"\n",
"\n",
"# Here's something for the poetically inclined.. (set text=)\n",
"\"\"\"\n",
"Then took the other, as just as fair,\n",
"And having perhaps the better claim,\n",
"Because it was grassy and wanted wear;\n",
"Though as for that the passing there\n",
"Had worn them really about the same,\"\"\"\n",
"\n",
"# Pick a \"preset mode\" to determine quality. Options: {\"ultra_fast\", \"fast\" (default), \"standard\", \"high_quality\"}. See docs in api.py\n",
"preset = \"fast\""
],
"metadata": {
"id": "bt_aoxONjfL2"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# Tortoise will attempt to mimic voices you provide. It comes pre-packaged\n",
"# with some voices you might recognize.\n",
"\n",
"# Let's list all the voices available. These are just some random clips I've gathered\n",
"# from the internet as well as a few voices from the training dataset.\n",
"# Feel free to add your own clips to the voices/ folder.\n",
"%ls tortoise/voices\n",
"\n",
"IPython.display.Audio('tortoise/voices/tom/1.wav')"
],
"metadata": {
"id": "SSleVnRAiEE2"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# Pick one of the voices from the output above\n",
"voice = 'tom'\n",
"\n",
"# Load it and send it through Tortoise.\n",
"voice_samples, conditioning_latents = load_voice(voice)\n",
"gen = tts.tts_with_preset(text, voice_samples=voice_samples, conditioning_latents=conditioning_latents, \n",
" preset=preset)\n",
"torchaudio.save('generated.wav', gen.squeeze(0).cpu(), 24000)\n",
"IPython.display.Audio('generated.wav')"
],
"metadata": {
"id": "KEXOKjIvn6NW"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# Tortoise can also generate speech using a random voice. The voice changes each time you execute this!\n",
"# (Note: random voices can be prone to strange utterances)\n",
"gen = tts.tts_with_preset(text, voice_samples=None, conditioning_latents=None, preset=preset)\n",
"torchaudio.save('generated.wav', gen.squeeze(0).cpu(), 24000)\n",
"IPython.display.Audio('generated.wav')"
],
"metadata": {
"id": "16Xs2SSC3BXa"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# You can also combine conditioning voices. Combining voices produces a new voice\n",
"# with traits from all the parents.\n",
"#\n",
"# Lets see what it would sound like if Picard and Kirk had a kid with a penchant for philosophy:\n",
"voice_samples, conditioning_latents = load_voices(['pat', 'william'])\n",
"\n",
"gen = tts.tts_with_preset(\"They used to say that if man was meant to fly, hed have wings. But he did fly. He discovered he had to.\", \n",
" voice_samples=None, conditioning_latents=None, preset=preset)\n",
"torchaudio.save('captain_kirkard.wav', gen.squeeze(0).cpu(), 24000)\n",
"IPython.display.Audio('captain_kirkard.wav')"
],
"metadata": {
"id": "fYTk8KUezUr5"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"del tts # Will break other cells, but necessary to conserve RAM if you want to run this cell.\n",
"\n",
"# Tortoise comes with some scripts that does a lot of the lifting for you. For example,\n",
"# read.py will read a text file for you.\n",
"!python3 tortoise/read.py --voice=train_atkins --textfile=tortoise/data/riding_hood.txt --preset=ultra_fast --output_path=.\n",
"\n",
"IPython.display.Audio('train_atkins/combined.wav')\n",
"# This will take awhile.."
],
"metadata": {
"id": "t66yqWgu68KL"
},
"execution_count": null,
"outputs": []
}
]
}

View File

@@ -20,6 +20,10 @@ from tortoise.api import TextToSpeech
from tortoise.utils.audio import load_audio, load_voice, load_voices from tortoise.utils.audio import load_audio, load_voice, load_voices
from tortoise.utils.text import split_and_recombine_text from tortoise.utils.text import split_and_recombine_text
args = None
webui = None
tts = None
def generate(text, delimiter, emotion, prompt, voice, mic_audio, seed, candidates, num_autoregressive_samples, diffusion_iterations, temperature, diffusion_sampler, breathing_room, cvvp_weight, experimentals, progress=gr.Progress(track_tqdm=True)): def generate(text, delimiter, emotion, prompt, voice, mic_audio, seed, candidates, num_autoregressive_samples, diffusion_iterations, temperature, diffusion_sampler, breathing_room, cvvp_weight, experimentals, progress=gr.Progress(track_tqdm=True)):
try: try:
tts tts
@@ -424,7 +428,7 @@ def setup_args():
args.listen_host = None args.listen_host = None
args.listen_port = None args.listen_port = None
args.listen_path = None args.listen_path = None
if args.listen is not None: if args.listen:
match = re.findall(r"^(?:(.+?):(\d+))?(\/.+?)?$", args.listen)[0] match = re.findall(r"^(?:(.+?):(\d+))?(\/.+?)?$", args.listen)[0]
args.listen_host = match[0] if match[0] != "" else "127.0.0.1" args.listen_host = match[0] if match[0] != "" else "127.0.0.1"
@@ -624,28 +628,3 @@ def setup_gradio():
webui.queue(concurrency_count=args.concurrency_count) webui.queue(concurrency_count=args.concurrency_count)
return webui return webui
if __name__ == "__main__":
    # Launched directly: parse CLI args, then either hand off to uvicorn
    # (when a non-root listen path is requested) or run gradio standalone.
    args = setup_args()
    if args.listen_path is not None and args.listen_path != "/":
        # Serve under a sub-path: re-enter this module through uvicorn,
        # which imports it with __name__ == "app" (handled below).
        import uvicorn
        # BUG FIX: original wrote `if not None`, which is always truthy, so
        # the 8000 fallback was unreachable and port=None could be passed on.
        uvicorn.run(
            "app:app",
            host=args.listen_host,
            port=args.listen_port if args.listen_port is not None else 8000,
        )
    else:
        # Plain gradio launch; load the (slow) tortoise model only after the
        # UI is up so the page becomes reachable quickly.
        webui = setup_gradio()
        webui.launch(
            share=args.share,
            prevent_thread_lock=True,
            server_name=args.listen_host,
            server_port=args.listen_port,
        )
        tts = setup_tortoise()
        webui.block_thread()
elif __name__ == "app":
    # Imported by uvicorn ("app:app"): mount the gradio UI on a FastAPI app
    # at the configured sub-path.
    import sys
    from fastapi import FastAPI

    # Strip CLI args so argument parsing inside the server stack does not
    # choke on uvicorn's own flags.
    sys.argv = [sys.argv[0]]

    app = FastAPI()
    args = setup_args()
    webui = setup_gradio()
    app = gr.mount_gradio_app(app, webui, path=args.listen_path)
    tts = setup_tortoise()