From 5d87418d3a62dace52e80c7f29b8db24fde7a4f9 Mon Sep 17 00:00:00 2001
From: ken11o2
Date: Mon, 4 Sep 2023 18:40:17 +0000
Subject: [PATCH 1/4] add DeepSpeed feature

add DeepSpeed feature
---
 ai_voice_cloning_edited_to_use_deepspeed.zip | Bin 0 -> 62526 bytes
 1 file changed, 0 insertions(+), 0 deletions(-)
 create mode 100644 ai_voice_cloning_edited_to_use_deepspeed.zip

diff --git a/ai_voice_cloning_edited_to_use_deepspeed.zip b/ai_voice_cloning_edited_to_use_deepspeed.zip
new file mode 100644
index 0000000000000000000000000000000000000000..3f44449068750c14e4508da083d4f69e43698182
Binary files /dev/null and b/ai_voice_cloning_edited_to_use_deepspeed.zip differ
-- 
2.45.2

From 57f8da780277e207107824e0acdba3d023891bd4 Mon Sep 17 00:00:00 2001
From: ken11o2
Date: Mon, 4 Sep 2023 18:53:22 +0000
Subject: [PATCH 2/4] add args.use_deepspeed

add DeepSpeed feature
---
 src/utils.py | 7918 +++++++++++++++++++++++++-------------------------
 1 file changed, 3961 insertions(+), 3957 deletions(-)

diff --git a/src/utils.py b/src/utils.py
index 435cd4d..e7cad07 100755
---
a/src/utils.py +++ b/src/utils.py @@ -1,3958 +1,3962 @@ -import os -if 'XDG_CACHE_HOME' not in os.environ: - os.environ['XDG_CACHE_HOME'] = os.path.realpath(os.path.join(os.getcwd(), './models/')) - -if 'TORTOISE_MODELS_DIR' not in os.environ: - os.environ['TORTOISE_MODELS_DIR'] = os.path.realpath(os.path.join(os.getcwd(), './models/tortoise/')) - -if 'TRANSFORMERS_CACHE' not in os.environ: - os.environ['TRANSFORMERS_CACHE'] = os.path.realpath(os.path.join(os.getcwd(), './models/transformers/')) - -import argparse -import time -import math -import json -import base64 -import re -import urllib.request -import signal -import gc -import subprocess -import psutil -import yaml -import hashlib -import string -import random - -from tqdm import tqdm -import torch -import torchaudio -import music_tag -import gradio as gr -import gradio.utils -import pandas as pd -import numpy as np - -from glob import glob -from datetime import datetime -from datetime import timedelta - -from tortoise.api import TextToSpeech as TorToise_TTS, MODELS, get_model_path, pad_or_truncate -from tortoise.utils.audio import load_audio, load_voice, load_voices, get_voice_dir, get_voices -from tortoise.utils.text import split_and_recombine_text -from tortoise.utils.device import get_device_name, set_device_name, get_device_count, get_device_vram, get_device_batch_size, do_gc - - -MODELS['dvae.pth'] = "https://huggingface.co/jbetker/tortoise-tts-v2/resolve/3704aea61678e7e468a06d8eea121dba368a798e/.models/dvae.pth" - -WHISPER_MODELS = ["tiny", "base", "small", "medium", "large", "large-v1", "large-v2"] -WHISPER_SPECIALIZED_MODELS = ["tiny.en", "base.en", "small.en", "medium.en"] -WHISPER_BACKENDS = ["openai/whisper", "lightmare/whispercpp", "m-bain/whisperx"] -VOCODERS = ['univnet', 'bigvgan_base_24khz_100band', 'bigvgan_24khz_100band'] -TTSES = ['tortoise'] - -INFERENCING = False -GENERATE_SETTINGS_ARGS = None - -LEARNING_RATE_SCHEMES = {"Multistep": "MultiStepLR", "Cos. 
Annealing": "CosineAnnealingLR_Restart"} -LEARNING_RATE_SCHEDULE = [ 2, 4, 9, 18, 25, 33, 50 ] - -RESAMPLERS = {} - -MIN_TRAINING_DURATION = 0.6 -MAX_TRAINING_DURATION = 11.6097505669 -MAX_TRAINING_CHAR_LENGTH = 200 - -VALLE_ENABLED = False -BARK_ENABLED = False - -VERBOSE_DEBUG = True - -import traceback - -try: - from whisper.normalizers.english import EnglishTextNormalizer - from whisper.normalizers.basic import BasicTextNormalizer - from whisper.tokenizer import LANGUAGES - - print("Whisper detected") -except Exception as e: - if VERBOSE_DEBUG: - print(traceback.format_exc()) - pass - -try: - from vall_e.emb.qnt import encode as valle_quantize - from vall_e.emb.g2p import encode as valle_phonemize - - from vall_e.inference import TTS as VALLE_TTS - - import soundfile - - print("VALL-E detected") - VALLE_ENABLED = True -except Exception as e: - if VERBOSE_DEBUG: - print(traceback.format_exc()) - pass - -if VALLE_ENABLED: - TTSES.append('vall-e') - -# torchaudio.set_audio_backend('soundfile') - -try: - import bark - from bark import text_to_semantic - from bark.generation import SAMPLE_RATE as BARK_SAMPLE_RATE, ALLOWED_PROMPTS, preload_models, codec_decode, generate_coarse, generate_fine, generate_text_semantic, load_codec_model - from bark.api import generate_audio as bark_generate_audio - from encodec.utils import convert_audio - - from scipy.io.wavfile import write as write_wav - - print("Bark detected") - BARK_ENABLED = True -except Exception as e: - if VERBOSE_DEBUG: - print(traceback.format_exc()) - pass - -if BARK_ENABLED: - TTSES.append('bark') - - def semantic_to_audio_tokens( - semantic_tokens, - history_prompt = None, - temp = 0.7, - silent = False, - output_full = False, - ): - coarse_tokens = generate_coarse( - semantic_tokens, history_prompt=history_prompt, temp=temp, silent=silent, use_kv_caching=True - ) - fine_tokens = generate_fine(coarse_tokens, history_prompt=history_prompt, temp=0.5) - - if output_full: - full_generation = { - "semantic_prompt": semantic_tokens, - "coarse_prompt": coarse_tokens, - "fine_prompt": fine_tokens, - } - return full_generation - return fine_tokens - - class Bark_TTS(): - def __init__(self, small=False): - self.input_sample_rate = BARK_SAMPLE_RATE - self.output_sample_rate = BARK_SAMPLE_RATE # args.output_sample_rate - - preload_models( - text_use_gpu=True, - coarse_use_gpu=True, - fine_use_gpu=True, - codec_use_gpu=True, - - text_use_small=small, - coarse_use_small=small, - fine_use_small=small, - - force_reload=False - ) - - self.device = get_device_name() - - try: - from vocos import Vocos - self.vocos_enabled = True - print("Vocos detected") - except Exception as e: - if VERBOSE_DEBUG: - print(traceback.format_exc()) - self.vocos_enabled = False - - try: - from hubert.hubert_manager import HuBERTManager - - hubert_manager = HuBERTManager() - hubert_manager.make_sure_hubert_installed() - hubert_manager.make_sure_tokenizer_installed() - - self.hubert_enabled = True - print("HuBERT detected") - except Exception as e: - if VERBOSE_DEBUG: - print(traceback.format_exc()) - self.hubert_enabled = False - - if self.vocos_enabled: - self.vocos = Vocos.from_pretrained("charactr/vocos-encodec-24khz").to(self.device) - - def create_voice( self, voice ): - transcription_json = f'./training/{voice}/whisper.json' - if not os.path.exists(transcription_json): - raise f"Transcription for voice not found: {voice}" - - transcriptions = json.load(open(transcription_json, 'r', encoding="utf-8")) - candidates = [] - for file in transcriptions: - result = 
transcriptions[file] - added = 0 - - for segment in result['segments']: - path = file.replace(".wav", f"_{pad(segment['id'], 4)}.wav") - # check if the slice actually exists - if not os.path.exists(f'./training/{voice}/audio/{path}'): - continue - - entry = ( - path, - segment['end'] - segment['start'], - segment['text'] - ) - candidates.append(entry) - added = added + 1 - - # if nothing got added (assuming because nothign was sliced), use the master file - if added == 0: # added < len(result['segments']): - start = 0 - end = 0 - for segment in result['segments']: - start = max( start, segment['start'] ) - end = max( end, segment['end'] ) - - entry = ( - file, - end - start, - result['text'] - ) - candidates.append(entry) - - candidates.sort(key=lambda x: x[1]) - candidate = random.choice(candidates) - audio_filepath = f'./training/{voice}/audio/{candidate[0]}' - text = candidate[-1] - - print("Using as reference:", audio_filepath, text) - - # Load and pre-process the audio waveform - model = load_codec_model(use_gpu=True) - wav, sr = torchaudio.load(audio_filepath) - wav = convert_audio(wav, sr, model.sample_rate, model.channels) - - # generate semantic tokens - - if self.hubert_enabled: - from hubert.pre_kmeans_hubert import CustomHubert - from hubert.customtokenizer import CustomTokenizer - - wav = wav.to(self.device) - - # Extract discrete codes from EnCodec - with torch.no_grad(): - encoded_frames = model.encode(wav.unsqueeze(0)) - codes = torch.cat([encoded[0] for encoded in encoded_frames], dim=-1).squeeze() # [n_q, T] - - # get seconds of audio - seconds = wav.shape[-1] / model.sample_rate - - # Load the HuBERT model - hubert_model = CustomHubert(checkpoint_path='./data/models/hubert/hubert.pt').to(self.device) - - # Load the CustomTokenizer model - tokenizer = CustomTokenizer.load_from_checkpoint('./data/models/hubert/tokenizer.pth').to(self.device) - - semantic_vectors = hubert_model.forward(wav, input_sample_hz=model.sample_rate) - semantic_tokens = tokenizer.get_token(semantic_vectors) - - # move codes to cpu - codes = codes.cpu().numpy() - # move semantic tokens to cpu - semantic_tokens = semantic_tokens.cpu().numpy() - else: - wav = wav.unsqueeze(0).to(self.device) - - # Extract discrete codes from EnCodec - with torch.no_grad(): - encoded_frames = model.encode(wav) - codes = torch.cat([encoded[0] for encoded in encoded_frames], dim=-1).squeeze().cpu().numpy() # [n_q, T] - - # get seconds of audio - seconds = wav.shape[-1] / model.sample_rate - - # generate semantic tokens - semantic_tokens = generate_text_semantic(text, max_gen_duration_s=seconds, top_k=50, top_p=.95, temp=0.7) - - # print(bark.__file__) - bark_location = os.path.dirname(os.path.relpath(bark.__file__)) # './modules/bark/bark/' - output_path = f'./{bark_location}/assets/prompts/' + voice.replace("/", "_") + '.npz' - np.savez(output_path, fine_prompt=codes, coarse_prompt=codes[:2, :], semantic_prompt=semantic_tokens) - - def inference( self, text, voice, text_temp=0.7, waveform_temp=0.7 ): - if voice == "random": - voice = None - else: - if not os.path.exists('./modules/bark/bark/assets/prompts/' + voice + '.npz'): - self.create_voice( voice ) - voice = voice.replace("/", "_") - if voice not in ALLOWED_PROMPTS: - ALLOWED_PROMPTS.add( voice ) - - semantic_tokens = text_to_semantic(text, history_prompt=voice, temp=text_temp, silent=False) - audio_tokens = semantic_to_audio_tokens( semantic_tokens, history_prompt=voice, temp=waveform_temp, silent=False, output_full=False ) - - if self.vocos_enabled: - 
audio_tokens_torch = torch.from_numpy(audio_tokens).to(self.device) - features = self.vocos.codes_to_features(audio_tokens_torch) - wav = self.vocos.decode(features, bandwidth_id=torch.tensor([2], device=self.device)) - else: - wav = codec_decode( audio_tokens ) - - return ( wav, BARK_SAMPLE_RATE ) - # return (bark_generate_audio(text, history_prompt=voice, text_temp=text_temp, waveform_temp=waveform_temp), BARK_SAMPLE_RATE) - -args = None -tts = None -tts_loading = False -webui = None -voicefixer = None - -whisper_model = None -whisper_align_model = None - -training_state = None - -current_voice = None - -def cleanup_voice_name( name ): - return name.split("/")[-1] - -def resample( waveform, input_rate, output_rate=44100 ): - # mono-ize - waveform = torch.mean(waveform, dim=0, keepdim=True) - - if input_rate == output_rate: - return waveform, output_rate - - key = f'{input_rate}:{output_rate}' - if not key in RESAMPLERS: - RESAMPLERS[key] = torchaudio.transforms.Resample( - input_rate, - output_rate, - lowpass_filter_width=16, - rolloff=0.85, - resampling_method="kaiser_window", - beta=8.555504641634386, - ) - - return RESAMPLERS[key]( waveform ), output_rate - -def generate(**kwargs): - if args.tts_backend == "tortoise": - return generate_tortoise(**kwargs) - if args.tts_backend == "vall-e": - return generate_valle(**kwargs) - if args.tts_backend == "bark": - return generate_bark(**kwargs) - -def generate_bark(**kwargs): - parameters = {} - parameters.update(kwargs) - - voice = parameters['voice'] - progress = parameters['progress'] if 'progress' in parameters else None - if parameters['seed'] == 0: - parameters['seed'] = None - - usedSeed = parameters['seed'] - - global args - global tts - - unload_whisper() - unload_voicefixer() - - if not tts: - # should check if it's loading or unloaded, and load it if it's unloaded - if tts_loading: - raise Exception("TTS is still initializing...") - if progress is not None: - notify_progress("Initializing TTS...", progress=progress) - load_tts() - if hasattr(tts, "loading") and tts.loading: - raise Exception("TTS is still initializing...") - - do_gc() - - voice_samples = None - conditioning_latents = None - sample_voice = None - - voice_cache = {} - - def get_settings( override=None ): - settings = { - 'voice': parameters['voice'], - 'text_temp': float(parameters['temperature']), - 'waveform_temp': float(parameters['temperature']), - } - - # could be better to just do a ternary on everything above, but i am not a professional - selected_voice = voice - if override is not None: - if 'voice' in override: - selected_voice = override['voice'] - - for k in override: - if k not in settings: - continue - settings[k] = override[k] - - return settings - - if not parameters['delimiter']: - parameters['delimiter'] = "\n" - elif parameters['delimiter'] == "\\n": - parameters['delimiter'] = "\n" - - if parameters['delimiter'] and parameters['delimiter'] != "" and parameters['delimiter'] in parameters['text']: - texts = parameters['text'].split(parameters['delimiter']) - else: - texts = split_and_recombine_text(parameters['text']) - - full_start_time = time.time() - - outdir = f"{args.results_folder}/{voice}/" - os.makedirs(outdir, exist_ok=True) - - audio_cache = {} - - volume_adjust = torchaudio.transforms.Vol(gain=args.output_volume, gain_type="amplitude") if args.output_volume != 1 else None - - idx = 0 - idx_cache = {} - for i, file in enumerate(os.listdir(outdir)): - filename = os.path.basename(file) - extension = os.path.splitext(filename)[-1][1:] - if 
extension != "json" and extension != "wav": - continue - match = re.findall(rf"^{cleanup_voice_name(voice)}_(\d+)(?:.+?)?{extension}$", filename) - if match and len(match) > 0: - key = int(match[0]) - idx_cache[key] = True - - if len(idx_cache) > 0: - keys = sorted(list(idx_cache.keys())) - idx = keys[-1] + 1 - - idx = pad(idx, 4) - - def get_name(line=0, candidate=0, combined=False): - name = f"{idx}" - if combined: - name = f"{name}_combined" - elif len(texts) > 1: - name = f"{name}_{line}" - if parameters['candidates'] > 1: - name = f"{name}_{candidate}" - return name - - def get_info( voice, settings = None, latents = True ): - info = {} - info.update(parameters) - - info['time'] = time.time()-full_start_time - info['datetime'] = datetime.now().isoformat() - - info['progress'] = None - del info['progress'] - - if info['delimiter'] == "\n": - info['delimiter'] = "\\n" - - if settings is not None: - for k in settings: - if k in info: - info[k] = settings[k] - return info - - INFERENCING = True - for line, cut_text in enumerate(texts): - tqdm_prefix = f'[{str(line+1)}/{str(len(texts))}]' - print(f"{tqdm_prefix} Generating line: {cut_text}") - start_time = time.time() - - # do setting editing - match = re.findall(r'^(\{.+\}) (.+?)$', cut_text) - override = None - if match and len(match) > 0: - match = match[0] - try: - override = json.loads(match[0]) - cut_text = match[1].strip() - except Exception as e: - raise Exception("Prompt settings editing requested, but received invalid JSON") - - settings = get_settings( override=override ) - - gen = tts.inference(cut_text, **settings ) - - run_time = time.time()-start_time - print(f"Generating line took {run_time} seconds") - - if not isinstance(gen, list): - gen = [gen] - - for j, g in enumerate(gen): - wav, sr = g - name = get_name(line=line, candidate=j) - - settings['text'] = cut_text - settings['time'] = run_time - settings['datetime'] = datetime.now().isoformat() - - # save here in case some error happens mid-batch - if tts.vocos_enabled: - torchaudio.save(f'{outdir}/{cleanup_voice_name(voice)}_{name}.wav', wav.cpu(), sr) - else: - write_wav(f'{outdir}/{cleanup_voice_name(voice)}_{name}.wav', sr, wav) - wav, sr = torchaudio.load(f'{outdir}/{cleanup_voice_name(voice)}_{name}.wav') - - audio_cache[name] = { - 'audio': wav, - 'settings': get_info(voice=override['voice'] if override and 'voice' in override else voice, settings=settings) - } - - del gen - do_gc() - INFERENCING = False - - for k in audio_cache: - audio = audio_cache[k]['audio'] - - audio, _ = resample(audio, tts.output_sample_rate, args.output_sample_rate) - if volume_adjust is not None: - audio = volume_adjust(audio) - - audio_cache[k]['audio'] = audio - torchaudio.save(f'{outdir}/{cleanup_voice_name(voice)}_{k}.wav', audio, args.output_sample_rate) - - output_voices = [] - for candidate in range(parameters['candidates']): - if len(texts) > 1: - audio_clips = [] - for line in range(len(texts)): - name = get_name(line=line, candidate=candidate) - audio = audio_cache[name]['audio'] - audio_clips.append(audio) - - name = get_name(candidate=candidate, combined=True) - audio = torch.cat(audio_clips, dim=-1) - torchaudio.save(f'{outdir}/{cleanup_voice_name(voice)}_{name}.wav', audio, args.output_sample_rate) - - audio = audio.squeeze(0).cpu() - audio_cache[name] = { - 'audio': audio, - 'settings': get_info(voice=voice), - 'output': True - } - else: - try: - name = get_name(candidate=candidate) - audio_cache[name]['output'] = True - except Exception as e: - for name in audio_cache: - 
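# Fallback path: if the expected candidate name could not be resolved from the
# cache, mark every generated clip as a final output instead of failing.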
audio_cache[name]['output'] = True - - - if args.voice_fixer: - if not voicefixer: - notify_progress("Loading voicefix...", progress=progress) - load_voicefixer() - - try: - fixed_cache = {} - for name in tqdm(audio_cache, desc="Running voicefix..."): - del audio_cache[name]['audio'] - if 'output' not in audio_cache[name] or not audio_cache[name]['output']: - continue - - path = f'{outdir}/{cleanup_voice_name(voice)}_{name}.wav' - fixed = f'{outdir}/{cleanup_voice_name(voice)}_{name}_fixed.wav' - voicefixer.restore( - input=path, - output=fixed, - cuda=get_device_name() == "cuda" and args.voice_fixer_use_cuda, - #mode=mode, - ) - - fixed_cache[f'{name}_fixed'] = { - 'settings': audio_cache[name]['settings'], - 'output': True - } - audio_cache[name]['output'] = False - - for name in fixed_cache: - audio_cache[name] = fixed_cache[name] - except Exception as e: - print(e) - print("\nFailed to run Voicefixer") - - for name in audio_cache: - if 'output' not in audio_cache[name] or not audio_cache[name]['output']: - if args.prune_nonfinal_outputs: - audio_cache[name]['pruned'] = True - os.remove(f'{outdir}/{cleanup_voice_name(voice)}_{name}.wav') - continue - - output_voices.append(f'{outdir}/{cleanup_voice_name(voice)}_{name}.wav') - - if not args.embed_output_metadata: - with open(f'{outdir}/{cleanup_voice_name(voice)}_{name}.json', 'w', encoding="utf-8") as f: - f.write(json.dumps(audio_cache[name]['settings'], indent='\t') ) - - if args.embed_output_metadata: - for name in tqdm(audio_cache, desc="Embedding metadata..."): - if 'pruned' in audio_cache[name] and audio_cache[name]['pruned']: - continue - - metadata = music_tag.load_file(f"{outdir}/{cleanup_voice_name(voice)}_{name}.wav") - metadata['lyrics'] = json.dumps(audio_cache[name]['settings']) - metadata.save() - - if sample_voice is not None: - sample_voice = (tts.input_sample_rate, sample_voice.numpy()) - - info = get_info(voice=voice, latents=False) - print(f"Generation took {info['time']} seconds, saved to '{output_voices[0]}'\n") - - info['seed'] = usedSeed - if 'latents' in info: - del info['latents'] - - os.makedirs('./config/', exist_ok=True) - with open(f'./config/generate.json', 'w', encoding="utf-8") as f: - f.write(json.dumps(info, indent='\t') ) - - stats = [ - [ parameters['seed'], "{:.3f}".format(info['time']) ] - ] - - return ( - sample_voice, - output_voices, - stats, - ) - -def generate_valle(**kwargs): - parameters = {} - parameters.update(kwargs) - - voice = parameters['voice'] - progress = parameters['progress'] if 'progress' in parameters else None - if parameters['seed'] == 0: - parameters['seed'] = None - - usedSeed = parameters['seed'] - - global args - global tts - - unload_whisper() - unload_voicefixer() - - if not tts: - # should check if it's loading or unloaded, and load it if it's unloaded - if tts_loading: - raise Exception("TTS is still initializing...") - if progress is not None: - notify_progress("Initializing TTS...", progress=progress) - load_tts() - if hasattr(tts, "loading") and tts.loading: - raise Exception("TTS is still initializing...") - - do_gc() - - voice_samples = None - conditioning_latents = None - sample_voice = None - - voice_cache = {} - def fetch_voice( voice ): - if voice in voice_cache: - return voice_cache[voice] - - """ - voice_dir = f'./training/{voice}/audio/' - - if not os.path.isdir(voice_dir) or len(os.listdir(voice_dir)) == 0: - voice_dir = f'./voices/{voice}/' - - files = [ f'{voice_dir}/{d}' for d in os.listdir(voice_dir) if d[-4:] == ".wav" ] - """ - - if 
os.path.isdir(f'./training/{voice}/audio/'): - files = get_voice(name="audio", dir=f"./training/{voice}/", load_latents=False) - else: - files = get_voice(name=voice, load_latents=False) - - # return files - voice_cache[voice] = random.sample(files, k=min(3, len(files))) - return voice_cache[voice] - - def get_settings( override=None ): - settings = { - 'ar_temp': float(parameters['temperature']), - 'nar_temp': float(parameters['temperature']), - 'max_ar_steps': parameters['num_autoregressive_samples'], - } - - # could be better to just do a ternary on everything above, but i am not a professional - selected_voice = voice - if override is not None: - if 'voice' in override: - selected_voice = override['voice'] - - for k in override: - if k not in settings: - continue - settings[k] = override[k] - - settings['references'] = fetch_voice(voice=selected_voice) # [ fetch_voice(voice=selected_voice) for _ in range(3) ] - return settings - - if not parameters['delimiter']: - parameters['delimiter'] = "\n" - elif parameters['delimiter'] == "\\n": - parameters['delimiter'] = "\n" - - if parameters['delimiter'] and parameters['delimiter'] != "" and parameters['delimiter'] in parameters['text']: - texts = parameters['text'].split(parameters['delimiter']) - else: - texts = split_and_recombine_text(parameters['text']) - - full_start_time = time.time() - - outdir = f"{args.results_folder}/{voice}/" - os.makedirs(outdir, exist_ok=True) - - audio_cache = {} - - volume_adjust = torchaudio.transforms.Vol(gain=args.output_volume, gain_type="amplitude") if args.output_volume != 1 else None - - idx = 0 - idx_cache = {} - for i, file in enumerate(os.listdir(outdir)): - filename = os.path.basename(file) - extension = os.path.splitext(filename)[-1][1:] - if extension != "json" and extension != "wav": - continue - match = re.findall(rf"^{voice}_(\d+)(?:.+?)?{extension}$", filename) - if match and len(match) > 0: - key = int(match[0]) - idx_cache[key] = True - - if len(idx_cache) > 0: - keys = sorted(list(idx_cache.keys())) - idx = keys[-1] + 1 - - idx = pad(idx, 4) - - def get_name(line=0, candidate=0, combined=False): - name = f"{idx}" - if combined: - name = f"{name}_combined" - elif len(texts) > 1: - name = f"{name}_{line}" - if parameters['candidates'] > 1: - name = f"{name}_{candidate}" - return name - - def get_info( voice, settings = None, latents = True ): - info = {} - info.update(parameters) - - info['time'] = time.time()-full_start_time - info['datetime'] = datetime.now().isoformat() - - info['progress'] = None - del info['progress'] - - if info['delimiter'] == "\n": - info['delimiter'] = "\\n" - - if settings is not None: - for k in settings: - if k in info: - info[k] = settings[k] - return info - - INFERENCING = True - for line, cut_text in enumerate(texts): - tqdm_prefix = f'[{str(line+1)}/{str(len(texts))}]' - print(f"{tqdm_prefix} Generating line: {cut_text}") - start_time = time.time() - - # do setting editing - match = re.findall(r'^(\{.+\}) (.+?)$', cut_text) - override = None - if match and len(match) > 0: - match = match[0] - try: - override = json.loads(match[0]) - cut_text = match[1].strip() - except Exception as e: - raise Exception("Prompt settings editing requested, but received invalid JSON") - - name = get_name(line=line, candidate=0) - - settings = get_settings( override=override ) - references = settings['references'] - settings.pop("references") - settings['out_path'] = f'{outdir}/{cleanup_voice_name(voice)}_{name}.wav' - - gen = tts.inference(cut_text, references, **settings ) - - 
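# Note: settings['out_path'] was handed to the VALL-E backend above, so the
# generated clip is expected to already be written to disk by tts.inference();
# it is reloaded with torchaudio below (the explicit save calls are left commented out).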
run_time = time.time()-start_time - print(f"Generating line took {run_time} seconds") - - if not isinstance(gen, list): - gen = [gen] - - for j, g in enumerate(gen): - wav, sr = g - name = get_name(line=line, candidate=j) - - settings['text'] = cut_text - settings['time'] = run_time - settings['datetime'] = datetime.now().isoformat() - - # save here in case some error happens mid-batch - #torchaudio.save(f'{outdir}/{cleanup_voice_name(voice)}_{name}.wav', wav.cpu(), sr) - #soundfile.write(f'{outdir}/{cleanup_voice_name(voice)}_{name}.wav', wav.cpu()[0,0], sr) - wav, sr = torchaudio.load(f'{outdir}/{cleanup_voice_name(voice)}_{name}.wav') - - audio_cache[name] = { - 'audio': wav, - 'settings': get_info(voice=override['voice'] if override and 'voice' in override else voice, settings=settings) - } - - del gen - do_gc() - INFERENCING = False - - for k in audio_cache: - audio = audio_cache[k]['audio'] - - audio, _ = resample(audio, tts.output_sample_rate, args.output_sample_rate) - if volume_adjust is not None: - audio = volume_adjust(audio) - - audio_cache[k]['audio'] = audio - torchaudio.save(f'{outdir}/{cleanup_voice_name(voice)}_{k}.wav', audio, args.output_sample_rate) - - output_voices = [] - for candidate in range(parameters['candidates']): - if len(texts) > 1: - audio_clips = [] - for line in range(len(texts)): - name = get_name(line=line, candidate=candidate) - audio = audio_cache[name]['audio'] - audio_clips.append(audio) - - name = get_name(candidate=candidate, combined=True) - audio = torch.cat(audio_clips, dim=-1) - torchaudio.save(f'{outdir}/{cleanup_voice_name(voice)}_{name}.wav', audio, args.output_sample_rate) - - audio = audio.squeeze(0).cpu() - audio_cache[name] = { - 'audio': audio, - 'settings': get_info(voice=voice), - 'output': True - } - else: - name = get_name(candidate=candidate) - audio_cache[name]['output'] = True - - - if args.voice_fixer: - if not voicefixer: - notify_progress("Loading voicefix...", progress=progress) - load_voicefixer() - - try: - fixed_cache = {} - for name in tqdm(audio_cache, desc="Running voicefix..."): - del audio_cache[name]['audio'] - if 'output' not in audio_cache[name] or not audio_cache[name]['output']: - continue - - path = f'{outdir}/{cleanup_voice_name(voice)}_{name}.wav' - fixed = f'{outdir}/{cleanup_voice_name(voice)}_{name}_fixed.wav' - voicefixer.restore( - input=path, - output=fixed, - cuda=get_device_name() == "cuda" and args.voice_fixer_use_cuda, - #mode=mode, - ) - - fixed_cache[f'{name}_fixed'] = { - 'settings': audio_cache[name]['settings'], - 'output': True - } - audio_cache[name]['output'] = False - - for name in fixed_cache: - audio_cache[name] = fixed_cache[name] - except Exception as e: - print(e) - print("\nFailed to run Voicefixer") - - for name in audio_cache: - if 'output' not in audio_cache[name] or not audio_cache[name]['output']: - if args.prune_nonfinal_outputs: - audio_cache[name]['pruned'] = True - os.remove(f'{outdir}/{cleanup_voice_name(voice)}_{name}.wav') - continue - - output_voices.append(f'{outdir}/{cleanup_voice_name(voice)}_{name}.wav') - - if not args.embed_output_metadata: - with open(f'{outdir}/{cleanup_voice_name(voice)}_{name}.json', 'w', encoding="utf-8") as f: - f.write(json.dumps(audio_cache[name]['settings'], indent='\t') ) - - if args.embed_output_metadata: - for name in tqdm(audio_cache, desc="Embedding metadata..."): - if 'pruned' in audio_cache[name] and audio_cache[name]['pruned']: - continue - - metadata = music_tag.load_file(f"{outdir}/{cleanup_voice_name(voice)}_{name}.wav") - 
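# The full generation settings are serialized into the file's 'lyrics' tag, so
# each output .wav carries the parameters used to produce it even when no
# sidecar .json is written.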
metadata['lyrics'] = json.dumps(audio_cache[name]['settings']) - metadata.save() - - if sample_voice is not None: - sample_voice = (tts.input_sample_rate, sample_voice.numpy()) - - info = get_info(voice=voice, latents=False) - print(f"Generation took {info['time']} seconds, saved to '{output_voices[0]}'\n") - - info['seed'] = usedSeed - if 'latents' in info: - del info['latents'] - - os.makedirs('./config/', exist_ok=True) - with open(f'./config/generate.json', 'w', encoding="utf-8") as f: - f.write(json.dumps(info, indent='\t') ) - - stats = [ - [ parameters['seed'], "{:.3f}".format(info['time']) ] - ] - - return ( - sample_voice, - output_voices, - stats, - ) - -def generate_tortoise(**kwargs): - parameters = {} - parameters.update(kwargs) - - voice = parameters['voice'] - progress = parameters['progress'] if 'progress' in parameters else None - if parameters['seed'] == 0: - parameters['seed'] = None - - usedSeed = parameters['seed'] - - global args - global tts - - unload_whisper() - unload_voicefixer() - - if not tts: - # should check if it's loading or unloaded, and load it if it's unloaded - if tts_loading: - raise Exception("TTS is still initializing...") - load_tts() - if hasattr(tts, "loading") and tts.loading: - raise Exception("TTS is still initializing...") - - do_gc() - - voice_samples = None - conditioning_latents = None - sample_voice = None - - voice_cache = {} - def fetch_voice( voice ): - cache_key = f'{voice}:{tts.autoregressive_model_hash[:8]}' - if cache_key in voice_cache: - return voice_cache[cache_key] - - print(f"Loading voice: {voice} with model {tts.autoregressive_model_hash[:8]}") - sample_voice = None - if voice == "microphone": - if parameters['mic_audio'] is None: - raise Exception("Please provide audio from mic when choosing `microphone` as a voice input") - voice_samples, conditioning_latents = [load_audio(parameters['mic_audio'], tts.input_sample_rate)], None - elif voice == "random": - voice_samples, conditioning_latents = None, tts.get_random_conditioning_latents() - else: - if progress is not None: - notify_progress(f"Loading voice: {voice}", progress=progress) - - voice_samples, conditioning_latents = load_voice(voice, model_hash=tts.autoregressive_model_hash) - - if voice_samples and len(voice_samples) > 0: - if conditioning_latents is None: - conditioning_latents = compute_latents(voice=voice, voice_samples=voice_samples, voice_latents_chunks=parameters['voice_latents_chunks']) - - sample_voice = torch.cat(voice_samples, dim=-1).squeeze().cpu() - voice_samples = None - - voice_cache[cache_key] = (voice_samples, conditioning_latents, sample_voice) - return voice_cache[cache_key] - - def get_settings( override=None ): - settings = { - 'temperature': float(parameters['temperature']), - - 'top_p': float(parameters['top_p']), - 'diffusion_temperature': float(parameters['diffusion_temperature']), - 'length_penalty': float(parameters['length_penalty']), - 'repetition_penalty': float(parameters['repetition_penalty']), - 'cond_free_k': float(parameters['cond_free_k']), - - 'num_autoregressive_samples': parameters['num_autoregressive_samples'], - 'sample_batch_size': args.sample_batch_size, - 'diffusion_iterations': parameters['diffusion_iterations'], - - 'voice_samples': None, - 'conditioning_latents': None, - - 'use_deterministic_seed': parameters['seed'], - 'return_deterministic_state': True, - 'k': parameters['candidates'], - 'diffusion_sampler': parameters['diffusion_sampler'], - 'breathing_room': parameters['breathing_room'], - 'half_p': "Half Precision" 
in parameters['experimentals'], - 'cond_free': "Conditioning-Free" in parameters['experimentals'], - 'cvvp_amount': parameters['cvvp_weight'], - - 'autoregressive_model': args.autoregressive_model, - 'diffusion_model': args.diffusion_model, - 'tokenizer_json': args.tokenizer_json, - } - - # could be better to just do a ternary on everything above, but i am not a professional - selected_voice = voice - if override is not None: - if 'voice' in override: - selected_voice = override['voice'] - - for k in override: - if k not in settings: - continue - settings[k] = override[k] - - if settings['autoregressive_model'] is not None: - if settings['autoregressive_model'] == "auto": - settings['autoregressive_model'] = deduce_autoregressive_model(selected_voice) - tts.load_autoregressive_model(settings['autoregressive_model']) - - if settings['diffusion_model'] is not None: - if settings['diffusion_model'] == "auto": - settings['diffusion_model'] = deduce_diffusion_model(selected_voice) - tts.load_diffusion_model(settings['diffusion_model']) - - if settings['tokenizer_json'] is not None: - tts.load_tokenizer_json(settings['tokenizer_json']) - - settings['voice_samples'], settings['conditioning_latents'], _ = fetch_voice(voice=selected_voice) - - # clamp it down for the insane users who want this - # it would be wiser to enforce the sample size to the batch size, but this is what the user wants - settings['sample_batch_size'] = args.sample_batch_size - if not settings['sample_batch_size']: - settings['sample_batch_size'] = tts.autoregressive_batch_size - if settings['num_autoregressive_samples'] < settings['sample_batch_size']: - settings['sample_batch_size'] = settings['num_autoregressive_samples'] - - if settings['conditioning_latents'] is not None and len(settings['conditioning_latents']) == 2 and settings['cvvp_amount'] > 0: - print("Requesting weighing against CVVP weight, but voice latents are missing some extra data. 
Please regenerate your voice latents with 'Slimmer voice latents' unchecked.") - settings['cvvp_amount'] = 0 - - return settings - - if not parameters['delimiter']: - parameters['delimiter'] = "\n" - elif parameters['delimiter'] == "\\n": - parameters['delimiter'] = "\n" - - if parameters['delimiter'] and parameters['delimiter'] != "" and parameters['delimiter'] in parameters['text']: - texts = parameters['text'].split(parameters['delimiter']) - else: - texts = split_and_recombine_text(parameters['text']) - - full_start_time = time.time() - - outdir = f"{args.results_folder}/{voice}/" - os.makedirs(outdir, exist_ok=True) - - audio_cache = {} - - volume_adjust = torchaudio.transforms.Vol(gain=args.output_volume, gain_type="amplitude") if args.output_volume != 1 else None - - idx = 0 - idx_cache = {} - for i, file in enumerate(os.listdir(outdir)): - filename = os.path.basename(file) - extension = os.path.splitext(filename)[-1][1:] - if extension != "json" and extension != "wav": - continue - match = re.findall(rf"^{voice}_(\d+)(?:.+?)?{extension}$", filename) - if match and len(match) > 0: - key = int(match[0]) - idx_cache[key] = True - - if len(idx_cache) > 0: - keys = sorted(list(idx_cache.keys())) - idx = keys[-1] + 1 - - idx = pad(idx, 4) - - def get_name(line=0, candidate=0, combined=False): - name = f"{idx}" - if combined: - name = f"{name}_combined" - elif len(texts) > 1: - name = f"{name}_{line}" - if parameters['candidates'] > 1: - name = f"{name}_{candidate}" - return name - - def get_info( voice, settings = None, latents = True ): - info = {} - info.update(parameters) - - info['time'] = time.time()-full_start_time - info['datetime'] = datetime.now().isoformat() - - info['model'] = tts.autoregressive_model_path - info['model_hash'] = tts.autoregressive_model_hash - - info['progress'] = None - del info['progress'] - - if info['delimiter'] == "\n": - info['delimiter'] = "\\n" - - if settings is not None: - for k in settings: - if k in info: - info[k] = settings[k] - - if 'half_p' in settings and 'cond_free' in settings: - info['experimentals'] = [] - if settings['half_p']: - info['experimentals'].append("Half Precision") - if settings['cond_free']: - info['experimentals'].append("Conditioning-Free") - - if latents and "latents" not in info: - voice = info['voice'] - model_hash = settings["model_hash"][:8] if settings is not None and "model_hash" in settings else tts.autoregressive_model_hash[:8] - - dir = f'{get_voice_dir()}/{voice}/' - latents_path = f'{dir}/cond_latents_{model_hash}.pth' - - if voice == "random" or voice == "microphone": - if latents and settings is not None and settings['conditioning_latents']: - os.makedirs(dir, exist_ok=True) - torch.save(conditioning_latents, latents_path) - - if latents_path and os.path.exists(latents_path): - try: - with open(latents_path, 'rb') as f: - info['latents'] = base64.b64encode(f.read()).decode("ascii") - except Exception as e: - pass - - return info - - INFERENCING = True - for line, cut_text in enumerate(texts): - if should_phonemize(): - cut_text = phonemizer( cut_text ) - - if parameters['emotion'] == "Custom": - if parameters['prompt'] and parameters['prompt'].strip() != "": - cut_text = f"[{parameters['prompt']},] {cut_text}" - elif parameters['emotion'] != "None" and parameters['emotion']: - cut_text = f"[I am really {parameters['emotion'].lower()},] {cut_text}" - - tqdm_prefix = f'[{str(line+1)}/{str(len(texts))}]' - print(f"{tqdm_prefix} Generating line: {cut_text}") - start_time = time.time() - - # do setting editing - 
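# A line may be prefixed with a JSON object to override generation settings for
# that line only. Illustrative (hypothetical) example of the expected form:
#   {"top_p": 0.8, "voice": "other_voice"} The text that should be spoken.
# The JSON is parsed below; keys not present in the settings dict are ignored,
# and 'voice' switches which conditioning latents are fetched for this line.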
match = re.findall(r'^(\{.+\}) (.+?)$', cut_text) - override = None - if match and len(match) > 0: - match = match[0] - try: - override = json.loads(match[0]) - cut_text = match[1].strip() - except Exception as e: - raise Exception("Prompt settings editing requested, but received invalid JSON") - - settings = get_settings( override=override ) - gen, additionals = tts.tts(cut_text, **settings ) - - parameters['seed'] = additionals[0] - run_time = time.time()-start_time - print(f"Generating line took {run_time} seconds") - - if not isinstance(gen, list): - gen = [gen] - - for j, g in enumerate(gen): - audio = g.squeeze(0).cpu() - name = get_name(line=line, candidate=j) - - settings['text'] = cut_text - settings['time'] = run_time - settings['datetime'] = datetime.now().isoformat() - if args.tts_backend == "tortoise": - settings['model'] = tts.autoregressive_model_path - settings['model_hash'] = tts.autoregressive_model_hash - - audio_cache[name] = { - 'audio': audio, - 'settings': get_info(voice=override['voice'] if override and 'voice' in override else voice, settings=settings) - } - # save here in case some error happens mid-batch - torchaudio.save(f'{outdir}/{cleanup_voice_name(voice)}_{name}.wav', audio, tts.output_sample_rate) - - del gen - do_gc() - INFERENCING = False - - for k in audio_cache: - audio = audio_cache[k]['audio'] - - audio, _ = resample(audio, tts.output_sample_rate, args.output_sample_rate) - if volume_adjust is not None: - audio = volume_adjust(audio) - - audio_cache[k]['audio'] = audio - torchaudio.save(f'{outdir}/{cleanup_voice_name(voice)}_{k}.wav', audio, args.output_sample_rate) - - output_voices = [] - for candidate in range(parameters['candidates']): - if len(texts) > 1: - audio_clips = [] - for line in range(len(texts)): - name = get_name(line=line, candidate=candidate) - audio = audio_cache[name]['audio'] - audio_clips.append(audio) - - name = get_name(candidate=candidate, combined=True) - audio = torch.cat(audio_clips, dim=-1) - torchaudio.save(f'{outdir}/{cleanup_voice_name(voice)}_{name}.wav', audio, args.output_sample_rate) - - audio = audio.squeeze(0).cpu() - audio_cache[name] = { - 'audio': audio, - 'settings': get_info(voice=voice), - 'output': True - } - else: - name = get_name(candidate=candidate) - audio_cache[name]['output'] = True - - - if args.voice_fixer: - if not voicefixer: - notify_progress("Loading voicefix...", progress=progress) - load_voicefixer() - - try: - fixed_cache = {} - for name in tqdm(audio_cache, desc="Running voicefix..."): - del audio_cache[name]['audio'] - if 'output' not in audio_cache[name] or not audio_cache[name]['output']: - continue - - path = f'{outdir}/{cleanup_voice_name(voice)}_{name}.wav' - fixed = f'{outdir}/{cleanup_voice_name(voice)}_{name}_fixed.wav' - voicefixer.restore( - input=path, - output=fixed, - cuda=get_device_name() == "cuda" and args.voice_fixer_use_cuda, - #mode=mode, - ) - - fixed_cache[f'{name}_fixed'] = { - 'settings': audio_cache[name]['settings'], - 'output': True - } - audio_cache[name]['output'] = False - - for name in fixed_cache: - audio_cache[name] = fixed_cache[name] - except Exception as e: - print(e) - print("\nFailed to run Voicefixer") - - for name in audio_cache: - if 'output' not in audio_cache[name] or not audio_cache[name]['output']: - if args.prune_nonfinal_outputs: - audio_cache[name]['pruned'] = True - os.remove(f'{outdir}/{cleanup_voice_name(voice)}_{name}.wav') - continue - - output_voices.append(f'{outdir}/{cleanup_voice_name(voice)}_{name}.wav') - - if not 
args.embed_output_metadata: - with open(f'{outdir}/{cleanup_voice_name(voice)}_{name}.json', 'w', encoding="utf-8") as f: - f.write(json.dumps(audio_cache[name]['settings'], indent='\t') ) - - if args.embed_output_metadata: - for name in tqdm(audio_cache, desc="Embedding metadata..."): - if 'pruned' in audio_cache[name] and audio_cache[name]['pruned']: - continue - - metadata = music_tag.load_file(f"{outdir}/{cleanup_voice_name(voice)}_{name}.wav") - metadata['lyrics'] = json.dumps(audio_cache[name]['settings']) - metadata.save() - - if sample_voice is not None: - sample_voice = (tts.input_sample_rate, sample_voice.numpy()) - - info = get_info(voice=voice, latents=False) - print(f"Generation took {info['time']} seconds, saved to '{output_voices[0]}'\n") - - info['seed'] = usedSeed - if 'latents' in info: - del info['latents'] - - os.makedirs('./config/', exist_ok=True) - with open(f'./config/generate.json', 'w', encoding="utf-8") as f: - f.write(json.dumps(info, indent='\t') ) - - stats = [ - [ parameters['seed'], "{:.3f}".format(info['time']) ] - ] - - return ( - sample_voice, - output_voices, - stats, - ) - -def cancel_generate(): - if not INFERENCING: - return - - import tortoise.api - - tortoise.api.STOP_SIGNAL = True - -def hash_file(path, algo="md5", buffer_size=0): - hash = None - if algo == "md5": - hash = hashlib.md5() - elif algo == "sha1": - hash = hashlib.sha1() - else: - raise Exception(f'Unknown hash algorithm specified: {algo}') - - if not os.path.exists(path): - raise Exception(f'Path not found: {path}') - - with open(path, 'rb') as f: - if buffer_size > 0: - while True: - data = f.read(buffer_size) - if not data: - break - hash.update(data) - else: - hash.update(f.read()) - - return "{0}".format(hash.hexdigest()) - -def update_baseline_for_latents_chunks( voice ): - global current_voice - current_voice = voice - - path = f'{get_voice_dir()}/{voice}/' - if not os.path.isdir(path): - return 1 - - dataset_file = f'./training/{voice}/train.txt' - if os.path.exists(dataset_file): - return 0 # 0 will leverage using the LJspeech dataset for computing latents - - files = os.listdir(path) - - total = 0 - total_duration = 0 - - for file in files: - if file[-4:] != ".wav": - continue - - metadata = torchaudio.info(f'{path}/{file}') - duration = metadata.num_frames / metadata.sample_rate - total_duration += duration - total = total + 1 - - - # brain too fried to figure out a better way - if args.autocalculate_voice_chunk_duration_size == 0: - return int(total_duration / total) if total > 0 else 1 - return int(total_duration / args.autocalculate_voice_chunk_duration_size) if total_duration > 0 else 1 - -def compute_latents(voice=None, voice_samples=None, voice_latents_chunks=0, original_ar=False, original_diffusion=False): - global tts - global args - - unload_whisper() - unload_voicefixer() - - if not tts: - if tts_loading: - raise Exception("TTS is still initializing...") - load_tts() - - if hasattr(tts, "loading") and tts.loading: - raise Exception("TTS is still initializing...") - - if args.tts_backend == "bark": - tts.create_voice( voice ) - return - - if args.autoregressive_model == "auto": - tts.load_autoregressive_model(deduce_autoregressive_model(voice)) - - if voice: - load_from_dataset = voice_latents_chunks == 0 - - if load_from_dataset: - dataset_path = f'./training/{voice}/train.txt' - if not os.path.exists(dataset_path): - load_from_dataset = False - else: - with open(dataset_path, 'r', encoding="utf-8") as f: - lines = f.readlines() - - print("Leveraging dataset for 
computing latents") - - voice_samples = [] - max_length = 0 - for line in lines: - filename = f'./training/{voice}/{line.split("|")[0]}' - - waveform = load_audio(filename, 22050) - max_length = max(max_length, waveform.shape[-1]) - voice_samples.append(waveform) - - for i in range(len(voice_samples)): - voice_samples[i] = pad_or_truncate(voice_samples[i], max_length) - - voice_latents_chunks = len(voice_samples) - if voice_latents_chunks == 0: - print("Dataset is empty!") - load_from_dataset = True - if not load_from_dataset: - voice_samples, _ = load_voice(voice, load_latents=False) - - if voice_samples is None: - return - - conditioning_latents = tts.get_conditioning_latents(voice_samples, return_mels=not args.latents_lean_and_mean, slices=voice_latents_chunks, force_cpu=args.force_cpu_for_conditioning_latents, original_ar=original_ar, original_diffusion=original_diffusion) - - if len(conditioning_latents) == 4: - conditioning_latents = (conditioning_latents[0], conditioning_latents[1], conditioning_latents[2], None) - - outfile = f'{get_voice_dir()}/{voice}/cond_latents_{tts.autoregressive_model_hash[:8]}.pth' - torch.save(conditioning_latents, outfile) - print(f'Saved voice latents: {outfile}') - - return conditioning_latents - -# superfluous, but it cleans up some things -class TrainingState(): - def __init__(self, config_path, keep_x_past_checkpoints=0, start=True): - self.killed = False - - self.training_dir = os.path.dirname(config_path) - with open(config_path, 'r') as file: - self.yaml_config = yaml.safe_load(file) - - self.json_config = json.load(open(f"{self.training_dir}/train.json", 'r', encoding="utf-8")) - self.dataset_path = f"{self.training_dir}/train.txt" - with open(self.dataset_path, 'r', encoding="utf-8") as f: - self.dataset_size = len(f.readlines()) - - self.batch_size = self.json_config["batch_size"] - self.save_rate = self.json_config["save_rate"] - - self.epoch = 0 - self.epochs = self.json_config["epochs"] - self.it = 0 - self.its = calc_iterations( self.epochs, self.dataset_size, self.batch_size ) - self.step = 0 - self.steps = int(self.its / self.dataset_size) - self.checkpoint = 0 - self.checkpoints = int((self.its - self.it) / self.save_rate) - - self.gpus = self.json_config['gpus'] - - self.buffer = [] - - self.open_state = False - self.training_started = False - - self.info = {} - - self.it_rate = "" - self.it_rates = 0 - - self.epoch_rate = "" - - self.eta = "?" - self.eta_hhmmss = "?" 
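# The rate/ETA fields above start out unknown; they are filled in incrementally
# by parse_metrics() as metric lines are streamed from the spawned training process.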
- - self.nan_detected = False - - self.last_info_check_at = 0 - self.statistics = { - 'loss': [], - 'lr': [], - 'grad_norm': [], - } - self.losses = [] - self.metrics = { - 'step': "", - 'rate': "", - 'loss': "", - } - - self.loss_milestones = [ 1.0, 0.15, 0.05 ] - - if args.tts_backend=="vall-e": - self.valle_last_it = 0 - self.valle_steps = 0 - - if keep_x_past_checkpoints > 0: - self.cleanup_old(keep=keep_x_past_checkpoints) - if start: - self.spawn_process(config_path=config_path, gpus=self.gpus) - - def spawn_process(self, config_path, gpus=1): - if args.tts_backend == "vall-e": - self.cmd = ['deepspeed', f'--num_gpus={gpus}', '--module', 'vall_e.train', f'yaml="{config_path}"'] - else: - self.cmd = ['train.bat', config_path] if os.name == "nt" else ['./train.sh', config_path] - - print("Spawning process: ", " ".join(self.cmd)) - self.process = subprocess.Popen(self.cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True) - - def parse_metrics(self, data): - if isinstance(data, str): - if line.find('Training Metrics:') >= 0: - data = json.loads(line.split("Training Metrics:")[-1]) - data['mode'] = "training" - elif line.find('Validation Metrics:') >= 0: - data = json.loads(line.split("Validation Metrics:")[-1]) - data['mode'] = "validation" - else: - return - - self.info = data - if 'epoch' in self.info: - self.epoch = int(self.info['epoch']) - if 'it' in self.info: - self.it = int(self.info['it']) - if 'step' in self.info: - self.step = int(self.info['step']) - if 'steps' in self.info: - self.steps = int(self.info['steps']) - - if 'elapsed_time' in self.info: - self.info['iteration_rate'] = self.info['elapsed_time'] - del self.info['elapsed_time'] - - if 'iteration_rate' in self.info: - it_rate = self.info['iteration_rate'] - self.it_rate = f'{"{:.3f}".format(1/it_rate)}it/s' if 0 < it_rate and it_rate < 1 else f'{"{:.3f}".format(it_rate)}s/it' - self.it_rates += it_rate - - if self.it_rates > 0 and self.it * self.steps > 0: - epoch_rate = self.it_rates / self.it * self.steps - self.epoch_rate = f'{"{:.3f}".format(1/epoch_rate)}epoch/s' if 0 < epoch_rate and epoch_rate < 1 else f'{"{:.3f}".format(epoch_rate)}s/epoch' - - try: - self.eta = (self.its - self.it) * (self.it_rates / self.it) - eta = str(timedelta(seconds=int(self.eta))) - self.eta_hhmmss = eta - except Exception as e: - self.eta_hhmmss = "?" 
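# The ETA math divides by self.it, which can still be zero early in training;
# on any error just report an unknown ETA and keep parsing.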
- pass - - self.metrics['step'] = [f"{self.epoch}/{self.epochs}"] - if self.epochs != self.its: - self.metrics['step'].append(f"{self.it}/{self.its}") - if self.steps > 1: - self.metrics['step'].append(f"{self.step}/{self.steps}") - self.metrics['step'] = ", ".join(self.metrics['step']) - - if args.tts_backend == "tortoise": - epoch = self.epoch + (self.step / self.steps) - else: - epoch = self.info['epoch'] if 'epoch' in self.info else self.it - - if self.it > 0: - # probably can double for-loop but whatever - keys = { - 'lrs': ['lr'], - 'losses': ['loss_text_ce', 'loss_mel_ce'], - 'accuracies': [], - 'precisions': [], - 'grad_norms': [], - } - if args.tts_backend == "vall-e": - keys['lrs'] = [ - 'ar.lr', 'nar.lr', - ] - keys['losses'] = [ - # 'ar.loss', 'nar.loss', 'ar+nar.loss', - 'ar.loss.nll', 'nar.loss.nll', - ] - - keys['accuracies'] = [ - 'ar.loss.acc', 'nar.loss.acc', - 'ar.stats.acc', 'nar.loss.acc', - ] - keys['precisions'] = [ 'ar.loss.precision', 'nar.loss.precision', ] - keys['grad_norms'] = ['ar.grad_norm', 'nar.grad_norm'] - - for k in keys['lrs']: - if k not in self.info: - continue - - self.statistics['lr'].append({'epoch': epoch, 'it': self.it, 'value': self.info[k], 'type': k}) - - for k in keys['accuracies']: - if k not in self.info: - continue - - self.statistics['loss'].append({'epoch': epoch, 'it': self.it, 'value': self.info[k], 'type': k}) - - for k in keys['precisions']: - if k not in self.info: - continue - - self.statistics['loss'].append({'epoch': epoch, 'it': self.it, 'value': self.info[k], 'type': k}) - - for k in keys['losses']: - if k not in self.info: - continue - - prefix = "" - - if "mode" in self.info and self.info["mode"] == "validation": - prefix = f'{self.info["name"] if "name" in self.info else "val"}_' - - self.statistics['loss'].append({'epoch': epoch, 'it': self.it, 'value': self.info[k], 'type': f'{prefix}{k}' }) - - self.losses.append( self.statistics['loss'][-1] ) - - for k in keys['grad_norms']: - if k not in self.info: - continue - self.statistics['grad_norm'].append({'epoch': epoch, 'it': self.it, 'value': self.info[k], 'type': k}) - - return data - - def get_status(self): - message = None - - self.metrics['rate'] = [] - if self.epoch_rate: - self.metrics['rate'].append(self.epoch_rate) - if self.it_rate and self.epoch_rate[:-7] != self.it_rate[:-4]: - self.metrics['rate'].append(self.it_rate) - self.metrics['rate'] = ", ".join(self.metrics['rate']) - - eta_hhmmss = self.eta_hhmmss if self.eta_hhmmss else "?" 
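# Build the loss portion of the status line from the most recently parsed
# metrics: the current learning rate (if reported) and the latest loss value.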
- - self.metrics['loss'] = [] - if 'lr' in self.info: - self.metrics['loss'].append(f'LR: {"{:.3e}".format(self.info["lr"])}') - - if len(self.losses) > 0: - self.metrics['loss'].append(f'Loss: {"{:.3f}".format(self.losses[-1]["value"])}') - - if False and len(self.losses) >= 2: - deriv = 0 - accum_length = len(self.losses)//2 # i *guess* this is fine when you think about it - loss_value = self.losses[-1]["value"] - - for i in range(accum_length): - d1_loss = self.losses[accum_length-i-1]["value"] - d2_loss = self.losses[accum_length-i-2]["value"] - dloss = (d2_loss - d1_loss) - - d1_step = self.losses[accum_length-i-1]["it"] - d2_step = self.losses[accum_length-i-2]["it"] - dstep = (d2_step - d1_step) - - if dstep == 0: - continue - - inst_deriv = dloss / dstep - deriv += inst_deriv - - deriv = deriv / accum_length - - print("Deriv: ", deriv) - - if deriv != 0: # dloss < 0: - next_milestone = None - for milestone in self.loss_milestones: - if loss_value > milestone: - next_milestone = milestone - break - - print(f"Loss value: {loss_value} | Next milestone: {next_milestone} | Distance: {loss_value - next_milestone}") - - if next_milestone: - # tfw can do simple calculus but not basic algebra in my head - est_its = (next_milestone - loss_value) / deriv * 100 - print(f"Estimated: {est_its}") - if est_its >= 0: - self.metrics['loss'].append(f'Est. milestone {next_milestone} in: {int(est_its)}its') - else: - est_loss = inst_deriv * (self.its - self.it) + loss_value - if est_loss >= 0: - self.metrics['loss'].append(f'Est. final loss: {"{:.3f}".format(est_loss)}') - - self.metrics['loss'] = ", ".join(self.metrics['loss']) - - message = f"[{self.metrics['step']}] [{self.metrics['rate']}] [ETA: {eta_hhmmss}] [{self.metrics['loss']}]" - if self.nan_detected: - message = f"[!NaN DETECTED! 
{self.nan_detected}] {message}" - - return message - - def load_statistics(self, update=False): - if not os.path.isdir(self.training_dir): - return - - if args.tts_backend == "tortoise": - logs = sorted([f'{self.training_dir}/finetune/{d}' for d in os.listdir(f'{self.training_dir}/finetune/') if d[-4:] == ".log" ]) - else: - log_dir = "logs" - logs = sorted([f'{self.training_dir}/{log_dir}/{d}/log.txt' for d in os.listdir(f'{self.training_dir}/{log_dir}/') ]) - - if update: - logs = [logs[-1]] - - infos = {} - highest_step = self.last_info_check_at - - if not update: - self.statistics['loss'] = [] - self.statistics['lr'] = [] - self.statistics['grad_norm'] = [] - self.it_rates = 0 - - unq = {} - averager = None - prev_state = 0 - - for log in logs: - with open(log, 'r', encoding="utf-8") as f: - lines = f.readlines() - - for line in lines: - line = line.strip() - if not line: - continue - - if line[-1] == ".": - line = line[:-1] - - if line.find('Training Metrics:') >= 0: - split = line.split("Training Metrics:")[-1] - data = json.loads(split) - - name = "train" - mode = "training" - prev_state = 0 - elif line.find('Validation Metrics:') >= 0: - data = json.loads(line.split("Validation Metrics:")[-1]) - if "it" not in data: - data['it'] = it - if "epoch" not in data: - data['epoch'] = epoch - - # name = data['name'] if 'name' in data else "val" - mode = "validation" - - if prev_state == 0: - name = "subtrain" - else: - name = "val" - - prev_state += 1 - else: - continue - - if "it" not in data: - continue - - it = data['it'] - epoch = data['epoch'] - - if args.tts_backend == "vall-e": - if not averager or averager['key'] != f'{it}_{name}' or averager['mode'] != mode: - averager = { - 'key': f'{it}_{name}', - 'name': name, - 'mode': mode, - "metrics": {} - } - for k in data: - if data[k] is None: - continue - averager['metrics'][k] = [ data[k] ] - else: - for k in data: - if data[k] is None: - continue - if k not in averager['metrics']: - averager['metrics'][k] = [ data[k] ] - else: - averager['metrics'][k].append( data[k] ) - - unq[f'{it}_{mode}_{name}'] = averager - else: - unq[f'{it}_{mode}_{name}'] = data - - if update and it <= self.last_info_check_at: - continue - - blacklist = [ "batch", "eval" ] - for it in unq: - if args.tts_backend == "vall-e": - stats = unq[it] - data = {k: sum(v) / len(v) for k, v in stats['metrics'].items() if k not in blacklist } - #data = {k: min(v) for k, v in stats['metrics'].items() if k not in blacklist } - #data = {k: max(v) for k, v in stats['metrics'].items() if k not in blacklist } - data['name'] = stats['name'] - data['mode'] = stats['mode'] - data['steps'] = len(stats['metrics']['it']) - else: - data = unq[it] - self.parse_metrics(data) - - self.last_info_check_at = highest_step - - def cleanup_old(self, keep=2): - if keep <= 0: - return - - if args.tts_backend == "vall-e": - return - - if not os.path.isdir(f'{self.training_dir}/finetune/'): - return - - models = sorted([ int(d[:-8]) for d in os.listdir(f'{self.training_dir}/finetune/models/') if d[-8:] == "_gpt.pth" ]) - states = sorted([ int(d[:-6]) for d in os.listdir(f'{self.training_dir}/finetune/training_state/') if d[-6:] == ".state" ]) - remove_models = models[:-keep] - remove_states = states[:-keep] - - for d in remove_models: - path = f'{self.training_dir}/finetune/models/{d}_gpt.pth' - print("Removing", path) - os.remove(path) - for d in remove_states: - path = f'{self.training_dir}/finetune/training_state/{d}.state' - print("Removing", path) - os.remove(path) - - def parse(self, line, 
verbose=False, keep_x_past_checkpoints=0, buffer_size=8, progress=None ): - self.buffer.append(f'{line}') - - data = None - percent = 0 - message = None - should_return = False - - MESSAGE_START = 'Start training from epoch' - MESSAGE_FINSIHED = 'Finished training' - MESSAGE_SAVING = 'Saving models and training states.' - - MESSAGE_METRICS_TRAINING = 'Training Metrics:' - MESSAGE_METRICS_VALIDATION = 'Validation Metrics:' - - if line.find(MESSAGE_FINSIHED) >= 0: - self.killed = True - # rip out iteration info - elif not self.training_started: - if line.find(MESSAGE_START) >= 0: - self.training_started = True # could just leverage the above variable, but this is python, and there's no point in these aggressive microoptimizations - - match = re.findall(r'epoch: ([\d,]+)', line) - if match and len(match) > 0: - self.epoch = int(match[0].replace(",", "")) - match = re.findall(r'iter: ([\d,]+)', line) - if match and len(match) > 0: - self.it = int(match[0].replace(",", "")) - - self.checkpoints = int((self.its - self.it) / self.save_rate) - - self.load_statistics() - - should_return = True - else: - if line.find(MESSAGE_SAVING) >= 0: - self.checkpoint += 1 - message = f"[{self.checkpoint}/{self.checkpoints}] Saving checkpoint..." - percent = self.checkpoint / self.checkpoints - - self.cleanup_old(keep=keep_x_past_checkpoints) - elif line.find(MESSAGE_METRICS_TRAINING) >= 0: - data = json.loads(line.split(MESSAGE_METRICS_TRAINING)[-1]) - data['mode'] = "training" - elif line.find(MESSAGE_METRICS_VALIDATION) >= 0: - data = json.loads(line.split(MESSAGE_METRICS_VALIDATION)[-1]) - data['mode'] = "validation" - - if data is not None: - if ': nan' in line and not self.nan_detected: - self.nan_detected = self.it - - self.parse_metrics( data ) - message = self.get_status() - - if message: - percent = self.it / float(self.its) # self.epoch / float(self.epochs) - if progress is not None: - progress(percent, message) - - self.buffer.append(f'[{"{:.3f}".format(percent*100)}%] {message}') - should_return = True - - if verbose and not self.training_started: - should_return = True - - self.buffer = self.buffer[-buffer_size:] - - result = None - if should_return: - result = "".join(self.buffer) if not self.training_started else message - - return ( - result, - percent, - message, - ) - -try: - import altair as alt - alt.data_transformers.enable('default', max_rows=None) -except Exception as e: - print(e) - pass - -def run_training(config_path, verbose=False, keep_x_past_checkpoints=0, progress=gr.Progress(track_tqdm=True)): - global training_state - if training_state and training_state.process: - return "Training already in progress" - - - # ensure we have the dvae.pth - if args.tts_backend == "tortoise": - get_model_path('dvae.pth') - - # I don't know if this is still necessary, as it was bitching at me for not doing this, despite it being in a separate process - torch.multiprocessing.freeze_support() - - unload_tts() - unload_whisper() - unload_voicefixer() - - training_state = TrainingState(config_path=config_path, keep_x_past_checkpoints=keep_x_past_checkpoints) - - for line in iter(training_state.process.stdout.readline, ""): - if training_state is None or training_state.killed: - return - - result, percent, message = training_state.parse( line=line, verbose=verbose, keep_x_past_checkpoints=keep_x_past_checkpoints, progress=progress ) - print(f"[Training] [{datetime.now().isoformat()}] {line[:-1]}") - if result: - yield result - - if progress is not None and message: - progress(percent, message) - - if 
training_state: - training_state.process.stdout.close() - return_code = training_state.process.wait() - training_state = None - -def update_training_dataplot(x_min=None, x_max=None, y_min=None, y_max=None, config_path=None): - global training_state - losses = None - lrs = None - grad_norms = None - - x_lim = [ x_min, x_max ] - y_lim = [ y_min, y_max ] - - if not training_state: - if config_path: - training_state = TrainingState(config_path=config_path, start=False) - training_state.load_statistics() - message = training_state.get_status() - - if training_state: - if not x_lim[-1]: - x_lim[-1] = training_state.epochs - - if not y_lim[-1]: - y_lim = None - - if len(training_state.statistics['loss']) > 0: - losses = gr.LinePlot.update( - value = pd.DataFrame(training_state.statistics['loss']), - x_lim=x_lim, y_lim=y_lim, - x="epoch", y="value", # x="it", - title="Loss Metrics", color="type", tooltip=['epoch', 'it', 'value', 'type'], - width=500, height=350 - ) - if len(training_state.statistics['lr']) > 0: - lrs = gr.LinePlot.update( - value = pd.DataFrame(training_state.statistics['lr']), - x_lim=x_lim, - x="epoch", y="value", # x="it", - title="Learning Rate", color="type", tooltip=['epoch', 'it', 'value', 'type'], - width=500, height=350 - ) - if len(training_state.statistics['grad_norm']) > 0: - grad_norms = gr.LinePlot.update( - value = pd.DataFrame(training_state.statistics['grad_norm']), - x_lim=x_lim, - x="epoch", y="value", # x="it", - title="Gradient Normals", color="type", tooltip=['epoch', 'it', 'value', 'type'], - width=500, height=350 - ) - - if config_path: - del training_state - training_state = None - - return (losses, lrs, grad_norms) - -def reconnect_training(verbose=False, progress=gr.Progress(track_tqdm=True)): - global training_state - if not training_state or not training_state.process: - return "Training not in progress" - - for line in iter(training_state.process.stdout.readline, ""): - result, percent, message = training_state.parse( line=line, verbose=verbose, progress=progress ) - print(f"[Training] [{datetime.now().isoformat()}] {line[:-1]}") - if result: - yield result - - if progress is not None and message: - progress(percent, message) - -def stop_training(): - global training_state - if training_state is None: - return "No training in progress" - print("Killing training process...") - training_state.killed = True - - children = [] - if args.tts_backend == "tortoise": - # wrapped in a try/catch in case for some reason this fails outside of Linux - try: - children = [p.info for p in psutil.process_iter(attrs=['pid', 'name', 'cmdline']) if './src/train.py' in p.info['cmdline']] - except Exception as e: - pass - - training_state.process.stdout.close() - training_state.process.terminate() - training_state.process.kill() - elif args.tts_backend == "vall-e": - print(training_state.process.communicate(input='quit')[0]) - - return_code = training_state.process.wait() - - for p in children: - os.kill( p['pid'], signal.SIGKILL ) - - training_state = None - print("Killed training process.") - return f"Training cancelled: {return_code}" - -def get_halfp_model_path(): - autoregressive_model_path = get_model_path('autoregressive.pth') - return autoregressive_model_path.replace(".pth", "_half.pth") - -def convert_to_halfp(): - autoregressive_model_path = get_model_path('autoregressive.pth') - print(f'Converting model to half precision: {autoregressive_model_path}') - model = torch.load(autoregressive_model_path) - for k in model: - model[k] = model[k].half() - - outfile = 
get_halfp_model_path() - torch.save(model, outfile) - print(f'Converted model to half precision: {outfile}') - - -# collapses short segments into the previous segment -def whisper_sanitize( results ): - sanitized = json.loads(json.dumps(results)) - sanitized['segments'] = [] - - for segment in results['segments']: - length = segment['end'] - segment['start'] - if length >= MIN_TRAINING_DURATION or len(sanitized['segments']) == 0: - sanitized['segments'].append(segment) - continue - - last_segment = sanitized['segments'][-1] - # segment already asimilitated it, somehow - if last_segment['end'] >= segment['end']: - continue - """ - # segment already asimilitated it, somehow - if last_segment['text'].endswith(segment['text']): - continue - """ - last_segment['text'] += segment['text'] - last_segment['end'] = segment['end'] - - for i in range(len(sanitized['segments'])): - sanitized['segments'][i]['id'] = i - - return sanitized - -def whisper_transcribe( file, language=None ): - # shouldn't happen, but it's for safety - global whisper_model - global whisper_align_model - - if not whisper_model: - load_whisper_model(language=language) - - if args.whisper_backend == "openai/whisper": - if not language: - language = None - - return whisper_model.transcribe(file, language=language) - - if args.whisper_backend == "lightmare/whispercpp": - res = whisper_model.transcribe(file) - segments = whisper_model.extract_text_and_timestamps( res ) - - result = { - 'text': [], - 'segments': [] - } - for segment in segments: - reparsed = { - 'start': segment[0] / 100.0, - 'end': segment[1] / 100.0, - 'text': segment[2], - 'id': len(result['segments']) - } - result['text'].append( segment[2] ) - result['segments'].append(reparsed) - - result['text'] = " ".join(result['text']) - return result - - if args.whisper_backend == "m-bain/whisperx": - import whisperx - - device = "cuda" if get_device_name() == "cuda" else "cpu" - result = whisper_model.transcribe(file, batch_size=args.whisper_batchsize) - - align_model, metadata = whisper_align_model - result_aligned = whisperx.align(result["segments"], align_model, metadata, file, device, return_char_alignments=False) - - result['segments'] = result_aligned['segments'] - result['text'] = [] - for segment in result['segments']: - segment['id'] = len(result['text']) - result['text'].append(segment['text'].strip()) - result['text'] = " ".join(result['text']) - - return result - -def validate_waveform( waveform, sample_rate, min_only=False ): - if not torch.any(waveform < 0): - return "Waveform is empty" - - num_channels, num_frames = waveform.shape - duration = num_frames / sample_rate - - if duration < MIN_TRAINING_DURATION: - return "Duration too short ({:.3f}s < {:.3f}s)".format(duration, MIN_TRAINING_DURATION) - - if not min_only: - if duration > MAX_TRAINING_DURATION: - return "Duration too long ({:.3f}s < {:.3f}s)".format(MAX_TRAINING_DURATION, duration) - - return - -def transcribe_dataset( voice, language=None, skip_existings=False, progress=None ): - unload_tts() - - global whisper_model - if whisper_model is None: - load_whisper_model(language=language) - - results = {} - - files = get_voice(voice, load_latents=False) - indir = f'./training/{voice}/' - infile = f'{indir}/whisper.json' - - quantize_in_memory = args.tts_backend == "vall-e" - - os.makedirs(f'{indir}/audio/', exist_ok=True) - - TARGET_SAMPLE_RATE = 22050 - if args.tts_backend != "tortoise": - TARGET_SAMPLE_RATE = 24000 - if tts: - TARGET_SAMPLE_RATE = tts.input_sample_rate - - if 
os.path.exists(infile): - results = json.load(open(infile, 'r', encoding="utf-8")) - - for file in tqdm(files, desc="Iterating through voice files"): - basename = os.path.basename(file) - - if basename in results and skip_existings: - print(f"Skipping already parsed file: {basename}") - continue - - try: - result = whisper_transcribe(file, language=language) - except Exception as e: - print("Failed to transcribe:", file, e) - continue - - results[basename] = result - - if not quantize_in_memory: - waveform, sample_rate = torchaudio.load(file) - # resample to the input rate, since it'll get resampled for training anyways - # this should also "help" increase throughput a bit when filling the dataloaders - waveform, sample_rate = resample(waveform, sample_rate, TARGET_SAMPLE_RATE) - if waveform.shape[0] == 2: - waveform = waveform[:1] - - try: - kwargs = {} - if basename[-4:] == ".wav": - kwargs['encoding'] = "PCM_S" - kwargs['bits_per_sample'] = 16 - - torchaudio.save(f"{indir}/audio/{basename}", waveform, sample_rate, **kwargs) - except Exception as e: - print(e) - - with open(infile, 'w', encoding="utf-8") as f: - f.write(json.dumps(results, indent='\t')) - - do_gc() - - modified = False - for basename in results: - try: - sanitized = whisper_sanitize(results[basename]) - if len(sanitized['segments']) > 0 and len(sanitized['segments']) != len(results[basename]['segments']): - results[basename] = sanitized - modified = True - print("Segments sanizited: ", basename) - except Exception as e: - print("Failed to sanitize:", basename, e) - pass - - if modified: - os.rename(infile, infile.replace(".json", ".unsanitized.json")) - with open(infile, 'w', encoding="utf-8") as f: - f.write(json.dumps(results, indent='\t')) - - return f"Processed dataset to: {indir}" - -def slice_waveform( waveform, sample_rate, start, end, trim ): - start = int(start * sample_rate) - end = int(end * sample_rate) - - if start < 0: - start = 0 - if end >= waveform.shape[-1]: - end = waveform.shape[-1] - 1 - - sliced = waveform[:, start:end] - - error = validate_waveform( sliced, sample_rate, min_only=True ) - if trim and not error: - sliced = torchaudio.functional.vad( sliced, sample_rate ) - - return sliced, error - -def slice_dataset( voice, trim_silence=True, start_offset=0, end_offset=0, results=None, progress=gr.Progress() ): - indir = f'./training/{voice}/' - infile = f'{indir}/whisper.json' - messages = [] - - if not os.path.exists(infile): - message = f"Missing dataset: {infile}" - print(message) - return message - - if results is None: - results = json.load(open(infile, 'r', encoding="utf-8")) - - TARGET_SAMPLE_RATE = 22050 - if args.tts_backend != "tortoise": - TARGET_SAMPLE_RATE = 24000 - if tts: - TARGET_SAMPLE_RATE = tts.input_sample_rate - - files = 0 - segments = 0 - for filename in results: - path = f'./voices/{voice}/{filename}' - extension = os.path.splitext(filename)[-1][1:] - out_extension = extension # "wav" - - if not os.path.exists(path): - path = f'./training/{voice}/{filename}' - - if not os.path.exists(path): - message = f"Missing source audio: {filename}" - print(message) - messages.append(message) - continue - - files += 1 - result = results[filename] - waveform, sample_rate = torchaudio.load(path) - num_channels, num_frames = waveform.shape - duration = num_frames / sample_rate - - for segment in result['segments']: - file = filename.replace(f".{extension}", f"_{pad(segment['id'], 4)}.{out_extension}") - - sliced, error = slice_waveform( waveform, sample_rate, segment['start'] + 
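# Back-of-the-envelope for slice_waveform() above: segment timestamps are simply scaled by the
# sample rate and used as frame indices, e.g. a 1.5 s..3.0 s segment of a 22050 Hz file:
#   start = int(1.5 * 22050)            # 33075
#   end   = int(3.0 * 22050)            # 66150
#   sliced = waveform[:, 33075:66150]   # ~1.5 s of audio, all channels kept
# with trim_silence additionally passing the slice through torchaudio.functional.vad().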
start_offset, segment['end'] + end_offset, trim_silence ) - if error: - message = f"{error}, skipping... {file}" - print(message) - messages.append(message) - continue - - sliced, _ = resample( sliced, sample_rate, TARGET_SAMPLE_RATE ) - - if waveform.shape[0] == 2: - waveform = waveform[:1] - - kwargs = {} - if file[-4:] == ".wav": - kwargs['encoding'] = "PCM_S" - kwargs['bits_per_sample'] = 16 - - torchaudio.save(f"{indir}/audio/{file}", sliced, TARGET_SAMPLE_RATE, **kwargs) - - segments +=1 - - messages.append(f"Sliced segments: {files} => {segments}.") - return "\n".join(messages) - -# takes an LJSpeech-dataset-formatted .txt file and phonemize it -def phonemize_txt_file( path ): - with open(path, 'r', encoding='utf-8') as f: - lines = f.readlines() - - reparsed = [] - with open(path.replace(".txt", ".phn.txt"), 'a', encoding='utf-8') as f: - for line in tqdm(lines, desc='Phonemizing...'): - split = line.split("|") - audio = split[0] - text = split[2] - - phonemes = phonemizer( text ) - reparsed.append(f'{audio}|{phonemes}') - f.write(f'\n{audio}|{phonemes}') - - - joined = "\n".join(reparsed) - with open(path.replace(".txt", ".phn.txt"), 'w', encoding='utf-8') as f: - f.write(joined) - - return joined - -# takes an LJSpeech-dataset-formatted .txt (and phonemized .phn.txt from the above) and creates a JSON that should slot in as whisper.json -def create_dataset_json( path ): - with open(path, 'r', encoding='utf-8') as f: - lines = f.readlines() - - phonemes = None - phn_path = path.replace(".txt", ".phn.txt") - if os.path.exists(phn_path): - with open(phn_path, 'r', encoding='utf-8') as f: - phonemes = f.readlines() - - data = {} - - for line in lines: - split = line.split("|") - audio = split[0] - text = split[1] - - data[audio] = { - 'text': text.strip() - } - - for line in phonemes: - split = line.split("|") - audio = split[0] - text = split[1] - - data[audio]['phonemes'] = text.strip() - - with open(path.replace(".txt", ".json"), 'w', encoding='utf-8') as f: - f.write(json.dumps(data, indent="\t")) - - -cached_backends = {} - -def phonemizer( text, language="en-us" ): - from phonemizer import phonemize - from phonemizer.backend import BACKENDS - - def _get_backend( language="en-us", backend="espeak" ): - key = f'{language}_{backend}' - if key in cached_backends: - return cached_backends[key] - - if backend == 'espeak': - phonemizer = BACKENDS[backend]( language, preserve_punctuation=True, with_stress=True) - elif backend == 'espeak-mbrola': - phonemizer = BACKENDS[backend]( language ) - else: - phonemizer = BACKENDS[backend]( language, preserve_punctuation=True ) - - cached_backends[key] = phonemizer - return phonemizer - if language == "en": - language = "en-us" - - backend = _get_backend(language=language, backend=args.phonemizer_backend) - if backend is not None: - tokens = backend.phonemize( [text], strip=True ) - else: - tokens = phonemize( [text], language=language, strip=True, preserve_punctuation=True, with_stress=True ) - - return tokens[0] if len(tokens) == 0 else tokens - tokenized = " ".join( tokens ) - -def should_phonemize(): - if args.tts_backend == "vall-e": - return False - - should = args.tokenizer_json is not None and args.tokenizer_json[-8:] == "ipa.json" - if should: - try: - from phonemizer import phonemize - except Exception as e: - return False - return should - -def prepare_dataset( voice, use_segments=False, text_length=0, audio_length=0, progress=gr.Progress() ): - indir = f'./training/{voice}/' - infile = f'{indir}/whisper.json' - if not 
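# Both LJSpeech helpers above assume three pipe-separated fields per line
# (audio | raw text | normalized text); an invented example of the round trip:
#   train.txt line:      audio/LJ001-0001.wav|Printing, in the only sense.|printing in the only sense
#   train.phn.txt line:  audio/LJ001-0001.wav|pɹˈɪntɪŋ ɪn ðɪ ˈoʊnli sˈɛns
#   create_dataset_json: {"audio/LJ001-0001.wav": {"text": "Printing, in the only sense.",
#                                                  "phonemes": "pɹˈɪntɪŋ ɪn ðɪ ˈoʊnli sˈɛns"}}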
os.path.exists(infile): - message = f"Missing dataset: {infile}" - print(message) - return message - - results = json.load(open(infile, 'r', encoding="utf-8")) - - errored = 0 - messages = [] - normalize = False # True - phonemize = should_phonemize() - lines = { 'training': [], 'validation': [] } - segments = {} - - quantize_in_memory = args.tts_backend == "vall-e" - - if args.tts_backend != "tortoise": - text_length = 0 - audio_length = 0 - - start_offset = -0.1 - end_offset = 0.1 - trim_silence = False - - TARGET_SAMPLE_RATE = 22050 - if args.tts_backend != "tortoise": - TARGET_SAMPLE_RATE = 24000 - if tts: - TARGET_SAMPLE_RATE = tts.input_sample_rate - - for filename in tqdm(results, desc="Parsing results"): - use_segment = use_segments - - extension = os.path.splitext(filename)[-1][1:] - out_extension = extension # "wav" - result = results[filename] - lang = result['language'] - language = LANGUAGES[lang] if lang in LANGUAGES else lang - normalizer = EnglishTextNormalizer() if language and language == "english" else BasicTextNormalizer() - - # check if unsegmented text exceeds 200 characters - if not use_segment: - if len(result['text']) > MAX_TRAINING_CHAR_LENGTH: - message = f"Text length too long ({MAX_TRAINING_CHAR_LENGTH} < {len(result['text'])}), using segments: {filename}" - print(message) - messages.append(message) - use_segment = True - - # check if unsegmented audio exceeds 11.6s - if not use_segment: - path = f'{indir}/audio/{filename}' - if not quantize_in_memory and not os.path.exists(path): - messages.append(f"Missing source audio: {filename}") - errored += 1 - continue - - duration = 0 - for segment in result['segments']: - duration = max(duration, segment['end']) - - if duration >= MAX_TRAINING_DURATION: - message = f"Audio too large, using segments: {filename}" - print(message) - messages.append(message) - use_segment = True - - # implicitly segment - if use_segment and not use_segments: - exists = True - for segment in result['segments']: - duration = segment['end'] - segment['start'] - if duration <= MIN_TRAINING_DURATION or MAX_TRAINING_DURATION <= duration: - continue - - path = f'{indir}/audio/' + filename.replace(f".{extension}", f"_{pad(segment['id'], 4)}.{out_extension}") - if os.path.exists(path): - continue - exists = False - break - - if not quantize_in_memory and not exists: - tmp = {} - tmp[filename] = result - print(f"Audio not segmented, segmenting: {filename}") - message = slice_dataset( voice, results=tmp ) - print(message) - messages = messages + message.split("\n") - - waveform = None - - - if quantize_in_memory: - path = f'{indir}/audio/{filename}' - if not os.path.exists(path): - path = f'./voices/{voice}/{filename}' - - if not os.path.exists(path): - message = f"Audio not found: {path}" - print(message) - messages.append(message) - #continue - else: - waveform = torchaudio.load(path) - waveform = resample(waveform[0], waveform[1], TARGET_SAMPLE_RATE) - - if not use_segment: - segments[filename] = { - 'text': result['text'], - 'lang': lang, - 'language': language, - 'normalizer': normalizer, - 'phonemes': result['phonemes'] if 'phonemes' in result else None - } - - if waveform: - segments[filename]['waveform'] = waveform - else: - for segment in result['segments']: - duration = segment['end'] - segment['start'] - if duration <= MIN_TRAINING_DURATION or MAX_TRAINING_DURATION <= duration: - continue - - file = filename.replace(f".{extension}", f"_{pad(segment['id'], 4)}.{out_extension}") - - segments[file] = { - 'text': segment['text'], - 'lang': 
lang, - 'language': language, - 'normalizer': normalizer, - 'phonemes': segment['phonemes'] if 'phonemes' in segment else None - } - - if waveform: - sliced, error = slice_waveform( waveform[0], waveform[1], segment['start'] + start_offset, segment['end'] + end_offset, trim_silence ) - if error: - message = f"{error}, skipping... {file}" - print(message) - messages.append(message) - segments[file]['error'] = error - #continue - else: - segments[file]['waveform'] = (sliced, waveform[1]) - - jobs = { - 'quantize': [[], []], - 'phonemize': [[], []], - } - - for file in tqdm(segments, desc="Parsing segments"): - extension = os.path.splitext(file)[-1][1:] - result = segments[file] - path = f'{indir}/audio/{file}' - - text = result['text'] - lang = result['lang'] - language = result['language'] - normalizer = result['normalizer'] - phonemes = result['phonemes'] - if phonemize and phonemes is None: - phonemes = phonemizer( text, language=lang ) - - normalized = normalizer(text) if normalize else text - - if len(text) > MAX_TRAINING_CHAR_LENGTH: - message = f"Text length too long ({MAX_TRAINING_CHAR_LENGTH} < {len(text)}), skipping... {file}" - print(message) - messages.append(message) - errored += 1 - continue - - # num_channels, num_frames = waveform.shape - #duration = num_frames / sample_rate - - - culled = len(text) < text_length - #if not culled and audio_length > 0: - # culled = duration < audio_length - - line = f'audio/{file}|{phonemes if phonemize and phonemes else text}' - - lines['training' if not culled else 'validation'].append(line) - - if culled or args.tts_backend != "vall-e": - continue - - os.makedirs(f'{indir}/valle/', exist_ok=True) - #os.makedirs(f'./training/valle/data/{voice}/', exist_ok=True) - - phn_file = f'{indir}/valle/{file.replace(f".{extension}",".phn.txt")}' - #phn_file = f'./training/valle/data/{voice}/{file.replace(f".{extension}",".phn.txt")}' - if not os.path.exists(phn_file): - jobs['phonemize'][0].append(phn_file) - jobs['phonemize'][1].append(normalized) - """ - phonemized = valle_phonemize( normalized ) - open(f'{indir}/valle/{file.replace(".wav",".phn.txt")}', 'w', encoding='utf-8').write(" ".join(phonemized)) - print("Phonemized:", file, normalized, text) - """ - - qnt_file = f'{indir}/valle/{file.replace(f".{extension}",".qnt.pt")}' - #qnt_file = f'./training/valle/data/{voice}/{file.replace(f".{extension}",".qnt.pt")}' - if 'error' not in result: - if not quantize_in_memory and not os.path.exists(path): - message = f"Missing segment, skipping... {file}" - print(message) - messages.append(message) - errored += 1 - continue - - if not os.path.exists(qnt_file): - waveform = None - if 'waveform' in result: - waveform, sample_rate = result['waveform'] - elif os.path.exists(path): - waveform, sample_rate = torchaudio.load(path) - error = validate_waveform( waveform, sample_rate ) - if error: - message = f"{error}, skipping... 
{file}" - print(message) - messages.append(message) - errored += 1 - continue - - if waveform is not None: - jobs['quantize'][0].append(qnt_file) - jobs['quantize'][1].append((waveform, sample_rate)) - """ - quantized = valle_quantize( waveform, sample_rate ).cpu() - torch.save(quantized, f'{indir}/valle/{file.replace(".wav",".qnt.pt")}') - print("Quantized:", file) - """ - - for i in tqdm(range(len(jobs['quantize'][0])), desc="Quantizing"): - qnt_file = jobs['quantize'][0][i] - waveform, sample_rate = jobs['quantize'][1][i] - - quantized = valle_quantize( waveform, sample_rate ).cpu() - torch.save(quantized, qnt_file) - #print("Quantized:", qnt_file) - - for i in tqdm(range(len(jobs['phonemize'][0])), desc="Phonemizing"): - phn_file = jobs['phonemize'][0][i] - normalized = jobs['phonemize'][1][i] - - try: - phonemized = valle_phonemize( normalized ) - open(phn_file, 'w', encoding='utf-8').write(" ".join(phonemized)) - #print("Phonemized:", phn_file) - except Exception as e: - message = f"Failed to phonemize: {phn_file}: {normalized}" - messages.append(message) - print(message) - - - training_joined = "\n".join(lines['training']) - validation_joined = "\n".join(lines['validation']) - - with open(f'{indir}/train.txt', 'w', encoding="utf-8") as f: - f.write(training_joined) - - with open(f'{indir}/validation.txt', 'w', encoding="utf-8") as f: - f.write(validation_joined) - - messages.append(f"Prepared {len(lines['training'])} lines (validation: {len(lines['validation'])}, culled: {errored}).\n{training_joined}\n\n{validation_joined}") - return "\n".join(messages) - -def calc_iterations( epochs, lines, batch_size ): - return int(math.ceil(epochs * math.ceil(lines / batch_size))) - -def schedule_learning_rate( iterations, schedule=LEARNING_RATE_SCHEDULE ): - return [int(iterations * d) for d in schedule] - -def optimize_training_settings( **kwargs ): - messages = [] - settings = {} - settings.update(kwargs) - - dataset_path = f"./training/{settings['voice']}/train.txt" - with open(dataset_path, 'r', encoding="utf-8") as f: - lines = len(f.readlines()) - - if lines == 0: - raise Exception("Empty dataset.") - - if settings['batch_size'] > lines: - settings['batch_size'] = lines - messages.append(f"Batch size is larger than your dataset, clamping batch size to: {settings['batch_size']}") - - """ - if lines % settings['batch_size'] != 0: - settings['batch_size'] = int(lines / settings['batch_size']) - if settings['batch_size'] == 0: - settings['batch_size'] = 1 - messages.append(f"Batch size not neatly divisible by dataset size, adjusting batch size to: {settings['batch_size']}") - """ - if settings['gradient_accumulation_size'] == 0: - settings['gradient_accumulation_size'] = 1 - - if settings['batch_size'] / settings['gradient_accumulation_size'] < 2: - settings['gradient_accumulation_size'] = int(settings['batch_size'] / 2) - if settings['gradient_accumulation_size'] == 0: - settings['gradient_accumulation_size'] = 1 - - messages.append(f"Gradient accumulation size is too large for a given batch size, clamping gradient accumulation size to: {settings['gradient_accumulation_size']}") - elif settings['batch_size'] % settings['gradient_accumulation_size'] != 0: - settings['gradient_accumulation_size'] -= settings['batch_size'] % settings['gradient_accumulation_size'] - if settings['gradient_accumulation_size'] == 0: - settings['gradient_accumulation_size'] = 1 - - messages.append(f"Batch size is not evenly divisible by the gradient accumulation size, adjusting gradient accumulation size to: 
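# Worked example for calc_iterations() / schedule_learning_rate() above, assuming a 500-line
# train.txt, batch_size=128 and 200 epochs:
#   ceil(500 / 128) = 4 steps per epoch
#   calc_iterations(200, 500, 128) = 200 * 4 = 800 total iterations
# schedule_learning_rate() is later called with the per-epoch step count, so the default epoch
# milestones [2, 4, 9, 18, 25, 33, 50] become gen_lr_steps [8, 16, 36, 72, 100, 132, 200].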
{settings['gradient_accumulation_size']}") - - if settings['batch_size'] % settings['gpus'] != 0: - settings['batch_size'] -= settings['batch_size'] % settings['gpus'] - if settings['batch_size'] == 0: - settings['batch_size'] = 1 - messages.append(f"Batch size not neatly divisible by GPU count, adjusting batch size to: {settings['batch_size']}") - - - def get_device_batch_size( vram ): - DEVICE_BATCH_SIZE_MAP = [ - (70, 128), # based on an A100-80G, I can safely get a ratio of 4096:32 = 128 - (32, 64), # based on my two 6800XTs, I can only really safely get a ratio of 128:2 = 64 - (16, 8), # based on an A4000, I can do a ratio of 512:64 = 8:1 - (8, 4), # interpolated - (6, 2), # based on my 2060, it only really lets me have a batch ratio of 2:1 - ] - for k, v in DEVICE_BATCH_SIZE_MAP: - if vram > (k-1): - return v - return 1 - - if settings['gpus'] > get_device_count(): - settings['gpus'] = get_device_count() - messages.append(f"GPU count exceeds defacto GPU count, clamping to: {settings['gpus']}") - - if settings['gpus'] <= 1: - settings['gpus'] = 1 - else: - messages.append(f"! EXPERIMENTAL ! Multi-GPU training is extremely particular, expect issues.") - - # assuming you have equal GPUs - vram = get_device_vram() * settings['gpus'] - batch_ratio = int(settings['batch_size'] / settings['gradient_accumulation_size']) - batch_cap = get_device_batch_size(vram) - - if batch_ratio > batch_cap: - settings['gradient_accumulation_size'] = int(settings['batch_size'] / batch_cap) - messages.append(f"Batch ratio ({batch_ratio}) is expected to exceed your VRAM capacity ({'{:.3f}'.format(vram)}GB, suggested {batch_cap} batch size cap), adjusting gradient accumulation size to: {settings['gradient_accumulation_size']}") - - iterations = calc_iterations(epochs=settings['epochs'], lines=lines, batch_size=settings['batch_size']) - - if settings['epochs'] < settings['save_rate']: - settings['save_rate'] = settings['epochs'] - messages.append(f"Save rate is too small for the given iteration step, clamping save rate to: {settings['save_rate']}") - - if settings['epochs'] < settings['validation_rate']: - settings['validation_rate'] = settings['epochs'] - messages.append(f"Validation rate is too small for the given iteration step, clamping validation rate to: {settings['validation_rate']}") - - if settings['resume_state'] and not os.path.exists(settings['resume_state']): - settings['resume_state'] = None - messages.append("Resume path specified, but does not exist. Disabling...") - - if settings['bitsandbytes']: - messages.append("! EXPERIMENTAL ! BitsAndBytes requested.") - - if settings['half_p']: - if settings['bitsandbytes']: - settings['half_p'] = False - messages.append("Half Precision requested, but BitsAndBytes is also requested. Due to redundancies, disabling half precision...") - else: - messages.append("! EXPERIMENTAL ! 
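# Worked example of the VRAM heuristic above: a single 16 GB GPU falls into the (16, 8) bucket,
# i.e. a suggested cap of 8 on batch_size / gradient_accumulation_size. With batch_size=128 and
# gradient_accumulation_size=4 the ratio is 32 > 8, so the accumulation size is raised to
# int(128 / 8) = 16, leaving roughly 8 samples in flight per forward pass.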
Half Precision requested.") - if not os.path.exists(get_halfp_model_path()): - convert_to_halfp() - - steps = int(iterations / settings['epochs']) - - messages.append(f"For {settings['epochs']} epochs with {lines} lines in batches of {settings['batch_size']}, iterating for {iterations} steps ({steps}) steps per epoch)") - - return settings, messages - -def save_training_settings( **kwargs ): - messages = [] - settings = {} - settings.update(kwargs) - - - outjson = f'./training/{settings["voice"]}/train.json' - with open(outjson, 'w', encoding="utf-8") as f: - f.write(json.dumps(settings, indent='\t') ) - - settings['dataset_path'] = f"./training/{settings['voice']}/train.txt" - settings['validation_path'] = f"./training/{settings['voice']}/validation.txt" - - with open(settings['dataset_path'], 'r', encoding="utf-8") as f: - lines = len(f.readlines()) - - settings['iterations'] = calc_iterations(epochs=settings['epochs'], lines=lines, batch_size=settings['batch_size']) - - if not settings['source_model'] or settings['source_model'] == "auto": - settings['source_model'] = f"./models/tortoise/autoregressive{'_half' if settings['half_p'] else ''}.pth" - - if settings['half_p']: - if not os.path.exists(get_halfp_model_path()): - convert_to_halfp() - - messages.append(f"For {settings['epochs']} epochs with {lines} lines, iterating for {settings['iterations']} steps") - - iterations_per_epoch = settings['iterations'] / settings['epochs'] - - settings['save_rate'] = int(settings['save_rate'] * iterations_per_epoch) - settings['validation_rate'] = int(settings['validation_rate'] * iterations_per_epoch) - - iterations_per_epoch = int(iterations_per_epoch) - - if settings['save_rate'] < 1: - settings['save_rate'] = 1 - """ - if settings['validation_rate'] < 1: - settings['validation_rate'] = 1 - """ - """ - if settings['iterations'] % settings['save_rate'] != 0: - adjustment = int(settings['iterations'] / settings['save_rate']) * settings['save_rate'] - messages.append(f"Iteration rate is not evenly divisible by save rate, adjusting: {settings['iterations']} => {adjustment}") - settings['iterations'] = adjustment - """ - - settings['validation_batch_size'] = int(settings['batch_size'] / settings['gradient_accumulation_size']) - if not os.path.exists(settings['validation_path']): - settings['validation_enabled'] = False - messages.append("Validation not found, disabling validation...") - elif settings['validation_batch_size'] == 0: - settings['validation_enabled'] = False - messages.append("Validation batch size == 0, disabling validation...") - else: - with open(settings['validation_path'], 'r', encoding="utf-8") as f: - validation_lines = len(f.readlines()) - - if validation_lines < settings['validation_batch_size']: - settings['validation_batch_size'] = validation_lines - messages.append(f"Batch size exceeds validation dataset size, clamping validation batch size to {validation_lines}") - - settings['tokenizer_json'] = args.tokenizer_json if args.tokenizer_json else get_tokenizer_jsons()[0] - - if settings['gpus'] > get_device_count(): - settings['gpus'] = get_device_count() - - # what an utter mistake this was - settings['optimizer'] = 'adamw' # if settings['gpus'] == 1 else 'adamw_zero' - - if 'learning_rate_scheme' not in settings or settings['learning_rate_scheme'] not in LEARNING_RATE_SCHEMES: - settings['learning_rate_scheme'] = "Multistep" - - settings['learning_rate_scheme'] = LEARNING_RATE_SCHEMES[settings['learning_rate_scheme']] - - learning_rate_schema = [f"default_lr_scheme: 
{settings['learning_rate_scheme']}"] - if settings['learning_rate_scheme'] == "MultiStepLR": - if not settings['learning_rate_schedule']: - settings['learning_rate_schedule'] = LEARNING_RATE_SCHEDULE - elif isinstance(settings['learning_rate_schedule'],str): - settings['learning_rate_schedule'] = json.loads(settings['learning_rate_schedule']) - - settings['learning_rate_schedule'] = schedule_learning_rate( iterations_per_epoch, settings['learning_rate_schedule'] ) - - learning_rate_schema.append(f" gen_lr_steps: {settings['learning_rate_schedule']}") - learning_rate_schema.append(f" lr_gamma: 0.5") - elif settings['learning_rate_scheme'] == "CosineAnnealingLR_Restart": - epochs = settings['epochs'] - restarts = settings['learning_rate_restarts'] - restart_period = int(epochs / restarts) - - if 'learning_rate_warmup' not in settings: - settings['learning_rate_warmup'] = 0 - if 'learning_rate_min' not in settings: - settings['learning_rate_min'] = 1e-08 - - if 'learning_rate_period' not in settings: - settings['learning_rate_period'] = [ iterations_per_epoch * restart_period for x in range(epochs) ] - - settings['learning_rate_restarts'] = [ iterations_per_epoch * (x+1) * restart_period for x in range(restarts) ] # [52, 104, 156, 208] - - if 'learning_rate_restart_weights' not in settings: - settings['learning_rate_restart_weights'] = [ ( restarts - x - 1 ) / restarts for x in range(restarts) ] # [.75, .5, .25, .125] - settings['learning_rate_restart_weights'][-1] = settings['learning_rate_restart_weights'][-2] * 0.5 - - learning_rate_schema.append(f" T_period: {settings['learning_rate_period']}") - learning_rate_schema.append(f" warmup: {settings['learning_rate_warmup']}") - learning_rate_schema.append(f" eta_min: !!float {settings['learning_rate_min']}") - learning_rate_schema.append(f" restarts: {settings['learning_rate_restarts']}") - learning_rate_schema.append(f" restart_weights: {settings['learning_rate_restart_weights']}") - settings['learning_rate_scheme'] = "\n".join(learning_rate_schema) - - if settings['resume_state']: - settings['source_model'] = f"# pretrain_model_gpt: '{settings['source_model']}'" - settings['resume_state'] = f"resume_state: '{settings['resume_state']}'" - else: - settings['source_model'] = f"pretrain_model_gpt: '{settings['source_model']}'" - settings['resume_state'] = f"# resume_state: '{settings['resume_state']}'" - - def use_template(template, out): - with open(template, 'r', encoding="utf-8") as f: - yaml = f.read() - - # i could just load and edit the YAML directly, but this is easier, as I don't need to bother with path traversals - for k in settings: - if settings[k] is None: - continue - yaml = yaml.replace(f"${{{k}}}", str(settings[k])) - - with open(out, 'w', encoding="utf-8") as f: - f.write(yaml) - - if args.tts_backend == "tortoise": - use_template(f'./models/.template.dlas.yaml', f'./training/{settings["voice"]}/train.yaml') - elif args.tts_backend == "vall-e": - settings['model_name'] = "[ 'ar-quarter', 'nar-quarter' ]" - use_template(f'./models/.template.valle.yaml', f'./training/{settings["voice"]}/config.yaml') - - messages.append(f"Saved training output") - return settings, messages - -def import_voices(files, saveAs=None, progress=None): - global args - - if not isinstance(files, list): - files = [files] - - for file in tqdm(files, desc="Importing voice files"): - j, latents = read_generate_settings(file, read_latents=True) - - if j is not None and saveAs is None: - saveAs = j['voice'] - if saveAs is None or saveAs == "": - raise 
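# use_template() above is plain string substitution rather than YAML-aware editing: every
# ${key} placeholder in the template is replaced with str(settings[key]). A hypothetical
# template line such as
#   batch_size: ${batch_size}
# is emitted as
#   batch_size: 128
# and multi-line values (e.g. the pre-rendered learning_rate_scheme fragment) are pasted in
# verbatim, which is why they are assembled as ready-to-write YAML strings beforehand.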
Exception("Specify a voice name") - - outdir = f'{get_voice_dir()}/{saveAs}/' - os.makedirs(outdir, exist_ok=True) - - if latents: - print(f"Importing latents to {latents}") - with open(f'{outdir}/cond_latents.pth', 'wb') as f: - f.write(latents) - latents = f'{outdir}/cond_latents.pth' - print(f"Imported latents to {latents}") - else: - filename = file.name - if filename[-4:] != ".wav": - raise Exception("Please convert to a WAV first") - - path = f"{outdir}/{os.path.basename(filename)}" - print(f"Importing voice to {path}") - - waveform, sample_rate = torchaudio.load(filename) - - if args.voice_fixer: - if not voicefixer: - load_voicefixer() - - waveform, sample_rate = resample(waveform, sample_rate, 44100) - torchaudio.save(path, waveform, sample_rate) - - print(f"Running 'voicefixer' on voice sample: {path}") - voicefixer.restore( - input = path, - output = path, - cuda=get_device_name() == "cuda" and args.voice_fixer_use_cuda, - #mode=mode, - ) - else: - torchaudio.save(path, waveform, sample_rate) - - print(f"Imported voice to {path}") - -def relative_paths( dirs ): - return [ './' + os.path.relpath( d ).replace("\\", "/") for d in dirs ] - -def get_voice( name, dir=get_voice_dir(), load_latents=True, extensions=["wav", "mp3", "flac"] ): - subj = f'{dir}/{name}/' - if not os.path.isdir(subj): - return - files = os.listdir(subj) - - if load_latents: - extensions.append("pth") - - voice = [] - for file in files: - ext = os.path.splitext(file)[-1][1:] - if ext not in extensions: - continue - - voice.append(f'{subj}/{file}') - - return sorted( voice ) - -def get_voice_list(dir=get_voice_dir(), append_defaults=False, extensions=["wav", "mp3", "flac", "pth"]): - defaults = [ "random", "microphone" ] - os.makedirs(dir, exist_ok=True) - #res = sorted([d for d in os.listdir(dir) if d not in defaults and os.path.isdir(os.path.join(dir, d)) and len(os.listdir(os.path.join(dir, d))) > 0 ]) - - res = [] - for name in os.listdir(dir): - if name in defaults: - continue - if not os.path.isdir(f'{dir}/{name}'): - continue - if len(os.listdir(os.path.join(dir, name))) == 0: - continue - files = get_voice( name, dir=dir, extensions=extensions ) - - if len(files) > 0: - res.append(name) - else: - for subdir in os.listdir(f'{dir}/{name}'): - if not os.path.isdir(f'{dir}/{name}/{subdir}'): - continue - files = get_voice( f'{name}/{subdir}', dir=dir, extensions=extensions ) - if len(files) == 0: - continue - res.append(f'{name}/{subdir}') - - res = sorted(res) - - if append_defaults: - res = res + defaults - - return res - -def get_valle_models(dir="./training/"): - return [ f'{dir}/{d}/config.yaml' for d in os.listdir(dir) if os.path.exists(f'{dir}/{d}/config.yaml') ] - -def get_autoregressive_models(dir="./models/finetunes/", prefixed=False, auto=False): - os.makedirs(dir, exist_ok=True) - base = [get_model_path('autoregressive.pth')] - halfp = get_halfp_model_path() - if os.path.exists(halfp): - base.append(halfp) - - additionals = sorted([f'{dir}/{d}' for d in os.listdir(dir) if d[-4:] == ".pth" ]) - found = [] - for training in os.listdir(f'./training/'): - if not os.path.isdir(f'./training/{training}/') or not os.path.isdir(f'./training/{training}/finetune/') or not os.path.isdir(f'./training/{training}/finetune/models/'): - continue - models = sorted([ int(d[:-8]) for d in os.listdir(f'./training/{training}/finetune/models/') if d[-8:] == "_gpt.pth" ]) - found = found + [ f'./training/{training}/finetune/models/{d}_gpt.pth' for d in models ] - - res = base + additionals + found - - if prefixed: - 
for i in range(len(res)): - path = res[i] - hash = hash_file(path) - shorthash = hash[:8] - - res[i] = f'[{shorthash}] {path}' - - paths = relative_paths(res) - if auto: - paths = ["auto"] + paths - - return paths - -def get_diffusion_models(dir="./models/finetunes/", prefixed=False): - return relative_paths([ get_model_path('diffusion_decoder.pth') ]) - -def get_tokenizer_jsons( dir="./models/tokenizers/" ): - additionals = sorted([ f'{dir}/{d}' for d in os.listdir(dir) if d[-5:] == ".json" ]) if os.path.isdir(dir) else [] - return relative_paths([ "./modules/tortoise-tts/tortoise/data/tokenizer.json" ] + additionals) - -def tokenize_text( text, config=None, stringed=True, skip_specials=False ): - from tortoise.utils.tokenizer import VoiceBpeTokenizer - - if not config: - config = args.tokenizer_json if args.tokenizer_json else get_tokenizer_jsons()[0] - - if not tts: - tokenizer = VoiceBpeTokenizer(config) - else: - tokenizer = tts.tokenizer - - encoded = tokenizer.encode(text) - decoded = tokenizer.tokenizer.decode(encoded, skip_special_tokens=skip_specials).split(" ") - - if stringed: - return "\n".join([ str(encoded), str(decoded) ]) - - return decoded - -def get_dataset_list(dir="./training/"): - return sorted([d for d in os.listdir(dir) if os.path.isdir(os.path.join(dir, d)) and "train.txt" in os.listdir(os.path.join(dir, d)) ]) - -def get_training_list(dir="./training/"): - if args.tts_backend == "tortoise": - return sorted([f'./training/{d}/train.yaml' for d in os.listdir(dir) if os.path.isdir(os.path.join(dir, d)) and "train.yaml" in os.listdir(os.path.join(dir, d)) ]) - else: - return sorted([f'./training/{d}/config.yaml' for d in os.listdir(dir) if os.path.isdir(os.path.join(dir, d)) and "config.yaml" in os.listdir(os.path.join(dir, d)) ]) - -def pad(num, zeroes): - return str(num).zfill(zeroes+1) - -def curl(url): - try: - req = urllib.request.Request(url, headers={'User-Agent': 'Python'}) - conn = urllib.request.urlopen(req) - data = conn.read() - data = data.decode() - data = json.loads(data) - conn.close() - return data - except Exception as e: - print(e) - return None - -def check_for_updates( dir = None ): - if dir is None: - check_for_updates("./.git/") - check_for_updates("./.git/modules/dlas/") - check_for_updates("./.git/modules/tortoise-tts/") - return - - git_dir = dir - if not os.path.isfile(f'{git_dir}/FETCH_HEAD'): - print(f"Cannot check for updates for {dir}: not from a git repo") - return False - - with open(f'{git_dir}/FETCH_HEAD', 'r', encoding="utf-8") as f: - head = f.read() - - match = re.findall(r"^([a-f0-9]+).+?https:\/\/(.+?)\/(.+?)\/(.+?)\n", head) - if match is None or len(match) == 0: - print(f"Cannot check for updates for {dir}: cannot parse FETCH_HEAD") - return False - - match = match[0] - - local = match[0] - host = match[1] - owner = match[2] - repo = match[3] - - res = curl(f"https://{host}/api/v1/repos/{owner}/{repo}/branches/") #this only works for gitea instances - - if res is None or len(res) == 0: - print(f"Cannot check for updates for {dir}: cannot fetch from remote") - return False - - remote = res[0]["commit"]["id"] - - if remote != local: - print(f"New version found for {dir}: {local[:8]} => {remote[:8]}") - return True - - return False - -def notify_progress(message, progress=None, verbose=True): - if verbose: - print(message) - - if progress is None: - tqdm.write(message) - else: - progress(0, desc=message) - -def get_args(): - global args - return args - -def setup_args(cli=False): - global args - - default_arguments = { - 'share': 
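# Note on pad() above: it pads to zeroes+1 characters, so the segment suffixes used throughout
# ("_{pad(segment['id'], 4)}") are five digits wide:
#   pad(3, 4)   # -> "00003", e.g. speaker_00003.wav
#   pad(42, 4)  # -> "00042"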
False, - 'listen': None, - 'check-for-updates': False, - 'models-from-local-only': False, - 'low-vram': False, - 'sample-batch-size': None, - 'unsqueeze-sample-batches': False, - 'embed-output-metadata': True, - 'latents-lean-and-mean': True, - 'voice-fixer': False, # getting tired of long initialization times in a Colab for downloading a large dataset for it - 'voice-fixer-use-cuda': True, - - - 'force-cpu-for-conditioning-latents': False, - 'defer-tts-load': False, - 'device-override': None, - 'prune-nonfinal-outputs': True, - 'concurrency-count': 2, - 'autocalculate-voice-chunk-duration-size': 10, - - 'output-sample-rate': 44100, - 'output-volume': 1, - 'results-folder': "./results/", - - 'hf-token': None, - 'tts-backend': TTSES[0], - - 'autoregressive-model': None, - 'diffusion-model': None, - 'vocoder-model': VOCODERS[-1], - 'tokenizer-json': None, - - 'phonemizer-backend': 'espeak', - - 'valle-model': None, - - 'whisper-backend': 'openai/whisper', - 'whisper-model': "base", - 'whisper-batchsize': 1, - - 'training-default-halfp': False, - 'training-default-bnb': True, - - 'websocket-listen-address': "127.0.0.1", - 'websocket-listen-port': 8069, - 'websocket-enabled': False - } - - if os.path.isfile('./config/exec.json'): - with open(f'./config/exec.json', 'r', encoding="utf-8") as f: - try: - overrides = json.load(f) - for k in overrides: - default_arguments[k] = overrides[k] - except Exception as e: - print(e) - pass - - parser = argparse.ArgumentParser(allow_abbrev=not cli) - parser.add_argument("--share", action='store_true', default=default_arguments['share'], help="Lets Gradio return a public URL to use anywhere") - parser.add_argument("--listen", default=default_arguments['listen'], help="Path for Gradio to listen on") - parser.add_argument("--check-for-updates", action='store_true', default=default_arguments['check-for-updates'], help="Checks for update on startup") - parser.add_argument("--models-from-local-only", action='store_true', default=default_arguments['models-from-local-only'], help="Only loads models from disk, does not check for updates for models") - parser.add_argument("--low-vram", action='store_true', default=default_arguments['low-vram'], help="Disables some optimizations that increases VRAM usage") - parser.add_argument("--no-embed-output-metadata", action='store_false', default=not default_arguments['embed-output-metadata'], help="Disables embedding output metadata into resulting WAV files for easily fetching its settings used with the web UI (data is stored in the lyrics metadata tag)") - parser.add_argument("--latents-lean-and-mean", action='store_true', default=default_arguments['latents-lean-and-mean'], help="Exports the bare essentials for latents.") - parser.add_argument("--voice-fixer", action='store_true', default=default_arguments['voice-fixer'], help="Uses python module 'voicefixer' to improve audio quality, if available.") - parser.add_argument("--voice-fixer-use-cuda", action='store_true', default=default_arguments['voice-fixer-use-cuda'], help="Hints to voicefixer to use CUDA, if available.") - parser.add_argument("--force-cpu-for-conditioning-latents", default=default_arguments['force-cpu-for-conditioning-latents'], action='store_true', help="Forces computing conditional latents to be done on the CPU (if you constantyl OOM on low chunk counts)") - parser.add_argument("--defer-tts-load", default=default_arguments['defer-tts-load'], action='store_true', help="Defers loading TTS model") - parser.add_argument("--prune-nonfinal-outputs", 
default=default_arguments['prune-nonfinal-outputs'], action='store_true', help="Deletes non-final output files on completing a generation") - parser.add_argument("--device-override", default=default_arguments['device-override'], help="A device string to override pass through Torch") - parser.add_argument("--sample-batch-size", default=default_arguments['sample-batch-size'], type=int, help="Sets how many batches to use during the autoregressive samples pass") - parser.add_argument("--unsqueeze-sample-batches", default=default_arguments['unsqueeze-sample-batches'], action='store_true', help="Unsqueezes sample batches to process one by one after sampling") - parser.add_argument("--concurrency-count", type=int, default=default_arguments['concurrency-count'], help="How many Gradio events to process at once") - parser.add_argument("--autocalculate-voice-chunk-duration-size", type=float, default=default_arguments['autocalculate-voice-chunk-duration-size'], help="Number of seconds to suggest voice chunk size for (for example, 100 seconds of audio at 10 seconds per chunk will suggest 10 chunks)") - parser.add_argument("--output-sample-rate", type=int, default=default_arguments['output-sample-rate'], help="Sample rate to resample the output to (from 24KHz)") - parser.add_argument("--output-volume", type=float, default=default_arguments['output-volume'], help="Adjusts volume of output") - parser.add_argument("--results-folder", type=str, default=default_arguments['results-folder'], help="Sets output directory") - - parser.add_argument("--hf-token", type=str, default=default_arguments['hf-token'], help="HuggingFace Token") - parser.add_argument("--tts-backend", default=default_arguments['tts-backend'], help="Specifies which TTS backend to use.") - - parser.add_argument("--autoregressive-model", default=default_arguments['autoregressive-model'], help="Specifies which autoregressive model to use for sampling.") - parser.add_argument("--diffusion-model", default=default_arguments['diffusion-model'], help="Specifies which diffusion model to use for sampling.") - parser.add_argument("--vocoder-model", default=default_arguments['vocoder-model'], action='store_true', help="Specifies with vocoder to use") - parser.add_argument("--tokenizer-json", default=default_arguments['tokenizer-json'], help="Specifies which tokenizer json to use for tokenizing.") - - parser.add_argument("--phonemizer-backend", default=default_arguments['phonemizer-backend'], help="Specifies which phonemizer backend to use.") - - parser.add_argument("--valle-model", default=default_arguments['valle-model'], help="Specifies which VALL-E model to use for sampling.") - - parser.add_argument("--whisper-backend", default=default_arguments['whisper-backend'], action='store_true', help="Picks which whisper backend to use (openai/whisper, lightmare/whispercpp)") - parser.add_argument("--whisper-model", default=default_arguments['whisper-model'], help="Specifies which whisper model to use for transcription.") - parser.add_argument("--whisper-batchsize", type=int, default=default_arguments['whisper-batchsize'], help="Specifies batch size for WhisperX") - - parser.add_argument("--training-default-halfp", action='store_true', default=default_arguments['training-default-halfp'], help="Training default: halfp") - parser.add_argument("--training-default-bnb", action='store_true', default=default_arguments['training-default-bnb'], help="Training default: bnb") - - parser.add_argument("--websocket-listen-port", type=int, 
default=default_arguments['websocket-listen-port'], help="Websocket server listen port, default: 8069") - parser.add_argument("--websocket-listen-address", default=default_arguments['websocket-listen-address'], help="Websocket server listen address, default: 127.0.0.1") - parser.add_argument("--websocket-enabled", action='store_true', default=default_arguments['websocket-enabled'], help="Websocket API server enabled, default: false") - - if cli: - args, unknown = parser.parse_known_args() - else: - args = parser.parse_args() - - args.embed_output_metadata = not args.no_embed_output_metadata - - if not args.device_override: - set_device_name(args.device_override) - - if args.sample_batch_size == 0 and get_device_batch_size() == 1: - print("!WARNING! Automatically deduced sample batch size returned 1.") - - args.listen_host = None - args.listen_port = None - args.listen_path = None - if args.listen: - try: - match = re.findall(r"^(?:(.+?):(\d+))?(\/.*?)?$", args.listen)[0] - - args.listen_host = match[0] if match[0] != "" else "127.0.0.1" - args.listen_port = match[1] if match[1] != "" else None - args.listen_path = match[2] if match[2] != "" else "/" - except Exception as e: - pass - - if args.listen_port is not None: - args.listen_port = int(args.listen_port) - if args.listen_port == 0: - args.listen_port = None - - return args - -def get_default_settings( hypenated=True ): - settings = { - 'listen': None if not args.listen else args.listen, - 'share': args.share, - 'low-vram':args.low_vram, - 'check-for-updates':args.check_for_updates, - 'models-from-local-only':args.models_from_local_only, - 'force-cpu-for-conditioning-latents': args.force_cpu_for_conditioning_latents, - 'defer-tts-load': args.defer_tts_load, - 'prune-nonfinal-outputs': args.prune_nonfinal_outputs, - 'device-override': args.device_override, - 'sample-batch-size': args.sample_batch_size, - 'unsqueeze-sample-batches': args.unsqueeze_sample_batches, - 'embed-output-metadata': args.embed_output_metadata, - 'latents-lean-and-mean': args.latents_lean_and_mean, - 'voice-fixer': args.voice_fixer, - 'voice-fixer-use-cuda': args.voice_fixer_use_cuda, - 'concurrency-count': args.concurrency_count, - 'output-sample-rate': args.output_sample_rate, - 'autocalculate-voice-chunk-duration-size': args.autocalculate_voice_chunk_duration_size, - 'output-volume': args.output_volume, - 'results-folder': args.results_folder, - - 'hf-token': args.hf_token, - 'tts-backend': args.tts_backend, - - 'autoregressive-model': args.autoregressive_model, - 'diffusion-model': args.diffusion_model, - 'vocoder-model': args.vocoder_model, - 'tokenizer-json': args.tokenizer_json, - - 'phonemizer-backend': args.phonemizer_backend, - - 'valle-model': args.valle_model, - - 'whisper-backend': args.whisper_backend, - 'whisper-model': args.whisper_model, - 'whisper-batchsize': args.whisper_batchsize, - - 'training-default-halfp': args.training_default_halfp, - 'training-default-bnb': args.training_default_bnb, - } - - res = {} - for k in settings: - res[k.replace("-", "_") if not hypenated else k] = settings[k] - return res - -def update_args( **kwargs ): - global args - - settings = get_default_settings(hypenated=False) - settings.update(kwargs) - - args.listen = settings['listen'] - args.share = settings['share'] - args.check_for_updates = settings['check_for_updates'] - args.models_from_local_only = settings['models_from_local_only'] - args.low_vram = settings['low_vram'] - args.force_cpu_for_conditioning_latents = settings['force_cpu_for_conditioning_latents'] 
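# How the --listen value above is split by the regex ^(?:(.+?):(\d+))?(\/.*?)?$ (examples):
#   --listen 0.0.0.0:7860    -> listen_host "0.0.0.0", listen_port 7860, listen_path "/"
#   --listen /voice-cloning  -> listen_host "127.0.0.1" (default), listen_port None, listen_path "/voice-cloning"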
- args.defer_tts_load = settings['defer_tts_load'] - args.prune_nonfinal_outputs = settings['prune_nonfinal_outputs'] - args.device_override = settings['device_override'] - args.sample_batch_size = settings['sample_batch_size'] - args.unsqueeze_sample_batches = settings['unsqueeze_sample_batches'] - args.embed_output_metadata = settings['embed_output_metadata'] - args.latents_lean_and_mean = settings['latents_lean_and_mean'] - args.voice_fixer = settings['voice_fixer'] - args.voice_fixer_use_cuda = settings['voice_fixer_use_cuda'] - args.concurrency_count = settings['concurrency_count'] - args.output_sample_rate = 44000 - args.autocalculate_voice_chunk_duration_size = settings['autocalculate_voice_chunk_duration_size'] - args.output_volume = settings['output_volume'] - args.results_folder = settings['results_folder'] - - args.hf_token = settings['hf_token'] - args.tts_backend = settings['tts_backend'] - - args.autoregressive_model = settings['autoregressive_model'] - args.diffusion_model = settings['diffusion_model'] - args.vocoder_model = settings['vocoder_model'] - args.tokenizer_json = settings['tokenizer_json'] - - args.phonemizer_backend = settings['phonemizer_backend'] - - args.valle_model = settings['valle_model'] - - args.whisper_backend = settings['whisper_backend'] - args.whisper_model = settings['whisper_model'] - args.whisper_batchsize = settings['whisper_batchsize'] - - args.training_default_halfp = settings['training_default_halfp'] - args.training_default_bnb = settings['training_default_bnb'] - - save_args_settings() - -def save_args_settings(): - global args - settings = get_default_settings() - - os.makedirs('./config/', exist_ok=True) - with open(f'./config/exec.json', 'w', encoding="utf-8") as f: - f.write(json.dumps(settings, indent='\t') ) - -# super kludgy )`; -def import_generate_settings(file = None): - if not file: - file = "./config/generate.json" - - res = { - 'text': None, - 'delimiter': None, - 'emotion': None, - 'prompt': None, - 'voice': "random", - 'mic_audio': None, - 'voice_latents_chunks': None, - 'candidates': None, - 'seed': None, - 'num_autoregressive_samples': 16, - 'diffusion_iterations': 30, - 'temperature': 0.8, - 'diffusion_sampler': "DDIM", - 'breathing_room': 8 , - 'cvvp_weight': 0.0, - 'top_p': 0.8, - 'diffusion_temperature': 1.0, - 'length_penalty': 1.0, - 'repetition_penalty': 2.0, - 'cond_free_k': 2.0, - 'experimentals': None, - } - - settings, _ = read_generate_settings(file, read_latents=False) - - if settings is not None: - res.update(settings) - - return res - -def reset_generate_settings(): - with open(f'./config/generate.json', 'w', encoding="utf-8") as f: - f.write(json.dumps({}, indent='\t') ) - return import_generate_settings() - -def read_generate_settings(file, read_latents=True): - j = None - latents = None - - if isinstance(file, list) and len(file) == 1: - file = file[0] - - try: - if file is not None: - if hasattr(file, 'name'): - file = file.name - - if file[-4:] == ".wav": - metadata = music_tag.load_file(file) - if 'lyrics' in metadata: - j = json.loads(str(metadata['lyrics'])) - elif file[-5:] == ".json": - with open(file, 'r') as f: - j = json.load(f) - except Exception as e: - pass - - if j is not None: - if 'latents' in j: - if read_latents: - latents = base64.b64decode(j['latents']) - del j['latents'] - - - if "time" in j: - j["time"] = "{:.3f}".format(j["time"]) - - - - return ( - j, - latents, - ) - -def version_check_tts( min_version ): - global tts - if not tts: - raise Exception("TTS is not initialized") - - if 
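# Sketch of the metadata embedding that read_generate_settings() above relies on: generation
# settings live in the output WAV's "lyrics" tag as JSON (optionally with base64-encoded
# latents), so they can be recovered with music_tag alone (path invented):
import json
import music_tag

meta = music_tag.load_file("./results/random/random_00000.wav")
settings = json.loads(str(meta["lyrics"]))
print(settings.get("voice"), settings.get("seed"))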
not hasattr(tts, 'version'): - return False - - if min_version[0] > tts.version[0]: - return True - if min_version[1] > tts.version[1]: - return True - if min_version[2] >= tts.version[2]: - return True - return False - -def load_tts( restart=False, - # TorToiSe configs - autoregressive_model=None, diffusion_model=None, vocoder_model=None, tokenizer_json=None, - # VALL-E configs - valle_model=None, -): - global args - global tts - - if restart: - unload_tts() - - tts_loading = True - if args.tts_backend == "tortoise": - if autoregressive_model: - args.autoregressive_model = autoregressive_model - else: - autoregressive_model = args.autoregressive_model - - if autoregressive_model == "auto": - autoregressive_model = deduce_autoregressive_model() - - if diffusion_model: - args.diffusion_model = diffusion_model - else: - diffusion_model = args.diffusion_model - - if vocoder_model: - args.vocoder_model = vocoder_model - else: - vocoder_model = args.vocoder_model - - if tokenizer_json: - args.tokenizer_json = tokenizer_json - else: - tokenizer_json = args.tokenizer_json - - if get_device_name() == "cpu": - print("!!!! WARNING !!!! No GPU available in PyTorch. You may need to reinstall PyTorch.") - - print(f"Loading TorToiSe... (AR: {autoregressive_model}, diffusion: {diffusion_model}, vocoder: {vocoder_model})") - tts = TorToise_TTS(minor_optimizations=not args.low_vram, autoregressive_model_path=autoregressive_model, diffusion_model_path=diffusion_model, vocoder_model=vocoder_model, tokenizer_json=tokenizer_json, unsqueeze_sample_batches=args.unsqueeze_sample_batches) - elif args.tts_backend == "vall-e": - if valle_model: - args.valle_model = valle_model - else: - valle_model = args.valle_model - - print(f"Loading VALL-E... (Config: {valle_model})") - tts = VALLE_TTS(config=args.valle_model) - elif args.tts_backend == "bark": - - print(f"Loading Bark...") - tts = Bark_TTS(small=args.low_vram) - - print("Loaded TTS, ready for generation.") - tts_loading = False - return tts - -def unload_tts(): - global tts - - if tts: - del tts - tts = None - print("Unloaded TTS") - do_gc() - -def reload_tts(): - unload_tts() - load_tts() - -def get_current_voice(): - global current_voice - if current_voice: - return current_voice - - settings, _ = read_generate_settings("./config/generate.json", read_latents=False) - - if settings and "voice" in settings['voice']: - return settings["voice"] - - return None - -def deduce_autoregressive_model(voice=None): - if not voice: - voice = get_current_voice() - - if voice: - if os.path.exists(f'./models/finetunes/{voice}.pth'): - return f'./models/finetunes/{voice}.pth' - - dir = f'./training/{voice}/finetune/models/' - if os.path.isdir(dir): - counts = sorted([ int(d[:-8]) for d in os.listdir(dir) if d[-8:] == "_gpt.pth" ]) - names = [ f'{dir}/{d}_gpt.pth' for d in counts ] - if len(names) > 0: - return names[-1] - - if args.autoregressive_model != "auto": - return args.autoregressive_model - - return get_model_path('autoregressive.pth') - -def update_autoregressive_model(autoregressive_model_path): - if args.tts_backend != "tortoise": - raise f"Unsupported backend: {args.tts_backend}" - - if autoregressive_model_path == "auto": - autoregressive_model_path = deduce_autoregressive_model() - else: - match = re.findall(r'^\[[a-fA-F0-9]{8}\] (.+?)$', autoregressive_model_path) - if match: - autoregressive_model_path = match[0] - - if not autoregressive_model_path or not os.path.exists(autoregressive_model_path): - print(f"Invalid model: {autoregressive_model_path}") - 
return - - args.autoregressive_model = autoregressive_model_path - save_args_settings() - print(f'Stored autoregressive model to settings: {autoregressive_model_path}') - - global tts - if not tts: - if tts_loading: - raise Exception("TTS is still initializing...") - return - - if hasattr(tts, "loading") and tts.loading: - raise Exception("TTS is still initializing...") - - - if autoregressive_model_path == tts.autoregressive_model_path: - return - - tts.load_autoregressive_model(autoregressive_model_path) - - do_gc() - - return autoregressive_model_path - -def update_diffusion_model(diffusion_model_path): - if args.tts_backend != "tortoise": - raise f"Unsupported backend: {args.tts_backend}" - - match = re.findall(r'^\[[a-fA-F0-9]{8}\] (.+?)$', diffusion_model_path) - if match: - diffusion_model_path = match[0] - - if not diffusion_model_path or not os.path.exists(diffusion_model_path): - print(f"Invalid model: {diffusion_model_path}") - return - - args.diffusion_model = diffusion_model_path - save_args_settings() - print(f'Stored diffusion model to settings: {diffusion_model_path}') - - global tts - if not tts: - if tts_loading: - raise Exception("TTS is still initializing...") - return - - if hasattr(tts, "loading") and tts.loading: - raise Exception("TTS is still initializing...") - - if diffusion_model_path == "auto": - diffusion_model_path = deduce_diffusion_model() - - if diffusion_model_path == tts.diffusion_model_path: - return - - tts.load_diffusion_model(diffusion_model_path) - - do_gc() - - return diffusion_model_path - -def update_vocoder_model(vocoder_model): - if args.tts_backend != "tortoise": - raise f"Unsupported backend: {args.tts_backend}" - - args.vocoder_model = vocoder_model - save_args_settings() - print(f'Stored vocoder model to settings: {vocoder_model}') - - global tts - if not tts: - if tts_loading: - raise Exception("TTS is still initializing...") - return - - if hasattr(tts, "loading") and tts.loading: - raise Exception("TTS is still initializing...") - - print(f"Loading model: {vocoder_model}") - tts.load_vocoder_model(vocoder_model) - print(f"Loaded model: {tts.vocoder_model}") - - do_gc() - - return vocoder_model - -def update_tokenizer(tokenizer_json): - if args.tts_backend != "tortoise": - raise f"Unsupported backend: {args.tts_backend}" - - args.tokenizer_json = tokenizer_json - save_args_settings() - print(f'Stored tokenizer to settings: {tokenizer_json}') - - global tts - if not tts: - if tts_loading: - raise Exception("TTS is still initializing...") - return - - if hasattr(tts, "loading") and tts.loading: - raise Exception("TTS is still initializing...") - - print(f"Loading tokenizer vocab: {tokenizer_json}") - tts.load_tokenizer_json(tokenizer_json) - print(f"Loaded tokenizer vocab: {tts.tokenizer_json}") - - do_gc() - - return vocoder_model - -def load_voicefixer(restart=False): - global voicefixer - - if restart: - unload_voicefixer() - - try: - print("Loading Voicefixer") - from voicefixer import VoiceFixer - voicefixer = VoiceFixer() - print("Loaded Voicefixer") - except Exception as e: - print(f"Error occurred while tring to initialize voicefixer: {e}") - if voicefixer: - del voicefixer - voicefixer = None - -def unload_voicefixer(): - global voicefixer - - if voicefixer: - del voicefixer - voicefixer = None - print("Unloaded Voicefixer") - - do_gc() - -def load_whisper_model(language=None, model_name=None, progress=None): - global whisper_model - global whisper_align_model - - if args.whisper_backend not in WHISPER_BACKENDS: - raise 
Exception(f"unavailable backend: {args.whisper_backend}") - - if not model_name: - model_name = args.whisper_model - else: - args.whisper_model = model_name - save_args_settings() - - if language and f'{model_name}.{language}' in WHISPER_SPECIALIZED_MODELS: - model_name = f'{model_name}.{language}' - print(f"Loading specialized model for language: {language}") - - notify_progress(f"Loading Whisper model: {model_name}", progress=progress) - - if args.whisper_backend == "openai/whisper": - import whisper - try: - #is it possible for model to fit on vram but go oom later on while executing on data? - whisper_model = whisper.load_model(model_name) - except: - print("Out of VRAM memory. falling back to loading Whisper on CPU.") - whisper_model = whisper.load_model(model_name, device="cpu") - elif args.whisper_backend == "lightmare/whispercpp": - from whispercpp import Whisper - if not language: - language = 'auto' - - b_lang = language.encode('ascii') - whisper_model = Whisper(model_name, models_dir='./models/', language=b_lang) - elif args.whisper_backend == "m-bain/whisperx": - import whisper, whisperx - device = "cuda" if get_device_name() == "cuda" else "cpu" - whisper_model = whisperx.load_model(model_name, device) - whisper_align_model = whisperx.load_align_model(model_name="WAV2VEC2_ASR_LARGE_LV60K_960H" if language=="en" else None, language_code=language, device=device) - - print("Loaded Whisper model") - -def unload_whisper(): - global whisper_model - global whisper_align_model - - if whisper_align_model: - del whisper_align_model - whisper_align_model = None - - if whisper_model: - del whisper_model - whisper_model = None - print("Unloaded Whisper") - - do_gc() - -# shamelessly borrowed from Voldy's Web UI: https://github.com/AUTOMATIC1111/stable-diffusion-webui/blob/master/modules/extras.py#L74 -def merge_models( primary_model_name, secondary_model_name, alpha, progress=gr.Progress() ): - key_blacklist = [] - - def weighted_sum(theta0, theta1, alpha): - return ((1 - alpha) * theta0) + (alpha * theta1) - - def read_model( filename ): - print(f"Loading {filename}") - return torch.load(filename) - - theta_func = weighted_sum - - theta_0 = read_model(primary_model_name) - theta_1 = read_model(secondary_model_name) - - for key in tqdm(theta_0.keys(), desc="Merging..."): - if key in key_blacklist: - print("Skipping ignored key:", key) - continue - - a = theta_0[key] - b = theta_1[key] - - if a.dtype != torch.float32 and a.dtype != torch.float16: - print("Skipping key:", key, a.dtype) - continue - - if b.dtype != torch.float32 and b.dtype != torch.float16: - print("Skipping key:", key, b.dtype) - continue - - theta_0[key] = theta_func(a, b, alpha) - - del theta_1 - - primary_basename = os.path.splitext(os.path.basename(primary_model_name))[0] - secondary_basename = os.path.splitext(os.path.basename(secondary_model_name))[0] - suffix = "{:.3f}".format(alpha) - output_path = f'./models/finetunes/{primary_basename}_{secondary_basename}_{suffix}_merge.pth' - - torch.save(theta_0, output_path) - message = f"Saved to {output_path}" - print(message) +import os +if 'XDG_CACHE_HOME' not in os.environ: + os.environ['XDG_CACHE_HOME'] = os.path.realpath(os.path.join(os.getcwd(), './models/')) + +if 'TORTOISE_MODELS_DIR' not in os.environ: + os.environ['TORTOISE_MODELS_DIR'] = os.path.realpath(os.path.join(os.getcwd(), './models/tortoise/')) + +if 'TRANSFORMERS_CACHE' not in os.environ: + os.environ['TRANSFORMERS_CACHE'] = os.path.realpath(os.path.join(os.getcwd(), './models/transformers/')) + +import 
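# The three cache variables above are exported before any of the ML imports below, presumably
# so that downloaded weights land under the local ./models/ tree rather than ~/.cache; e.g.
#   os.environ['TRANSFORMERS_CACHE']  ->  <cwd>/models/transformers/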
argparse +import time +import math +import json +import base64 +import re +import urllib.request +import signal +import gc +import subprocess +import psutil +import yaml +import hashlib +import string +import random + +from tqdm import tqdm +import torch +import torchaudio +import music_tag +import gradio as gr +import gradio.utils +import pandas as pd +import numpy as np + +from glob import glob +from datetime import datetime +from datetime import timedelta + +from tortoise.api import TextToSpeech as TorToise_TTS, MODELS, get_model_path, pad_or_truncate +from tortoise.utils.audio import load_audio, load_voice, load_voices, get_voice_dir, get_voices +from tortoise.utils.text import split_and_recombine_text +from tortoise.utils.device import get_device_name, set_device_name, get_device_count, get_device_vram, get_device_batch_size, do_gc + + +MODELS['dvae.pth'] = "https://huggingface.co/jbetker/tortoise-tts-v2/resolve/3704aea61678e7e468a06d8eea121dba368a798e/.models/dvae.pth" + +WHISPER_MODELS = ["tiny", "base", "small", "medium", "large", "large-v1", "large-v2"] +WHISPER_SPECIALIZED_MODELS = ["tiny.en", "base.en", "small.en", "medium.en"] +WHISPER_BACKENDS = ["openai/whisper", "lightmare/whispercpp", "m-bain/whisperx"] +VOCODERS = ['univnet', 'bigvgan_base_24khz_100band', 'bigvgan_24khz_100band'] +TTSES = ['tortoise'] + +INFERENCING = False +GENERATE_SETTINGS_ARGS = None + +LEARNING_RATE_SCHEMES = {"Multistep": "MultiStepLR", "Cos. Annealing": "CosineAnnealingLR_Restart"} +LEARNING_RATE_SCHEDULE = [ 2, 4, 9, 18, 25, 33, 50 ] + +RESAMPLERS = {} + +MIN_TRAINING_DURATION = 0.6 +MAX_TRAINING_DURATION = 11.6097505669 +MAX_TRAINING_CHAR_LENGTH = 200 + +VALLE_ENABLED = False +BARK_ENABLED = False + +VERBOSE_DEBUG = True + +import traceback + +try: + from whisper.normalizers.english import EnglishTextNormalizer + from whisper.normalizers.basic import BasicTextNormalizer + from whisper.tokenizer import LANGUAGES + + print("Whisper detected") +except Exception as e: + if VERBOSE_DEBUG: + print(traceback.format_exc()) + pass + +try: + from vall_e.emb.qnt import encode as valle_quantize + from vall_e.emb.g2p import encode as valle_phonemize + + from vall_e.inference import TTS as VALLE_TTS + + import soundfile + + print("VALL-E detected") + VALLE_ENABLED = True +except Exception as e: + if VERBOSE_DEBUG: + print(traceback.format_exc()) + pass + +if VALLE_ENABLED: + TTSES.append('vall-e') + +# torchaudio.set_audio_backend('soundfile') + +try: + import bark + from bark import text_to_semantic + from bark.generation import SAMPLE_RATE as BARK_SAMPLE_RATE, ALLOWED_PROMPTS, preload_models, codec_decode, generate_coarse, generate_fine, generate_text_semantic, load_codec_model + from bark.api import generate_audio as bark_generate_audio + from encodec.utils import convert_audio + + from scipy.io.wavfile import write as write_wav + + print("Bark detected") + BARK_ENABLED = True +except Exception as e: + if VERBOSE_DEBUG: + print(traceback.format_exc()) + pass + +if BARK_ENABLED: + TTSES.append('bark') + + def semantic_to_audio_tokens( + semantic_tokens, + history_prompt = None, + temp = 0.7, + silent = False, + output_full = False, + ): + coarse_tokens = generate_coarse( + semantic_tokens, history_prompt=history_prompt, temp=temp, silent=silent, use_kv_caching=True + ) + fine_tokens = generate_fine(coarse_tokens, history_prompt=history_prompt, temp=0.5) + + if output_full: + full_generation = { + "semantic_prompt": semantic_tokens, + "coarse_prompt": coarse_tokens, + "fine_prompt": fine_tokens, + } + return 
full_generation
+		return fine_tokens
+
+	class Bark_TTS():
+		def __init__(self, small=False):
+			self.input_sample_rate = BARK_SAMPLE_RATE
+			self.output_sample_rate = BARK_SAMPLE_RATE # args.output_sample_rate
+
+			preload_models(
+				text_use_gpu=True,
+				coarse_use_gpu=True,
+				fine_use_gpu=True,
+				codec_use_gpu=True,
+
+				text_use_small=small,
+				coarse_use_small=small,
+				fine_use_small=small,
+
+				force_reload=False
+			)
+
+			self.device = get_device_name()
+
+			try:
+				from vocos import Vocos
+				self.vocos_enabled = True
+				print("Vocos detected")
+			except Exception as e:
+				if VERBOSE_DEBUG:
+					print(traceback.format_exc())
+				self.vocos_enabled = False
+
+			try:
+				from hubert.hubert_manager import HuBERTManager
+
+				hubert_manager = HuBERTManager()
+				hubert_manager.make_sure_hubert_installed()
+				hubert_manager.make_sure_tokenizer_installed()
+
+				self.hubert_enabled = True
+				print("HuBERT detected")
+			except Exception as e:
+				if VERBOSE_DEBUG:
+					print(traceback.format_exc())
+				self.hubert_enabled = False
+
+			if self.vocos_enabled:
+				self.vocos = Vocos.from_pretrained("charactr/vocos-encodec-24khz").to(self.device)
+
+		def create_voice( self, voice ):
+			transcription_json = f'./training/{voice}/whisper.json'
+			if not os.path.exists(transcription_json):
+				raise Exception(f"Transcription for voice not found: {voice}")
+
+			transcriptions = json.load(open(transcription_json, 'r', encoding="utf-8"))
+			candidates = []
+			for file in transcriptions:
+				result = transcriptions[file]
+				added = 0
+
+				for segment in result['segments']:
+					path = file.replace(".wav", f"_{pad(segment['id'], 4)}.wav")
+					# check if the slice actually exists
+					if not os.path.exists(f'./training/{voice}/audio/{path}'):
+						continue
+
+					entry = (
+						path,
+						segment['end'] - segment['start'],
+						segment['text']
+					)
+					candidates.append(entry)
+					added = added + 1
+
+				# if nothing got added (assuming because nothing was sliced), use the master file
+				if added == 0: # added < len(result['segments']):
+					start = 0
+					end = 0
+					for segment in result['segments']:
+						start = max( start, segment['start'] )
+						end = max( end, segment['end'] )
+
+					entry = (
+						file,
+						end - start,
+						result['text']
+					)
+					candidates.append(entry)
+
+			candidates.sort(key=lambda x: x[1])
+			candidate = random.choice(candidates)
+			audio_filepath = f'./training/{voice}/audio/{candidate[0]}'
+			text = candidate[-1]
+
+			print("Using as reference:", audio_filepath, text)
+
+			# Load and pre-process the audio waveform
+			model = load_codec_model(use_gpu=True)
+			wav, sr = torchaudio.load(audio_filepath)
+			wav = convert_audio(wav, sr, model.sample_rate, model.channels)
+
+			# generate semantic tokens
+
+			if self.hubert_enabled:
+				from hubert.pre_kmeans_hubert import CustomHubert
+				from hubert.customtokenizer import CustomTokenizer
+
+				wav = wav.to(self.device)
+
+				# Extract discrete codes from EnCodec
+				with torch.no_grad():
+					encoded_frames = model.encode(wav.unsqueeze(0))
+				codes = torch.cat([encoded[0] for encoded in encoded_frames], dim=-1).squeeze() # [n_q, T]
+
+				# get seconds of audio
+				seconds = wav.shape[-1] / model.sample_rate
+
+				# Load the HuBERT model
+				hubert_model = CustomHubert(checkpoint_path='./data/models/hubert/hubert.pt').to(self.device)
+
+				# Load the CustomTokenizer model
+				tokenizer = CustomTokenizer.load_from_checkpoint('./data/models/hubert/tokenizer.pth').to(self.device)
+
+				semantic_vectors = hubert_model.forward(wav, input_sample_hz=model.sample_rate)
+				semantic_tokens = tokenizer.get_token(semantic_vectors)
+
+				# move codes to cpu
+				codes = codes.cpu().numpy()
+				# move
semantic tokens to cpu + semantic_tokens = semantic_tokens.cpu().numpy() + else: + wav = wav.unsqueeze(0).to(self.device) + + # Extract discrete codes from EnCodec + with torch.no_grad(): + encoded_frames = model.encode(wav) + codes = torch.cat([encoded[0] for encoded in encoded_frames], dim=-1).squeeze().cpu().numpy() # [n_q, T] + + # get seconds of audio + seconds = wav.shape[-1] / model.sample_rate + + # generate semantic tokens + semantic_tokens = generate_text_semantic(text, max_gen_duration_s=seconds, top_k=50, top_p=.95, temp=0.7) + + # print(bark.__file__) + bark_location = os.path.dirname(os.path.relpath(bark.__file__)) # './modules/bark/bark/' + output_path = f'./{bark_location}/assets/prompts/' + voice.replace("/", "_") + '.npz' + np.savez(output_path, fine_prompt=codes, coarse_prompt=codes[:2, :], semantic_prompt=semantic_tokens) + + def inference( self, text, voice, text_temp=0.7, waveform_temp=0.7 ): + if voice == "random": + voice = None + else: + if not os.path.exists('./modules/bark/bark/assets/prompts/' + voice + '.npz'): + self.create_voice( voice ) + voice = voice.replace("/", "_") + if voice not in ALLOWED_PROMPTS: + ALLOWED_PROMPTS.add( voice ) + + semantic_tokens = text_to_semantic(text, history_prompt=voice, temp=text_temp, silent=False) + audio_tokens = semantic_to_audio_tokens( semantic_tokens, history_prompt=voice, temp=waveform_temp, silent=False, output_full=False ) + + if self.vocos_enabled: + audio_tokens_torch = torch.from_numpy(audio_tokens).to(self.device) + features = self.vocos.codes_to_features(audio_tokens_torch) + wav = self.vocos.decode(features, bandwidth_id=torch.tensor([2], device=self.device)) + else: + wav = codec_decode( audio_tokens ) + + return ( wav, BARK_SAMPLE_RATE ) + # return (bark_generate_audio(text, history_prompt=voice, text_temp=text_temp, waveform_temp=waveform_temp), BARK_SAMPLE_RATE) + +args = None +tts = None +tts_loading = False +webui = None +voicefixer = None + +whisper_model = None +whisper_align_model = None + +training_state = None + +current_voice = None + +def cleanup_voice_name( name ): + return name.split("/")[-1] + +def resample( waveform, input_rate, output_rate=44100 ): + # mono-ize + waveform = torch.mean(waveform, dim=0, keepdim=True) + + if input_rate == output_rate: + return waveform, output_rate + + key = f'{input_rate}:{output_rate}' + if not key in RESAMPLERS: + RESAMPLERS[key] = torchaudio.transforms.Resample( + input_rate, + output_rate, + lowpass_filter_width=16, + rolloff=0.85, + resampling_method="kaiser_window", + beta=8.555504641634386, + ) + + return RESAMPLERS[key]( waveform ), output_rate + +def generate(**kwargs): + if args.tts_backend == "tortoise": + return generate_tortoise(**kwargs) + if args.tts_backend == "vall-e": + return generate_valle(**kwargs) + if args.tts_backend == "bark": + return generate_bark(**kwargs) + +def generate_bark(**kwargs): + parameters = {} + parameters.update(kwargs) + + voice = parameters['voice'] + progress = parameters['progress'] if 'progress' in parameters else None + if parameters['seed'] == 0: + parameters['seed'] = None + + usedSeed = parameters['seed'] + + global args + global tts + + unload_whisper() + unload_voicefixer() + + if not tts: + # should check if it's loading or unloaded, and load it if it's unloaded + if tts_loading: + raise Exception("TTS is still initializing...") + if progress is not None: + notify_progress("Initializing TTS...", progress=progress) + load_tts() + if hasattr(tts, "loading") and tts.loading: + raise Exception("TTS is still 
initializing...") + + do_gc() + + voice_samples = None + conditioning_latents = None + sample_voice = None + + voice_cache = {} + + def get_settings( override=None ): + settings = { + 'voice': parameters['voice'], + 'text_temp': float(parameters['temperature']), + 'waveform_temp': float(parameters['temperature']), + } + + # could be better to just do a ternary on everything above, but i am not a professional + selected_voice = voice + if override is not None: + if 'voice' in override: + selected_voice = override['voice'] + + for k in override: + if k not in settings: + continue + settings[k] = override[k] + + return settings + + if not parameters['delimiter']: + parameters['delimiter'] = "\n" + elif parameters['delimiter'] == "\\n": + parameters['delimiter'] = "\n" + + if parameters['delimiter'] and parameters['delimiter'] != "" and parameters['delimiter'] in parameters['text']: + texts = parameters['text'].split(parameters['delimiter']) + else: + texts = split_and_recombine_text(parameters['text']) + + full_start_time = time.time() + + outdir = f"{args.results_folder}/{voice}/" + os.makedirs(outdir, exist_ok=True) + + audio_cache = {} + + volume_adjust = torchaudio.transforms.Vol(gain=args.output_volume, gain_type="amplitude") if args.output_volume != 1 else None + + idx = 0 + idx_cache = {} + for i, file in enumerate(os.listdir(outdir)): + filename = os.path.basename(file) + extension = os.path.splitext(filename)[-1][1:] + if extension != "json" and extension != "wav": + continue + match = re.findall(rf"^{cleanup_voice_name(voice)}_(\d+)(?:.+?)?{extension}$", filename) + if match and len(match) > 0: + key = int(match[0]) + idx_cache[key] = True + + if len(idx_cache) > 0: + keys = sorted(list(idx_cache.keys())) + idx = keys[-1] + 1 + + idx = pad(idx, 4) + + def get_name(line=0, candidate=0, combined=False): + name = f"{idx}" + if combined: + name = f"{name}_combined" + elif len(texts) > 1: + name = f"{name}_{line}" + if parameters['candidates'] > 1: + name = f"{name}_{candidate}" + return name + + def get_info( voice, settings = None, latents = True ): + info = {} + info.update(parameters) + + info['time'] = time.time()-full_start_time + info['datetime'] = datetime.now().isoformat() + + info['progress'] = None + del info['progress'] + + if info['delimiter'] == "\n": + info['delimiter'] = "\\n" + + if settings is not None: + for k in settings: + if k in info: + info[k] = settings[k] + return info + + INFERENCING = True + for line, cut_text in enumerate(texts): + tqdm_prefix = f'[{str(line+1)}/{str(len(texts))}]' + print(f"{tqdm_prefix} Generating line: {cut_text}") + start_time = time.time() + + # do setting editing + match = re.findall(r'^(\{.+\}) (.+?)$', cut_text) + override = None + if match and len(match) > 0: + match = match[0] + try: + override = json.loads(match[0]) + cut_text = match[1].strip() + except Exception as e: + raise Exception("Prompt settings editing requested, but received invalid JSON") + + settings = get_settings( override=override ) + + gen = tts.inference(cut_text, **settings ) + + run_time = time.time()-start_time + print(f"Generating line took {run_time} seconds") + + if not isinstance(gen, list): + gen = [gen] + + for j, g in enumerate(gen): + wav, sr = g + name = get_name(line=line, candidate=j) + + settings['text'] = cut_text + settings['time'] = run_time + settings['datetime'] = datetime.now().isoformat() + + # save here in case some error happens mid-batch + if tts.vocos_enabled: + torchaudio.save(f'{outdir}/{cleanup_voice_name(voice)}_{name}.wav', 
wav.cpu(), sr) + else: + write_wav(f'{outdir}/{cleanup_voice_name(voice)}_{name}.wav', sr, wav) + wav, sr = torchaudio.load(f'{outdir}/{cleanup_voice_name(voice)}_{name}.wav') + + audio_cache[name] = { + 'audio': wav, + 'settings': get_info(voice=override['voice'] if override and 'voice' in override else voice, settings=settings) + } + + del gen + do_gc() + INFERENCING = False + + for k in audio_cache: + audio = audio_cache[k]['audio'] + + audio, _ = resample(audio, tts.output_sample_rate, args.output_sample_rate) + if volume_adjust is not None: + audio = volume_adjust(audio) + + audio_cache[k]['audio'] = audio + torchaudio.save(f'{outdir}/{cleanup_voice_name(voice)}_{k}.wav', audio, args.output_sample_rate) + + output_voices = [] + for candidate in range(parameters['candidates']): + if len(texts) > 1: + audio_clips = [] + for line in range(len(texts)): + name = get_name(line=line, candidate=candidate) + audio = audio_cache[name]['audio'] + audio_clips.append(audio) + + name = get_name(candidate=candidate, combined=True) + audio = torch.cat(audio_clips, dim=-1) + torchaudio.save(f'{outdir}/{cleanup_voice_name(voice)}_{name}.wav', audio, args.output_sample_rate) + + audio = audio.squeeze(0).cpu() + audio_cache[name] = { + 'audio': audio, + 'settings': get_info(voice=voice), + 'output': True + } + else: + try: + name = get_name(candidate=candidate) + audio_cache[name]['output'] = True + except Exception as e: + for name in audio_cache: + audio_cache[name]['output'] = True + + + if args.voice_fixer: + if not voicefixer: + notify_progress("Loading voicefix...", progress=progress) + load_voicefixer() + + try: + fixed_cache = {} + for name in tqdm(audio_cache, desc="Running voicefix..."): + del audio_cache[name]['audio'] + if 'output' not in audio_cache[name] or not audio_cache[name]['output']: + continue + + path = f'{outdir}/{cleanup_voice_name(voice)}_{name}.wav' + fixed = f'{outdir}/{cleanup_voice_name(voice)}_{name}_fixed.wav' + voicefixer.restore( + input=path, + output=fixed, + cuda=get_device_name() == "cuda" and args.voice_fixer_use_cuda, + #mode=mode, + ) + + fixed_cache[f'{name}_fixed'] = { + 'settings': audio_cache[name]['settings'], + 'output': True + } + audio_cache[name]['output'] = False + + for name in fixed_cache: + audio_cache[name] = fixed_cache[name] + except Exception as e: + print(e) + print("\nFailed to run Voicefixer") + + for name in audio_cache: + if 'output' not in audio_cache[name] or not audio_cache[name]['output']: + if args.prune_nonfinal_outputs: + audio_cache[name]['pruned'] = True + os.remove(f'{outdir}/{cleanup_voice_name(voice)}_{name}.wav') + continue + + output_voices.append(f'{outdir}/{cleanup_voice_name(voice)}_{name}.wav') + + if not args.embed_output_metadata: + with open(f'{outdir}/{cleanup_voice_name(voice)}_{name}.json', 'w', encoding="utf-8") as f: + f.write(json.dumps(audio_cache[name]['settings'], indent='\t') ) + + if args.embed_output_metadata: + for name in tqdm(audio_cache, desc="Embedding metadata..."): + if 'pruned' in audio_cache[name] and audio_cache[name]['pruned']: + continue + + metadata = music_tag.load_file(f"{outdir}/{cleanup_voice_name(voice)}_{name}.wav") + metadata['lyrics'] = json.dumps(audio_cache[name]['settings']) + metadata.save() + + if sample_voice is not None: + sample_voice = (tts.input_sample_rate, sample_voice.numpy()) + + info = get_info(voice=voice, latents=False) + print(f"Generation took {info['time']} seconds, saved to '{output_voices[0]}'\n") + + info['seed'] = usedSeed + if 'latents' in info: + del 
info['latents'] + + os.makedirs('./config/', exist_ok=True) + with open(f'./config/generate.json', 'w', encoding="utf-8") as f: + f.write(json.dumps(info, indent='\t') ) + + stats = [ + [ parameters['seed'], "{:.3f}".format(info['time']) ] + ] + + return ( + sample_voice, + output_voices, + stats, + ) + +def generate_valle(**kwargs): + parameters = {} + parameters.update(kwargs) + + voice = parameters['voice'] + progress = parameters['progress'] if 'progress' in parameters else None + if parameters['seed'] == 0: + parameters['seed'] = None + + usedSeed = parameters['seed'] + + global args + global tts + + unload_whisper() + unload_voicefixer() + + if not tts: + # should check if it's loading or unloaded, and load it if it's unloaded + if tts_loading: + raise Exception("TTS is still initializing...") + if progress is not None: + notify_progress("Initializing TTS...", progress=progress) + load_tts() + if hasattr(tts, "loading") and tts.loading: + raise Exception("TTS is still initializing...") + + do_gc() + + voice_samples = None + conditioning_latents = None + sample_voice = None + + voice_cache = {} + def fetch_voice( voice ): + if voice in voice_cache: + return voice_cache[voice] + + """ + voice_dir = f'./training/{voice}/audio/' + + if not os.path.isdir(voice_dir) or len(os.listdir(voice_dir)) == 0: + voice_dir = f'./voices/{voice}/' + + files = [ f'{voice_dir}/{d}' for d in os.listdir(voice_dir) if d[-4:] == ".wav" ] + """ + + if os.path.isdir(f'./training/{voice}/audio/'): + files = get_voice(name="audio", dir=f"./training/{voice}/", load_latents=False) + else: + files = get_voice(name=voice, load_latents=False) + + # return files + voice_cache[voice] = random.sample(files, k=min(3, len(files))) + return voice_cache[voice] + + def get_settings( override=None ): + settings = { + 'ar_temp': float(parameters['temperature']), + 'nar_temp': float(parameters['temperature']), + 'max_ar_steps': parameters['num_autoregressive_samples'], + } + + # could be better to just do a ternary on everything above, but i am not a professional + selected_voice = voice + if override is not None: + if 'voice' in override: + selected_voice = override['voice'] + + for k in override: + if k not in settings: + continue + settings[k] = override[k] + + settings['references'] = fetch_voice(voice=selected_voice) # [ fetch_voice(voice=selected_voice) for _ in range(3) ] + return settings + + if not parameters['delimiter']: + parameters['delimiter'] = "\n" + elif parameters['delimiter'] == "\\n": + parameters['delimiter'] = "\n" + + if parameters['delimiter'] and parameters['delimiter'] != "" and parameters['delimiter'] in parameters['text']: + texts = parameters['text'].split(parameters['delimiter']) + else: + texts = split_and_recombine_text(parameters['text']) + + full_start_time = time.time() + + outdir = f"{args.results_folder}/{voice}/" + os.makedirs(outdir, exist_ok=True) + + audio_cache = {} + + volume_adjust = torchaudio.transforms.Vol(gain=args.output_volume, gain_type="amplitude") if args.output_volume != 1 else None + + idx = 0 + idx_cache = {} + for i, file in enumerate(os.listdir(outdir)): + filename = os.path.basename(file) + extension = os.path.splitext(filename)[-1][1:] + if extension != "json" and extension != "wav": + continue + match = re.findall(rf"^{voice}_(\d+)(?:.+?)?{extension}$", filename) + if match and len(match) > 0: + key = int(match[0]) + idx_cache[key] = True + + if len(idx_cache) > 0: + keys = sorted(list(idx_cache.keys())) + idx = keys[-1] + 1 + + idx = pad(idx, 4) + + def 
get_name(line=0, candidate=0, combined=False): + name = f"{idx}" + if combined: + name = f"{name}_combined" + elif len(texts) > 1: + name = f"{name}_{line}" + if parameters['candidates'] > 1: + name = f"{name}_{candidate}" + return name + + def get_info( voice, settings = None, latents = True ): + info = {} + info.update(parameters) + + info['time'] = time.time()-full_start_time + info['datetime'] = datetime.now().isoformat() + + info['progress'] = None + del info['progress'] + + if info['delimiter'] == "\n": + info['delimiter'] = "\\n" + + if settings is not None: + for k in settings: + if k in info: + info[k] = settings[k] + return info + + INFERENCING = True + for line, cut_text in enumerate(texts): + tqdm_prefix = f'[{str(line+1)}/{str(len(texts))}]' + print(f"{tqdm_prefix} Generating line: {cut_text}") + start_time = time.time() + + # do setting editing + match = re.findall(r'^(\{.+\}) (.+?)$', cut_text) + override = None + if match and len(match) > 0: + match = match[0] + try: + override = json.loads(match[0]) + cut_text = match[1].strip() + except Exception as e: + raise Exception("Prompt settings editing requested, but received invalid JSON") + + name = get_name(line=line, candidate=0) + + settings = get_settings( override=override ) + references = settings['references'] + settings.pop("references") + settings['out_path'] = f'{outdir}/{cleanup_voice_name(voice)}_{name}.wav' + + gen = tts.inference(cut_text, references, **settings ) + + run_time = time.time()-start_time + print(f"Generating line took {run_time} seconds") + + if not isinstance(gen, list): + gen = [gen] + + for j, g in enumerate(gen): + wav, sr = g + name = get_name(line=line, candidate=j) + + settings['text'] = cut_text + settings['time'] = run_time + settings['datetime'] = datetime.now().isoformat() + + # save here in case some error happens mid-batch + #torchaudio.save(f'{outdir}/{cleanup_voice_name(voice)}_{name}.wav', wav.cpu(), sr) + #soundfile.write(f'{outdir}/{cleanup_voice_name(voice)}_{name}.wav', wav.cpu()[0,0], sr) + wav, sr = torchaudio.load(f'{outdir}/{cleanup_voice_name(voice)}_{name}.wav') + + audio_cache[name] = { + 'audio': wav, + 'settings': get_info(voice=override['voice'] if override and 'voice' in override else voice, settings=settings) + } + + del gen + do_gc() + INFERENCING = False + + for k in audio_cache: + audio = audio_cache[k]['audio'] + + audio, _ = resample(audio, tts.output_sample_rate, args.output_sample_rate) + if volume_adjust is not None: + audio = volume_adjust(audio) + + audio_cache[k]['audio'] = audio + torchaudio.save(f'{outdir}/{cleanup_voice_name(voice)}_{k}.wav', audio, args.output_sample_rate) + + output_voices = [] + for candidate in range(parameters['candidates']): + if len(texts) > 1: + audio_clips = [] + for line in range(len(texts)): + name = get_name(line=line, candidate=candidate) + audio = audio_cache[name]['audio'] + audio_clips.append(audio) + + name = get_name(candidate=candidate, combined=True) + audio = torch.cat(audio_clips, dim=-1) + torchaudio.save(f'{outdir}/{cleanup_voice_name(voice)}_{name}.wav', audio, args.output_sample_rate) + + audio = audio.squeeze(0).cpu() + audio_cache[name] = { + 'audio': audio, + 'settings': get_info(voice=voice), + 'output': True + } + else: + name = get_name(candidate=candidate) + audio_cache[name]['output'] = True + + + if args.voice_fixer: + if not voicefixer: + notify_progress("Loading voicefix...", progress=progress) + load_voicefixer() + + try: + fixed_cache = {} + for name in tqdm(audio_cache, desc="Running voicefix..."): 
+ del audio_cache[name]['audio'] + if 'output' not in audio_cache[name] or not audio_cache[name]['output']: + continue + + path = f'{outdir}/{cleanup_voice_name(voice)}_{name}.wav' + fixed = f'{outdir}/{cleanup_voice_name(voice)}_{name}_fixed.wav' + voicefixer.restore( + input=path, + output=fixed, + cuda=get_device_name() == "cuda" and args.voice_fixer_use_cuda, + #mode=mode, + ) + + fixed_cache[f'{name}_fixed'] = { + 'settings': audio_cache[name]['settings'], + 'output': True + } + audio_cache[name]['output'] = False + + for name in fixed_cache: + audio_cache[name] = fixed_cache[name] + except Exception as e: + print(e) + print("\nFailed to run Voicefixer") + + for name in audio_cache: + if 'output' not in audio_cache[name] or not audio_cache[name]['output']: + if args.prune_nonfinal_outputs: + audio_cache[name]['pruned'] = True + os.remove(f'{outdir}/{cleanup_voice_name(voice)}_{name}.wav') + continue + + output_voices.append(f'{outdir}/{cleanup_voice_name(voice)}_{name}.wav') + + if not args.embed_output_metadata: + with open(f'{outdir}/{cleanup_voice_name(voice)}_{name}.json', 'w', encoding="utf-8") as f: + f.write(json.dumps(audio_cache[name]['settings'], indent='\t') ) + + if args.embed_output_metadata: + for name in tqdm(audio_cache, desc="Embedding metadata..."): + if 'pruned' in audio_cache[name] and audio_cache[name]['pruned']: + continue + + metadata = music_tag.load_file(f"{outdir}/{cleanup_voice_name(voice)}_{name}.wav") + metadata['lyrics'] = json.dumps(audio_cache[name]['settings']) + metadata.save() + + if sample_voice is not None: + sample_voice = (tts.input_sample_rate, sample_voice.numpy()) + + info = get_info(voice=voice, latents=False) + print(f"Generation took {info['time']} seconds, saved to '{output_voices[0]}'\n") + + info['seed'] = usedSeed + if 'latents' in info: + del info['latents'] + + os.makedirs('./config/', exist_ok=True) + with open(f'./config/generate.json', 'w', encoding="utf-8") as f: + f.write(json.dumps(info, indent='\t') ) + + stats = [ + [ parameters['seed'], "{:.3f}".format(info['time']) ] + ] + + return ( + sample_voice, + output_voices, + stats, + ) + +def generate_tortoise(**kwargs): + parameters = {} + parameters.update(kwargs) + + voice = parameters['voice'] + progress = parameters['progress'] if 'progress' in parameters else None + if parameters['seed'] == 0: + parameters['seed'] = None + + usedSeed = parameters['seed'] + + global args + global tts + + unload_whisper() + unload_voicefixer() + + if not tts: + # should check if it's loading or unloaded, and load it if it's unloaded + if tts_loading: + raise Exception("TTS is still initializing...") + load_tts() + if hasattr(tts, "loading") and tts.loading: + raise Exception("TTS is still initializing...") + + do_gc() + + voice_samples = None + conditioning_latents = None + sample_voice = None + + voice_cache = {} + def fetch_voice( voice ): + cache_key = f'{voice}:{tts.autoregressive_model_hash[:8]}' + if cache_key in voice_cache: + return voice_cache[cache_key] + + print(f"Loading voice: {voice} with model {tts.autoregressive_model_hash[:8]}") + sample_voice = None + if voice == "microphone": + if parameters['mic_audio'] is None: + raise Exception("Please provide audio from mic when choosing `microphone` as a voice input") + voice_samples, conditioning_latents = [load_audio(parameters['mic_audio'], tts.input_sample_rate)], None + elif voice == "random": + voice_samples, conditioning_latents = None, tts.get_random_conditioning_latents() + else: + if progress is not None: + 
notify_progress(f"Loading voice: {voice}", progress=progress) + + voice_samples, conditioning_latents = load_voice(voice, model_hash=tts.autoregressive_model_hash) + + if voice_samples and len(voice_samples) > 0: + if conditioning_latents is None: + conditioning_latents = compute_latents(voice=voice, voice_samples=voice_samples, voice_latents_chunks=parameters['voice_latents_chunks']) + + sample_voice = torch.cat(voice_samples, dim=-1).squeeze().cpu() + voice_samples = None + + voice_cache[cache_key] = (voice_samples, conditioning_latents, sample_voice) + return voice_cache[cache_key] + + def get_settings( override=None ): + settings = { + 'temperature': float(parameters['temperature']), + + 'top_p': float(parameters['top_p']), + 'diffusion_temperature': float(parameters['diffusion_temperature']), + 'length_penalty': float(parameters['length_penalty']), + 'repetition_penalty': float(parameters['repetition_penalty']), + 'cond_free_k': float(parameters['cond_free_k']), + + 'num_autoregressive_samples': parameters['num_autoregressive_samples'], + 'sample_batch_size': args.sample_batch_size, + 'diffusion_iterations': parameters['diffusion_iterations'], + + 'voice_samples': None, + 'conditioning_latents': None, + + 'use_deterministic_seed': parameters['seed'], + 'return_deterministic_state': True, + 'k': parameters['candidates'], + 'diffusion_sampler': parameters['diffusion_sampler'], + 'breathing_room': parameters['breathing_room'], + 'half_p': "Half Precision" in parameters['experimentals'], + 'cond_free': "Conditioning-Free" in parameters['experimentals'], + 'cvvp_amount': parameters['cvvp_weight'], + + 'autoregressive_model': args.autoregressive_model, + 'diffusion_model': args.diffusion_model, + 'tokenizer_json': args.tokenizer_json, + } + + # could be better to just do a ternary on everything above, but i am not a professional + selected_voice = voice + if override is not None: + if 'voice' in override: + selected_voice = override['voice'] + + for k in override: + if k not in settings: + continue + settings[k] = override[k] + + if settings['autoregressive_model'] is not None: + if settings['autoregressive_model'] == "auto": + settings['autoregressive_model'] = deduce_autoregressive_model(selected_voice) + tts.load_autoregressive_model(settings['autoregressive_model']) + + if settings['diffusion_model'] is not None: + if settings['diffusion_model'] == "auto": + settings['diffusion_model'] = deduce_diffusion_model(selected_voice) + tts.load_diffusion_model(settings['diffusion_model']) + + if settings['tokenizer_json'] is not None: + tts.load_tokenizer_json(settings['tokenizer_json']) + + settings['voice_samples'], settings['conditioning_latents'], _ = fetch_voice(voice=selected_voice) + + # clamp it down for the insane users who want this + # it would be wiser to enforce the sample size to the batch size, but this is what the user wants + settings['sample_batch_size'] = args.sample_batch_size + if not settings['sample_batch_size']: + settings['sample_batch_size'] = tts.autoregressive_batch_size + if settings['num_autoregressive_samples'] < settings['sample_batch_size']: + settings['sample_batch_size'] = settings['num_autoregressive_samples'] + + if settings['conditioning_latents'] is not None and len(settings['conditioning_latents']) == 2 and settings['cvvp_amount'] > 0: + print("Requesting weighing against CVVP weight, but voice latents are missing some extra data. 
Please regenerate your voice latents with 'Slimmer voice latents' unchecked.") + settings['cvvp_amount'] = 0 + + return settings + + if not parameters['delimiter']: + parameters['delimiter'] = "\n" + elif parameters['delimiter'] == "\\n": + parameters['delimiter'] = "\n" + + if parameters['delimiter'] and parameters['delimiter'] != "" and parameters['delimiter'] in parameters['text']: + texts = parameters['text'].split(parameters['delimiter']) + else: + texts = split_and_recombine_text(parameters['text']) + + full_start_time = time.time() + + outdir = f"{args.results_folder}/{voice}/" + os.makedirs(outdir, exist_ok=True) + + audio_cache = {} + + volume_adjust = torchaudio.transforms.Vol(gain=args.output_volume, gain_type="amplitude") if args.output_volume != 1 else None + + idx = 0 + idx_cache = {} + for i, file in enumerate(os.listdir(outdir)): + filename = os.path.basename(file) + extension = os.path.splitext(filename)[-1][1:] + if extension != "json" and extension != "wav": + continue + match = re.findall(rf"^{voice}_(\d+)(?:.+?)?{extension}$", filename) + if match and len(match) > 0: + key = int(match[0]) + idx_cache[key] = True + + if len(idx_cache) > 0: + keys = sorted(list(idx_cache.keys())) + idx = keys[-1] + 1 + + idx = pad(idx, 4) + + def get_name(line=0, candidate=0, combined=False): + name = f"{idx}" + if combined: + name = f"{name}_combined" + elif len(texts) > 1: + name = f"{name}_{line}" + if parameters['candidates'] > 1: + name = f"{name}_{candidate}" + return name + + def get_info( voice, settings = None, latents = True ): + info = {} + info.update(parameters) + + info['time'] = time.time()-full_start_time + info['datetime'] = datetime.now().isoformat() + + info['model'] = tts.autoregressive_model_path + info['model_hash'] = tts.autoregressive_model_hash + + info['progress'] = None + del info['progress'] + + if info['delimiter'] == "\n": + info['delimiter'] = "\\n" + + if settings is not None: + for k in settings: + if k in info: + info[k] = settings[k] + + if 'half_p' in settings and 'cond_free' in settings: + info['experimentals'] = [] + if settings['half_p']: + info['experimentals'].append("Half Precision") + if settings['cond_free']: + info['experimentals'].append("Conditioning-Free") + + if latents and "latents" not in info: + voice = info['voice'] + model_hash = settings["model_hash"][:8] if settings is not None and "model_hash" in settings else tts.autoregressive_model_hash[:8] + + dir = f'{get_voice_dir()}/{voice}/' + latents_path = f'{dir}/cond_latents_{model_hash}.pth' + + if voice == "random" or voice == "microphone": + if latents and settings is not None and settings['conditioning_latents']: + os.makedirs(dir, exist_ok=True) + torch.save(conditioning_latents, latents_path) + + if latents_path and os.path.exists(latents_path): + try: + with open(latents_path, 'rb') as f: + info['latents'] = base64.b64encode(f.read()).decode("ascii") + except Exception as e: + pass + + return info + + INFERENCING = True + for line, cut_text in enumerate(texts): + if should_phonemize(): + cut_text = phonemizer( cut_text ) + + if parameters['emotion'] == "Custom": + if parameters['prompt'] and parameters['prompt'].strip() != "": + cut_text = f"[{parameters['prompt']},] {cut_text}" + elif parameters['emotion'] != "None" and parameters['emotion']: + cut_text = f"[I am really {parameters['emotion'].lower()},] {cut_text}" + + tqdm_prefix = f'[{str(line+1)}/{str(len(texts))}]' + print(f"{tqdm_prefix} Generating line: {cut_text}") + start_time = time.time() + + # do setting editing + 
match = re.findall(r'^(\{.+\}) (.+?)$', cut_text) + override = None + if match and len(match) > 0: + match = match[0] + try: + override = json.loads(match[0]) + cut_text = match[1].strip() + except Exception as e: + raise Exception("Prompt settings editing requested, but received invalid JSON") + + settings = get_settings( override=override ) + gen, additionals = tts.tts(cut_text, **settings ) + + parameters['seed'] = additionals[0] + run_time = time.time()-start_time + print(f"Generating line took {run_time} seconds") + + if not isinstance(gen, list): + gen = [gen] + + for j, g in enumerate(gen): + audio = g.squeeze(0).cpu() + name = get_name(line=line, candidate=j) + + settings['text'] = cut_text + settings['time'] = run_time + settings['datetime'] = datetime.now().isoformat() + if args.tts_backend == "tortoise": + settings['model'] = tts.autoregressive_model_path + settings['model_hash'] = tts.autoregressive_model_hash + + audio_cache[name] = { + 'audio': audio, + 'settings': get_info(voice=override['voice'] if override and 'voice' in override else voice, settings=settings) + } + # save here in case some error happens mid-batch + torchaudio.save(f'{outdir}/{cleanup_voice_name(voice)}_{name}.wav', audio, tts.output_sample_rate) + + del gen + do_gc() + INFERENCING = False + + for k in audio_cache: + audio = audio_cache[k]['audio'] + + audio, _ = resample(audio, tts.output_sample_rate, args.output_sample_rate) + if volume_adjust is not None: + audio = volume_adjust(audio) + + audio_cache[k]['audio'] = audio + torchaudio.save(f'{outdir}/{cleanup_voice_name(voice)}_{k}.wav', audio, args.output_sample_rate) + + output_voices = [] + for candidate in range(parameters['candidates']): + if len(texts) > 1: + audio_clips = [] + for line in range(len(texts)): + name = get_name(line=line, candidate=candidate) + audio = audio_cache[name]['audio'] + audio_clips.append(audio) + + name = get_name(candidate=candidate, combined=True) + audio = torch.cat(audio_clips, dim=-1) + torchaudio.save(f'{outdir}/{cleanup_voice_name(voice)}_{name}.wav', audio, args.output_sample_rate) + + audio = audio.squeeze(0).cpu() + audio_cache[name] = { + 'audio': audio, + 'settings': get_info(voice=voice), + 'output': True + } + else: + name = get_name(candidate=candidate) + audio_cache[name]['output'] = True + + + if args.voice_fixer: + if not voicefixer: + notify_progress("Loading voicefix...", progress=progress) + load_voicefixer() + + try: + fixed_cache = {} + for name in tqdm(audio_cache, desc="Running voicefix..."): + del audio_cache[name]['audio'] + if 'output' not in audio_cache[name] or not audio_cache[name]['output']: + continue + + path = f'{outdir}/{cleanup_voice_name(voice)}_{name}.wav' + fixed = f'{outdir}/{cleanup_voice_name(voice)}_{name}_fixed.wav' + voicefixer.restore( + input=path, + output=fixed, + cuda=get_device_name() == "cuda" and args.voice_fixer_use_cuda, + #mode=mode, + ) + + fixed_cache[f'{name}_fixed'] = { + 'settings': audio_cache[name]['settings'], + 'output': True + } + audio_cache[name]['output'] = False + + for name in fixed_cache: + audio_cache[name] = fixed_cache[name] + except Exception as e: + print(e) + print("\nFailed to run Voicefixer") + + for name in audio_cache: + if 'output' not in audio_cache[name] or not audio_cache[name]['output']: + if args.prune_nonfinal_outputs: + audio_cache[name]['pruned'] = True + os.remove(f'{outdir}/{cleanup_voice_name(voice)}_{name}.wav') + continue + + output_voices.append(f'{outdir}/{cleanup_voice_name(voice)}_{name}.wav') + + if not 
args.embed_output_metadata: + with open(f'{outdir}/{cleanup_voice_name(voice)}_{name}.json', 'w', encoding="utf-8") as f: + f.write(json.dumps(audio_cache[name]['settings'], indent='\t') ) + + if args.embed_output_metadata: + for name in tqdm(audio_cache, desc="Embedding metadata..."): + if 'pruned' in audio_cache[name] and audio_cache[name]['pruned']: + continue + + metadata = music_tag.load_file(f"{outdir}/{cleanup_voice_name(voice)}_{name}.wav") + metadata['lyrics'] = json.dumps(audio_cache[name]['settings']) + metadata.save() + + if sample_voice is not None: + sample_voice = (tts.input_sample_rate, sample_voice.numpy()) + + info = get_info(voice=voice, latents=False) + print(f"Generation took {info['time']} seconds, saved to '{output_voices[0]}'\n") + + info['seed'] = usedSeed + if 'latents' in info: + del info['latents'] + + os.makedirs('./config/', exist_ok=True) + with open(f'./config/generate.json', 'w', encoding="utf-8") as f: + f.write(json.dumps(info, indent='\t') ) + + stats = [ + [ parameters['seed'], "{:.3f}".format(info['time']) ] + ] + + return ( + sample_voice, + output_voices, + stats, + ) + +def cancel_generate(): + if not INFERENCING: + return + + import tortoise.api + + tortoise.api.STOP_SIGNAL = True + +def hash_file(path, algo="md5", buffer_size=0): + hash = None + if algo == "md5": + hash = hashlib.md5() + elif algo == "sha1": + hash = hashlib.sha1() + else: + raise Exception(f'Unknown hash algorithm specified: {algo}') + + if not os.path.exists(path): + raise Exception(f'Path not found: {path}') + + with open(path, 'rb') as f: + if buffer_size > 0: + while True: + data = f.read(buffer_size) + if not data: + break + hash.update(data) + else: + hash.update(f.read()) + + return "{0}".format(hash.hexdigest()) + +def update_baseline_for_latents_chunks( voice ): + global current_voice + current_voice = voice + + path = f'{get_voice_dir()}/{voice}/' + if not os.path.isdir(path): + return 1 + + dataset_file = f'./training/{voice}/train.txt' + if os.path.exists(dataset_file): + return 0 # 0 will leverage using the LJspeech dataset for computing latents + + files = os.listdir(path) + + total = 0 + total_duration = 0 + + for file in files: + if file[-4:] != ".wav": + continue + + metadata = torchaudio.info(f'{path}/{file}') + duration = metadata.num_frames / metadata.sample_rate + total_duration += duration + total = total + 1 + + + # brain too fried to figure out a better way + if args.autocalculate_voice_chunk_duration_size == 0: + return int(total_duration / total) if total > 0 else 1 + return int(total_duration / args.autocalculate_voice_chunk_duration_size) if total_duration > 0 else 1 + +def compute_latents(voice=None, voice_samples=None, voice_latents_chunks=0, original_ar=False, original_diffusion=False): + global tts + global args + + unload_whisper() + unload_voicefixer() + + if not tts: + if tts_loading: + raise Exception("TTS is still initializing...") + load_tts() + + if hasattr(tts, "loading") and tts.loading: + raise Exception("TTS is still initializing...") + + if args.tts_backend == "bark": + tts.create_voice( voice ) + return + + if args.autoregressive_model == "auto": + tts.load_autoregressive_model(deduce_autoregressive_model(voice)) + + if voice: + load_from_dataset = voice_latents_chunks == 0 + + if load_from_dataset: + dataset_path = f'./training/{voice}/train.txt' + if not os.path.exists(dataset_path): + load_from_dataset = False + else: + with open(dataset_path, 'r', encoding="utf-8") as f: + lines = f.readlines() + + print("Leveraging dataset for 
computing latents") + + voice_samples = [] + max_length = 0 + for line in lines: + filename = f'./training/{voice}/{line.split("|")[0]}' + + waveform = load_audio(filename, 22050) + max_length = max(max_length, waveform.shape[-1]) + voice_samples.append(waveform) + + for i in range(len(voice_samples)): + voice_samples[i] = pad_or_truncate(voice_samples[i], max_length) + + voice_latents_chunks = len(voice_samples) + if voice_latents_chunks == 0: + print("Dataset is empty!") + load_from_dataset = True + if not load_from_dataset: + voice_samples, _ = load_voice(voice, load_latents=False) + + if voice_samples is None: + return + + conditioning_latents = tts.get_conditioning_latents(voice_samples, return_mels=not args.latents_lean_and_mean, slices=voice_latents_chunks, force_cpu=args.force_cpu_for_conditioning_latents, original_ar=original_ar, original_diffusion=original_diffusion) + + if len(conditioning_latents) == 4: + conditioning_latents = (conditioning_latents[0], conditioning_latents[1], conditioning_latents[2], None) + + outfile = f'{get_voice_dir()}/{voice}/cond_latents_{tts.autoregressive_model_hash[:8]}.pth' + torch.save(conditioning_latents, outfile) + print(f'Saved voice latents: {outfile}') + + return conditioning_latents + +# superfluous, but it cleans up some things +class TrainingState(): + def __init__(self, config_path, keep_x_past_checkpoints=0, start=True): + self.killed = False + + self.training_dir = os.path.dirname(config_path) + with open(config_path, 'r') as file: + self.yaml_config = yaml.safe_load(file) + + self.json_config = json.load(open(f"{self.training_dir}/train.json", 'r', encoding="utf-8")) + self.dataset_path = f"{self.training_dir}/train.txt" + with open(self.dataset_path, 'r', encoding="utf-8") as f: + self.dataset_size = len(f.readlines()) + + self.batch_size = self.json_config["batch_size"] + self.save_rate = self.json_config["save_rate"] + + self.epoch = 0 + self.epochs = self.json_config["epochs"] + self.it = 0 + self.its = calc_iterations( self.epochs, self.dataset_size, self.batch_size ) + self.step = 0 + self.steps = int(self.its / self.dataset_size) + self.checkpoint = 0 + self.checkpoints = int((self.its - self.it) / self.save_rate) + + self.gpus = self.json_config['gpus'] + + self.buffer = [] + + self.open_state = False + self.training_started = False + + self.info = {} + + self.it_rate = "" + self.it_rates = 0 + + self.epoch_rate = "" + + self.eta = "?" + self.eta_hhmmss = "?" 
+
+		self.nan_detected = False
+
+		self.last_info_check_at = 0
+		self.statistics = {
+			'loss': [],
+			'lr': [],
+			'grad_norm': [],
+		}
+		self.losses = []
+		self.metrics = {
+			'step': "",
+			'rate': "",
+			'loss': "",
+		}
+
+		self.loss_milestones = [ 1.0, 0.15, 0.05 ]
+
+		if args.tts_backend=="vall-e":
+			self.valle_last_it = 0
+			self.valle_steps = 0
+
+		if keep_x_past_checkpoints > 0:
+			self.cleanup_old(keep=keep_x_past_checkpoints)
+		if start:
+			self.spawn_process(config_path=config_path, gpus=self.gpus)
+
+	def spawn_process(self, config_path, gpus=1):
+		# VALL-E training is launched through the DeepSpeed launcher; other backends use the train.bat / train.sh wrapper scripts
+		if args.tts_backend == "vall-e":
+			self.cmd = ['deepspeed', f'--num_gpus={gpus}', '--module', 'vall_e.train', f'yaml="{config_path}"']
+		else:
+			self.cmd = ['train.bat', config_path] if os.name == "nt" else ['./train.sh', config_path]
+
+		print("Spawning process: ", " ".join(self.cmd))
+		self.process = subprocess.Popen(self.cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True)
+
+	def parse_metrics(self, data):
+		if isinstance(data, str):
+			if data.find('Training Metrics:') >= 0:
+				data = json.loads(data.split("Training Metrics:")[-1])
+				data['mode'] = "training"
+			elif data.find('Validation Metrics:') >= 0:
+				data = json.loads(data.split("Validation Metrics:")[-1])
+				data['mode'] = "validation"
+			else:
+				return
+
+		self.info = data
+		if 'epoch' in self.info:
+			self.epoch = int(self.info['epoch'])
+		if 'it' in self.info:
+			self.it = int(self.info['it'])
+		if 'step' in self.info:
+			self.step = int(self.info['step'])
+		if 'steps' in self.info:
+			self.steps = int(self.info['steps'])
+
+		if 'elapsed_time' in self.info:
+			self.info['iteration_rate'] = self.info['elapsed_time']
+			del self.info['elapsed_time']
+
+		if 'iteration_rate' in self.info:
+			it_rate = self.info['iteration_rate']
+			self.it_rate = f'{"{:.3f}".format(1/it_rate)}it/s' if 0 < it_rate and it_rate < 1 else f'{"{:.3f}".format(it_rate)}s/it'
+			self.it_rates += it_rate
+
+			if self.it_rates > 0 and self.it * self.steps > 0:
+				epoch_rate = self.it_rates / self.it * self.steps
+				self.epoch_rate = f'{"{:.3f}".format(1/epoch_rate)}epoch/s' if 0 < epoch_rate and epoch_rate < 1 else f'{"{:.3f}".format(epoch_rate)}s/epoch'
+
+		try:
+			self.eta = (self.its - self.it) * (self.it_rates / self.it)
+			eta = str(timedelta(seconds=int(self.eta)))
+			self.eta_hhmmss = eta
+		except Exception as e:
+			self.eta_hhmmss = "?"
+ pass + + self.metrics['step'] = [f"{self.epoch}/{self.epochs}"] + if self.epochs != self.its: + self.metrics['step'].append(f"{self.it}/{self.its}") + if self.steps > 1: + self.metrics['step'].append(f"{self.step}/{self.steps}") + self.metrics['step'] = ", ".join(self.metrics['step']) + + if args.tts_backend == "tortoise": + epoch = self.epoch + (self.step / self.steps) + else: + epoch = self.info['epoch'] if 'epoch' in self.info else self.it + + if self.it > 0: + # probably can double for-loop but whatever + keys = { + 'lrs': ['lr'], + 'losses': ['loss_text_ce', 'loss_mel_ce'], + 'accuracies': [], + 'precisions': [], + 'grad_norms': [], + } + if args.tts_backend == "vall-e": + keys['lrs'] = [ + 'ar.lr', 'nar.lr', + ] + keys['losses'] = [ + # 'ar.loss', 'nar.loss', 'ar+nar.loss', + 'ar.loss.nll', 'nar.loss.nll', + ] + + keys['accuracies'] = [ + 'ar.loss.acc', 'nar.loss.acc', + 'ar.stats.acc', 'nar.loss.acc', + ] + keys['precisions'] = [ 'ar.loss.precision', 'nar.loss.precision', ] + keys['grad_norms'] = ['ar.grad_norm', 'nar.grad_norm'] + + for k in keys['lrs']: + if k not in self.info: + continue + + self.statistics['lr'].append({'epoch': epoch, 'it': self.it, 'value': self.info[k], 'type': k}) + + for k in keys['accuracies']: + if k not in self.info: + continue + + self.statistics['loss'].append({'epoch': epoch, 'it': self.it, 'value': self.info[k], 'type': k}) + + for k in keys['precisions']: + if k not in self.info: + continue + + self.statistics['loss'].append({'epoch': epoch, 'it': self.it, 'value': self.info[k], 'type': k}) + + for k in keys['losses']: + if k not in self.info: + continue + + prefix = "" + + if "mode" in self.info and self.info["mode"] == "validation": + prefix = f'{self.info["name"] if "name" in self.info else "val"}_' + + self.statistics['loss'].append({'epoch': epoch, 'it': self.it, 'value': self.info[k], 'type': f'{prefix}{k}' }) + + self.losses.append( self.statistics['loss'][-1] ) + + for k in keys['grad_norms']: + if k not in self.info: + continue + self.statistics['grad_norm'].append({'epoch': epoch, 'it': self.it, 'value': self.info[k], 'type': k}) + + return data + + def get_status(self): + message = None + + self.metrics['rate'] = [] + if self.epoch_rate: + self.metrics['rate'].append(self.epoch_rate) + if self.it_rate and self.epoch_rate[:-7] != self.it_rate[:-4]: + self.metrics['rate'].append(self.it_rate) + self.metrics['rate'] = ", ".join(self.metrics['rate']) + + eta_hhmmss = self.eta_hhmmss if self.eta_hhmmss else "?" 
+ + self.metrics['loss'] = [] + if 'lr' in self.info: + self.metrics['loss'].append(f'LR: {"{:.3e}".format(self.info["lr"])}') + + if len(self.losses) > 0: + self.metrics['loss'].append(f'Loss: {"{:.3f}".format(self.losses[-1]["value"])}') + + if False and len(self.losses) >= 2: + deriv = 0 + accum_length = len(self.losses)//2 # i *guess* this is fine when you think about it + loss_value = self.losses[-1]["value"] + + for i in range(accum_length): + d1_loss = self.losses[accum_length-i-1]["value"] + d2_loss = self.losses[accum_length-i-2]["value"] + dloss = (d2_loss - d1_loss) + + d1_step = self.losses[accum_length-i-1]["it"] + d2_step = self.losses[accum_length-i-2]["it"] + dstep = (d2_step - d1_step) + + if dstep == 0: + continue + + inst_deriv = dloss / dstep + deriv += inst_deriv + + deriv = deriv / accum_length + + print("Deriv: ", deriv) + + if deriv != 0: # dloss < 0: + next_milestone = None + for milestone in self.loss_milestones: + if loss_value > milestone: + next_milestone = milestone + break + + print(f"Loss value: {loss_value} | Next milestone: {next_milestone} | Distance: {loss_value - next_milestone}") + + if next_milestone: + # tfw can do simple calculus but not basic algebra in my head + est_its = (next_milestone - loss_value) / deriv * 100 + print(f"Estimated: {est_its}") + if est_its >= 0: + self.metrics['loss'].append(f'Est. milestone {next_milestone} in: {int(est_its)}its') + else: + est_loss = inst_deriv * (self.its - self.it) + loss_value + if est_loss >= 0: + self.metrics['loss'].append(f'Est. final loss: {"{:.3f}".format(est_loss)}') + + self.metrics['loss'] = ", ".join(self.metrics['loss']) + + message = f"[{self.metrics['step']}] [{self.metrics['rate']}] [ETA: {eta_hhmmss}] [{self.metrics['loss']}]" + if self.nan_detected: + message = f"[!NaN DETECTED! 
{self.nan_detected}] {message}" + + return message + + def load_statistics(self, update=False): + if not os.path.isdir(self.training_dir): + return + + if args.tts_backend == "tortoise": + logs = sorted([f'{self.training_dir}/finetune/{d}' for d in os.listdir(f'{self.training_dir}/finetune/') if d[-4:] == ".log" ]) + else: + log_dir = "logs" + logs = sorted([f'{self.training_dir}/{log_dir}/{d}/log.txt' for d in os.listdir(f'{self.training_dir}/{log_dir}/') ]) + + if update: + logs = [logs[-1]] + + infos = {} + highest_step = self.last_info_check_at + + if not update: + self.statistics['loss'] = [] + self.statistics['lr'] = [] + self.statistics['grad_norm'] = [] + self.it_rates = 0 + + unq = {} + averager = None + prev_state = 0 + + for log in logs: + with open(log, 'r', encoding="utf-8") as f: + lines = f.readlines() + + for line in lines: + line = line.strip() + if not line: + continue + + if line[-1] == ".": + line = line[:-1] + + if line.find('Training Metrics:') >= 0: + split = line.split("Training Metrics:")[-1] + data = json.loads(split) + + name = "train" + mode = "training" + prev_state = 0 + elif line.find('Validation Metrics:') >= 0: + data = json.loads(line.split("Validation Metrics:")[-1]) + if "it" not in data: + data['it'] = it + if "epoch" not in data: + data['epoch'] = epoch + + # name = data['name'] if 'name' in data else "val" + mode = "validation" + + if prev_state == 0: + name = "subtrain" + else: + name = "val" + + prev_state += 1 + else: + continue + + if "it" not in data: + continue + + it = data['it'] + epoch = data['epoch'] + + if args.tts_backend == "vall-e": + if not averager or averager['key'] != f'{it}_{name}' or averager['mode'] != mode: + averager = { + 'key': f'{it}_{name}', + 'name': name, + 'mode': mode, + "metrics": {} + } + for k in data: + if data[k] is None: + continue + averager['metrics'][k] = [ data[k] ] + else: + for k in data: + if data[k] is None: + continue + if k not in averager['metrics']: + averager['metrics'][k] = [ data[k] ] + else: + averager['metrics'][k].append( data[k] ) + + unq[f'{it}_{mode}_{name}'] = averager + else: + unq[f'{it}_{mode}_{name}'] = data + + if update and it <= self.last_info_check_at: + continue + + blacklist = [ "batch", "eval" ] + for it in unq: + if args.tts_backend == "vall-e": + stats = unq[it] + data = {k: sum(v) / len(v) for k, v in stats['metrics'].items() if k not in blacklist } + #data = {k: min(v) for k, v in stats['metrics'].items() if k not in blacklist } + #data = {k: max(v) for k, v in stats['metrics'].items() if k not in blacklist } + data['name'] = stats['name'] + data['mode'] = stats['mode'] + data['steps'] = len(stats['metrics']['it']) + else: + data = unq[it] + self.parse_metrics(data) + + self.last_info_check_at = highest_step + + def cleanup_old(self, keep=2): + if keep <= 0: + return + + if args.tts_backend == "vall-e": + return + + if not os.path.isdir(f'{self.training_dir}/finetune/'): + return + + models = sorted([ int(d[:-8]) for d in os.listdir(f'{self.training_dir}/finetune/models/') if d[-8:] == "_gpt.pth" ]) + states = sorted([ int(d[:-6]) for d in os.listdir(f'{self.training_dir}/finetune/training_state/') if d[-6:] == ".state" ]) + remove_models = models[:-keep] + remove_states = states[:-keep] + + for d in remove_models: + path = f'{self.training_dir}/finetune/models/{d}_gpt.pth' + print("Removing", path) + os.remove(path) + for d in remove_states: + path = f'{self.training_dir}/finetune/training_state/{d}.state' + print("Removing", path) + os.remove(path) + + def parse(self, line, 
verbose=False, keep_x_past_checkpoints=0, buffer_size=8, progress=None ): + self.buffer.append(f'{line}') + + data = None + percent = 0 + message = None + should_return = False + + MESSAGE_START = 'Start training from epoch' + MESSAGE_FINSIHED = 'Finished training' + MESSAGE_SAVING = 'Saving models and training states.' + + MESSAGE_METRICS_TRAINING = 'Training Metrics:' + MESSAGE_METRICS_VALIDATION = 'Validation Metrics:' + + if line.find(MESSAGE_FINSIHED) >= 0: + self.killed = True + # rip out iteration info + elif not self.training_started: + if line.find(MESSAGE_START) >= 0: + self.training_started = True # could just leverage the above variable, but this is python, and there's no point in these aggressive microoptimizations + + match = re.findall(r'epoch: ([\d,]+)', line) + if match and len(match) > 0: + self.epoch = int(match[0].replace(",", "")) + match = re.findall(r'iter: ([\d,]+)', line) + if match and len(match) > 0: + self.it = int(match[0].replace(",", "")) + + self.checkpoints = int((self.its - self.it) / self.save_rate) + + self.load_statistics() + + should_return = True + else: + if line.find(MESSAGE_SAVING) >= 0: + self.checkpoint += 1 + message = f"[{self.checkpoint}/{self.checkpoints}] Saving checkpoint..." + percent = self.checkpoint / self.checkpoints + + self.cleanup_old(keep=keep_x_past_checkpoints) + elif line.find(MESSAGE_METRICS_TRAINING) >= 0: + data = json.loads(line.split(MESSAGE_METRICS_TRAINING)[-1]) + data['mode'] = "training" + elif line.find(MESSAGE_METRICS_VALIDATION) >= 0: + data = json.loads(line.split(MESSAGE_METRICS_VALIDATION)[-1]) + data['mode'] = "validation" + + if data is not None: + if ': nan' in line and not self.nan_detected: + self.nan_detected = self.it + + self.parse_metrics( data ) + message = self.get_status() + + if message: + percent = self.it / float(self.its) # self.epoch / float(self.epochs) + if progress is not None: + progress(percent, message) + + self.buffer.append(f'[{"{:.3f}".format(percent*100)}%] {message}') + should_return = True + + if verbose and not self.training_started: + should_return = True + + self.buffer = self.buffer[-buffer_size:] + + result = None + if should_return: + result = "".join(self.buffer) if not self.training_started else message + + return ( + result, + percent, + message, + ) + +try: + import altair as alt + alt.data_transformers.enable('default', max_rows=None) +except Exception as e: + print(e) + pass + +def run_training(config_path, verbose=False, keep_x_past_checkpoints=0, progress=gr.Progress(track_tqdm=True)): + global training_state + if training_state and training_state.process: + return "Training already in progress" + + + # ensure we have the dvae.pth + if args.tts_backend == "tortoise": + get_model_path('dvae.pth') + + # I don't know if this is still necessary, as it was bitching at me for not doing this, despite it being in a separate process + torch.multiprocessing.freeze_support() + + unload_tts() + unload_whisper() + unload_voicefixer() + + training_state = TrainingState(config_path=config_path, keep_x_past_checkpoints=keep_x_past_checkpoints) + + for line in iter(training_state.process.stdout.readline, ""): + if training_state is None or training_state.killed: + return + + result, percent, message = training_state.parse( line=line, verbose=verbose, keep_x_past_checkpoints=keep_x_past_checkpoints, progress=progress ) + print(f"[Training] [{datetime.now().isoformat()}] {line[:-1]}") + if result: + yield result + + if progress is not None and message: + progress(percent, message) + + if 
training_state: + training_state.process.stdout.close() + return_code = training_state.process.wait() + training_state = None + +def update_training_dataplot(x_min=None, x_max=None, y_min=None, y_max=None, config_path=None): + global training_state + losses = None + lrs = None + grad_norms = None + + x_lim = [ x_min, x_max ] + y_lim = [ y_min, y_max ] + + if not training_state: + if config_path: + training_state = TrainingState(config_path=config_path, start=False) + training_state.load_statistics() + message = training_state.get_status() + + if training_state: + if not x_lim[-1]: + x_lim[-1] = training_state.epochs + + if not y_lim[-1]: + y_lim = None + + if len(training_state.statistics['loss']) > 0: + losses = gr.LinePlot.update( + value = pd.DataFrame(training_state.statistics['loss']), + x_lim=x_lim, y_lim=y_lim, + x="epoch", y="value", # x="it", + title="Loss Metrics", color="type", tooltip=['epoch', 'it', 'value', 'type'], + width=500, height=350 + ) + if len(training_state.statistics['lr']) > 0: + lrs = gr.LinePlot.update( + value = pd.DataFrame(training_state.statistics['lr']), + x_lim=x_lim, + x="epoch", y="value", # x="it", + title="Learning Rate", color="type", tooltip=['epoch', 'it', 'value', 'type'], + width=500, height=350 + ) + if len(training_state.statistics['grad_norm']) > 0: + grad_norms = gr.LinePlot.update( + value = pd.DataFrame(training_state.statistics['grad_norm']), + x_lim=x_lim, + x="epoch", y="value", # x="it", + title="Gradient Normals", color="type", tooltip=['epoch', 'it', 'value', 'type'], + width=500, height=350 + ) + + if config_path: + del training_state + training_state = None + + return (losses, lrs, grad_norms) + +def reconnect_training(verbose=False, progress=gr.Progress(track_tqdm=True)): + global training_state + if not training_state or not training_state.process: + return "Training not in progress" + + for line in iter(training_state.process.stdout.readline, ""): + result, percent, message = training_state.parse( line=line, verbose=verbose, progress=progress ) + print(f"[Training] [{datetime.now().isoformat()}] {line[:-1]}") + if result: + yield result + + if progress is not None and message: + progress(percent, message) + +def stop_training(): + global training_state + if training_state is None: + return "No training in progress" + print("Killing training process...") + training_state.killed = True + + children = [] + if args.tts_backend == "tortoise": + # wrapped in a try/catch in case for some reason this fails outside of Linux + try: + children = [p.info for p in psutil.process_iter(attrs=['pid', 'name', 'cmdline']) if './src/train.py' in p.info['cmdline']] + except Exception as e: + pass + + training_state.process.stdout.close() + training_state.process.terminate() + training_state.process.kill() + elif args.tts_backend == "vall-e": + print(training_state.process.communicate(input='quit')[0]) + + return_code = training_state.process.wait() + + for p in children: + os.kill( p['pid'], signal.SIGKILL ) + + training_state = None + print("Killed training process.") + return f"Training cancelled: {return_code}" + +def get_halfp_model_path(): + autoregressive_model_path = get_model_path('autoregressive.pth') + return autoregressive_model_path.replace(".pth", "_half.pth") + +def convert_to_halfp(): + autoregressive_model_path = get_model_path('autoregressive.pth') + print(f'Converting model to half precision: {autoregressive_model_path}') + model = torch.load(autoregressive_model_path) + for k in model: + model[k] = model[k].half() + + outfile = 
get_halfp_model_path() + torch.save(model, outfile) + print(f'Converted model to half precision: {outfile}') + + +# collapses short segments into the previous segment +def whisper_sanitize( results ): + sanitized = json.loads(json.dumps(results)) + sanitized['segments'] = [] + + for segment in results['segments']: + length = segment['end'] - segment['start'] + if length >= MIN_TRAINING_DURATION or len(sanitized['segments']) == 0: + sanitized['segments'].append(segment) + continue + + last_segment = sanitized['segments'][-1] + # segment already asimilitated it, somehow + if last_segment['end'] >= segment['end']: + continue + """ + # segment already asimilitated it, somehow + if last_segment['text'].endswith(segment['text']): + continue + """ + last_segment['text'] += segment['text'] + last_segment['end'] = segment['end'] + + for i in range(len(sanitized['segments'])): + sanitized['segments'][i]['id'] = i + + return sanitized + +def whisper_transcribe( file, language=None ): + # shouldn't happen, but it's for safety + global whisper_model + global whisper_align_model + + if not whisper_model: + load_whisper_model(language=language) + + if args.whisper_backend == "openai/whisper": + if not language: + language = None + + return whisper_model.transcribe(file, language=language) + + if args.whisper_backend == "lightmare/whispercpp": + res = whisper_model.transcribe(file) + segments = whisper_model.extract_text_and_timestamps( res ) + + result = { + 'text': [], + 'segments': [] + } + for segment in segments: + reparsed = { + 'start': segment[0] / 100.0, + 'end': segment[1] / 100.0, + 'text': segment[2], + 'id': len(result['segments']) + } + result['text'].append( segment[2] ) + result['segments'].append(reparsed) + + result['text'] = " ".join(result['text']) + return result + + if args.whisper_backend == "m-bain/whisperx": + import whisperx + + device = "cuda" if get_device_name() == "cuda" else "cpu" + result = whisper_model.transcribe(file, batch_size=args.whisper_batchsize) + + align_model, metadata = whisper_align_model + result_aligned = whisperx.align(result["segments"], align_model, metadata, file, device, return_char_alignments=False) + + result['segments'] = result_aligned['segments'] + result['text'] = [] + for segment in result['segments']: + segment['id'] = len(result['text']) + result['text'].append(segment['text'].strip()) + result['text'] = " ".join(result['text']) + + return result + +def validate_waveform( waveform, sample_rate, min_only=False ): + if not torch.any(waveform < 0): + return "Waveform is empty" + + num_channels, num_frames = waveform.shape + duration = num_frames / sample_rate + + if duration < MIN_TRAINING_DURATION: + return "Duration too short ({:.3f}s < {:.3f}s)".format(duration, MIN_TRAINING_DURATION) + + if not min_only: + if duration > MAX_TRAINING_DURATION: + return "Duration too long ({:.3f}s < {:.3f}s)".format(MAX_TRAINING_DURATION, duration) + + return + +def transcribe_dataset( voice, language=None, skip_existings=False, progress=None ): + unload_tts() + + global whisper_model + if whisper_model is None: + load_whisper_model(language=language) + + results = {} + + files = get_voice(voice, load_latents=False) + indir = f'./training/{voice}/' + infile = f'{indir}/whisper.json' + + quantize_in_memory = args.tts_backend == "vall-e" + + os.makedirs(f'{indir}/audio/', exist_ok=True) + + TARGET_SAMPLE_RATE = 22050 + if args.tts_backend != "tortoise": + TARGET_SAMPLE_RATE = 24000 + if tts: + TARGET_SAMPLE_RATE = tts.input_sample_rate + + if 
os.path.exists(infile): + results = json.load(open(infile, 'r', encoding="utf-8")) + + for file in tqdm(files, desc="Iterating through voice files"): + basename = os.path.basename(file) + + if basename in results and skip_existings: + print(f"Skipping already parsed file: {basename}") + continue + + try: + result = whisper_transcribe(file, language=language) + except Exception as e: + print("Failed to transcribe:", file, e) + continue + + results[basename] = result + + if not quantize_in_memory: + waveform, sample_rate = torchaudio.load(file) + # resample to the input rate, since it'll get resampled for training anyways + # this should also "help" increase throughput a bit when filling the dataloaders + waveform, sample_rate = resample(waveform, sample_rate, TARGET_SAMPLE_RATE) + if waveform.shape[0] == 2: + waveform = waveform[:1] + + try: + kwargs = {} + if basename[-4:] == ".wav": + kwargs['encoding'] = "PCM_S" + kwargs['bits_per_sample'] = 16 + + torchaudio.save(f"{indir}/audio/{basename}", waveform, sample_rate, **kwargs) + except Exception as e: + print(e) + + with open(infile, 'w', encoding="utf-8") as f: + f.write(json.dumps(results, indent='\t')) + + do_gc() + + modified = False + for basename in results: + try: + sanitized = whisper_sanitize(results[basename]) + if len(sanitized['segments']) > 0 and len(sanitized['segments']) != len(results[basename]['segments']): + results[basename] = sanitized + modified = True + print("Segments sanizited: ", basename) + except Exception as e: + print("Failed to sanitize:", basename, e) + pass + + if modified: + os.rename(infile, infile.replace(".json", ".unsanitized.json")) + with open(infile, 'w', encoding="utf-8") as f: + f.write(json.dumps(results, indent='\t')) + + return f"Processed dataset to: {indir}" + +def slice_waveform( waveform, sample_rate, start, end, trim ): + start = int(start * sample_rate) + end = int(end * sample_rate) + + if start < 0: + start = 0 + if end >= waveform.shape[-1]: + end = waveform.shape[-1] - 1 + + sliced = waveform[:, start:end] + + error = validate_waveform( sliced, sample_rate, min_only=True ) + if trim and not error: + sliced = torchaudio.functional.vad( sliced, sample_rate ) + + return sliced, error + +def slice_dataset( voice, trim_silence=True, start_offset=0, end_offset=0, results=None, progress=gr.Progress() ): + indir = f'./training/{voice}/' + infile = f'{indir}/whisper.json' + messages = [] + + if not os.path.exists(infile): + message = f"Missing dataset: {infile}" + print(message) + return message + + if results is None: + results = json.load(open(infile, 'r', encoding="utf-8")) + + TARGET_SAMPLE_RATE = 22050 + if args.tts_backend != "tortoise": + TARGET_SAMPLE_RATE = 24000 + if tts: + TARGET_SAMPLE_RATE = tts.input_sample_rate + + files = 0 + segments = 0 + for filename in results: + path = f'./voices/{voice}/{filename}' + extension = os.path.splitext(filename)[-1][1:] + out_extension = extension # "wav" + + if not os.path.exists(path): + path = f'./training/{voice}/{filename}' + + if not os.path.exists(path): + message = f"Missing source audio: {filename}" + print(message) + messages.append(message) + continue + + files += 1 + result = results[filename] + waveform, sample_rate = torchaudio.load(path) + num_channels, num_frames = waveform.shape + duration = num_frames / sample_rate + + for segment in result['segments']: + file = filename.replace(f".{extension}", f"_{pad(segment['id'], 4)}.{out_extension}") + + sliced, error = slice_waveform( waveform, sample_rate, segment['start'] + 
start_offset, segment['end'] + end_offset, trim_silence ) + if error: + message = f"{error}, skipping... {file}" + print(message) + messages.append(message) + continue + + sliced, _ = resample( sliced, sample_rate, TARGET_SAMPLE_RATE ) + + if waveform.shape[0] == 2: + waveform = waveform[:1] + + kwargs = {} + if file[-4:] == ".wav": + kwargs['encoding'] = "PCM_S" + kwargs['bits_per_sample'] = 16 + + torchaudio.save(f"{indir}/audio/{file}", sliced, TARGET_SAMPLE_RATE, **kwargs) + + segments +=1 + + messages.append(f"Sliced segments: {files} => {segments}.") + return "\n".join(messages) + +# takes an LJSpeech-dataset-formatted .txt file and phonemize it +def phonemize_txt_file( path ): + with open(path, 'r', encoding='utf-8') as f: + lines = f.readlines() + + reparsed = [] + with open(path.replace(".txt", ".phn.txt"), 'a', encoding='utf-8') as f: + for line in tqdm(lines, desc='Phonemizing...'): + split = line.split("|") + audio = split[0] + text = split[2] + + phonemes = phonemizer( text ) + reparsed.append(f'{audio}|{phonemes}') + f.write(f'\n{audio}|{phonemes}') + + + joined = "\n".join(reparsed) + with open(path.replace(".txt", ".phn.txt"), 'w', encoding='utf-8') as f: + f.write(joined) + + return joined + +# takes an LJSpeech-dataset-formatted .txt (and phonemized .phn.txt from the above) and creates a JSON that should slot in as whisper.json +def create_dataset_json( path ): + with open(path, 'r', encoding='utf-8') as f: + lines = f.readlines() + + phonemes = None + phn_path = path.replace(".txt", ".phn.txt") + if os.path.exists(phn_path): + with open(phn_path, 'r', encoding='utf-8') as f: + phonemes = f.readlines() + + data = {} + + for line in lines: + split = line.split("|") + audio = split[0] + text = split[1] + + data[audio] = { + 'text': text.strip() + } + + for line in phonemes: + split = line.split("|") + audio = split[0] + text = split[1] + + data[audio]['phonemes'] = text.strip() + + with open(path.replace(".txt", ".json"), 'w', encoding='utf-8') as f: + f.write(json.dumps(data, indent="\t")) + + +cached_backends = {} + +def phonemizer( text, language="en-us" ): + from phonemizer import phonemize + from phonemizer.backend import BACKENDS + + def _get_backend( language="en-us", backend="espeak" ): + key = f'{language}_{backend}' + if key in cached_backends: + return cached_backends[key] + + if backend == 'espeak': + phonemizer = BACKENDS[backend]( language, preserve_punctuation=True, with_stress=True) + elif backend == 'espeak-mbrola': + phonemizer = BACKENDS[backend]( language ) + else: + phonemizer = BACKENDS[backend]( language, preserve_punctuation=True ) + + cached_backends[key] = phonemizer + return phonemizer + if language == "en": + language = "en-us" + + backend = _get_backend(language=language, backend=args.phonemizer_backend) + if backend is not None: + tokens = backend.phonemize( [text], strip=True ) + else: + tokens = phonemize( [text], language=language, strip=True, preserve_punctuation=True, with_stress=True ) + + return tokens[0] if len(tokens) == 0 else tokens + tokenized = " ".join( tokens ) + +def should_phonemize(): + if args.tts_backend == "vall-e": + return False + + should = args.tokenizer_json is not None and args.tokenizer_json[-8:] == "ipa.json" + if should: + try: + from phonemizer import phonemize + except Exception as e: + return False + return should + +def prepare_dataset( voice, use_segments=False, text_length=0, audio_length=0, progress=gr.Progress() ): + indir = f'./training/{voice}/' + infile = f'{indir}/whisper.json' + if not 
os.path.exists(infile): + message = f"Missing dataset: {infile}" + print(message) + return message + + results = json.load(open(infile, 'r', encoding="utf-8")) + + errored = 0 + messages = [] + normalize = False # True + phonemize = should_phonemize() + lines = { 'training': [], 'validation': [] } + segments = {} + + quantize_in_memory = args.tts_backend == "vall-e" + + if args.tts_backend != "tortoise": + text_length = 0 + audio_length = 0 + + start_offset = -0.1 + end_offset = 0.1 + trim_silence = False + + TARGET_SAMPLE_RATE = 22050 + if args.tts_backend != "tortoise": + TARGET_SAMPLE_RATE = 24000 + if tts: + TARGET_SAMPLE_RATE = tts.input_sample_rate + + for filename in tqdm(results, desc="Parsing results"): + use_segment = use_segments + + extension = os.path.splitext(filename)[-1][1:] + out_extension = extension # "wav" + result = results[filename] + lang = result['language'] + language = LANGUAGES[lang] if lang in LANGUAGES else lang + normalizer = EnglishTextNormalizer() if language and language == "english" else BasicTextNormalizer() + + # check if unsegmented text exceeds 200 characters + if not use_segment: + if len(result['text']) > MAX_TRAINING_CHAR_LENGTH: + message = f"Text length too long ({MAX_TRAINING_CHAR_LENGTH} < {len(result['text'])}), using segments: {filename}" + print(message) + messages.append(message) + use_segment = True + + # check if unsegmented audio exceeds 11.6s + if not use_segment: + path = f'{indir}/audio/{filename}' + if not quantize_in_memory and not os.path.exists(path): + messages.append(f"Missing source audio: {filename}") + errored += 1 + continue + + duration = 0 + for segment in result['segments']: + duration = max(duration, segment['end']) + + if duration >= MAX_TRAINING_DURATION: + message = f"Audio too large, using segments: {filename}" + print(message) + messages.append(message) + use_segment = True + + # implicitly segment + if use_segment and not use_segments: + exists = True + for segment in result['segments']: + duration = segment['end'] - segment['start'] + if duration <= MIN_TRAINING_DURATION or MAX_TRAINING_DURATION <= duration: + continue + + path = f'{indir}/audio/' + filename.replace(f".{extension}", f"_{pad(segment['id'], 4)}.{out_extension}") + if os.path.exists(path): + continue + exists = False + break + + if not quantize_in_memory and not exists: + tmp = {} + tmp[filename] = result + print(f"Audio not segmented, segmenting: {filename}") + message = slice_dataset( voice, results=tmp ) + print(message) + messages = messages + message.split("\n") + + waveform = None + + + if quantize_in_memory: + path = f'{indir}/audio/{filename}' + if not os.path.exists(path): + path = f'./voices/{voice}/{filename}' + + if not os.path.exists(path): + message = f"Audio not found: {path}" + print(message) + messages.append(message) + #continue + else: + waveform = torchaudio.load(path) + waveform = resample(waveform[0], waveform[1], TARGET_SAMPLE_RATE) + + if not use_segment: + segments[filename] = { + 'text': result['text'], + 'lang': lang, + 'language': language, + 'normalizer': normalizer, + 'phonemes': result['phonemes'] if 'phonemes' in result else None + } + + if waveform: + segments[filename]['waveform'] = waveform + else: + for segment in result['segments']: + duration = segment['end'] - segment['start'] + if duration <= MIN_TRAINING_DURATION or MAX_TRAINING_DURATION <= duration: + continue + + file = filename.replace(f".{extension}", f"_{pad(segment['id'], 4)}.{out_extension}") + + segments[file] = { + 'text': segment['text'], + 'lang': 
lang, + 'language': language, + 'normalizer': normalizer, + 'phonemes': segment['phonemes'] if 'phonemes' in segment else None + } + + if waveform: + sliced, error = slice_waveform( waveform[0], waveform[1], segment['start'] + start_offset, segment['end'] + end_offset, trim_silence ) + if error: + message = f"{error}, skipping... {file}" + print(message) + messages.append(message) + segments[file]['error'] = error + #continue + else: + segments[file]['waveform'] = (sliced, waveform[1]) + + jobs = { + 'quantize': [[], []], + 'phonemize': [[], []], + } + + for file in tqdm(segments, desc="Parsing segments"): + extension = os.path.splitext(file)[-1][1:] + result = segments[file] + path = f'{indir}/audio/{file}' + + text = result['text'] + lang = result['lang'] + language = result['language'] + normalizer = result['normalizer'] + phonemes = result['phonemes'] + if phonemize and phonemes is None: + phonemes = phonemizer( text, language=lang ) + + normalized = normalizer(text) if normalize else text + + if len(text) > MAX_TRAINING_CHAR_LENGTH: + message = f"Text length too long ({MAX_TRAINING_CHAR_LENGTH} < {len(text)}), skipping... {file}" + print(message) + messages.append(message) + errored += 1 + continue + + # num_channels, num_frames = waveform.shape + #duration = num_frames / sample_rate + + + culled = len(text) < text_length + #if not culled and audio_length > 0: + # culled = duration < audio_length + + line = f'audio/{file}|{phonemes if phonemize and phonemes else text}' + + lines['training' if not culled else 'validation'].append(line) + + if culled or args.tts_backend != "vall-e": + continue + + os.makedirs(f'{indir}/valle/', exist_ok=True) + #os.makedirs(f'./training/valle/data/{voice}/', exist_ok=True) + + phn_file = f'{indir}/valle/{file.replace(f".{extension}",".phn.txt")}' + #phn_file = f'./training/valle/data/{voice}/{file.replace(f".{extension}",".phn.txt")}' + if not os.path.exists(phn_file): + jobs['phonemize'][0].append(phn_file) + jobs['phonemize'][1].append(normalized) + """ + phonemized = valle_phonemize( normalized ) + open(f'{indir}/valle/{file.replace(".wav",".phn.txt")}', 'w', encoding='utf-8').write(" ".join(phonemized)) + print("Phonemized:", file, normalized, text) + """ + + qnt_file = f'{indir}/valle/{file.replace(f".{extension}",".qnt.pt")}' + #qnt_file = f'./training/valle/data/{voice}/{file.replace(f".{extension}",".qnt.pt")}' + if 'error' not in result: + if not quantize_in_memory and not os.path.exists(path): + message = f"Missing segment, skipping... {file}" + print(message) + messages.append(message) + errored += 1 + continue + + if not os.path.exists(qnt_file): + waveform = None + if 'waveform' in result: + waveform, sample_rate = result['waveform'] + elif os.path.exists(path): + waveform, sample_rate = torchaudio.load(path) + error = validate_waveform( waveform, sample_rate ) + if error: + message = f"{error}, skipping... 
{file}" + print(message) + messages.append(message) + errored += 1 + continue + + if waveform is not None: + jobs['quantize'][0].append(qnt_file) + jobs['quantize'][1].append((waveform, sample_rate)) + """ + quantized = valle_quantize( waveform, sample_rate ).cpu() + torch.save(quantized, f'{indir}/valle/{file.replace(".wav",".qnt.pt")}') + print("Quantized:", file) + """ + + for i in tqdm(range(len(jobs['quantize'][0])), desc="Quantizing"): + qnt_file = jobs['quantize'][0][i] + waveform, sample_rate = jobs['quantize'][1][i] + + quantized = valle_quantize( waveform, sample_rate ).cpu() + torch.save(quantized, qnt_file) + #print("Quantized:", qnt_file) + + for i in tqdm(range(len(jobs['phonemize'][0])), desc="Phonemizing"): + phn_file = jobs['phonemize'][0][i] + normalized = jobs['phonemize'][1][i] + + try: + phonemized = valle_phonemize( normalized ) + open(phn_file, 'w', encoding='utf-8').write(" ".join(phonemized)) + #print("Phonemized:", phn_file) + except Exception as e: + message = f"Failed to phonemize: {phn_file}: {normalized}" + messages.append(message) + print(message) + + + training_joined = "\n".join(lines['training']) + validation_joined = "\n".join(lines['validation']) + + with open(f'{indir}/train.txt', 'w', encoding="utf-8") as f: + f.write(training_joined) + + with open(f'{indir}/validation.txt', 'w', encoding="utf-8") as f: + f.write(validation_joined) + + messages.append(f"Prepared {len(lines['training'])} lines (validation: {len(lines['validation'])}, culled: {errored}).\n{training_joined}\n\n{validation_joined}") + return "\n".join(messages) + +def calc_iterations( epochs, lines, batch_size ): + return int(math.ceil(epochs * math.ceil(lines / batch_size))) + +def schedule_learning_rate( iterations, schedule=LEARNING_RATE_SCHEDULE ): + return [int(iterations * d) for d in schedule] + +def optimize_training_settings( **kwargs ): + messages = [] + settings = {} + settings.update(kwargs) + + dataset_path = f"./training/{settings['voice']}/train.txt" + with open(dataset_path, 'r', encoding="utf-8") as f: + lines = len(f.readlines()) + + if lines == 0: + raise Exception("Empty dataset.") + + if settings['batch_size'] > lines: + settings['batch_size'] = lines + messages.append(f"Batch size is larger than your dataset, clamping batch size to: {settings['batch_size']}") + + """ + if lines % settings['batch_size'] != 0: + settings['batch_size'] = int(lines / settings['batch_size']) + if settings['batch_size'] == 0: + settings['batch_size'] = 1 + messages.append(f"Batch size not neatly divisible by dataset size, adjusting batch size to: {settings['batch_size']}") + """ + if settings['gradient_accumulation_size'] == 0: + settings['gradient_accumulation_size'] = 1 + + if settings['batch_size'] / settings['gradient_accumulation_size'] < 2: + settings['gradient_accumulation_size'] = int(settings['batch_size'] / 2) + if settings['gradient_accumulation_size'] == 0: + settings['gradient_accumulation_size'] = 1 + + messages.append(f"Gradient accumulation size is too large for a given batch size, clamping gradient accumulation size to: {settings['gradient_accumulation_size']}") + elif settings['batch_size'] % settings['gradient_accumulation_size'] != 0: + settings['gradient_accumulation_size'] -= settings['batch_size'] % settings['gradient_accumulation_size'] + if settings['gradient_accumulation_size'] == 0: + settings['gradient_accumulation_size'] = 1 + + messages.append(f"Batch size is not evenly divisible by the gradient accumulation size, adjusting gradient accumulation size to: 
{settings['gradient_accumulation_size']}") + + if settings['batch_size'] % settings['gpus'] != 0: + settings['batch_size'] -= settings['batch_size'] % settings['gpus'] + if settings['batch_size'] == 0: + settings['batch_size'] = 1 + messages.append(f"Batch size not neatly divisible by GPU count, adjusting batch size to: {settings['batch_size']}") + + + def get_device_batch_size( vram ): + DEVICE_BATCH_SIZE_MAP = [ + (70, 128), # based on an A100-80G, I can safely get a ratio of 4096:32 = 128 + (32, 64), # based on my two 6800XTs, I can only really safely get a ratio of 128:2 = 64 + (16, 8), # based on an A4000, I can do a ratio of 512:64 = 8:1 + (8, 4), # interpolated + (6, 2), # based on my 2060, it only really lets me have a batch ratio of 2:1 + ] + for k, v in DEVICE_BATCH_SIZE_MAP: + if vram > (k-1): + return v + return 1 + + if settings['gpus'] > get_device_count(): + settings['gpus'] = get_device_count() + messages.append(f"GPU count exceeds defacto GPU count, clamping to: {settings['gpus']}") + + if settings['gpus'] <= 1: + settings['gpus'] = 1 + else: + messages.append(f"! EXPERIMENTAL ! Multi-GPU training is extremely particular, expect issues.") + + # assuming you have equal GPUs + vram = get_device_vram() * settings['gpus'] + batch_ratio = int(settings['batch_size'] / settings['gradient_accumulation_size']) + batch_cap = get_device_batch_size(vram) + + if batch_ratio > batch_cap: + settings['gradient_accumulation_size'] = int(settings['batch_size'] / batch_cap) + messages.append(f"Batch ratio ({batch_ratio}) is expected to exceed your VRAM capacity ({'{:.3f}'.format(vram)}GB, suggested {batch_cap} batch size cap), adjusting gradient accumulation size to: {settings['gradient_accumulation_size']}") + + iterations = calc_iterations(epochs=settings['epochs'], lines=lines, batch_size=settings['batch_size']) + + if settings['epochs'] < settings['save_rate']: + settings['save_rate'] = settings['epochs'] + messages.append(f"Save rate is too small for the given iteration step, clamping save rate to: {settings['save_rate']}") + + if settings['epochs'] < settings['validation_rate']: + settings['validation_rate'] = settings['epochs'] + messages.append(f"Validation rate is too small for the given iteration step, clamping validation rate to: {settings['validation_rate']}") + + if settings['resume_state'] and not os.path.exists(settings['resume_state']): + settings['resume_state'] = None + messages.append("Resume path specified, but does not exist. Disabling...") + + if settings['bitsandbytes']: + messages.append("! EXPERIMENTAL ! BitsAndBytes requested.") + + if settings['half_p']: + if settings['bitsandbytes']: + settings['half_p'] = False + messages.append("Half Precision requested, but BitsAndBytes is also requested. Due to redundancies, disabling half precision...") + else: + messages.append("! EXPERIMENTAL ! 
Half Precision requested.") + if not os.path.exists(get_halfp_model_path()): + convert_to_halfp() + + steps = int(iterations / settings['epochs']) + + messages.append(f"For {settings['epochs']} epochs with {lines} lines in batches of {settings['batch_size']}, iterating for {iterations} steps ({steps}) steps per epoch)") + + return settings, messages + +def save_training_settings( **kwargs ): + messages = [] + settings = {} + settings.update(kwargs) + + + outjson = f'./training/{settings["voice"]}/train.json' + with open(outjson, 'w', encoding="utf-8") as f: + f.write(json.dumps(settings, indent='\t') ) + + settings['dataset_path'] = f"./training/{settings['voice']}/train.txt" + settings['validation_path'] = f"./training/{settings['voice']}/validation.txt" + + with open(settings['dataset_path'], 'r', encoding="utf-8") as f: + lines = len(f.readlines()) + + settings['iterations'] = calc_iterations(epochs=settings['epochs'], lines=lines, batch_size=settings['batch_size']) + + if not settings['source_model'] or settings['source_model'] == "auto": + settings['source_model'] = f"./models/tortoise/autoregressive{'_half' if settings['half_p'] else ''}.pth" + + if settings['half_p']: + if not os.path.exists(get_halfp_model_path()): + convert_to_halfp() + + messages.append(f"For {settings['epochs']} epochs with {lines} lines, iterating for {settings['iterations']} steps") + + iterations_per_epoch = settings['iterations'] / settings['epochs'] + + settings['save_rate'] = int(settings['save_rate'] * iterations_per_epoch) + settings['validation_rate'] = int(settings['validation_rate'] * iterations_per_epoch) + + iterations_per_epoch = int(iterations_per_epoch) + + if settings['save_rate'] < 1: + settings['save_rate'] = 1 + """ + if settings['validation_rate'] < 1: + settings['validation_rate'] = 1 + """ + """ + if settings['iterations'] % settings['save_rate'] != 0: + adjustment = int(settings['iterations'] / settings['save_rate']) * settings['save_rate'] + messages.append(f"Iteration rate is not evenly divisible by save rate, adjusting: {settings['iterations']} => {adjustment}") + settings['iterations'] = adjustment + """ + + settings['validation_batch_size'] = int(settings['batch_size'] / settings['gradient_accumulation_size']) + if not os.path.exists(settings['validation_path']): + settings['validation_enabled'] = False + messages.append("Validation not found, disabling validation...") + elif settings['validation_batch_size'] == 0: + settings['validation_enabled'] = False + messages.append("Validation batch size == 0, disabling validation...") + else: + with open(settings['validation_path'], 'r', encoding="utf-8") as f: + validation_lines = len(f.readlines()) + + if validation_lines < settings['validation_batch_size']: + settings['validation_batch_size'] = validation_lines + messages.append(f"Batch size exceeds validation dataset size, clamping validation batch size to {validation_lines}") + + settings['tokenizer_json'] = args.tokenizer_json if args.tokenizer_json else get_tokenizer_jsons()[0] + + if settings['gpus'] > get_device_count(): + settings['gpus'] = get_device_count() + + # what an utter mistake this was + settings['optimizer'] = 'adamw' # if settings['gpus'] == 1 else 'adamw_zero' + + if 'learning_rate_scheme' not in settings or settings['learning_rate_scheme'] not in LEARNING_RATE_SCHEMES: + settings['learning_rate_scheme'] = "Multistep" + + settings['learning_rate_scheme'] = LEARNING_RATE_SCHEMES[settings['learning_rate_scheme']] + + learning_rate_schema = [f"default_lr_scheme: 
{settings['learning_rate_scheme']}"] + if settings['learning_rate_scheme'] == "MultiStepLR": + if not settings['learning_rate_schedule']: + settings['learning_rate_schedule'] = LEARNING_RATE_SCHEDULE + elif isinstance(settings['learning_rate_schedule'],str): + settings['learning_rate_schedule'] = json.loads(settings['learning_rate_schedule']) + + settings['learning_rate_schedule'] = schedule_learning_rate( iterations_per_epoch, settings['learning_rate_schedule'] ) + + learning_rate_schema.append(f" gen_lr_steps: {settings['learning_rate_schedule']}") + learning_rate_schema.append(f" lr_gamma: 0.5") + elif settings['learning_rate_scheme'] == "CosineAnnealingLR_Restart": + epochs = settings['epochs'] + restarts = settings['learning_rate_restarts'] + restart_period = int(epochs / restarts) + + if 'learning_rate_warmup' not in settings: + settings['learning_rate_warmup'] = 0 + if 'learning_rate_min' not in settings: + settings['learning_rate_min'] = 1e-08 + + if 'learning_rate_period' not in settings: + settings['learning_rate_period'] = [ iterations_per_epoch * restart_period for x in range(epochs) ] + + settings['learning_rate_restarts'] = [ iterations_per_epoch * (x+1) * restart_period for x in range(restarts) ] # [52, 104, 156, 208] + + if 'learning_rate_restart_weights' not in settings: + settings['learning_rate_restart_weights'] = [ ( restarts - x - 1 ) / restarts for x in range(restarts) ] # [.75, .5, .25, .125] + settings['learning_rate_restart_weights'][-1] = settings['learning_rate_restart_weights'][-2] * 0.5 + + learning_rate_schema.append(f" T_period: {settings['learning_rate_period']}") + learning_rate_schema.append(f" warmup: {settings['learning_rate_warmup']}") + learning_rate_schema.append(f" eta_min: !!float {settings['learning_rate_min']}") + learning_rate_schema.append(f" restarts: {settings['learning_rate_restarts']}") + learning_rate_schema.append(f" restart_weights: {settings['learning_rate_restart_weights']}") + settings['learning_rate_scheme'] = "\n".join(learning_rate_schema) + + if settings['resume_state']: + settings['source_model'] = f"# pretrain_model_gpt: '{settings['source_model']}'" + settings['resume_state'] = f"resume_state: '{settings['resume_state']}'" + else: + settings['source_model'] = f"pretrain_model_gpt: '{settings['source_model']}'" + settings['resume_state'] = f"# resume_state: '{settings['resume_state']}'" + + def use_template(template, out): + with open(template, 'r', encoding="utf-8") as f: + yaml = f.read() + + # i could just load and edit the YAML directly, but this is easier, as I don't need to bother with path traversals + for k in settings: + if settings[k] is None: + continue + yaml = yaml.replace(f"${{{k}}}", str(settings[k])) + + with open(out, 'w', encoding="utf-8") as f: + f.write(yaml) + + if args.tts_backend == "tortoise": + use_template(f'./models/.template.dlas.yaml', f'./training/{settings["voice"]}/train.yaml') + elif args.tts_backend == "vall-e": + settings['model_name'] = "[ 'ar-quarter', 'nar-quarter' ]" + use_template(f'./models/.template.valle.yaml', f'./training/{settings["voice"]}/config.yaml') + + messages.append(f"Saved training output") + return settings, messages + +def import_voices(files, saveAs=None, progress=None): + global args + + if not isinstance(files, list): + files = [files] + + for file in tqdm(files, desc="Importing voice files"): + j, latents = read_generate_settings(file, read_latents=True) + + if j is not None and saveAs is None: + saveAs = j['voice'] + if saveAs is None or saveAs == "": + raise 
Exception("Specify a voice name") + + outdir = f'{get_voice_dir()}/{saveAs}/' + os.makedirs(outdir, exist_ok=True) + + if latents: + print(f"Importing latents to {latents}") + with open(f'{outdir}/cond_latents.pth', 'wb') as f: + f.write(latents) + latents = f'{outdir}/cond_latents.pth' + print(f"Imported latents to {latents}") + else: + filename = file.name + if filename[-4:] != ".wav": + raise Exception("Please convert to a WAV first") + + path = f"{outdir}/{os.path.basename(filename)}" + print(f"Importing voice to {path}") + + waveform, sample_rate = torchaudio.load(filename) + + if args.voice_fixer: + if not voicefixer: + load_voicefixer() + + waveform, sample_rate = resample(waveform, sample_rate, 44100) + torchaudio.save(path, waveform, sample_rate) + + print(f"Running 'voicefixer' on voice sample: {path}") + voicefixer.restore( + input = path, + output = path, + cuda=get_device_name() == "cuda" and args.voice_fixer_use_cuda, + #mode=mode, + ) + else: + torchaudio.save(path, waveform, sample_rate) + + print(f"Imported voice to {path}") + +def relative_paths( dirs ): + return [ './' + os.path.relpath( d ).replace("\\", "/") for d in dirs ] + +def get_voice( name, dir=get_voice_dir(), load_latents=True, extensions=["wav", "mp3", "flac"] ): + subj = f'{dir}/{name}/' + if not os.path.isdir(subj): + return + files = os.listdir(subj) + + if load_latents: + extensions.append("pth") + + voice = [] + for file in files: + ext = os.path.splitext(file)[-1][1:] + if ext not in extensions: + continue + + voice.append(f'{subj}/{file}') + + return sorted( voice ) + +def get_voice_list(dir=get_voice_dir(), append_defaults=False, extensions=["wav", "mp3", "flac", "pth"]): + defaults = [ "random", "microphone" ] + os.makedirs(dir, exist_ok=True) + #res = sorted([d for d in os.listdir(dir) if d not in defaults and os.path.isdir(os.path.join(dir, d)) and len(os.listdir(os.path.join(dir, d))) > 0 ]) + + res = [] + for name in os.listdir(dir): + if name in defaults: + continue + if not os.path.isdir(f'{dir}/{name}'): + continue + if len(os.listdir(os.path.join(dir, name))) == 0: + continue + files = get_voice( name, dir=dir, extensions=extensions ) + + if len(files) > 0: + res.append(name) + else: + for subdir in os.listdir(f'{dir}/{name}'): + if not os.path.isdir(f'{dir}/{name}/{subdir}'): + continue + files = get_voice( f'{name}/{subdir}', dir=dir, extensions=extensions ) + if len(files) == 0: + continue + res.append(f'{name}/{subdir}') + + res = sorted(res) + + if append_defaults: + res = res + defaults + + return res + +def get_valle_models(dir="./training/"): + return [ f'{dir}/{d}/config.yaml' for d in os.listdir(dir) if os.path.exists(f'{dir}/{d}/config.yaml') ] + +def get_autoregressive_models(dir="./models/finetunes/", prefixed=False, auto=False): + os.makedirs(dir, exist_ok=True) + base = [get_model_path('autoregressive.pth')] + halfp = get_halfp_model_path() + if os.path.exists(halfp): + base.append(halfp) + + additionals = sorted([f'{dir}/{d}' for d in os.listdir(dir) if d[-4:] == ".pth" ]) + found = [] + for training in os.listdir(f'./training/'): + if not os.path.isdir(f'./training/{training}/') or not os.path.isdir(f'./training/{training}/finetune/') or not os.path.isdir(f'./training/{training}/finetune/models/'): + continue + models = sorted([ int(d[:-8]) for d in os.listdir(f'./training/{training}/finetune/models/') if d[-8:] == "_gpt.pth" ]) + found = found + [ f'./training/{training}/finetune/models/{d}_gpt.pth' for d in models ] + + res = base + additionals + found + + if prefixed: + 
for i in range(len(res)): + path = res[i] + hash = hash_file(path) + shorthash = hash[:8] + + res[i] = f'[{shorthash}] {path}' + + paths = relative_paths(res) + if auto: + paths = ["auto"] + paths + + return paths + +def get_diffusion_models(dir="./models/finetunes/", prefixed=False): + return relative_paths([ get_model_path('diffusion_decoder.pth') ]) + +def get_tokenizer_jsons( dir="./models/tokenizers/" ): + additionals = sorted([ f'{dir}/{d}' for d in os.listdir(dir) if d[-5:] == ".json" ]) if os.path.isdir(dir) else [] + return relative_paths([ "./modules/tortoise-tts/tortoise/data/tokenizer.json" ] + additionals) + +def tokenize_text( text, config=None, stringed=True, skip_specials=False ): + from tortoise.utils.tokenizer import VoiceBpeTokenizer + + if not config: + config = args.tokenizer_json if args.tokenizer_json else get_tokenizer_jsons()[0] + + if not tts: + tokenizer = VoiceBpeTokenizer(config) + else: + tokenizer = tts.tokenizer + + encoded = tokenizer.encode(text) + decoded = tokenizer.tokenizer.decode(encoded, skip_special_tokens=skip_specials).split(" ") + + if stringed: + return "\n".join([ str(encoded), str(decoded) ]) + + return decoded + +def get_dataset_list(dir="./training/"): + return sorted([d for d in os.listdir(dir) if os.path.isdir(os.path.join(dir, d)) and "train.txt" in os.listdir(os.path.join(dir, d)) ]) + +def get_training_list(dir="./training/"): + if args.tts_backend == "tortoise": + return sorted([f'./training/{d}/train.yaml' for d in os.listdir(dir) if os.path.isdir(os.path.join(dir, d)) and "train.yaml" in os.listdir(os.path.join(dir, d)) ]) + else: + return sorted([f'./training/{d}/config.yaml' for d in os.listdir(dir) if os.path.isdir(os.path.join(dir, d)) and "config.yaml" in os.listdir(os.path.join(dir, d)) ]) + +def pad(num, zeroes): + return str(num).zfill(zeroes+1) + +def curl(url): + try: + req = urllib.request.Request(url, headers={'User-Agent': 'Python'}) + conn = urllib.request.urlopen(req) + data = conn.read() + data = data.decode() + data = json.loads(data) + conn.close() + return data + except Exception as e: + print(e) + return None + +def check_for_updates( dir = None ): + if dir is None: + check_for_updates("./.git/") + check_for_updates("./.git/modules/dlas/") + check_for_updates("./.git/modules/tortoise-tts/") + return + + git_dir = dir + if not os.path.isfile(f'{git_dir}/FETCH_HEAD'): + print(f"Cannot check for updates for {dir}: not from a git repo") + return False + + with open(f'{git_dir}/FETCH_HEAD', 'r', encoding="utf-8") as f: + head = f.read() + + match = re.findall(r"^([a-f0-9]+).+?https:\/\/(.+?)\/(.+?)\/(.+?)\n", head) + if match is None or len(match) == 0: + print(f"Cannot check for updates for {dir}: cannot parse FETCH_HEAD") + return False + + match = match[0] + + local = match[0] + host = match[1] + owner = match[2] + repo = match[3] + + res = curl(f"https://{host}/api/v1/repos/{owner}/{repo}/branches/") #this only works for gitea instances + + if res is None or len(res) == 0: + print(f"Cannot check for updates for {dir}: cannot fetch from remote") + return False + + remote = res[0]["commit"]["id"] + + if remote != local: + print(f"New version found for {dir}: {local[:8]} => {remote[:8]}") + return True + + return False + +def notify_progress(message, progress=None, verbose=True): + if verbose: + print(message) + + if progress is None: + tqdm.write(message) + else: + progress(0, desc=message) + +def get_args(): + global args + return args + +def setup_args(cli=False): + global args + + default_arguments = { + 'share': 
False, + 'listen': None, + 'check-for-updates': False, + 'models-from-local-only': False, + 'low-vram': False, + 'sample-batch-size': None, + 'unsqueeze-sample-batches': False, + 'embed-output-metadata': True, + 'latents-lean-and-mean': True, + 'voice-fixer': False, # getting tired of long initialization times in a Colab for downloading a large dataset for it + 'use-deepspeed': True, + 'voice-fixer-use-cuda': True, + + + 'force-cpu-for-conditioning-latents': False, + 'defer-tts-load': False, + 'device-override': None, + 'prune-nonfinal-outputs': True, + 'concurrency-count': 2, + 'autocalculate-voice-chunk-duration-size': 10, + + 'output-sample-rate': 44100, + 'output-volume': 1, + 'results-folder': "./results/", + + 'hf-token': None, + 'tts-backend': TTSES[0], + + 'autoregressive-model': None, + 'diffusion-model': None, + 'vocoder-model': VOCODERS[-1], + 'tokenizer-json': None, + + 'phonemizer-backend': 'espeak', + + 'valle-model': None, + + 'whisper-backend': 'openai/whisper', + 'whisper-model': "base", + 'whisper-batchsize': 1, + + 'training-default-halfp': False, + 'training-default-bnb': True, + + 'websocket-listen-address': "127.0.0.1", + 'websocket-listen-port': 8069, + 'websocket-enabled': False + } + + if os.path.isfile('./config/exec.json'): + with open(f'./config/exec.json', 'r', encoding="utf-8") as f: + try: + overrides = json.load(f) + for k in overrides: + default_arguments[k] = overrides[k] + except Exception as e: + print(e) + pass + + parser = argparse.ArgumentParser(allow_abbrev=not cli) + parser.add_argument("--share", action='store_true', default=default_arguments['share'], help="Lets Gradio return a public URL to use anywhere") + parser.add_argument("--listen", default=default_arguments['listen'], help="Path for Gradio to listen on") + parser.add_argument("--check-for-updates", action='store_true', default=default_arguments['check-for-updates'], help="Checks for update on startup") + parser.add_argument("--models-from-local-only", action='store_true', default=default_arguments['models-from-local-only'], help="Only loads models from disk, does not check for updates for models") + parser.add_argument("--low-vram", action='store_true', default=default_arguments['low-vram'], help="Disables some optimizations that increases VRAM usage") + parser.add_argument("--no-embed-output-metadata", action='store_false', default=not default_arguments['embed-output-metadata'], help="Disables embedding output metadata into resulting WAV files for easily fetching its settings used with the web UI (data is stored in the lyrics metadata tag)") + parser.add_argument("--latents-lean-and-mean", action='store_true', default=default_arguments['latents-lean-and-mean'], help="Exports the bare essentials for latents.") + parser.add_argument("--voice-fixer", action='store_true', default=default_arguments['voice-fixer'], help="Uses python module 'voicefixer' to improve audio quality, if available.") + parser.add_argument("--voice-fixer-use-cuda", action='store_true', default=default_arguments['voice-fixer-use-cuda'], help="Hints to voicefixer to use CUDA, if available.") + parser.add_argument("--use-deepspeed", action='store_true', default=default_arguments['use-deepspeed'], help="Use deepspeed for speed bump.") + parser.add_argument("--force-cpu-for-conditioning-latents", default=default_arguments['force-cpu-for-conditioning-latents'], action='store_true', help="Forces computing conditional latents to be done on the CPU (if you constantyl OOM on low chunk counts)") + 
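+	# Note on --use-deepspeed (added above): it is declared with action='store_true' and defaults to
+	# default_arguments['use-deepspeed'] (True), so the CLI flag can only turn DeepSpeed on, never off.
+	# To disable it, override the default through ./config/exec.json, which is merged into
+	# default_arguments before the parser is built. Hypothetical exec.json contents to opt out:
+	#   { "use-deepspeed": false }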
parser.add_argument("--defer-tts-load", default=default_arguments['defer-tts-load'], action='store_true', help="Defers loading TTS model") + parser.add_argument("--prune-nonfinal-outputs", default=default_arguments['prune-nonfinal-outputs'], action='store_true', help="Deletes non-final output files on completing a generation") + parser.add_argument("--device-override", default=default_arguments['device-override'], help="A device string to override pass through Torch") + parser.add_argument("--sample-batch-size", default=default_arguments['sample-batch-size'], type=int, help="Sets how many batches to use during the autoregressive samples pass") + parser.add_argument("--unsqueeze-sample-batches", default=default_arguments['unsqueeze-sample-batches'], action='store_true', help="Unsqueezes sample batches to process one by one after sampling") + parser.add_argument("--concurrency-count", type=int, default=default_arguments['concurrency-count'], help="How many Gradio events to process at once") + parser.add_argument("--autocalculate-voice-chunk-duration-size", type=float, default=default_arguments['autocalculate-voice-chunk-duration-size'], help="Number of seconds to suggest voice chunk size for (for example, 100 seconds of audio at 10 seconds per chunk will suggest 10 chunks)") + parser.add_argument("--output-sample-rate", type=int, default=default_arguments['output-sample-rate'], help="Sample rate to resample the output to (from 24KHz)") + parser.add_argument("--output-volume", type=float, default=default_arguments['output-volume'], help="Adjusts volume of output") + parser.add_argument("--results-folder", type=str, default=default_arguments['results-folder'], help="Sets output directory") + + parser.add_argument("--hf-token", type=str, default=default_arguments['hf-token'], help="HuggingFace Token") + parser.add_argument("--tts-backend", default=default_arguments['tts-backend'], help="Specifies which TTS backend to use.") + + parser.add_argument("--autoregressive-model", default=default_arguments['autoregressive-model'], help="Specifies which autoregressive model to use for sampling.") + parser.add_argument("--diffusion-model", default=default_arguments['diffusion-model'], help="Specifies which diffusion model to use for sampling.") + parser.add_argument("--vocoder-model", default=default_arguments['vocoder-model'], action='store_true', help="Specifies with vocoder to use") + parser.add_argument("--tokenizer-json", default=default_arguments['tokenizer-json'], help="Specifies which tokenizer json to use for tokenizing.") + + parser.add_argument("--phonemizer-backend", default=default_arguments['phonemizer-backend'], help="Specifies which phonemizer backend to use.") + + parser.add_argument("--valle-model", default=default_arguments['valle-model'], help="Specifies which VALL-E model to use for sampling.") + + parser.add_argument("--whisper-backend", default=default_arguments['whisper-backend'], action='store_true', help="Picks which whisper backend to use (openai/whisper, lightmare/whispercpp)") + parser.add_argument("--whisper-model", default=default_arguments['whisper-model'], help="Specifies which whisper model to use for transcription.") + parser.add_argument("--whisper-batchsize", type=int, default=default_arguments['whisper-batchsize'], help="Specifies batch size for WhisperX") + + parser.add_argument("--training-default-halfp", action='store_true', default=default_arguments['training-default-halfp'], help="Training default: halfp") + parser.add_argument("--training-default-bnb", 
action='store_true', default=default_arguments['training-default-bnb'], help="Training default: bnb") + + parser.add_argument("--websocket-listen-port", type=int, default=default_arguments['websocket-listen-port'], help="Websocket server listen port, default: 8069") + parser.add_argument("--websocket-listen-address", default=default_arguments['websocket-listen-address'], help="Websocket server listen address, default: 127.0.0.1") + parser.add_argument("--websocket-enabled", action='store_true', default=default_arguments['websocket-enabled'], help="Websocket API server enabled, default: false") + + if cli: + args, unknown = parser.parse_known_args() + else: + args = parser.parse_args() + + args.embed_output_metadata = not args.no_embed_output_metadata + + if not args.device_override: + set_device_name(args.device_override) + + if args.sample_batch_size == 0 and get_device_batch_size() == 1: + print("!WARNING! Automatically deduced sample batch size returned 1.") + + args.listen_host = None + args.listen_port = None + args.listen_path = None + if args.listen: + try: + match = re.findall(r"^(?:(.+?):(\d+))?(\/.*?)?$", args.listen)[0] + + args.listen_host = match[0] if match[0] != "" else "127.0.0.1" + args.listen_port = match[1] if match[1] != "" else None + args.listen_path = match[2] if match[2] != "" else "/" + except Exception as e: + pass + + if args.listen_port is not None: + args.listen_port = int(args.listen_port) + if args.listen_port == 0: + args.listen_port = None + + return args + +def get_default_settings( hypenated=True ): + settings = { + 'listen': None if not args.listen else args.listen, + 'share': args.share, + 'low-vram':args.low_vram, + 'check-for-updates':args.check_for_updates, + 'models-from-local-only':args.models_from_local_only, + 'force-cpu-for-conditioning-latents': args.force_cpu_for_conditioning_latents, + 'defer-tts-load': args.defer_tts_load, + 'prune-nonfinal-outputs': args.prune_nonfinal_outputs, + 'device-override': args.device_override, + 'sample-batch-size': args.sample_batch_size, + 'unsqueeze-sample-batches': args.unsqueeze_sample_batches, + 'embed-output-metadata': args.embed_output_metadata, + 'latents-lean-and-mean': args.latents_lean_and_mean, + 'voice-fixer': args.voice_fixer, + 'use-deepspeed': args.use_deepspeed, + 'voice-fixer-use-cuda': args.voice_fixer_use_cuda, + 'concurrency-count': args.concurrency_count, + 'output-sample-rate': args.output_sample_rate, + 'autocalculate-voice-chunk-duration-size': args.autocalculate_voice_chunk_duration_size, + 'output-volume': args.output_volume, + 'results-folder': args.results_folder, + + 'hf-token': args.hf_token, + 'tts-backend': args.tts_backend, + + 'autoregressive-model': args.autoregressive_model, + 'diffusion-model': args.diffusion_model, + 'vocoder-model': args.vocoder_model, + 'tokenizer-json': args.tokenizer_json, + + 'phonemizer-backend': args.phonemizer_backend, + + 'valle-model': args.valle_model, + + 'whisper-backend': args.whisper_backend, + 'whisper-model': args.whisper_model, + 'whisper-batchsize': args.whisper_batchsize, + + 'training-default-halfp': args.training_default_halfp, + 'training-default-bnb': args.training_default_bnb, + } + + res = {} + for k in settings: + res[k.replace("-", "_") if not hypenated else k] = settings[k] + return res + +def update_args( **kwargs ): + global args + + settings = get_default_settings(hypenated=False) + settings.update(kwargs) + + args.listen = settings['listen'] + args.share = settings['share'] + args.check_for_updates = 
settings['check_for_updates'] + args.models_from_local_only = settings['models_from_local_only'] + args.low_vram = settings['low_vram'] + args.force_cpu_for_conditioning_latents = settings['force_cpu_for_conditioning_latents'] + args.defer_tts_load = settings['defer_tts_load'] + args.prune_nonfinal_outputs = settings['prune_nonfinal_outputs'] + args.device_override = settings['device_override'] + args.sample_batch_size = settings['sample_batch_size'] + args.unsqueeze_sample_batches = settings['unsqueeze_sample_batches'] + args.embed_output_metadata = settings['embed_output_metadata'] + args.latents_lean_and_mean = settings['latents_lean_and_mean'] + args.voice_fixer = settings['voice_fixer'] + args.voice_fixer_use_cuda = settings['voice_fixer_use_cuda'] + args.use_deepspeed = settings['use_deepspeed'] + args.concurrency_count = settings['concurrency_count'] + args.output_sample_rate = 44000 + args.autocalculate_voice_chunk_duration_size = settings['autocalculate_voice_chunk_duration_size'] + args.output_volume = settings['output_volume'] + args.results_folder = settings['results_folder'] + + args.hf_token = settings['hf_token'] + args.tts_backend = settings['tts_backend'] + + args.autoregressive_model = settings['autoregressive_model'] + args.diffusion_model = settings['diffusion_model'] + args.vocoder_model = settings['vocoder_model'] + args.tokenizer_json = settings['tokenizer_json'] + + args.phonemizer_backend = settings['phonemizer_backend'] + + args.valle_model = settings['valle_model'] + + args.whisper_backend = settings['whisper_backend'] + args.whisper_model = settings['whisper_model'] + args.whisper_batchsize = settings['whisper_batchsize'] + + args.training_default_halfp = settings['training_default_halfp'] + args.training_default_bnb = settings['training_default_bnb'] + + save_args_settings() + +def save_args_settings(): + global args + settings = get_default_settings() + + os.makedirs('./config/', exist_ok=True) + with open(f'./config/exec.json', 'w', encoding="utf-8") as f: + f.write(json.dumps(settings, indent='\t') ) + +# super kludgy )`; +def import_generate_settings(file = None): + if not file: + file = "./config/generate.json" + + res = { + 'text': None, + 'delimiter': None, + 'emotion': None, + 'prompt': None, + 'voice': "random", + 'mic_audio': None, + 'voice_latents_chunks': None, + 'candidates': None, + 'seed': None, + 'num_autoregressive_samples': 16, + 'diffusion_iterations': 30, + 'temperature': 0.8, + 'diffusion_sampler': "DDIM", + 'breathing_room': 8 , + 'cvvp_weight': 0.0, + 'top_p': 0.8, + 'diffusion_temperature': 1.0, + 'length_penalty': 1.0, + 'repetition_penalty': 2.0, + 'cond_free_k': 2.0, + 'experimentals': None, + } + + settings, _ = read_generate_settings(file, read_latents=False) + + if settings is not None: + res.update(settings) + + return res + +def reset_generate_settings(): + with open(f'./config/generate.json', 'w', encoding="utf-8") as f: + f.write(json.dumps({}, indent='\t') ) + return import_generate_settings() + +def read_generate_settings(file, read_latents=True): + j = None + latents = None + + if isinstance(file, list) and len(file) == 1: + file = file[0] + + try: + if file is not None: + if hasattr(file, 'name'): + file = file.name + + if file[-4:] == ".wav": + metadata = music_tag.load_file(file) + if 'lyrics' in metadata: + j = json.loads(str(metadata['lyrics'])) + elif file[-5:] == ".json": + with open(file, 'r') as f: + j = json.load(f) + except Exception as e: + pass + + if j is not None: + if 'latents' in j: + if read_latents: + 
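+				# the 'latents' key is stored base64-encoded (either in the .json itself or inside a WAV's
+				# 'lyrics' metadata tag); decode it back to raw bytes here so callers such as import_voices()
+				# can write it out verbatim as cond_latents.pth.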
latents = base64.b64decode(j['latents']) + del j['latents'] + + + if "time" in j: + j["time"] = "{:.3f}".format(j["time"]) + + + + return ( + j, + latents, + ) + +def version_check_tts( min_version ): + global tts + if not tts: + raise Exception("TTS is not initialized") + + if not hasattr(tts, 'version'): + return False + + if min_version[0] > tts.version[0]: + return True + if min_version[1] > tts.version[1]: + return True + if min_version[2] >= tts.version[2]: + return True + return False + +def load_tts( restart=False, + # TorToiSe configs + autoregressive_model=None, diffusion_model=None, vocoder_model=None, tokenizer_json=None, + # VALL-E configs + valle_model=None, +): + global args + global tts + + if restart: + unload_tts() + + tts_loading = True + if args.tts_backend == "tortoise": + if autoregressive_model: + args.autoregressive_model = autoregressive_model + else: + autoregressive_model = args.autoregressive_model + + if autoregressive_model == "auto": + autoregressive_model = deduce_autoregressive_model() + + if diffusion_model: + args.diffusion_model = diffusion_model + else: + diffusion_model = args.diffusion_model + + if vocoder_model: + args.vocoder_model = vocoder_model + else: + vocoder_model = args.vocoder_model + + if tokenizer_json: + args.tokenizer_json = tokenizer_json + else: + tokenizer_json = args.tokenizer_json + + if get_device_name() == "cpu": + print("!!!! WARNING !!!! No GPU available in PyTorch. You may need to reinstall PyTorch.") + + print(f"Loading TorToiSe... (AR: {autoregressive_model}, diffusion: {diffusion_model}, vocoder: {vocoder_model})") + tts = TorToise_TTS(minor_optimizations=not args.low_vram, autoregressive_model_path=autoregressive_model, diffusion_model_path=diffusion_model, vocoder_model=vocoder_model, tokenizer_json=tokenizer_json, unsqueeze_sample_batches=args.unsqueeze_sample_batches, use_deepspeed=args.use_deepspeed) + elif args.tts_backend == "vall-e": + if valle_model: + args.valle_model = valle_model + else: + valle_model = args.valle_model + + print(f"Loading VALL-E... 
(Config: {valle_model})")
+        tts = VALLE_TTS(config=args.valle_model)
+    elif args.tts_backend == "bark":
+
+        print(f"Loading Bark...")
+        tts = Bark_TTS(small=args.low_vram)
+
+    print("Loaded TTS, ready for generation.")
+    tts_loading = False
+    return tts
+
+def unload_tts():
+    global tts
+
+    if tts:
+        del tts
+        tts = None
+        print("Unloaded TTS")
+        do_gc()
+
+def reload_tts():
+    unload_tts()
+    load_tts()
+
+def get_current_voice():
+    global current_voice
+    if current_voice:
+        return current_voice
+
+    settings, _ = read_generate_settings("./config/generate.json", read_latents=False)
+
+    if settings and "voice" in settings:
+        return settings["voice"]
+
+    return None
+
+def deduce_autoregressive_model(voice=None):
+    if not voice:
+        voice = get_current_voice()
+
+    if voice:
+        if os.path.exists(f'./models/finetunes/{voice}.pth'):
+            return f'./models/finetunes/{voice}.pth'
+
+        dir = f'./training/{voice}/finetune/models/'
+        if os.path.isdir(dir):
+            counts = sorted([ int(d[:-8]) for d in os.listdir(dir) if d[-8:] == "_gpt.pth" ])
+            names = [ f'{dir}/{d}_gpt.pth' for d in counts ]
+            if len(names) > 0:
+                return names[-1]
+
+    if args.autoregressive_model != "auto":
+        return args.autoregressive_model
+
+    return get_model_path('autoregressive.pth')
+
+def update_autoregressive_model(autoregressive_model_path):
+    if args.tts_backend != "tortoise":
+        raise Exception(f"Unsupported backend: {args.tts_backend}")
+
+    if autoregressive_model_path == "auto":
+        autoregressive_model_path = deduce_autoregressive_model()
+    else:
+        match = re.findall(r'^\[[a-fA-F0-9]{8}\] (.+?)$', autoregressive_model_path)
+        if match:
+            autoregressive_model_path = match[0]
+
+    if not autoregressive_model_path or not os.path.exists(autoregressive_model_path):
+        print(f"Invalid model: {autoregressive_model_path}")
+        return
+
+    args.autoregressive_model = autoregressive_model_path
+    save_args_settings()
+    print(f'Stored autoregressive model to settings: {autoregressive_model_path}')
+
+    global tts
+    if not tts:
+        if tts_loading:
+            raise Exception("TTS is still initializing...")
+        return
+
+    if hasattr(tts, "loading") and tts.loading:
+        raise Exception("TTS is still initializing...")
+
+
+    if autoregressive_model_path == tts.autoregressive_model_path:
+        return
+
+    tts.load_autoregressive_model(autoregressive_model_path)
+
+    do_gc()
+
+    return autoregressive_model_path
+
+def update_diffusion_model(diffusion_model_path):
+    if args.tts_backend != "tortoise":
+        raise Exception(f"Unsupported backend: {args.tts_backend}")
+
+    match = re.findall(r'^\[[a-fA-F0-9]{8}\] (.+?)$', diffusion_model_path)
+    if match:
+        diffusion_model_path = match[0]
+
+    if not diffusion_model_path or not os.path.exists(diffusion_model_path):
+        print(f"Invalid model: {diffusion_model_path}")
+        return
+
+    args.diffusion_model = diffusion_model_path
+    save_args_settings()
+    print(f'Stored diffusion model to settings: {diffusion_model_path}')
+
+    global tts
+    if not tts:
+        if tts_loading:
+            raise Exception("TTS is still initializing...")
+        return
+
+    if hasattr(tts, "loading") and tts.loading:
+        raise Exception("TTS is still initializing...")
+
+    if diffusion_model_path == "auto":
+        diffusion_model_path = deduce_diffusion_model()
+
+    if diffusion_model_path == tts.diffusion_model_path:
+        return
+
+    tts.load_diffusion_model(diffusion_model_path)
+
+    do_gc()
+
+    return diffusion_model_path
+
+def update_vocoder_model(vocoder_model):
+    if args.tts_backend != "tortoise":
+        raise Exception(f"Unsupported backend: {args.tts_backend}")
+
+    args.vocoder_model = vocoder_model
+    save_args_settings()
+    print(f'Stored vocoder model to settings: {vocoder_model}')
+
+    global tts
+    if not tts:
+        if tts_loading:
+            raise Exception("TTS is still initializing...")
+        return
+
+    if hasattr(tts, "loading") and tts.loading:
+        raise Exception("TTS is still initializing...")
+
+    print(f"Loading model: {vocoder_model}")
+    tts.load_vocoder_model(vocoder_model)
+    print(f"Loaded model: {tts.vocoder_model}")
+
+    do_gc()
+
+    return vocoder_model
+
+def update_tokenizer(tokenizer_json):
+    if args.tts_backend != "tortoise":
+        raise Exception(f"Unsupported backend: {args.tts_backend}")
+
+    args.tokenizer_json = tokenizer_json
+    save_args_settings()
+    print(f'Stored tokenizer to settings: {tokenizer_json}')
+
+    global tts
+    if not tts:
+        if tts_loading:
+            raise Exception("TTS is still initializing...")
+        return
+
+    if hasattr(tts, "loading") and tts.loading:
+        raise Exception("TTS is still initializing...")
+
+    print(f"Loading tokenizer vocab: {tokenizer_json}")
+    tts.load_tokenizer_json(tokenizer_json)
+    print(f"Loaded tokenizer vocab: {tts.tokenizer_json}")
+
+    do_gc()
+
+    return tokenizer_json
+
+def load_voicefixer(restart=False):
+    global voicefixer
+
+    if restart:
+        unload_voicefixer()
+
+    try:
+        print("Loading Voicefixer")
+        from voicefixer import VoiceFixer
+        voicefixer = VoiceFixer()
+        print("Loaded Voicefixer")
+    except Exception as e:
+        print(f"Error occurred while trying to initialize voicefixer: {e}")
+        if voicefixer:
+            del voicefixer
+        voicefixer = None
+
+def unload_voicefixer():
+    global voicefixer
+
+    if voicefixer:
+        del voicefixer
+        voicefixer = None
+        print("Unloaded Voicefixer")
+
+    do_gc()
+
+def load_whisper_model(language=None, model_name=None, progress=None):
+    global whisper_model
+    global whisper_align_model
+
+    if args.whisper_backend not in WHISPER_BACKENDS:
+        raise Exception(f"unavailable backend: {args.whisper_backend}")
+
+    if not model_name:
+        model_name = args.whisper_model
+    else:
+        args.whisper_model = model_name
+        save_args_settings()
+
+    if language and f'{model_name}.{language}' in WHISPER_SPECIALIZED_MODELS:
+        model_name = f'{model_name}.{language}'
+        print(f"Loading specialized model for language: {language}")
+
+    notify_progress(f"Loading Whisper model: {model_name}", progress=progress)
+
+    if args.whisper_backend == "openai/whisper":
+        import whisper
+        try:
+            #is it possible for model to fit on vram but go oom later on while executing on data?
+            whisper_model = whisper.load_model(model_name)
+        except:
+            print("Out of VRAM memory. 
falling back to loading Whisper on CPU.") + whisper_model = whisper.load_model(model_name, device="cpu") + elif args.whisper_backend == "lightmare/whispercpp": + from whispercpp import Whisper + if not language: + language = 'auto' + + b_lang = language.encode('ascii') + whisper_model = Whisper(model_name, models_dir='./models/', language=b_lang) + elif args.whisper_backend == "m-bain/whisperx": + import whisper, whisperx + device = "cuda" if get_device_name() == "cuda" else "cpu" + whisper_model = whisperx.load_model(model_name, device) + whisper_align_model = whisperx.load_align_model(model_name="WAV2VEC2_ASR_LARGE_LV60K_960H" if language=="en" else None, language_code=language, device=device) + + print("Loaded Whisper model") + +def unload_whisper(): + global whisper_model + global whisper_align_model + + if whisper_align_model: + del whisper_align_model + whisper_align_model = None + + if whisper_model: + del whisper_model + whisper_model = None + print("Unloaded Whisper") + + do_gc() + +# shamelessly borrowed from Voldy's Web UI: https://github.com/AUTOMATIC1111/stable-diffusion-webui/blob/master/modules/extras.py#L74 +def merge_models( primary_model_name, secondary_model_name, alpha, progress=gr.Progress() ): + key_blacklist = [] + + def weighted_sum(theta0, theta1, alpha): + return ((1 - alpha) * theta0) + (alpha * theta1) + + def read_model( filename ): + print(f"Loading {filename}") + return torch.load(filename) + + theta_func = weighted_sum + + theta_0 = read_model(primary_model_name) + theta_1 = read_model(secondary_model_name) + + for key in tqdm(theta_0.keys(), desc="Merging..."): + if key in key_blacklist: + print("Skipping ignored key:", key) + continue + + a = theta_0[key] + b = theta_1[key] + + if a.dtype != torch.float32 and a.dtype != torch.float16: + print("Skipping key:", key, a.dtype) + continue + + if b.dtype != torch.float32 and b.dtype != torch.float16: + print("Skipping key:", key, b.dtype) + continue + + theta_0[key] = theta_func(a, b, alpha) + + del theta_1 + + primary_basename = os.path.splitext(os.path.basename(primary_model_name))[0] + secondary_basename = os.path.splitext(os.path.basename(secondary_model_name))[0] + suffix = "{:.3f}".format(alpha) + output_path = f'./models/finetunes/{primary_basename}_{secondary_basename}_{suffix}_merge.pth' + + torch.save(theta_0, output_path) + message = f"Saved to {output_path}" + print(message) return message \ No newline at end of file -- 2.45.2 From 979b2c25f742ca5cfacb5e36eb3b77315d989d63 Mon Sep 17 00:00:00 2001 From: ken11o2 Date: Mon, 4 Sep 2023 19:00:00 +0000 Subject: [PATCH 3/4] add use_deepspeed into Setting Tab Gradio --- src/webui.py | 1953 +++++++++++++++++++++++++------------------------- 1 file changed, 977 insertions(+), 976 deletions(-) diff --git a/src/webui.py b/src/webui.py index 402bb2b..560453d 100755 --- a/src/webui.py +++ b/src/webui.py @@ -1,977 +1,978 @@ -import os -import argparse -import time -import json -import base64 -import re -import inspect -import urllib.request - -import torch -import torchaudio -import music_tag -import gradio as gr -import gradio.utils - -from datetime import datetime - -import tortoise.api -from tortoise.utils.audio import get_voice_dir, get_voices -from tortoise.utils.device import get_device_count - -from utils import * - -args = setup_args() - -GENERATE_SETTINGS = {} -TRANSCRIBE_SETTINGS = {} -EXEC_SETTINGS = {} -TRAINING_SETTINGS = {} -MERGER_SETTINGS = {} -GENERATE_SETTINGS_ARGS = [] - -PRESETS = { - 'Ultra Fast': {'num_autoregressive_samples': 16, 
'diffusion_iterations': 30, 'cond_free': False}, - 'Fast': {'num_autoregressive_samples': 96, 'diffusion_iterations': 80}, - 'Standard': {'num_autoregressive_samples': 256, 'diffusion_iterations': 200}, - 'High Quality': {'num_autoregressive_samples': 256, 'diffusion_iterations': 400}, -} - -HISTORY_HEADERS = { - "Name": "", - "Samples": "num_autoregressive_samples", - "Iterations": "diffusion_iterations", - "Temp.": "temperature", - "Sampler": "diffusion_sampler", - "CVVP": "cvvp_weight", - "Top P": "top_p", - "Diff. Temp.": "diffusion_temperature", - "Len Pen": "length_penalty", - "Rep Pen": "repetition_penalty", - "Cond-Free K": "cond_free_k", - "Time": "time", - "Datetime": "datetime", - "Model": "model", - "Model Hash": "model_hash", -} - -# can't use *args OR **kwargs if I want to retain the ability to use progress -def generate_proxy( - text, - delimiter, - emotion, - prompt, - voice, - mic_audio, - voice_latents_chunks, - candidates, - seed, - num_autoregressive_samples, - diffusion_iterations, - temperature, - diffusion_sampler, - breathing_room, - cvvp_weight, - top_p, - diffusion_temperature, - length_penalty, - repetition_penalty, - cond_free_k, - experimentals, - voice_latents_original_ar, - voice_latents_original_diffusion, - progress=gr.Progress(track_tqdm=True) -): - kwargs = locals() - - try: - sample, outputs, stats = generate(**kwargs) - except Exception as e: - message = str(e) - if message == "Kill signal detected": - unload_tts() - - raise e - - return ( - outputs[0], - gr.update(value=sample, visible=sample is not None), - gr.update(choices=outputs, value=outputs[0], visible=len(outputs) > 1, interactive=True), - gr.update(value=stats, visible=True), - ) - - -def update_presets(value): - if value in PRESETS: - preset = PRESETS[value] - return (gr.update(value=preset['num_autoregressive_samples']), gr.update(value=preset['diffusion_iterations'])) - else: - return (gr.update(), gr.update()) - -def get_training_configs(): - configs = [] - for i, file in enumerate(sorted(os.listdir(f"./training/"))): - if file[-5:] != ".yaml" or file[0] == ".": - continue - configs.append(f"./training/{file}") - - return configs - -def update_training_configs(): - return gr.update(choices=get_training_list()) - -def history_view_results( voice ): - results = [] - files = [] - outdir = f"{args.results_folder}/{voice}/" - for i, file in enumerate(sorted(os.listdir(outdir))): - if file[-4:] != ".wav": - continue - - metadata, _ = read_generate_settings(f"{outdir}/{file}", read_latents=False) - if metadata is None: - continue - - values = [] - for k in HISTORY_HEADERS: - v = file - if k != "Name": - v = metadata[HISTORY_HEADERS[k]] if HISTORY_HEADERS[k] in metadata else '?' 
- values.append(v) - - - files.append(file) - results.append(values) - - return ( - results, - gr.Dropdown.update(choices=sorted(files)) - ) - -def import_generate_settings_proxy( file=None ): - global GENERATE_SETTINGS_ARGS - settings = import_generate_settings( file ) - - res = [] - for k in GENERATE_SETTINGS_ARGS: - res.append(settings[k] if k in settings else None) - - return tuple(res) - -def reset_generate_settings_proxy(): - global GENERATE_SETTINGS_ARGS - settings = reset_generate_settings() - - res = [] - for k in GENERATE_SETTINGS_ARGS: - res.append(settings[k] if k in settings else None) - - return tuple(res) - -def compute_latents_proxy(voice, voice_latents_chunks, original_ar, original_diffusion, progress=gr.Progress(track_tqdm=True)): - compute_latents( voice=voice, voice_latents_chunks=voice_latents_chunks, original_ar=original_ar, original_diffusion=original_diffusion ) - return voice - - -def import_voices_proxy(files, name, progress=gr.Progress(track_tqdm=True)): - import_voices(files, name, progress) - return gr.update() - -def read_generate_settings_proxy(file, saveAs='.temp'): - j, latents = read_generate_settings(file) - - if latents: - outdir = f'{get_voice_dir()}/{saveAs}/' - os.makedirs(outdir, exist_ok=True) - with open(f'{outdir}/cond_latents.pth', 'wb') as f: - f.write(latents) - - latents = f'{outdir}/cond_latents.pth' - - return ( - gr.update(value=j, visible=j is not None), - gr.update(value=latents, visible=latents is not None), - None if j is None else j['voice'], - gr.update(visible=j is not None), - ) - -def slice_dataset_proxy( voice, trim_silence, start_offset, end_offset, progress=gr.Progress(track_tqdm=True) ): - return slice_dataset( voice, trim_silence=trim_silence, start_offset=start_offset, end_offset=end_offset, results=None, progress=progress ) - -def diarize_dataset( voice, progress=gr.Progress(track_tqdm=True) ): - from pyannote.audio import Pipeline - pipeline = Pipeline.from_pretrained("pyannote/speaker-diarization", use_auth_token=args.hf_token) - - messages = [] - files = get_voice(voice, load_latents=False) - for file in enumerate_progress(files, desc="Iterating through voice files", progress=progress): - diarization = pipeline(file) - for turn, _, speaker in diarization.itertracks(yield_label=True): - message = f"start={turn.start:.1f}s stop={turn.end:.1f}s speaker_{speaker}" - print(message) - messages.append(message) - - return "\n".join(messages) - -def prepare_all_datasets( language, validation_text_length, validation_audio_length, skip_existings, slice_audio, trim_silence, slice_start_offset, slice_end_offset, progress=gr.Progress(track_tqdm=True) ): - kwargs = locals() - - messages = [] - voices = get_voice_list() - - for voice in voices: - print("Processing:", voice) - message = transcribe_dataset( voice=voice, language=language, skip_existings=skip_existings, progress=progress ) - messages.append(message) - - if slice_audio: - for voice in voices: - print("Processing:", voice) - message = slice_dataset( voice, trim_silence=trim_silence, start_offset=slice_start_offset, end_offset=slice_end_offset, results=None, progress=progress ) - messages.append(message) - - for voice in voices: - print("Processing:", voice) - message = prepare_dataset( voice, use_segments=slice_audio, text_length=validation_text_length, audio_length=validation_audio_length, progress=progress ) - messages.append(message) - - return "\n".join(messages) - -def prepare_dataset_proxy( voice, language, validation_text_length, validation_audio_length, 
skip_existings, slice_audio, trim_silence, slice_start_offset, slice_end_offset, progress=gr.Progress(track_tqdm=True) ): - messages = [] - - message = transcribe_dataset( voice=voice, language=language, skip_existings=skip_existings, progress=progress ) - messages.append(message) - - if slice_audio: - message = slice_dataset( voice, trim_silence=trim_silence, start_offset=slice_start_offset, end_offset=slice_end_offset, results=None, progress=progress ) - messages.append(message) - - message = prepare_dataset( voice, use_segments=slice_audio, text_length=validation_text_length, audio_length=validation_audio_length, progress=progress ) - messages.append(message) - - return "\n".join(messages) - -def update_args_proxy( *args ): - kwargs = {} - keys = list(EXEC_SETTINGS.keys()) - for i in range(len(args)): - k = keys[i] - v = args[i] - kwargs[k] = v - - update_args(**kwargs) -def optimize_training_settings_proxy( *args ): - kwargs = {} - keys = list(TRAINING_SETTINGS.keys()) - for i in range(len(args)): - k = keys[i] - v = args[i] - kwargs[k] = v - - settings, messages = optimize_training_settings(**kwargs) - output = list(settings.values()) - return output[:-1] + ["\n".join(messages)] - -def import_training_settings_proxy( voice ): - messages = [] - injson = f'./training/{voice}/train.json' - statedir = f'./training/{voice}/finetune/training_state/' - output = {} - - try: - with open(injson, 'r', encoding="utf-8") as f: - settings = json.loads(f.read()) - except: - messages.append(f"Error import /{voice}/train.json") - - for k in TRAINING_SETTINGS: - output[k] = TRAINING_SETTINGS[k].value - - output = list(output.values()) - return output[:-1] + ["\n".join(messages)] - - if os.path.isdir(statedir): - resumes = sorted([int(d[:-6]) for d in os.listdir(statedir) if d[-6:] == ".state" ]) - - if len(resumes) > 0: - settings['resume_state'] = f'{statedir}/{resumes[-1]}.state' - messages.append(f"Found most recent training state: {settings['resume_state']}") - - output = {} - for k in TRAINING_SETTINGS: - if k not in settings: - output[k] = gr.update() - else: - output[k] = gr.update(value=settings[k]) - - output = list(output.values()) - - messages.append(f"Imported training settings: {injson}") - - return output[:-1] + ["\n".join(messages)] - -def save_training_settings_proxy( *args ): - kwargs = {} - keys = list(TRAINING_SETTINGS.keys()) - for i in range(len(args)): - k = keys[i] - v = args[i] - kwargs[k] = v - - settings, messages = save_training_settings(**kwargs) - return "\n".join(messages) - -def update_voices(): - return ( - gr.Dropdown.update(choices=get_voice_list(append_defaults=True)), - gr.Dropdown.update(choices=get_voice_list()), - gr.Dropdown.update(choices=get_voice_list(args.results_folder)), - ) - -def history_copy_settings( voice, file ): - return import_generate_settings( f"{args.results_folder}/{voice}/{file}" ) - -def setup_gradio(): - global args - global ui - - if not args.share: - def noop(function, return_value=None): - def wrapped(*args, **kwargs): - return return_value - return wrapped - gradio.utils.version_check = noop(gradio.utils.version_check) - gradio.utils.initiated_analytics = noop(gradio.utils.initiated_analytics) - gradio.utils.launch_analytics = noop(gradio.utils.launch_analytics) - gradio.utils.integration_analytics = noop(gradio.utils.integration_analytics) - gradio.utils.error_analytics = noop(gradio.utils.error_analytics) - gradio.utils.log_feature_analytics = noop(gradio.utils.log_feature_analytics) - #gradio.utils.get_local_ip_address = 
noop(gradio.utils.get_local_ip_address, 'localhost') - - if args.models_from_local_only: - os.environ['TRANSFORMERS_OFFLINE']='1' - - voice_list_with_defaults = get_voice_list(append_defaults=True) - voice_list = get_voice_list() - result_voices = get_voice_list(args.results_folder) - - valle_models = get_valle_models() - - autoregressive_models = get_autoregressive_models() - diffusion_models = get_diffusion_models() - tokenizer_jsons = get_tokenizer_jsons() - - dataset_list = get_dataset_list() - training_list = get_training_list() - - global GENERATE_SETTINGS_ARGS - GENERATE_SETTINGS_ARGS = list(inspect.signature(generate_proxy).parameters.keys())[:-1] - for i in range(len(GENERATE_SETTINGS_ARGS)): - arg = GENERATE_SETTINGS_ARGS[i] - GENERATE_SETTINGS[arg] = None - - with gr.Blocks() as ui: - with gr.Tab("Generate"): - with gr.Row(): - with gr.Column(): - GENERATE_SETTINGS["text"] = gr.Textbox(lines=4, value="Your prompt here.", label="Input Prompt") - with gr.Row(): - with gr.Column(): - GENERATE_SETTINGS["delimiter"] = gr.Textbox(lines=1, label="Line Delimiter", placeholder="\\n") - - GENERATE_SETTINGS["emotion"] = gr.Radio( ["Happy", "Sad", "Angry", "Disgusted", "Arrogant", "Custom", "None"], value="None", label="Emotion", type="value", interactive=True, visible=args.tts_backend=="tortoise" ) - GENERATE_SETTINGS["prompt"] = gr.Textbox(lines=1, label="Custom Emotion", visible=False) - GENERATE_SETTINGS["voice"] = gr.Dropdown(choices=voice_list_with_defaults, label="Voice", type="value", value=voice_list_with_defaults[0]) # it'd be very cash money if gradio was able to default to the first value in the list without this shit - GENERATE_SETTINGS["mic_audio"] = gr.Audio( label="Microphone Source", source="microphone", type="filepath", visible=False ) - GENERATE_SETTINGS["voice_latents_chunks"] = gr.Number(label="Voice Chunks", precision=0, value=0, visible=args.tts_backend=="tortoise") - GENERATE_SETTINGS["voice_latents_original_ar"] = gr.Checkbox(label="Use Original Latents Method (AR)", visible=args.tts_backend=="tortoise") - GENERATE_SETTINGS["voice_latents_original_diffusion"] = gr.Checkbox(label="Use Original Latents Method (Diffusion)", visible=args.tts_backend=="tortoise") - with gr.Row(): - refresh_voices = gr.Button(value="Refresh Voice List") - recompute_voice_latents = gr.Button(value="(Re)Compute Voice Latents") - - GENERATE_SETTINGS["voice"].change( - fn=update_baseline_for_latents_chunks, - inputs=GENERATE_SETTINGS["voice"], - outputs=GENERATE_SETTINGS["voice_latents_chunks"] - ) - GENERATE_SETTINGS["voice"].change( - fn=lambda value: gr.update(visible=value == "microphone"), - inputs=GENERATE_SETTINGS["voice"], - outputs=GENERATE_SETTINGS["mic_audio"], - ) - with gr.Column(): - preset = None - GENERATE_SETTINGS["candidates"] = gr.Slider(value=1, minimum=1, maximum=6, step=1, label="Candidates", visible=args.tts_backend=="tortoise") - GENERATE_SETTINGS["seed"] = gr.Number(value=0, precision=0, label="Seed", visible=args.tts_backend=="tortoise") - - preset = gr.Radio( ["Ultra Fast", "Fast", "Standard", "High Quality"], label="Preset", type="value", value="Ultra Fast", visible=args.tts_backend=="tortoise" ) - - GENERATE_SETTINGS["num_autoregressive_samples"] = gr.Slider(value=16, minimum=2, maximum=2048 if args.tts_backend=="vall-e" else 512, step=1, label="Samples", visible=args.tts_backend!="bark") - GENERATE_SETTINGS["diffusion_iterations"] = gr.Slider(value=30, minimum=0, maximum=512, step=1, label="Iterations", visible=args.tts_backend=="tortoise") - - 
GENERATE_SETTINGS["temperature"] = gr.Slider(value=0.95 if args.tts_backend=="vall-e" else 0.2, minimum=0, maximum=1, step=0.05, label="Temperature") - - show_experimental_settings = gr.Checkbox(label="Show Experimental Settings", visible=args.tts_backend=="tortoise") - reset_generate_settings_button = gr.Button(value="Reset to Default") - with gr.Column(visible=False) as col: - experimental_column = col - - GENERATE_SETTINGS["experimentals"] = gr.CheckboxGroup(["Half Precision", "Conditioning-Free"], value=["Conditioning-Free"], label="Experimental Flags") - GENERATE_SETTINGS["breathing_room"] = gr.Slider(value=8, minimum=1, maximum=32, step=1, label="Pause Size") - GENERATE_SETTINGS["diffusion_sampler"] = gr.Radio( - ["P", "DDIM"], # + ["K_Euler_A", "DPM++2M"], - value="DDIM", label="Diffusion Samplers", type="value" - ) - GENERATE_SETTINGS["cvvp_weight"] = gr.Slider(value=0, minimum=0, maximum=1, label="CVVP Weight") - GENERATE_SETTINGS["top_p"] = gr.Slider(value=0.8, minimum=0, maximum=1, label="Top P") - GENERATE_SETTINGS["diffusion_temperature"] = gr.Slider(value=1.0, minimum=0, maximum=1, label="Diffusion Temperature") - GENERATE_SETTINGS["length_penalty"] = gr.Slider(value=1.0, minimum=0, maximum=8, label="Length Penalty") - GENERATE_SETTINGS["repetition_penalty"] = gr.Slider(value=2.0, minimum=0, maximum=8, label="Repetition Penalty") - GENERATE_SETTINGS["cond_free_k"] = gr.Slider(value=2.0, minimum=0, maximum=4, label="Conditioning-Free K") - with gr.Column(): - with gr.Row(): - submit = gr.Button(value="Generate") - stop = gr.Button(value="Stop") - - generation_results = gr.Dataframe(label="Results", headers=["Seed", "Time"], visible=False) - source_sample = gr.Audio(label="Source Sample", visible=False) - output_audio = gr.Audio(label="Output") - candidates_list = gr.Dropdown(label="Candidates", type="value", visible=False, choices=[""], value="") - - def change_candidate( val ): - if not val: - return - return val - - candidates_list.change( - fn=change_candidate, - inputs=candidates_list, - outputs=output_audio, - ) - with gr.Tab("History"): - with gr.Row(): - with gr.Column(): - history_info = gr.Dataframe(label="Results", headers=list(HISTORY_HEADERS.keys())) - with gr.Row(): - with gr.Column(): - history_voices = gr.Dropdown(choices=result_voices, label="Voice", type="value", value=result_voices[0] if len(result_voices) > 0 else "") - with gr.Column(): - history_results_list = gr.Dropdown(label="Results",type="value", interactive=True, value="") - with gr.Column(): - history_audio = gr.Audio() - history_copy_settings_button = gr.Button(value="Copy Settings") - with gr.Tab("Utilities"): - with gr.Tab("Import / Analyze"): - with gr.Row(): - with gr.Column(): - audio_in = gr.Files(type="file", label="Audio Input", file_types=["audio"]) - import_voice_name = gr.Textbox(label="Voice Name") - import_voice_button = gr.Button(value="Import Voice") - with gr.Column(visible=False) as col: - utilities_metadata_column = col - - metadata_out = gr.JSON(label="Audio Metadata") - copy_button = gr.Button(value="Copy Settings") - latents_out = gr.File(type="binary", label="Voice Latents") - with gr.Tab("Tokenizer"): - with gr.Row(): - text_tokenizier_input = gr.TextArea(label="Text", max_lines=4) - text_tokenizier_output = gr.TextArea(label="Tokenized Text", max_lines=4) - - with gr.Row(): - text_tokenizier_button = gr.Button(value="Tokenize Text") - with gr.Tab("Model Merger"): - with gr.Column(): - with gr.Row(): - MERGER_SETTINGS["model_a"] = gr.Dropdown( choices=autoregressive_models, 
label="Model A", type="value", value=autoregressive_models[0] ) - MERGER_SETTINGS["model_b"] = gr.Dropdown( choices=autoregressive_models, label="Model B", type="value", value=autoregressive_models[0] ) - with gr.Row(): - MERGER_SETTINGS["weight_slider"] = gr.Slider(label="Weight (from A to B)", value=0.5, minimum=0, maximum=1) - with gr.Row(): - merger_button = gr.Button(value="Run Merger") - with gr.Column(): - merger_output = gr.TextArea(label="Console Output", max_lines=8) - with gr.Tab("Training"): - with gr.Tab("Prepare Dataset"): - with gr.Row(): - with gr.Column(): - DATASET_SETTINGS = {} - DATASET_SETTINGS['voice'] = gr.Dropdown( choices=voice_list, label="Dataset Source", type="value", value=voice_list[0] if len(voice_list) > 0 else "" ) - with gr.Row(): - DATASET_SETTINGS['language'] = gr.Textbox(label="Language", value="en") - DATASET_SETTINGS['validation_text_length'] = gr.Number(label="Validation Text Length Threshold", value=12, precision=0, visible=args.tts_backend=="tortoise") - DATASET_SETTINGS['validation_audio_length'] = gr.Number(label="Validation Audio Length Threshold", value=1, visible=args.tts_backend=="tortoise" ) - with gr.Row(): - DATASET_SETTINGS['skip'] = gr.Checkbox(label="Skip Existing", value=False) - DATASET_SETTINGS['slice'] = gr.Checkbox(label="Slice Segments", value=False) - DATASET_SETTINGS['trim_silence'] = gr.Checkbox(label="Trim Silence", value=False) - with gr.Row(): - DATASET_SETTINGS['slice_start_offset'] = gr.Number(label="Slice Start Offset", value=0) - DATASET_SETTINGS['slice_end_offset'] = gr.Number(label="Slice End Offset", value=0) - - transcribe_button = gr.Button(value="Transcribe and Process") - transcribe_all_button = gr.Button(value="Transcribe All") - diarize_button = gr.Button(value="Diarize", visible=False) - - with gr.Row(): - slice_dataset_button = gr.Button(value="(Re)Slice Audio") - prepare_dataset_button = gr.Button(value="(Re)Create Dataset") - - with gr.Row(): - EXEC_SETTINGS['whisper_backend'] = gr.Dropdown(WHISPER_BACKENDS, label="Whisper Backends", value=args.whisper_backend) - EXEC_SETTINGS['whisper_model'] = gr.Dropdown(WHISPER_MODELS, label="Whisper Model", value=args.whisper_model) - - dataset_settings = list(DATASET_SETTINGS.values()) - with gr.Column(): - prepare_dataset_output = gr.TextArea(label="Console Output", interactive=False, max_lines=8) - with gr.Tab("Generate Configuration", visible=args.tts_backend != "bark"): - with gr.Row(): - with gr.Column(): - TRAINING_SETTINGS["epochs"] = gr.Number(label="Epochs", value=500, precision=0) - with gr.Row(visible=args.tts_backend=="tortoise"): - TRAINING_SETTINGS["learning_rate"] = gr.Slider(label="Learning Rate", value=1e-5, minimum=0, maximum=1e-4, step=1e-6) - TRAINING_SETTINGS["mel_lr_weight"] = gr.Slider(label="Mel LR Ratio", value=1.00, minimum=0, maximum=1) - TRAINING_SETTINGS["text_lr_weight"] = gr.Slider(label="Text LR Ratio", value=0.01, minimum=0, maximum=1) - - with gr.Row(visible=args.tts_backend=="tortoise"): - lr_schemes = list(LEARNING_RATE_SCHEMES.keys()) - TRAINING_SETTINGS["learning_rate_scheme"] = gr.Radio(lr_schemes, label="Learning Rate Scheme", value=lr_schemes[0], type="value") - TRAINING_SETTINGS["learning_rate_schedule"] = gr.Textbox(label="Learning Rate Schedule", placeholder=str(LEARNING_RATE_SCHEDULE), visible=True) - TRAINING_SETTINGS["learning_rate_restarts"] = gr.Number(label="Learning Rate Restarts", value=4, precision=0, visible=False) - - TRAINING_SETTINGS["learning_rate_scheme"].change( - fn=lambda x: ( gr.update(visible=x == 
lr_schemes[0]), gr.update(visible=x == lr_schemes[1]) ), - inputs=TRAINING_SETTINGS["learning_rate_scheme"], - outputs=[ - TRAINING_SETTINGS["learning_rate_schedule"], - TRAINING_SETTINGS["learning_rate_restarts"], - ] - ) - with gr.Row(): - TRAINING_SETTINGS["batch_size"] = gr.Number(label="Batch Size", value=128, precision=0) - TRAINING_SETTINGS["gradient_accumulation_size"] = gr.Number(label="Gradient Accumulation Size", value=4, precision=0) - with gr.Row(): - TRAINING_SETTINGS["save_rate"] = gr.Number(label="Save Frequency (in epochs)", value=5, precision=0) - TRAINING_SETTINGS["validation_rate"] = gr.Number(label="Validation Frequency (in epochs)", value=5, precision=0) - - with gr.Row(): - TRAINING_SETTINGS["half_p"] = gr.Checkbox(label="Half Precision", value=args.training_default_halfp, visible=args.tts_backend=="tortoise") - TRAINING_SETTINGS["bitsandbytes"] = gr.Checkbox(label="BitsAndBytes", value=args.training_default_bnb, visible=args.tts_backend=="tortoise") - TRAINING_SETTINGS["validation_enabled"] = gr.Checkbox(label="Validation Enabled", value=False) - - with gr.Row(): - TRAINING_SETTINGS["workers"] = gr.Number(label="Worker Processes", value=2, precision=0, visible=args.tts_backend=="tortoise") - TRAINING_SETTINGS["gpus"] = gr.Number(label="GPUs", value=get_device_count(), precision=0) - - TRAINING_SETTINGS["source_model"] = gr.Dropdown( choices=autoregressive_models, label="Source Model", type="value", value=autoregressive_models[0], visible=args.tts_backend=="tortoise" ) - TRAINING_SETTINGS["resume_state"] = gr.Textbox(label="Resume State Path", placeholder="./training/${voice}/finetune/training_state/${last_state}.state", visible=args.tts_backend=="tortoise") - - TRAINING_SETTINGS["voice"] = gr.Dropdown( choices=dataset_list, label="Dataset", type="value", value=dataset_list[0] if len(dataset_list) else "" ) - - with gr.Row(): - training_refresh_dataset = gr.Button(value="Refresh Dataset List") - training_import_settings = gr.Button(value="Reuse/Import Dataset") - with gr.Column(): - training_configuration_output = gr.TextArea(label="Console Output", interactive=False, max_lines=8) - with gr.Row(): - training_optimize_configuration = gr.Button(value="Validate Training Configuration") - training_save_configuration = gr.Button(value="Save Training Configuration") - with gr.Tab("Run Training", visible=args.tts_backend != "bark"): - with gr.Row(): - with gr.Column(): - training_configs = gr.Dropdown(label="Training Configuration", choices=training_list, value=training_list[0] if len(training_list) else "") - refresh_configs = gr.Button(value="Refresh Configurations") - training_output = gr.TextArea(label="Console Output", interactive=False, max_lines=8) - verbose_training = gr.Checkbox(label="Verbose Console Output", value=True) - - keep_x_past_checkpoints = gr.Slider(label="Keep X Previous States", minimum=0, maximum=8, value=0, step=1) - - with gr.Row(): - training_graph_x_min = gr.Number(label="X Min", precision=0, value=0) - training_graph_x_max = gr.Number(label="X Max", precision=0, value=0) - training_graph_y_min = gr.Number(label="Y Min", precision=0, value=0) - training_graph_y_max = gr.Number(label="Y Max", precision=0, value=0) - - with gr.Row(): - start_training_button = gr.Button(value="Train") - stop_training_button = gr.Button(value="Stop") - reconnect_training_button = gr.Button(value="Reconnect") - - - with gr.Column(): - training_loss_graph = gr.LinePlot(label="Training Metrics", - x="it", # x="epoch", - y="value", - title="Loss Metrics", - color="type", 
- tooltip=['epoch', 'it', 'value', 'type'], - width=500, - height=350, - ) - training_lr_graph = gr.LinePlot(label="Training Metrics", - x="it", # x="epoch", - y="value", - title="Learning Rate", - color="type", - tooltip=['epoch', 'it', 'value', 'type'], - width=500, - height=350, - ) - training_grad_norm_graph = gr.LinePlot(label="Training Metrics", - x="it", # x="epoch", - y="value", - title="Gradient Normals", - color="type", - tooltip=['epoch', 'it', 'value', 'type'], - width=500, - height=350, - visible=False, # args.tts_backend=="vall-e" - ) - view_losses = gr.Button(value="View Losses") - - with gr.Tab("Settings"): - with gr.Row(): - exec_inputs = [] - with gr.Column(): - EXEC_SETTINGS['listen'] = gr.Textbox(label="Listen", value=args.listen, placeholder="127.0.0.1:7860/") - EXEC_SETTINGS['share'] = gr.Checkbox(label="Public Share Gradio", value=args.share) - EXEC_SETTINGS['check_for_updates'] = gr.Checkbox(label="Check For Updates", value=args.check_for_updates) - EXEC_SETTINGS['models_from_local_only'] = gr.Checkbox(label="Only Load Models Locally", value=args.models_from_local_only) - EXEC_SETTINGS['low_vram'] = gr.Checkbox(label="Low VRAM", value=args.low_vram) - EXEC_SETTINGS['embed_output_metadata'] = gr.Checkbox(label="Embed Output Metadata", value=args.embed_output_metadata) - EXEC_SETTINGS['latents_lean_and_mean'] = gr.Checkbox(label="Slimmer Computed Latents", value=args.latents_lean_and_mean) - EXEC_SETTINGS['voice_fixer'] = gr.Checkbox(label="Use Voice Fixer on Generated Output", value=args.voice_fixer) - EXEC_SETTINGS['voice_fixer_use_cuda'] = gr.Checkbox(label="Use CUDA for Voice Fixer", value=args.voice_fixer_use_cuda) - EXEC_SETTINGS['force_cpu_for_conditioning_latents'] = gr.Checkbox(label="Force CPU for Conditioning Latents", value=args.force_cpu_for_conditioning_latents) - EXEC_SETTINGS['defer_tts_load'] = gr.Checkbox(label="Do Not Load TTS On Startup", value=args.defer_tts_load) - EXEC_SETTINGS['prune_nonfinal_outputs'] = gr.Checkbox(label="Delete Non-Final Output", value=args.prune_nonfinal_outputs) - with gr.Column(): - EXEC_SETTINGS['sample_batch_size'] = gr.Number(label="Sample Batch Size", precision=0, value=args.sample_batch_size) - EXEC_SETTINGS['unsqueeze_sample_batches'] = gr.Checkbox(label="Unsqueeze Sample Batches", value=args.unsqueeze_sample_batches) - EXEC_SETTINGS['concurrency_count'] = gr.Number(label="Gradio Concurrency Count", precision=0, value=args.concurrency_count) - EXEC_SETTINGS['autocalculate_voice_chunk_duration_size'] = gr.Number(label="Auto-Calculate Voice Chunk Duration (in seconds)", precision=0, value=args.autocalculate_voice_chunk_duration_size) - EXEC_SETTINGS['output_volume'] = gr.Slider(label="Output Volume", minimum=0, maximum=2, value=args.output_volume) - EXEC_SETTINGS['device_override'] = gr.Textbox(label="Device Override", value=args.device_override) - - EXEC_SETTINGS['results_folder'] = gr.Textbox(label="Results Folder", value=args.results_folder) - # EXEC_SETTINGS['tts_backend'] = gr.Dropdown(TTSES, label="TTS Backend", value=args.tts_backend if args.tts_backend else TTSES[0]) - - if args.tts_backend=="vall-e": - with gr.Column(): - EXEC_SETTINGS['valle_model'] = gr.Dropdown(choices=valle_models, label="VALL-E Model Config", value=args.valle_model if args.valle_model else valle_models[0]) - - with gr.Column(visible=args.tts_backend=="tortoise"): - EXEC_SETTINGS['autoregressive_model'] = gr.Dropdown(choices=["auto"] + autoregressive_models, label="Autoregressive Model", value=args.autoregressive_model if 
args.autoregressive_model else "auto") - EXEC_SETTINGS['diffusion_model'] = gr.Dropdown(choices=diffusion_models, label="Diffusion Model", value=args.diffusion_model if args.diffusion_model else diffusion_models[0]) - EXEC_SETTINGS['vocoder_model'] = gr.Dropdown(VOCODERS, label="Vocoder", value=args.vocoder_model if args.vocoder_model else VOCODERS[-1]) - EXEC_SETTINGS['tokenizer_json'] = gr.Dropdown(tokenizer_jsons, label="Tokenizer JSON Path", value=args.tokenizer_json if args.tokenizer_json else tokenizer_jsons[0]) - - EXEC_SETTINGS['training_default_halfp'] = TRAINING_SETTINGS['half_p'] - EXEC_SETTINGS['training_default_bnb'] = TRAINING_SETTINGS['bitsandbytes'] - - with gr.Row(): - autoregressive_models_update_button = gr.Button(value="Refresh Model List") - gr.Button(value="Check for Updates").click(check_for_updates) - gr.Button(value="(Re)Load TTS").click( - reload_tts, - inputs=None, - outputs=None - ) - # kill_button = gr.Button(value="Close UI") - - def update_model_list_proxy( autoregressive, diffusion, tokenizer ): - autoregressive_models = get_autoregressive_models() - if autoregressive not in autoregressive_models: - autoregressive = autoregressive_models[0] - - diffusion_models = get_diffusion_models() - if diffusion not in diffusion_models: - diffusion = diffusion_models[0] - - tokenizer_jsons = get_tokenizer_jsons() - if tokenizer not in tokenizer_jsons: - tokenizer = tokenizer_jsons[0] - - return ( - gr.update( choices=autoregressive_models, value=autoregressive ), - gr.update( choices=diffusion_models, value=diffusion ), - gr.update( choices=tokenizer_jsons, value=tokenizer ), - ) - - autoregressive_models_update_button.click( - update_model_list_proxy, - inputs=[ - EXEC_SETTINGS['autoregressive_model'], - EXEC_SETTINGS['diffusion_model'], - EXEC_SETTINGS['tokenizer_json'], - ], - outputs=[ - EXEC_SETTINGS['autoregressive_model'], - EXEC_SETTINGS['diffusion_model'], - EXEC_SETTINGS['tokenizer_json'], - ], - ) - - exec_inputs = list(EXEC_SETTINGS.values()) - for k in EXEC_SETTINGS: - EXEC_SETTINGS[k].change( fn=update_args_proxy, inputs=exec_inputs ) - - EXEC_SETTINGS['autoregressive_model'].change( - fn=update_autoregressive_model, - inputs=EXEC_SETTINGS['autoregressive_model'], - outputs=None, - api_name="set_autoregressive_model" - ) - - EXEC_SETTINGS['vocoder_model'].change( - fn=update_vocoder_model, - inputs=EXEC_SETTINGS['vocoder_model'], - outputs=None - ) - - history_voices.change( - fn=history_view_results, - inputs=history_voices, - outputs=[ - history_info, - history_results_list, - ] - ) - history_results_list.change( - fn=lambda voice, file: f"{args.results_folder}/{voice}/{file}", - inputs=[ - history_voices, - history_results_list, - ], - outputs=history_audio - ) - audio_in.upload( - fn=read_generate_settings_proxy, - inputs=audio_in, - outputs=[ - metadata_out, - latents_out, - import_voice_name, - utilities_metadata_column, - ] - ) - - import_voice_button.click( - fn=import_voices_proxy, - inputs=[ - audio_in, - import_voice_name, - ], - outputs=import_voice_name #console_output - ) - show_experimental_settings.change( - fn=lambda x: gr.update(visible=x), - inputs=show_experimental_settings, - outputs=experimental_column - ) - if preset: - preset.change(fn=update_presets, - inputs=preset, - outputs=[ - GENERATE_SETTINGS['num_autoregressive_samples'], - GENERATE_SETTINGS['diffusion_iterations'], - ], - ) - - recompute_voice_latents.click(compute_latents_proxy, - inputs=[ - GENERATE_SETTINGS['voice'], - GENERATE_SETTINGS['voice_latents_chunks'], - 
GENERATE_SETTINGS['voice_latents_original_ar'], - GENERATE_SETTINGS['voice_latents_original_diffusion'], - ], - outputs=GENERATE_SETTINGS['voice'], - ) - - GENERATE_SETTINGS['emotion'].change( - fn=lambda value: gr.update(visible=value == "Custom"), - inputs=GENERATE_SETTINGS['emotion'], - outputs=GENERATE_SETTINGS['prompt'] - ) - GENERATE_SETTINGS['mic_audio'].change(fn=lambda value: gr.update(value="microphone"), - inputs=GENERATE_SETTINGS['mic_audio'], - outputs=GENERATE_SETTINGS['voice'] - ) - - refresh_voices.click(update_voices, - inputs=None, - outputs=[ - GENERATE_SETTINGS['voice'], - DATASET_SETTINGS['voice'], - history_voices - ] - ) - - generate_settings = list(GENERATE_SETTINGS.values()) - submit.click( - lambda: (gr.update(visible=False), gr.update(visible=False), gr.update(visible=False)), - outputs=[source_sample, candidates_list, generation_results], - ) - - submit_event = submit.click(generate_proxy, - inputs=generate_settings, - outputs=[output_audio, source_sample, candidates_list, generation_results], - api_name="generate", - ) - - - copy_button.click(import_generate_settings_proxy, - inputs=audio_in, # JSON elements cannot be used as inputs - outputs=generate_settings - ) - - reset_generate_settings_button.click( - fn=reset_generate_settings_proxy, - inputs=None, - outputs=generate_settings - ) - - history_copy_settings_button.click(history_copy_settings, - inputs=[ - history_voices, - history_results_list, - ], - outputs=generate_settings - ) - - text_tokenizier_button.click(tokenize_text, - inputs=text_tokenizier_input, - outputs=text_tokenizier_output - ) - - merger_button.click(merge_models, - inputs=list(MERGER_SETTINGS.values()), - outputs=merger_output - ) - - refresh_configs.click( - lambda: gr.update(choices=get_training_list()), - inputs=None, - outputs=training_configs - ) - start_training_button.click(run_training, - inputs=[ - training_configs, - verbose_training, - keep_x_past_checkpoints, - ], - outputs=[ - training_output, - ], - ) - training_output.change( - fn=update_training_dataplot, - inputs=[ - training_graph_x_min, - training_graph_x_max, - training_graph_y_min, - training_graph_y_max, - ], - outputs=[ - training_loss_graph, - training_lr_graph, - training_grad_norm_graph, - ], - show_progress=False, - ) - - view_losses.click( - fn=update_training_dataplot, - inputs=[ - training_graph_x_min, - training_graph_x_max, - training_graph_y_min, - training_graph_y_max, - training_configs, - ], - outputs=[ - training_loss_graph, - training_lr_graph, - training_grad_norm_graph, - ], - ) - - stop_training_button.click(stop_training, - inputs=None, - outputs=training_output #console_output - ) - reconnect_training_button.click(reconnect_training, - inputs=[ - verbose_training, - ], - outputs=training_output #console_output - ) - transcribe_button.click( - prepare_dataset_proxy, - inputs=dataset_settings, - outputs=prepare_dataset_output #console_output - ) - transcribe_all_button.click( - prepare_all_datasets, - inputs=dataset_settings[1:], - outputs=prepare_dataset_output #console_output - ) - diarize_button.click( - diarize_dataset, - inputs=dataset_settings[0], - outputs=prepare_dataset_output #console_output - ) - prepare_dataset_button.click( - prepare_dataset, - inputs=[ - DATASET_SETTINGS['voice'], - DATASET_SETTINGS['slice'], - DATASET_SETTINGS['validation_text_length'], - DATASET_SETTINGS['validation_audio_length'], - ], - outputs=prepare_dataset_output #console_output - ) - slice_dataset_button.click( - slice_dataset_proxy, - inputs=[ - 
DATASET_SETTINGS['voice'], - DATASET_SETTINGS['trim_silence'], - DATASET_SETTINGS['slice_start_offset'], - DATASET_SETTINGS['slice_end_offset'], - ], - outputs=prepare_dataset_output - ) - - training_refresh_dataset.click( - lambda: gr.update(choices=get_dataset_list()), - inputs=None, - outputs=TRAINING_SETTINGS["voice"], - ) - training_settings = list(TRAINING_SETTINGS.values()) - training_optimize_configuration.click(optimize_training_settings_proxy, - inputs=training_settings, - outputs=training_settings[:-1] + [training_configuration_output] #console_output - ) - training_import_settings.click(import_training_settings_proxy, - inputs=TRAINING_SETTINGS['voice'], - outputs=training_settings[:-1] + [training_configuration_output] #console_output - ) - training_save_configuration.click(save_training_settings_proxy, - inputs=training_settings, - outputs=training_configuration_output #console_output - ) - - if os.path.isfile('./config/generate.json'): - ui.load(import_generate_settings_proxy, inputs=None, outputs=generate_settings) - - if args.check_for_updates: - ui.load(check_for_updates) - - stop.click(fn=cancel_generate, inputs=None, outputs=None) - - - ui.queue(concurrency_count=args.concurrency_count) - webui = ui +import os +import argparse +import time +import json +import base64 +import re +import inspect +import urllib.request + +import torch +import torchaudio +import music_tag +import gradio as gr +import gradio.utils + +from datetime import datetime + +import tortoise.api +from tortoise.utils.audio import get_voice_dir, get_voices +from tortoise.utils.device import get_device_count + +from utils import * + +args = setup_args() + +GENERATE_SETTINGS = {} +TRANSCRIBE_SETTINGS = {} +EXEC_SETTINGS = {} +TRAINING_SETTINGS = {} +MERGER_SETTINGS = {} +GENERATE_SETTINGS_ARGS = [] + +PRESETS = { + 'Ultra Fast': {'num_autoregressive_samples': 16, 'diffusion_iterations': 30, 'cond_free': False}, + 'Fast': {'num_autoregressive_samples': 96, 'diffusion_iterations': 80}, + 'Standard': {'num_autoregressive_samples': 256, 'diffusion_iterations': 200}, + 'High Quality': {'num_autoregressive_samples': 256, 'diffusion_iterations': 400}, +} + +HISTORY_HEADERS = { + "Name": "", + "Samples": "num_autoregressive_samples", + "Iterations": "diffusion_iterations", + "Temp.": "temperature", + "Sampler": "diffusion_sampler", + "CVVP": "cvvp_weight", + "Top P": "top_p", + "Diff. 
Temp.": "diffusion_temperature", + "Len Pen": "length_penalty", + "Rep Pen": "repetition_penalty", + "Cond-Free K": "cond_free_k", + "Time": "time", + "Datetime": "datetime", + "Model": "model", + "Model Hash": "model_hash", +} + +# can't use *args OR **kwargs if I want to retain the ability to use progress +def generate_proxy( + text, + delimiter, + emotion, + prompt, + voice, + mic_audio, + voice_latents_chunks, + candidates, + seed, + num_autoregressive_samples, + diffusion_iterations, + temperature, + diffusion_sampler, + breathing_room, + cvvp_weight, + top_p, + diffusion_temperature, + length_penalty, + repetition_penalty, + cond_free_k, + experimentals, + voice_latents_original_ar, + voice_latents_original_diffusion, + progress=gr.Progress(track_tqdm=True) +): + kwargs = locals() + + try: + sample, outputs, stats = generate(**kwargs) + except Exception as e: + message = str(e) + if message == "Kill signal detected": + unload_tts() + + raise e + + return ( + outputs[0], + gr.update(value=sample, visible=sample is not None), + gr.update(choices=outputs, value=outputs[0], visible=len(outputs) > 1, interactive=True), + gr.update(value=stats, visible=True), + ) + + +def update_presets(value): + if value in PRESETS: + preset = PRESETS[value] + return (gr.update(value=preset['num_autoregressive_samples']), gr.update(value=preset['diffusion_iterations'])) + else: + return (gr.update(), gr.update()) + +def get_training_configs(): + configs = [] + for i, file in enumerate(sorted(os.listdir(f"./training/"))): + if file[-5:] != ".yaml" or file[0] == ".": + continue + configs.append(f"./training/{file}") + + return configs + +def update_training_configs(): + return gr.update(choices=get_training_list()) + +def history_view_results( voice ): + results = [] + files = [] + outdir = f"{args.results_folder}/{voice}/" + for i, file in enumerate(sorted(os.listdir(outdir))): + if file[-4:] != ".wav": + continue + + metadata, _ = read_generate_settings(f"{outdir}/{file}", read_latents=False) + if metadata is None: + continue + + values = [] + for k in HISTORY_HEADERS: + v = file + if k != "Name": + v = metadata[HISTORY_HEADERS[k]] if HISTORY_HEADERS[k] in metadata else '?' 
+ values.append(v) + + + files.append(file) + results.append(values) + + return ( + results, + gr.Dropdown.update(choices=sorted(files)) + ) + +def import_generate_settings_proxy( file=None ): + global GENERATE_SETTINGS_ARGS + settings = import_generate_settings( file ) + + res = [] + for k in GENERATE_SETTINGS_ARGS: + res.append(settings[k] if k in settings else None) + + return tuple(res) + +def reset_generate_settings_proxy(): + global GENERATE_SETTINGS_ARGS + settings = reset_generate_settings() + + res = [] + for k in GENERATE_SETTINGS_ARGS: + res.append(settings[k] if k in settings else None) + + return tuple(res) + +def compute_latents_proxy(voice, voice_latents_chunks, original_ar, original_diffusion, progress=gr.Progress(track_tqdm=True)): + compute_latents( voice=voice, voice_latents_chunks=voice_latents_chunks, original_ar=original_ar, original_diffusion=original_diffusion ) + return voice + + +def import_voices_proxy(files, name, progress=gr.Progress(track_tqdm=True)): + import_voices(files, name, progress) + return gr.update() + +def read_generate_settings_proxy(file, saveAs='.temp'): + j, latents = read_generate_settings(file) + + if latents: + outdir = f'{get_voice_dir()}/{saveAs}/' + os.makedirs(outdir, exist_ok=True) + with open(f'{outdir}/cond_latents.pth', 'wb') as f: + f.write(latents) + + latents = f'{outdir}/cond_latents.pth' + + return ( + gr.update(value=j, visible=j is not None), + gr.update(value=latents, visible=latents is not None), + None if j is None else j['voice'], + gr.update(visible=j is not None), + ) + +def slice_dataset_proxy( voice, trim_silence, start_offset, end_offset, progress=gr.Progress(track_tqdm=True) ): + return slice_dataset( voice, trim_silence=trim_silence, start_offset=start_offset, end_offset=end_offset, results=None, progress=progress ) + +def diarize_dataset( voice, progress=gr.Progress(track_tqdm=True) ): + from pyannote.audio import Pipeline + pipeline = Pipeline.from_pretrained("pyannote/speaker-diarization", use_auth_token=args.hf_token) + + messages = [] + files = get_voice(voice, load_latents=False) + for file in enumerate_progress(files, desc="Iterating through voice files", progress=progress): + diarization = pipeline(file) + for turn, _, speaker in diarization.itertracks(yield_label=True): + message = f"start={turn.start:.1f}s stop={turn.end:.1f}s speaker_{speaker}" + print(message) + messages.append(message) + + return "\n".join(messages) + +def prepare_all_datasets( language, validation_text_length, validation_audio_length, skip_existings, slice_audio, trim_silence, slice_start_offset, slice_end_offset, progress=gr.Progress(track_tqdm=True) ): + kwargs = locals() + + messages = [] + voices = get_voice_list() + + for voice in voices: + print("Processing:", voice) + message = transcribe_dataset( voice=voice, language=language, skip_existings=skip_existings, progress=progress ) + messages.append(message) + + if slice_audio: + for voice in voices: + print("Processing:", voice) + message = slice_dataset( voice, trim_silence=trim_silence, start_offset=slice_start_offset, end_offset=slice_end_offset, results=None, progress=progress ) + messages.append(message) + + for voice in voices: + print("Processing:", voice) + message = prepare_dataset( voice, use_segments=slice_audio, text_length=validation_text_length, audio_length=validation_audio_length, progress=progress ) + messages.append(message) + + return "\n".join(messages) + +def prepare_dataset_proxy( voice, language, validation_text_length, validation_audio_length, 
skip_existings, slice_audio, trim_silence, slice_start_offset, slice_end_offset, progress=gr.Progress(track_tqdm=True) ): + messages = [] + + message = transcribe_dataset( voice=voice, language=language, skip_existings=skip_existings, progress=progress ) + messages.append(message) + + if slice_audio: + message = slice_dataset( voice, trim_silence=trim_silence, start_offset=slice_start_offset, end_offset=slice_end_offset, results=None, progress=progress ) + messages.append(message) + + message = prepare_dataset( voice, use_segments=slice_audio, text_length=validation_text_length, audio_length=validation_audio_length, progress=progress ) + messages.append(message) + + return "\n".join(messages) + +def update_args_proxy( *args ): + kwargs = {} + keys = list(EXEC_SETTINGS.keys()) + for i in range(len(args)): + k = keys[i] + v = args[i] + kwargs[k] = v + + update_args(**kwargs) +def optimize_training_settings_proxy( *args ): + kwargs = {} + keys = list(TRAINING_SETTINGS.keys()) + for i in range(len(args)): + k = keys[i] + v = args[i] + kwargs[k] = v + + settings, messages = optimize_training_settings(**kwargs) + output = list(settings.values()) + return output[:-1] + ["\n".join(messages)] + +def import_training_settings_proxy( voice ): + messages = [] + injson = f'./training/{voice}/train.json' + statedir = f'./training/{voice}/finetune/training_state/' + output = {} + + try: + with open(injson, 'r', encoding="utf-8") as f: + settings = json.loads(f.read()) + except: + messages.append(f"Error import /{voice}/train.json") + + for k in TRAINING_SETTINGS: + output[k] = TRAINING_SETTINGS[k].value + + output = list(output.values()) + return output[:-1] + ["\n".join(messages)] + + if os.path.isdir(statedir): + resumes = sorted([int(d[:-6]) for d in os.listdir(statedir) if d[-6:] == ".state" ]) + + if len(resumes) > 0: + settings['resume_state'] = f'{statedir}/{resumes[-1]}.state' + messages.append(f"Found most recent training state: {settings['resume_state']}") + + output = {} + for k in TRAINING_SETTINGS: + if k not in settings: + output[k] = gr.update() + else: + output[k] = gr.update(value=settings[k]) + + output = list(output.values()) + + messages.append(f"Imported training settings: {injson}") + + return output[:-1] + ["\n".join(messages)] + +def save_training_settings_proxy( *args ): + kwargs = {} + keys = list(TRAINING_SETTINGS.keys()) + for i in range(len(args)): + k = keys[i] + v = args[i] + kwargs[k] = v + + settings, messages = save_training_settings(**kwargs) + return "\n".join(messages) + +def update_voices(): + return ( + gr.Dropdown.update(choices=get_voice_list(append_defaults=True)), + gr.Dropdown.update(choices=get_voice_list()), + gr.Dropdown.update(choices=get_voice_list(args.results_folder)), + ) + +def history_copy_settings( voice, file ): + return import_generate_settings( f"{args.results_folder}/{voice}/{file}" ) + +def setup_gradio(): + global args + global ui + + if not args.share: + def noop(function, return_value=None): + def wrapped(*args, **kwargs): + return return_value + return wrapped + gradio.utils.version_check = noop(gradio.utils.version_check) + gradio.utils.initiated_analytics = noop(gradio.utils.initiated_analytics) + gradio.utils.launch_analytics = noop(gradio.utils.launch_analytics) + gradio.utils.integration_analytics = noop(gradio.utils.integration_analytics) + gradio.utils.error_analytics = noop(gradio.utils.error_analytics) + gradio.utils.log_feature_analytics = noop(gradio.utils.log_feature_analytics) + #gradio.utils.get_local_ip_address = 
noop(gradio.utils.get_local_ip_address, 'localhost') + + if args.models_from_local_only: + os.environ['TRANSFORMERS_OFFLINE']='1' + + voice_list_with_defaults = get_voice_list(append_defaults=True) + voice_list = get_voice_list() + result_voices = get_voice_list(args.results_folder) + + valle_models = get_valle_models() + + autoregressive_models = get_autoregressive_models() + diffusion_models = get_diffusion_models() + tokenizer_jsons = get_tokenizer_jsons() + + dataset_list = get_dataset_list() + training_list = get_training_list() + + global GENERATE_SETTINGS_ARGS + GENERATE_SETTINGS_ARGS = list(inspect.signature(generate_proxy).parameters.keys())[:-1] + for i in range(len(GENERATE_SETTINGS_ARGS)): + arg = GENERATE_SETTINGS_ARGS[i] + GENERATE_SETTINGS[arg] = None + + with gr.Blocks() as ui: + with gr.Tab("Generate"): + with gr.Row(): + with gr.Column(): + GENERATE_SETTINGS["text"] = gr.Textbox(lines=4, value="Your prompt here.", label="Input Prompt") + with gr.Row(): + with gr.Column(): + GENERATE_SETTINGS["delimiter"] = gr.Textbox(lines=1, label="Line Delimiter", placeholder="\\n") + + GENERATE_SETTINGS["emotion"] = gr.Radio( ["Happy", "Sad", "Angry", "Disgusted", "Arrogant", "Custom", "None"], value="None", label="Emotion", type="value", interactive=True, visible=args.tts_backend=="tortoise" ) + GENERATE_SETTINGS["prompt"] = gr.Textbox(lines=1, label="Custom Emotion", visible=False) + GENERATE_SETTINGS["voice"] = gr.Dropdown(choices=voice_list_with_defaults, label="Voice", type="value", value=voice_list_with_defaults[0]) # it'd be very cash money if gradio was able to default to the first value in the list without this shit + GENERATE_SETTINGS["mic_audio"] = gr.Audio( label="Microphone Source", source="microphone", type="filepath", visible=False ) + GENERATE_SETTINGS["voice_latents_chunks"] = gr.Number(label="Voice Chunks", precision=0, value=0, visible=args.tts_backend=="tortoise") + GENERATE_SETTINGS["voice_latents_original_ar"] = gr.Checkbox(label="Use Original Latents Method (AR)", visible=args.tts_backend=="tortoise") + GENERATE_SETTINGS["voice_latents_original_diffusion"] = gr.Checkbox(label="Use Original Latents Method (Diffusion)", visible=args.tts_backend=="tortoise") + with gr.Row(): + refresh_voices = gr.Button(value="Refresh Voice List") + recompute_voice_latents = gr.Button(value="(Re)Compute Voice Latents") + + GENERATE_SETTINGS["voice"].change( + fn=update_baseline_for_latents_chunks, + inputs=GENERATE_SETTINGS["voice"], + outputs=GENERATE_SETTINGS["voice_latents_chunks"] + ) + GENERATE_SETTINGS["voice"].change( + fn=lambda value: gr.update(visible=value == "microphone"), + inputs=GENERATE_SETTINGS["voice"], + outputs=GENERATE_SETTINGS["mic_audio"], + ) + with gr.Column(): + preset = None + GENERATE_SETTINGS["candidates"] = gr.Slider(value=1, minimum=1, maximum=6, step=1, label="Candidates", visible=args.tts_backend=="tortoise") + GENERATE_SETTINGS["seed"] = gr.Number(value=0, precision=0, label="Seed", visible=args.tts_backend=="tortoise") + + preset = gr.Radio( ["Ultra Fast", "Fast", "Standard", "High Quality"], label="Preset", type="value", value="Ultra Fast", visible=args.tts_backend=="tortoise" ) + + GENERATE_SETTINGS["num_autoregressive_samples"] = gr.Slider(value=16, minimum=2, maximum=2048 if args.tts_backend=="vall-e" else 512, step=1, label="Samples", visible=args.tts_backend!="bark") + GENERATE_SETTINGS["diffusion_iterations"] = gr.Slider(value=30, minimum=0, maximum=512, step=1, label="Iterations", visible=args.tts_backend=="tortoise") + + 
GENERATE_SETTINGS["temperature"] = gr.Slider(value=0.95 if args.tts_backend=="vall-e" else 0.2, minimum=0, maximum=1, step=0.05, label="Temperature") + + show_experimental_settings = gr.Checkbox(label="Show Experimental Settings", visible=args.tts_backend=="tortoise") + reset_generate_settings_button = gr.Button(value="Reset to Default") + with gr.Column(visible=False) as col: + experimental_column = col + + GENERATE_SETTINGS["experimentals"] = gr.CheckboxGroup(["Half Precision", "Conditioning-Free"], value=["Conditioning-Free"], label="Experimental Flags") + GENERATE_SETTINGS["breathing_room"] = gr.Slider(value=8, minimum=1, maximum=32, step=1, label="Pause Size") + GENERATE_SETTINGS["diffusion_sampler"] = gr.Radio( + ["P", "DDIM"], # + ["K_Euler_A", "DPM++2M"], + value="DDIM", label="Diffusion Samplers", type="value" + ) + GENERATE_SETTINGS["cvvp_weight"] = gr.Slider(value=0, minimum=0, maximum=1, label="CVVP Weight") + GENERATE_SETTINGS["top_p"] = gr.Slider(value=0.8, minimum=0, maximum=1, label="Top P") + GENERATE_SETTINGS["diffusion_temperature"] = gr.Slider(value=1.0, minimum=0, maximum=1, label="Diffusion Temperature") + GENERATE_SETTINGS["length_penalty"] = gr.Slider(value=1.0, minimum=0, maximum=8, label="Length Penalty") + GENERATE_SETTINGS["repetition_penalty"] = gr.Slider(value=2.0, minimum=0, maximum=8, label="Repetition Penalty") + GENERATE_SETTINGS["cond_free_k"] = gr.Slider(value=2.0, minimum=0, maximum=4, label="Conditioning-Free K") + with gr.Column(): + with gr.Row(): + submit = gr.Button(value="Generate") + stop = gr.Button(value="Stop") + + generation_results = gr.Dataframe(label="Results", headers=["Seed", "Time"], visible=False) + source_sample = gr.Audio(label="Source Sample", visible=False) + output_audio = gr.Audio(label="Output") + candidates_list = gr.Dropdown(label="Candidates", type="value", visible=False, choices=[""], value="") + + def change_candidate( val ): + if not val: + return + return val + + candidates_list.change( + fn=change_candidate, + inputs=candidates_list, + outputs=output_audio, + ) + with gr.Tab("History"): + with gr.Row(): + with gr.Column(): + history_info = gr.Dataframe(label="Results", headers=list(HISTORY_HEADERS.keys())) + with gr.Row(): + with gr.Column(): + history_voices = gr.Dropdown(choices=result_voices, label="Voice", type="value", value=result_voices[0] if len(result_voices) > 0 else "") + with gr.Column(): + history_results_list = gr.Dropdown(label="Results",type="value", interactive=True, value="") + with gr.Column(): + history_audio = gr.Audio() + history_copy_settings_button = gr.Button(value="Copy Settings") + with gr.Tab("Utilities"): + with gr.Tab("Import / Analyze"): + with gr.Row(): + with gr.Column(): + audio_in = gr.Files(type="file", label="Audio Input", file_types=["audio"]) + import_voice_name = gr.Textbox(label="Voice Name") + import_voice_button = gr.Button(value="Import Voice") + with gr.Column(visible=False) as col: + utilities_metadata_column = col + + metadata_out = gr.JSON(label="Audio Metadata") + copy_button = gr.Button(value="Copy Settings") + latents_out = gr.File(type="binary", label="Voice Latents") + with gr.Tab("Tokenizer"): + with gr.Row(): + text_tokenizier_input = gr.TextArea(label="Text", max_lines=4) + text_tokenizier_output = gr.TextArea(label="Tokenized Text", max_lines=4) + + with gr.Row(): + text_tokenizier_button = gr.Button(value="Tokenize Text") + with gr.Tab("Model Merger"): + with gr.Column(): + with gr.Row(): + MERGER_SETTINGS["model_a"] = gr.Dropdown( choices=autoregressive_models, 
label="Model A", type="value", value=autoregressive_models[0] ) + MERGER_SETTINGS["model_b"] = gr.Dropdown( choices=autoregressive_models, label="Model B", type="value", value=autoregressive_models[0] ) + with gr.Row(): + MERGER_SETTINGS["weight_slider"] = gr.Slider(label="Weight (from A to B)", value=0.5, minimum=0, maximum=1) + with gr.Row(): + merger_button = gr.Button(value="Run Merger") + with gr.Column(): + merger_output = gr.TextArea(label="Console Output", max_lines=8) + with gr.Tab("Training"): + with gr.Tab("Prepare Dataset"): + with gr.Row(): + with gr.Column(): + DATASET_SETTINGS = {} + DATASET_SETTINGS['voice'] = gr.Dropdown( choices=voice_list, label="Dataset Source", type="value", value=voice_list[0] if len(voice_list) > 0 else "" ) + with gr.Row(): + DATASET_SETTINGS['language'] = gr.Textbox(label="Language", value="en") + DATASET_SETTINGS['validation_text_length'] = gr.Number(label="Validation Text Length Threshold", value=12, precision=0, visible=args.tts_backend=="tortoise") + DATASET_SETTINGS['validation_audio_length'] = gr.Number(label="Validation Audio Length Threshold", value=1, visible=args.tts_backend=="tortoise" ) + with gr.Row(): + DATASET_SETTINGS['skip'] = gr.Checkbox(label="Skip Existing", value=False) + DATASET_SETTINGS['slice'] = gr.Checkbox(label="Slice Segments", value=False) + DATASET_SETTINGS['trim_silence'] = gr.Checkbox(label="Trim Silence", value=False) + with gr.Row(): + DATASET_SETTINGS['slice_start_offset'] = gr.Number(label="Slice Start Offset", value=0) + DATASET_SETTINGS['slice_end_offset'] = gr.Number(label="Slice End Offset", value=0) + + transcribe_button = gr.Button(value="Transcribe and Process") + transcribe_all_button = gr.Button(value="Transcribe All") + diarize_button = gr.Button(value="Diarize", visible=False) + + with gr.Row(): + slice_dataset_button = gr.Button(value="(Re)Slice Audio") + prepare_dataset_button = gr.Button(value="(Re)Create Dataset") + + with gr.Row(): + EXEC_SETTINGS['whisper_backend'] = gr.Dropdown(WHISPER_BACKENDS, label="Whisper Backends", value=args.whisper_backend) + EXEC_SETTINGS['whisper_model'] = gr.Dropdown(WHISPER_MODELS, label="Whisper Model", value=args.whisper_model) + + dataset_settings = list(DATASET_SETTINGS.values()) + with gr.Column(): + prepare_dataset_output = gr.TextArea(label="Console Output", interactive=False, max_lines=8) + with gr.Tab("Generate Configuration", visible=args.tts_backend != "bark"): + with gr.Row(): + with gr.Column(): + TRAINING_SETTINGS["epochs"] = gr.Number(label="Epochs", value=500, precision=0) + with gr.Row(visible=args.tts_backend=="tortoise"): + TRAINING_SETTINGS["learning_rate"] = gr.Slider(label="Learning Rate", value=1e-5, minimum=0, maximum=1e-4, step=1e-6) + TRAINING_SETTINGS["mel_lr_weight"] = gr.Slider(label="Mel LR Ratio", value=1.00, minimum=0, maximum=1) + TRAINING_SETTINGS["text_lr_weight"] = gr.Slider(label="Text LR Ratio", value=0.01, minimum=0, maximum=1) + + with gr.Row(visible=args.tts_backend=="tortoise"): + lr_schemes = list(LEARNING_RATE_SCHEMES.keys()) + TRAINING_SETTINGS["learning_rate_scheme"] = gr.Radio(lr_schemes, label="Learning Rate Scheme", value=lr_schemes[0], type="value") + TRAINING_SETTINGS["learning_rate_schedule"] = gr.Textbox(label="Learning Rate Schedule", placeholder=str(LEARNING_RATE_SCHEDULE), visible=True) + TRAINING_SETTINGS["learning_rate_restarts"] = gr.Number(label="Learning Rate Restarts", value=4, precision=0, visible=False) + + TRAINING_SETTINGS["learning_rate_scheme"].change( + fn=lambda x: ( gr.update(visible=x == 
lr_schemes[0]), gr.update(visible=x == lr_schemes[1]) ), + inputs=TRAINING_SETTINGS["learning_rate_scheme"], + outputs=[ + TRAINING_SETTINGS["learning_rate_schedule"], + TRAINING_SETTINGS["learning_rate_restarts"], + ] + ) + with gr.Row(): + TRAINING_SETTINGS["batch_size"] = gr.Number(label="Batch Size", value=128, precision=0) + TRAINING_SETTINGS["gradient_accumulation_size"] = gr.Number(label="Gradient Accumulation Size", value=4, precision=0) + with gr.Row(): + TRAINING_SETTINGS["save_rate"] = gr.Number(label="Save Frequency (in epochs)", value=5, precision=0) + TRAINING_SETTINGS["validation_rate"] = gr.Number(label="Validation Frequency (in epochs)", value=5, precision=0) + + with gr.Row(): + TRAINING_SETTINGS["half_p"] = gr.Checkbox(label="Half Precision", value=args.training_default_halfp, visible=args.tts_backend=="tortoise") + TRAINING_SETTINGS["bitsandbytes"] = gr.Checkbox(label="BitsAndBytes", value=args.training_default_bnb, visible=args.tts_backend=="tortoise") + TRAINING_SETTINGS["validation_enabled"] = gr.Checkbox(label="Validation Enabled", value=False) + + with gr.Row(): + TRAINING_SETTINGS["workers"] = gr.Number(label="Worker Processes", value=2, precision=0, visible=args.tts_backend=="tortoise") + TRAINING_SETTINGS["gpus"] = gr.Number(label="GPUs", value=get_device_count(), precision=0) + + TRAINING_SETTINGS["source_model"] = gr.Dropdown( choices=autoregressive_models, label="Source Model", type="value", value=autoregressive_models[0], visible=args.tts_backend=="tortoise" ) + TRAINING_SETTINGS["resume_state"] = gr.Textbox(label="Resume State Path", placeholder="./training/${voice}/finetune/training_state/${last_state}.state", visible=args.tts_backend=="tortoise") + + TRAINING_SETTINGS["voice"] = gr.Dropdown( choices=dataset_list, label="Dataset", type="value", value=dataset_list[0] if len(dataset_list) else "" ) + + with gr.Row(): + training_refresh_dataset = gr.Button(value="Refresh Dataset List") + training_import_settings = gr.Button(value="Reuse/Import Dataset") + with gr.Column(): + training_configuration_output = gr.TextArea(label="Console Output", interactive=False, max_lines=8) + with gr.Row(): + training_optimize_configuration = gr.Button(value="Validate Training Configuration") + training_save_configuration = gr.Button(value="Save Training Configuration") + with gr.Tab("Run Training", visible=args.tts_backend != "bark"): + with gr.Row(): + with gr.Column(): + training_configs = gr.Dropdown(label="Training Configuration", choices=training_list, value=training_list[0] if len(training_list) else "") + refresh_configs = gr.Button(value="Refresh Configurations") + training_output = gr.TextArea(label="Console Output", interactive=False, max_lines=8) + verbose_training = gr.Checkbox(label="Verbose Console Output", value=True) + + keep_x_past_checkpoints = gr.Slider(label="Keep X Previous States", minimum=0, maximum=8, value=0, step=1) + + with gr.Row(): + training_graph_x_min = gr.Number(label="X Min", precision=0, value=0) + training_graph_x_max = gr.Number(label="X Max", precision=0, value=0) + training_graph_y_min = gr.Number(label="Y Min", precision=0, value=0) + training_graph_y_max = gr.Number(label="Y Max", precision=0, value=0) + + with gr.Row(): + start_training_button = gr.Button(value="Train") + stop_training_button = gr.Button(value="Stop") + reconnect_training_button = gr.Button(value="Reconnect") + + + with gr.Column(): + training_loss_graph = gr.LinePlot(label="Training Metrics", + x="it", # x="epoch", + y="value", + title="Loss Metrics", + color="type", 
+							tooltip=['epoch', 'it', 'value', 'type'],
+							width=500,
+							height=350,
+						)
+						training_lr_graph = gr.LinePlot(label="Training Metrics",
+							x="it", # x="epoch",
+							y="value",
+							title="Learning Rate",
+							color="type",
+							tooltip=['epoch', 'it', 'value', 'type'],
+							width=500,
+							height=350,
+						)
+						training_grad_norm_graph = gr.LinePlot(label="Training Metrics",
+							x="it", # x="epoch",
+							y="value",
+							title="Gradient Norms",
+							color="type",
+							tooltip=['epoch', 'it', 'value', 'type'],
+							width=500,
+							height=350,
+							visible=False, # args.tts_backend=="vall-e"
+						)
+						view_losses = gr.Button(value="View Losses")
+
+		with gr.Tab("Settings"):
+			with gr.Row():
+				exec_inputs = []
+				with gr.Column():
+					EXEC_SETTINGS['listen'] = gr.Textbox(label="Listen", value=args.listen, placeholder="127.0.0.1:7860/")
+					EXEC_SETTINGS['share'] = gr.Checkbox(label="Public Share Gradio", value=args.share)
+					EXEC_SETTINGS['check_for_updates'] = gr.Checkbox(label="Check For Updates", value=args.check_for_updates)
+					EXEC_SETTINGS['models_from_local_only'] = gr.Checkbox(label="Only Load Models Locally", value=args.models_from_local_only)
+					EXEC_SETTINGS['low_vram'] = gr.Checkbox(label="Low VRAM", value=args.low_vram)
+					EXEC_SETTINGS['embed_output_metadata'] = gr.Checkbox(label="Embed Output Metadata", value=args.embed_output_metadata)
+					EXEC_SETTINGS['latents_lean_and_mean'] = gr.Checkbox(label="Slimmer Computed Latents", value=args.latents_lean_and_mean)
+					EXEC_SETTINGS['voice_fixer'] = gr.Checkbox(label="Use Voice Fixer on Generated Output", value=args.voice_fixer)
+					EXEC_SETTINGS['use_deepspeed'] = gr.Checkbox(label="Use DeepSpeed for Speed Bump.", value=args.use_deepspeed) # new in this patch: exposes the DeepSpeed toggle added by this PR
+					EXEC_SETTINGS['voice_fixer_use_cuda'] = gr.Checkbox(label="Use CUDA for Voice Fixer", value=args.voice_fixer_use_cuda)
+					EXEC_SETTINGS['force_cpu_for_conditioning_latents'] = gr.Checkbox(label="Force CPU for Conditioning Latents", value=args.force_cpu_for_conditioning_latents)
+					EXEC_SETTINGS['defer_tts_load'] = gr.Checkbox(label="Do Not Load TTS On Startup", value=args.defer_tts_load)
+					EXEC_SETTINGS['prune_nonfinal_outputs'] = gr.Checkbox(label="Delete Non-Final Output", value=args.prune_nonfinal_outputs)
+				with gr.Column():
+					EXEC_SETTINGS['sample_batch_size'] = gr.Number(label="Sample Batch Size", precision=0, value=args.sample_batch_size)
+					EXEC_SETTINGS['unsqueeze_sample_batches'] = gr.Checkbox(label="Unsqueeze Sample Batches", value=args.unsqueeze_sample_batches)
+					EXEC_SETTINGS['concurrency_count'] = gr.Number(label="Gradio Concurrency Count", precision=0, value=args.concurrency_count)
+					EXEC_SETTINGS['autocalculate_voice_chunk_duration_size'] = gr.Number(label="Auto-Calculate Voice Chunk Duration (in seconds)", precision=0, value=args.autocalculate_voice_chunk_duration_size)
+					EXEC_SETTINGS['output_volume'] = gr.Slider(label="Output Volume", minimum=0, maximum=2, value=args.output_volume)
+					EXEC_SETTINGS['device_override'] = gr.Textbox(label="Device Override", value=args.device_override)
+
+					EXEC_SETTINGS['results_folder'] = gr.Textbox(label="Results Folder", value=args.results_folder)
+					# EXEC_SETTINGS['tts_backend'] = gr.Dropdown(TTSES, label="TTS Backend", value=args.tts_backend if args.tts_backend else TTSES[0])
+
+				if args.tts_backend=="vall-e":
+					with gr.Column():
+						EXEC_SETTINGS['valle_model'] = gr.Dropdown(choices=valle_models, label="VALL-E Model Config", value=args.valle_model if args.valle_model else valle_models[0])
+
+				with gr.Column(visible=args.tts_backend=="tortoise"):
+					EXEC_SETTINGS['autoregressive_model'] = gr.Dropdown(choices=["auto"] + autoregressive_models, label="Autoregressive Model", value=args.autoregressive_model if args.autoregressive_model else "auto")
+					EXEC_SETTINGS['diffusion_model'] = gr.Dropdown(choices=diffusion_models, label="Diffusion Model", value=args.diffusion_model if args.diffusion_model else diffusion_models[0])
+					EXEC_SETTINGS['vocoder_model'] = gr.Dropdown(VOCODERS, label="Vocoder", value=args.vocoder_model if args.vocoder_model else VOCODERS[-1])
+					EXEC_SETTINGS['tokenizer_json'] = gr.Dropdown(tokenizer_jsons, label="Tokenizer JSON Path", value=args.tokenizer_json if args.tokenizer_json else tokenizer_jsons[0])
+
+			EXEC_SETTINGS['training_default_halfp'] = TRAINING_SETTINGS['half_p']
+			EXEC_SETTINGS['training_default_bnb'] = TRAINING_SETTINGS['bitsandbytes']
+
+			with gr.Row():
+				autoregressive_models_update_button = gr.Button(value="Refresh Model List")
+				gr.Button(value="Check for Updates").click(check_for_updates)
+				gr.Button(value="(Re)Load TTS").click(
+					reload_tts,
+					inputs=None,
+					outputs=None
+				)
+				# kill_button = gr.Button(value="Close UI")
+
+			def update_model_list_proxy( autoregressive, diffusion, tokenizer ):
+				autoregressive_models = get_autoregressive_models()
+				if autoregressive not in autoregressive_models:
+					autoregressive = autoregressive_models[0]
+
+				diffusion_models = get_diffusion_models()
+				if diffusion not in diffusion_models:
+					diffusion = diffusion_models[0]
+
+				tokenizer_jsons = get_tokenizer_jsons()
+				if tokenizer not in tokenizer_jsons:
+					tokenizer = tokenizer_jsons[0]
+
+				return (
+					gr.update( choices=autoregressive_models, value=autoregressive ),
+					gr.update( choices=diffusion_models, value=diffusion ),
+					gr.update( choices=tokenizer_jsons, value=tokenizer ),
+				)
+
+			autoregressive_models_update_button.click(
+				update_model_list_proxy,
+				inputs=[
+					EXEC_SETTINGS['autoregressive_model'],
+					EXEC_SETTINGS['diffusion_model'],
+					EXEC_SETTINGS['tokenizer_json'],
+				],
+				outputs=[
+					EXEC_SETTINGS['autoregressive_model'],
+					EXEC_SETTINGS['diffusion_model'],
+					EXEC_SETTINGS['tokenizer_json'],
+				],
+			)
+
+			exec_inputs = list(EXEC_SETTINGS.values())
+			for k in EXEC_SETTINGS:
+				EXEC_SETTINGS[k].change( fn=update_args_proxy, inputs=exec_inputs )
+
+			EXEC_SETTINGS['autoregressive_model'].change(
+				fn=update_autoregressive_model,
+				inputs=EXEC_SETTINGS['autoregressive_model'],
+				outputs=None,
+				api_name="set_autoregressive_model"
+			)
+
+			EXEC_SETTINGS['vocoder_model'].change(
+				fn=update_vocoder_model,
+				inputs=EXEC_SETTINGS['vocoder_model'],
+				outputs=None
+			)
+
+		history_voices.change(
+			fn=history_view_results,
+			inputs=history_voices,
+			outputs=[
+				history_info,
+				history_results_list,
+			]
+		)
+		history_results_list.change(
+			fn=lambda voice, file: f"{args.results_folder}/{voice}/{file}",
+			inputs=[
+				history_voices,
+				history_results_list,
+			],
+			outputs=history_audio
+		)
+		audio_in.upload(
+			fn=read_generate_settings_proxy,
+			inputs=audio_in,
+			outputs=[
+				metadata_out,
+				latents_out,
+				import_voice_name,
+				utilities_metadata_column,
+			]
+		)
+
+		import_voice_button.click(
+			fn=import_voices_proxy,
+			inputs=[
+				audio_in,
+				import_voice_name,
+			],
+			outputs=import_voice_name #console_output
+		)
+		show_experimental_settings.change(
+			fn=lambda x: gr.update(visible=x),
+			inputs=show_experimental_settings,
+			outputs=experimental_column
+		)
+		if preset:
+			preset.change(fn=update_presets,
+				inputs=preset,
+				outputs=[
+					GENERATE_SETTINGS['num_autoregressive_samples'],
+					GENERATE_SETTINGS['diffusion_iterations'],
+				],
+			)
+
+		recompute_voice_latents.click(compute_latents_proxy,
+			inputs=[
+				GENERATE_SETTINGS['voice'],
+				GENERATE_SETTINGS['voice_latents_chunks'],
+				GENERATE_SETTINGS['voice_latents_original_ar'],
+				GENERATE_SETTINGS['voice_latents_original_diffusion'],
+			],
+			outputs=GENERATE_SETTINGS['voice'],
+		)
+
+		GENERATE_SETTINGS['emotion'].change(
+			fn=lambda value: gr.update(visible=value == "Custom"),
+			inputs=GENERATE_SETTINGS['emotion'],
+			outputs=GENERATE_SETTINGS['prompt']
+		)
+		GENERATE_SETTINGS['mic_audio'].change(fn=lambda value: gr.update(value="microphone"),
+			inputs=GENERATE_SETTINGS['mic_audio'],
+			outputs=GENERATE_SETTINGS['voice']
+		)
+
+		refresh_voices.click(update_voices,
+			inputs=None,
+			outputs=[
+				GENERATE_SETTINGS['voice'],
+				DATASET_SETTINGS['voice'],
+				history_voices
+			]
+		)
+
+		generate_settings = list(GENERATE_SETTINGS.values())
+		submit.click(
+			lambda: (gr.update(visible=False), gr.update(visible=False), gr.update(visible=False)),
+			outputs=[source_sample, candidates_list, generation_results],
+		)
+
+		submit_event = submit.click(generate_proxy,
+			inputs=generate_settings,
+			outputs=[output_audio, source_sample, candidates_list, generation_results],
+			api_name="generate",
+		)
+
+
+		copy_button.click(import_generate_settings_proxy,
+			inputs=audio_in, # JSON elements cannot be used as inputs
+			outputs=generate_settings
+		)
+
+		reset_generate_settings_button.click(
+			fn=reset_generate_settings_proxy,
+			inputs=None,
+			outputs=generate_settings
+		)
+
+		history_copy_settings_button.click(history_copy_settings,
+			inputs=[
+				history_voices,
+				history_results_list,
+			],
+			outputs=generate_settings
+		)
+
+		text_tokenizier_button.click(tokenize_text,
+			inputs=text_tokenizier_input,
+			outputs=text_tokenizier_output
+		)
+
+		merger_button.click(merge_models,
+			inputs=list(MERGER_SETTINGS.values()),
+			outputs=merger_output
+		)
+
+		refresh_configs.click(
+			lambda: gr.update(choices=get_training_list()),
+			inputs=None,
+			outputs=training_configs
+		)
+		start_training_button.click(run_training,
+			inputs=[
+				training_configs,
+				verbose_training,
+				keep_x_past_checkpoints,
+			],
+			outputs=[
+				training_output,
+			],
+		)
+		training_output.change(
+			fn=update_training_dataplot,
+			inputs=[
+				training_graph_x_min,
+				training_graph_x_max,
+				training_graph_y_min,
+				training_graph_y_max,
+			],
+			outputs=[
+				training_loss_graph,
+				training_lr_graph,
+				training_grad_norm_graph,
+			],
+			show_progress=False,
+		)
+
+		view_losses.click(
+			fn=update_training_dataplot,
+			inputs=[
+				training_graph_x_min,
+				training_graph_x_max,
+				training_graph_y_min,
+				training_graph_y_max,
+				training_configs,
+			],
+			outputs=[
+				training_loss_graph,
+				training_lr_graph,
+				training_grad_norm_graph,
+			],
+		)
+
+		stop_training_button.click(stop_training,
+			inputs=None,
+			outputs=training_output #console_output
+		)
+		reconnect_training_button.click(reconnect_training,
+			inputs=[
+				verbose_training,
+			],
+			outputs=training_output #console_output
+		)
+		transcribe_button.click(
+			prepare_dataset_proxy,
+			inputs=dataset_settings,
+			outputs=prepare_dataset_output #console_output
+		)
+		transcribe_all_button.click(
+			prepare_all_datasets,
+			inputs=dataset_settings[1:],
+			outputs=prepare_dataset_output #console_output
+		)
+		diarize_button.click(
+			diarize_dataset,
+			inputs=dataset_settings[0],
+			outputs=prepare_dataset_output #console_output
+		)
+		prepare_dataset_button.click(
+			prepare_dataset,
+			inputs=[
+				DATASET_SETTINGS['voice'],
+				DATASET_SETTINGS['slice'],
+				DATASET_SETTINGS['validation_text_length'],
+				DATASET_SETTINGS['validation_audio_length'],
+			],
+			outputs=prepare_dataset_output #console_output
+		)
+		slice_dataset_button.click(
+			slice_dataset_proxy,
+			inputs=[
+				DATASET_SETTINGS['voice'],
+				DATASET_SETTINGS['trim_silence'],
+				DATASET_SETTINGS['slice_start_offset'],
+				DATASET_SETTINGS['slice_end_offset'],
+			],
+			outputs=prepare_dataset_output
+		)
+
+		training_refresh_dataset.click(
+			lambda: gr.update(choices=get_dataset_list()),
+			inputs=None,
+			outputs=TRAINING_SETTINGS["voice"],
+		)
+		training_settings = list(TRAINING_SETTINGS.values())
+		training_optimize_configuration.click(optimize_training_settings_proxy,
+			inputs=training_settings,
+			outputs=training_settings[:-1] + [training_configuration_output] #console_output
+		)
+		training_import_settings.click(import_training_settings_proxy,
+			inputs=TRAINING_SETTINGS['voice'],
+			outputs=training_settings[:-1] + [training_configuration_output] #console_output
+		)
+		training_save_configuration.click(save_training_settings_proxy,
+			inputs=training_settings,
+			outputs=training_configuration_output #console_output
+		)
+
+		if os.path.isfile('./config/generate.json'):
+			ui.load(import_generate_settings_proxy, inputs=None, outputs=generate_settings)
+
+		if args.check_for_updates:
+			ui.load(check_for_updates)
+
+		stop.click(fn=cancel_generate, inputs=None, outputs=None)
+
+
+	ui.queue(concurrency_count=args.concurrency_count)
+	webui = ui
 	return webui
\ No newline at end of file
-- 
2.45.2


From af5ee81b5bb05e45cb030ca26a37c81b67e6403a Mon Sep 17 00:00:00 2001
From: ken11o2
Date: Mon, 4 Sep 2023 20:52:38 +0000
Subject: [PATCH 4/4] Delete ai_voice_cloning_edited_to_use_deepspeed.zip

---
 ai_voice_cloning_edited_to_use_deepspeed.zip | Bin 62526 -> 0 bytes
 1 file changed, 0 insertions(+), 0 deletions(-)
 delete mode 100644 ai_voice_cloning_edited_to_use_deepspeed.zip

diff --git a/ai_voice_cloning_edited_to_use_deepspeed.zip b/ai_voice_cloning_edited_to_use_deepspeed.zip
deleted file mode 100644
index 3f44449068750c14e4508da083d4f69e43698182..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001
zqIk}_0P3LW|G4W(*WCM zz>X7o`QCpN>4g;`ibj0o0yc?!&m8JF4#uoN(~ZKmiYk0Xccs5AH{8OxyHXx(9J}ed zEN2tZoFtqR{r*-jaStgotzUyy+)129@0mjQ`Sg{rp}m50Z~y&!7tK9>hs*72avOg$ z6;N!})^P`|0o3BKSh$LreiSwoI-o-uu6%cK7K%4c*?>1jMOJ`rrBc5!pia90n~-O!TNs8GHL=W0!^mq21DQS=Fj$I*JNr^P%OvQt%;rvo z$hPa-gVW#eyc+%G&o9Afc#~}LV@4vTW=9b~cvZalIJ#@eV$c9=`PNMsxL*=-0LSdd zZ}1@Sm;?7r)1-+&Plx$6q>&oiDUTWme-@Dv(E&oJHKLXFfZeo+-CDWE;*}FYD#=RZ zoyRibrjJsC-TV{|UL_)u1mwYnu4n*X!OZ4j9it;FW`sx`Ks`oWkeGH#t?P*^I|c3d zAR3a@bA*j_9`}}h*e>nP;t@fmIMg4B83!v*C}n(6LuFDCxFN=wJtl`agC<6AK7u&Y zg47F5XWSSr2+edE#+-wJGD*$>)IN1I#&hAu1n299T`fIh+CQWR+ z{?q_pkuN;PiiMQOd(FT9i||Ye;9hLo2C`qB=r=U<`DejM1I3ducoc%dBsvvvtPHft zde&_@Imp1`7RQ{TR9&eDZeP!P+$qwa)Zt-@X}#zU^}=)fxVY0Y+&pX2Nhqh~p>8ZJ zTo4jZ13D>WTUfB4?!}C}iIjv)rrf$ejMT+4gyB2YO4g3S2^#{atA8rZWAOap>4lIF z%qU6()^${r7ix(-?nNp>B$~pM4_X#>2vx@rSGa{AbFfN$Guu)3gI=3KiY@7L8ZrI8 zX#22wOir$*d?ZYG3W+RTuIl+E#=%DvOloK6Qkef>oV4GA$#d%z>#CwW%3=1@n{40+ z$yV1hW!;fPpNe}T@s2rpke;5aHTxVI^8}PnzWxOtvOQ`hj9Ibt#<_V4dpgiKiZU(~ z31TPC^av_J<2Xu2KAu!a|7e%fRLN;IVFje2ee7l4M<&`v< zMT@M@^aY3f8O$K4a}xWVQ=-QC*%u9S>9io)g9>ZbdW2_SI!qyh?h4h->`>|?#!vM% z7D6!GhNm(w#h=S9G?~wkWKAJZH%)*MHhS7(K0$J>cH%{JSGP&fFVL2JMakreN*c_o zUJtk9{(Dl;UPEc*IU=-yWx%@^Q2+cV--#_o>N<9&2wchw)YlLZ#b$7H{$6}3v-Zut z$}fV@6K`lS+a#tTQ`_;)DrIIWA{fwL0yYsKm~bm-cV-#dq+XI{3AR#mlFmGNAZ%$b z2X1hS;UqPGg(0(|yK1@IE;&eRs{1G}wKIc|vM?-;OC~*(TbYI78u?$fSjv%lO(5hKLsivI#(S>)z8b$_Q%nIF5SoQgF$wZ(3< zC2|istR-hik{2-Z(@oJ_MNQd_7G~UtpkR~%DYBwtktna{nz@}Scqf8NDdk`^TI1*o zn}ewGOT6U4i&)3eGZS=1>6JQ`@GkZ<(!q{QAn5W}`BGCD(?u25VX^EN4KCT5H6g?Y zcAuUdEP##`SvIx&ewr+mjtswHuS4NVPfJgQ+oSK1?M@H%;)&8 zfx9@~9rn;bswzJX5^C7bbFU2^yok|D;98>WBI|Bvi-NcAW8HawNjqZhx$h%YbqE5V!{H_A$)!wI+^!PEkiWfH}57*4We86epgR?Np@mpZ?6QA1c{z98Jfo8F*oDJ6jI z;u%3-?XTk!A^GhfwOVRHH`FWc`(?WY&dos;@G$Rr-qMl3cKF#c{2_VDP9HT0m{-=FgH1)9;!yRgE#fHet zrQI=6(uYQ8dGb-v^>}0c>w913*NiN4J-=0%z|AJ>l6zS(6Z=+4{HobVTMI(Hj@}mZ z1^K(!^!Qrk*dGyD-FFKT*T-Us<}ixD`ebWU?U@&aS(byQ&bFhQDjc6%Bi?}4z;T2V zKy+Y;<6$6A*!7sxKnxu-g?)249jnO50anzBWRW7>R6Rsi-A`|zu2fvOep~0Y&Ek`p za$3k3f@&4M3PW+7^>7WI`gtCgL+1}GvQD?JmTP*jF!Q>Sb50txa9DA~dA-RTk@NW+ zCR=hod6W-Gv1Qe$6xbRgQMaRDz6+a3fh5iQ4rjP0Ca2#5aUiWp6c#F+UaBrPmR?(r zm}YoAXVFS-D(^-L49Yy+#qmh&2&PeK!?UX>gj8a}TVP?a=`Q&)c`%mtcO0_5nKK%# zUX3EN!s%Ak#RzimG_sjmdwmvN=4G(iO~C`ras&O0ar#80=I@qn374zA$n2Tcg7N%( z(iKhqqi2s|6?UjS7}#e#QF(q$nCeD`AYlcABTTdi0T8>sYRnSALsXL(P@P-XOP1EUgTQc;az@W zjldr+_Tf`$0xmRuzt$C_zO3}`3guk0#=&xkC{PG7d8C>kyZYhzN7051Uf^l9fjLh6 zawic4@zy}oXE_*XfvDhnv%<*=uEM?Y>bs%rA%Ei;8ZYG~aiZD~fUY`f*TTv+_o^3$ zkcP5jIkAyGXzU5d9o64G93Q@(Q?m3nMn>DCUc?pegWlw>#a|EL?A1L>^%)#Kza(kt z*_aqk^y;=8?vz`&yo^N%8DrmTGS@cQT1faXMR=08t?(m4P{|hiNIKl9C-G%{k;wFO zac~6q7<32hRwTLiR63dv0L%JFmJ1t_Fet>(djv3Xf_T8FAKraYIx|28CHi(u3blIYgq98bvZ<+u-EIJu+mxZqMBJ~atsz~LCFpWI+{wQ+Oluq^}CHTtYoqcGti{&G4K$G3E(F8Wn?1wo`U&;xAa zp(*0v+v~g+Hb*48Fkytd-0dV?SCXpf)vx5D+PGm@IV#2YGd2eC=M+cy-9c~A;H7&! 
zO>mEKweCD23y{jh;m)4}2IxF`#zM~Hy9Dhk+i(_HS`mzNiaLKtD zS=f~8yAY|cX}>ffi7u178x-$2=*yj}Z_=a*Ct?tnL_0`Q3oUo#4y6DM&ISqBD=VOo zOGTpL+cW(3`smmb@r=TANa`NeRpbY^aW#rP8_Oc=-Fg~9z@8}VXN1a&>wzK2Q@Oe( za>?d`bg3o~c+RZUaBAZ1Af|N5ak;&;duWXAW3^rfTHy&(5CNZ7QL)0gc!IBf*$Tp> zJ4wfx-UlMh`!F^c0H`ji-o7zvHj3@$$9|Z^Y;NA7i5r0`m6c=HHV0KJI}kQ5Up>|P z_SHmOwXouDBKpN=(Rt7Gx3OI4jO3djY%T8yPSBoFa@Px9sbgJ?V;)oZOqRp_Tk#74> zk$!K4NA2}q$HLUOEQO?*@%ZgHK(T7ob_2(iHDVm}^?no#H}Mhc1;qeib|n?MA)rc< z%M)Hkl?y?R5@cb*X!5*^DU9{OJnp2pgEs7tnP%v z0tLqAibFCc07ZgnSAXEoB;Ad-+sUkr%$^z^ksJL~UY7PmYkC?T^XXT=EVSU6kF5bS@8dAs}>dH7@O*@1S zUpk!CNteeWRwDes|G+V0;(w^yu57M}T%&Or9-s)?r3P{oWX=9gcn-yI`9DE1yfa7Li8Lq-~$0=7AI8}mG7 zkAmV{G&j{m-VrPEaq1rtk3m}c1T*%>{nYTR*xx`U!bW@8ncb|OyzM`mZfI)_i1%1Z zwK0gk0T#}J%N@tG%r}@Zi>ch{+vAFpatd;g9^_nJ7(tgVDDUtfj7@DxCV}&>A6+H6 zaPHfiU2pPyZyuOAj_#=a>{y-V!6m0(vf%5SgAM1O(k0WizA?@zNvpSbTvlEx=3ks) z*$MkigUI4w;3M)r$ode>q$)sl;Z8l`*Z5EWBK2gn>~QqLlGutJr28TGERoZj9Z4{S zkdsMjLW4L9oidv4(5mvD8ut?`AVmHLbC-;hoHza|^Dt~+=p*R>t;Wx=(1w$JltAbm z;w_njZ|Qs4l)QVMUV}W{ao5!kTt%IZ9?=GHDF%)rMYjgCHCwj|gQ+`aTacd@Usmga zZ{Pxl&>{>BSMvJR+Ye4Mk&xY!iIC9zy3;?&XTvrL@CX;m&&X9V#Sk5PgA7v^G7gp- z6IbZ%MV>V{LsNPujvof3*(f&FpE9XDHG(B@M*yRu zJ#u_!_oJcnuoZ{evdN7pE18J1; z@tH|at1)4->8(W0G6Lj8r~zn4wrYi7p#z!Cpk|+a)5W$jCzP3}R25q7LasFswgWxl zJewM>P>zQ;tLn*S`YMw6d?%oaX+tNhs@h>S zr!FHQ@OsmDGT+wDcLK7TGS-4lQ>`&_<5qHq7b1y>gO zDXF{xuf*azxK{VB64|9%&7kClN|AvyrUtI%uY^#i%b9mFuECJh7TY^XR+DqzeW zrF?Q@ABvn3K>1qTlDYV-`C!SievyK4_C|KHgh^M&B2M_rW{iTbK;F50U*UU{o)-Pr zGS7;;WWqICVwhhZC5QauStqD?&5fH0Epv%9ah5;@yQNdHqFttupA9)p1dKD^RPn*n z**7h!YP803i}yfNuVKfkpap>A8{^E6eNOE{TqkJw{V;tNMVFIP3F=rNOJXn}s{_6v zCC>|1lOQD*&+9S*Wo5w0x-s16yO&+SRtLpoMK39ewH?-^9$QPp^U;;>+Cy{`T86T6 z+GJoDbI--t{U2cEOG1VXmmbkiuP0!| zD;|>_?UVWF_nf27tV^rs&iK@0kgad$Kc37Zp}Nhca;X;Kk7}?GHd;Uj;~D)TX8Eyf z$ZzGJ*Z|A&4^3e;on%kpVp78Mi})qLJw^V;BY)KCA+;rcTLKKUi@^-zmVkM)gQuVx zUEA+?j3+aKlU`s;8U~$O?diIrc@X_NYpV_m8v{vTNi2nvoo&O)D$O|M8=0eEORGlM z%g<7IuS}B5$KOXEg3FuJDtvpMm3Iy%BSF@>i}jTwuZVy7FKm_?-e-zf!B|uW9bp_` zLR&%Svl?c?OG~mWxf}%p)Lxue1(;ZU%H5AbF}PP%70Q86lj;W zzufmu82EAGYcG;;5XU@E^%)CC0fLW0hNn{5z#1rtk$x4VL7g|G)q2l(4^>o@4@9S( zzqykc-HujSXbly>qY9f@DQveCJ~8+4;P`Us^o2UD<>d5~MLiP(Vcp{LcD$Xon@7R_ zhRn6S0$+qizulBZ(=~8APw#uhb@6BK8T5143jNW2n$WLlV(;h|w;%1-CqdQgkDgUr z`{i?AD((lE%2%gk^tRUcjz!cFWji*`%8J`O?>JH_AAq+=^$h)M)~Tge9&kC z_z~HPrblZjA-hbKy+@|!r9$#It_d?+oVa!y@I{73o1%g-vt>WbimpszoW@(FSIosT zw$0U;(V?mJ<)J7gp&zn6NmkGXeUz5J7f!@Mfi*F@o?>Mme3^AKmV<5c$-GJ*RLEyQ zQz4jAkFHgBrlW1d_q3juh^r&~S0tL@tM1=Tmz&n;=Iy6xz67pweBV{Yd2h~xB%}io z-N%|F7s{(WD1-_h<87hu$rL+RCQ9v%NMas|FdgvjDs?(T(YiV;>SGQ0sy{a=hpOCv zh73SJ`2l~$3;=-rd{Xs)8~pnl7J&5MM@Z2Bdtho}>0)Z4?_#g->TIfSVruH(>|knY z!T|Gckput$1b_uV<-qRJ-dq9s+kg6N{I^I?|A0jHcSv79fBoMfx!60o*jqZA(z&=e z{~fHPsmJ{P1pAK%NcVTJp8v0~&c;rb4*vlEnf3c0jv5+Z9zmkcQn-1xfd~M|G64WQ z{|Qg{sqetQ3jZHc=zmJ_8Tj~H4~$vDw>&ofxd)U5fA2Q^f3o?@?N_A#M-Tpk#ow8+ z`;iW%^H0wKXh26MsX8*w8`mWDrxNu50PBC^|D}cm^1q62=~`Bzz(*#8%Hbcp^!*7~PJK2tvbUt|I049w9pV!>~SOflYCH4Q8hN0`{d17kr zWa{i}>1O&5K1~t5qWyDupaHD$r0PY$?fl$NpCmqg`akmg#midzUzNwn)X?M~XrKmB zVMzW{m!JWiIi%`yX>I&O)&M~36UgVX_qX!sehT*AEnKcHmNx%5O@$lV1~vW%#Q!_{ q^|y$flm9*9eFc1i?qVPMGpmxvIb!U6zrpD(D-5z+Zy-~Jb2W%kGb -- 2.45.2