diff --git a/api.py b/api.py
index 7c33484..e57ed03 100644
--- a/api.py
+++ b/api.py
@@ -133,7 +133,8 @@ class TextToSpeech:
         self.tokenizer = VoiceBpeTokenizer()
         download_models()
 
-        self.autoregressive = UnifiedVoice(max_mel_tokens=300, max_text_tokens=200, max_conditioning_inputs=2, layers=30,
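+        # Sequence limits are (presumably) ~600 mel / ~400 text tokens plus a little slack for the start/stop tokens the model appends internally.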
+        self.autoregressive = UnifiedVoice(max_mel_tokens=604, max_text_tokens=402, max_conditioning_inputs=2, layers=30,
                                       model_dim=1024,
                                       heads=16, number_text_tokens=256, start_text_token=255, checkpointing=False,
                                       train_solo_embeddings=False,
@@ -151,14 +152,19 @@ class TextToSpeech:
                                       layer_drop=0, unconditioned_percentage=0).cpu().eval()
         self.diffusion.load_state_dict(torch.load('.models/diffusion.pth'))
 
+        # A second diffusion decoder, loaded alongside the original so both can decode the same autoregressive codes for comparison.
+        self.diffusion_next = DiffusionTts(model_channels=1024, num_layers=10, in_channels=100, out_channels=200,
+                                      in_latent_channels=1024, in_tokens=8193, dropout=0, use_fp16=False, num_heads=16,
+                                      layer_drop=0, unconditioned_percentage=0).cpu().eval()
+        self.diffusion_next.load_state_dict(torch.load('.models/diffusion_next.pth'))
+
         self.vocoder = UnivNetGenerator().cpu()
         self.vocoder.load_state_dict(torch.load('.models/vocoder.pth')['model_g'])
         self.vocoder.eval(inference=True)
 
     def tts(self, text, voice_samples, k=1,
             # autoregressive generation parameters follow
-            num_autoregressive_samples=512, temperature=.5, length_penalty=2, repetition_penalty=2.0, top_p=.5,
-            typical_sampling=False, typical_mass=.9,
+            num_autoregressive_samples=512, temperature=.5, length_penalty=1, repetition_penalty=2.0, top_p=.5,
             # diffusion generation parameters follow
             diffusion_iterations=100, cond_free=True, cond_free_k=2, diffusion_temperature=.7,):
         text = torch.IntTensor(self.tokenizer.encode(text)).unsqueeze(0).cuda()
@@ -185,10 +191,9 @@ class TextToSpeech:
                                                              temperature=temperature,
                                                              num_return_sequences=self.autoregressive_batch_size,
                                                              length_penalty=length_penalty,
-                                                             repetition_penalty=repetition_penalty,
-                                                             typical_sampling=typical_sampling,
-                                                             typical_mass=typical_mass)
-                padding_needed = 250 - codes.shape[1]
+                                                             repetition_penalty=repetition_penalty)
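+                # Pad every candidate out to the model's maximum mel length so all sample batches share a fixed shape.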
+                padding_needed = self.autoregressive.max_mel_tokens - codes.shape[1]
                 codes = F.pad(codes, (0, padding_needed), value=stop_mel_token)
                 samples.append(codes)
             self.autoregressive = self.autoregressive.cpu()
diff --git a/api_new_autoregressive.py b/api_new_autoregressive.py
index 1ba90e4..cd5cd89 100644
--- a/api_new_autoregressive.py
+++ b/api_new_autoregressive.py
@@ -135,7 +135,7 @@ class TextToSpeech:
         download_models()
 
         self.autoregressive = AutoregressiveCodegen(1024, 16).cpu().eval()
-        self.autoregressive.load_state_dict(torch.load('X:\\dlas\\experiments\\train_autoregressive_codegen\\models\\17000_codegen_ema.pth'))
+        self.autoregressive.load_state_dict(torch.load('X:\\dlas\\experiments\\train_autoregressive_codegen\\models\\20750_codegen_ema.pth'))
 
         self.clip = VoiceCLIP(dim_text=512, dim_speech=512, dim_latent=512, num_text_tokens=256, text_enc_depth=12,
                              text_seq_len=350, text_heads=8,
diff --git a/eval_multiple.py b/eval_multiple.py
index a3bf49f..99e1eae 100644
--- a/eval_multiple.py
+++ b/eval_multiple.py
@@ -7,7 +7,8 @@ from utils.audio import load_audio
 
 if __name__ == '__main__':
     fname = 'Y:\\libritts\\test-clean\\transcribed-brief-w2v.tsv'
-    outpath = 'D:\\tmp\\tortoise-tts-eval\\attempt_best'
+    outpath = 'D:\\tmp\\tortoise-tts-eval\\compare_vocoders'
     outpath_real = 'D:\\tmp\\tortoise-tts-eval\\real'
 
-    os.makedirs(outpath, exist_ok=True)
+    os.makedirs(os.path.join(outpath, 'old'), exist_ok=True)
+    os.makedirs(os.path.join(outpath, 'new'), exist_ok=True)
@@ -24,12 +25,19 @@ if __name__ == '__main__':
         path = os.path.join(os.path.dirname(fname), line[1])
         cond_audio = load_audio(path, 22050)
         torchaudio.save(os.path.join(outpath_real, os.path.basename(line[1])), cond_audio, 22050)
-        sample = tts.tts(transcript, [cond_audio, cond_audio], num_autoregressive_samples=512, k=1,
+        sample, sample2 = tts.tts(transcript, [cond_audio, cond_audio], num_autoregressive_samples=512, k=1,
                              repetition_penalty=2.0, length_penalty=2, temperature=.5, top_p=.5,
-                             diffusion_temperature=.7, cond_free_k=2, diffusion_iterations=400)
+                             diffusion_temperature=.7, cond_free_k=2, diffusion_iterations=200)
+
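+        # The first sample is decoded by the original diffusion model, the second by diffusion_next; write each to its own subfolder.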
         down = torchaudio.functional.resample(sample, 24000, 22050)
-        fout_path = os.path.join(outpath, os.path.basename(line[1]))
+        fout_path = os.path.join(outpath, 'old', os.path.basename(line[1]))
         torchaudio.save(fout_path, down.squeeze(0), 22050)
+
+        down = torchaudio.functional.resample(sample2, 24000, 22050)
+        fout_path = os.path.join(outpath, 'new', os.path.basename(line[1]))
+        torchaudio.save(fout_path, down.squeeze(0), 22050)
+
         recorder.write(f'{transcript}\t{fout_path}\n')
         recorder.flush()
     recorder.close()
\ No newline at end of file
diff --git a/models/new_autoregressive.py b/models/new_autoregressive.py
index c372f62..aba8c11 100644
--- a/models/new_autoregressive.py
+++ b/models/new_autoregressive.py
@@ -168,6 +168,9 @@ class AutoregressiveCodegen(nn.Module):
 
         self.START_TOKEN=8192
         self.STOP_TOKEN=8193
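+        # Fixed tokens used to bracket text sequences below; 255/0 follow the start/stop text-token convention UnifiedVoice uses.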
+        self.START_TEXT_TOKEN = 255
+        self.STOP_TEXT_TOKEN = 0
         self.max_text_token_id = num_text_tokens
         self.max_mel_token_id = num_mel_tokens
         self.mel_embedding = ConditioningEncoder(80, model_dim, do_checkpointing=False)
@@ -231,6 +234,9 @@
         for i in range(conditioning_signal.shape[1]):
             cond_embs.append(self.mel_embedding(conditioning_signal[:, i]))
         cond_emb = torch.stack(cond_embs, dim=1).mean(dim=1, keepdim=True)
+        # Since all positional embeddings are relative, it is (probably) important to anchor the text with fixed start/stop tokens that act as absolute reference points.
+        text_codes = F.pad(text_codes, (1,0), value=self.START_TEXT_TOKEN)
+        text_codes = F.pad(text_codes, (0,1), value=self.STOP_TEXT_TOKEN)
         _, enc_text = self.encoder(text_codes, return_hiddens=True)
         # Interleave cond_emb into the first few contexts.
         full_context = enc_text
@@ -255,6 +261,9 @@
         for i in range(conditioning_signal.shape[1]):
             cond_embs.append(self.mel_embedding(conditioning_signal[:, i]))
         cond_emb = torch.stack(cond_embs, dim=1).mean(dim=1, keepdim=True)
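+        # Bracket the text with the same fixed start/stop tokens as above.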
+        text_codes = F.pad(text_codes, (1,0), value=self.START_TEXT_TOKEN)
+        text_codes = F.pad(text_codes, (0,1), value=self.STOP_TEXT_TOKEN)
         _, enc_text = self.encoder(text_codes, return_hiddens=True)
         # Interleave cond_emb into the first few contexts.
         full_context = enc_text
diff --git a/models/text_voice_clip.py b/models/text_voice_clip.py
index b4b51a7..674e62b 100644
--- a/models/text_voice_clip.py
+++ b/models/text_voice_clip.py
@@ -55,7 +55,6 @@ class VoiceCLIP(nn.Module):
                 needs_permute=False,
                 exit_permute=False,
                 max_seq_len=-1,
-                use_pos_emb=False,
                 attn_layers=Encoder(
                     dim=dim_text,
                     depth=text_enc_depth,
@@ -71,7 +70,6 @@ class VoiceCLIP(nn.Module):
                 needs_permute=False,
                 exit_permute=False,
                 max_seq_len=-1,
-                use_pos_emb=False,
                 attn_layers=Encoder(
                     dim=dim_speech,
                     depth=speech_enc_depth,
diff --git a/models/xtransformers.py b/models/xtransformers.py
index 632349b..2e32c09 100644
--- a/models/xtransformers.py
+++ b/models/xtransformers.py
@@ -1186,7 +1186,10 @@
         if use_cache:
             res.append(intermediates.past_key_values)
 
-        return res
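+        # Unwrap single-element results: callers get a bare tensor unless hiddens, attention maps, or cached key/values were also requested.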
+        if len(res) > 1:
+            return tuple(res)
+        return res[0]
 
 
 class ContinuousTransformerWrapper(nn.Module):
@@ -1247,7 +1250,9 @@
         if use_cache:
             res.append(intermediates.past_key_values)
 
-        return tuple(res)
+        if len(res) > 1:
+            return tuple(res)
+        return res[0]
 
 
 class XTransformer(nn.Module):