diff --git a/codes/trainer/eval/audio_diffusion_fid.py b/codes/trainer/eval/audio_diffusion_fid.py
index 7878c6ba..1ac133a2 100644
--- a/codes/trainer/eval/audio_diffusion_fid.py
+++ b/codes/trainer/eval/audio_diffusion_fid.py
@@ -15,7 +15,7 @@ import trainer.eval.evaluator as evaluator
 from data.audio.paired_voice_audio_dataset import load_tsv_aligned_codes
 from data.audio.unsupervised_audio_dataset import load_audio
 from models.clip.mel_text_clip import MelTextCLIP
-from models.tacotron2.text import sequence_to_text
+from models.tacotron2.text import sequence_to_text, text_to_sequence
 from scripts.audio.gen.speech_synthesis_utils import load_discrete_vocoder_diffuser, wav_to_mel, load_speech_dvae, \
     convert_mel_to_codes
 from utils.util import ceil_multiple, opt_get
@@ -62,21 +62,24 @@ class AudioDiffusionFid(evaluator.Evaluator):
                                                   'conditioning_input': real_resampled})
         return gen, real_resampled, sample_rate
 
-    def perform_dvae_diffusion(self, audio, codes, text, sample_rate=5500):
+    def perform_diffusion_vocoder(self, audio, codes, text, sample_rate=5500):
         mel = wav_to_mel(audio)
         mel_codes = convert_mel_to_codes(self.dvae, mel)
-
-        text_codes = sequence_to_text(text)
-
+        text_codes = text_to_sequence(text, ['english_cleaners'])
         real_resampled = torchaudio.functional.resample(audio, 22050, sample_rate).unsqueeze(0)
-        aligned_codes_compression_factor = sample_rate * 221 // 11025
-        output_size = codes.shape[-1]*aligned_codes_compression_factor
+
+        output_size = real_resampled.shape[-1]
+        aligned_codes_compression_factor = output_size // mel_codes.shape[-1]
         padded_size = ceil_multiple(output_size, 2048)
+        padding_added = padded_size - output_size
+        padding_needed_for_codes = padding_added // aligned_codes_compression_factor
+        if padding_needed_for_codes > 0:
+            mel_codes = F.pad(mel_codes, (0, padding_needed_for_codes))
         output_shape = (1, 1, padded_size)
         gen = self.diffuser.p_sample_loop(self.model, output_shape,
-                                    model_kwargs={'tokens': mel_codes.unsqueeze(0),
-                                                  'conditioning_input': real_resampled,
-                                                  'unaligned_input': text_codes})
+                                    model_kwargs={'tokens': mel_codes,
+                                                  'conditioning_input': audio.unsqueeze(0),
+                                                  'unaligned_input': torch.tensor(text_codes, device=audio.device).unsqueeze(0)})
         return gen, real_resampled, sample_rate
 
     def load_projector(self):
@@ -92,7 +95,7 @@ class AudioDiffusionFid(evaluator.Evaluator):
         return model
 
     def project(self, projector, sample, sample_rate):
-        sample = torchaudio.resample(sample, sample_rate, 22050)
+        sample = torchaudio.functional.resample(sample, sample_rate, 22050)
         mel = wav_to_mel(sample)
         return projector.get_speech_projection(mel).squeeze(0)  # Getting rid of the batch dimension means it's just [hidden_dim]
 
@@ -125,10 +128,10 @@ class AudioDiffusionFid(evaluator.Evaluator):
                 path, text, codes = self.data[i + self.env['rank']]
                 audio = load_audio(path, 22050).to(self.dev)
                 codes = codes.to(self.dev)
-                sample, ref, sample_rate = self.perform_diffusion_fn(audio, codes, text)
+                sample, ref, sample_rate = self.diffusion_fn(audio, codes, text)
 
-                gen_projections.append(self.project(projector, sample).cpu(), sample_rate)  # Store on CPU to avoid wasting GPU memory.
-                real_projections.append(self.project(projector, ref).cpu(), sample_rate)
+                gen_projections.append(self.project(projector, sample, sample_rate).cpu())  # Store on CPU to avoid wasting GPU memory.
+                real_projections.append(self.project(projector, ref, sample_rate).cpu())
 
                 torchaudio.save(os.path.join(save_path, f"{self.env['rank']}_{i}_gen.wav"), sample.squeeze(0).cpu(), sample_rate)
                 torchaudio.save(os.path.join(save_path, f"{self.env['rank']}_{i}_real.wav"), ref.squeeze(0).cpu(), sample_rate)
@@ -149,9 +152,9 @@ if __name__ == '__main__':
     from utils.util import load_model_from_config
 
     diffusion = load_model_from_config('X:\\dlas\\experiments\\train_diffusion_tts7_dvae_thin_with_text.yml', 'generator',
-                                       also_load_savepoint=False, load_path='X:\\dlas\\experiments\\train_diffusion_tts7_dvae_thin_with_text\\models\\12500_generator_ema.pth').cuda()
+                                       also_load_savepoint=False, load_path='X:\\dlas\\experiments\\train_diffusion_tts7_dvae_thin_with_text\\models\\5500_generator_ema.pth').cuda()
     opt_eval = {'eval_tsv': 'Y:\\libritts\\test-clean\\transcribed-brief-w2v.tsv', 'diffusion_steps': 50,
                 'diffusion_schedule': 'linear', 'diffusion_type': 'vocoder'}
     env = {'rank': 0, 'base_path': 'D:\\tmp\\test_eval', 'step': 500, 'device': 'cuda', 'opt': {}}
     eval = AudioDiffusionFid(diffusion, opt_eval, env)
-    eval.perform_eval()
\ No newline at end of file
+    print(eval.perform_eval())
\ No newline at end of file
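
The new padding logic in perform_diffusion_vocoder rounds the output length up to a multiple of 2048 and right-pads the mel codes by the proportional number of frames, so the code sequence still covers the padded waveform. A minimal standalone sketch of that arithmetic, assuming ceil_multiple (from utils.util) rounds up to the nearest multiple, with hypothetical shapes (one second of 5500Hz audio against 25 dvae codes):

    import torch
    import torch.nn.functional as F

    def ceil_multiple(x, m):
        # Assumed behavior of utils.util.ceil_multiple: round x up to a multiple of m.
        return ((x + m - 1) // m) * m

    real_resampled = torch.randn(1, 1, 5500)    # hypothetical resampled audio
    mel_codes = torch.randint(0, 512, (1, 25))  # hypothetical dvae code sequence

    output_size = real_resampled.shape[-1]                    # 5500
    compression = output_size // mel_codes.shape[-1]          # 220 samples per code
    padded_size = ceil_multiple(output_size, 2048)            # 6144
    padding_added = padded_size - output_size                 # 644
    codes_pad = padding_added // compression                  # 2 extra code frames
    if codes_pad > 0:
        mel_codes = F.pad(mel_codes, (0, codes_pad))          # zero-pad on the right
    assert mel_codes.shape[-1] == 27

Note that the integer division means the padded codes can undershoot the padded waveform by up to one compression factor; p_sample_loop still receives the exact padded output_shape, so the slack only affects the code conditioning.
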
diff --git a/codes/trainer/eval/eval_wer.py b/codes/trainer/eval/eval_wer.py
index 7b91a78b..037bf3d8 100644
--- a/codes/trainer/eval/eval_wer.py
+++ b/codes/trainer/eval/eval_wer.py
@@ -128,10 +128,10 @@ if __name__ == '__main__':
         'clip_lengths_key': 'wav_lengths',
         'text_seq_key': 'padded_text',
         'text_seq_lengths_key': 'text_lengths',
-        'kenlm_path': 'Y:\\bookscorpus-5gram\\5gram.bin'
+        #'kenlm_path': 'Y:\\bookscorpus-5gram\\5gram.bin',
     }
     model = Wav2VecWrapper(vocab_size=148, basis_model='facebook/wav2vec2-large-robust-ft-libri-960h', freeze_transformer=True, checkpointing_enabled=False)
-    weights = torch.load('X:\\dlas\\experiments/train_wav2vec_mass_large/models/13250_wav2vec.pth')
+    weights = torch.load('D:\\dlas\\experiments\\train_wav2vec_mass_large2\\models\\17000_wav2vec.pth')
     model.load_state_dict(weights)
     model = model.cuda()
     eval = WerEvaluator(model, opt_eval, env)
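
In eval_wer.py, commenting out kenlm_path presumably drops the external 5-gram language model from decoding, so the reported WER reflects the acoustic model alone. As a reminder of the metric itself (a generic definition, not WerEvaluator's implementation): word error rate is the word-level Levenshtein distance between hypothesis and reference, normalized by reference length.

    def wer(reference: str, hypothesis: str) -> float:
        # Word-level edit distance divided by the number of reference words.
        ref, hyp = reference.split(), hypothesis.split()
        # dp[i][j] = edits needed to turn ref[:i] into hyp[:j]
        dp = [[0] * (len(hyp) + 1) for _ in range(len(ref) + 1)]
        for i in range(len(ref) + 1):
            dp[i][0] = i
        for j in range(len(hyp) + 1):
            dp[0][j] = j
        for i in range(1, len(ref) + 1):
            for j in range(1, len(hyp) + 1):
                sub = dp[i - 1][j - 1] + (ref[i - 1] != hyp[j - 1])
                dp[i][j] = min(sub, dp[i - 1][j] + 1, dp[i][j - 1] + 1)
        return dp[len(ref)][len(hyp)] / max(len(ref), 1)

    # e.g. wer("the cat sat", "the cat sat down") == 1/3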