forked from mrq/DL-Art-School
Fix evaluation when using multiple batch sizes
parent 572d137589
commit 3d916e7687
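The old condition only fired when total_training_data_encountered landed exactly on a multiple of val_freq. Since that counter advances by the batch size each step, any batch size that does not divide val_freq can step over every multiple, and evaluation never runs. A minimal sketch of the failure mode, using hypothetical numbers (a batch size of 384 and a 1-megasample interval are assumptions for illustration):

```python
# Hypothetical illustration of the bug: the modulo trigger only fires when
# the sample counter lands exactly on a multiple of val_freq.
val_freq = 1_000_000            # val_freq_megasamples = 1 (assumed)
total = 0                       # stands in for total_training_data_encountered
evals = 0
for step in range(10_000):
    total += 384                # batch size that does not divide val_freq
    if total % val_freq == 0:   # old check: never true for these numbers
        evals += 1
print(evals)                    # 0 -- evaluation never triggers
```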
@@ -37,6 +37,7 @@ class Trainer:
         self.val_compute_fea = opt_get(opt, ['eval', 'compute_fea'], False)
         self.current_step = 0
         self.total_training_data_encountered = 0
+        self.next_eval_step = 0

         #### loading resume state if exists
         if opt['path'].get('resume_state', None):
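This hunk just initializes the new counter alongside the existing ones. A minimal sketch of the state involved, assuming the field names from the diff; note that next_eval_step starts at 0, so the first threshold check fires immediately, and since the hunk does not show the value being restored from the resume state, a resumed run would presumably also evaluate on its first step:

```python
class Trainer:
    def __init__(self):
        self.current_step = 0                     # optimizer steps taken
        self.total_training_data_encountered = 0  # cumulative samples seen
        self.next_eval_step = 0                   # sample threshold for the next
                                                  # pure eval; 0 fires immediately
```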
@@ -253,7 +254,9 @@ class Trainer:
             val_freq = opt['train']['val_freq'] * batch_size
         else:
             val_freq = int(opt['train']['val_freq_megasamples'] * 1000000)
-        if opt_get(opt, ['eval', 'pure'], False) and self.total_training_data_encountered % val_freq == 0:
+
+        if opt_get(opt, ['eval', 'pure'], False) and self.total_training_data_encountered > self.next_eval_step:
+            self.next_eval_step = self.total_training_data_encountered + val_freq
             metrics = []
             for val_data in tqdm(self.val_loader):
                 self.model.feed_data(val_data, self.current_step, perform_micro_batching=False)
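Re-running the same hypothetical numbers against the new threshold check shows evaluation now firing roughly once per val_freq samples regardless of batch size (again, 384 and 1 megasample are assumed values, not from the source):

```python
val_freq = 1_000_000
total = 0
next_eval_step = 0              # as initialized in the first hunk
evals = 0
for step in range(10_000):
    total += 384
    if total > next_eval_step:  # new check: a threshold, not an exact multiple
        next_eval_step = total + val_freq
        evals += 1
print(evals)                    # 4 -- fires on the first step (threshold starts
                                # at 0), then roughly every 1,000,000 samples
```

One consequence of re-arming with `total + val_freq` rather than `next_eval_step + val_freq` is that each interval runs slightly longer than val_freq by however far the counter overshot the threshold; for this purpose that drift is harmless.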