def on_epoch_end(self, epoch, logs=None):
    """Evaluate mean-AP over `self.data` at the end of each epoch.

    Decodes the model's raw outputs for every batch, updates the running
    mean-AP metric against the ground truth, publishes the result into the
    Keras `logs` dict as 'val_meanAP', then resets the metric for the next
    epoch.

    Args:
        epoch: integer epoch index (supplied by Keras, unused here).
        logs: mutable dict of metrics for this epoch, or None. The original
            code subscripted `logs` unconditionally, which raises TypeError
            when Keras invokes the callback without a logs dict — guarded
            here.
    """
    for x, y_true in self.data:
        # Decode each raw model output head into box predictions.
        # NOTE(review): `output_encoder` is a module-level name defined
        # elsewhere in this file — presumably the train-time encoder.
        predictions = [
            output_encoder.decode(y, self.anchors, self.model)
            for y in self.model(x)
        ]
        ground_truth = [y.to_tensor() for y in y_true]
        self.mean_AP_metric.update_state(ground_truth, predictions)
    # Fix: only publish when Keras actually passed a logs dict; the
    # original crashed with TypeError when logs was None.
    if logs is not None:
        logs['val_meanAP'] = self.mean_AP_metric.result().numpy()
    # Reset so the next epoch's evaluation starts from a clean state.
    self.mean_AP_metric.reset_state()
# Evaluate every training run of the naive-pasting ablation on the test set
# and append the per-run test mean-AP to `results` before writing the CSV.
for run in range(1, config.NUM_RUNS + 1):
    # Each run stored its weights in its own numbered directory.
    weights_dir = 'weights_naive_pasting_ablation_{}'.format(run)
    model_name = '{}_{}_samples_{}'.format(architecture.lower(), train_samples, train_type)
    print('\nGenerating results for {}'.format(model_name))
    model_file = model_name + '.h5'
    model_path = os.path.join(weights_dir, model_file)
    # Fix (consistency): fail fast with a clear message when the weights
    # file is missing, matching the existence check used by the other
    # evaluation section in this file.
    if not os.path.exists(model_path):
        raise Exception('Model weights at {} not found'.format(model_path))
    model.load_weights(model_path)
    meanAP_metric.reset_state()
    # Accumulate mean-AP over the full test set for this run.
    for x, y_true in test_data:
        ground_truth = [y.to_tensor() for y in y_true]
        predictions = [
            output_encoder.decode(y, anchors, model) for y in model(x)
        ]
        meanAP_metric.update_state(ground_truth, predictions)
    test_meanAP = meanAP_metric.result().numpy()
    results['run_{}'.format(run)].append(test_meanAP)
    print('test meanAP:', test_meanAP)

# Release the (shared) model before building the results table; it is
# reloaded with fresh weights inside the loop, so deletion must come after.
del model
results = pd.DataFrame(results)
results.to_csv('results_naive_pasting_ablation.csv', index=False)
# NOTE(review): this chunk appears scrambled — the statements below reference
# `train_type` and `run`, which are bound by the loop headers that only
# appear AFTER them in this chunk. Presumably this is the tail of a run-loop
# body from the surrounding (unseen) file context; verify against the full
# file before reformatting.
# Build the weights filename for this architecture/train-type combination.
model_name = architecture.lower() + '_{}'.format(train_type)
print('\nGenerating results for {}'.format(model_name))
model_file = model_name + '.h5'
model_path = os.path.join(weights_dir, model_file)
# Fail fast with an explicit error if the expected weights file is absent.
if not os.path.exists(model_path):
    raise Exception('Model weights at {} not found'.format(model_path))
model.load_weights(model_path)
meanAP_metric.reset_state()
# Accumulate mean-AP over the whole test set.
for x, y_true in test_data:
    ground_truth = [y.to_tensor() for y in y_true]
    predictions = [
        output_encoder.decode(y, anchors, model) for y in model(x)
    ]
    meanAP_metric.update_state(ground_truth, predictions)
test_meanAP = meanAP_metric.result().numpy()
results['run_{}'.format(run)].append(test_meanAP)
print('test meanAP:', test_meanAP)

# Sweep every (train_samples, train_type) configuration and record the
# configuration columns of the results table.
for train_samples in config.TRAIN_SAMPLES:
    for train_type in ['from_scratch', 'finetuned']:
        results['architecture'].append(architecture)
        results['train_samples'].append(train_samples)
        results['train_type'].append(train_type)
        # NOTE(review): the body of this run loop continues beyond this
        # chunk — incomplete from this view.
        for run in range(1, config.NUM_RUNS + 1):
# NOTE(review): these three appends reference `num_real_samples`,
# `num_fake_samples` and `train_type`, which are bound by enclosing loops
# outside this chunk — presumably a sweep over real/fake sample counts;
# confirm nesting against the full file.
results['num_real_samples'].append(num_real_samples)
results['num_fake_samples'].append(num_fake_samples)
results['train_type'].append(train_type)
# Evaluate each training run of the pretraining ablation on the test set.
for run in range(1, config.NUM_RUNS+1):
    # Each run stored its weights in its own numbered directory.
    weights_dir = 'weights_pretraining_ablation_{}'.format(run)
    model_name = '{}_{}_{}x'.format(architecture.lower(), train_type, prop_fake_samples)
    print('\nGenerating results for {}'.format(model_name))
    model_file = model_name + '.h5'
    model_path = os.path.join(weights_dir, model_file)
    model.load_weights(model_path)
    meanAP_metric.reset_state()
    # Accumulate mean-AP over the whole test set for this run.
    for x, y_true in test_data:
        ground_truth = [y.to_tensor() for y in y_true]
        predictions = [output_encoder.decode(y, anchors, model) for y in model(x)]
        meanAP_metric.update_state(ground_truth, predictions)
    test_meanAP = meanAP_metric.result().numpy()
    results['run_{}'.format(run)].append(test_meanAP)
    print('test meanAP:', test_meanAP)
# Release the model before building the results table; weights are reloaded
# inside the loop, so the deletion must come after all runs.
del model
results = pd.DataFrame(results)
results.to_csv('results_pretraining_ablation.csv', index=False)