def train_DA(epoch):
    net.train()
    optimizer = torch.optim.Adam(
        filter(lambda p: p.requires_grad, net.parameters()), lr=lr)
    if not os.path.exists(output_dir):
        os.mkdir(output_dir)
    data_loader = ImageDataLoader(train_path, train_gt_path, shuffle=True,
                                  gt_downsample=True, pre_load=False)
    best_mae = sys.maxsize
    step = -1
    train_loss = 0
    gt_count = 0
    et_count = 0
    dtype = torch.FloatTensor
    for blob in data_loader:
        step = step + 1
        im_data = blob['data']
        gt_data = blob['gt_density']
        # certified input: randomly ablate input pixels before the forward pass
        im_data = torch.from_numpy(im_data).type(dtype)
        im_data = im_data.to(device)
        im_data = random_mask_batch_one_sample(im_data, keep, reuse_noise=True)
        im_data = Variable(im_data)
        gt_data = torch.from_numpy(gt_data).type(dtype)
        gt_data = gt_data.to(device)
        gt_data = Variable(gt_data)
        density_map = net(im_data, gt_data)
        zzk_loss = net.loss
        train_loss += zzk_loss.item()
        gt_count = np.sum(gt_data.data.detach().cpu().numpy())
        et_count = np.sum(density_map.data.detach().cpu().numpy())
        print("gt_count: ", gt_count)
        print("et_count: ", et_count)
        optimizer.zero_grad()
        zzk_loss.backward()
        optimizer.step()
    train_loss = train_loss / data_loader.get_num_samples()
    if epoch % 100 == 0:
        save_name = os.path.join(
            output_dir, '{}_{}_{}.h5'.format(method, dataset_name, epoch))
        network.save_net(save_name, net)
    return train_loss
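# `random_mask_batch_one_sample(im_data, keep, reuse_noise)` is used above for
# the "certified input" step but is not defined in this listing. A minimal
# sketch of what it is assumed to do, namely randomized pixel ablation: retain
# a random subset of pixels (roughly a `keep` fraction) and zero out the rest,
# reusing one mask across the batch when reuse_noise=True. Hypothetical
# implementation, for illustration only.
import torch

def random_mask_batch_one_sample(batch, keep, reuse_noise=True):
    b, c, h, w = batch.shape
    if reuse_noise:
        # one shared mask for every image in the batch
        mask = (torch.rand(1, 1, h, w, device=batch.device) < keep).float()
    else:
        # an independent mask per image
        mask = (torch.rand(b, 1, h, w, device=batch.device) < keep).float()
    return batch * mask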
mae = 0.0
rmse = 0.0
squared_errors = []  # per-image squared count errors, kept for mRMSE
for blob in data_loader:
    im_data = blob['data']
    gt_data = blob['gt_density']
    density_map = net(im_data, gt_data)
    density_map = density_map.data.cpu().numpy()
    gt_count = np.sum(gt_data)
    et_count = np.sum(density_map)
    mae += abs(gt_count - et_count)
    rmse += (gt_count - et_count) * (gt_count - et_count)
    squared_errors.append((gt_count - et_count) ** 2)
    if vis:
        utils.display_results(im_data, gt_data, density_map)
    if save_output:
        utils.save_density_map(
            density_map, output_dir,
            'output_' + blob['fname'].split('.')[0] + '.png')
mae = mae / data_loader.get_num_samples()
rmse = np.sqrt(rmse / data_loader.get_num_samples())
print('rmse: ', rmse)
mrmse = np.mean(np.sqrt(squared_errors))  # mean of per-image root errors
print('\nMAE: %0.3f, RMSE: %0.3f' % (mae, rmse))
print('\nmRMSE: %0.3f' % mrmse)
f = open(file_results, 'w')
f.write('MAE: %0.3f, RMSE: %0.3f' % (mae, rmse))
f.close()
log_text = 'epoch: %4d, step %4d, Time: %.4fs, gt_cnt: %4.1f, et_cnt: %4.1f' % (
    epoch, step, 1. / fps, gt_count, et_count)
log_print(log_text, color='green', attrs=['bold'])
re_cnt = True

if re_cnt:
    t.tic()
    re_cnt = False

if epoch % 2 == 0:
    save_name = os.path.join(
        output_dir, '{}_{}_{}.h5'.format(method, dataset_name, epoch))
    network.save_net(save_name, net)
    # calculate error on the validation dataset
    mae, mse = evaluate_model(save_name, data_loader_val)
    if mae < best_mae:
        best_mae = mae
        best_mse = mse
        best_model = '{}_{}_{}.h5'.format(method, dataset_name, epoch)
    log_text = 'EPOCH: %d, MAE: %.1f, MSE: %0.1f' % (epoch, mae, mse)
    log_print(log_text, color='green', attrs=['bold'])
    log_text = 'BEST MAE: %0.1f, BEST MSE: %0.1f, BEST MODEL: %s' % (
        best_mae, best_mse, best_model)
    log_print(log_text, color='green', attrs=['bold'])
    if use_tensorboard:
        exp.add_scalar_value('MAE', mae, step=epoch)
        exp.add_scalar_value('MSE', mse, step=epoch)
        exp.add_scalar_value('train_loss',
                             train_loss / data_loader.get_num_samples(),
                             step=epoch)
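# `evaluate_model(save_name, data_loader_val)` is assumed to load the saved
# checkpoint and return (MAE, MSE) over the validation loader, matching the
# standalone evaluation script below. A minimal sketch under that assumption
# (hypothetical, reusing the global `net`):
def evaluate_model(trained_model, data_loader):
    network.load_net(trained_model, net)
    net.eval()
    mae, mse = 0.0, 0.0
    with torch.no_grad():
        for blob in data_loader:
            density_map = net(blob['data'], blob['gt_density'])
            gt_count = np.sum(blob['gt_density'])
            et_count = np.sum(density_map.data.cpu().numpy())
            mae += abs(gt_count - et_count)
            mse += (gt_count - et_count) ** 2
    n = data_loader.get_num_samples()
    return mae / n, np.sqrt(mse / n)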
trained_model = os.path.join(model_path)
network.load_net(trained_model, net)
net.cuda()
net.eval()
mae = 0.0
mse = 0.0
for blob in data_loader:
    im_data = blob['data']
    gt_data = blob['gt_density']
    density_map = net(im_data, gt_data)
    density_map = density_map.data.cpu().numpy()
    gt_count = np.sum(gt_data)
    et_count = np.sum(density_map)
    mae += abs(gt_count - et_count)
    mse += (gt_count - et_count) * (gt_count - et_count)
    if vis:
        utils.display_results(im_data, gt_data, density_map)
    if save_output:
        utils.save_density_map(
            density_map, output_dir,
            'output_' + blob['fname'].split('.')[0] + '.png')
mae = mae / data_loader.get_num_samples()
mse = np.sqrt(mse / data_loader.get_num_samples())
print('MAE: %0.2f, MSE: %0.2f' % (mae, mse))
f = open(file_results, 'w')
f.write('MAE: %0.2f, MSE: %0.2f' % (mae, mse))
f.close()
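# `network.save_net` / `network.load_net` persist model weights as .h5 files.
# A minimal sketch assuming each state_dict entry maps to one HDF5 dataset
# (an assumption about the checkpoint format, shown only to make it concrete):
import h5py

def save_net(fname, net):
    with h5py.File(fname, 'w') as h5f:
        for k, v in net.state_dict().items():
            h5f.create_dataset(k, data=v.cpu().numpy())

def load_net(fname, net):
    with h5py.File(fname, 'r') as h5f:
        for k, v in net.state_dict().items():
            v.copy_(torch.from_numpy(np.asarray(h5f[k])))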
def test_DA(epoch):
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.benchmark = False
    data_path_DA = '../ShanghaiTech/part_A_final/test_data/images/'
    gt_path_DA = '../ShanghaiTech/part_A_final/test_data/after_ground_truth/'
    net.to(device)
    net.eval()
    mae = 0.0
    mse = 0.0
    data_loader_DA = ImageDataLoader(data_path_DA, gt_path_DA, shuffle=False,
                                     gt_downsample=True, pre_load=False)

    # save output images
    if not os.path.exists('./results_DA_ablated'):
        os.mkdir('./results_DA_ablated')
    if not os.path.exists('./results_DA_ablated/density_map_adv'):
        os.mkdir('./results_DA_ablated/density_map_adv')
    if not os.path.exists('./results_DA_ablated/images_adv'):
        os.mkdir('./results_DA_ablated/images_adv')
    if not os.path.exists('./results_DA_ablated/images_gt'):
        os.mkdir('./results_DA_ablated/images_gt')

    correct = 0
    total = 0
    dtype = torch.FloatTensor

    # ablated test
    for blob in data_loader_DA:
        im_data = blob['data']
        gt_data = blob['gt_density']
        full_imgname = blob['fname']
        # certified input: randomly ablate input pixels before the forward pass
        im_data = torch.from_numpy(im_data).type(dtype)
        im_data = im_data.to(device)
        im_data = random_mask_batch_one_sample(im_data, keep, reuse_noise=True)
        im_data = Variable(im_data)
        gt_data = torch.from_numpy(gt_data).type(dtype)
        gt_data = gt_data.to(device)
        gt_data = Variable(gt_data)
        density_map = net(im_data, gt_data)
        density_map = density_map.data.cpu().numpy()
        im_data = im_data.data.cpu().numpy()
        gt_data = gt_data.data.cpu().numpy()

        tgt_img = gt_data[0][0]
        plt.imsave('./results_DA_ablated/images_gt/IMG_{}.png'.format(full_imgname),
                   tgt_img, format='png', cmap='gray')
        adv_tgt_img = im_data[0][0]
        plt.imsave('./results_DA_ablated/images_adv/IMG_{}.png'.format(full_imgname),
                   adv_tgt_img, format='png', cmap=plt.cm.jet)
        adv_out = density_map[0][0]
        plt.imsave('./results_DA_ablated/density_map_adv/IMG_{}.png'.format(full_imgname),
                   adv_out, format='png', cmap='gray')

        et_count = np.sum(density_map)
        gt_count = np.sum(gt_data)
        bias = abs(et_count - gt_count)
        mae += abs(gt_count - et_count)
        mse += (gt_count - et_count) * (gt_count - et_count)
        if bias < 10:
            correct += 1
        total += 1

    accuracy = (correct / total) * 100.0
    print("correct: ", correct)
    print("total: ", total)
    mae = mae / data_loader_DA.get_num_samples()
    mse = np.sqrt(mse / data_loader_DA.get_num_samples())
    print("test_ablated_results: ")
    print('\nMAE: %0.2f, MSE: %0.2f' % (mae, mse))
    print("test_ablated_accuracy: ", accuracy)

    # save output images
    if not os.path.exists('./results_DA_normal'):
        os.mkdir('./results_DA_normal')
    if not os.path.exists('./results_DA_normal/density_map_adv'):
        os.mkdir('./results_DA_normal/density_map_adv')
    if not os.path.exists('./results_DA_normal/images_gt'):
        os.mkdir('./results_DA_normal/images_gt')

    total = 0
    correct = 0
    mae = 0.0
    mse = 0.0

    # normal (un-ablated) test
    for blob in data_loader_DA:
        im_data = blob['data']
        gt_data = blob['gt_density']
        full_imgname = blob['fname']
        tgt_img = gt_data[0][0]
        plt.imsave('./results_DA_normal/images_gt/{}'.format(full_imgname),
                   tgt_img, format='png', cmap='gray')
        im_data = torch.from_numpy(im_data).type(dtype)
        im_data = im_data.to(device)
        gt_data = torch.from_numpy(gt_data).type(dtype)
        gt_data = gt_data.to(device)
        gt_data = Variable(gt_data)
        density_map = net(im_data, gt_data)
        density_map = density_map.data.detach().cpu().numpy()
        gt_data = gt_data.data.detach().cpu().numpy()
        adv_out = density_map[0][0]
        plt.imsave('./results_DA_normal/density_map_adv/{}'.format(full_imgname),
                   adv_out, format='png', cmap='gray')
        gt_count = np.sum(gt_data)
        et_count = np.sum(density_map)
        bias = abs(gt_count - et_count)
        mae += abs(gt_count - et_count)
        mse += (gt_count - et_count) * (gt_count - et_count)
        # !!! the exact criterion for counting a prediction as "correct" is still TBD !!!
        if bias < 10:
            correct += 1
        total += 1

    accuracy = (correct / total) * 100.0
    print("correct: ", correct)
    print("total: ", total)
    mae = mae / data_loader_DA.get_num_samples()
    mse = np.sqrt(mse / data_loader_DA.get_num_samples())
    print("test_normal_result: ")
    print('\nMAE: %0.2f, MSE: %0.2f' % (mae, mse))
    print("normal_test_accuracy: ", accuracy)
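# A minimal driver sketch tying the two routines together. The epoch count
# and test schedule here are assumptions, not taken from the original script:
if __name__ == '__main__':
    end_epoch = 200  # assumed number of training epochs
    for epoch in range(end_epoch):
        loss = train_DA(epoch)
        print('epoch %d, train loss: %.4f' % (epoch, loss))
        if epoch % 2 == 0:
            test_DA(epoch)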
    et_count = np.sum(density_map)
    y_true.append(gt_count)
    y_pred.append(et_count)
    y_diff.append(gt_count - et_count)
    mape += abs(gt_count - et_count) / gt_count
    mae += abs(gt_count - et_count)
    mse += (gt_count - et_count) * (gt_count - et_count)
    precise += et_count / gt_count
    if vis:
        utils.display_results(im_data, gt_data, density_map)
    if save_output:
        utils.save_density_map(
            density_map, output_dir,
            'output_' + blob['fname'].split('.')[0] + '.png')

u = np.sum([i ** 2 for i in y_diff])
v = np.sum([(i - np.mean(y_true)) ** 2 for i in y_true])
R2 = 1 - (u / v)
mae = mae / data_loader.get_num_samples()
mse = np.sqrt(mse / data_loader.get_num_samples())
mape = mape / data_loader.get_num_samples()
precise = precise / data_loader.get_num_samples()
print('\nMAE: %0.2f, MSE: %0.2f, MAPE: %0.2f, R2: %0.2f, precise: %f' %
      (mae, mse, mape, R2, precise))
f = open(file_results, 'w')
f.write('MAE: %0.2f, MSE: %0.2f, MAPE: %0.2f, R2: %0.2f, precise: %f' %
        (mae, mse, mape, R2, precise))
f.close()
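# R2 above is the coefficient of determination, R2 = 1 - SS_res / SS_tot, with
# SS_res = sum((y_true - y_pred)^2) and SS_tot = sum((y_true - mean(y_true))^2).
# A tiny self-contained check with made-up counts (hypothetical numbers):
import numpy as np

y_true_demo = np.array([100.0, 150.0, 200.0])
y_pred_demo = np.array([110.0, 140.0, 195.0])
ss_res = np.sum((y_true_demo - y_pred_demo) ** 2)          # 225.0
ss_tot = np.sum((y_true_demo - y_true_demo.mean()) ** 2)   # 5000.0
print('R2 =', 1 - ss_res / ss_tot)  # -> 0.955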
                              scaling=scaling)
mae = 0.0
mse = 0.0
num = 0
for blob in data_loader:
    num += 1
    im_data = blob['data']
    gt_data = blob['gt_density']
    density_map = net(im_data)
    density_map = density_map.data.cpu().numpy()
    gt_count = np.sum(gt_data)
    et_count = np.sum(density_map)
    mae += abs(gt_count - et_count)
    mse += (gt_count - et_count) * (gt_count - et_count)
    if vis:
        utils.display_results(im_data, gt_data, density_map)
    if save_output:
        utils.save_density_map(
            density_map, output_dir,
            'output_' + blob['fname'].split('.')[0] + '.png')
    if num % 100 == 0:
        print('%d/%d' % (num, data_loader.get_num_samples()))
mae = mae / data_loader.get_num_samples()
mse = np.sqrt(mse / data_loader.get_num_samples())
print('MAE: %0.2f, MSE: %0.2f' % (mae, mse))
f = open(file_results, 'w')
f.write('MAE: %0.2f, MSE: %0.2f' % (mae, mse))
f.close()