experiment.log_text(args.note) DATA_PATH = args.input TRAIN_PATH = os.path.join(DATA_PATH, "train_data") TEST_PATH = os.path.join(DATA_PATH, "test_data") dataset_name = args.datasetname if dataset_name=="shanghaitech": print("will use shanghaitech dataset with crop ") elif dataset_name == "shanghaitech_keepfull": print("will use shanghaitech_keepfull") else: print("cannot detect dataset_name") print("current dataset_name is ", dataset_name) # create list train_list = create_image_list(TRAIN_PATH) test_list = create_image_list(TEST_PATH) # create data loader train_loader, train_loader_for_eval, test_loader = get_dataloader(train_list, train_list, test_list, dataset_name=dataset_name, batch_size=args.batch_size) print("len train_loader ", len(train_loader)) # model model_name = args.model experiment.log_other("model", model_name) if model_name == "M1": model = M1() elif model_name == "M2": model = M2() elif model_name == "M3":
def visualize_evaluation_shanghaitech_keepfull(model, args):
    """Render ground-truth (and optionally predicted) density maps for a test set.

    For every sample produced by the test loader this writes the ground-truth
    density map to ``label_<name>.png`` and, when a model is supplied, the x8
    enlarged predicted density map to ``pred_<name>.png``.  One line per
    sample (name, density-map count, gt count, and — with a model — predicted
    count) is appended to the meta-data log file, and MAE/MSE over the whole
    set are printed at the end.

    :param model: model with parameters; moved to CUDA and put in eval mode
        here.  If None, only ground-truth maps are written and the error
        metrics stay at 0.
    :param args: namespace providing ``input`` (dataset root), ``output``
        (folder for rendered maps) and ``meta_data`` (log file path).
    :return: None
    """
    if model is not None:
        model = model.cuda()
        model.eval()

    saved_folder = args.output
    os.makedirs(saved_folder, exist_ok=True)

    train_list, val_list = get_train_val_list(args.input, test_size=0.2)
    test_list = create_image_list(args.input)
    train_loader, val_loader, test_loader = get_dataloader(
        train_list, val_list, test_list,
        dataset_name="shanghaitech_keepfull_r50",
        visualize_mode=False, debug=True)
    # NOTE(review): the original primed iter(train_loader) and discarded the
    # batch; that consumed training data for no effect and was removed.

    mae_s = 0.0
    mse_s = 0.0
    n = 0  # BUG FIX: sample counter was never incremented -> ZeroDivisionError

    # BUG FIX: open the log via a context manager so it is closed on error.
    with open(args.meta_data, "w") as log_f, torch.no_grad():
        for img, gt_density, debug_info in test_loader:
            gt_count = debug_info["p_count"]
            file_name = debug_info["name"]
            file_name_only = file_name[0].split(".")[0]
            print(file_name_only)

            save_path = os.path.join(
                saved_folder, "label_" + file_name_only + ".png")
            save_pred_path = os.path.join(
                saved_folder, "pred_" + file_name_only + ".png")
            save_density_map(gt_density.numpy()[0], save_path)

            # BUG FIX: gt_count_num was read in the error computation before
            # it was assigned (NameError whenever a model was given); compute
            # both counts up front.
            gt_count_num = gt_count.item()
            density_map_count_num = gt_density.detach().sum().item()

            if model is not None:
                pred = model(img.cuda())
                predicted_density_map = pred.detach().cpu().clone().numpy()
                # Output is spatially downsampled x8; enlarge back and divide
                # by 64, presumably to keep the total count unchanged — TODO
                # confirm against the training-time density-map scaling.
                predicted_density_map_enlarge = cv2.resize(
                    np.squeeze(predicted_density_map[0][0]),
                    (int(predicted_density_map.shape[3] * 8),
                     int(predicted_density_map.shape[2] * 8)),
                    interpolation=cv2.INTER_CUBIC) / 64
                save_density_map(predicted_density_map_enlarge, save_pred_path)
                print("pred " + save_pred_path + " value "
                      + str(predicted_density_map.sum()))
                print("cont compare " + str(predicted_density_map.sum())
                      + " " + str(predicted_density_map_enlarge.sum()))
                print("shape compare " + str(predicted_density_map.shape)
                      + " " + str(predicted_density_map_enlarge.shape))
                pred_count_num = pred.detach().cpu().sum().item()
                error = abs(pred_count_num - gt_count_num)
                log_str = (str(file_name_only) + " "
                           + str(density_map_count_num) + " "
                           + str(gt_count_num) + " " + str(pred_count_num))
            else:
                error = 0
                log_str = (str(file_name_only) + " "
                           + str(density_map_count_num) + " "
                           + str(gt_count_num))

            mae_s += error
            mse_s += error * error
            n += 1  # BUG FIX: count processed samples

            print(log_str)
            log_f.write(log_str + "\n")

    # BUG FIX: guard against an empty test set before dividing by n.
    if n == 0:
        print("no test samples found; skipping MAE/MSE")
        return
    mae = mae_s / n
    mse = math.sqrt(mse_s / n)
    print("mae ", mae)
    print("mse", mse)