import itertools
import os

import numpy as np
import torch
from PIL import Image

# NOTE: Timer, dgen, density_gen, network, log_print and the argparse
# namespaces `opt`/`args` are project-local names assumed to be imported or
# defined elsewhere in this repo.


def test_model_origin(net,
                      data_loader,
                      save_output=False,
                      save_path=None,
                      test_fixed_size=-1,
                      test_batch_size=1,
                      gpus=None):
    timer = Timer()
    timer.tic()
    net.eval()
    mae = 0.0
    mse = 0.0
    detail = ''
    if save_output:
        print(save_path)
    for i, blob in enumerate(data_loader.get_loader(test_batch_size)):
        if (i * len(gpus) + 1) % 100 == 0:
            print("testing %d" % (i + 1))
        if save_output:
            index, fname, data, mask, gt_dens, gt_count = blob
        else:
            index, fname, data, mask, gt_count = blob
        with torch.no_grad():
            dens = net(data)
        if save_output:
            # Undo the ImageNet normalization so the saved image is viewable.
            image = data.squeeze_().mul_(torch.Tensor([0.229, 0.224, 0.225]).view(3, 1, 1)) \
                .add_(torch.Tensor([0.485, 0.456, 0.406]).view(3, 1, 1)).data.cpu().numpy()
            dgen.save_image(image.transpose((1, 2, 0)) * 255.0, save_path,
                            fname[0].split('.')[0] + "_0_img.png")
            gt_dens = gt_dens.data.cpu().numpy()
            density_map = dens.data.cpu().numpy()
            dgen.save_density_map(gt_dens.squeeze(), save_path,
                                  fname[0].split('.')[0] + "_1_gt.png")
            dgen.save_density_map(density_map.squeeze(), save_path,
                                  fname[0].split('.')[0] + "_2_et.png")
            _gt_count = gt_dens.sum().item()
            del gt_dens
        gt_count = gt_count.item()
        et_count = dens.sum().item()  # predicted count = integral of density
        del data, dens
        detail += "index: {}; fname: {}; gt: {}; et: {};\n".format(
            i, fname[0].split('.')[0], gt_count, et_count)
        mae += abs(gt_count - et_count)
        mse += (gt_count - et_count) * (gt_count - et_count)
    mae = mae / len(data_loader)
    mse = np.sqrt(mse / len(data_loader))
    duration = timer.toc(average=False)
    print("testing time: %d" % duration)
    return mae, mse, detail
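# A minimal, hedged usage sketch for the evaluation loop above. `net` and
# `loader` are placeholders for whatever model wrapper and dataset loader the
# rest of this repo constructs; only the call signature of test_model_origin
# comes from this file.
def _example_test_origin(net, loader):
    mae, mse, detail = test_model_origin(net,
                                         loader,
                                         save_output=True,
                                         save_path='output/density_maps',
                                         test_batch_size=1,
                                         gpus=[0])
    print('MAE %.2f, MSE %.2f' % (mae, mse))
    return detail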
        ' gt_cnt: %s,' % "{}".format(["%.1f" % gt_count.max(),
                                      "%.1f" % gt_count.mean(),
                                      "%.1f" % gt_count.min()]) + \
        ' et_cnt: %s,' % "{}".format(["%.1f" % et_count.max(),
                                      "%.1f" % et_count.mean(),
                                      "%.1f" % et_count.min()]) + \
        ' loss: %e' % float(loss_value)
    log_print(log_text, opt)
    re_cnt = True

    if opt.use_tensorboard:
        vis_exp.add_scalar_value('train_raw_loss', loss_value, step=step_cnt)

    # Save the training image patch, its ground-truth density map patch, and
    # the predicted density map patch before and after the backward step.
    if step_cnt % save_interval == 0:
        for i in range(density_map.shape[0]):
            density_gen.save_image(
                raw_patch[i], opt.expr_dir + './sup/',
                'img_step%d_%d_0data.jpg' % (step_cnt, i))
            density_gen.save_density_map(
                gt_data[i], opt.expr_dir + "./sup/",
                'img_step%d_%d_1previous.jpg' % (step_cnt, i))
            density_gen.save_density_map(
                density_map[i], opt.expr_dir + "./sup/",
                'img_step%d_%d_2now.jpg' % (step_cnt, i))
        for i in range(density_map_after.shape[0]):
            density_gen.save_density_map(
                density_map_after[i], opt.expr_dir + "./sup/",
                'img_step%d_%d_3after.jpg' % (step_cnt, i))

    if re_cnt:
        t.tic()
        re_cnt = False
def test_model_patches(net,
                       data_loader,
                       save_output=False,
                       save_path=None,
                       test_fixed_size=-1,
                       test_batch_size=1,
                       gpus=None):
    timer = Timer()
    timer.tic()
    net.eval()
    mae = 0.0
    mse = 0.0
    detail = ''
    if save_output:
        print(save_path)
    for i, blob in enumerate(data_loader.get_loader(1)):
        if (i + 1) % 10 == 0:
            print("testing %d" % (i + 1))
        if save_output:
            index, fname, data, mask, gt_dens, gt_count = blob
        else:
            index, fname, data, mask, gt_count = blob
        data = data.squeeze_()
        if len(data.shape) == 3:
            # The image is smaller than the crop size, so it is a single patch.
            data = data.unsqueeze_(dim=0)
        mask = mask.squeeze_()
        num_patch = len(data)
        # (start, end) index pairs that split the patches into mini-batches;
        # the trailing [num_patch] closes the final, possibly short, batch.
        batches = zip(
            [i * test_batch_size
             for i in range(num_patch // test_batch_size +
                            int(num_patch % test_batch_size != 0))],
            [(i + 1) * test_batch_size
             for i in range(num_patch // test_batch_size)] + [num_patch])
        with torch.no_grad():
            dens_patch = []
            for batch in batches:
                bat = data[slice(*batch)]
                dens = net(bat).cpu()
                dens_patch += [dens]
        # NOTE: this function reads the module-level `args` namespace rather
        # than its own test_fixed_size parameter.
        if args.test_fixed_size != -1:
            H, W = mask.shape
            _, _, fixed_size = data[0].shape
            assert args.test_fixed_size == fixed_size
            # Stitch the per-patch density maps back onto the padded canvas
            # (itertools.product walks the grid column by column), then crop
            # to the valid region encoded by the mask.
            density_map = torch.zeros((H, W))
            for dens_slice, (x, y) in zip(
                    itertools.chain(*dens_patch),
                    itertools.product(range(W // fixed_size),
                                      range(H // fixed_size))):
                density_map[y * fixed_size:(y + 1) * fixed_size,
                            x * fixed_size:(x + 1) * fixed_size] = dens_slice
            H = mask.sum(dim=0).max().item()
            W = mask.sum(dim=1).max().item()
            density_map = density_map.masked_select(mask).view(H, W)
        else:
            density_map = dens_patch[0]
        gt_count = gt_count.item()
        et_count = density_map.sum().item()
        if save_output:
            # Undo the ImageNet normalization so the saved image is viewable.
            image = data.mul_(torch.Tensor([0.229, 0.224, 0.225]).view(3, 1, 1)) \
                .add_(torch.Tensor([0.485, 0.456, 0.406]).view(3, 1, 1))
            if args.test_fixed_size != -1:
                H, W = mask.shape
                _, _, fixed_size = data[0].shape
                assert args.test_fixed_size == fixed_size
                # Reassemble the image patches the same way as the density map.
                initial_img = torch.zeros((3, H, W))
                for img_slice, (x, y) in zip(
                        image,
                        itertools.product(range(W // fixed_size),
                                          range(H // fixed_size))):
                    initial_img[:, y * fixed_size:(y + 1) * fixed_size,
                                x * fixed_size:(x + 1) * fixed_size] = img_slice
                H = mask.sum(dim=0).max().item()
                W = mask.sum(dim=1).max().item()
                initial_img = initial_img.masked_select(mask).view(3, H, W)
                image = initial_img
            image = image.data.cpu().numpy()
            dgen.save_image(image.transpose((1, 2, 0)) * 255.0, save_path,
                            fname[0].split('.')[0] + "_0_img.png")
            gt_dens = gt_dens.data.cpu().numpy()
            density_map = density_map.data.cpu().numpy()
            dgen.save_density_map(gt_dens.squeeze(), save_path,
                                  fname[0].split('.')[0] + "_1_gt.png")
            dgen.save_density_map(density_map.squeeze(), save_path,
                                  fname[0].split('.')[0] + "_2_et.png")
            del gt_dens
        del data, dens
        detail += "index: {}; fname: {}; gt: {}; et: {};\n".format(
            i, fname[0].split('.')[0], gt_count, et_count)
        mae += abs(gt_count - et_count)
        mse += (gt_count - et_count) * (gt_count - et_count)
    mae = mae / len(data_loader)
    mse = np.sqrt(mse / len(data_loader))
    duration = timer.toc(average=False)
    print("testing time: %d" % duration)
    return mae, mse, detail
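# Self-contained sketch of the stitch-and-crop trick above, on dummy data.
# Everything here is local to the demo; only the indexing pattern (column-major
# patch order, masked_select crop) mirrors test_model_patches.
def _demo_stitch():
    fixed_size = 4
    pad_h, pad_w = 8, 12        # padded canvas, multiples of fixed_size
    valid_h, valid_w = 6, 10    # valid (unpadded) region kept by the mask
    mask = torch.zeros(pad_h, pad_w, dtype=torch.bool)
    mask[:valid_h, :valid_w] = True
    # One constant value per patch stands in for a predicted density patch.
    values = torch.arange((pad_h // fixed_size) * (pad_w // fixed_size),
                          dtype=torch.float32)
    canvas = torch.zeros(pad_h, pad_w)
    for val, (x, y) in zip(
            values,
            itertools.product(range(pad_w // fixed_size),
                              range(pad_h // fixed_size))):
        canvas[y * fixed_size:(y + 1) * fixed_size,
               x * fixed_size:(x + 1) * fixed_size] = val
    h = mask.sum(dim=0).max().item()  # valid height: True count per column
    w = mask.sum(dim=1).max().item()  # valid width: True count per row
    cropped = canvas.masked_select(mask).view(h, w)
    assert cropped.shape == (valid_h, valid_w)
    return cropped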
def test_model_origin(net,
                      data_loader,
                      save_output=False,
                      save_path=None,
                      test_fixed_size=-1,
                      test_batch_size=1,
                      gpus=None,
                      args=None):
    timer = Timer()
    timer.tic()
    net.eval()
    mae = 0.0
    mse = 0.0
    detail = ''
    if args.save_txt:
        save_txt_path = save_path.replace('density_maps', 'loc_txt_test')
        if not os.path.exists(save_txt_path):
            os.mkdir(save_txt_path)
        record = open(
            save_txt_path +
            '/DLA_loc_test_thr_{:.02f}.txt'.format(args.det_thr), 'w+')
        record2 = open(save_txt_path + '/DLA_cnt_test_den.txt', 'w+')
    if save_output:
        print(save_path)
    for i, blob in enumerate(
            data_loader.get_loader(test_batch_size,
                                   num_workers=args.num_workers)):
        if (i * len(gpus) + 1) % 100 == 0:
            print("testing %d" % (i + 1))
        if save_output:
            index, fname, data, mask, gt_dens, gt_count = blob
        else:
            index, fname, data, mask, gt_count = blob
        if not args.test_patch:
            with torch.no_grad():
                dens, dm = net(data)
                dens = dens.sigmoid_()
                # NMS keeps only the local peaks of the localization map.
                dens_nms = network._nms(dens.detach())
                dens_nms = dens_nms.data.cpu().numpy()
                dm = dm.data.cpu().numpy()
        else:
            # TODO: patch-wise testing
            dens, dens_nms, dm = test_patch(data)
            dens_nms = dens_nms.data.cpu().numpy()
            dm = dm.data.cpu().numpy()
        dm[dm < 0] = 0.0
        gt_count = gt_count.item()
        # et_count = dens.sum().item()
        # Detection-style count: peaks above the detection threshold.
        et_count = np.sum(
            dens_nms.reshape(test_batch_size, -1) >= args.det_thr,
            axis=-1)[0]
        # Regression-style count: integral of the density map.
        et_count_dm = np.sum(dm.reshape(test_batch_size, -1), axis=-1)[0]
        if save_output:
            # Undo the ImageNet normalization so the saved image is viewable.
            image = data.clone().squeeze_().mul_(torch.Tensor([0.229, 0.224, 0.225]).view(3, 1, 1)) \
                .add_(torch.Tensor([0.485, 0.456, 0.406]).view(3, 1, 1)).data.cpu().numpy()
            dgen.save_image(image.transpose((1, 2, 0)) * 255.0, save_path,
                            fname[0].split('.')[0] + "_0_img.jpg")
            gt_dens = gt_dens.data.cpu().numpy()
            density_map = dens.data.cpu().numpy()
            dgen.save_density_map(gt_dens.squeeze(), save_path,
                                  fname[0].split('.')[0] + "_1_gt.jpg",
                                  gt_count)
            dgen.save_density_map(density_map.squeeze(), save_path,
                                  fname[0].split('.')[0] + "_2_et.jpg")
            dens_mask = dens_nms >= args.det_thr
            dgen.save_heatmep_pred(dens_mask.squeeze(), save_path,
                                   fname[0].split('.')[0] + "_3_et.jpg",
                                   et_count)
            _gt_count = gt_dens.sum().item()
            del gt_dens
        if args.save_txt:
            # Map detected peak coordinates back to the original image size.
            ori_img = Image.open(
                os.path.join(data_loader.dataloader.image_path, fname[0]))
            ori_w, ori_h = ori_img.size
            h, w = data.shape[2], data.shape[3]
            ratio_w = float(ori_w) / w
            ratio_h = float(ori_h) / h
            dens_nms[dens_nms >= args.det_thr] = 1
            dens_nms[dens_nms < args.det_thr] = 0
            ids = np.array(np.where(dens_nms == 1))  # y, x
            ori_ids_y = ids[2, :] * ratio_h + ratio_h / 2
            ori_ids_x = ids[3, :] * ratio_w + ratio_w / 2
            ids = np.vstack((ori_ids_x, ori_ids_y)).astype(np.int16)  # x, y
            loc_str = ''
            for i_id in range(ids.shape[1]):
                loc_str = loc_str + ' ' + str(ids[0][i_id]) + ' ' + str(
                    ids[1][i_id])  # x, y
            # No trailing newline on the last record.
            if i == len(data_loader) - 1:
                record.write('{filename} {pred:d}{loc_str}'.format(
                    filename=fname[0].split('.')[0],
                    pred=et_count,
                    loc_str=loc_str))
                record2.write('{filename} {pred:0.2f}'.format(
                    filename=fname[0].split('.')[0],
                    pred=float(et_count_dm)))
            else:
                record.write('{filename} {pred:d}{loc_str}\n'.format(
                    filename=fname[0].split('.')[0],
                    pred=et_count,
                    loc_str=loc_str))
                record2.write('{filename} {pred:0.2f}\n'.format(
                    filename=fname[0].split('.')[0],
                    pred=float(et_count_dm)))
        del data, dens
        detail += "index: {}; fname: {}; gt: {}; et: {}; dif: {};\n".format(
            i, fname[0].split('.')[0], gt_count, et_count,
            gt_count - et_count)
        mae += abs(gt_count - et_count)
        mse += (gt_count - et_count) * (gt_count - et_count)
    mae = mae / len(data_loader)
    mse = np.sqrt(mse / len(data_loader))
    duration = timer.toc(average=False)
    if args.save_txt:
        record.close()
        record2.close()
    print("testing time: %d" % duration)
    return mae, mse, detail
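# Hedged sketch of the argparse namespace the tester above dereferences. Only
# the attribute names (save_txt, det_thr, num_workers, test_patch) come from
# the code; the flags, types, and defaults below are assumptions.
def _example_args():
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--det_thr', type=float, default=0.5)   # peak detection threshold (assumed default)
    parser.add_argument('--save_txt', action='store_true')      # write localization txt files
    parser.add_argument('--num_workers', type=int, default=4)   # DataLoader workers (assumed default)
    parser.add_argument('--test_patch', action='store_true')    # patch-wise testing branch (TODO above)
    return parser.parse_args([])

# e.g. mae, mse, detail = test_model_origin(net, loader, gpus=[0],
#                                           save_path='output/density_maps',
#                                           args=_example_args())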