def my_demo(file_list, model_path):
    """Run demo inference on each image and benchmark the forward pass.

    Fixes: the per-forward time was divided by 100.0 while the benchmark
    loop runs 1000 iterations, overstating the time 10x; the divisor now
    matches the loop count. The unsupported-net message also claimed only
    Res101_SFCN works although LCN is accepted too.

    Args:
        file_list: iterable of image file paths.
        model_path: path to the checkpoint loaded into the network.
    """
    supported_nets = ['Res101_SFCN', 'LCN']
    if cfg.NET not in supported_nets:
        print('net is not in %s, demo does not work' % supported_nets)
        return

    net = CrowdCounter(cfg.GPU_ID, cfg.NET)
    new_weight_dict = torch.load(model_path)
    # Single-GPU checkpoints need their parameter keys renamed first.
    if cfg.GPU_ID == [0]:
        new_weight_dict = re_name_weight(new_weight_dict)
    net.load_state_dict(new_weight_dict)
    net.cuda()
    net.eval()
    print('net eval is ok=================')

    f1 = plt.figure(1)
    n_runs = 1000  # benchmark iterations per image
    for filename in file_list:
        print(filename)
        img = Image.open(filename)
        if img.mode == 'L':
            img = img.convert('RGB')
        img = img_transform(img)
        with torch.no_grad():
            img = Variable(img[None, :, :, :]).cuda()
            start = time.time()
            for _ in range(n_runs):
                pred_map = net.test_forward(img)
                # Result discarded; presumably forces a GPU->CPU sync so the
                # wall-clock timing is meaningful — TODO confirm.
                pred_map.cpu()
            end = time.time()
            density_pre = pred_map.squeeze().cpu().numpy() / 100.
            num_people = int(np.sum(density_pre))
            print('in this picture,there are ', num_people, ' people')
            print('Do once forward need {:.3f}ms '.format(
                (end - start) * 1000 / n_runs))
def main(params):
    """Predict density maps for every video directory and save one
    compressed .npz of stacked per-frame maps per video.

    params keys used: 'image_size' (H, W), 'model', 'model_path',
    'dataset_path', 'outputdir_prefix', 'batch_size'.
    """
    height, width = params['image_size']
    norm_mean = [0.452016860247, 0.447249650955, 0.431981861591]
    norm_std = [0.23242045939, 0.224925786257, 0.221840232611]
    preprocess = transforms.Compose([
        transforms.Resize((height, width)),
        transforms.ToTensor(),
        transforms.Normalize(norm_mean, norm_std),
    ])

    # Counting network on GPU 0, inference mode.
    model = CrowdCounter([0], params['model'])
    model.load_state_dict(torch.load(params['model_path']))
    model.cuda()
    model.eval()

    for video_dir in np.sort(glob(params['dataset_path'] + '/*')):
        print(video_dir)
        out_dir = params['outputdir_prefix'] + '/%d_%d/' % (height, width)
        os.makedirs(out_dir, exist_ok=True)

        # Preload every frame of this video into one big tensor.
        frame_paths = np.sort(glob(video_dir + '/*.jpg'))
        frames = torch.zeros(len(frame_paths), 3, height, width)
        for idx, path in enumerate(tqdm(frame_paths)):
            frames[idx] = preprocess(Image.open(path))

        loader = torch.utils.data.DataLoader(
            torch.utils.data.TensorDataset(frames,
                                           torch.zeros(len(frame_paths))),
            batch_size=params['batch_size'], shuffle=False)

        chunks = []
        for batch, _ in tqdm(loader):
            out = model.test_forward(batch.cuda()).squeeze().detach().cpu().numpy()
            if out.ndim == 2:  # single-frame batch: restore the leading axis
                out = out[np.newaxis]
            chunks.append(out)

        np.savez_compressed(out_dir + os.path.basename(video_dir),
                            np.concatenate(chunks))
def test(file_list, model_path):
    """Predict a density map per image; save a .mat and a jet-colormap PNG.

    Fixes: the block ended in an unterminated triple-quoted chunk of
    commented-out ground-truth/diff plotting code, which made the file
    unparseable; the dead commented-out code is removed. Unused `gts`/`preds`
    accumulators are dropped.

    Args:
        file_list: image file names relative to dataRoot/img.
        model_path: checkpoint path (loaded with strict=False).
    """
    net = CrowdCounter(cfg.GPU_ID, cfg.NET)
    net.load_state_dict(torch.load(model_path), strict=False)
    net.cuda()
    net.eval()

    f1 = plt.figure(1)  # one figure reused across images
    for filename in file_list:
        print(filename)
        imgname = dataRoot + '/img/' + filename
        filename_no_ext = filename.split('.')[0]

        img = Image.open(imgname)
        if img.mode == 'L':
            img = img.convert('RGB')
        img = img_transform(img)

        with torch.no_grad():
            img = Variable(img[None, :, :, :]).cuda()
            pred_map = net.test_forward(img)

        # Divide by 100 to undo the training-time scale factor
        # (presumably LOG_PARA — TODO confirm).
        sio.savemat(exp_name + '/pred/' + filename_no_ext + '.mat',
                    {'data': pred_map.squeeze().cpu().numpy() / 100.})

        pred_map = pred_map.cpu().data.numpy()[0, 0, :, :]
        pred = np.sum(pred_map) / 100.0
        pred_map = pred_map / np.max(pred_map + 1e-20)  # normalize for display

        # Borderless jet-colormap rendering of the predicted density.
        pred_frame = plt.gca()
        plt.imshow(pred_map, 'jet')
        pred_frame.axes.get_yaxis().set_visible(False)
        pred_frame.axes.get_xaxis().set_visible(False)
        pred_frame.spines['top'].set_visible(False)
        pred_frame.spines['bottom'].set_visible(False)
        pred_frame.spines['left'].set_visible(False)
        pred_frame.spines['right'].set_visible(False)
        plt.savefig(exp_name + '/' + filename_no_ext + '_pred_'
                    + str(float(pred)) + '.png',
                    bbox_inches='tight', pad_inches=0, dpi=150)
        plt.close()
def test(file_list, model_path):
    """RAZ_loc head-point localization; writes one submission line per image.

    Each image is processed in 576x768 tiles; tile logits are softmaxed into a
    head-probability map, spliced back to full size (overlaps averaged), peak
    filtered, and the surviving peak coordinates — rescaled to the original
    image resolution — are written to the submission text file.
    """
    net = CrowdCounter(cfg.GPU_ID, 'RAZ_loc')
    net.cuda()
    net.load_state_dict(torch.load(model_path))
    net.eval()
    gts = []    # unused here; kept from the evaluation template
    preds = []  # unused here; kept from the evaluation template
    record = open('submmited_raz_loc_0.5-0512.txt', 'w+')
    for infos in file_list:
        filename = infos.split()[0]
        imgname = os.path.join(dataRoot, 'img', filename + '.jpg')
        img = Image.open(imgname)
        # Original-resolution copy, used only to rescale predicted coordinates.
        ori_img = Image.open(os.path.join(ori_data, filename + '.jpg'))
        ori_w, ori_h = ori_img.size
        w, h = img.size
        ratio_w = ori_w / w
        ratio_h = ori_h / h
        if img.mode == 'L':
            img = img.convert('RGB')
        img = img_transform(img)[None, :, :, :]
        with torch.no_grad():
            img = Variable(img).cuda()
            # Tile the image into rh x rw crops; each crop records a full-size
            # mask of its footprint so overlapping regions can be averaged.
            crop_imgs, crop_masks = [], []
            b, c, h, w = img.shape
            rh, rw = 576, 768
            for i in range(0, h, rh):
                gis, gie = max(min(h - rh, i), 0), min(h, i + rh)
                for j in range(0, w, rw):
                    gjs, gje = max(min(w - rw, j), 0), min(w, j + rw)
                    crop_imgs.append(img[:, :, gis:gie, gjs:gje])
                    mask = torch.zeros(b, 1, h, w).cuda()
                    mask[:, :, gis:gie, gjs:gje].fill_(1.0)
                    crop_masks.append(mask)
            crop_imgs, crop_masks = map(lambda x: torch.cat(x, dim=0),
                                        (crop_imgs, crop_masks))
            # Forward one crop at a time (bz=1); keep the positive-class
            # (channel 1) softmax probability map of each crop.
            crop_preds = []
            nz, bz = crop_imgs.size(0), 1
            for i in range(0, nz, bz):
                gs, gt = i, min(nz, i + bz)
                crop_pred = net.test_forward(crop_imgs[gs:gt])
                crop_pred = F.softmax(crop_pred, dim=1).data[0, 1, :, :]
                crop_pred = crop_pred[None, :, :]
                crop_preds.append(crop_pred)
            crop_preds = torch.cat(crop_preds, dim=0)
            # Splice the crops back to the original size (same tiling order).
            idx = 0
            pred_map = torch.zeros(b, 1, h, w).cuda()
            for i in range(0, h, rh):
                gis, gie = max(min(h - rh, i), 0), min(h, i + rh)
                for j in range(0, w, rw):
                    gjs, gje = max(min(w - rw, j), 0), min(w, j + rw)
                    pred_map[:, :, gis:gie, gjs:gje] += crop_preds[idx]
                    idx += 1
            # For the overlapping area, compute the average value.
            mask = crop_masks.sum(dim=0).unsqueeze(0)
            pred_map = pred_map / mask
            # Smooth, then keep only 3x3 local maxima (non-maximum
            # suppression) above the 0.5 threshold; binarize the survivors.
            pred_map = F.avg_pool2d(pred_map, 3, 1, 1)
            maxm = F.max_pool2d(pred_map, 3, 1, 1)
            maxm = torch.eq(maxm, pred_map)
            pred_map = maxm * pred_map
            pred_map[pred_map < 0.5] = 0
            pred_map = pred_map.bool().long()
        pred_map = pred_map.cpu().data.numpy()[0, 0, :, :]
        ids = np.array(np.where(pred_map == 1))  # row 0 = y, row 1 = x
        ori_ids_y = ids[0, :] * ratio_h
        ori_ids_x = ids[1, :] * ratio_w
        ids = np.vstack((ori_ids_x, ori_ids_y)).astype(np.int16)  # x, y
        loc_str = ''
        for i_id in range(ids.shape[1]):
            loc_str = loc_str + ' ' + str(ids[0][i_id]) + ' ' + str(ids[1][i_id])  # x, y
        pred = ids.shape[1]  # head count = number of surviving peaks
        print(f'(unknown) {pred:d}{loc_str}', file=record)
        print(f'(unknown) {pred:d}')
    record.close()
def test(file_list, model_path):
    """CPU-only evaluation: predict a density map per image, append count
    stats to report.txt, save the raw map as .npy and a colormapped PNG.

    NOTE(review): pred_map is divided by 100 once when converted to numpy
    and the saved .npy divides by 100 again — looks like a double division
    by the training scale factor; confirm which scaling is intended.
    """
    f_out = open('report.txt', 'w')
    net = CrowdCounter()
    net.load_state_dict(
        torch.load(model_path, map_location=torch.device('cpu')))
    # net = tr_net.CNN()
    # net.load_state_dict(torch.load(model_path))
    net.eval()
    maes = []  # unused: ground truth is not loaded in this variant
    mses = []
    for filename in tqdm(file_list):
        imgname = dataRoot + '/img/' + filename
        filename_no_ext = filename.split('.')[0]
        # Ground-truth density loading intentionally disabled here.
        try:
            img = Image.open(imgname)
        except Exception as e:
            print(e)
            continue
        if img.mode == 'L':
            img = img.convert('RGB')
        # Pad the image on the right/bottom up to the standard size.
        wd_1, ht_1 = img.size
        # pdb.set_trace()
        if wd_1 < cfg.DATA.STD_SIZE[1]:
            dif = cfg.DATA.STD_SIZE[1] - wd_1
            img = ImageOps.expand(img, border=(0, 0, dif, 0), fill=0)
            pad = np.zeros([ht_1, dif])  # unused: den padding is disabled
        if ht_1 < cfg.DATA.STD_SIZE[0]:
            dif = cfg.DATA.STD_SIZE[0] - ht_1
            img = ImageOps.expand(img, border=(0, 0, 0, dif), fill=0)
            pad = np.zeros([dif, wd_1])  # unused: den padding is disabled
        img = img_transform(img)
        img = torch.Tensor(img[None, :, :, :])
        # forward (no no_grad guard here; net is in eval mode)
        pred_map = net.test_forward(img)
        # Divide by 100 — presumably the LOG_PARA training scale; TODO confirm.
        pred_map = pred_map.cpu().data.numpy()[0, 0, :, :] / 100.
        pred = np.sum(pred_map)
        print(filename, pred, pred_map.max(), file=f_out)
        # NOTE(review): second /100.0 on an already-divided map (see docstring).
        np.save(f'preds/pred_map_{filename_no_ext}_{str(float(pred))}.npy',
                pred_map / 100.0)
        # vis: crop the padding back off, then render with a colorbar.
        pred_map = pred_map[0:ht_1, 0:wd_1]
        plt.imshow(pred_map)
        plt.colorbar()
        plt.savefig(exp_name + '/' + filename_no_ext + '_pred_'
                    + str(float(pred)) + '.png',
                    bbox_inches='tight', pad_inches=0, dpi=150)
        plt.close()
        # (gt/diff visualization from the template removed — no GT here.)
    f_out.close()
def test(file_list, model_path):
    """Sliding-window counting: tile each image into 576x768 crops, forward
    each crop, splice the density maps back (averaging overlaps), and append
    the per-image count to submmited.txt.
    """
    net = CrowdCounter(cfg.GPU_ID, cfg.NET)
    net.cuda()
    net.load_state_dict(torch.load(model_path))
    net.eval()
    gts = []    # unused here; kept from the evaluation template
    preds = []  # unused here; kept from the evaluation template
    f = open(f'submmited.txt', 'w+')
    for infos in file_list:
        filename = infos[:-1]  # strip the trailing newline of the list entry
        imgname = os.path.join(dataRoot, 'img', filename + '.jpg')
        img = Image.open(imgname)
        if img.mode == 'L':
            img = img.convert('RGB')
        img = img_transform(img)[None, :, :, :]
        with torch.no_grad():
            img = Variable(img).cuda()
            # Tile into rh x rw crops, each with a full-size footprint mask.
            crop_imgs, crop_masks = [], []
            b, c, h, w = img.shape
            rh, rw = 576, 768
            for i in range(0, h, rh):
                gis, gie = max(min(h - rh, i), 0), min(h, i + rh)
                for j in range(0, w, rw):
                    gjs, gje = max(min(w - rw, j), 0), min(w, j + rw)
                    crop_imgs.append(img[:, :, gis:gie, gjs:gje])
                    mask = torch.zeros(b, 1, h, w).cuda()
                    mask[:, :, gis:gie, gjs:gje].fill_(1.0)
                    crop_masks.append(mask)
            crop_imgs, crop_masks = map(lambda x: torch.cat(x, dim=0),
                                        (crop_imgs, crop_masks))
            # Forward one crop at a time (bz=1).
            crop_preds = []
            nz, bz = crop_imgs.size(0), 1
            for i in range(0, nz, bz):
                gs, gt = i, min(nz, i + bz)
                crop_pred = net.test_forward(crop_imgs[gs:gt])
                crop_preds.append(crop_pred)
            crop_preds = torch.cat(crop_preds, dim=0)
            # Splice the crops back to the original size (same tiling order).
            idx = 0
            pred_map = torch.zeros(b, 1, h, w).cuda()
            for i in range(0, h, rh):
                gis, gie = max(min(h - rh, i), 0), min(h, i + rh)
                for j in range(0, w, rw):
                    gjs, gje = max(min(w - rw, j), 0), min(w, j + rw)
                    pred_map[:, :, gis:gie, gjs:gje] += crop_preds[idx]
                    idx += 1
            # For the overlapping area, compute the average value.
            mask = crop_masks.sum(dim=0).unsqueeze(0)
            pred_map = pred_map / mask
        pred_map = pred_map.cpu().data.numpy()[0, 0, :, :]
        pred = np.sum(pred_map) / LOG_PARA  # undo the training scale factor
        print(f'(unknown) {pred:.4f}', file=f)
        print(f'(unknown) {pred:.4f}')
    f.close()
# Fragment of a per-frame demo/visualization loop. Relies on names bound
# elsewhere in the script: imagename, density_map, gt, net, img_transform,
# cm (a matplotlib colormap — TODO confirm), fps, time.
img = Image.open(imagename)
img = img.resize((960, 544))
if img.mode == 'L':
    img = img.convert('RGB')
# Blend the density map over the frame (15% opacity) for an overlay image.
img_RGBA = img.convert("RGBA")
density_map = density_map.convert("RGBA")
new_img = Image.blend(img_RGBA, density_map, 0.15)
input_img = img_transform(img)
# Stamp the ground-truth count onto the frame.
d = ImageDraw.Draw(img)
d.text((10, 10), "Ground Truth:{:.1f}".format(gt), fill=(255, 0, 0))
with torch.no_grad():
    start_time = time.time()
    pred_map = net.test_forward(Variable(input_img[None, :, :, :]).cuda())
    elapsed_time = time.time() - start_time
    print('inference time:{}'.format(elapsed_time))
    fps += (1 / elapsed_time)  # accumulate instantaneous FPS
    pred_map = pred_map.cpu().data.numpy()[0, 0, :, :]
# Divide by 100 — presumably the LOG_PARA training scale; TODO confirm.
pred = np.sum(pred_map) / 100.0
print('pred:', pred)
pred_map = pred_map / np.max(pred_map + 1e-20)  # normalize for display
# Apply the colormap like a function to any array:
colored_image_prediction = cm(pred_map)
prediction = Image.fromarray(
    (colored_image_prediction[:, :, :3] * 255).astype(np.uint8))
# Stamp the predicted count onto the colormapped prediction image.
draw = ImageDraw.Draw(prediction)
draw.text((10, 10), "Prediction:{:.2f}".format(pred), fill=(255, 0, 0))
def test(file_list, model_path):
    """Evaluate on images with GT density CSVs; save gt/pred/diff PNGs and
    print the per-file results plus overall MAE/MSE.

    Fixes: Python-2 `print` statements (the rest of this file is Python 3,
    using f-strings and print()); deprecated `Variable(..., volatile=True)`
    replaced by the torch.no_grad() context.
    """
    net = CrowdCounter()
    net.load_state_dict(torch.load(model_path))
    # net = tr_net.CNN()
    # net.load_state_dict(torch.load(model_path))
    net.cuda()
    net.eval()
    maes = []
    mses = []
    for filename in file_list:
        print(filename)
        imgname = dataRoot + '/img/' + filename
        filename_no_ext = filename.split('.')[0]
        denname = dataRoot + '/den/' + filename_no_ext + '.csv'
        den = pd.read_csv(denname, sep=',', header=None).values
        den = den.astype(np.float32, copy=False)
        img = Image.open(imgname)
        if img.mode == 'L':
            img = img.convert('RGB')
        # Pad image and density map on the right/bottom to the standard size.
        wd_1, ht_1 = img.size
        # pdb.set_trace()
        if wd_1 < cfg.DATA.STD_SIZE[1]:
            dif = cfg.DATA.STD_SIZE[1] - wd_1
            img = ImageOps.expand(img, border=(0, 0, dif, 0), fill=0)
            pad = np.zeros([ht_1, dif])
            den = np.array(den)
            den = np.hstack((den, pad))
        if ht_1 < cfg.DATA.STD_SIZE[0]:
            dif = cfg.DATA.STD_SIZE[0] - ht_1
            img = ImageOps.expand(img, border=(0, 0, 0, dif), fill=0)
            pad = np.zeros([dif, wd_1])
            den = np.array(den)
            den = np.vstack((den, pad))
        img = img_transform(img)
        gt = np.sum(den)
        # forward (no_grad replaces the removed volatile=True flag)
        with torch.no_grad():
            img = Variable(img[None, :, :, :]).cuda()
            pred_map = net.test_forward(img)
        pred_map = pred_map.cpu().data.numpy()[0, 0, :, :]
        pred = np.sum(pred_map) / 100.0  # undo the 100x training scale
        maes.append(abs(pred - gt))
        mses.append((pred - gt) * (pred - gt))
        # vis: normalize both maps for display and crop the padding off.
        pred_map = pred_map / np.max(pred_map + 1e-20)
        pred_map = pred_map[0:ht_1, 0:wd_1]
        den = den / np.max(den + 1e-20)
        den = den[0:ht_1, 0:wd_1]
        den_frame = plt.gca()
        plt.imshow(den, 'jet')
        den_frame.axes.get_yaxis().set_visible(False)
        den_frame.axes.get_xaxis().set_visible(False)
        den_frame.spines['top'].set_visible(False)
        den_frame.spines['bottom'].set_visible(False)
        den_frame.spines['left'].set_visible(False)
        den_frame.spines['right'].set_visible(False)
        plt.savefig(exp_name + '/' + filename_no_ext + '_gt_'
                    + str(int(gt)) + '.png',
                    bbox_inches='tight', pad_inches=0, dpi=150)
        plt.close()
        # sio.savemat(exp_name+'/'+filename_no_ext+'_gt_'+str(int(gt))+'.mat',{'data':den})
        pred_frame = plt.gca()
        plt.imshow(pred_map, 'jet')
        pred_frame.axes.get_yaxis().set_visible(False)
        pred_frame.axes.get_xaxis().set_visible(False)
        pred_frame.spines['top'].set_visible(False)
        pred_frame.spines['bottom'].set_visible(False)
        pred_frame.spines['left'].set_visible(False)
        pred_frame.spines['right'].set_visible(False)
        plt.savefig(exp_name + '/' + filename_no_ext + '_pred_'
                    + str(float(pred)) + '.png',
                    bbox_inches='tight', pad_inches=0, dpi=150)
        plt.close()
        # sio.savemat(exp_name+'/'+filename_no_ext+'_pred_'+str(float(pred))+'.mat',{'data':pred_map})
        diff = den - pred_map
        diff_frame = plt.gca()
        plt.imshow(diff, 'jet')
        plt.colorbar()
        diff_frame.axes.get_yaxis().set_visible(False)
        diff_frame.axes.get_xaxis().set_visible(False)
        diff_frame.spines['top'].set_visible(False)
        diff_frame.spines['bottom'].set_visible(False)
        diff_frame.spines['left'].set_visible(False)
        diff_frame.spines['right'].set_visible(False)
        plt.savefig(exp_name + '/' + filename_no_ext + '_diff.png',
                    bbox_inches='tight', pad_inches=0, dpi=150)
        plt.close()
        # sio.savemat(exp_name+'/'+filename_no_ext+'_diff.mat',{'data':diff})
        print('[file %s]: [pred %.2f], [gt %.2f]' % (filename, pred, gt))
    print(np.average(np.array(maes)))
    print(np.sqrt(np.average(np.array(mses))))
def test(file_list, model_path):
    """Show GT and predicted density maps side by side for each image.

    Fixes: Python-2 `print` statements converted to print() (the rest of the
    file is Python 3); `print("gt_%f,et_%f", gt_count, pred_cnt)` never
    applied the format string — it printed the template and the raw values —
    now formatted with %; deprecated `Variable(..., volatile=True)` replaced
    by torch.no_grad().
    """
    net = CrowdCounter(cfg.GPU_ID, cfg.NET)
    net.load_state_dict(torch.load(model_path))
    net.cuda()
    net.eval()
    step = 0
    for filename in file_list:
        step = step + 1
        print(filename)
        imgname = dataRoot + '/img/' + filename
        filename_no_ext = filename.split('.')[0]
        denname = dataRoot + '/den/' + filename_no_ext + '.csv'
        den = pd.read_csv(denname, sep=',', header=None).values
        den = den.astype(np.float32, copy=False)
        img = Image.open(imgname)
        if img.mode == 'L':
            img = img.convert('RGB')
        # prepare (padding to a fixed size is intentionally disabled here)
        wd_1, ht_1 = img.size
        img = img_transform(img)
        with torch.no_grad():
            img = Variable(img[None, :, :, :]).cuda()
            pred_map = net.test_forward(img)
        pred_map = pred_map.cpu().data.numpy()[0, 0, :, :]
        gt_count = np.sum(den)
        # Divide by 2550 — presumably this setup's LOG_PARA; TODO confirm.
        pred_cnt = np.sum(pred_map) / 2550.0
        print("gt_%f,et_%f" % (gt_count, pred_cnt))
        # Normalize both maps for display and crop to the original size.
        den = den / np.max(den + 1e-20)
        den = den[0:ht_1, 0:wd_1]
        plt.figure("gt-den" + filename)
        plt.imshow(den)
        plt.show()
        pred_map = pred_map / np.max(pred_map + 1e-20)
        pred_map = pred_map[0:ht_1, 0:wd_1]
        plt.figure("pre-den" + filename)
        plt.imshow(pred_map)
        plt.show()
def test(file_list, model_path, roi):
    """Evaluate with an ROI: pad each image/GT map to the standard size,
    forward through the net, and save gt/pred visualizations and .mat files.

    Fixes: pandas `DataFrame.as_matrix()` (removed in pandas >= 1.0) replaced
    with `.values`, matching the other evaluation routines in this file;
    deprecated `Variable(..., volatile=True)` replaced by torch.no_grad();
    a trailing unterminated '''-commented pdb chunk (dead code) removed.
    """
    net = CrowdCounter(ce_weights=wts)
    net.load_state_dict(torch.load(model_path))
    # net = tr_net.CNN()
    # net.load_state_dict(torch.load(model_path))
    net.cuda()
    net.eval()
    for filename in file_list:
        imgname = dataRoot + '/img/' + filename
        filename_no_ext = filename.split('.')[0]
        denname = dataRoot + '/den/' + filename_no_ext + '.csv'
        den = pd.read_csv(denname, sep=',', header=None).values
        den = den.astype(np.float32, copy=False)
        img = Image.open(imgname)
        # Pad image and density map to the standard size.
        # NOTE(review): np.hstack/vstack padding assumes a single-channel
        # image (no RGB conversion happens here) — confirm inputs are gray.
        wd_1, ht_1 = img.size
        if wd_1 < cfg.DATA.STD_SIZE[1]:
            dif = cfg.DATA.STD_SIZE[1] - wd_1
            pad = np.zeros([ht_1, dif])
            img = np.array(img)
            den = np.array(den)
            img = np.hstack((img, pad))
            img = Image.fromarray(img.astype(np.uint8))
            den = np.hstack((den, pad))
        if ht_1 < cfg.DATA.STD_SIZE[0]:
            dif = cfg.DATA.STD_SIZE[0] - ht_1
            pad = np.zeros([dif, wd_1])
            img = np.array(img)
            den = np.array(den)
            # pdb.set_trace()
            img = np.vstack((img, pad))
            img = Image.fromarray(img.astype(np.uint8))
            den = np.vstack((den, pad))
        img = img_transform(img)
        gt = np.sum(den)
        # den = Image.fromarray(den)
        img = img * 255.  # net expects un-normalized [0, 255] input here
        with torch.no_grad():
            img = Variable(img[None, :, :, :]).cuda()
            # forward
            pred_map, pred_cls, pred_seg = net.test_forward(img, roi)
        pred_map = pred_map.cpu().data.numpy()[0, 0, :, :]
        pred = np.sum(pred_map)
        # Normalize both maps for display and crop the padding back off.
        pred_map = pred_map / np.max(pred_map + 1e-20)
        pred_map = pred_map[0:ht_1, 0:wd_1]
        den = den / np.max(den + 1e-20)
        den = den[0:ht_1, 0:wd_1]
        den_frame = plt.gca()
        plt.imshow(den)
        den_frame.axes.get_yaxis().set_visible(False)
        den_frame.axes.get_xaxis().set_visible(False)
        den_frame.spines['top'].set_visible(False)
        den_frame.spines['bottom'].set_visible(False)
        den_frame.spines['left'].set_visible(False)
        den_frame.spines['right'].set_visible(False)
        plt.savefig(exp_name + '/' + filename_no_ext + '_gt_'
                    + str(int(gt)) + '.png',
                    bbox_inches='tight', pad_inches=0, dpi=150)
        plt.close()
        sio.savemat(
            exp_name + '/' + filename_no_ext + '_gt_' + str(int(gt)) + '.mat',
            {'data': den})
        pred_frame = plt.gca()
        plt.imshow(pred_map)
        pred_frame.axes.get_yaxis().set_visible(False)
        pred_frame.axes.get_xaxis().set_visible(False)
        pred_frame.spines['top'].set_visible(False)
        pred_frame.spines['bottom'].set_visible(False)
        pred_frame.spines['left'].set_visible(False)
        pred_frame.spines['right'].set_visible(False)
        plt.savefig(exp_name + '/' + filename_no_ext + '_pred_'
                    + str(float(pred)) + '.png',
                    bbox_inches='tight', pad_inches=0, dpi=150)
        plt.close()
        sio.savemat(
            exp_name + '/' + filename_no_ext + '_pred_' + str(float(pred))
            + '.mat', {'data': pred_map})
def test(file_list, model_path):
    """CPU evaluation with gt/pred/diff visualizations and running MAE/MSE.

    NOTE(review): after the forward pass the predicted count is shifted
    toward the ground-truth count in fixed steps keyed on the gt-pred gap,
    which directly biases the reported MAE/MSE — confirm this is intentional
    before trusting the metrics.
    """
    net = CrowdCounter(cfg.GPU_ID, cfg.NET)
    net.load_state_dict(
        torch.load(model_path, map_location=torch.device("cpu")))
    net.to("cpu")
    # net.cuda()
    net.cpu()
    net.eval()
    f1 = plt.figure(1)
    difftotal = 0      # running sum of |gt - pred| (integerized)
    difftotalsqr = 0   # running sum of (gt - pred)^2
    gts = []           # unused; kept from the evaluation template
    preds = []         # unused; kept from the evaluation template
    counter = 0        # number of images processed so far
    for filename in file_list:
        print(filename)
        counter = counter + 1
        imgname = dataRoot + '/img/' + filename
        filename_no_ext = filename.split('.')[0]
        denname = dataRoot + '/den/' + filename_no_ext + '.csv'
        den = pd.read_csv(denname, sep=',', header=None).values
        den = den.astype(np.float32, copy=False)
        img = Image.open(imgname)
        if img.mode == 'L':
            img = img.convert('RGB')
        # img, den = val_main_transform(img, den)
        # img = random_crop(img, den, (576,768), 0)
        img = img_transform(img)
        gt = np.sum(den)
        with torch.no_grad():
            img = Variable(img[None, :, :, :]).cpu()
            pred_map = net.test_forward(img)
        # print(pred_map.size())
        sio.savemat(exp_name + '/pred/' + filename_no_ext + '.mat',
                    {'data': pred_map.squeeze().cpu().numpy() / 100.})
        sio.savemat(exp_name + '/gt/' + filename_no_ext + '.mat',
                    {'data': den})
        pred_map = pred_map.cpu().data.numpy()[0, 0, :, :]
        pred = np.sum(pred_map) / 100.0  # undo the 100x training scale
        d = int(gt) - int(pred)
        # print('DIFF Before : '+str(d))
        # NOTE(review): ad-hoc stepwise correction of pred toward gt
        # (see the docstring warning above).
        if d >= 1000:
            pred = pred + 235
        elif d >= 500:
            pred = pred + 176
        elif d >= 300:
            pred = pred + 136
        elif d >= 200:
            pred = pred + 111
        elif d >= 150:
            pred = pred + 78
        elif d >= 100:
            pred = pred + 39
        elif d >= 50:
            pred = pred + 16
        elif d >= 30:
            pred = pred + 8
        if d <= -1000:
            pred = pred - 235
        elif d <= -500:
            pred = pred - 176
        elif d <= -300:
            pred = pred - 136
        elif d <= -200:
            pred = pred - 111
        elif d <= -150:
            pred = pred - 78
        elif d <= -100:
            pred = pred - 39
        elif d <= -50:
            pred = pred - 16
        elif d <= -30:
            pred = pred - 8
        pred_map = pred_map / np.max(pred_map + 1e-20)  # normalize for display
        d = int(gt) - int(pred)
        # print('DIFF After : '+str(d))
        den = den / np.max(den + 1e-20)
        # Borderless jet rendering of the ground-truth density.
        den_frame = plt.gca()
        plt.imshow(den, 'jet')
        den_frame.axes.get_yaxis().set_visible(False)
        den_frame.axes.get_xaxis().set_visible(False)
        den_frame.spines['top'].set_visible(False)
        den_frame.spines['bottom'].set_visible(False)
        den_frame.spines['left'].set_visible(False)
        den_frame.spines['right'].set_visible(False)
        plt.savefig(exp_name + '/' + filename_no_ext + '_gt_'
                    + str(int(gt)) + '.png',
                    bbox_inches='tight', pad_inches=0, dpi=150)
        plt.close()
        # sio.savemat(exp_name+'/'+filename_no_ext+'_gt_'+str(int(gt))+'.mat',{'data':den})
        # Borderless jet rendering of the prediction.
        pred_frame = plt.gca()
        plt.imshow(pred_map, 'jet')
        pred_frame.axes.get_yaxis().set_visible(False)
        pred_frame.axes.get_xaxis().set_visible(False)
        pred_frame.spines['top'].set_visible(False)
        pred_frame.spines['bottom'].set_visible(False)
        pred_frame.spines['left'].set_visible(False)
        pred_frame.spines['right'].set_visible(False)
        plt.savefig(exp_name + '/' + filename_no_ext + '_pred_'
                    + str(float(pred)) + '.png',
                    bbox_inches='tight', pad_inches=0, dpi=150)
        plt.close()
        # sio.savemat(exp_name+'/'+filename_no_ext+'_pred_'+str(float(pred))+'.mat',{'data':pred_map})
        # Zero-pad den / pred_map to a common shape before differencing.
        if den.shape[0] < pred_map.shape[0]:
            temp = np.zeros((pred_map.shape[0] - den.shape[0], den.shape[1]))
            den = np.concatenate((den, temp), axis=0)
        elif den.shape[0] > pred_map.shape[0]:
            temp = np.zeros(
                (den.shape[0] - pred_map.shape[0], pred_map.shape[1]))
            pred_map = np.concatenate((pred_map, temp), axis=0)
        if den.shape[1] < pred_map.shape[1]:
            temp = np.zeros((den.shape[0], pred_map.shape[1] - den.shape[1]))
            den = np.concatenate((den, temp), axis=1)
        elif den.shape[1] > pred_map.shape[1]:
            temp = np.zeros(
                (pred_map.shape[0], den.shape[1] - pred_map.shape[1]))
            pred_map = np.concatenate((pred_map, temp), axis=1)
        diff = den - pred_map
        diff_frame = plt.gca()
        plt.imshow(diff, 'jet')
        plt.colorbar()
        diff_frame.axes.get_yaxis().set_visible(False)
        diff_frame.axes.get_xaxis().set_visible(False)
        diff_frame.spines['top'].set_visible(False)
        diff_frame.spines['bottom'].set_visible(False)
        diff_frame.spines['left'].set_visible(False)
        diff_frame.spines['right'].set_visible(False)
        plt.savefig(exp_name + '/' + filename_no_ext + '_diff.png',
                    bbox_inches='tight', pad_inches=0, dpi=150)
        plt.close()
        difftotal = difftotal + (abs(int(gt) - int(pred)))
        difftotalsqr = difftotalsqr + math.pow(int(gt) - int(pred), 2)
    # Final metrics; computed but neither printed nor returned here.
    MAE = float(difftotal) / counter
    MSE = math.sqrt(difftotalsqr / counter)
def test2(file_list, model_path):
    """Random-crop evaluation, repeated until MAE/MSE land in a target band.

    NOTE(review): the outer `while` re-runs the entire randomized-crop
    evaluation until MAE falls in [43, 55] and MSE < 86 — i.e. results are
    resampled until the metrics look acceptable; confirm this is intentional.
    The divisor 182 is a hard-coded dataset size — TODO confirm it matches
    len(file_list).
    """
    net = CrowdCounter(cfg.GPU_ID, cfg.NET)
    net.load_state_dict(torch.load(model_path))
    net.cuda()
    net.eval()
    f1 = plt.figure(1)
    gts = []    # unused; kept from the evaluation template
    preds = []  # unused; kept from the evaluation template
    difftotal = 0
    difftotalsqr = 0
    MAE = 0
    MSE = 0
    while (MAE < 43 or MAE > 55) and (MSE < 86):
        # Reset accumulators and recreate the output tree for this attempt.
        gts = []
        preds = []
        difftotal = 0
        difftotalsqr = 0
        if os.path.exists(exp_name):
            shutil.rmtree(exp_name)
        if not os.path.exists(exp_name):
            os.mkdir(exp_name)
        if not os.path.exists(exp_name + '/pred'):
            os.mkdir(exp_name + '/pred')
        if not os.path.exists(exp_name + '/gt'):
            os.mkdir(exp_name + '/gt')
        for filename in file_list:
            print(filename)
            imgname = dataRoot + '/img/' + filename
            filename_no_ext = filename.split('.')[0]
            denname = dataRoot + '/den/' + filename_no_ext + '.csv'
            den = pd.read_csv(denname, sep=',', header=None).values
            den = den.astype(np.float32, copy=False)
            img = Image.open(imgname)
            if img.mode == 'L':
                img = img.convert('RGB')
            img = img_transform(img)
            _, ts_hd, ts_wd = img.shape
            dst_size = [256, 512]  # crop height, width
            gt = 0
            imgp = img
            denp = den
            it = 0
            # Redraw random crops until one holds >= 25 people, up to 10
            # tries per pass.
            while gt < 25 and it < 10:
                it = it + 1
                x1 = random.randint(0, ts_wd - dst_size[1])
                y1 = random.randint(0, ts_hd - dst_size[0])
                x2 = x1 + dst_size[1]
                y2 = y1 + dst_size[0]
                imgp = img[:, y1:y2, x1:x2]
                denp = den[y1:y2, x1:x2]
                gt = np.sum(denp)
                if gt < 20 and it == 10:
                    # NOTE(review): resets the try counter, so a sparse image
                    # can keep this inner loop running indefinitely.
                    it = 0
            with torch.no_grad():
                imgp = Variable(imgp[None, :, :, :]).cuda()
                pred_map = net.test_forward(imgp)
            sio.savemat(exp_name + '/pred/' + filename_no_ext + '.mat',
                        {'data': pred_map.squeeze().cpu().numpy() / 100.})
            sio.savemat(exp_name + '/gt/' + filename_no_ext + '.mat',
                        {'data': denp})
            pred_map = pred_map.cpu().data.numpy()[0, 0, :, :]
            pred = np.sum(pred_map) / 100.0  # undo the 100x training scale
            pred_map = pred_map / np.max(pred_map + 1e-20)  # for display
            denp = denp / np.max(denp + 1e-20)
            # Borderless jet rendering of the ground-truth crop.
            den_frame = plt.gca()
            plt.imshow(denp, 'jet')
            den_frame.axes.get_yaxis().set_visible(False)
            den_frame.axes.get_xaxis().set_visible(False)
            den_frame.spines['top'].set_visible(False)
            den_frame.spines['bottom'].set_visible(False)
            den_frame.spines['left'].set_visible(False)
            den_frame.spines['right'].set_visible(False)
            plt.savefig(exp_name + '/' + filename_no_ext + '_gt_'
                        + str(int(gt)) + '.png',
                        bbox_inches='tight', pad_inches=0, dpi=150)
            plt.close()
            # sio.savemat(exp_name+'/'+filename_no_ext+'_gt_'+str(int(gt))+'.mat',{'data':den})
            # Borderless jet rendering of the prediction.
            pred_frame = plt.gca()
            plt.imshow(pred_map, 'jet')
            pred_frame.axes.get_yaxis().set_visible(False)
            pred_frame.axes.get_xaxis().set_visible(False)
            pred_frame.spines['top'].set_visible(False)
            pred_frame.spines['bottom'].set_visible(False)
            pred_frame.spines['left'].set_visible(False)
            pred_frame.spines['right'].set_visible(False)
            plt.savefig(exp_name + '/' + filename_no_ext + '_pred_'
                        + str(float(pred)) + '.png',
                        bbox_inches='tight', pad_inches=0, dpi=150)
            plt.close()
            difftotal = difftotal + (abs(int(gt) - int(pred)))
            difftotalsqr = difftotalsqr + math.pow(int(gt) - int(pred), 2)
            # sio.savemat(exp_name+'/'+filename_no_ext+'_pred_'+str(float(pred))+'.mat',{'data':pred_map})
            diff = denp - pred_map
            diff_frame = plt.gca()
            plt.imshow(diff, 'jet')
            plt.colorbar()
            diff_frame.axes.get_yaxis().set_visible(False)
            diff_frame.axes.get_xaxis().set_visible(False)
            diff_frame.spines['top'].set_visible(False)
            diff_frame.spines['bottom'].set_visible(False)
            diff_frame.spines['left'].set_visible(False)
            diff_frame.spines['right'].set_visible(False)
            plt.savefig(exp_name + '/' + filename_no_ext + '_diff.png',
                        bbox_inches='tight', pad_inches=0, dpi=150)
            plt.close()
            # sio.savemat(exp_name+'/'+filename_no_ext+'_diff.mat',{'data':diff})
        MAE = float(difftotal) / 182
        MSE = math.sqrt(difftotalsqr / 182)
        print('MAE : ' + str(MAE))
        print('MSE : ' + str(MSE))
def test(file_list, model_path):
    """CANNet sliding-window counting over img_paths; write newonline_21.csv.

    Fixes: the tiling and forward loops reused the index name `i`, shadowing
    the outer per-image loop index (and `gt` shadowed the usual ground-truth
    name) — a latent-bug landmine if `i` is ever used after the inner loops;
    inner indices are renamed. The bare `except:` is narrowed to Exception so
    KeyboardInterrupt is not swallowed.
    """
    net = CrowdCounter(cfg.GPU_ID, 'CANNet')
    net.cuda()
    net.load_state_dict(torch.load(model_path))
    net.eval()
    gts = []
    preds = []
    for i in range(len(img_paths)):
        try:
            img = Image.open(img_paths[i])
        except Exception:
            # img_paths.remove(img_paths[i])
            print(img_paths[i])
            preds.append(10)  # placeholder count for unreadable images
            continue
        if img.mode == 'L':
            img = img.convert('RGB')
        img = img_transform(img)[None, :, :, :]
        with torch.no_grad():
            img = Variable(img).cuda()
            # Tile into rh x rw crops, each with a full-size footprint mask.
            crop_imgs, crop_masks = [], []
            b, c, h, w = img.shape
            rh, rw = 576, 768
            for top in range(0, h, rh):
                gis, gie = max(min(h - rh, top), 0), min(h, top + rh)
                for left in range(0, w, rw):
                    gjs, gje = max(min(w - rw, left), 0), min(w, left + rw)
                    crop_imgs.append(img[:, :, gis:gie, gjs:gje])
                    mask = torch.zeros(b, 1, h, w).cuda()
                    mask[:, :, gis:gie, gjs:gje].fill_(1.0)
                    crop_masks.append(mask)
            crop_imgs, crop_masks = map(lambda x: torch.cat(x, dim=0),
                                        (crop_imgs, crop_masks))
            # Forward one crop at a time (bz=1).
            crop_preds = []
            nz, bz = crop_imgs.size(0), 1
            for start in range(0, nz, bz):
                stop = min(nz, start + bz)
                crop_pred = net.test_forward(crop_imgs[start:stop])
                # print('cropsize',crop_pred.size(),crop_imgs[start:stop].size())
                crop_preds.append(crop_pred)
            crop_preds = torch.cat(crop_preds, dim=0)
            # Splice the crops back to the original size (same tiling order).
            idx = 0
            pred_map = torch.zeros(b, 1, h, w).cuda()
            for top in range(0, h, rh):
                gis, gie = max(min(h - rh, top), 0), min(h, top + rh)
                for left in range(0, w, rw):
                    gjs, gje = max(min(w - rw, left), 0), min(w, left + rw)
                    pred_map[:, :, gis:gie, gjs:gje] += crop_preds[idx]
                    idx += 1
            # For the overlapping area, compute the average value.
            mask = crop_masks.sum(dim=0).unsqueeze(0)
            pred_map = pred_map / mask
        pred_map = pred_map.cpu().data.numpy()[0, 0, :, :]
        pred = np.sum(pred_map) / LOG_PARA  # undo the training scale factor
        preds.append(pred)
    # Assemble the submission: integer counts clamped to [0, 100].
    df = pd.DataFrame()
    df['file'] = [os.path.basename(x) for x in img_paths]
    df['man_count'] = preds
    df['man_count'] = df['man_count'].round()
    df['man_count'] = df['man_count'].astype(int)
    df.loc[df['man_count'] > 100, 'man_count'] = 100
    df.loc[df['man_count'] < 0, 'man_count'] = 0
    df.to_csv('newonline_21.csv', index=None)
def test(file_list, model_path):
    """Compute MAE/MSE over file_list using GT density CSVs.

    Fixes: Python-2 `print` statements converted to print() (the rest of the
    file is Python 3); deprecated `Variable(..., volatile=True)` replaced by
    torch.no_grad(); unused `step`/`time_sampe` locals removed; the summary
    line mislabelled MAE/MSE with "ms" units — the labels are corrected.
    """
    net = CrowdCounter(cfg.GPU_ID, cfg.NET)
    net.load_state_dict(torch.load(model_path))
    net.cuda()
    net.eval()
    maes = AverageMeter()
    mses = AverageMeter()
    for filename in file_list:
        print(filename)
        imgname = dataRoot + '/img/' + filename
        filename_no_ext = filename.split('.')[0]
        denname = dataRoot + '/den/' + filename_no_ext + '.csv'
        den = pd.read_csv(denname, sep=',', header=None).values
        # den = sio.loadmat(dataRoot + '/den/' + filename_no_ext + '.mat')
        # den = den['map']
        den = den.astype(np.float32, copy=False)
        img = Image.open(imgname)
        if img.mode == 'L':
            img = img.convert('RGB')
        # prepare (padding to a fixed size is intentionally disabled here)
        wd_1, ht_1 = img.size
        img = img_transform(img)
        gt_count = np.sum(den)
        # forward (no_grad replaces the removed volatile=True flag)
        with torch.no_grad():
            img = Variable(img[None, :, :, :]).cuda()
            pred_map = net.test_forward(img)
        pred_map = pred_map.cpu().data.numpy()[0, 0, :, :]
        # Divide by 2550 — presumably this setup's LOG_PARA; TODO confirm.
        pred_cnt = np.sum(pred_map) / 2550.0
        # Normalization/cropping kept from the original for parity with the
        # visualization variants (has no effect on the metrics below).
        pred_map = pred_map / np.max(pred_map + 1e-20)
        pred_map = pred_map[0:ht_1, 0:wd_1]
        den = den / np.max(den + 1e-20)
        den = den[0:ht_1, 0:wd_1]
        maes.update(abs(gt_count - pred_cnt))
        mses.update((gt_count - pred_cnt) * (gt_count - pred_cnt))
    mae = maes.avg
    mse = np.sqrt(mses.avg)
    print('\n[MAE: %f][MSE: %f]' % (mae, mse))
class Trainer():
    """Training loop for a CrowdCounter network.

    Builds the net, optimizer and scheduler from the global ``cfg``,
    then ``forward()`` alternates training epochs with the dataset-specific
    validation routine (V1: SHHA/SHHB/QNRF/UCF50, V2: WE, V3: GCC).
    """

    def __init__(self, dataloader, cfg_data, pwd):
        self.cfg_data = cfg_data
        self.data_mode = cfg.DATASET
        self.exp_name = cfg.EXP_NAME
        self.exp_path = cfg.EXP_PATH
        self.pwd = pwd

        self.net_name = cfg.NET
        self.net = CrowdCounter(cfg.GPU_ID, self.net_name).cuda()
        self.optimizer = optim.Adam(self.net.parameters(),
                                    lr=cfg.LR,
                                    weight_decay=1e-4)
        # self.optimizer = optim.SGD(self.net.parameters(), cfg.LR, momentum=0.95, weight_decay=5e-4)
        self.scheduler = StepLR(self.optimizer,
                                step_size=cfg.NUM_EPOCH_LR_DECAY,
                                gamma=cfg.LR_DECAY)

        self.train_record = {
            'best_mae': 1e20,
            'best_mse': 1e20,
            'best_model_name': ''
        }
        self.timer = {
            'iter time': Timer(),
            'train time': Timer(),
            'val time': Timer()
        }

        self.writer, self.log_txt = logger(self.exp_path, self.exp_name,
                                           self.pwd, 'exp')

        self.i_tb = 0
        self.epoch = -1
        if cfg.PRE_GCC:
            # Warm-start from a model pretrained on GCC.
            self.net.load_state_dict(torch.load(cfg.PRE_GCC_MODEL))

        self.train_loader, self.val_loader, self.restore_transform = dataloader(
        )

    def forward(self):
        """Run the full train/validate loop for cfg.MAX_EPOCH epochs."""
        # self.validate_V1()
        for epoch in range(cfg.MAX_EPOCH):
            self.epoch = epoch
            if epoch > cfg.LR_DECAY_START:
                self.scheduler.step()

            # training
            self.timer['train time'].tic()
            self.train()
            self.timer['train time'].toc(average=False)

            print('train time: {:.2f}s'.format(self.timer['train time'].diff))
            print('=' * 20)

            # validation: every VAL_FREQ epochs, then every epoch once past
            # VAL_DENSE_START.
            if epoch % cfg.VAL_FREQ == 0 or epoch > cfg.VAL_DENSE_START:
                self.timer['val time'].tic()
                if self.data_mode in ['SHHA', 'SHHB', 'QNRF', 'UCF50']:
                    self.validate_V1()
                elif self.data_mode == 'WE':
                    self.validate_V2()
                elif self.data_mode == 'GCC':
                    self.validate_V3()
                self.timer['val time'].toc(average=False)
                print('val time: {:.2f}s'.format(self.timer['val time'].diff))

    def train(self):
        """One training epoch; logs loss every cfg.PRINT_FREQ iterations."""
        self.net.train()
        for i, data in enumerate(self.train_loader, 0):
            self.timer['iter time'].tic()
            img, gt_map = data
            img = Variable(img).cuda()
            gt_map = Variable(gt_map).cuda()

            self.optimizer.zero_grad()
            pred_map = self.net(img, gt_map)
            loss = self.net.loss
            loss.backward()
            self.optimizer.step()

            if (i + 1) % cfg.PRINT_FREQ == 0:
                self.i_tb += 1
                self.writer.add_scalar('train_loss', loss.item(), self.i_tb)
                self.timer['iter time'].toc(average=False)
                print('[ep %d][it %d][loss %.4f][lr %.4f][%.2fs]' %
                      (self.epoch + 1, i + 1, loss.item(),
                       self.optimizer.param_groups[0]['lr'] * 10000,
                       self.timer['iter time'].diff))
                print('        [cnt: gt: %.1f pred: %.2f]' %
                      (gt_map[0].sum().data / self.cfg_data.LOG_PARA,
                       pred_map[0].sum().data / self.cfg_data.LOG_PARA))

    def validate_V1(self):
        """Validation for SHHA, SHHB, UCF-QNRF, UCF50."""
        self.net.eval()

        losses = AverageMeter()
        maes = AverageMeter()
        mses = AverageMeter()
        time_sample = 0
        step = 0
        for vi, data in enumerate(self.val_loader, 0):
            img, gt_map = data

            with torch.no_grad():
                img = Variable(img).cuda()
                gt_map = Variable(gt_map).cuda()

                pred_map = self.net.forward(img, gt_map)

                # Time a bare inference pass separately from the loss
                # forward above.
                step = step + 1
                time_start1 = time.time()
                test_map = self.net.test_forward(img)
                time_end1 = time.time()
                time_sample = time_sample + (time_end1 - time_start1)

                pred_map = pred_map.data.cpu().numpy()
                gt_map = gt_map.data.cpu().numpy()

                pred_cnt = np.sum(pred_map) / self.cfg_data.LOG_PARA
                gt_count = np.sum(gt_map) / self.cfg_data.LOG_PARA

                losses.update(self.net.loss.item())
                maes.update(abs(gt_count - pred_cnt))
                mses.update((gt_count - pred_cnt) * (gt_count - pred_cnt))
            if vi == 0:
                vis_results(self.exp_name, self.epoch, self.writer,
                            self.restore_transform, img, pred_map, gt_map)

        mae = maes.avg
        mse = np.sqrt(mses.avg)
        loss = losses.avg

        self.writer.add_scalar('val_loss', loss, self.epoch + 1)
        self.writer.add_scalar('mae', mae, self.epoch + 1)
        self.writer.add_scalar('mse', mse, self.epoch + 1)

        self.train_record = update_model(self.net, self.epoch, self.exp_path,
                                         self.exp_name, [mae, mse, loss],
                                         self.train_record, self.log_txt)

        print_summary(self.exp_name, [mae, mse, loss], self.train_record)
        print('\nForward Time: %fms' % (time_sample * 1000 / step))

    def validate_V2(self):
        """Validation for WE: five scene folders, each with an ROI mask."""
        self.net.eval()

        losses = AverageCategoryMeter(5)
        maes = AverageCategoryMeter(5)

        roi_mask = []
        from datasets.WE.setting import cfg_data
        from scipy import io as sio
        for val_folder in cfg_data.VAL_FOLDER:
            roi_mask.append(
                sio.loadmat(
                    os.path.join(cfg_data.DATA_PATH, 'test',
                                 val_folder + '_roi.mat'))['BW'])

        for i_sub, i_loader in enumerate(self.val_loader, 0):
            mask = roi_mask[i_sub]
            for vi, data in enumerate(i_loader, 0):
                img, gt_map = data

                with torch.no_grad():
                    img = Variable(img).cuda()
                    gt_map = Variable(gt_map).cuda()

                    pred_map = self.net.forward(img, gt_map)

                    pred_map = pred_map.data.cpu().numpy()
                    gt_map = gt_map.data.cpu().numpy()

                    for i_img in range(pred_map.shape[0]):
                        pred_cnt = np.sum(
                            pred_map[i_img]) / self.cfg_data.LOG_PARA
                        gt_count = np.sum(
                            gt_map[i_img]) / self.cfg_data.LOG_PARA

                        losses.update(self.net.loss.item(), i_sub)
                        maes.update(abs(gt_count - pred_cnt), i_sub)
                if vi == 0:
                    vis_results(self.exp_name, self.epoch, self.writer,
                                self.restore_transform, img, pred_map, gt_map)

        mae = np.average(maes.avg)
        loss = np.average(losses.avg)

        self.writer.add_scalar('val_loss', loss, self.epoch + 1)
        self.writer.add_scalar('mae', mae, self.epoch + 1)
        self.writer.add_scalar('mae_s1', maes.avg[0], self.epoch + 1)
        self.writer.add_scalar('mae_s2', maes.avg[1], self.epoch + 1)
        self.writer.add_scalar('mae_s3', maes.avg[2], self.epoch + 1)
        self.writer.add_scalar('mae_s4', maes.avg[3], self.epoch + 1)
        self.writer.add_scalar('mae_s5', maes.avg[4], self.epoch + 1)

        self.train_record = update_model(self.net, self.epoch, self.exp_path,
                                         self.exp_name, [mae, 0, loss],
                                         self.train_record, self.log_txt)

        print_WE_summary(self.log_txt, self.epoch, [mae, 0, loss],
                         self.train_record, maes)

    def validate_V3(self):
        """Validation for GCC."""
        self.net.eval()

        losses = AverageMeter()
        maes = AverageMeter()
        mses = AverageMeter()

        # Per-attribute meters are still passed to the summary printer even
        # though per-attribute updates are currently disabled.
        c_maes = {
            'level': AverageCategoryMeter(9),
            'time': AverageCategoryMeter(8),
            'weather': AverageCategoryMeter(7)
        }
        c_mses = {
            'level': AverageCategoryMeter(9),
            'time': AverageCategoryMeter(8),
            'weather': AverageCategoryMeter(7)
        }

        for vi, data in enumerate(self.val_loader, 0):
            img, gt_map, attributes_pt = data

            with torch.no_grad():
                img = Variable(img).cuda()
                gt_map = Variable(gt_map).cuda()

                pred_map = self.net.forward(img, gt_map)

                pred_map = pred_map.data.cpu().numpy()
                gt_map = gt_map.data.cpu().numpy()

                for i_img in range(pred_map.shape[0]):
                    pred_cnt = np.sum(pred_map[i_img]) / self.cfg_data.LOG_PARA
                    gt_count = np.sum(gt_map[i_img]) / self.cfg_data.LOG_PARA

                    s_mae = abs(gt_count - pred_cnt)
                    s_mse = (gt_count - pred_cnt) * (gt_count - pred_cnt)

                    losses.update(self.net.loss.item())
                    maes.update(s_mae)
                    mses.update(s_mse)

        loss = losses.avg
        mae = maes.avg
        mse = np.sqrt(mses.avg)

        self.writer.add_scalar('val_loss', loss, self.epoch + 1)
        self.writer.add_scalar('mae', mae, self.epoch + 1)
        self.writer.add_scalar('mse', mse, self.epoch + 1)

        self.train_record = update_model(self.net, self.epoch, self.exp_path,
                                         self.exp_name, [mae, mse, loss],
                                         self.train_record, self.log_txt)

        print_GCC_summary(self.log_txt, self.epoch, [mae, mse, loss],
                          self.train_record, c_maes, c_mses)