Example #1
import torch
from tqdm import tqdm

from cannet import CANNet
from my_dataset import CrowdDataset


def cal_mae(img_root,gt_dmap_root,model_param_path):
    '''
    Calculate the MAE of the test data.
    img_root: the root of test image data.
    gt_dmap_root: the root of test ground truth density-map data.
    model_param_path: the path of specific mcnn parameters.
    '''
    # build the network and load the trained weights
    model=CANNet()
    model.load_state_dict(torch.load(model_param_path))
    model.cuda()
    # build the test dataset (downsample factor 8) and a batch-size-1 loader
    dataset=CrowdDataset(img_root,gt_dmap_root,8,phase='test')
    dataloader=torch.utils.data.DataLoader(dataset,batch_size=1,shuffle=False)
    model.eval()
    mae=0
    with torch.no_grad():
        for i,(img,gt_dmap) in enumerate(tqdm(dataloader)):
            img=img.cuda()
            gt_dmap=gt_dmap.cuda()
            # forward propagation
            et_dmap=model(img)
            # accumulate the absolute difference between estimated and ground-truth counts
            mae+=abs(et_dmap.data.sum()-gt_dmap.data.sum()).item()
            del img,gt_dmap,et_dmap

    print("model_param_path:"+model_param_path+" mae:"+str(mae/len(dataloader)))
Example #2
'''
@time: 6/27/19 2:37 PM
@desc: test videos frame by frame (serial frames)
'''
import torch
import numpy as np
import cv2
import random
import matplotlib.pyplot as plt
import matplotlib.cm as CM
from torchvision import transforms
from cannet import CANNet
from my_dataset import CrowdDataset

# build the CAN model and load the trained checkpoint for inference
model = CANNet()
model_param_path = './checkpoints/cvpr2019CAN_353model.pth'
model.load_state_dict(torch.load(model_param_path))
model.cuda()
model.eval()
torch.backends.cudnn.enabled = False


def read_img(img, gt_downsample):
    # scale pixel values to [0, 1]
    img = img / 255
    # replicate a single-channel (grayscale) frame into 3 channels
    if len(img.shape) == 2:
        img = img[:, :, np.newaxis]
        img = np.concatenate((img, img, img), 2)

    # resize so height and width become multiples of gt_downsample
    if gt_downsample > 1:
        ds_rows = int(img.shape[0] // gt_downsample)
        ds_cols = int(img.shape[1] // gt_downsample)
        img = cv2.resize(img,
                         (ds_cols * gt_downsample, ds_rows * gt_downsample))
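
The read_img helper is truncated in this excerpt. Assuming a completed version returns a (1, 3, H, W) float tensor ready for the network (an assumption, not shown above), a rough sketch of scoring one video frame might look like the following; the video path is hypothetical, and the downsample factor 8 mirrors Example #1.

# sketch only: assumes read_img() ultimately returns a (1, 3, H, W) float tensor
cap = cv2.VideoCapture('./videos/demo.avi')  # hypothetical video file
ret, frame = cap.read()
if ret:
    img_tensor = read_img(frame, 8)  # same downsample factor as Example #1
    with torch.no_grad():
        et_dmap = model(img_tensor.cuda())
    print('estimated count:', et_dmap.sum().item())
    plt.imshow(et_dmap.squeeze().cpu().numpy(), cmap=CM.jet)
    plt.show()
cap.release()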