def evaluate_model(trained_model, data_loader, netname='CrowdCounter'):
    """Evaluate a trained crowd-counting network on a test set.

    Args:
        trained_model: path to the saved weights file, loaded via
            ``network.load_net``.
        data_loader: iterable yielding blobs with 'data' (image) and
            'gt_density' (ground-truth density map) entries; must also
            provide ``get_num_samples()``.
        netname: architecture to instantiate; one of 'CrowdCounter',
            'MCNNNet', 'AmendNet', 'CrowdCounter_MSCNN'.

    Returns:
        Tuple ``(mae, mse)``: mean absolute error and root-mean-squared
        error of the estimated counts against the ground-truth counts.

    Raises:
        ValueError: if ``netname`` is not a recognized architecture name.
    """
    # Compare strings with '==', not 'is': identity comparison only works
    # by accident of interning and emits a SyntaxWarning on Python 3.8+.
    if netname == 'CrowdCounter':
        net = CrowdCounter()
    elif netname == 'MCNNNet':
        net = MCNNNet()
    elif netname == 'AmendNet':
        net = AmendNet()
    elif netname == 'CrowdCounter_MSCNN':
        net = CrowdCounter_MSCNN()
    else:
        # The original `raise (tuple)` produced an unhelpful TypeError
        # ("exceptions must derive from BaseException"); raise ValueError.
        raise ValueError(
            'netname should be one of ["CrowdCounter", "MCNNNet", '
            '"AmendNet", "CrowdCounter_MSCNN"], but we got {!r}'.format(
                netname))
    network.load_net(trained_model, net)
    net.cuda()
    net.eval()
    mae = 0.0
    mse = 0.0
    for blob in data_loader:
        im_data = blob['data']
        gt_data = blob['gt_density']
        density_map = net(im_data, gt_data)
        density_map = density_map.data.cpu().numpy()
        # Predicted and ground-truth crowd counts are the sums over the
        # respective density maps.
        gt_count = np.sum(gt_data)
        et_count = np.sum(density_map)
        mae += abs(gt_count - et_count)
        mse += (gt_count - et_count) * (gt_count - et_count)
    num_samples = data_loader.get_num_samples()
    mae = mae / num_samples
    mse = np.sqrt(mse / num_samples)
    return mae, mse
# --- Example 2 ---
def evaluate_model(trained_model, data_loader):
    """Evaluate a trained CrowdCounter network on a test set.

    Args:
        trained_model: path to the saved weights file, loaded via
            ``network.load_net``.
        data_loader: iterable yielding blobs with 'data' (image) and
            'gt_density' (ground-truth density map) entries; must also
            provide ``get_num_samples()``.

    Returns:
        Tuple ``(mae, mse)``: mean absolute error and root-mean-squared
        error of the estimated counts against the ground-truth counts.
    """
    net = CrowdCounter()
    network.load_net(trained_model, net)
    net.cuda()
    net.eval()
    mae = 0.0
    mse = 0.0
    for blob in data_loader:
        im_data = blob['data']
        gt_data = blob['gt_density']
        density_map = net(im_data, gt_data)
        density_map = density_map.data.cpu().numpy()
        # Predicted and ground-truth crowd counts are the sums over the
        # respective density maps.
        gt_count = np.sum(gt_data)
        et_count = np.sum(density_map)
        mae += abs(gt_count - et_count)
        mse += (gt_count - et_count) * (gt_count - et_count)
    # np.float was removed in NumPy 1.24; the builtin float is equivalent.
    mae = float(mae / data_loader.get_num_samples())
    mse = np.sqrt(mse / data_loader.get_num_samples())
    return mae, mse
# --- Example 3 ---
# Path to the trained MCNN checkpoint (ShanghaiTech part A, epoch 110).
model_path = './saved_models/mcnn_shtechA_110.h5'

output_dir = './output/'
# Derive the result file name from the checkpoint's base name,
# e.g. 'results_mcnn_shtechA_110_.txt'.
model_name = os.path.basename(model_path).split('.')[0]
file_results = os.path.join(output_dir, 'results_' + model_name + '_.txt')
if not os.path.exists(output_dir):
    os.mkdir(output_dir)
# Per-image density maps go into a model-specific subdirectory.
output_dir = os.path.join(output_dir, 'density_maps_' + model_name)
if not os.path.exists(output_dir):
    os.mkdir(output_dir)

net = CrowdCounter()

trained_model = os.path.join(model_path)
network.load_net(trained_model, net)
net.cuda()
net.eval()
# Accumulators for evaluation statistics.
mae = 0.0
mse = 0.0
mape = 0.0
total_count = 0.0
#load test data
# NOTE(review): data_path and gt_path are assumed to be defined earlier in
# the full file -- they are not visible in this chunk; confirm upstream.
data_loader = ImageDataLoader(data_path,
                              gt_path,
                              shuffle=False,
                              gt_downsample=True,
                              pre_load=True)

# NOTE(review): the loop body is truncated in this chunk; presumably the
# mae/mse/mape accumulators are updated per sample below this point.
for blob in data_loader:
    im_data = blob['data']
    gt_data = blob['gt_density']
# --- Example 4 ---
Converts a PIL.Image with values in [0, 255], or a numpy.ndarray of shape
(H, W, C), into a torch.FloatTensor of shape [C, H, W] with values in [0, 1.0].

class torchvision.transforms.Normalize(mean, std)
Given per-channel means (R, G, B) and standard deviations (R, G, B), the
tensor is normalized as: Normalized_image = (image - mean) / std.
'''
# Preprocessing pipeline: PIL image -> float tensor in [0, 1], then
# channel-wise normalization with the standard ImageNet statistics.
_IMAGENET_MEAN = [0.485, 0.456, 0.406]
_IMAGENET_STD = [0.229, 0.224, 0.225]
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(mean=_IMAGENET_MEAN, std=_IMAGENET_STD),
])

net = CrowdCounter()

#defining the model

net = net.cuda()

#loading the trained weights
model_path = 'dataset/Shanghai/cmtl_shtechA_204.h5'

trained_model = os.path.join(model_path)
network.load_net(trained_model, net)
net.cuda()
net.eval()

data_loader = ImageDataLoader(
    'dataset/Shanghai/part_A_final/test_data/images/',
    'dataset/Shanghai/part_A_final/test_data/ground_truth',
    shuffle=False,
    gt_downsample=True,
    pre_load=True)