Example #1
import numpy as np
import torch

def evaluate_model(checkpoint_path, data_loader):
    # MCNN and device are assumed to be defined elsewhere in the script
    print('Starting evaluation')
    net = MCNN()
    net.load_state_dict(torch.load(checkpoint_path))
    net.to(device)
    net.eval()
    mae = 0.0
    mse = 0.0
    with torch.no_grad():  # inference only, no gradients needed
        for blob in data_loader:
            im_data = blob['data'][0].to(device)
            gt_data = blob['gt_density'][0]
            density_map = net(im_data)
            density_map = density_map.cpu().numpy()
            gt_data = gt_data.cpu().numpy()
            # a crowd count is the sum (integral) of a density map
            gt_count = np.sum(gt_data)
            et_count = np.sum(density_map)
            mae += abs(gt_count - et_count)
            mse += (gt_count - et_count) ** 2
    # average over the evaluated dataset
    mae = mae / len(data_loader.dataset)
    mse = np.sqrt(mse / len(data_loader.dataset))
    return mae, mse
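
For context, a minimal usage sketch of this helper is shown below. The checkpoint path, validation data paths, and loader names are hypothetical; the loader is built the same way as the test loader in Example #2, and Shanghai_Dataset is assumed to be the project's dataset class.

from torch.utils.data import DataLoader

# Hypothetical usage sketch: the paths and names below are illustrative only
val_dataset = Shanghai_Dataset(val_data_path, val_gt_path, gt_downsample=True, pre_load=True)
val_data_loader = DataLoader(val_dataset, batch_size=1, shuffle=False, num_workers=0)
mae, mse = evaluate_model('./checkpoints/mcnn_best.pth', val_data_loader)
print('MAE: {:.2f}, MSE: {:.2f}'.format(mae, mse))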
Example #2
import os

import numpy as np
import torch
from torch.utils.data import DataLoader

# MCNN, Shanghai_Dataset, device, model_path, data_path and gt_path
# are assumed to be defined earlier in the script
output_dir = './output/'
model_name = os.path.basename(model_path).split('.')[0]
file_results = os.path.join(output_dir, 'results_' + model_name + '_.txt')
os.makedirs(output_dir, exist_ok=True)
output_dir = os.path.join(output_dir, 'density_maps_' + model_name)
os.makedirs(output_dir, exist_ok=True)

net = MCNN()
net.load_state_dict(torch.load(model_path))
net.to(device)
net.eval()
mae = 0.0
mse = 0.0

# load the test data
test_dataset = Shanghai_Dataset(data_path, gt_path, gt_downsample=True, pre_load=True)
test_data_loader = DataLoader(test_dataset, batch_size=1, shuffle=False, num_workers=0)

for it, blob in enumerate(test_data_loader):
    print('{}/{}'.format(it, len(test_dataset)))
    im_data = blob['data'][0].to(device)
    gt_data = blob['gt_density'][0]
    density_map = net(im_data)
    density_map = density_map.detach().cpu().numpy()
    gt_data = gt_data.detach().cpu().numpy()
    gt_count = np.sum(gt_data)