def evaluate_model(checkpoint_path, data_loader):
    """Evaluate an MCNN checkpoint on a data loader.

    Args:
        checkpoint_path: path to a saved ``state_dict`` for MCNN.
        data_loader: iterable yielding blobs with 'data' and 'gt_density'
            entries; the first element of each batch is used (batch size
            is assumed to be 1, matching the test loaders in this project).

    Returns:
        Tuple ``(mae, mse)``: mean absolute error and root-mean-squared
        error of predicted vs. ground-truth crowd counts. Returns
        ``(0.0, 0.0)`` for an empty loader.
    """
    print('Evaluation starts!!!')
    net = MCNN()
    net.load_state_dict(torch.load(checkpoint_path))
    net.to(device)
    net.eval()

    mae = 0.0
    mse = 0.0
    num_samples = 0
    # Inference only: no_grad avoids building the autograd graph and
    # saves memory during evaluation.
    with torch.no_grad():
        for blob in data_loader:
            im_data = blob['data'][0]
            gt_data = blob['gt_density'][0]
            # BUG FIX: move the input onto the same device as the model;
            # previously a CPU tensor was fed to a (possibly) CUDA model.
            density_map = net(im_data.to(device))
            density_map = density_map.data.cpu().numpy()
            gt_data = gt_data.cpu().numpy()
            gt_count = np.sum(gt_data)
            et_count = np.sum(density_map)
            mae += abs(gt_count - et_count)
            mse += (gt_count - et_count) * (gt_count - et_count)
            num_samples += 1

    # Guard against an empty loader rather than dividing by zero.
    if num_samples == 0:
        return 0.0, 0.0
    # BUG FIX: the original divided by the length of the global
    # `val_dataset`, which is wrong whenever this function is called with
    # any other loader. Normalize by the number of samples actually seen.
    mae = mae / num_samples
    mse = np.sqrt(mse / num_samples)
    return mae, mse
def __init__(self):
    """Build the crowd counter: an MCNN density estimator plus its losses.

    NOTE: attribute-assignment order is kept as-is — nn.Module registers
    submodules in assignment order, which fixes state_dict key ordering.
    """
    super(CrowdCounter, self).__init__()
    # Density Map Estimator: the multi-column CNN backbone.
    self.DME = MCNN()
    # Pixel-wise MSE between predicted and ground-truth density maps.
    self.loss_fn = nn.MSELoss()
    # Structural-similarity loss term (project-local SSIM_Loss module).
    self.loss_ssim = SSIM_Loss()
# --- Test-script setup: evaluate a saved MCNN on ShanghaiTech Part A ---
data_path = './data/original/shanghaitech/part_A/test_data/images/'
gt_path = './data/original/shanghaitech/part_A/test_data/ground-truth_csv/'
model_path = './saved_models/mcnn_shtechA_1.pth'
output_dir = './output/'

# Derive a results file name from the model file name (extension dropped).
model_name = os.path.basename(model_path).split('.')[0]
file_results = os.path.join(output_dir,'results_' + model_name + '_.txt')
if not os.path.exists(output_dir):
    os.mkdir(output_dir)
# Predicted density maps go into a per-model subdirectory.
output_dir = os.path.join(output_dir, 'density_maps_' + model_name)
if not os.path.exists(output_dir):
    os.mkdir(output_dir)

# Load the trained model and switch to inference mode.
net = MCNN()
net.load_state_dict(torch.load(model_path))
net.to(device)
net.eval()

# Running error accumulators for the whole test set.
mae = 0.0
mse = 0.0

#load test data
test_dataset = Shanghai_Dataset(data_path, gt_path, gt_downsample=True, pre_load=True)
test_data_loader = DataLoader(test_dataset, batch_size=1, shuffle=False, num_workers=0)

for it,blob in enumerate(test_data_loader):
    print('{}/{}'.format(it,len(test_dataset)))
    # batch_size is 1, so take the first (only) element of each batch.
    im_data = blob['data'][0]
    gt_data = blob['gt_density'][0]
    # NOTE(review): the loop body continues beyond this chunk.
# --- Training-script setup for MCNN on ShanghaiTech Part A patches ---
dataset_name = 'shtechA'
checkpoint_dir = './saved_models/'
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Pre-formatted 9-patch train/val splits (images and density maps).
train_path = './data/formatted_trainval/shanghaitech_part_A_patches_9/train'
train_gt_path = './data/formatted_trainval/shanghaitech_part_A_patches_9/train_den'
val_path = './data/formatted_trainval/shanghaitech_part_A_patches_9/val'
val_gt_path = './data/formatted_trainval/shanghaitech_part_A_patches_9/val_den'

#training configuration
num_epochs=2000
lr = 0.00001
momentum = 0.9  # NOTE(review): unused with Adam below — kept for reference

# load net
net = MCNN()
# Project-local helper: initialize weights from N(0, 0.01).
weights_normal_init(net, dev=0.01)
net.to(device)
net.train()

# Adam with a small weight decay; loss is pixel-wise MSE on density maps.
optimizer = torch.optim.Adam(net.parameters(), lr=lr, weight_decay=1e-5)
criterion = nn.MSELoss()

if not os.path.exists(checkpoint_dir):
    os.mkdir(checkpoint_dir)

print('Loading training and validation datasets')
#train_data_loader = ImageDataLoader(train_path, train_gt_path, shuffle=True, gt_downsample=True, pre_load=True)
#val_data_loader = ImageDataLoader(val_path, val_gt_path, shuffle=False, gt_downsample=True, pre_load=True)
train_dataset = Shanghai_Dataset(train_path, train_gt_path, gt_downsample=True, pre_load=True)
train_data_loader = DataLoader(train_dataset, batch_size=1, shuffle=True, num_workers=0)
# NOTE(review): the script continues beyond this chunk.
# --- Test-script setup (encoder output variant) for ShanghaiTech Part A ---
vis = False          # toggle visualization
save_output = True   # toggle saving predicted density maps

device = torch.device( "cuda" if torch.cuda.is_available() else "cpu") # converts tensors to CUDA variables if gpu is available

data_path = './data/original/shanghaitech/part_A/test_data/images/'
gt_path = './data/original/shanghaitech/part_A/test_data/ground-truth_csv/'
model_path = './saved_models/mcnn_shtechA_1.pth'
output_dir = './output_encoder/shanghai_part_A/'
if not os.path.exists(output_dir):
    os.mkdir(output_dir)

# Load the trained model and switch to inference mode.
net = MCNN()
print('Loading the best trained model ...')
net.load_state_dict(torch.load(model_path))
net.to(device)
net.eval()

# Running error accumulators for the whole test set.
mae = 0.0
mse = 0.0

#load test data
test_dataset = Shanghai_Dataset(data_path, gt_path, gt_downsample=True, pre_load=True)
# NOTE(review): the DataLoader call is truncated at the end of this chunk;
# remaining keyword arguments continue beyond view.
test_data_loader = DataLoader(test_dataset,
# --- Training configuration and loop prologue ---
batch_size = 32
learning_rate = 0.0001

# Use DataLoader to sample data/label pairs at random
# pin_memory speeds up host-to-GPU transfers, so enable it only with CUDA.
if torch.cuda.is_available():
    train_loader = torch.utils.data.DataLoader(train_set, batch_size=batch_size, shuffle=True, pin_memory=True)
else:
    train_loader = torch.utils.data.DataLoader(train_set, batch_size=batch_size, shuffle=True)

# Initialize model, loss function, and optimizer
model = MCNN()
criterion = torch.nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

# Iterate through training set and return input image and ground truth density map
for batch_idx, (img, gt_dmap) in enumerate(train_loader):
    # Make sure we are in training mode
    model.train()
    # Torch accumulates gradients so we must zero these out for each batch
    optimizer.zero_grad()
    # Place data and labels in variable to track gradients and place on the GPU if available
    # NOTE(review): Variable is deprecated in modern PyTorch; kept as-is here.
    img = Variable(img.float())
    gt_dmap = Variable(gt_dmap.float())
    # NOTE(review): the loop body continues beyond this chunk.
def __init__(self, mcnn=None):
    """Initialize the crowd counter with an optional pre-built estimator.

    Args:
        mcnn: an existing MCNN instance to reuse as the density-map
            estimator; when None, a fresh MCNN is constructed.
    """
    super(CrowdCounter, self).__init__()
    # Reuse the caller-supplied network when given, otherwise build one.
    self.DME = mcnn if mcnn is not None else MCNN()