import numpy as np
import torch
from torch.autograd import Variable  # pre-0.4 PyTorch API used throughout
import torchvision.utils as vutils

# NOTE: validate() also relies on objects defined elsewhere in this script:
# CrowdCounter, train_set, val_set, writer, exp_name, train_record, cfg,
# pil_to_tensor.


def validate(val_loader, model_path, epoch, restore):
    # rebuild the network from the saved checkpoint and switch to eval mode
    net = CrowdCounter(ce_weights=train_set.wts)
    net.load_state_dict(torch.load(model_path))
    net.cuda()
    net.eval()

    print '=' * 50

    val_loss_mse = []
    val_loss_cls = []
    val_loss_seg = []
    val_loss = []
    mae = 0.0
    mse = 0.0

    for vi, data in enumerate(val_loader, 0):
        img, gt_map, gt_cnt, roi, gt_roi, gt_seg = data

        # volatile=True disables autograd for inference (pre-0.4 PyTorch)
        img = Variable(img, volatile=True).cuda()
        gt_map = Variable(gt_map, volatile=True).cuda()
        gt_seg = Variable(gt_seg, volatile=True).cuda()
        roi = Variable(roi[0], volatile=True).cuda().float()
        gt_roi = Variable(gt_roi[0], volatile=True).cuda()

        pred_map, pred_cls, pred_seg = net(img, gt_map, roi, gt_roi, gt_seg)
        loss1, loss2, loss3 = net.f_loss()

        val_loss_mse.append(loss1.data)
        val_loss_cls.append(loss2.data)
        val_loss_seg.append(loss3.data)
        val_loss.append(net.loss.data)

        pred_map = pred_map.data.cpu().numpy()
        gt_map = gt_map.data.cpu().numpy()
        # per-pixel argmax over class scores -> hard segmentation mask
        pred_seg = pred_seg.cpu().max(1)[1].squeeze_(1).data.numpy()
        gt_seg = gt_seg.data.cpu().numpy()

        # accumulate absolute and squared counting errors over the set
        gt_count = np.sum(gt_map)
        pred_cnt = np.sum(pred_map)
        mae += abs(gt_count - pred_cnt)
        mse += (gt_count - pred_cnt) * (gt_count - pred_cnt)

        # visualize the first batch only: input / GT density / prediction /
        # GT segmentation / predicted segmentation, five tiles per row
        if vi == 0:
            x = []
            for idx, tensor in enumerate(zip(img.cpu().data, pred_map,
                                             gt_map, pred_seg, gt_seg)):
                if idx > cfg.VIS.VISIBLE_NUM_IMGS:
                    break
                pil_input = restore(tensor[0] / 255.)
                pil_label = torch.from_numpy(
                    tensor[2] / (tensor[2].max() + 1e-10)).repeat(3, 1, 1)
                pil_output = torch.from_numpy(
                    tensor[1] / (tensor[1].max() + 1e-10)).repeat(3, 1, 1)
                pil_gt_seg = torch.from_numpy(tensor[4]).repeat(3, 1, 1).float()
                pil_pred_seg = torch.from_numpy(tensor[3]).repeat(3, 1, 1).float()
                x.extend([pil_to_tensor(pil_input.convert('RGB')),
                          pil_label, pil_output, pil_gt_seg, pil_pred_seg])
            x = torch.stack(x, 0)
            x = vutils.make_grid(x, nrow=5, padding=5)
            writer.add_image(exp_name + '_epoch_' + str(epoch + 1),
                             (x.numpy() * 255).astype(np.uint8))

    mae = mae / val_set.get_num_samples()
    mse = np.sqrt(mse / val_set.get_num_samples())

    # np.mean() returns a scalar, so the old `[0]` indexing would raise;
    # cast to float instead (as the previously commented-out block did)
    loss1 = float(np.mean(np.array(val_loss_mse)))
    loss2 = float(np.mean(np.array(val_loss_cls)))
    loss3 = float(np.mean(np.array(val_loss_seg)))
    loss = float(np.mean(np.array(val_loss)))

    writer.add_scalar('val_loss_mse', loss1, epoch + 1)
    writer.add_scalar('val_loss_cls', loss2, epoch + 1)
    writer.add_scalar('val_loss_seg', loss3, epoch + 1)
    writer.add_scalar('val_loss', loss, epoch + 1)
    writer.add_scalar('mae', mae, epoch + 1)
    writer.add_scalar('mse', mse, epoch + 1)

    # track the best checkpoint so far (lowest MAE)
    if mae < train_record['best_mae']:
        train_record['best_mae'] = mae
        train_record['mse'] = mse
        train_record['corr_epoch'] = epoch + 1
        train_record['corr_loss'] = loss

    print '=' * 50
    print exp_name
    print '    ' + '-' * 20
    print '    [mae %.1f mse %.1f], [val loss %.8f %.8f %.4f %.4f]' % (
        mae, mse, loss, loss1, loss2, loss3)
    print '    ' + '-' * 20
    print '[best] [mae %.1f mse %.1f], [loss %.8f], [epoch %d]' % (
        train_record['best_mae'], train_record['mse'],
        train_record['corr_loss'], train_record['corr_epoch'])
    print '=' * 50
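# Compatibility note: this script targets Python 2 and pre-0.4 PyTorch. On
# PyTorch >= 0.4 the Variable(..., volatile=True) idiom above no longer
# exists; the equivalent inference guard would be, as a minimal sketch (not
# part of the original script):
#
#     with torch.no_grad():
#         pred_map, pred_cls, pred_seg = net(img, gt_map, roi, gt_roi, gt_seg)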
# Lighter variant of validate(): same forward pass and error accumulation,
# but without the TensorBoard logging, visualization, or best-record update.
def validate(val_loader, model_path, epoch, restore):
    net = CrowdCounter(ce_weights=train_set.wts)
    net.load_state_dict(torch.load(model_path))
    net.cuda()
    net.eval()

    print '=' * 50

    val_loss_mse = []
    val_loss_cls = []
    val_loss_seg = []
    val_loss = []
    mae = 0.0
    mse = 0.0

    for vi, data in enumerate(val_loader, 0):
        img, gt_map, gt_cnt, roi, gt_roi, gt_seg = data

        img = Variable(img, volatile=True).cuda()
        gt_map = Variable(gt_map, volatile=True).cuda()
        gt_seg = Variable(gt_seg, volatile=True).cuda()
        roi = Variable(roi[0], volatile=True).cuda().float()
        gt_roi = Variable(gt_roi[0], volatile=True).cuda()

        pred_map, pred_cls, pred_seg = net(img, gt_map, roi, gt_roi, gt_seg)
        loss1, loss2, loss3 = net.f_loss()

        val_loss_mse.append(loss1.data)
        val_loss_cls.append(loss2.data)
        val_loss_seg.append(loss3.data)
        val_loss.append(net.loss.data)

        pred_map = pred_map.data.cpu().numpy()
        gt_map = gt_map.data.cpu().numpy()
        pred_seg = pred_seg.cpu().max(1)[1].squeeze_(1).data.numpy()
        gt_seg = gt_seg.data.cpu().numpy()

        # optionally mask the density map with the predicted segmentation:
        # pred_map = pred_map * pred_seg

        gt_count = np.sum(gt_map)
        pred_cnt = np.sum(pred_map)
        mae += abs(gt_count - pred_cnt)
        mse += (gt_count - pred_cnt) * (gt_count - pred_cnt)

    mae = mae / val_set.get_num_samples()
    mse = np.sqrt(mse / val_set.get_num_samples())

    # cast the scalar means to float, as in the logging variant above
    loss1 = float(np.mean(np.array(val_loss_mse)))
    loss2 = float(np.mean(np.array(val_loss_cls)))
    loss3 = float(np.mean(np.array(val_loss_seg)))
    loss = float(np.mean(np.array(val_loss)))

    print '=' * 50
    print exp_name
    print '    ' + '-' * 20
    print '    [mae %.1f mse %.1f], [val loss %.8f %.8f %.4f %.4f]' % (
        mae, mse, loss, loss1, loss2, loss3)
    print '    ' + '-' * 20
    print '=' * 50
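# Usage sketch (assumptions, not original code): validate() is presumably
# driven from the training loop after each epoch's checkpoint is written.
# The checkpoint path pattern, train() call, and val_freq cadence below are
# hypothetical names for illustration only.
#
#     for epoch in range(cfg.TRAIN.MAX_EPOCH):
#         train(train_loader, epoch)                       # assumed train step
#         ckpt = os.path.join(exp_path, 'epoch_%d.pth' % (epoch + 1))
#         torch.save(net.state_dict(), ckpt)
#         if (epoch + 1) % val_freq == 0:                  # hypothetical cadence
#             validate(val_loader, ckpt, epoch, restore)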