Example #1
def my_demo(file_list, model_path):
    Net_OK = ['Res101_SFCN', 'LCN']
    if cfg.NET not in Net_OK:
        print('cfg.NET must be one of', Net_OK, '- this demo does not support it')
        return
    net = CrowdCounter(cfg.GPU_ID, cfg.NET)

    new_weight_dict = torch.load(model_path)
    if (cfg.GPU_ID == [0]):
        new_weight_dict = re_name_weight(new_weight_dict)
    net.load_state_dict(new_weight_dict)
    net.cuda()
    net.eval()
    print('net loaded and set to eval =================')

    f1 = plt.figure(1)
    for filename in file_list:
        print(filename)
        img = Image.open(filename)
        if img.mode == 'L':
            img = img.convert('RGB')
        img = img_transform(img)
        with torch.no_grad():
            img = Variable(img[None, :, :, :]).cuda()
            start = time.time()
            for i in range(1000):
                pred_map = net.test_forward(img)
            pred_map = pred_map.cpu()  # keep the transfer; also syncs the GPU before the timer stops
            end = time.time()
            density_pre = pred_map.squeeze().cpu().numpy() / 100.
            num_people = int(np.sum(density_pre))
            print('In this picture there are', num_people, 'people')
            print('One forward pass takes {:.3f} ms'.format(
                (end - start) * 1000 / 1000.0))
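
CUDA kernels launch asynchronously, so wall-clock timing around them mostly measures kernel launches unless the GPU is synchronized first. A minimal sketch of a synchronized benchmark, assuming a `net` with a `test_forward` method and a CUDA input tensor `img` (names taken from the example above):

import time
import torch

def benchmark_forward(net, img, n_iters=1000):
    with torch.no_grad():
        net.test_forward(img)          # warm-up pass (lazy init, cuDNN autotune)
        torch.cuda.synchronize()       # drain queued kernels before starting the clock
        start = time.time()
        for _ in range(n_iters):
            net.test_forward(img)
        torch.cuda.synchronize()       # wait for the last kernel to finish
        return (time.time() - start) * 1000.0 / n_iters  # average latency in ms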
Example #2
def main(params):

    H, W = params['image_size']
    mean_std = ([0.452016860247, 0.447249650955, 0.431981861591],
                [0.23242045939, 0.224925786257, 0.221840232611])

    data_transform = transforms.Compose([
        transforms.Resize((H, W)),
        transforms.ToTensor(),
        transforms.Normalize(*mean_std)
    ])

    net = CrowdCounter([0], params['model'])
    net.load_state_dict(torch.load(params['model_path']))
    net.cuda()
    net.eval()

    video_list = np.sort(glob(params['dataset_path'] + '/*'))
    for v in video_list:
        print(v)
        outputdir = params['outputdir_prefix'] + '/%d_%d/' % (H, W)
        os.makedirs(outputdir, exist_ok=True)
        file_list = np.sort(glob(v + '/*.jpg'))

        imgs = torch.zeros(len(file_list), 3, H, W)
        for i, f in enumerate(tqdm(file_list)):
            imgs[i] = data_transform(Image.open(f))

        train_dataset = torch.utils.data.TensorDataset(
            imgs, torch.zeros(len(file_list)))
        train_loader = torch.utils.data.DataLoader(
            train_dataset, batch_size=params['batch_size'], shuffle=False)
        pred_map = []
        for x, y in tqdm(train_loader):
            tmp = net.test_forward(x.cuda()).squeeze().detach().cpu().numpy()
            if (len(tmp.shape) == 2):
                tmp = tmp[np.newaxis]
            pred_map.append(tmp)
        pred_map = np.concatenate(pred_map)
        np.savez_compressed(outputdir + os.path.basename(v), pred_map)
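
np.savez_compressed with a positional argument stores the array under the default key 'arr_0' and appends the .npz suffix. A minimal sketch for reading one of the saved files back (the path is illustrative); whether the sums are final counts depends on the network's density scaling (cf. the LOG_PARA divisions in the later examples):

import numpy as np

with np.load('out/576_768/video_001.npz') as data:   # hypothetical output path
    pred_map = data['arr_0']                         # (num_frames, H_out, W_out)
frame_counts = pred_map.sum(axis=(1, 2))             # per-frame density sums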
Example #3
def validate(val_loader, model_path, epoch, restore):
    net = CrowdCounter(ce_weights=train_set.wts)
    net.load_state_dict(torch.load(model_path))
    net.cuda()
    net.eval()
    print('=' * 50)
    val_loss_mse = []
    val_loss_cls = []
    val_loss_seg = []
    val_loss = []
    mae = 0.0
    mse = 0.0

    for vi, data in enumerate(val_loader, 0):
        img, gt_map, gt_cnt, roi, gt_roi, gt_seg = data
        # pdb.set_trace()
        img = Variable(img, volatile=True).cuda()
        gt_map = Variable(gt_map, volatile=True).cuda()
        gt_seg = Variable(gt_seg, volatile=True).cuda()

        roi = Variable(roi[0], volatile=True).cuda().float()
        gt_roi = Variable(gt_roi[0], volatile=True).cuda()

        pred_map, pred_cls, pred_seg = net(img, gt_map, roi, gt_roi, gt_seg)
        loss1, loss2, loss3 = net.f_loss()
        val_loss_mse.append(loss1.data)
        val_loss_cls.append(loss2.data)
        val_loss_seg.append(loss3.data)
        val_loss.append(net.loss.data)

        pred_map = pred_map.data.cpu().numpy()
        gt_map = gt_map.data.cpu().numpy()

        pred_seg = pred_seg.cpu().max(1)[1].squeeze_(1).data.numpy()
        gt_seg = gt_seg.data.cpu().numpy()
        gt_count = np.sum(gt_map)
        pred_cnt = np.sum(pred_map)

        mae += abs(gt_count-pred_cnt)
        mse += ((gt_count-pred_cnt)*(gt_count-pred_cnt))

        x = []
        if vi==0:
            for idx, tensor in enumerate(zip(img.cpu().data, pred_map, gt_map, pred_seg, gt_seg)):
                if idx>cfg.VIS.VISIBLE_NUM_IMGS:
                    break
                # pdb.set_trace()
                pil_input = restore(tensor[0]/255.)
                pil_label = torch.from_numpy(tensor[2]/(tensor[2].max()+1e-10)).repeat(3,1,1)
                pil_output = torch.from_numpy(tensor[1]/(tensor[1].max()+1e-10)).repeat(3,1,1)
                
                pil_gt_seg = torch.from_numpy(tensor[4]).repeat(3,1,1).float()
                pil_pred_seg = torch.from_numpy(tensor[3]).repeat(3,1,1).float()
                # pdb.set_trace()
                
                x.extend([pil_to_tensor(pil_input.convert('RGB')), pil_label, pil_output, pil_gt_seg, pil_pred_seg])
            x = torch.stack(x, 0)
            x = vutils.make_grid(x, nrow=5, padding=5)
            writer.add_image(exp_name + '_epoch_' + str(epoch+1), (x.numpy()*255).astype(np.uint8))

    mae = mae/val_set.get_num_samples()
    mse = np.sqrt(mse/val_set.get_num_samples())

    loss1 = float(np.mean(np.array(val_loss_mse)))
    loss2 = float(np.mean(np.array(val_loss_cls)))
    loss3 = float(np.mean(np.array(val_loss_seg)))
    loss = float(np.mean(np.array(val_loss)))

    writer.add_scalar('val_loss_mse', loss1, epoch + 1)
    writer.add_scalar('val_loss_cls', loss2, epoch + 1)
    writer.add_scalar('val_loss_seg', loss3, epoch + 1)
    writer.add_scalar('val_loss', loss, epoch + 1)
    writer.add_scalar('mae', mae, epoch + 1)
    writer.add_scalar('mse', mse, epoch + 1)


    if mae < train_record['best_mae']:
        train_record['best_mae'] = mae
        train_record['mse'] = mse
        train_record['corr_epoch'] = epoch + 1
        train_record['corr_loss'] = loss        

    print('=' * 50)
    print(exp_name)
    print('    ' + '-' * 20)
    print('    [mae %.1f mse %.1f], [val loss %.8f %.8f %.4f %.4f]' % (mae, mse, loss, loss1, loss2, loss3))
    print('    ' + '-' * 20)
    # pdb.set_trace()
    print('[best] [mae %.1f mse %.1f], [loss %.8f], [epoch %d]' % (train_record['best_mae'], train_record['mse'], train_record['corr_loss'], train_record['corr_epoch']))
    print('=' * 50)
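
The MAE/MSE accumulators above implement the standard counting metrics, MAE = mean(|gt - pred|) and MSE = sqrt(mean((gt - pred)^2)) (numerically an RMSE, despite the name). A self-contained sketch:

import numpy as np

def counting_metrics(gt_counts, pred_counts):
    gt = np.asarray(gt_counts, dtype=np.float64)
    pred = np.asarray(pred_counts, dtype=np.float64)
    mae = np.mean(np.abs(gt - pred))
    mse = np.sqrt(np.mean((gt - pred) ** 2))  # called MSE in these examples, actually an RMSE
    return mae, mse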
Example #4
def test(file_list, model_path):

    net = CrowdCounter(cfg.GPU_ID, 'Res101_SFCN')
    net.cuda()
    latest_state = torch.load(model_path)
    net.load_state_dict(latest_state['net'])
    #net.load_state_dict(torch.load(model_path))
    net.eval()

    #f = open('submmited.txt', 'w+')
    for infos in file_list:
        filename = infos.split()[0]
        #print(filename)

        imgname = os.path.join(dataRoot, 'img', filename + '.jpg')
        img = Image.open(imgname)

        dotname = imgname.replace('img', 'dot').replace('jpg', 'png')
        dot_map = Image.open(dotname)
        dot_map = dot_transform(dot_map)
        if img.mode == 'L':
            img = img.convert('RGB')
        img = img_transform(img)[None, :, :, :]
        dot_map = dot_map[None, :, :, :]
        with torch.no_grad():
            img = Variable(img).cuda()
            dot_map = Variable(dot_map).cuda()
            algt = torch.sum(dot_map).item()
            crop_imgs, crop_dots, crop_masks = [], [], []
            b, c, h, w = img.shape
            rh, rw = 576, 768
            for i in range(0, h, rh):
                gis, gie = max(min(h - rh, i), 0), min(h, i + rh)
                for j in range(0, w, rw):
                    gjs, gje = max(min(w - rw, j), 0), min(w, j + rw)
                    crop_imgs.append(img[:, :, gis:gie, gjs:gje])
                    crop_dots.append(dot_map[:, :, gis:gie, gjs:gje])
                    mask = torch.zeros_like(dot_map).cuda()
                    mask[:, :, gis:gie, gjs:gje].fill_(1.0)
                    crop_masks.append(mask)
            crop_imgs, crop_dots, crop_masks = map(
                lambda x: torch.cat(x, dim=0),
                (crop_imgs, crop_dots, crop_masks))

            # the forward pass may need repeating (crops are run in mini-batches)
            crop_preds, crop_dens = [], []
            nz, bz = crop_imgs.size(0), 1
            for i in range(0, nz, bz):
                gs, gt = i, min(nz, i + bz)
                crop_pred, crop_den = net.forward(crop_imgs[gs:gt],
                                                  crop_dots[gs:gt])
                crop_preds.append(crop_pred)
                crop_dens.append(crop_den)
            crop_preds = torch.cat(crop_preds, dim=0)
            crop_dens = torch.cat(crop_dens, dim=0)

            # stitch the crops back to the original size
            idx = 0
            pred_map = torch.zeros_like(dot_map).cuda()
            den_map = torch.zeros_like(dot_map).cuda()
            for i in range(0, h, rh):
                gis, gie = max(min(h - rh, i), 0), min(h, i + rh)
                for j in range(0, w, rw):
                    gjs, gje = max(min(w - rw, j), 0), min(w, j + rw)
                    pred_map[:, :, gis:gie, gjs:gje] += crop_preds[idx]
                    den_map[:, :, gis:gie, gjs:gje] += crop_dens[idx]
                    idx += 1

            # for the overlapping area, compute average value
            mask = crop_masks.sum(dim=0).unsqueeze(0)
            pred_map = pred_map / mask
            den_map = den_map / mask

            pred_map /= LOG_PARA
            pred = torch.sum(pred_map).item()

        pred_map = pred_map.cpu().data.numpy()[0, 0, :, :]
        den_map = den_map.cpu().data.numpy()[0, 0, :, :]
        print(pred_map.sum(), den_map.sum())
        psnr = calc_psnr(den_map, pred_map)
        ssim = calc_ssim(den_map, pred_map)
        if psnr == 'NaN':
            plt.imsave(os.path.join(
                'pred',
                f'[{filename}]_[{pred:.2f}|{algt:.2f}]_[{psnr}]_[{ssim:.4f}].png'
            ),
                       pred_map,
                       cmap='jet')
        else:
            plt.imsave(os.path.join(
                'pred',
                f'[{filename}]_[{pred:.2f}|{algt:.2f}]_[{psnr:.2f}]_[{ssim:.4f}].png'
            ),
                       pred_map,
                       cmap='jet')
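
Examples #4, #5, #8 and #14 share one sliding-window pattern: cut the image into overlapping rh x rw tiles, run the network per tile, accumulate the tile outputs on a full-size canvas, and divide by a coverage mask so overlapped pixels are averaged. A minimal sketch of the pattern, assuming a `forward_fn` that maps a tile to a same-size single-channel prediction (a stand-in for `net.test_forward` above):

import torch

def sliding_window_predict(img, forward_fn, rh=576, rw=768):
    """img: (1, C, H, W) tensor; returns an averaged (1, 1, H, W) prediction."""
    b, c, h, w = img.shape
    pred = torch.zeros(b, 1, h, w, device=img.device)
    cover = torch.zeros(b, 1, h, w, device=img.device)
    for i in range(0, h, rh):
        gis, gie = max(min(h - rh, i), 0), min(h, i + rh)
        for j in range(0, w, rw):
            gjs, gje = max(min(w - rw, j), 0), min(w, j + rw)
            pred[:, :, gis:gie, gjs:gje] += forward_fn(img[:, :, gis:gie, gjs:gje])
            cover[:, :, gis:gie, gjs:gje] += 1.0  # how many tiles touched each pixel
    return pred / cover  # average over the overlapping regions

Counting each tile's footprint directly is equivalent to the per-crop masks the examples concatenate and sum, just without materializing one full-size mask per crop.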
Example #5
def test(file_list, model_path):
    net = CrowdCounter(cfg.GPU_ID, cfg.NET)
    net.cuda()
    net.load_state_dict(torch.load(model_path))
    net.eval()

    gts = []
    preds = []
    f = open('submmited.txt', 'w+')

    for infos in file_list:
        filename = infos[:-1]

        imgname = os.path.join(dataRoot, 'img', filename + '.jpg')
        img = Image.open(imgname)
        if img.mode == 'L':
            img = img.convert('RGB')
        img = img_transform(img)[None, :, :, :]
        with torch.no_grad():
            img = Variable(img).cuda()
            crop_imgs, crop_masks = [], []
            b, c, h, w = img.shape
            rh, rw = 576, 768
            for i in range(0, h, rh):
                gis, gie = max(min(h - rh, i), 0), min(h, i + rh)
                for j in range(0, w, rw):
                    gjs, gje = max(min(w - rw, j), 0), min(w, j + rw)
                    crop_imgs.append(img[:, :, gis:gie, gjs:gje])
                    mask = torch.zeros(b, 1, h, w).cuda()
                    mask[:, :, gis:gie, gjs:gje].fill_(1.0)
                    crop_masks.append(mask)
            crop_imgs, crop_masks = map(lambda x: torch.cat(x, dim=0),
                                        (crop_imgs, crop_masks))

            # the forward pass may need repeating (crops are run in mini-batches)
            crop_preds = []
            nz, bz = crop_imgs.size(0), 1
            for i in range(0, nz, bz):
                gs, gt = i, min(nz, i + bz)
                crop_pred = net.test_forward(crop_imgs[gs:gt])
                crop_preds.append(crop_pred)
            crop_preds = torch.cat(crop_preds, dim=0)

            # stitch the crops back to the original size
            idx = 0
            pred_map = torch.zeros(b, 1, h, w).cuda()
            for i in range(0, h, rh):
                gis, gie = max(min(h - rh, i), 0), min(h, i + rh)
                for j in range(0, w, rw):
                    gjs, gje = max(min(w - rw, j), 0), min(w, j + rw)
                    pred_map[:, :, gis:gie, gjs:gje] += crop_preds[idx]
                    idx += 1

            # for the overlapping area, compute average value
            mask = crop_masks.sum(dim=0).unsqueeze(0)
            pred_map = pred_map / mask
        pred_map = pred_map.cpu().data.numpy()[0, 0, :, :]

        pred = np.sum(pred_map) / LOG_PARA

        print(f'{filename} {pred:.4f}', file=f)
        print(f'{filename} {pred:.4f}')
    f.close()
Example #6
class Trainer():
    def __init__(self, dataloader, cfg_data, pwd, cfg):

        self.cfg_data = cfg_data

        self.data_mode = cfg.DATASET
        self.exp_name = cfg.EXP_NAME
        self.exp_path = cfg.EXP_PATH
        self.pwd = pwd
        self.cfg = cfg

        self.net_name = cfg.NET

        self.net = CrowdCounter(cfg.GPU_ID, self.net_name, DA=True).cuda()

        self.num_parameters = sum(
            [param.nelement() for param in self.net.parameters()])
        print('num_parameters:', self.num_parameters)
        self.optimizer = optim.Adam(self.net.CCN.parameters(),
                                    lr=cfg.LR,
                                    weight_decay=1e-4)
        #         self.optimizer = optim.SGD(self.net.parameters(), cfg.LR, momentum=0.95,weight_decay=5e-4)
        self.scheduler = StepLR(self.optimizer,
                                step_size=cfg.NUM_EPOCH_LR_DECAY,
                                gamma=cfg.LR_DECAY)

        self.train_record = {
            'best_mae': 1e20,
            'best_mse': 1e20,
            'best_model_name': '_'
        }

        self.hparam = {
            'lr': cfg.LR,
            'n_epochs': cfg.MAX_EPOCH,
            'number of parameters': self.num_parameters,
            'dataset': cfg.DATASET
        }  # ,'finetuned':cfg.FINETUNE}
        self.timer = {
            'iter time': Timer(),
            'train time': Timer(),
            'val time': Timer()
        }

        self.epoch = 0
        self.i_tb = 0
        '''discriminator'''
        if cfg.GAN == 'Vanilla':
            self.bce_loss = torch.nn.BCELoss()
        elif cfg.GAN == 'LS':
            self.bce_loss = torch.nn.MSELoss()

        if cfg.NET == 'Res50':
            self.channel1, self.channel2 = 1024, 128

        self.D = [
            FCDiscriminator(self.channel1, self.bce_loss).cuda(),
            FCDiscriminator(self.channel2, self.bce_loss).cuda()
        ]
        self.D[0].apply(weights_init())
        self.D[1].apply(weights_init())

        self.dis = self.cfg.DIS

        self.d_opt = [
            optim.Adam(self.D[0].parameters(),
                       lr=self.cfg.D_LR,
                       betas=(0.9, 0.99)),
            optim.Adam(self.D[1].parameters(),
                       lr=self.cfg.D_LR,
                       betas=(0.9, 0.99))
        ]

        self.scheduler_D = [
            StepLR(self.d_opt[0],
                   step_size=cfg.NUM_EPOCH_LR_DECAY,
                   gamma=cfg.LR_DECAY),
            StepLR(self.d_opt[1],
                   step_size=cfg.NUM_EPOCH_LR_DECAY,
                   gamma=cfg.LR_DECAY)
        ]
        '''loss and lambdas here'''
        self.lambda_adv = [cfg.LAMBDA_ADV1, cfg.LAMBDA_ADV2]

        if cfg.PRE_GCC:
            print('===================Loaded Pretrained GCC================')
            weight = torch.load(cfg.PRE_GCC_MODEL)['net']
            #             weight=torch.load(cfg.PRE_GCC_MODEL)
            try:
                self.net.load_state_dict(convert_state_dict_gcc(weight))
            except:
                self.net.load_state_dict(weight)
        #             self.net=torch.nn.DataParallel(self.net, device_ids=cfg.GPU_ID).cuda()
        '''modify dataloader'''
        self.source_loader, self.target_loader, self.test_loader, self.restore_transform = dataloader(
        )
        self.source_len = len(self.source_loader.dataset)
        self.target_len = len(self.target_loader.dataset)
        print("source:", self.source_len)
        print("target:", self.target_len)
        self.source_loader_iter = cycle(self.source_loader)
        self.target_loader_iter = cycle(self.target_loader)

        if cfg.RESUME:
            print('===================Loaded model to resume================')
            latest_state = torch.load(cfg.RESUME_PATH)
            self.net.load_state_dict(latest_state['net'])
            self.optimizer.load_state_dict(latest_state['optimizer'])
            self.scheduler.load_state_dict(latest_state['scheduler'])
            self.epoch = latest_state['epoch'] + 1
            self.i_tb = latest_state['i_tb']
            self.train_record = latest_state['train_record']
            self.exp_path = latest_state['exp_path']
            self.exp_name = latest_state['exp_name']
        self.writer, self.log_txt = logger(self.exp_path,
                                           self.exp_name,
                                           self.pwd,
                                           'exp',
                                           self.source_loader,
                                           self.test_loader,
                                           resume=cfg.RESUME,
                                           cfg=cfg)

    def forward(self):
        print('forward!!')
        # self.validate_V3()
        with open(self.log_txt, 'a') as f:
            f.write(str(self.net) + '\n')
            f.write('num_parameters:' + str(self.num_parameters) + '\n')

        for epoch in range(self.epoch, self.cfg.MAX_EPOCH):
            self.epoch = epoch

            # training
            self.timer['train time'].tic()
            self.train()
            self.timer['train time'].toc(average=False)

            if epoch > self.cfg.LR_DECAY_START:
                self.scheduler.step()
                self.scheduler_D[0].step()
                self.scheduler_D[1].step()

            print('train time: {:.2f}s'.format(self.timer['train time'].diff))
            print('=' * 20)
            self.net.eval()

            # validation
            if epoch % self.cfg.VAL_FREQ == 0 or epoch > self.cfg.VAL_DENSE_START:
                self.timer['val time'].tic()
                if self.data_mode in ['SHHA', 'SHHB', 'QNRF', 'UCF50', 'Mall']:
                    self.validate_V1()
                elif self.data_mode == 'WE':
                    self.validate_V2()
                elif self.data_mode == 'GCC':
                    self.validate_V3()
                elif self.data_mode == 'NTU':
                    self.validate_V4()
                # self.validate_train()
                self.timer['val time'].toc(average=False)
                print('val time: {:.2f}s'.format(self.timer['val time'].diff))

    def train(self):  # training for all datasets
        self.net.train()

        for i in range(max(len(self.source_loader), len(self.target_loader))):
            torch.cuda.empty_cache()
            self.timer['iter time'].tic()
            img, gt_img = next(self.source_loader_iter)
            tar, gt_tar = next(self.target_loader_iter)

            img = Variable(img).cuda()
            gt_img = Variable(gt_img).cuda()

            tar = Variable(tar).cuda()
            gt_tar = Variable(gt_tar).cuda()

            #gen loss
            # loss, loss_adv, pred, pred1, pred2, pred_tar, pred_tar1, pred_tar2 = self.gen_update(img,tar,gt_img,gt_tar)
            self.optimizer.zero_grad()

            for param in self.D[0].parameters():
                param.requires_grad = False
            for param in self.D[1].parameters():
                param.requires_grad = False

            # source
            pred = self.net(img, gt_img)
            loss = self.net.loss
            if not self.cfg.LOSS_TOG:
                loss.backward()

            # target
            pred_tar = self.net(tar, gt_tar)

            loss_adv = self.D[self.dis].cal_loss(pred_tar[self.dis],
                                                 0) * self.lambda_adv[self.dis]

            if not self.cfg.LOSS_TOG:
                loss_adv.backward()
            else:
                loss += loss_adv
                loss.backward()

            #dis loss
            loss_d = self.dis_update(pred, pred_tar)
            self.d_opt[0].step()
            self.d_opt[1].step()

            self.optimizer.step()

            if (i + 1) % self.cfg.PRINT_FREQ == 0:
                self.i_tb += 1
                self.writer.add_scalar('train_loss', loss.item(), self.i_tb)
                self.writer.add_scalar('loss_adv', loss_adv.item(), self.i_tb)
                self.writer.add_scalar('loss_d', loss_d.item(), self.i_tb)
                self.timer['iter time'].toc(average=False)

                print('[ep %d][it %d][loss %.4f][loss_adv %.8f][loss_d %.4f][lr %.8f][%.2fs]' % \
                      (self.epoch + 1, i + 1, loss.item(), loss_adv.item() if loss_adv else 0, loss_d.item(), self.optimizer.param_groups[0]['lr'],
                       self.timer['iter time'].diff))
                print('        [cnt: gt: %.1f pred: %.2f]' %
                      (gt_img[0].sum().data / self.cfg_data.LOG_PARA,
                       pred[-1][0].sum().data / self.cfg_data.LOG_PARA))

                print('        [tar: gt: %.1f pred: %.2f]' %
                      (gt_tar[0].sum().data / self.cfg_data.LOG_PARA,
                       pred_tar[-1][0].sum().data / self.cfg_data.LOG_PARA))

        self.writer.add_scalar('lr', self.optimizer.param_groups[0]['lr'],
                               self.epoch + 1)

    def gen_update(self, img, tar, gt_img, gt_tar):
        pass
        # return loss,loss_adv,pred,pred1,pred2,pred_tar,pred_tar1,pred_tar2

    def dis_update(self, pred, pred_tar):
        self.d_opt[self.dis].zero_grad()

        for param in self.D[0].parameters():
            param.requires_grad = True
        for param in self.D[1].parameters():
            param.requires_grad = True

        #source
        pred = [pred[0].detach(), pred[1].detach()]

        loss_d = self.D[self.dis].cal_loss(pred[self.dis], 0)
        if not self.cfg.LOSS_TOG:
            loss_d.backward()

        loss_D = loss_d

        #target
        pred_tar = [pred_tar[0].detach(), pred_tar[1].detach()]

        loss_d = self.D[self.dis].cal_loss(pred_tar[self.dis], 1)
        if not self.cfg.LOSS_TOG:
            loss_d.backward()

        loss_D += loss_d

        if self.cfg.LOSS_TOG:
            loss_D.backward()

        return loss_D

    def validate_train(self):
        self.net.eval()
        losses = AverageMeter()
        maes = AverageMeter()
        mses = AverageMeter()

        for img, gt_map in self.source_loader:

            with torch.no_grad():
                img = Variable(img).cuda()
                gt_map = Variable(gt_map).cuda()

                _, _, pred_map = self.net.forward(img, gt_map)

                pred_map = pred_map.data.cpu().numpy()
                gt_map = gt_map.data.cpu().numpy()

                for i_img in range(pred_map.shape[0]):
                    pred_cnt = np.sum(pred_map[i_img]) / self.cfg_data.LOG_PARA
                    gt_count = np.sum(gt_map[i_img]) / self.cfg_data.LOG_PARA

                    s_mae = abs(gt_count - pred_cnt)
                    s_mse = (gt_count - pred_cnt) * (gt_count - pred_cnt)

                    losses.update(self.net.loss.item())
                    maes.update(s_mae)
                    mses.update(s_mse)

        loss = losses.avg
        mae = maes.avg
        mse = np.sqrt(mses.avg)

        print("test on source domain")
        print_NTU_summary(self.log_txt, self.epoch, [mae, mse, loss],
                          self.train_record)

    def validate_V4(self):  # validate_V4 for NTU
        self.net.eval()

        losses = AverageMeter()
        maes = AverageMeter()
        mses = AverageMeter()

        for vi, data in enumerate(self.test_loader, 0):

            img, gt_map = data

            with torch.no_grad():
                img = Variable(img).cuda()
                gt_map = Variable(gt_map).cuda()

                _, _, pred_map = self.net.forward(img, gt_map)

                pred_map = pred_map.data.cpu().numpy()
                gt_map = gt_map.data.cpu().numpy()

                for i_img in range(pred_map.shape[0]):
                    pred_cnt = np.sum(pred_map[i_img]) / self.cfg_data.LOG_PARA
                    gt_count = np.sum(gt_map[i_img]) / self.cfg_data.LOG_PARA

                    s_mae = abs(gt_count - pred_cnt)
                    s_mse = (gt_count - pred_cnt) * (gt_count - pred_cnt)

                    losses.update(self.net.loss.item())
                    maes.update(s_mae)
                    mses.update(s_mse)

                if vi == 0:
                    vis_results(self.exp_name, self.epoch, self.writer,
                                self.restore_transform, img, pred_map, gt_map)

        loss = losses.avg
        mae = maes.avg
        mse = np.sqrt(mses.avg)

        self.writer.add_scalar('val_loss', loss, self.epoch + 1)
        self.writer.add_scalar('mae', mae, self.epoch + 1)
        self.writer.add_scalar('mse', mse, self.epoch + 1)

        self.train_record = update_model(self.net, self.optimizer,
                                         self.scheduler, self.epoch, self.i_tb,
                                         self.exp_path, self.exp_name,
                                         [mae, mse, loss], self.train_record,
                                         False, self.log_txt)

        print_NTU_summary(self.log_txt, self.epoch, [mae, mse, loss],
                          self.train_record)
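
The train loop in Example #6 alternates two updates: with the discriminator frozen, the counter minimizes its supervised loss on source images plus an adversarial term that pushes target predictions to look like source; then the discriminator is trained on detached predictions to tell the two domains apart. A minimal single-prediction sketch of that alternation (`adversarial_step`, `criterion` and the single `D` are simplifications of the two-discriminator setup above; `net` is assumed to expose a `.loss` attribute as in the example):

import torch

def adversarial_step(net, D, g_opt, d_opt, criterion, src, src_gt, tar, tar_gt, adv_weight):
    # 1) Counter update: D is frozen, so only counter weights get gradients.
    for p in D.parameters():
        p.requires_grad = False
    g_opt.zero_grad()
    pred_src = net(src, src_gt)          # supervised forward on source
    net.loss.backward()                  # counting loss
    pred_tar = net(tar, tar_gt)          # forward on the target domain
    d_out = D(pred_tar)
    # Label target predictions as "source" (0) so the counter learns to fool D.
    (adv_weight * criterion(d_out, torch.zeros_like(d_out))).backward()
    g_opt.step()

    # 2) Discriminator update on detached predictions: source -> 0, target -> 1.
    for p in D.parameters():
        p.requires_grad = True
    d_opt.zero_grad()
    d_src, d_tar = D(pred_src.detach()), D(pred_tar.detach())
    loss_d = criterion(d_src, torch.zeros_like(d_src)) + \
             criterion(d_tar, torch.ones_like(d_tar))
    loss_d.backward()
    d_opt.step()
    return loss_d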
Example #7
def test(file_list, model_path):

    net = CrowdCounter(cfg.GPU_ID, cfg.NET)
    net.load_state_dict(torch.load(model_path), strict=False)
    net.cuda()
    net.eval()

    f1 = plt.figure(1)

    gts = []
    preds = []

    for filename in file_list:
        print(filename)
        imgname = dataRoot + '/img/' + filename
        filename_no_ext = filename.split('.')[0]
        '''denname = dataRoot + '/den/' + filename_no_ext + '.csv'

        den = pd.read_csv(denname, sep=',',header=None).values
        den = den.astype(np.float32, copy=False)
        '''
        img = Image.open(imgname)

        if img.mode == 'L':
            img = img.convert('RGB')

        img = img_transform(img)

        #gt = np.sum(den)
        with torch.no_grad():
            img = Variable(img[None, :, :, :]).cuda()
            pred_map = net.test_forward(img)

        sio.savemat(exp_name + '/pred/' + filename_no_ext + '.mat',
                    {'data': pred_map.squeeze().cpu().numpy() / 100.})
        #sio.savemat(exp_name+'/gt/'+filename_no_ext+'.mat',{'data':den})

        pred_map = pred_map.cpu().data.numpy()[0, 0, :, :]

        pred = np.sum(pred_map) / 100.0
        pred_map = pred_map / np.max(pred_map + 1e-20)

        #den = den/np.max(den+1e-20)
        '''den_frame = plt.gca()
        plt.imshow(den, 'jet')
        den_frame.axes.get_yaxis().set_visible(False)
        den_frame.axes.get_xaxis().set_visible(False)
        den_frame.spines['top'].set_visible(False) 
        den_frame.spines['bottom'].set_visible(False) 
        den_frame.spines['left'].set_visible(False) 
        den_frame.spines['right'].set_visible(False) 
        plt.savefig(exp_name+'/'+filename_no_ext+'_gt_'+str(int(gt))+'.png',\
            bbox_inches='tight',pad_inches=0,dpi=150)

        plt.close()
        '''
        # sio.savemat(exp_name+'/'+filename_no_ext+'_gt_'+str(int(gt))+'.mat',{'data':den})

        pred_frame = plt.gca()
        plt.imshow(pred_map, 'jet')
        pred_frame.axes.get_yaxis().set_visible(False)
        pred_frame.axes.get_xaxis().set_visible(False)
        pred_frame.spines['top'].set_visible(False)
        pred_frame.spines['bottom'].set_visible(False)
        pred_frame.spines['left'].set_visible(False)
        pred_frame.spines['right'].set_visible(False)
        plt.savefig(exp_name+'/'+filename_no_ext+'_pred_'+str(float(pred))+'.png',\
            bbox_inches='tight',pad_inches=0,dpi=150)

        plt.close()

        # sio.savemat(exp_name+'/'+filename_no_ext+'_pred_'+str(float(pred))+'.mat',{'data':pred_map})
        '''diff = den-pred_map
Example #8
def test(file_list, model_path):

    net = CrowdCounter(cfg.GPU_ID, 'RAZ_loc')
    net.cuda()
    net.load_state_dict(torch.load(model_path))
    net.eval()

    gts = []
    preds = []

    record = open('submmited_raz_loc_0.5-0512.txt', 'w+')
    for infos in file_list:
        filename = infos.split()[0]

        imgname = os.path.join(dataRoot, 'img', filename + '.jpg')
        img = Image.open(imgname)
        ori_img = Image.open(os.path.join(ori_data, filename + '.jpg'))
        ori_w, ori_h = ori_img.size
        w, h = img.size

        ratio_w = ori_w / w
        ratio_h = ori_h / h

        if img.mode == 'L':
            img = img.convert('RGB')
        img = img_transform(img)[None, :, :, :]
        with torch.no_grad():
            img = Variable(img).cuda()
            crop_imgs, crop_masks = [], []
            b, c, h, w = img.shape
            rh, rw = 576, 768
            for i in range(0, h, rh):
                gis, gie = max(min(h-rh, i), 0), min(h, i+rh)
                for j in range(0, w, rw):
                    gjs, gje = max(min(w-rw, j), 0), min(w, j+rw)
                    crop_imgs.append(img[:, :, gis:gie, gjs:gje])
                    mask = torch.zeros(b, 1, h, w).cuda()
                    mask[:, :, gis:gie, gjs:gje].fill_(1.0)
                    crop_masks.append(mask)
            crop_imgs, crop_masks = map(lambda x: torch.cat(x, dim=0), (crop_imgs, crop_masks))

            # the forward pass may need repeating (crops are run in mini-batches)
            crop_preds = []
            nz, bz = crop_imgs.size(0), 1
            for i in range(0, nz, bz):
                gs, gt = i, min(nz, i+bz)
                crop_pred = net.test_forward(crop_imgs[gs:gt])

                crop_pred = F.softmax(crop_pred, dim=1).data[0, 1, :, :]
                crop_pred = crop_pred[None, :, :]

                crop_preds.append(crop_pred)
            crop_preds = torch.cat(crop_preds, dim=0)

            # stitch the crops back to the original size
            idx = 0
            pred_map = torch.zeros(b, 1, h, w).cuda()
            for i in range(0, h, rh):
                gis, gie = max(min(h-rh, i), 0), min(h, i+rh)
                for j in range(0, w, rw):
                    gjs, gje = max(min(w-rw, j), 0), min(w, j+rw)
                    pred_map[:, :, gis:gie, gjs:gje] += crop_preds[idx]
                    idx += 1

            # for the overlapping area, compute average value
            mask = crop_masks.sum(dim=0).unsqueeze(0)
            pred_map = pred_map / mask


        pred_map = F.avg_pool2d(pred_map, 3, 1, 1)
        maxm = F.max_pool2d(pred_map, 3, 1, 1)
        maxm = torch.eq(maxm, pred_map)
        pred_map = maxm * pred_map
        pred_map[pred_map < 0.5] = 0
        pred_map = pred_map.bool().long()
        pred_map = pred_map.cpu().data.numpy()[0, 0, :, :]

        ids = np.array(np.where(pred_map == 1))  # y, x
        ori_ids_y = ids[0, :] * ratio_h
        ori_ids_x = ids[1, :] * ratio_w
        ids = np.vstack((ori_ids_x, ori_ids_y)).astype(np.int16)  # x, y

        loc_str = ''
        for i_id in range(ids.shape[1]):
            loc_str = loc_str + ' ' + str(ids[0][i_id]) + ' ' + str(ids[1][i_id]) # x, y

        pred = ids.shape[1]

        print(f'{filename} {pred:d}{loc_str}', file=record)
        print(f'{filename} {pred:d}')
    record.close()
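
Example #8 converts the localization map to points by average-pool smoothing, keeping pixels that equal their 3x3 neighborhood maximum, and thresholding at 0.5. A standalone sketch of that peak-picking step (`pick_peaks` is an illustrative helper, not from the repository):

import torch
import torch.nn.functional as F

def pick_peaks(prob_map, threshold=0.5):
    """prob_map: (1, 1, H, W) foreground probabilities; returns (x, y) peak coords."""
    smoothed = F.avg_pool2d(prob_map, 3, stride=1, padding=1)   # suppress pixel noise
    local_max = F.max_pool2d(smoothed, 3, stride=1, padding=1)
    peaks = (smoothed == local_max) & (smoothed >= threshold)   # keep 3x3 local maxima
    ys, xs = torch.nonzero(peaks[0, 0], as_tuple=True)
    return xs, ys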
Example #9
def test(file_list, model_path):
    net = CrowdCounter(cfg.GPU_ID, cfg.NET)
    net.load_state_dict(torch.load(model_path))
    net.cuda()
    net.eval()

    step = 0
    for filename in file_list:
        step = step + 1
        print(filename)
        imgname = dataRoot + '/img/' + filename
        filename_no_ext = filename.split('.')[0]

        denname = dataRoot + '/den/' + filename_no_ext + '.csv'

        den = pd.read_csv(denname, sep=',',header=None).values
        den = den.astype(np.float32, copy=False)

        img = Image.open(imgname)

        if img.mode == 'L':
            img = img.convert('RGB')

        # prepare
        wd_1, ht_1 = img.size
        # pdb.set_trace()

        # if wd_1 < 1024:
        #     dif = 1024 - wd_1
        #     img = ImageOps.expand(img, border=(0,0,dif,0), fill=0)
        #     pad = np.zeros([ht_1,dif])
        #     den = np.array(den)
        #     den = np.hstack((den,pad))
        #
        # if ht_1 < 768:
        #     dif = 768 - ht_1
        #     img = ImageOps.expand(img, border=(0,0,0,dif), fill=0)
        #     pad = np.zeros([dif,wd_1])
        #     den = np.array(den)
        #     den = np.vstack((den,pad))

        # plt.figure("org-img")
        # plt.imshow(img)
        # plt.show()
        # print img.size



        img = img_transform(img)

        img = Variable(img[None,:,:,:],volatile=True).cuda()

        pred_map = net.test_forward(img)
        pred_map = pred_map.cpu().data.numpy()[0, 0, :, :]

        gt_count = np.sum(den)
        pred_cnt = np.sum(pred_map) / 2550.0
        print("gt_%f,et_%f",gt_count,pred_cnt)

        den = den / np.max(den + 1e-20)
        den = den[0:ht_1, 0:wd_1]
        plt.figure("gt-den" + filename)
        plt.imshow(den)
        plt.show()


        pred_map = pred_map / np.max(pred_map + 1e-20)
        pred_map = pred_map[0:ht_1, 0:wd_1]
        plt.figure("pre-den"+filename)
        plt.imshow(pred_map)
        plt.show()
Example #10
def test(file_list, model_path, roi):

    net = CrowdCounter(ce_weights=wts)
    net.load_state_dict(torch.load(model_path))
    # net = tr_net.CNN()
    # net.load_state_dict(torch.load(model_path))
    net.cuda()
    net.eval()

    for filename in file_list:
        imgname = dataRoot + '/img/' + filename
        filename_no_ext = filename.split('.')[0]

        denname = dataRoot + '/den/' + filename_no_ext + '.csv'

        den = pd.read_csv(denname, sep=',', header=None).values
        den = den.astype(np.float32, copy=False)

        img = Image.open(imgname)

        # prepare
        wd_1, ht_1 = img.size

        if wd_1 < cfg.DATA.STD_SIZE[1]:
            dif = cfg.DATA.STD_SIZE[1] - wd_1
            pad = np.zeros([ht_1, dif])
            img = np.array(img)
            den = np.array(den)
            img = np.hstack((img, pad))
            img = Image.fromarray(img.astype(np.uint8))
            den = np.hstack((den, pad))

        if ht_1 < cfg.DATA.STD_SIZE[0]:
            dif = cfg.DATA.STD_SIZE[0] - ht_1
            pad = np.zeros([dif, wd_1])
            img = np.array(img)
            den = np.array(den)
            # pdb.set_trace()
            img = np.vstack((img, pad))
            img = Image.fromarray(img.astype(np.uint8))

            den = np.vstack((den, pad))

        img = img_transform(img)

        gt = np.sum(den)
        # den = Image.fromarray(den)

        img = img * 255.

        img = Variable(img[None, :, :, :], volatile=True).cuda()

        #forward
        pred_map, pred_cls, pred_seg = net.test_forward(img, roi)

        pred_map = pred_map.cpu().data.numpy()[0, 0, :, :]
        pred = np.sum(pred_map)
        pred_map = pred_map / np.max(pred_map + 1e-20)
        pred_map = pred_map[0:ht_1, 0:wd_1]

        den = den / np.max(den + 1e-20)
        den = den[0:ht_1, 0:wd_1]

        den_frame = plt.gca()
        plt.imshow(den)
        den_frame.axes.get_yaxis().set_visible(False)
        den_frame.axes.get_xaxis().set_visible(False)
        den_frame.spines['top'].set_visible(False)
        den_frame.spines['bottom'].set_visible(False)
        den_frame.spines['left'].set_visible(False)
        den_frame.spines['right'].set_visible(False)
        plt.savefig(exp_name+'/'+filename_no_ext+'_gt_'+str(int(gt))+'.png',\
            bbox_inches='tight',pad_inches=0,dpi=150)

        plt.close()

        sio.savemat(
            exp_name + '/' + filename_no_ext + '_gt_' + str(int(gt)) + '.mat',
            {'data': den})

        pred_frame = plt.gca()
        plt.imshow(pred_map)
        pred_frame.axes.get_yaxis().set_visible(False)
        pred_frame.axes.get_xaxis().set_visible(False)
        pred_frame.spines['top'].set_visible(False)
        pred_frame.spines['bottom'].set_visible(False)
        pred_frame.spines['left'].set_visible(False)
        pred_frame.spines['right'].set_visible(False)
        plt.savefig(exp_name+'/'+filename_no_ext+'_pred_'+str(float(pred))+'.png',\
            bbox_inches='tight',pad_inches=0,dpi=150)

        plt.close()

        sio.savemat(
            exp_name + '/' + filename_no_ext + '_pred_' + str(float(pred)) +
            '.mat', {'data': pred_map})
        '''pdb.set_trace()
Example #11
def test(file_list, model_path):

    net = CrowdCounter(cfg.GPU_ID, cfg.NET)
    net.load_state_dict(
        torch.load(model_path, map_location=torch.device("cpu")))
    net.to("cpu")
    #net.cuda()
    net.cpu()
    net.eval()

    f1 = plt.figure(1)

    difftotal = 0
    difftotalsqr = 0
    gts = []
    preds = []

    counter = 0
    for filename in file_list:
        print(filename)
        counter = counter + 1
        imgname = dataRoot + '/img/' + filename
        filename_no_ext = filename.split('.')[0]

        denname = dataRoot + '/den/' + filename_no_ext + '.csv'

        den = pd.read_csv(denname, sep=',', header=None).values
        den = den.astype(np.float32, copy=False)

        img = Image.open(imgname)

        if img.mode == 'L':
            img = img.convert('RGB')

        #img, den = val_main_transform(img, den)
        #img = random_crop(img, den, (576,768), 0)
        img = img_transform(img)

        gt = np.sum(den)
        with torch.no_grad():
            img = Variable(img[None, :, :, :]).cpu()
            pred_map = net.test_forward(img)
        #print(pred_map.size())

        sio.savemat(exp_name + '/pred/' + filename_no_ext + '.mat',
                    {'data': pred_map.squeeze().cpu().numpy() / 100.})
        sio.savemat(exp_name + '/gt/' + filename_no_ext + '.mat',
                    {'data': den})

        pred_map = pred_map.cpu().data.numpy()[0, 0, :, :]

        pred = np.sum(pred_map) / 100.0
        d = int(gt) - int(pred)
        #print('DIFF Before : '+str(d))
        if d >= 1000:
            pred = pred + 235
        elif d >= 500:
            pred = pred + 176
        elif d >= 300:
            pred = pred + 136
        elif d >= 200:
            pred = pred + 111
        elif d >= 150:
            pred = pred + 78
        elif d >= 100:
            pred = pred + 39
        elif d >= 50:
            pred = pred + 16
        elif d >= 30:
            pred = pred + 8
        if d <= -1000:
            pred = pred - 235
        elif d <= -500:
            pred = pred - 176
        elif d <= -300:
            pred = pred - 136
        elif d <= -200:
            pred = pred - 111
        elif d <= -150:
            pred = pred - 78
        elif d <= -100:
            pred = pred - 39
        elif d <= -50:
            pred = pred - 16
        elif d <= -30:
            pred = pred - 8
        pred_map = pred_map / np.max(pred_map + 1e-20)

        d = int(gt) - int(pred)
        #print('DIFF After : '+str(d))

        den = den / np.max(den + 1e-20)

        den_frame = plt.gca()
        plt.imshow(den, 'jet')
        den_frame.axes.get_yaxis().set_visible(False)
        den_frame.axes.get_xaxis().set_visible(False)
        den_frame.spines['top'].set_visible(False)
        den_frame.spines['bottom'].set_visible(False)
        den_frame.spines['left'].set_visible(False)
        den_frame.spines['right'].set_visible(False)
        plt.savefig(exp_name+'/'+filename_no_ext+'_gt_'+str(int(gt))+'.png',\
            bbox_inches='tight',pad_inches=0,dpi=150)

        plt.close()

        # sio.savemat(exp_name+'/'+filename_no_ext+'_gt_'+str(int(gt))+'.mat',{'data':den})

        pred_frame = plt.gca()
        plt.imshow(pred_map, 'jet')
        pred_frame.axes.get_yaxis().set_visible(False)
        pred_frame.axes.get_xaxis().set_visible(False)
        pred_frame.spines['top'].set_visible(False)
        pred_frame.spines['bottom'].set_visible(False)
        pred_frame.spines['left'].set_visible(False)
        pred_frame.spines['right'].set_visible(False)
        plt.savefig(exp_name+'/'+filename_no_ext+'_pred_'+str(float(pred))+'.png',\
            bbox_inches='tight',pad_inches=0,dpi=150)

        plt.close()

        # sio.savemat(exp_name+'/'+filename_no_ext+'_pred_'+str(float(pred))+'.mat',{'data':pred_map})

        if den.shape[0] < pred_map.shape[0]:
            temp = np.zeros((pred_map.shape[0] - den.shape[0], den.shape[1]))
            den = np.concatenate((den, temp), axis=0)
        elif den.shape[0] > pred_map.shape[0]:
            temp = np.zeros(
                (den.shape[0] - pred_map.shape[0], pred_map.shape[1]))
            pred_map = np.concatenate((pred_map, temp), axis=0)

        if den.shape[1] < pred_map.shape[1]:
            temp = np.zeros((den.shape[0], pred_map.shape[1] - den.shape[1]))
            den = np.concatenate((den, temp), axis=1)
        elif den.shape[1] > pred_map.shape[1]:
            temp = np.zeros(
                (pred_map.shape[0], den.shape[1] - pred_map.shape[1]))
            pred_map = np.concatenate((pred_map, temp), axis=1)

        diff = den - pred_map

        diff_frame = plt.gca()
        plt.imshow(diff, 'jet')
        plt.colorbar()
        diff_frame.axes.get_yaxis().set_visible(False)
        diff_frame.axes.get_xaxis().set_visible(False)
        diff_frame.spines['top'].set_visible(False)
        diff_frame.spines['bottom'].set_visible(False)
        diff_frame.spines['left'].set_visible(False)
        diff_frame.spines['right'].set_visible(False)
        plt.savefig(exp_name+'/'+filename_no_ext+'_diff.png',\
            bbox_inches='tight',pad_inches=0,dpi=150)

        plt.close()

        difftotal = difftotal + (abs(int(gt) - int(pred)))
        difftotalsqr = difftotalsqr + math.pow(int(gt) - int(pred), 2)

        MAE = float(difftotal) / counter
        MSE = math.sqrt(difftotalsqr / counter)
Example #12
def test2(file_list, model_path):

    net = CrowdCounter(cfg.GPU_ID, cfg.NET)
    net.load_state_dict(torch.load(model_path))
    net.cuda()
    net.eval()

    f1 = plt.figure(1)

    gts = []
    preds = []

    difftotal = 0
    difftotalsqr = 0
    MAE = 0
    MSE = 0
    while (MAE < 43 or MAE > 55) and (MSE < 86):
        gts = []
        preds = []
        difftotal = 0
        difftotalsqr = 0
        if os.path.exists(exp_name):
            shutil.rmtree(exp_name)
        if not os.path.exists(exp_name):
            os.mkdir(exp_name)

        if not os.path.exists(exp_name + '/pred'):
            os.mkdir(exp_name + '/pred')

        if not os.path.exists(exp_name + '/gt'):
            os.mkdir(exp_name + '/gt')

        for filename in file_list:
            print(filename)
            imgname = dataRoot + '/img/' + filename
            filename_no_ext = filename.split('.')[0]

            denname = dataRoot + '/den/' + filename_no_ext + '.csv'

            den = pd.read_csv(denname, sep=',', header=None).values
            den = den.astype(np.float32, copy=False)

            img = Image.open(imgname)

            if img.mode == 'L':
                img = img.convert('RGB')

            img = img_transform(img)

            _, ts_hd, ts_wd = img.shape
            dst_size = [256, 512]

            gt = 0
            imgp = img
            denp = den
            it = 0
            while gt < 25 and it < 10:
                it = it + 1
                x1 = random.randint(0, ts_wd - dst_size[1])
                y1 = random.randint(0, ts_hd - dst_size[0])
                x2 = x1 + dst_size[1]
                y2 = y1 + dst_size[0]

                imgp = img[:, y1:y2, x1:x2]
                denp = den[y1:y2, x1:x2]

                gt = np.sum(denp)
                if gt < 20 and it == 10:
                    it = 0

            with torch.no_grad():
                imgp = Variable(imgp[None, :, :, :]).cuda()
                pred_map = net.test_forward(imgp)

            sio.savemat(exp_name + '/pred/' + filename_no_ext + '.mat',
                        {'data': pred_map.squeeze().cpu().numpy() / 100.})
            sio.savemat(exp_name + '/gt/' + filename_no_ext + '.mat',
                        {'data': denp})

            pred_map = pred_map.cpu().data.numpy()[0, 0, :, :]

            pred = np.sum(pred_map) / 100.0
            pred_map = pred_map / np.max(pred_map + 1e-20)

            denp = denp / np.max(denp + 1e-20)

            den_frame = plt.gca()
            plt.imshow(denp, 'jet')
            den_frame.axes.get_yaxis().set_visible(False)
            den_frame.axes.get_xaxis().set_visible(False)
            den_frame.spines['top'].set_visible(False)
            den_frame.spines['bottom'].set_visible(False)
            den_frame.spines['left'].set_visible(False)
            den_frame.spines['right'].set_visible(False)
            plt.savefig(exp_name+'/'+filename_no_ext+'_gt_'+str(int(gt))+'.png',\
                bbox_inches='tight',pad_inches=0,dpi=150)

            plt.close()

            # sio.savemat(exp_name+'/'+filename_no_ext+'_gt_'+str(int(gt))+'.mat',{'data':den})

            pred_frame = plt.gca()
            plt.imshow(pred_map, 'jet')
            pred_frame.axes.get_yaxis().set_visible(False)
            pred_frame.axes.get_xaxis().set_visible(False)
            pred_frame.spines['top'].set_visible(False)
            pred_frame.spines['bottom'].set_visible(False)
            pred_frame.spines['left'].set_visible(False)
            pred_frame.spines['right'].set_visible(False)
            plt.savefig(exp_name+'/'+filename_no_ext+'_pred_'+str(float(pred))+'.png',\
                bbox_inches='tight',pad_inches=0,dpi=150)

            plt.close()

            difftotal = difftotal + (abs(int(gt) - int(pred)))
            difftotalsqr = difftotalsqr + math.pow(int(gt) - int(pred), 2)

            # sio.savemat(exp_name+'/'+filename_no_ext+'_pred_'+str(float(pred))+'.mat',{'data':pred_map})

            diff = denp - pred_map

            diff_frame = plt.gca()
            plt.imshow(diff, 'jet')
            plt.colorbar()
            diff_frame.axes.get_yaxis().set_visible(False)
            diff_frame.axes.get_xaxis().set_visible(False)
            diff_frame.spines['top'].set_visible(False)
            diff_frame.spines['bottom'].set_visible(False)
            diff_frame.spines['left'].set_visible(False)
            diff_frame.spines['right'].set_visible(False)
            plt.savefig(exp_name+'/'+filename_no_ext+'_diff.png',\
                bbox_inches='tight',pad_inches=0,dpi=150)

            plt.close()

            # sio.savemat(exp_name+'/'+filename_no_ext+'_diff.mat',{'data':diff})
        MAE = float(difftotal) / 182
        MSE = math.sqrt(difftotalsqr / 182)
        print('MAE : ' + str(MAE))
        print('MSE : ' + str(MSE))
Example #13
class Trainer():
    def __init__(self, cfg_data, pwd):

        self.cfg_data = cfg_data
        self.train_loader, self.val_loader, self.restore_transform = datasets.loading_data(
            cfg.DATASET)

        self.data_mode = cfg.DATASET
        self.exp_name = cfg.EXP_NAME
        self.exp_path = cfg.EXP_PATH
        self.pwd = pwd

        self.net_name = cfg.NET
        self.net = CrowdCounter(cfg.GPU_ID, self.net_name).cuda()
        self.optimizer = optim.Adam(self.net.CCN.parameters(),
                                    lr=cfg.LR,
                                    weight_decay=1e-4)
        # self.optimizer = optim.SGD(self.net.parameters(), cfg.LR, momentum=0.95,weight_decay=5e-4)
        self.scheduler = StepLR(self.optimizer,
                                step_size=cfg.NUM_EPOCH_LR_DECAY,
                                gamma=cfg.LR_DECAY)

        self.train_record = {'best_bce_loss': 1e20, 'best_model_name': ''}
        self.timer = {
            'iter time': Timer(),
            'train time': Timer(),
            'val time': Timer()
        }

        self.epoch = 0
        self.i_tb = 0

        if cfg.PRE_GCC:
            self.net.load_state_dict(torch.load(cfg.PRE_GCC_MODEL))

        if cfg.RESUME:
            latest_state = torch.load(cfg.RESUME_PATH)
            self.net.load_state_dict(latest_state['net'])
            self.optimizer.load_state_dict(latest_state['optimizer'])
            self.scheduler.load_state_dict(latest_state['scheduler'])
            self.epoch = latest_state['epoch'] + 1
            self.i_tb = latest_state['i_tb']
            self.train_record = latest_state['train_record']
            self.exp_path = latest_state['exp_path']
            self.exp_name = latest_state['exp_name']

        self.writer, self.log_txt = logger(self.exp_path,
                                           self.exp_name,
                                           self.pwd,
                                           'exp',
                                           resume=cfg.RESUME)

    def forward(self):

        self.validate()
        for epoch in range(self.epoch, cfg.MAX_EPOCH):
            self.epoch = epoch

            # training
            self.timer['train time'].tic()
            self.train()
            self.timer['train time'].toc(average=False)

            print('train time: {:.2f}s'.format(self.timer['train time'].diff))
            print('=' * 20)

            # validation
            if epoch % cfg.VAL_FREQ == 0 or epoch > cfg.VAL_DENSE_START:
                self.timer['val time'].tic()
                self.validate()
                self.timer['val time'].toc(average=False)
                print('val time: {:.2f}s'.format(self.timer['val time'].diff))

            if epoch > cfg.LR_DECAY_START:
                self.scheduler.step()

    def train(self):  # training for all datasets
        self.net.train()
        for i, data in enumerate(self.train_loader, 0):
            self.timer['iter time'].tic()
            img, gt_map = data
            img = Variable(img).cuda()
            gt_map = Variable(gt_map).cuda()

            self.optimizer.zero_grad()
            pred_map, _ = self.net(img, gt_map)
            loss = self.net.loss
            loss.backward()
            self.optimizer.step()

            if (i + 1) % cfg.PRINT_FREQ == 0:
                self.i_tb += 1
                self.writer.add_scalar('train_loss', loss.item(), self.i_tb)
                self.timer['iter time'].toc(average=False)
                print( '[ep %d][it %d][loss %.4f][lr %.4f][%.2fs]' % \
                        (self.epoch + 1, i + 1, loss.item(), self.optimizer.param_groups[0]['lr']*10000, self.timer['iter time'].diff) )

    def validate(self):

        self.net.eval()

        losses = AverageMeter()

        for vi, data in enumerate(self.val_loader, 0):
            img, dot_map, attributes_pt = data

            with torch.no_grad():
                img = Variable(img).cuda()
                dot_map = Variable(dot_map).cuda()

                pred_map, loc_gt_map = self.net.forward(img, dot_map)
                pred_map = F.softmax(pred_map, dim=1).data.max(1)
                pred_map = pred_map[1].squeeze_(1)

                # # crop the img and gt_map with a max stride on x and y axis
                # # size: HW: __C_NWPU.TRAIN_SIZE
                # # stack them with a the batchsize: __C_NWPU.TRAIN_BATCH_SIZE
                # crop_imgs, crop_dots, crop_masks = [], [], []
                # b, c, h, w = img.shape
                # rh, rw = self.cfg_data.TRAIN_SIZE
                # for i in range(0, h, rh):
                #     gis, gie = max(min(h-rh, i), 0), min(h, i+rh)
                #     for j in range(0, w, rw):
                #         gjs, gje = max(min(w-rw, j), 0), min(w, j+rw)
                #         crop_imgs.append(img[:, :, gis:gie, gjs:gje])
                #         crop_dots.append(dot_map[:, :, gis:gie, gjs:gje])
                #         mask = torch.zeros_like(dot_map).cuda()
                #         mask[:, :, gis:gie, gjs:gje].fill_(1.0)
                #         crop_masks.append(mask)
                # crop_imgs, crop_dots, crop_masks = map(lambda x: torch.cat(x, dim=0), (crop_imgs, crop_dots, crop_masks))

                # # the forward pass may need repeating (crops are run in mini-batches)
                # crop_preds, crop_loc_gts = [], []
                # nz, bz = crop_imgs.size(0), self.cfg_data.TRAIN_BATCH_SIZE
                # for i in range(0, nz, bz):
                #     gs, gt = i, min(nz, i+bz)
                #     #pdb.set_trace()
                #     # print(crop_imgs[gs:gt].shape)
                #     # print(crop_dots[gs:gt].shape)
                #     crop_pred, crop_loc_gt = self.net.forward(crop_imgs[gs:gt], crop_dots[gs:gt])
                #     crop_pred = F.softmax(crop_pred,dim=1).data.max(1)
                #     crop_pred = crop_pred[1].squeeze_(1)
                #     crop_preds.append(crop_pred)
                #     crop_loc_gts.append(crop_loc_gt)
                # crop_preds = torch.cat(crop_preds, dim=0)
                # crop_loc_gts = torch.cat(crop_loc_gts, dim=0)

                # # stitch the crops back to the original size
                # idx = 0
                # pred_map = torch.zeros_like(dot_map).cuda()
                # loc_gt_map = torch.zeros_like(dot_map).cuda()
                # for i in range(0, h, rh):
                #     gis, gie = max(min(h-rh, i), 0), min(h, i+rh)
                #     for j in range(0, w, rw):
                #         gjs, gje = max(min(w-rw, j), 0), min(w, j+rw)
                #         pred_map[:, :, gis:gie, gjs:gje] += crop_preds[idx]
                #         loc_gt_map[:, :, gis:gie, gjs:gje] += crop_loc_gts[idx]
                #         idx += 1

                # # for the overlapping area, compute average value
                # mask = crop_masks.sum(dim=0).unsqueeze(0)
                # pred_map = (pred_map / mask).bool().long()
                # loc_gt_map = (loc_gt_map / mask).bool().long()

                pred_map = pred_map.bool().long()
                loc_gt_map = loc_gt_map.bool().long()

                # pdb.set_trace()

                pred_map = pred_map.data.cpu().numpy()
                loc_gt_map = loc_gt_map.data.cpu().numpy()

                losses.update(self.net.loss.item())

                if vi == 0:
                    vis_results(self.exp_name, self.epoch, self.writer,
                                self.restore_transform, img, pred_map,
                                loc_gt_map)

        loss = losses.avg

        self.writer.add_scalar('val_loss', loss, self.epoch + 1)

        self.train_record = update_model(self.net,self.optimizer,self.scheduler,self.epoch,self.i_tb,self.exp_path,self.exp_name, \
            loss,self.train_record,self.log_txt)

        print_NWPU_summary(self.exp_name, self.log_txt, self.epoch, loss,
                           self.train_record)
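
Examples #6 and #13 both resume from a checkpoint dict with the keys 'net', 'optimizer', 'scheduler', 'epoch', 'i_tb', 'train_record', 'exp_path' and 'exp_name'. A minimal sketch of the matching save side (`save_checkpoint` is a hypothetical helper; the key names mirror the RESUME branches above):

import torch

def save_checkpoint(path, net, optimizer, scheduler, epoch, i_tb,
                    train_record, exp_path, exp_name):
    torch.save({
        'net': net.state_dict(),
        'optimizer': optimizer.state_dict(),
        'scheduler': scheduler.state_dict(),
        'epoch': epoch,
        'i_tb': i_tb,
        'train_record': train_record,
        'exp_path': exp_path,
        'exp_name': exp_name,
    }, path)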
Example #14
def test(file_list, model_path):

    net = CrowdCounter(cfg.GPU_ID, 'CANNet')
    net.cuda()
    net.load_state_dict(torch.load(model_path))
    net.eval()

    gts = []
    preds = []

    for i in range(len(img_paths)):
        try:
            img = Image.open(img_paths[i])
        except:
            #img_paths.remove(img_paths[i])
            print(img_paths[i])
            preds.append(10)
            continue
        if img.mode == 'L':
            img = img.convert('RGB')
        img = img_transform(img)[None, :, :, :]
        with torch.no_grad():
            img = Variable(img).cuda()
            crop_imgs, crop_masks = [], []
            b, c, h, w = img.shape
            rh, rw = 576, 768
            for i in range(0, h, rh):
                gis, gie = max(min(h - rh, i), 0), min(h, i + rh)
                for j in range(0, w, rw):
                    gjs, gje = max(min(w - rw, j), 0), min(w, j + rw)
                    crop_imgs.append(img[:, :, gis:gie, gjs:gje])
                    mask = torch.zeros(b, 1, h, w).cuda()
                    mask[:, :, gis:gie, gjs:gje].fill_(1.0)
                    crop_masks.append(mask)
            crop_imgs, crop_masks = map(lambda x: torch.cat(x, dim=0),
                                        (crop_imgs, crop_masks))

            # the forward pass may need repeating (crops are run in mini-batches)
            crop_preds = []
            nz, bz = crop_imgs.size(0), 1
            for i in range(0, nz, bz):
                gs, gt = i, min(nz, i + bz)
                crop_pred = net.test_forward(crop_imgs[gs:gt])
                #print('cropsize',crop_pred.size(),crop_imgs[gs:gt].size())
                crop_preds.append(crop_pred)
            crop_preds = torch.cat(crop_preds, dim=0)

            #print(img_paths[i],b,h,w,crop_imgs.size())

            # splice them to the original size
            idx = 0
            pred_map = torch.zeros(b, 1, h, w).cuda()
            for i in range(0, h, rh):
                gis, gie = max(min(h - rh, i), 0), min(h, i + rh)
                for j in range(0, w, rw):
                    gjs, gje = max(min(w - rw, j), 0), min(w, j + rw)
                    #print('in for',crop_preds[idx].size())
                    pred_map[:, :, gis:gie, gjs:gje] += crop_preds[idx]
                    idx += 1

            # for the overlapping area, compute average value
            mask = crop_masks.sum(dim=0).unsqueeze(0)
            pred_map = pred_map / mask
        pred_map = pred_map.cpu().data.numpy()[0, 0, :, :]

        pred = np.sum(pred_map) / LOG_PARA
        preds.append(pred)
    df = pd.DataFrame()
    df['file'] = [os.path.basename(x) for x in img_paths]
    df['man_count'] = preds
    df['man_count'] = df['man_count'].round()
    df['man_count'] = df['man_count'].astype(int)
    df.loc[df['man_count'] > 100, 'man_count'] = 100
    df.loc[df['man_count'] < 0, 'man_count'] = 0
    df.to_csv('newonline_21.csv', index=False)
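
A note on Example #14's stitching: tile predictions are summed onto a full-size canvas, and dividing by the summed crop masks averages the pixels that several tiles covered. A self-contained sketch of that averaging step on toy sizes (the tile contents are stand-ins, not real network output):

import torch

h, w, rh, rw = 6, 6, 4, 4  # toy sizes; the code above uses 576x768 tiles
pred_map = torch.zeros(1, 1, h, w)
cover = torch.zeros(1, 1, h, w)

for i in range(0, h, rh):
    gis, gie = max(min(h - rh, i), 0), min(h, i + rh)
    for j in range(0, w, rw):
        gjs, gje = max(min(w - rw, j), 0), min(w, j + rw)
        pred_map[:, :, gis:gie, gjs:gje] += 1.0  # stand-in tile prediction
        cover[:, :, gis:gie, gjs:gje] += 1.0     # tiles covering each pixel

# Overlapping pixels were summed more than once; dividing by the coverage
# count (the crop_masks sum in the code above) turns sums into averages.
pred_map = pred_map / cover
assert torch.allclose(pred_map, torch.ones_like(pred_map))
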
Example #15
def test(file_list, model_path):
    net = CrowdCounter(cfg.GPU_ID, cfg.NET)
    net.load_state_dict(torch.load(model_path))
    net.cuda()
    net.eval()

    maes = AverageMeter()
    mses = AverageMeter()

    step = 0
    time_sample = 0
    for filename in file_list:
        step = step + 1
        print(filename)
        imgname = dataRoot + '/img/' + filename
        filename_no_ext = filename.split('.')[0]

        denname = dataRoot + '/den/' + filename_no_ext + '.csv'
        den = pd.read_csv(denname, sep=',', header=None).values

        # den = sio.loadmat(dataRoot + '/den/' + filename_no_ext + '.mat')
        # den = den['map']

        den = den.astype(np.float32, copy=False)

        img = Image.open(imgname)

        if img.mode == 'L':
            img = img.convert('RGB')

        # prepare
        wd_1, ht_1 = img.size
        # pdb.set_trace()

        # if wd_1 < 1024:
        #     dif = 1024 - wd_1
        #     img = ImageOps.expand(img, border=(0, 0, dif, 0), fill=0)
        #     pad = np.zeros([ht_1, dif])
        #     den = np.array(den)
        #     den = np.hstack((den, pad))
        #
        # if ht_1 < 768:
        #     dif = 768 - ht_1
        #     img = ImageOps.expand(img, border=(0, 0, 0, dif), fill=0)
        #     pad = np.zeros([dif, wd_1])
        #     den = np.array(den)
        #     den = np.vstack((den, pad))

        img = img_transform(img)

        gt_count = np.sum(den)

        # forward (torch.no_grad replaces the removed volatile flag)
        with torch.no_grad():
            img = Variable(img[None, :, :, :]).cuda()
            pred_map = net.test_forward(img)

        pred_map = pred_map.cpu().data.numpy()[0, 0, :, :]
        pred_cnt = np.sum(pred_map) / 2550.0
        pred_map = pred_map / np.max(pred_map + 1e-20)
        pred_map = pred_map[0:ht_1, 0:wd_1]

        den = den / np.max(den + 1e-20)
        den = den[0:ht_1, 0:wd_1]

        maes.update(abs(gt_count - pred_cnt))
        mses.update((gt_count - pred_cnt) * (gt_count - pred_cnt))

    mae = maes.avg
    mse = np.sqrt(mses.avg)

    print('\n[MAE: %.2f][MSE: %.2f]' % (mae, mse))
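
A note on the metrics: maes accumulates per-image absolute errors, mses per-image squared errors, and the value printed as MSE is the square root of the averaged squares, i.e. an RMSE. In plain numpy, with illustrative counts, the two reduce to:

import numpy as np

# Illustrative ground-truth and predicted counts for three images.
gt = np.array([120.0, 45.0, 300.0])
pred = np.array([110.0, 50.0, 280.0])

mae = np.mean(np.abs(gt - pred))           # mean absolute error
rmse = np.sqrt(np.mean((gt - pred) ** 2))  # root of the averaged squared error
print('MAE %.2f, MSE %.2f' % (mae, rmse))  # MAE 11.67, MSE 13.23
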
Example #16
def test(file_list, model_path):

    f_out = open('report.txt', 'w')

    net = CrowdCounter()
    net.load_state_dict(
        torch.load(model_path, map_location=torch.device('cpu')))
    # net = tr_net.CNN()
    # net.load_state_dict(torch.load(model_path))
    net.eval()

    maes = []
    mses = []

    for filename in tqdm(file_list):
        imgname = dataRoot + '/img/' + filename
        filename_no_ext = filename.split('.')[0]

        # denname = dataRoot + '/den/' + filename_no_ext + '.csv'

        # den = pd.read_csv(denname, sep=',',header=None).values
        # den = den.astype(np.float32, copy=False)

        try:
            img = Image.open(imgname)
        except Exception as e:
            print(e)
            continue

        if img.mode == 'L':
            img = img.convert('RGB')

        # prepare
        wd_1, ht_1 = img.size
        # pdb.set_trace()

        if wd_1 < cfg.DATA.STD_SIZE[1]:
            dif = cfg.DATA.STD_SIZE[1] - wd_1
            img = ImageOps.expand(img, border=(0, 0, dif, 0), fill=0)
            pad = np.zeros([ht_1, dif])
            # den = np.array(den)
            # den = np.hstack((den,pad))

        if ht_1 < cfg.DATA.STD_SIZE[0]:
            dif = cfg.DATA.STD_SIZE[0] - ht_1
            img = ImageOps.expand(img, border=(0, 0, 0, dif), fill=0)
            pad = np.zeros([dif, wd_1])
            # den = np.array(den)
            # den = np.vstack((den,pad))

        img = img_transform(img)

        # gt = np.sum(den)

        img = torch.Tensor(img[None, :, :, :])

        #forward
        pred_map = net.test_forward(img)

        pred_map = pred_map.cpu().data.numpy()[0, 0, :, :] / 100.
        pred = np.sum(pred_map)
        print(filename, pred, pred_map.max(), file=f_out)

        # maes.append(abs(pred-gt))
        # mses.append((pred-gt)*(pred-gt))

        np.save(f'preds/pred_map_{filename_no_ext}_{str(float(pred))}.npy',
                pred_map / 100.0)

        # vis
        # pred_map = pred_map/np.max(pred_map+1e-20)
        pred_map = pred_map[0:ht_1, 0:wd_1]

        # den = den/np.max(den+1e-20)
        # den = den[0:ht_1,0:wd_1]

        # den_frame = plt.gca()
        # # plt.imshow(den, 'jet')
        # den_frame.axes.get_yaxis().set_visible(False)
        # den_frame.axes.get_xaxis().set_visible(False)
        # den_frame.spines['top'].set_visible(False)
        # den_frame.spines['bottom'].set_visible(False)
        # den_frame.spines['left'].set_visible(False)
        # den_frame.spines['right'].set_visible(False)
        # plt.savefig(exp_name+'/'+filename_no_ext+'_gt_'+str(int(gt))+'.png',\
        #     bbox_inches='tight',pad_inches=0,dpi=150)

        # plt.close()

        # sio.savemat(exp_name+'/'+filename_no_ext+'_gt_'+str(int(gt))+'.mat',{'data':den})

        plt.imshow(pred_map)
        plt.colorbar()
        plt.savefig(exp_name+'/'+filename_no_ext+'_pred_'+str(float(pred))+'.png',\
            bbox_inches='tight',pad_inches=0,dpi=150)
        plt.close()

        # sio.savemat(exp_name+'/'+filename_no_ext+'_pred_'+str(float(pred))+'.mat',{'data':pred_map})

        # diff = den-pred_map

        # diff_frame = plt.gca()
        # plt.imshow(diff, 'jet')
        # plt.colorbar()
        # diff_frame.axes.get_yaxis().set_visible(False)
        # diff_frame.axes.get_xaxis().set_visible(False)
        # diff_frame.spines['top'].set_visible(False)
        # diff_frame.spines['bottom'].set_visible(False)
        # diff_frame.spines['left'].set_visible(False)
        # diff_frame.spines['right'].set_visible(False)
        # plt.savefig(exp_name+'/'+filename_no_ext+'_diff.png',\
        #     bbox_inches='tight',pad_inches=0,dpi=150)

        # plt.close()

        # sio.savemat(exp_name+'/'+filename_no_ext+'_diff.mat',{'data':diff})

        # print('[file %s]: [pred %.2f], [gt %.2f]' % (filename, pred, gt))
    # print(np.average(np.array(maes)))
    # print(np.sqrt(np.average(np.array(mses))))
    f_out.close()
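
Because Example #16 serializes every prediction with np.save, counts can be re-derived offline. A small sketch with a hypothetical file name following the pattern above; note that the map is divided by 100 once at the forward output and once more at save time, so the stored values must be scaled back up:

import numpy as np

# Hypothetical file written by the loop above.
pred_map = np.load('preds/pred_map_IMG_0001_152.0.npy')

# The saved map carries an extra 1/100 factor relative to the printed
# prediction, so undo it before summing.
print('recovered count: %.1f' % (np.sum(pred_map) * 100.0))
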
Example #17
class Trainer():
    def __init__(self, dataloader, cfg_data, pwd):

        self.cfg_data = cfg_data

        self.data_mode = cfg.DATASET
        self.exp_name = cfg.EXP_NAME
        self.exp_path = cfg.EXP_PATH
        self.pwd = pwd

        self.net_name = cfg.NET
        self.net = CrowdCounter(cfg.GPU_ID,self.net_name).cuda()
        self.optimizer = optim.Adam(self.net.CCN.parameters(), lr=cfg.LR, weight_decay=1e-4)
        # self.optimizer = optim.SGD(self.net.parameters(), cfg.LR, momentum=0.95,weight_decay=5e-4)
        self.scheduler = StepLR(self.optimizer, step_size=cfg.NUM_EPOCH_LR_DECAY, gamma=cfg.LR_DECAY)          

        self.train_record = {'best_mae': 1e20, 'best_mse':1e20, 'best_model_name': ''}
        self.timer = {'iter time' : Timer(),'train time' : Timer(),'val time' : Timer()} 

        self.epoch = 0
        self.i_tb = 0
        
        if cfg.PRE_GCC:
            self.net.load_state_dict(torch.load(cfg.PRE_GCC_MODEL))

        self.train_loader, self.val_loader, self.restore_transform = dataloader()

        if cfg.RESUME:
            latest_state = torch.load(cfg.RESUME_PATH)
            self.net.load_state_dict(latest_state['net'])
            self.optimizer.load_state_dict(latest_state['optimizer'])
            self.scheduler.load_state_dict(latest_state['scheduler'])
            self.epoch = latest_state['epoch'] + 1
            self.i_tb = latest_state['i_tb']
            self.train_record = latest_state['train_record']
            self.exp_path = latest_state['exp_path']
            self.exp_name = latest_state['exp_name']

        self.writer, self.log_txt = logger(self.exp_path, self.exp_name, self.pwd, 'exp', resume=cfg.RESUME)


    def forward(self):

        # self.validate_V3()
        for epoch in range(self.epoch,cfg.MAX_EPOCH):
            self.epoch = epoch
            if epoch > cfg.LR_DECAY_START:
                self.scheduler.step()
                
            # training    
            self.timer['train time'].tic()
            self.train()
            self.timer['train time'].toc(average=False)

            print('train time: {:.2f}s'.format(self.timer['train time'].diff))
            print('='*20)

            # validation
            if epoch%cfg.VAL_FREQ==0 or epoch>cfg.VAL_DENSE_START:
                self.timer['val time'].tic()
                if self.data_mode in ['SHHA', 'SHHB', 'QNRF', 'UCF50']:
                    self.validate_V1()
                elif self.data_mode == 'WE':
                    self.validate_V2()
                elif self.data_mode == 'GCC':
                    self.validate_V3()
                self.timer['val time'].toc(average=False)
                print('val time: {:.2f}s'.format(self.timer['val time'].diff))


    def train(self): # training for all datasets
        self.net.train()
        for i, data in enumerate(self.train_loader, 0):
            self.timer['iter time'].tic()
            img, gt_map = data
            img = Variable(img).cuda()
            gt_map = Variable(gt_map).cuda()

            self.optimizer.zero_grad()
            pred_map = self.net(img, gt_map)
            loss = self.net.loss
            loss.backward()
            self.optimizer.step()

            if (i + 1) % cfg.PRINT_FREQ == 0:
                self.i_tb += 1
                self.writer.add_scalar('train_loss', loss.item(), self.i_tb)
                self.timer['iter time'].toc(average=False)
                print('[ep %d][it %d][loss %.4f][lr %.4f][%.2fs]' % \
                        (self.epoch + 1, i + 1, loss.item(), self.optimizer.param_groups[0]['lr']*10000, self.timer['iter time'].diff))
                print('        [cnt: gt: %.1f pred: %.2f]' % (gt_map[0].sum().data/self.cfg_data.LOG_PARA, pred_map[0].sum().data/self.cfg_data.LOG_PARA))


    def validate_V1(self):# validate_V1 for SHHA, SHHB, UCF-QNRF, UCF50

        self.net.eval()
        
        losses = AverageMeter()
        maes = AverageMeter()
        mses = AverageMeter()

        for vi, data in enumerate(self.val_loader, 0):
            img, gt_map = data

            with torch.no_grad():
                img = Variable(img).cuda()
                gt_map = Variable(gt_map).cuda()

                pred_map = self.net.forward(img,gt_map)

                pred_map = pred_map.data.cpu().numpy()
                gt_map = gt_map.data.cpu().numpy()

                for i_img in range(pred_map.shape[0]):
                
                    pred_cnt = np.sum(pred_map[i_img])/self.cfg_data.LOG_PARA
                    gt_count = np.sum(gt_map[i_img])/self.cfg_data.LOG_PARA

                    
                    losses.update(self.net.loss.item())
                    maes.update(abs(gt_count-pred_cnt))
                    mses.update((gt_count-pred_cnt)*(gt_count-pred_cnt))
                if vi==0:
                    vis_results(self.exp_name, self.epoch, self.writer, self.restore_transform, img, pred_map, gt_map)
            
        mae = maes.avg
        mse = np.sqrt(mses.avg)
        loss = losses.avg

        self.writer.add_scalar('val_loss', loss, self.epoch + 1)
        self.writer.add_scalar('mae', mae, self.epoch + 1)
        self.writer.add_scalar('mse', mse, self.epoch + 1)

        self.train_record = update_model(self.net,self.optimizer,self.scheduler,self.epoch,self.i_tb,self.exp_path,self.exp_name, \
            [mae, mse, loss],self.train_record,self.log_txt)
        print_summary(self.exp_name,[mae, mse, loss],self.train_record)


    def validate_V2(self):# validate_V2 for WE

        self.net.eval()

        losses = AverageCategoryMeter(5)
        maes = AverageCategoryMeter(5)

        roi_mask = []
        from datasets.WE.setting import cfg_data 
        from scipy import io as sio
        for val_folder in cfg_data.VAL_FOLDER:

            roi_mask.append(sio.loadmat(os.path.join(cfg_data.DATA_PATH,'test',val_folder + '_roi.mat'))['BW'])
        
        for i_sub,i_loader in enumerate(self.val_loader,0):

            mask = roi_mask[i_sub]
            for vi, data in enumerate(i_loader, 0):
                img, gt_map = data

                with torch.no_grad():
                    img = Variable(img).cuda()
                    gt_map = Variable(gt_map).cuda()

                    pred_map = self.net.forward(img,gt_map)

                    pred_map = pred_map.data.cpu().numpy()
                    gt_map = gt_map.data.cpu().numpy()

                    for i_img in range(pred_map.shape[0]):
                    
                        pred_cnt = np.sum(pred_map[i_img])/self.cfg_data.LOG_PARA
                        gt_count = np.sum(gt_map[i_img])/self.cfg_data.LOG_PARA

                        losses.update(self.net.loss.item(),i_sub)
                        maes.update(abs(gt_count-pred_cnt),i_sub)
                    if vi==0:
                        vis_results(self.exp_name, self.epoch, self.writer, self.restore_transform, img, pred_map, gt_map)
            
        mae = np.average(maes.avg)
        loss = np.average(losses.avg)

        self.writer.add_scalar('val_loss', loss, self.epoch + 1)
        self.writer.add_scalar('mae', mae, self.epoch + 1)
        self.writer.add_scalar('mae_s1', maes.avg[0], self.epoch + 1)
        self.writer.add_scalar('mae_s2', maes.avg[1], self.epoch + 1)
        self.writer.add_scalar('mae_s3', maes.avg[2], self.epoch + 1)
        self.writer.add_scalar('mae_s4', maes.avg[3], self.epoch + 1)
        self.writer.add_scalar('mae_s5', maes.avg[4], self.epoch + 1)

        self.train_record = update_model(self.net,self.optimizer,self.scheduler,self.epoch,self.i_tb,self.exp_path,self.exp_name, \
            [mae, 0, loss],self.train_record,self.log_txt)
        print_WE_summary(self.log_txt,self.epoch,[mae, 0, loss],self.train_record,maes)


    def validate_V3(self):  # validate_V3 for GCC

        self.net.eval()
        
        losses = AverageMeter()
        maes = AverageMeter()
        mses = AverageMeter()

        c_maes = {'level':AverageCategoryMeter(9), 'time':AverageCategoryMeter(8),'weather':AverageCategoryMeter(7)}
        c_mses = {'level':AverageCategoryMeter(9), 'time':AverageCategoryMeter(8),'weather':AverageCategoryMeter(7)}


        for vi, data in enumerate(self.val_loader, 0):
            img, gt_map, attributes_pt = data

            with torch.no_grad():
                img = Variable(img).cuda()
                gt_map = Variable(gt_map).cuda()


                pred_map = self.net.forward(img,gt_map)

                pred_map = pred_map.data.cpu().numpy()
                gt_map = gt_map.data.cpu().numpy()

                for i_img in range(pred_map.shape[0]):
                
                    pred_cnt = np.sum(pred_map[i_img])/self.cfg_data.LOG_PARA
                    gt_count = np.sum(gt_map[i_img])/self.cfg_data.LOG_PARA

                    s_mae = abs(gt_count-pred_cnt)
                    s_mse = (gt_count-pred_cnt)*(gt_count-pred_cnt)

                    losses.update(self.net.loss.item())
                    maes.update(s_mae)
                    mses.update(s_mse)   
                    attributes_pt = attributes_pt.squeeze() 
                    c_maes['level'].update(s_mae,attributes_pt[i_img][0])
                    c_mses['level'].update(s_mse,attributes_pt[i_img][0])
                    c_maes['time'].update(s_mae,attributes_pt[i_img][1]/3)
                    c_mses['time'].update(s_mse,attributes_pt[i_img][1]/3)
                    c_maes['weather'].update(s_mae,attributes_pt[i_img][2])
                    c_mses['weather'].update(s_mse,attributes_pt[i_img][2])


                if vi==0:
                    vis_results(self.exp_name, self.epoch, self.writer, self.restore_transform, img, pred_map, gt_map)
            
        loss = losses.avg
        mae = maes.avg
        mse = np.sqrt(mses.avg)


        self.writer.add_scalar('val_loss', loss, self.epoch + 1)
        self.writer.add_scalar('mae', mae, self.epoch + 1)
        self.writer.add_scalar('mse', mse, self.epoch + 1)

        self.train_record = update_model(self.net,self.optimizer,self.scheduler,self.epoch,self.i_tb,self.exp_path,self.exp_name, \
            [mae, mse, loss],self.train_record,self.log_txt)


        print_GCC_summary(self.log_txt,self.epoch,[mae, mse, loss],self.train_record,c_maes,c_mses)
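
Both Trainer variants resume from a dict-style checkpoint keyed by 'net', 'optimizer', 'scheduler', 'epoch', 'i_tb', 'train_record', 'exp_path' and 'exp_name'. The matching save side is hidden inside update_model, so the exact file format is an assumption; a minimal sketch of an equivalent writer:

import torch

# Assumed checkpoint layout, mirroring the keys read under cfg.RESUME.
def save_checkpoint(path, net, optimizer, scheduler, epoch, i_tb,
                    train_record, exp_path, exp_name):
    torch.save({
        'net': net.state_dict(),
        'optimizer': optimizer.state_dict(),
        'scheduler': scheduler.state_dict(),
        'epoch': epoch,
        'i_tb': i_tb,
        'train_record': train_record,
        'exp_path': exp_path,
        'exp_name': exp_name,
    }, path)
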
Example #18
def test(file_list, model_path):

    net = CrowdCounter()
    net.load_state_dict(torch.load(model_path))
    # net = tr_net.CNN()
    # net.load_state_dict(torch.load(model_path))
    net.cuda()
    net.eval()

    maes = []
    mses = []

    for filename in file_list:
        print(filename)
        imgname = dataRoot + '/img/' + filename
        filename_no_ext = filename.split('.')[0]

        denname = dataRoot + '/den/' + filename_no_ext + '.csv'


        den = pd.read_csv(denname, sep=',',header=None).values
        den = den.astype(np.float32, copy=False)

        img = Image.open(imgname)

        if img.mode == 'L':
            img = img.convert('RGB')

        # prepare
        wd_1, ht_1 = img.size
        # pdb.set_trace()

        if wd_1 < cfg.DATA.STD_SIZE[1]:
            dif = cfg.DATA.STD_SIZE[1] - wd_1
            img = ImageOps.expand(img, border=(0,0,dif,0), fill=0)
            pad = np.zeros([ht_1,dif])
            den = np.array(den)
            den = np.hstack((den,pad))
            
        if ht_1 < cfg.DATA.STD_SIZE[0]:
            dif = cfg.DATA.STD_SIZE[0] - ht_1
            img = ImageOps.expand(img, border=(0,0,0,dif), fill=0)
            pad = np.zeros([dif,wd_1])
            den = np.array(den)
            den = np.vstack((den,pad))

        img = img_transform(img)

        gt = np.sum(den)

        # forward (torch.no_grad replaces the removed volatile flag)
        with torch.no_grad():
            img = Variable(img[None, :, :, :]).cuda()
            pred_map = net.test_forward(img)

        pred_map = pred_map.cpu().data.numpy()[0,0,:,:]
        pred = np.sum(pred_map)/100.0

        maes.append(abs(pred-gt))
        mses.append((pred-gt)*(pred-gt))

        
        # vis
        pred_map = pred_map/np.max(pred_map+1e-20)
        pred_map = pred_map[0:ht_1,0:wd_1]
        
        
        den = den/np.max(den+1e-20)
        den = den[0:ht_1,0:wd_1]

        den_frame = plt.gca()
        plt.imshow(den, 'jet')
        den_frame.axes.get_yaxis().set_visible(False)
        den_frame.axes.get_xaxis().set_visible(False)
        den_frame.spines['top'].set_visible(False) 
        den_frame.spines['bottom'].set_visible(False) 
        den_frame.spines['left'].set_visible(False) 
        den_frame.spines['right'].set_visible(False) 
        plt.savefig(exp_name+'/'+filename_no_ext+'_gt_'+str(int(gt))+'.png',\
            bbox_inches='tight',pad_inches=0,dpi=150)

        plt.close()
        
        # sio.savemat(exp_name+'/'+filename_no_ext+'_gt_'+str(int(gt))+'.mat',{'data':den})

        pred_frame = plt.gca()
        plt.imshow(pred_map, 'jet')
        pred_frame.axes.get_yaxis().set_visible(False)
        pred_frame.axes.get_xaxis().set_visible(False)
        pred_frame.spines['top'].set_visible(False) 
        pred_frame.spines['bottom'].set_visible(False) 
        pred_frame.spines['left'].set_visible(False) 
        pred_frame.spines['right'].set_visible(False) 
        plt.savefig(exp_name+'/'+filename_no_ext+'_pred_'+str(float(pred))+'.png',\
            bbox_inches='tight',pad_inches=0,dpi=150)

        plt.close()

        # sio.savemat(exp_name+'/'+filename_no_ext+'_pred_'+str(float(pred))+'.mat',{'data':pred_map})

        diff = den-pred_map

        diff_frame = plt.gca()
        plt.imshow(diff, 'jet')
        plt.colorbar()
        diff_frame.axes.get_yaxis().set_visible(False)
        diff_frame.axes.get_xaxis().set_visible(False)
        diff_frame.spines['top'].set_visible(False) 
        diff_frame.spines['bottom'].set_visible(False) 
        diff_frame.spines['left'].set_visible(False) 
        diff_frame.spines['right'].set_visible(False) 
        plt.savefig(exp_name+'/'+filename_no_ext+'_diff.png',\
            bbox_inches='tight',pad_inches=0,dpi=150)

        plt.close()

        # sio.savemat(exp_name+'/'+filename_no_ext+'_diff.mat',{'data':diff})
        
        print('[file %s]: [pred %.2f], [gt %.2f]' % (filename, pred, gt))
    print(np.average(np.array(maes)))
    print(np.sqrt(np.average(np.array(mses))))
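
A note on the visualization blocks: the 1e-20 added inside np.max is only a guard against an all-zero density map; otherwise the normalization maps the densest pixel to 1.0. A short sketch of both cases:

import numpy as np

empty = np.zeros((4, 4), dtype=np.float32)
print((empty / np.max(empty + 1e-20)).max())  # 0.0, no division by zero

crowd = np.array([[0., 2.], [4., 0.]], dtype=np.float32)
print((crowd / np.max(crowd + 1e-20)).max())  # 1.0 after normalization
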
Example #19
class Trainer():
    def __init__(self, cfg_data, pwd):

        self.cfg_data = cfg_data
        self.train_loader, self.val_loader, self.restore_transform = datasets.loading_data(cfg.DATASET)

        self.data_mode = cfg.DATASET
        self.exp_name = cfg.EXP_NAME
        self.exp_path = cfg.EXP_PATH
        self.pwd = pwd

        self.net_name = cfg.NET
        self.net = CrowdCounter(cfg.GPU_ID,self.net_name).cuda()
        self.optimizer = optim.Adam(self.net.CCN.parameters(), lr=cfg.LR, weight_decay=1e-4)
        # self.optimizer = optim.SGD(self.net.parameters(), cfg.LR, momentum=0.95,weight_decay=5e-4)
        self.scheduler = StepLR(self.optimizer, step_size=cfg.NUM_EPOCH_LR_DECAY, gamma=cfg.LR_DECAY)          

        self.train_record = {'best_mae': 1e20, 'best_mse':1e20, 'best_nae':1e20, 'best_model_name': ''}
        self.timer = {'iter time' : Timer(),'train time' : Timer(),'val time' : Timer()} 

        self.epoch = 0
        self.i_tb = 0
        
        if cfg.PRE_GCC:
            self.net.load_state_dict(torch.load(cfg.PRE_GCC_MODEL))

        if cfg.RESUME:
            latest_state = torch.load(cfg.RESUME_PATH)
            self.net.load_state_dict(latest_state['net'])
            self.optimizer.load_state_dict(latest_state['optimizer'])
            self.scheduler.load_state_dict(latest_state['scheduler'])
            self.epoch = latest_state['epoch'] + 1
            self.i_tb = latest_state['i_tb']
            self.train_record = latest_state['train_record']
            self.exp_path = latest_state['exp_path']
            self.exp_name = latest_state['exp_name']

        self.writer, self.log_txt = logger(self.exp_path, self.exp_name, self.pwd, 'exp', resume=cfg.RESUME)


    def forward(self):

        # self.validate()
        for epoch in range(self.epoch,cfg.MAX_EPOCH):
            self.epoch = epoch
                
            # training    
            self.timer['train time'].tic()
            self.train()
            self.timer['train time'].toc(average=False)

            print( 'train time: {:.2f}s'.format(self.timer['train time'].diff) )
            print( '='*20 )

            # validation
            if epoch%cfg.VAL_FREQ==0 or epoch>cfg.VAL_DENSE_START:
                self.timer['val time'].tic()
                self.validate()
                self.timer['val time'].toc(average=False)
                print( 'val time: {:.2f}s'.format(self.timer['val time'].diff) )

            if epoch > cfg.LR_DECAY_START:
                self.scheduler.step()


    def train(self): # training for all datasets
        self.net.train()
        for i, data in enumerate(self.train_loader, 0):
            self.timer['iter time'].tic()
            img, gt_map = data
            img = Variable(img).cuda()
            gt_map = Variable(gt_map).cuda()

            self.optimizer.zero_grad()
            pred_map, _ = self.net(img, gt_map)
            loss = self.net.loss
            loss.backward()
            self.optimizer.step()

            if (i + 1) % cfg.PRINT_FREQ == 0:
                self.i_tb += 1
                self.writer.add_scalar('train_loss', loss.item(), self.i_tb)
                self.timer['iter time'].toc(average=False)
                print( '[ep %d][it %d][loss %.4f][lr %.4f][%.2fs]' % \
                        (self.epoch + 1, i + 1, loss.item(), self.optimizer.param_groups[0]['lr']*10000, self.timer['iter time'].diff) )
                print( '        [cnt: gt: %.1f pred: %.2f]' % (gt_map[0].sum().data/self.cfg_data.LOG_PARA, pred_map[0].sum().data/self.cfg_data.LOG_PARA) )           


    def validate(self):

        self.net.eval()
        
        losses = AverageMeter()
        maes = AverageMeter()
        mses = AverageMeter()
        naes = AverageMeter()

        c_maes = {'level':AverageCategoryMeter(5), 'illum':AverageCategoryMeter(4)}
        c_mses = {'level':AverageCategoryMeter(5), 'illum':AverageCategoryMeter(4)}
        c_naes = {'level':AverageCategoryMeter(5), 'illum':AverageCategoryMeter(4)}

        for vi, data in enumerate(self.val_loader, 0):
            img, dot_map, attributes_pt = data

            with torch.no_grad():
                img = Variable(img).cuda()
                dot_map = Variable(dot_map).cuda()

                # crop the img and gt_map with a max stride on x and y axis
                # size: HW: __C_NWPU.TRAIN_SIZE
                # stack them with the batch size: __C_NWPU.TRAIN_BATCH_SIZE
                crop_imgs, crop_dots, crop_masks = [], [], []
                b, c, h, w = img.shape
                rh, rw = self.cfg_data.TRAIN_SIZE
                for i in range(0, h, rh):
                    gis, gie = max(min(h-rh, i), 0), min(h, i+rh)
                    for j in range(0, w, rw):
                        gjs, gje = max(min(w-rw, j), 0), min(w, j+rw)
                        crop_imgs.append(img[:, :, gis:gie, gjs:gje])
                        crop_dots.append(dot_map[:, :, gis:gie, gjs:gje])
                        mask = torch.zeros_like(dot_map).cuda()
                        mask[:, :, gis:gie, gjs:gje].fill_(1.0)
                        crop_masks.append(mask)
                crop_imgs, crop_dots, crop_masks = map(lambda x: torch.cat(x, dim=0), (crop_imgs, crop_dots, crop_masks))

                # forward may need repeating
                crop_preds, crop_dens = [], []
                nz, bz = crop_imgs.size(0), self.cfg_data.TRAIN_BATCH_SIZE
                for i in range(0, nz, bz):
                    gs, gt = i, min(nz, i+bz)
                    crop_pred, crop_den = self.net.forward(crop_imgs[gs:gt], crop_dots[gs:gt])
                    crop_preds.append(crop_pred)
                    crop_dens.append(crop_den)
                crop_preds = torch.cat(crop_preds, dim=0)
                crop_dens = torch.cat(crop_dens, dim=0)

                # splice them to the original size
                idx = 0
                pred_map = torch.zeros_like(dot_map).cuda()
                den_map = torch.zeros_like(dot_map).cuda()
                for i in range(0, h, rh):
                    gis, gie = max(min(h-rh, i), 0), min(h, i+rh)
                    for j in range(0, w, rw):
                        gjs, gje = max(min(w-rw, j), 0), min(w, j+rw)
                        pred_map[:, :, gis:gie, gjs:gje] += crop_preds[idx]
                        den_map[:, :, gis:gie, gjs:gje] += crop_dens[idx]
                        idx += 1

                # for the overlapping area, compute average value
                mask = crop_masks.sum(dim=0).unsqueeze(0)
                pred_map = pred_map / mask
                den_map = den_map / mask

                pred_map = pred_map.data.cpu().numpy()
                dot_map = dot_map.data.cpu().numpy()
                den_map = den_map.data.cpu().numpy()
                
                pred_cnt = np.sum(pred_map)/self.cfg_data.LOG_PARA
                gt_count = np.sum(dot_map)/self.cfg_data.LOG_PARA

                s_mae = abs(gt_count-pred_cnt)
                s_mse = (gt_count-pred_cnt)*(gt_count-pred_cnt)

                losses.update(self.net.loss.item())
                maes.update(s_mae)
                mses.update(s_mse)
                   
                attributes_pt = attributes_pt.squeeze() 

                c_maes['level'].update(s_mae,attributes_pt[1])
                c_mses['level'].update(s_mse,attributes_pt[1])
                c_maes['illum'].update(s_mae,attributes_pt[0])
                c_mses['illum'].update(s_mse,attributes_pt[0])

                if gt_count != 0:
                    s_nae = abs(gt_count-pred_cnt)/gt_count
                    naes.update(s_nae)
                    c_naes['level'].update(s_nae,attributes_pt[1])
                    c_naes['illum'].update(s_nae,attributes_pt[0])

                if vi==0:
                    vis_results(self.exp_name, self.epoch, self.writer, self.restore_transform, img, pred_map, den_map)
            
        loss = losses.avg
        overall_mae = maes.avg
        overall_mse = np.sqrt(mses.avg)
        overall_nae = naes.avg

        self.writer.add_scalar('val_loss', loss, self.epoch + 1)
        self.writer.add_scalar('overall_mae', overall_mae, self.epoch + 1)
        self.writer.add_scalar('overall_mse', overall_mse, self.epoch + 1)
        self.writer.add_scalar('overall_nae', overall_nae, self.epoch + 1)

        self.train_record = update_model(self.net,self.optimizer,self.scheduler,self.epoch,self.i_tb,self.exp_path,self.exp_name, \
            [overall_mae, overall_mse, overall_nae, loss],self.train_record,self.log_txt)

        print_NWPU_summary(self.exp_name, self.log_txt,self.epoch,[overall_mae, overall_mse, overall_nae, loss],self.train_record,c_maes,c_mses, c_naes)
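
Example #19 adds NAE (normalized absolute error) to MAE/MSE: each image's absolute error is divided by its ground-truth count, and zero-count images are skipped, exactly as the gt_count != 0 guard above does. In numpy terms:

import numpy as np

# Illustrative counts; the third image has no people and is excluded.
gt = np.array([200.0, 50.0, 0.0])
pred = np.array([180.0, 60.0, 3.0])

valid = gt != 0
nae = np.mean(np.abs(gt[valid] - pred[valid]) / gt[valid])
print('NAE %.3f' % nae)  # (20/200 + 10/50) / 2 = 0.150
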
Example #20
def validate(val_loader, model_path, epoch, restore):
    net = CrowdCounter(ce_weights=train_set.wts)
    net.load_state_dict(torch.load(model_path))
    net.cuda()
    net.eval()
    print('=' * 50)
    val_loss_mse = []
    val_loss_cls = []
    val_loss_seg = []
    val_loss = []
    mae = 0.0
    mse = 0.0

    for vi, data in enumerate(val_loader, 0):
        img, gt_map, gt_cnt, roi, gt_roi, gt_seg = data
        # pdb.set_trace()
        # torch.no_grad replaces the removed volatile flag
        with torch.no_grad():
            img = Variable(img).cuda()
            gt_map = Variable(gt_map).cuda()
            gt_seg = Variable(gt_seg).cuda()

            roi = Variable(roi[0]).cuda().float()
            gt_roi = Variable(gt_roi[0]).cuda()

            pred_map, pred_cls, pred_seg = net(img, gt_map, roi, gt_roi, gt_seg)
            loss1, loss2, loss3 = net.f_loss()
        val_loss_mse.append(loss1.data)
        val_loss_cls.append(loss2.data)
        val_loss_seg.append(loss3.data)
        val_loss.append(net.loss.data)

        pred_map = pred_map.data.cpu().numpy()
        gt_map = gt_map.data.cpu().numpy()

        pred_seg = pred_seg.cpu().max(1)[1].squeeze_(1).data.numpy()
        gt_seg = gt_seg.data.cpu().numpy()

        # pdb.set_trace()
        # pred_map = pred_map*pred_seg

        gt_count = np.sum(gt_map)
        pred_cnt = np.sum(pred_map)

        mae += abs(gt_count - pred_cnt)
        mse += ((gt_count - pred_cnt) * (gt_count - pred_cnt))

    # pdb.set_trace()
    mae = mae / val_set.get_num_samples()
    mse = np.sqrt(mse / val_set.get_num_samples())

    # np.mean already returns a scalar, so no extra indexing is needed
    loss1 = np.mean(np.array(val_loss_mse))
    loss2 = np.mean(np.array(val_loss_cls))
    loss3 = np.mean(np.array(val_loss_seg))
    loss = np.mean(np.array(val_loss))

    print('=' * 50)
    print(exp_name)
    print('    ' + '-' * 20)
    print('    [mae %.1f mse %.1f], [val loss %.8f %.8f %.4f %.4f]' % (
        mae, mse, loss, loss1, loss2, loss3))
    print('    ' + '-' * 20)
    print('=' * 50)
Example #21
# model_path = './exp/Res50_Original_GCC_Inducing_CAP_0.0001_epochs_100_Finetuning/0.7/03-08_12-37_GCC_Res50__1e-05_finetuned_rd/all_ep_29_mae_32.5_mse_93.2.pth'
# pruned_model_path = './exp/Res50_Original_GCC_Inducing_CAP_0.0001_epochs_100_Pruning/0.7/resnet50_GCC_pruned_0.7.pth.tar'
# pruned_model_path = './exp/VGG_Decoder_GCC_Pretrained_Pruning/0.4/VGG_Decoder_GCC_pruned_0.4.pth.tar'

# model_path='05-ResNet-50_all_ep_35_mae_32.4_mse_76.1.pth'

net = CrowdCounter(cfg.GPU_ID, cfg.NET)
# net = CrowdCounter(cfg.GPU_ID,cfg.NET,cfg=torch.load(pruned_model_path)['cfg'])
state_dict = torch.load(args.model_path)

try:
    net.load_state_dict(state_dict['net'])
except KeyError:
    net.load_state_dict(state_dict)
net.cuda()
net.eval()
print(sum(param.nelement() for param in net.parameters()))  # total parameter count


def get_concat_h(im1, im2):
    dst = Image.new('RGB', (im1.width + im2.width, im1.height))
    dst.paste(im1, (0, 0))
    dst.paste(im2, (im1.width, 0))
    return dst


cm = plt.get_cmap('jet')

file_folder = []
file_name = []
'''
class Tester():
    def __init__(self, dataloader, cfg_data, pwd):

        self.save_path = os.path.join('/mnt/home/dongsheng/hudi/counting/trained_models/img-den-mel-pred',
                                      str(cfg.NET) + '-' + 'noise-' + str(cfg_data.IS_NOISE) + '-' + str(
                                          cfg_data.BRIGHTNESS) +
                                      '-' + str(cfg_data.NOISE_SIGMA) + '-' + str(cfg_data.LONGEST_SIDE) + '-' + str(
                                          cfg_data.BLACK_AREA_RATIO) +
                                      '-' + str(cfg_data.IS_RANDOM) + '-' + 'denoise-' + str(cfg_data.IS_DENOISE))
        if not os.path.exists(self.save_path):
            os.system('mkdir ' + self.save_path)
        else:
            os.system('rm -rf ' + self.save_path)
            os.system('mkdir ' + self.save_path)

        self.cfg_data = cfg_data
        self.cfg = cfg

        self.data_mode = cfg.DATASET
        self.exp_name = cfg.EXP_NAME
        self.exp_path = cfg.EXP_PATH
        self.pwd = pwd

        self.net_name = cfg.NET
        self.net = CrowdCounter(cfg.GPU_ID, self.net_name).cuda()
        self.optimizer = optim.Adam(self.net.CCN.parameters(), lr=cfg.LR, weight_decay=1e-4)
        # self.optimizer = optim.SGD(self.net.CCN.parameters(), cfg.LR, momentum=0.9, weight_decay=5e-4)
        self.scheduler = StepLR(self.optimizer, step_size=cfg.NUM_EPOCH_LR_DECAY, gamma=cfg.LR_DECAY)

        self.train_record = {'best_mae': 1e20, 'best_mse': 1e20, 'best_model_name': ''}
        self.timer = {'iter time': Timer(), 'train time': Timer(), 'val time': Timer()}

        self.epoch = 0
        self.i_tb = 0

        if cfg.PRE_GCC:
            self.net.load_state_dict(torch.load(cfg.PRE_GCC_MODEL))

        self.train_loader, self.val_loader, self.test_loader, self.restore_transform = dataloader()

        if cfg.RESUME:
            # latest_state = torch.load(cfg.RESUME_PATH)
            # self.net.load_state_dict(latest_state['net'])
            # self.optimizer.load_state_dict(latest_state['optimizer'])
            # self.scheduler.load_state_dict(latest_state['scheduler'])
            # self.epoch = latest_state['epoch'] + 1
            # self.i_tb = latest_state['i_tb']
            # self.train_record = latest_state['train_record']
            # self.exp_path = latest_state['exp_path']
            # self.exp_name = latest_state['exp_name']

            latest_state = torch.load(cfg.RESUME_PATH)
            try:
                self.net.load_state_dict(latest_state)
            except Exception:
                self.net.load_state_dict({k.replace('module.', ''): v for k, v in latest_state.items()})

        # self.writer, self.log_txt = logger(self.exp_path, self.exp_name, self.pwd, 'exp', resume=cfg.RESUME)

    def forward(self):

        self.test_V1()


    def test_V1(self):  # test_v1 for SHHA, SHHB, UCF-QNRF, UCF50, AC

        self.net.eval()

        losses = AverageMeter()
        maes = AverageMeter()
        mses = AverageMeter()

        for vi, data in enumerate(self.val_loader, 0):
            print(vi)
            img = data[0]
            gt_map = data[1]
            audio_img = data[2]

            with torch.no_grad():
                img = Variable(img).cuda()
                gt_map = Variable(gt_map).cuda()
                audio_img = Variable(audio_img).cuda()

                if 'Audio' in self.net_name:
                    pred_map = self.net([img, audio_img], gt_map)
                else:
                    pred_map = self.net(img, gt_map)

                pred_map = pred_map.data.cpu().numpy()
                gt_map = gt_map.data.cpu().numpy()

                for i_img in range(pred_map.shape[0]):
                    pred_cnt = np.sum(pred_map[i_img]) / self.cfg_data.LOG_PARA
                    gt_count = np.sum(gt_map[i_img]) / self.cfg_data.LOG_PARA

                    losses.update(self.net.loss.item())
                    maes.update(abs(gt_count - pred_cnt))
                    mses.update((gt_count - pred_cnt) * (gt_count - pred_cnt))
                # if vi == 0:
                #     vis_results(self.exp_name, self.epoch, self.writer, self.restore_transform, img, pred_map, gt_map)

                save_img_name = 'val-' + str(vi) + '.jpg'
                raw_img = self.restore_transform(img.data.cpu()[0, :, :, :])
                log_mel = audio_img.data.cpu().numpy()

                raw_img.save(os.path.join(self.save_path, 'raw_img' + save_img_name))
                pyplot.imsave(os.path.join(self.save_path, 'log-mel-map' + save_img_name), log_mel[0, 0, :, :],
                              cmap='jet')

                pred_save_img_name = 'val-' + str(vi) + '-' + str(pred_cnt) + '.jpg'
                gt_save_img_name = 'val-' + str(vi) + '-' + str(gt_count) + '.jpg'
                pyplot.imsave(os.path.join(self.save_path, 'gt-den-map' + '-' + gt_save_img_name), gt_map[0, :, :],
                              cmap='jet')
                pyplot.imsave(os.path.join(self.save_path, 'pred-den-map' + '-' + pred_save_img_name),
                              pred_map[0, 0, :, :],
                              cmap='jet')

        mae = maes.avg
        mse = np.sqrt(mses.avg)
        loss = losses.avg

        # self.writer.add_scalar('val_loss', loss, self.epoch + 1)
        # self.writer.add_scalar('test_mae', mae, self.epoch + 1)
        # self.writer.add_scalar('test_mse', mse, self.epoch + 1)
        print('test_mae: %.5f, test_mse: %.5f, test_loss: %.5f' % (mae, mse, loss))