# ===== Example #1 (Пример #1) =====
def main():
    """Training entry point: snapshot the config, set up CUDA, then run the
    epoch loop of train() / validate() with LR decay.

    Relies on module-level names (cfg, log_txt, train_loader, val_loader,
    restore_transform, _t timers, CrowdCounter, train, validate).
    """
    # Snapshot the experiment configuration into the log for reproducibility.
    # FIX: use a context manager — the file was previously never closed.
    with open('./config.py', 'r') as cfg_file:
        cfg_lines = cfg_file.readlines()

    with open(log_txt, 'a') as f:
        f.write(''.join(cfg_lines) + '\n\n\n\n')

    # Pin the process to a single GPU when only one is configured.
    if len(cfg.TRAIN.GPU_ID) == 1:
        torch.cuda.set_device(cfg.TRAIN.GPU_ID[0])
    # Let cuDNN autotune conv kernels (fast when input sizes are fixed).
    torch.backends.cudnn.benchmark = True

    net = CrowdCounter().cuda()

    if cfg.TRAIN.PRE_GCC:
        net.load_state_dict(torch.load(cfg.TRAIN.PRE_GCC_MODEL))

    net.train()
    optimizer = optim.Adam(net.parameters(),
                           lr=cfg.TRAIN.LR,
                           weight_decay=1e-4)
    scheduler = StepLR(optimizer,
                       step_size=cfg.TRAIN.NUM_EPOCH_LR_DECAY,
                       gamma=cfg.TRAIN.LR_DECAY)

    i_tb = 0
    # validate(val_loader, net, -1, restore_transform)
    for epoch in range(cfg.TRAIN.MAX_EPOCH):
        if epoch > cfg.TRAIN.LR_DECAY_START:
            scheduler.step()

        # training
        _t['train time'].tic()
        i_tb = train(train_loader, net, optimizer, epoch, i_tb)
        _t['train time'].toc(average=False)

        # FIX: Python-2 print statements converted to print() calls,
        # consistent with the rest of the codebase and valid under Python 3.
        print('train time: {:.2f}s'.format(_t['train time'].diff))
        print('=' * 20)

        # validation
        if epoch % cfg.VAL.FREQ == 0 or epoch > cfg.VAL.DENSE_START:
            _t['val time'].tic()
            validate(val_loader, net, epoch, restore_transform)
            _t['val time'].toc(average=False)
            print('val time: {:.2f}s'.format(_t['val time'].diff))
# ===== Example #2 (Пример #2) =====
class Trainer():
    """Adversarial domain-adaptation (DA) trainer for crowd counting.

    A CrowdCounter generator is trained on a labelled source domain while
    two patch discriminators (FCDiscriminator) learn to tell source
    predictions from target predictions; an adversarial loss pushes the
    generator's target-domain predictions to look source-like.
    """

    def __init__(self, dataloader, cfg_data, pwd, cfg):
        """Build the networks, optimizers, schedulers, loaders and logger.

        Args:
            dataloader: factory returning (source_loader, target_loader,
                test_loader, restore_transform).
            cfg_data: dataset-specific settings (e.g. LOG_PARA scaling).
            pwd: working directory, forwarded to the logger.
            cfg: experiment configuration namespace.
        """
        self.cfg_data = cfg_data

        self.data_mode = cfg.DATASET
        self.exp_name = cfg.EXP_NAME
        self.exp_path = cfg.EXP_PATH
        self.pwd = pwd
        self.cfg = cfg

        self.net_name = cfg.NET

        self.net = CrowdCounter(cfg.GPU_ID, self.net_name, DA=True).cuda()

        self.num_parameters = sum(
            [param.nelement() for param in self.net.parameters()])
        print('num_parameters:', self.num_parameters)
        # Only the counting head (CCN) is optimized here; the discriminators
        # get their own optimizers below.
        self.optimizer = optim.Adam(self.net.CCN.parameters(),
                                    lr=cfg.LR,
                                    weight_decay=1e-4)
        self.scheduler = StepLR(self.optimizer,
                                step_size=cfg.NUM_EPOCH_LR_DECAY,
                                gamma=cfg.LR_DECAY)

        # Best-so-far validation metrics (sentinel 1e20 = "not set yet").
        self.train_record = {
            'best_mae': 1e20,
            'best_mse': 1e20,
            'best_model_name': '_'
        }

        self.hparam = {
            'lr': cfg.LR,
            'n_epochs': cfg.MAX_EPOCH,
            'number of parameters': self.num_parameters,
            'dataset': cfg.DATASET
        }
        self.timer = {
            'iter time': Timer(),
            'train time': Timer(),
            'val time': Timer()
        }

        self.epoch = 0
        self.i_tb = 0

        # Discriminator criterion: vanilla GAN -> BCE, least-squares GAN -> MSE.
        if cfg.GAN == 'Vanilla':
            self.bce_loss = torch.nn.BCELoss()
        elif cfg.GAN == 'LS':
            self.bce_loss = torch.nn.MSELoss()

        # Channel widths of the two feature maps fed to the discriminators.
        # NOTE(review): only 'Res50' is handled; any other cfg.NET leaves
        # channel1/channel2 unset, so the FCDiscriminator construction below
        # raises AttributeError — confirm the set of supported backbones.
        if cfg.NET == 'Res50':
            self.channel1, self.channel2 = 1024, 128

        self.D = [
            FCDiscriminator(self.channel1, self.bce_loss).cuda(),
            FCDiscriminator(self.channel2, self.bce_loss).cuda()
        ]
        # NOTE(review): weights_init() is *called* here; it presumably
        # returns an initializer function suitable for nn.Module.apply —
        # verify against its definition.
        self.D[0].apply(weights_init())
        self.D[1].apply(weights_init())

        self.dis = self.cfg.DIS  # index (0 or 1) of the active discriminator

        self.d_opt = [
            optim.Adam(self.D[0].parameters(),
                       lr=self.cfg.D_LR,
                       betas=(0.9, 0.99)),
            optim.Adam(self.D[1].parameters(),
                       lr=self.cfg.D_LR,
                       betas=(0.9, 0.99))
        ]

        self.scheduler_D = [
            StepLR(self.d_opt[0],
                   step_size=cfg.NUM_EPOCH_LR_DECAY,
                   gamma=cfg.LR_DECAY),
            StepLR(self.d_opt[1],
                   step_size=cfg.NUM_EPOCH_LR_DECAY,
                   gamma=cfg.LR_DECAY)
        ]

        # Weights of the adversarial loss per discriminator level.
        self.lambda_adv = [cfg.LAMBDA_ADV1, cfg.LAMBDA_ADV2]

        if cfg.PRE_GCC:
            print('===================Loaded Pretrained GCC================')
            weight = torch.load(cfg.PRE_GCC_MODEL)['net']
            # GCC checkpoints may use a different key layout; try the
            # converted form first, then fall back to the raw state dict.
            try:
                self.net.load_state_dict(convert_state_dict_gcc(weight))
            except Exception:  # FIX: narrowed from bare `except:`; fallback kept
                self.net.load_state_dict(weight)

        self.source_loader, self.target_loader, self.test_loader, self.restore_transform = dataloader(
        )
        self.source_len = len(self.source_loader.dataset)
        self.target_len = len(self.target_loader.dataset)
        print("source:", self.source_len)
        print("target:", self.target_len)
        # cycle() lets train() draw source/target batches in lockstep even
        # though the two datasets have different lengths.
        self.source_loader_iter = cycle(self.source_loader)
        self.target_loader_iter = cycle(self.target_loader)

        if cfg.RESUME:
            print('===================Loaded model to resume================')
            latest_state = torch.load(cfg.RESUME_PATH)
            self.net.load_state_dict(latest_state['net'])
            self.optimizer.load_state_dict(latest_state['optimizer'])
            self.scheduler.load_state_dict(latest_state['scheduler'])
            self.epoch = latest_state['epoch'] + 1
            self.i_tb = latest_state['i_tb']
            self.train_record = latest_state['train_record']
            self.exp_path = latest_state['exp_path']
            self.exp_name = latest_state['exp_name']
        self.writer, self.log_txt = logger(self.exp_path,
                                           self.exp_name,
                                           self.pwd,
                                           'exp',
                                           self.source_loader,
                                           self.test_loader,
                                           resume=cfg.RESUME,
                                           cfg=cfg)

    def forward(self):
        """Run the full training loop: train each epoch, then validate."""
        print('forward!!')
        # Record the model description in the experiment log.
        with open(self.log_txt, 'a') as f:
            f.write(str(self.net) + '\n')
            f.write('num_parameters:' + str(self.num_parameters) + '\n')

        for epoch in range(self.epoch, self.cfg.MAX_EPOCH):
            self.epoch = epoch

            # training
            self.timer['train time'].tic()
            self.train()
            self.timer['train time'].toc(average=False)

            if epoch > self.cfg.LR_DECAY_START:
                self.scheduler.step()
                self.scheduler_D[0].step()
                self.scheduler_D[1].step()

            print('train time: {:.2f}s'.format(self.timer['train time'].diff))
            print('=' * 20)
            self.net.eval()

            # validation: dispatch on the dataset family.
            # FIX: `is 'WE'` etc. compared string *identity*, which relies on
            # CPython interning (SyntaxWarning since 3.8) — use `==`.
            if epoch % self.cfg.VAL_FREQ == 0 or epoch > self.cfg.VAL_DENSE_START:
                self.timer['val time'].tic()
                if self.data_mode in ['SHHA', 'SHHB', 'QNRF', 'UCF50', 'Mall']:
                    self.validate_V1()
                elif self.data_mode == 'WE':
                    self.validate_V2()
                elif self.data_mode == 'GCC':
                    self.validate_V3()
                elif self.data_mode == 'NTU':
                    self.validate_V4()
                self.timer['val time'].toc(average=False)
                print('val time: {:.2f}s'.format(self.timer['val time'].diff))

    def train(self):  # training for all datasets
        """Train one epoch: alternate generator and discriminator updates."""
        self.net.train()

        # Iterate as many steps as the longer of the two domains; the cycled
        # iterators wrap the shorter one around.
        for i in range(max(len(self.source_loader), len(self.target_loader))):
            torch.cuda.empty_cache()
            self.timer['iter time'].tic()
            img, gt_img = self.source_loader_iter.__next__()
            tar, gt_tar = self.target_loader_iter.__next__()

            img = Variable(img).cuda()
            gt_img = Variable(gt_img).cuda()

            tar = Variable(tar).cuda()
            gt_tar = Variable(gt_tar).cuda()

            # ---- generator update ----
            self.optimizer.zero_grad()

            # Freeze the discriminators while the generator is updated.
            for param in self.D[0].parameters():
                param.requires_grad = False
            for param in self.D[1].parameters():
                param.requires_grad = False

            # source domain: supervised counting loss
            pred = self.net(img, gt_img)
            loss = self.net.loss
            if not self.cfg.LOSS_TOG:
                loss.backward()

            # target domain: adversarial loss (label 0 = "looks source-like")
            pred_tar = self.net(tar, gt_tar)

            loss_adv = self.D[self.dis].cal_loss(pred_tar[self.dis],
                                                 0) * self.lambda_adv[self.dis]

            if not self.cfg.LOSS_TOG:
                loss_adv.backward()
            else:
                # LOSS_TOG: accumulate and backprop the joint loss once.
                loss += loss_adv
                loss.backward()

            # ---- discriminator update ----
            loss_d = self.dis_update(pred, pred_tar)
            self.d_opt[0].step()
            self.d_opt[1].step()

            self.optimizer.step()

            if (i + 1) % self.cfg.PRINT_FREQ == 0:
                self.i_tb += 1
                self.writer.add_scalar('train_loss', loss.item(), self.i_tb)
                self.writer.add_scalar('loss_adv', loss_adv.item(), self.i_tb)
                self.writer.add_scalar('loss_d', loss_d.item(), self.i_tb)
                self.timer['iter time'].toc(average=False)

                print('[ep %d][it %d][loss %.4f][loss_adv %.8f][loss_d %.4f][lr %.8f][%.2fs]' % \
                      (self.epoch + 1, i + 1, loss.item(), loss_adv.item() if loss_adv else 0, loss_d.item(), self.optimizer.param_groups[0]['lr'],
                       self.timer['iter time'].diff))
                # LOG_PARA rescales density maps; divide to report raw counts.
                print('        [cnt: gt: %.1f pred: %.2f]' %
                      (gt_img[0].sum().data / self.cfg_data.LOG_PARA,
                       pred[-1][0].sum().data / self.cfg_data.LOG_PARA))

                print('        [tar: gt: %.1f pred: %.2f]' %
                      (gt_tar[0].sum().data / self.cfg_data.LOG_PARA,
                       pred_tar[-1][0].sum().data / self.cfg_data.LOG_PARA))

        self.writer.add_scalar('lr', self.optimizer.param_groups[0]['lr'],
                               self.epoch + 1)

    def gen_update(self, img, tar, gt_img, gt_tar):
        """Placeholder for a combined generator update (currently unused)."""
        pass

    def dis_update(self, pred, pred_tar):
        """Update the active discriminator on detached predictions.

        Source predictions are labelled 0, target predictions 1; returns the
        summed discriminator loss (a tensor).
        """
        self.d_opt[self.dis].zero_grad()

        # Un-freeze the discriminators (frozen during the generator step).
        for param in self.D[0].parameters():
            param.requires_grad = True
        for param in self.D[1].parameters():
            param.requires_grad = True

        # source (detach: no gradients flow back into the generator)
        pred = [pred[0].detach(), pred[1].detach()]

        loss_d = self.D[self.dis].cal_loss(pred[self.dis], 0)
        if not self.cfg.LOSS_TOG:
            loss_d.backward()

        loss_D = loss_d

        # target
        pred_tar = [pred_tar[0].detach(), pred_tar[1].detach()]

        loss_d = self.D[self.dis].cal_loss(pred_tar[self.dis], 1)
        if not self.cfg.LOSS_TOG:
            loss_d.backward()

        loss_D += loss_d

        if self.cfg.LOSS_TOG:
            loss_D.backward()

        return loss_D

    def validate_train(self):
        """Evaluate MAE/MSE on the *source* training loader (sanity check)."""
        self.net.eval()
        losses = AverageMeter()
        maes = AverageMeter()
        mses = AverageMeter()

        for img, gt_map in self.source_loader:

            with torch.no_grad():
                img = Variable(img).cuda()
                gt_map = Variable(gt_map).cuda()

                _, _, pred_map = self.net.forward(img, gt_map)

                pred_map = pred_map.data.cpu().numpy()
                gt_map = gt_map.data.cpu().numpy()

                for i_img in range(pred_map.shape[0]):
                    # LOG_PARA rescales density maps; divide for raw counts.
                    pred_cnt = np.sum(pred_map[i_img]) / self.cfg_data.LOG_PARA
                    gt_count = np.sum(gt_map[i_img]) / self.cfg_data.LOG_PARA

                    s_mae = abs(gt_count - pred_cnt)
                    s_mse = (gt_count - pred_cnt) * (gt_count - pred_cnt)

                    losses.update(self.net.loss.item())
                    maes.update(s_mae)
                    mses.update(s_mse)

        loss = losses.avg
        mae = maes.avg
        mse = np.sqrt(mses.avg)  # RMSE over per-image squared errors

        print("test on source domain")
        print_NTU_summary(self.log_txt, self.epoch, [mae, mse, loss],
                          self.train_record)

    def validate_V4(self):  # validate_V4 for NTU
        """Validate on the NTU test loader; log metrics and save best model."""
        self.net.eval()

        losses = AverageMeter()
        maes = AverageMeter()
        mses = AverageMeter()

        for vi, data in enumerate(self.test_loader, 0):

            img, gt_map = data

            with torch.no_grad():
                img = Variable(img).cuda()
                gt_map = Variable(gt_map).cuda()

                _, _, pred_map = self.net.forward(img, gt_map)

                pred_map = pred_map.data.cpu().numpy()
                gt_map = gt_map.data.cpu().numpy()

                for i_img in range(pred_map.shape[0]):
                    pred_cnt = np.sum(pred_map[i_img]) / self.cfg_data.LOG_PARA
                    gt_count = np.sum(gt_map[i_img]) / self.cfg_data.LOG_PARA

                    s_mae = abs(gt_count - pred_cnt)
                    s_mse = (gt_count - pred_cnt) * (gt_count - pred_cnt)

                    losses.update(self.net.loss.item())
                    maes.update(s_mae)
                    mses.update(s_mse)

                # Visualize only the first batch per epoch.
                if vi == 0:
                    vis_results(self.exp_name, self.epoch, self.writer,
                                self.restore_transform, img, pred_map, gt_map)

        loss = losses.avg
        mae = maes.avg
        mse = np.sqrt(mses.avg)  # RMSE over per-image squared errors

        self.writer.add_scalar('val_loss', loss, self.epoch + 1)
        self.writer.add_scalar('mae', mae, self.epoch + 1)
        self.writer.add_scalar('mse', mse, self.epoch + 1)

        self.train_record = update_model(self.net, self.optimizer,
                                         self.scheduler, self.epoch, self.i_tb,
                                         self.exp_path, self.exp_name,
                                         [mae, mse, loss], self.train_record,
                                         False, self.log_txt)

        print_NTU_summary(self.log_txt, self.epoch, [mae, mse, loss],
                          self.train_record)
# ===== Example #3 (Пример #3) =====
class Trainer():
    """Single-domain trainer for crowd counting.

    Wraps network construction, optimization, optional pretrained-GCC
    initialization, checkpoint resuming, TensorBoard logging, and the
    per-dataset validation routines (V1..V4).
    """

    def __init__(self, dataloader, cfg_data, pwd, cfg):
        """Build the network, optimizer, scheduler, loaders and logger.

        Args:
            dataloader: factory returning (train_loader, val_loader,
                restore_transform).
            cfg_data: dataset-specific settings (e.g. LOG_PARA scaling).
            pwd: working directory, forwarded to the logger.
            cfg: experiment configuration namespace.
        """
        self.cfg_data = cfg_data

        self.data_mode = cfg.DATASET
        self.exp_name = cfg.EXP_NAME
        self.exp_path = cfg.EXP_PATH
        self.pwd = pwd
        self.cfg = cfg

        self.net_name = cfg.NET

        self.net = CrowdCounter(cfg.GPU_ID, self.net_name).cuda()
        self.num_parameters = sum(
            [param.nelement() for param in self.net.parameters()])
        print('num_parameters:', self.num_parameters)
        self.optimizer = optim.Adam(self.net.CCN.parameters(),
                                    lr=cfg.LR,
                                    weight_decay=1e-4)
        self.scheduler = StepLR(self.optimizer,
                                step_size=cfg.NUM_EPOCH_LR_DECAY,
                                gamma=cfg.LR_DECAY)

        # Best-so-far validation metrics (sentinel 1e20 = "not set yet").
        self.train_record = {
            'best_mae': 1e20,
            'best_mse': 1e20,
            'best_model_name': '_'
        }

        self.hparam = {
            'lr': cfg.LR,
            'n_epochs': cfg.MAX_EPOCH,
            'number of parameters': self.num_parameters,
            'dataset': cfg.DATASET,
            'finetuned': cfg.FINETUNE
        }
        self.timer = {
            'iter time': Timer(),
            'train time': Timer(),
            'val time': Timer()
        }

        self.epoch = 0
        self.i_tb = 0

        if cfg.PRE_GCC:
            print('===================Loaded Pretrained GCC================')
            weight = torch.load(cfg.PRE_GCC_MODEL)['net']
            # GCC checkpoints may use a different key layout; try the
            # converted form first, then fall back to the raw state dict.
            try:
                self.net.load_state_dict(convert_state_dict_gcc(weight))
            except Exception:  # FIX: narrowed from bare `except:`; fallback kept
                self.net.load_state_dict(weight)

        self.train_loader, self.val_loader, self.restore_transform = dataloader()

        if cfg.RESUME:
            print('===================Loaded model to resume================')
            latest_state = torch.load(cfg.RESUME_PATH)
            self.net.load_state_dict(latest_state['net'])
            self.optimizer.load_state_dict(latest_state['optimizer'])
            self.scheduler.load_state_dict(latest_state['scheduler'])
            self.epoch = latest_state['epoch'] + 1
            self.i_tb = latest_state['i_tb']
            self.train_record = latest_state['train_record']
            self.exp_path = latest_state['exp_path']
            self.exp_name = latest_state['exp_name']
        self.writer, self.log_txt = logger(self.exp_path,
                                           self.exp_name,
                                           self.pwd,
                                           'exp',
                                           self.train_loader,
                                           self.val_loader,
                                           resume=cfg.RESUME,
                                           cfg=cfg)

    def forward(self):
        """Run the training loop from self.epoch up to cfg.MAX_EPOCH."""
        # Record the model description in the experiment log.
        with open(self.log_txt, 'a') as f:
            f.write(str(self.net) + '\n')
            f.write('num_parameters:' + str(self.num_parameters) + '\n')

        for epoch in range(self.epoch, self.cfg.MAX_EPOCH):
            self.epoch = epoch
            if epoch > self.cfg.LR_DECAY_START:
                self.scheduler.step()

            # training
            self.timer['train time'].tic()
            self.train()
            self.timer['train time'].toc(average=False)

            print('train time: {:.2f}s'.format(self.timer['train time'].diff))
            print('=' * 20)
            self.net.eval()

            # validation: dispatch on the dataset family.
            # FIX: `is 'WE'` etc. compared string *identity*, which relies on
            # CPython interning (SyntaxWarning since 3.8) — use `==`.
            if epoch % self.cfg.VAL_FREQ == 0 or epoch > self.cfg.VAL_DENSE_START:
                self.timer['val time'].tic()
                if self.data_mode in ['SHHA', 'SHHB', 'QNRF', 'UCF50', 'Mall']:
                    self.validate_V1()
                elif self.data_mode == 'WE':
                    self.validate_V2()
                elif self.data_mode == 'GCC':
                    self.validate_V3()
                elif self.data_mode == 'NTU':
                    self.validate_V4()
                self.timer['val time'].toc(average=False)
                print('val time: {:.2f}s'.format(self.timer['val time'].diff))

    def train(self):  # training for all datasets
        """Train one epoch over the full train_loader."""
        self.net.train()

        for i, data in enumerate(self.train_loader, 0):
            self.timer['iter time'].tic()
            img, gt_map = data
            img = Variable(img).cuda()
            gt_map = Variable(gt_map).cuda()

            self.optimizer.zero_grad()
            pred_map = self.net(img, gt_map)

            loss = self.net.loss

            loss.backward()
            self.optimizer.step()

            if (i + 1) % self.cfg.PRINT_FREQ == 0:
                self.i_tb += 1
                self.writer.add_scalar('train_loss', loss.item(), self.i_tb)
                self.timer['iter time'].toc(average=False)

                print('[ep %d][it %d][loss %.4f][lr %.6f][%.2fs]' %
                      (self.epoch + 1, i + 1, loss.item(),
                       self.optimizer.param_groups[0]['lr'],
                       self.timer['iter time'].diff))
                # LOG_PARA rescales density maps; divide to report raw counts.
                print('        [cnt: gt: %.1f pred: %.2f]' %
                      (gt_map[0].sum().data / self.cfg_data.LOG_PARA,
                       pred_map[0].sum().data / self.cfg_data.LOG_PARA))

        self.writer.add_scalar('lr', self.optimizer.param_groups[0]['lr'],
                               self.epoch + 1)

    def validate_V1(self):  # validate_V1 for SHHA, SHHB, UCF-QNRF, UCF50
        """Validate on a flat loader (SHHA/SHHB/QNRF/UCF50/Mall): MAE + RMSE."""
        self.net.eval()

        losses = AverageMeter()
        maes = AverageMeter()
        mses = AverageMeter()

        for vi, data in enumerate(self.val_loader, 0):
            img, gt_map = data

            with torch.no_grad():
                img = Variable(img).cuda()
                gt_map = Variable(gt_map).cuda()

                pred_map = self.net.forward(img, gt_map)
                pred_map = pred_map.data.cpu().numpy()
                gt_map = gt_map.data.cpu().numpy()

                for i_img in range(pred_map.shape[0]):
                    pred_cnt = np.sum(pred_map[i_img]) / self.cfg_data.LOG_PARA
                    gt_count = np.sum(gt_map[i_img]) / self.cfg_data.LOG_PARA

                    losses.update(self.net.loss.item())
                    maes.update(abs(gt_count - pred_cnt))
                    mses.update((gt_count - pred_cnt) * (gt_count - pred_cnt))
                # Visualize only the first batch per epoch.
                if vi == 0:
                    vis_results(self.exp_name, self.epoch, self.writer,
                                self.restore_transform, img, pred_map, gt_map)

        mae = maes.avg
        mse = np.sqrt(mses.avg)  # RMSE over per-image squared errors
        loss = losses.avg

        self.writer.add_scalar('val_loss', loss, self.epoch + 1)
        self.writer.add_scalar('mae', mae, self.epoch + 1)
        self.writer.add_scalar('mse', mse, self.epoch + 1)

        self.train_record = update_model(self.net, self.optimizer,
                                         self.scheduler, self.epoch, self.i_tb,
                                         self.exp_path, self.exp_name,
                                         [mae, mse, loss], self.train_record,
                                         False, self.log_txt)
        print_summary(self.log_txt, self.epoch, self.exp_name,
                      [mae, mse, loss], self.train_record)

    def validate_V2(self):  # validate_V2 for WE
        """Validate on WorldExpo'10 (WE): per-scene MAE, averaged overall."""
        self.net.eval()

        losses = AverageCategoryMeter(5)
        maes = AverageCategoryMeter(5)

        # Load the per-scene region-of-interest masks.
        roi_mask = []
        from datasets.WE.setting import cfg_data
        from scipy import io as sio
        for val_folder in cfg_data.VAL_FOLDER:
            roi_mask.append(
                sio.loadmat(
                    os.path.join(cfg_data.DATA_PATH, 'test',
                                 val_folder + '_roi.mat'))['BW'])

        for i_sub, i_loader in enumerate(self.val_loader, 0):
            # NOTE(review): mask is loaded but never applied to the maps
            # below — confirm whether ROI masking was intended here.
            mask = roi_mask[i_sub]
            for vi, data in enumerate(i_loader, 0):
                img, gt_map = data

                with torch.no_grad():
                    img = Variable(img).cuda()
                    gt_map = Variable(gt_map).cuda()

                    pred_map = self.net.forward(img, gt_map)

                    pred_map = pred_map.data.cpu().numpy()
                    gt_map = gt_map.data.cpu().numpy()

                    for i_img in range(pred_map.shape[0]):
                        pred_cnt = np.sum(pred_map[i_img]) / self.cfg_data.LOG_PARA
                        gt_count = np.sum(gt_map[i_img]) / self.cfg_data.LOG_PARA

                        losses.update(self.net.loss.item(), i_sub)
                        maes.update(abs(gt_count - pred_cnt), i_sub)
                    # Visualize only the first batch per scene.
                    if vi == 0:
                        vis_results(self.exp_name, self.epoch, self.writer,
                                    self.restore_transform, img, pred_map,
                                    gt_map)

        mae = np.average(maes.avg)
        loss = np.average(losses.avg)

        self.writer.add_scalar('val_loss', loss, self.epoch + 1)
        self.writer.add_scalar('mae', mae, self.epoch + 1)
        self.writer.add_scalar('mae_s1', maes.avg[0], self.epoch + 1)
        self.writer.add_scalar('mae_s2', maes.avg[1], self.epoch + 1)
        self.writer.add_scalar('mae_s3', maes.avg[2], self.epoch + 1)
        self.writer.add_scalar('mae_s4', maes.avg[3], self.epoch + 1)
        self.writer.add_scalar('mae_s5', maes.avg[4], self.epoch + 1)

        # FIX: the `False` positional argument was missing here, shifting
        # self.log_txt into its slot — made consistent with the update_model
        # calls in validate_V1/V3/V4.
        self.train_record = update_model(self.net, self.optimizer,
                                         self.scheduler, self.epoch, self.i_tb,
                                         self.exp_path, self.exp_name,
                                         [mae, 0, loss], self.train_record,
                                         False, self.log_txt)
        print_WE_summary(self.log_txt, self.epoch, [mae, 0, loss],
                         self.train_record, maes)

    def validate_V3(self):  # validate_V3 for GCC
        """Validate on GCC: overall MAE/RMSE plus per-attribute breakdowns
        (crowd level, time of day, weather)."""
        self.net.eval()

        losses = AverageMeter()
        maes = AverageMeter()
        mses = AverageMeter()

        # Per-category meters: 9 crowd levels, 8 time bins, 7 weather types.
        c_maes = {
            'level': AverageCategoryMeter(9),
            'time': AverageCategoryMeter(8),
            'weather': AverageCategoryMeter(7)
        }
        c_mses = {
            'level': AverageCategoryMeter(9),
            'time': AverageCategoryMeter(8),
            'weather': AverageCategoryMeter(7)
        }

        for vi, data in enumerate(self.val_loader, 0):
            img, gt_map, attributes_pt = data

            with torch.no_grad():
                img = Variable(img).cuda()
                gt_map = Variable(gt_map).cuda()

                pred_map = self.net.forward(img, gt_map)

                pred_map = pred_map.data.cpu().numpy()
                gt_map = gt_map.data.cpu().numpy()

                for i_img in range(pred_map.shape[0]):
                    pred_cnt = np.sum(pred_map[i_img]) / self.cfg_data.LOG_PARA
                    gt_count = np.sum(gt_map[i_img]) / self.cfg_data.LOG_PARA

                    s_mae = abs(gt_count - pred_cnt)
                    s_mse = (gt_count - pred_cnt) * (gt_count - pred_cnt)

                    losses.update(self.net.loss.item())
                    maes.update(s_mae)
                    mses.update(s_mse)
                    attributes_pt = attributes_pt.squeeze()
                    c_maes['level'].update(s_mae, attributes_pt[i_img][0])
                    c_mses['level'].update(s_mse, attributes_pt[i_img][0])
                    # time attribute is stored in 3-hour steps; /3 -> bin id
                    c_maes['time'].update(s_mae, attributes_pt[i_img][1] / 3)
                    c_mses['time'].update(s_mse, attributes_pt[i_img][1] / 3)
                    c_maes['weather'].update(s_mae, attributes_pt[i_img][2])
                    c_mses['weather'].update(s_mse, attributes_pt[i_img][2])

                # Visualize only the first batch per epoch.
                if vi == 0:
                    vis_results(self.exp_name, self.epoch, self.writer,
                                self.restore_transform, img, pred_map, gt_map)

        loss = losses.avg
        mae = maes.avg
        mse = np.sqrt(mses.avg)  # RMSE over per-image squared errors

        self.writer.add_scalar('val_loss', loss, self.epoch + 1)
        self.writer.add_scalar('mae', mae, self.epoch + 1)
        self.writer.add_scalar('mse', mse, self.epoch + 1)

        self.train_record = update_model(self.net, self.optimizer,
                                         self.scheduler, self.epoch, self.i_tb,
                                         self.exp_path, self.exp_name,
                                         [mae, mse, loss], self.train_record,
                                         False, self.log_txt)

        print_GCC_summary(self.log_txt, self.epoch, [mae, mse, loss],
                          self.train_record, c_maes, c_mses)

    def validate_V4(self):  # validate_V4 for NTU
        """Validate on NTU: overall MAE/RMSE, log metrics, save best model."""
        self.net.eval()

        losses = AverageMeter()
        maes = AverageMeter()
        mses = AverageMeter()

        for vi, data in enumerate(self.val_loader, 0):
            img, gt_map = data

            with torch.no_grad():
                img = Variable(img).cuda()
                gt_map = Variable(gt_map).cuda()

                pred_map = self.net.forward(img, gt_map)

                pred_map = pred_map.data.cpu().numpy()
                gt_map = gt_map.data.cpu().numpy()

                for i_img in range(pred_map.shape[0]):
                    pred_cnt = np.sum(pred_map[i_img]) / self.cfg_data.LOG_PARA
                    gt_count = np.sum(gt_map[i_img]) / self.cfg_data.LOG_PARA

                    s_mae = abs(gt_count - pred_cnt)
                    s_mse = (gt_count - pred_cnt) * (gt_count - pred_cnt)

                    losses.update(self.net.loss.item())
                    maes.update(s_mae)
                    mses.update(s_mse)

                # Visualize only the first batch per epoch.
                if vi == 0:
                    vis_results(self.exp_name, self.epoch, self.writer,
                                self.restore_transform, img, pred_map, gt_map)

        loss = losses.avg
        mae = maes.avg
        mse = np.sqrt(mses.avg)  # RMSE over per-image squared errors

        self.writer.add_scalar('val_loss', loss, self.epoch + 1)
        self.writer.add_scalar('mae', mae, self.epoch + 1)
        self.writer.add_scalar('mse', mse, self.epoch + 1)

        self.train_record = update_model(self.net, self.optimizer,
                                         self.scheduler, self.epoch, self.i_tb,
                                         self.exp_path, self.exp_name,
                                         [mae, mse, loss], self.train_record,
                                         False, self.log_txt)

        print_NTU_summary(self.log_txt, self.epoch, [mae, mse, loss],
                          self.train_record)
# ===== Example #4 (Пример #4) =====
# pruned_model_path = './exp/Res50_Original_GCC_Inducing_CAP_0.0001_epochs_100_Pruning/0.7/resnet50_GCC_pruned_0.7.pth.tar'
# pruned_model_path = './exp/VGG_Decoder_GCC_Pretrained_Pruning/0.4/VGG_Decoder_GCC_pruned_0.4.pth.tar'

# model_path='05-ResNet-50_all_ep_35_mae_32.4_mse_76.1.pth'

# Build the crowd-counting network and load evaluation weights.
net = CrowdCounter(cfg.GPU_ID, cfg.NET)
# net = CrowdCounter(cfg.GPU_ID,cfg.NET,cfg=torch.load(pruned_model_path)['cfg'])
state_dict = torch.load(args.model_path)

# Trainer checkpoints wrap the weights under a 'net' key; plain state-dict
# files are loaded directly on KeyError.
try:
    net.load_state_dict(state_dict['net'])
except KeyError:
    net.load_state_dict(state_dict)
net.cuda()
net.eval()
# NOTE(review): the parameter count below is computed and discarded —
# presumably a notebook cell that auto-displayed it; assign or print it
# when running as a script.
sum([param.nelement() for param in net.parameters()])


def get_concat_h(im1, im2):
    """Concatenate two PIL images side by side (im1 left, im2 right).

    The canvas is sized im1.width + im2.width wide and im1.height tall,
    so im2 is clipped (or leaves a black band) if its height differs.
    """
    total_width = im1.width + im2.width
    canvas = Image.new('RGB', (total_width, im1.height))
    canvas.paste(im1, (0, 0))
    canvas.paste(im2, (im1.width, 0))
    return canvas


# 'jet' colormap used to render predicted density maps as RGB images.
cm = plt.get_cmap('jet')

file_folder = []  # accumulates per-image source folders
file_name = []  # accumulates per-image file names
'''
for file in glob.glob('/export/home/jinc0008/ntu_random_test/*'):
# ===== Example #5 (Пример #5) =====
class Trainer():
    """Training/validation driver for a CrowdCounter density-estimation net.

    Owns the network, Adam optimizer, StepLR scheduler, timers, and the
    TensorBoard writer + text log produced by ``logger``.  ``forward()``
    runs the full train/validate loop for ``cfg.MAX_EPOCH`` epochs; the
    dataset-specific ``validate_V1/2/3`` methods compute MAE/MSE and let
    ``update_model`` checkpoint the best model so far.
    """

    def __init__(self, dataloader, cfg_data, pwd):
        """
        Args:
            dataloader: zero-argument factory returning
                (train_loader, val_loader, restore_transform).
            cfg_data: dataset-specific settings; this class reads
                ``cfg_data.LOG_PARA`` (density-map scaling factor).
            pwd: working directory recorded by the experiment logger.
        """
        self.cfg_data = cfg_data

        self.data_mode = cfg.DATASET
        self.exp_name = cfg.EXP_NAME
        self.exp_path = cfg.EXP_PATH
        self.pwd = pwd

        self.net_name = cfg.NET
        self.net = CrowdCounter(cfg.GPU_ID, self.net_name).cuda()
        self.optimizer = optim.Adam(self.net.parameters(),
                                    lr=cfg.LR,
                                    weight_decay=1e-4)
        # self.optimizer = optim.SGD(self.net.parameters(), cfg.LR, momentum=0.95,weight_decay=5e-4)
        self.scheduler = StepLR(self.optimizer,
                                step_size=cfg.NUM_EPOCH_LR_DECAY,
                                gamma=cfg.LR_DECAY)

        # Best-so-far validation metrics; refreshed by update_model().
        self.train_record = {
            'best_mae': 1e20,
            'best_mse': 1e20,
            'best_model_name': ''
        }
        self.timer = {
            'iter time': Timer(),
            'train time': Timer(),
            'val time': Timer()
        }
        self.writer, self.log_txt = logger(self.exp_path, self.exp_name,
                                           self.pwd, 'exp')

        self.i_tb = 0  # global TensorBoard step counter
        self.epoch = -1

        if cfg.PRE_GCC:
            # Warm-start from weights pretrained on the GCC dataset.
            self.net.load_state_dict(torch.load(cfg.PRE_GCC_MODEL))

        self.train_loader, self.val_loader, self.restore_transform = dataloader(
        )

    def forward(self):
        """Run the full training loop with periodic validation."""
        # self.validate_V1()
        for epoch in range(cfg.MAX_EPOCH):
            self.epoch = epoch
            if epoch > cfg.LR_DECAY_START:
                self.scheduler.step()

            # training
            self.timer['train time'].tic()
            self.train()
            self.timer['train time'].toc(average=False)

            print('train time: {:.2f}s'.format(self.timer['train time'].diff))
            print('=' * 20)

            # validation
            if epoch % cfg.VAL_FREQ == 0 or epoch > cfg.VAL_DENSE_START:
                self.timer['val time'].tic()
                # BUGFIX: string comparison must use `==`, not `is`.
                # `is` tests object identity and only "worked" here via
                # CPython's literal interning, which is not guaranteed.
                if self.data_mode in ['SHHA', 'SHHB', 'QNRF', 'UCF50']:
                    self.validate_V1()
                elif self.data_mode == 'WE':
                    self.validate_V2()
                elif self.data_mode == 'GCC':
                    self.validate_V3()
                self.timer['val time'].toc(average=False)
                print('val time: {:.2f}s'.format(self.timer['val time'].diff))

    def train(self):  # training for all datasets
        """Train for one epoch over ``self.train_loader``.

        Logs the loss to TensorBoard and prints a progress line every
        ``cfg.PRINT_FREQ`` iterations.
        """
        self.net.train()
        for i, data in enumerate(self.train_loader, 0):
            self.timer['iter time'].tic()
            img, gt_map = data
            img = Variable(img).cuda()
            gt_map = Variable(gt_map).cuda()

            self.optimizer.zero_grad()
            # The network computes its own loss internally from gt_map.
            pred_map = self.net(img, gt_map)
            loss = self.net.loss
            loss.backward()
            self.optimizer.step()

            if (i + 1) % cfg.PRINT_FREQ == 0:
                self.i_tb += 1
                self.writer.add_scalar('train_loss', loss.item(), self.i_tb)
                self.timer['iter time'].toc(average=False)
                # lr is scaled by 1e4 purely for readable printing.
                print('[ep %d][it %d][loss %.4f][lr %.4f][%.2fs]' %
                      (self.epoch + 1, i + 1, loss.item(),
                       self.optimizer.param_groups[0]['lr'] * 10000,
                       self.timer['iter time'].diff))
                print('        [cnt: gt: %.1f pred: %.2f]' % (
                    gt_map[0].sum().data / self.cfg_data.LOG_PARA,
                    pred_map[0].sum().data / self.cfg_data.LOG_PARA))

    def validate_V1(self):  # validate_V1 for SHHA, SHHB, UCF-QNRF, UCF50
        """Validate on single-scene datasets; log MAE/RMSE and checkpoint."""
        self.net.eval()

        losses = AverageMeter()
        maes = AverageMeter()
        mses = AverageMeter()

        time_sample = 0  # accumulated test_forward wall time (typo fixed)
        step = 0

        for vi, data in enumerate(self.val_loader, 0):
            img, gt_map = data

            with torch.no_grad():
                img = Variable(img).cuda()
                gt_map = Variable(gt_map).cuda()

                pred_map = self.net.forward(img, gt_map)

                # Time a gt-free forward pass to report inference speed.
                step = step + 1
                time_start1 = time.time()
                test_map = self.net.test_forward(img)
                time_end1 = time.time()
                time_sample = time_sample + (time_end1 - time_start1)

                pred_map = pred_map.data.cpu().numpy()
                gt_map = gt_map.data.cpu().numpy()

                # Undo the LOG_PARA scaling to get raw people counts.
                pred_cnt = np.sum(pred_map) / self.cfg_data.LOG_PARA
                gt_count = np.sum(gt_map) / self.cfg_data.LOG_PARA

                losses.update(self.net.loss.item())
                maes.update(abs(gt_count - pred_cnt))
                mses.update((gt_count - pred_cnt) * (gt_count - pred_cnt))
                if vi == 0:
                    vis_results(self.exp_name, self.epoch, self.writer,
                                self.restore_transform, img, pred_map, gt_map)

        mae = maes.avg
        mse = np.sqrt(mses.avg)  # RMSE of the per-image squared errors
        loss = losses.avg

        self.writer.add_scalar('val_loss', loss, self.epoch + 1)
        self.writer.add_scalar('mae', mae, self.epoch + 1)
        self.writer.add_scalar('mse', mse, self.epoch + 1)

        self.train_record = update_model(self.net, self.epoch, self.exp_path,
                                         self.exp_name, [mae, mse, loss],
                                         self.train_record, self.log_txt)
        print_summary(self.exp_name, [mae, mse, loss], self.train_record)
        print('\nForward Time: %fms' % (time_sample * 1000 / step))

    def validate_V2(self):  # validate_V2 for WE
        """Validate on WorldExpo'10: 5 scenes, per-scene MAE, no MSE."""
        self.net.eval()

        losses = AverageCategoryMeter(5)
        maes = AverageCategoryMeter(5)

        # Load the per-scene region-of-interest masks shipped with WE.
        roi_mask = []
        from datasets.WE.setting import cfg_data
        from scipy import io as sio
        for val_folder in cfg_data.VAL_FOLDER:
            roi_mask.append(
                sio.loadmat(
                    os.path.join(cfg_data.DATA_PATH, 'test',
                                 val_folder + '_roi.mat'))['BW'])

        for i_sub, i_loader in enumerate(self.val_loader, 0):

            mask = roi_mask[i_sub]
            for vi, data in enumerate(i_loader, 0):
                img, gt_map = data

                with torch.no_grad():
                    img = Variable(img).cuda()
                    gt_map = Variable(gt_map).cuda()

                    pred_map = self.net.forward(img, gt_map)

                    pred_map = pred_map.data.cpu().numpy()
                    gt_map = gt_map.data.cpu().numpy()

                    for i_img in range(pred_map.shape[0]):
                        pred_cnt = np.sum(
                            pred_map[i_img]) / self.cfg_data.LOG_PARA
                        gt_count = np.sum(
                            gt_map[i_img]) / self.cfg_data.LOG_PARA

                        # Metrics are tracked per scene (category i_sub).
                        losses.update(self.net.loss.item(), i_sub)
                        maes.update(abs(gt_count - pred_cnt), i_sub)
                    if vi == 0:
                        vis_results(self.exp_name, self.epoch, self.writer,
                                    self.restore_transform, img, pred_map,
                                    gt_map)

        # Overall metric = mean over the 5 scene-level averages.
        mae = np.average(maes.avg)
        loss = np.average(losses.avg)

        self.writer.add_scalar('val_loss', loss, self.epoch + 1)
        self.writer.add_scalar('mae', mae, self.epoch + 1)
        self.writer.add_scalar('mae_s1', maes.avg[0], self.epoch + 1)
        self.writer.add_scalar('mae_s2', maes.avg[1], self.epoch + 1)
        self.writer.add_scalar('mae_s3', maes.avg[2], self.epoch + 1)
        self.writer.add_scalar('mae_s4', maes.avg[3], self.epoch + 1)
        self.writer.add_scalar('mae_s5', maes.avg[4], self.epoch + 1)

        self.train_record = update_model(self.net, self.epoch, self.exp_path,
                                         self.exp_name, [mae, 0, loss],
                                         self.train_record, self.log_txt)
        print_WE_summary(self.log_txt, self.epoch, [mae, 0, loss],
                         self.train_record, maes)

    def validate_V3(self):  # validate_V3 for GCC
        """Validate on GCC; category meters for level/time/weather exist
        but their updates are currently commented out."""
        self.net.eval()

        losses = AverageMeter()
        maes = AverageMeter()
        mses = AverageMeter()

        c_maes = {
            'level': AverageCategoryMeter(9),
            'time': AverageCategoryMeter(8),
            'weather': AverageCategoryMeter(7)
        }
        c_mses = {
            'level': AverageCategoryMeter(9),
            'time': AverageCategoryMeter(8),
            'weather': AverageCategoryMeter(7)
        }

        for vi, data in enumerate(self.val_loader, 0):
            img, gt_map, attributes_pt = data

            with torch.no_grad():
                img = Variable(img).cuda()
                gt_map = Variable(gt_map).cuda()

                pred_map = self.net.forward(img, gt_map)

                pred_map = pred_map.data.cpu().numpy()
                gt_map = gt_map.data.cpu().numpy()

                for i_img in range(pred_map.shape[0]):
                    pred_cnt = np.sum(pred_map[i_img]) / self.cfg_data.LOG_PARA
                    gt_count = np.sum(gt_map[i_img]) / self.cfg_data.LOG_PARA

                    s_mae = abs(gt_count - pred_cnt)
                    s_mse = (gt_count - pred_cnt) * (gt_count - pred_cnt)

                    losses.update(self.net.loss.item())
                    maes.update(s_mae)
                    mses.update(s_mse)
                    # attributes_pt = attributes_pt.squeeze()
                    # c_maes['level'].update(s_mae, attributes_pt[i_img][0])
                    # c_mses['level'].update(s_mse, attributes_pt[i_img][0])
                    # c_maes['time'].update(s_mae, attributes_pt[i_img][1] / 3)
                    # c_mses['time'].update(s_mse, attributes_pt[i_img][1] / 3)
                    # c_maes['weather'].update(s_mae, attributes_pt[i_img][2])
                    # c_mses['weather'].update(s_mse, attributes_pt[i_img][2])

                # if vi == 0:
                #     vis_results(self.exp_name, self.epoch, self.writer, self.restore_transform, img, pred_map, gt_map)

        loss = losses.avg
        mae = maes.avg
        mse = np.sqrt(mses.avg)  # RMSE

        self.writer.add_scalar('val_loss', loss, self.epoch + 1)
        self.writer.add_scalar('mae', mae, self.epoch + 1)
        self.writer.add_scalar('mse', mse, self.epoch + 1)

        self.train_record = update_model(self.net, self.epoch, self.exp_path,
                                         self.exp_name, [mae, mse, loss],
                                         self.train_record, self.log_txt)

        print_GCC_summary(self.log_txt, self.epoch, [mae, mse, loss],
                          self.train_record, c_maes, c_mses)