Example #1
def optimizer_creation(cfg, model):
    """
    :return:

    The parameters has been split to 2 groups:
        ft_params: always ResNet parameters.
        new_params: always many layers parameters after ResNet.
    ft_params use cfg.ft_lr
    new_params use cfg.new_params_lr

    They are pairs by OrderedDict to save.
    """
    param_groups = model.get_param_groups()
    optimizer = create_optimizer(param_groups, cfg)
    # Move the optimizer state tensors to cfg.device (recursive_to_device
    # is assumed to update the state dict in place).
    recursive_to_device(optimizer.state_dict(), cfg.device)
    return optimizer
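
Neither model.get_param_groups() nor create_optimizer() is shown in this example, so the sketch below is only a plausible reconstruction of the two-LR split the docstring describes; the model.backbone attribute and the choice of SGD are assumptions.

import torch

def get_param_groups(model, cfg):
    # ft_params: the pretrained ResNet backbone, fine-tuned with a small LR.
    ft_params = list(model.backbone.parameters())  # 'backbone' is assumed
    ft_ids = {id(p) for p in ft_params}
    # new_params: everything added on top of the backbone.
    new_params = [p for p in model.parameters() if id(p) not in ft_ids]
    return [
        {'params': ft_params, 'lr': cfg.ft_lr},
        {'params': new_params, 'lr': cfg.new_params_lr},
    ]

def create_optimizer(param_groups, cfg):
    # Per-group 'lr' entries override the default lr passed here.
    return torch.optim.SGD(param_groups, lr=cfg.ft_lr, momentum=0.9)
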
def extract_dataloader_feature(model, dataloader, cfg):
    """
    :param model:
    :param dataloader: gallery or query dataloader
    :param cfg:
    :return: concat_feat_dict_list

    I concat each batch item together in a dict:
    The dict should be this format: {
        'im_path': [item1, item2, ...],
        'feat': [item1, item2, ...],
        'label': [item1, item2, ...],
        'cam': [item1, item2, ...],
    }
    """
    feat_dict_list = []
    model.eval()
    for item in tqdm(dataloader, desc='Extract Feature', miniters=20, ncols=120, unit=' batches'):
        with torch.no_grad():
            item = recursive_to_device(item, cfg.device)
            output = model(item, cfg)
            part_label_batch = extract_part_label(item, cfg)
            feat_dict = {
                'total_part_label': part_label_batch.cpu().numpy(),
                'total_partial_feature': output['pool_feat_list'][1].cpu().numpy(),
                'total_pg_global_feature': output['reduction_pool_feat_list'][0].cpu().numpy(),
                'total_label': item['label'].cpu().numpy(),
                'total_cam': item['cam'].cpu().numpy(),
                'list_img': item['im_path'],
            }
        feat_dict_list.append(feat_dict)
    concat_feat_dict = concat_dict_list(feat_dict_list)
    return concat_feat_dict
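
concat_dict_list() is not shown anywhere in these examples; a plausible implementation, assuming numpy arrays for tensor-derived values and plain lists for image paths, would be:

import numpy as np

def concat_dict_list(dict_list):
    # Merge a list of per-batch dicts into one dict, concatenating
    # numpy arrays along axis 0 and flattening lists of strings.
    concat = {}
    for key in dict_list[0]:
        values = [d[key] for d in dict_list]
        if isinstance(values[0], np.ndarray):
            concat[key] = np.concatenate(values, axis=0)
        else:
            concat[key] = [v for batch in values for v in batch]
    return concat
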
Example #3
def extract_dataloader_feature(model, dataloader, cfg):
    """
    :param model:
    :param dataloader: gallery or query dataloader
    :param cfg:
    :return: concat_feat_dict_list

    I concat each batch item together in a dict:
    The dict should be this format: {
        'im_path': [item1, item2, ...],
        'feat': [item1, item2, ...],
        'label': [item1, item2, ...],
        'cam': [item1, item2, ...],
    }
    """
    feat_dict_list = []
    model.eval()
    for item in tqdm(dataloader,
                     desc='Extract Feature',
                     miniters=20,
                     ncols=120,
                     unit=' batches'):
        with torch.no_grad():
            # ff = torch.FloatTensor(item['im'].size(0), 2048).zero_()
            # for i in range(2):
            #     if i == 1:
            #         item['im'] = fliphor(item['im'])
            #     item = recursive_to_device(item, cfg.device)
            #     output = model(item)
            #     feat = torch.cat(output['reduction_pool_feat_list'], 1)
            #     # feat = feat.cpu().numpy()
            #     feat = feat.cpu()
            #     ff = ff + feat
            item = recursive_to_device(item, cfg.device)
            output = model(item, cfg)
            # Concatenate all reduction-pooled features and L2-normalize
            # each row before handing the matrix back as numpy.
            feat = torch.cat(output['reduction_pool_feat_list'], 1).cpu()
            fnorm = torch.norm(feat, p=2, dim=1, keepdim=True)
            ff = feat.div(fnorm.expand_as(feat)).numpy()
            feat_dict = {
                'im_path': item['im_path'],
                # 'feat': feat,
                'feat': ff,
                'list_img': item['im_path']
            }
            if 'label' in item:
                feat_dict['label'] = item['label'].cpu().numpy()
            if 'cam' in item:
                feat_dict['cam'] = item['cam'].cpu().numpy()
        feat_dict_list.append(feat_dict)
    concat_feat_dict = concat_dict_list(feat_dict_list)
    return concat_feat_dict
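
The manual norm-and-divide in this example is equivalent (up to the small eps guard against zero vectors) to torch.nn.functional.normalize; a quick check:

import torch
import torch.nn.functional as F

feat = torch.randn(4, 512)  # stand-in for the concatenated features
manual = feat.div(torch.norm(feat, p=2, dim=1, keepdim=True).expand_as(feat))
builtin = F.normalize(feat, p=2, dim=1)
assert torch.allclose(manual, builtin, atol=1e-6)
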
Example #4
def extract_dataloader_feature(model, dataloader, cfg, use_gcn):
    """
    :param model:
    :param dataloader: gallery or query dataloader
    :param cfg:
    :param use_gcn: feat is if gcned or not
    :return: concat_feat_dict_list

    I concat each batch item together in a dict:
    The dict should be this format: {
        'im_path': [item1, item2, ...],
        'feat': [item1, item2, ...],
        'label': [item1, item2, ...],
        'cam': [item1, item2, ...],
    }
    """
    feat_dict_list = []
    model.eval()
    for item in tqdm(dataloader,
                     desc='Extract Feature',
                     miniters=20,
                     ncols=120,
                     unit=' batches'):
        with torch.no_grad():
            item = recursive_to_device(item, cfg.device)
            output = model(item, cfg)
            # Broadcast sqrt-scaled keypoint confidences so each 2048-d
            # part feature is weighted by its keypoint's confidence.
            N, keypoints_num = output['keypoints_confidence'].shape
            output['keypoints_confidence'] = torch.sqrt(
                output['keypoints_confidence']).unsqueeze(2).repeat(
                    [1, 1, 2048]).view([N, 2048 * keypoints_num])
            feat_stage1 = output['keypoints_confidence'] * torch.cat(
                output['bned_feat_vec_list'], dim=1)
            feat_stage2 = torch.cat(
                [i.unsqueeze(1) for i in output['bned_feat_vec_list']], dim=1)
            gcned_feat_stage1 = output['keypoints_confidence'] * torch.cat(
                output['bned_gcned_feat_vec_list'], dim=1)
            gcned_feat_stage2 = torch.cat(
                [i.unsqueeze(1) for i in output['bned_gcned_feat_vec_list']],
                dim=1)

            if use_gcn:
                feat_stage1 = gcned_feat_stage1
                feat_stage2 = gcned_feat_stage2

            feat_dict = {
                'feat_stage1': feat_stage1.cpu().numpy(),
                'feat_stage2': feat_stage2.cpu().numpy(),
                'label': item['label'].cpu().numpy(),
                'cam': item['cam'].cpu().numpy(),
                'list_img': item['im_path'],
            }
        feat_dict_list.append(feat_dict)
    concat_feat_dict = concat_dict_list(feat_dict_list)
    return concat_feat_dict
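
A typical downstream use of the extracted dict (a sketch only: query_loader, gallery_loader, and the assumption that the stage-1 features are L2-normalized are all hypothetical):

import numpy as np

query = extract_dataloader_feature(model, query_loader, cfg, use_gcn=True)
gallery = extract_dataloader_feature(model, gallery_loader, cfg, use_gcn=True)

q = query['feat_stage1']            # (num_query, D)
g = gallery['feat_stage1']          # (num_gallery, D)
dist = 1.0 - q @ g.T                # cosine distance for L2-normalized rows
ranking = np.argsort(dist, axis=1)  # gallery indices, best match first
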
Example #5
    def train(self):
        # dataset_sizes = len(self.source_train_loader.dataset)
        # class_num = self.source_train_loader.dataset.num_ids
        # assert self.cfg.model.num_classes == class_num, "cfg.model.num_classes should be {} in create_train_dataloader.py".format(class_num)

        print("End epoch is:", self.cfg.optim.epochs)
        for epoch in range(self.current_ep, self.cfg.optim.epochs):
            self.epoch_start_time = time.time()
            self.model.set_train_mode(
                fix_ft_layers=self.cfg.optim.phase == 'pretrain')

            for index, item in enumerate(self.source_train_loader):
                if self.lr_scheduler is not None:
                    self.lr_scheduler.step()
                self.optimizer.zero_grad()

                # Source item
                item = recursive_to_device(item, self.cfg.device)
                pred = self.model.forward(item,
                                          cfg=self.cfg,
                                          forward_type='Supervised')

                loss = 0
                for loss_type in [
                        self.cfg.id_loss, self.cfg.tri_loss,
                        self.cfg.pgfa_loss, self.cfg.src_ps_loss,
                        self.cfg.src_psgp_loss
                ]:
                    if loss_type.use:
                        loss += self.loss_functions[loss_type.name](
                            item, pred, step=self.current_step)['loss']

                # loss stays the int 0 if no loss is enabled, so only
                # backprop when at least one loss produced a tensor.
                if isinstance(loss, torch.Tensor):
                    loss.backward()
                self.optimizer.step()

                if ((self.current_step + 1) % self.steps_per_log
                        == 0) and (self.print_step_log is not None):
                    self.print_step_log(self.cfg, self.current_ep,
                                        self.current_step, self.optimizer,
                                        self.loss_functions,
                                        self.analyze_functions,
                                        self.epoch_start_time)
                self.current_step += 1
                if (self.trial_run_steps
                        is not None) and (index + 1 >= self.trial_run_steps):
                    break
            if ((self.current_ep + 1) % self.eps_per_log
                    == 0) and (self.print_ep_log is not None):
                self.print_ep_log()
            self.current_ep += 1
            score_str = self.may_test()
            self.may_save_ckpt(score_str, self.current_ep)
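
The gated loss summation above is worth isolating: loss starts as the int 0 and only becomes a tensor once an enabled loss is added, which is exactly why the isinstance guard precedes backward(). A minimal sketch of the pattern, with hypothetical configs:

def total_loss(loss_cfgs, loss_functions, item, pred, step):
    # Each loss config carries a `use` flag and a `name` keying into a
    # registry of callables that return a dict containing 'loss'.
    loss = 0
    for loss_cfg in loss_cfgs:
        if loss_cfg.use:
            loss += loss_functions[loss_cfg.name](item, pred, step=step)['loss']
    # Still the int 0 here if every flag was off; callers must check
    # isinstance(loss, torch.Tensor) before calling backward().
    return loss
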
Example #6
def visualization(model, dataloader, cfg):
    if not cfg.vis.heat_map.use:
        return
    # savename must be bound before the loop below uses it.
    savename = cfg.vis.heat_map.save_dir + '/' + dataloader.dataset.authority
    if not osp.exists(savename):
        os.makedirs(savename)
        print(savename, "has been created.")

    model.eval()
    for item in tqdm(dataloader,
                     desc='Draw Heat Map',
                     miniters=20,
                     ncols=120,
                     unit=' batches'):
        with torch.no_grad():
            item = recursive_to_device(item, cfg.device)
            output = model(item)
            draw_heat_map(output['ps_pred_list'],
                          im_path=item['im_path'],
                          savename=savename)
    return
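
draw_heat_map() is not shown in these examples; a hypothetical single-image variant might upsample a part-score map and save it as a grayscale image (every name below is an assumption, not the real helper):

import os.path as osp
import torch.nn.functional as F
from PIL import Image

def save_heat_map(ps_pred, im_path, savename, size=(384, 128)):
    # ps_pred: (C, h, w) part scores for one image; fuse parts by max.
    heat = ps_pred.max(dim=0)[0]
    heat = F.interpolate(heat[None, None], size=size, mode='bilinear',
                         align_corners=False)[0, 0]
    heat = 255 * (heat - heat.min()) / (heat.max() - heat.min() + 1e-6)
    Image.fromarray(heat.byte().cpu().numpy()).save(
        osp.join(savename, osp.basename(im_path)))
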
Example #7
    def train(self):
        print("End epoch is:", self.cfg.optim.epochs)
        for epoch in range(self.current_ep, self.cfg.optim.epochs):
            self.epoch_start_time = time.time()
            self.model.set_train_mode(
                fix_ft_layers=self.cfg.optim.phase == 'pretrain')

            # Unsupervised loader iter
            if self.cfg.dataset.train.type == 'Unsupervised':
                target_iter = iter(self.target_train_loader)

            # Train
            for index, item in enumerate(self.source_train_loader):
                if self.lr_scheduler is not None:
                    self.lr_scheduler.step()
                self.optimizer.zero_grad()

                # Source item
                item = recursive_to_device(item, self.cfg.device)

                # Target item
                if self.cfg.dataset.train.type == 'Unsupervised':
                    try:
                        item_target = next(target_iter)
                    except StopIteration:
                        # Restart the target loader once it is exhausted.
                        target_iter = iter(self.target_train_loader)
                        item_target = next(target_iter)
                    item_target = recursive_to_device(item_target,
                                                      self.cfg.device)

                # Supervised Part
                pred = self.model.forward(item,
                                          cfg=self.cfg,
                                          forward_type='Supervised')

                source_loss = 0
                for loss_type in [
                        self.cfg.id_loss, self.cfg.tri_loss,
                        self.cfg.src_multi_seg_loss
                ]:
                    if loss_type.use:
                        source_loss += self.loss_functions[loss_type.name](
                            item, pred, step=self.current_step)['loss']
                if self.cfg.dataset.train.type == 'Unsupervised':
                    # Unsupervised Part
                    pred = self.model.forward(item_target,
                                              forward_type='Unsupervised')
                    un_loss = self.loss_functions[self.cfg.inv_loss.name](
                        pred, epoch=epoch, step=self.current_step)['loss']
                    loss = ((1 - self.cfg.inv_loss.lmd) * source_loss
                            + self.cfg.inv_loss.lmd * un_loss)
                else:
                    loss = source_loss
                if isinstance(loss, torch.Tensor):
                    loss.backward()
                self.optimizer.step()

                if ((self.current_step + 1) % self.steps_per_log
                        == 0) and (self.print_step_log is not None):
                    self.print_step_log(self.cfg, self.current_ep,
                                        self.current_step, self.optimizer,
                                        self.loss_functions,
                                        self.analyze_functions,
                                        self.epoch_start_time)
                self.current_step += 1
                if (self.trial_run_steps
                        is not None) and (index + 1 >= self.trial_run_steps):
                    break
            if ((self.current_ep + 1) % self.eps_per_log
                    == 0) and (self.print_ep_log is not None):
                self.print_ep_log()
            self.current_ep += 1
            score_str = self.may_test()
            self.may_save_ckpt(score_str, self.current_ep)
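
The try/except refill around target_iter can be factored into a small generator (a sketch); unlike itertools.cycle, re-creating the iterator lets the DataLoader reshuffle on every pass:

def infinite_iter(loader):
    # Yield batches forever, restarting (and reshuffling) the loader
    # each time it is exhausted.
    it = iter(loader)
    while True:
        try:
            yield next(it)
        except StopIteration:
            it = iter(loader)
            yield next(it)

# target_iter = infinite_iter(self.target_train_loader)
# item_target = next(target_iter)
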