Example #1
    def load_net(runner, net, model_path=None):
        if runner.configer.get('gpu') is not None:
            net = RunnerHelper._make_parallel(runner, net)

        net = net.to(
            torch.device(
                'cpu' if runner.configer.get('gpu') is None else 'cuda'))
        if model_path is not None or runner.configer.get('network',
                                                         'resume') is not None:
            resume_path = runner.configer.get('network', 'resume')
            resume_path = model_path if model_path is not None else resume_path
            Log.info('Resuming from {}'.format(resume_path))
            resume_dict = torch.load(resume_path)
            if 'state_dict' in resume_dict:
                checkpoint_dict = resume_dict['state_dict']

            elif 'model' in resume_dict:
                checkpoint_dict = resume_dict['model']

            elif isinstance(resume_dict, OrderedDict):
                checkpoint_dict = resume_dict

            else:
                raise RuntimeError(
                    'No state_dict found in checkpoint file {}'.format(
                        runner.configer.get('network', 'resume')))

            if list(checkpoint_dict.keys())[0].startswith('module.'):
                checkpoint_dict = {
                    k[7:]: v
                    for k, v in checkpoint_dict.items()
                }

            # load state_dict
            if hasattr(net, 'module'):
                RunnerHelper.load_state_dict(
                    net.module, checkpoint_dict,
                    runner.configer.get('network', 'resume_strict'))
            else:
                RunnerHelper.load_state_dict(
                    net, checkpoint_dict,
                    runner.configer.get('network', 'resume_strict'))

            if runner.configer.get('network', 'resume_continue'):
                # runner.configer.resume(resume_dict['config_dict'])
                runner.runner_state = resume_dict['runner_state']

        return net
Example #2
    def train(self):
        """
          Train function of every epoch during train phase.
        """
        self.det_net.train()
        start_time = time.time()
        # Adjust the learning rate after every epoch.
        self.runner_state['epoch'] += 1

        for i, data_dict in enumerate(self.train_loader):
            Trainer.update(self)
            self.data_time.update(time.time() - start_time)
            # Forward pass.
            loss = self.det_net(data_dict)
            loss = loss.mean()
            self.train_losses.update(loss.item(), len(DCHelper.tolist(data_dict['meta'])))

            self.optimizer.zero_grad()
            loss.backward()
            RunnerHelper.clip_grad(self.det_net, 10.)
            self.optimizer.step()

            # Update the vars of the train phase.
            self.batch_time.update(time.time() - start_time)
            start_time = time.time()
            self.runner_state['iters'] += 1

            # Print the log info & reset the states.
            if self.runner_state['iters'] % self.configer.get('solver', 'display_iter') == 0:
                Log.info('Train Epoch: {0}\tTrain Iteration: {1}\t'
                         'Time {batch_time.sum:.3f}s / {2}iters, ({batch_time.avg:.3f})\t'
                         'Data load {data_time.sum:.3f}s / {2}iters, ({data_time.avg:.3f})\n'
                         'Learning rate = {3}\tLoss = {loss.val:.8f} (ave = {loss.avg:.8f})\n'.format(
                    self.runner_state['epoch'], self.runner_state['iters'],
                    self.configer.get('solver', 'display_iter'),
                    RunnerHelper.get_lr(self.optimizer), batch_time=self.batch_time,
                    data_time=self.data_time, loss=self.train_losses))
                self.batch_time.reset()
                self.data_time.reset()
                self.train_losses.reset()

            if self.configer.get('lr', 'metric') == 'iters' \
                    and self.runner_state['iters'] == self.configer.get('solver', 'max_iters'):
                break

            # Check to val the current model.
            if self.runner_state['iters'] % self.configer.get('solver', 'test_interval') == 0:
                self.val()
Example #3
    def relabel(self, json_dir):
        submission_dir = os.path.join(json_dir, self.configer.get('method'))
        if not os.path.exists(submission_dir):
            assert os.path.exists(json_dir)
            os.makedirs(submission_dir)

        img_shotname_list = list()
        object_list = list()

        for json_file in os.listdir(json_dir):
            if 'json' not in json_file:
                continue

            json_path = os.path.join(json_dir, json_file)
            shotname, extensions = os.path.splitext(json_file)
            img_shotname_list.append(shotname)

            with open(json_path, 'r') as json_stream:
                info_tree = json.load(json_stream)
                for obj in info_tree['objects']:
                    # Convert 0-indexed bbox coords to VOC's 1-indexed format.
                    object_list.append([
                        shotname, obj['label'], obj['score'],
                        int(obj['bbox'][0]) + 1,
                        int(obj['bbox'][1]) + 1,
                        int(obj['bbox'][2]) + 1,
                        int(obj['bbox'][3]) + 1
                    ])

        file_header_list = list()
        for cls in self.configer.get('details', 'name_seq'):
            Log.info('Writing {:s} VOC results file'.format(cls))
            filename = self.get_voc_results_file_template(submission_dir, cls)
            file_header = open(filename, 'wt')
            file_header_list.append(file_header)

        for obj in object_list:
            file_header_list[obj[1]].write(
                '{:s} {:.3f} {:.1f} {:.1f} {:.1f} {:.1f}\n'.format(
                    obj[0], obj[2], obj[3], obj[4], obj[5], obj[6]))

        for file_header in file_header_list:
            file_header.close()

        Log.info('Evaluate {} images...'.format(len(img_shotname_list)))
        return submission_dir
Example #4
    def init_optimizer(self, net_params):
        optimizer = None
        if self.configer.get('optim', 'optim_method') == 'sgd':
            optimizer = SGD(
                net_params,
                lr=self.configer.get('lr', 'base_lr'),
                momentum=self.configer.get('optim', 'sgd')['momentum'],
                weight_decay=self.configer.get('optim', 'sgd')['weight_decay'])

        elif self.configer.get('optim', 'optim_method') == 'adam':
            optimizer = Adam(net_params,
                             lr=self.configer.get('lr', 'base_lr'),
                             betas=self.configer.get('optim', 'adam')['betas'],
                             eps=self.configer.get('optim', 'adam')['eps'],
                             weight_decay=self.configer.get(
                                 'optim', 'adam')['weight_decay'])

        else:
            Log.error('Optimizer {} is not valid.'.format(
                self.configer.get('optim', 'optim_method')))
            exit(1)

        policy = self.configer.get('lr', 'lr_policy')

        scheduler = None
        if policy == 'step':
            scheduler = lr_scheduler.StepLR(
                optimizer,
                self.configer.get('lr', 'step')['step_size'],
                gamma=self.configer.get('lr', 'step')['gamma'])

        elif policy == 'multistep':
            scheduler = lr_scheduler.MultiStepLR(
                optimizer,
                self.configer.get('lr', 'multistep')['stepvalue'],
                gamma=self.configer.get('lr', 'multistep')['gamma'])

        elif policy == 'lambda_erfnet':
            lambda_erfnet = lambda epoch: pow((1 - (
                (epoch - 1) / self.configer.get('solver', 'max_epoch'))), 0.9)
            scheduler = lr_scheduler.LambdaLR(optimizer,
                                              lr_lambda=lambda_erfnet)

        else:
            Log.error('Policy:{} is not valid.'.format(policy))
            exit(1)

        return optimizer, scheduler
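The factory above just binds torch's stock optimizers and schedulers to configer lookups. A minimal self-contained sketch of the same SGD + MultiStepLR pairing, with the config lookups replaced by inline values (the numbers are hypothetical, not the repo's defaults):

    import torch.nn as nn
    from torch.optim import SGD, lr_scheduler

    net = nn.Linear(10, 2)
    optimizer = SGD(net.parameters(), lr=0.01, momentum=0.9, weight_decay=0.0005)
    # 'multistep' policy: multiply the LR by gamma at each milestone epoch.
    scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=[30, 60], gamma=0.1)

    for epoch in range(90):
        optimizer.step()       # stand-in for one training epoch
        scheduler.step()       # advance the LR schedule once per epoch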
Example #5
    def __val(self):
        """
          Validation function during the train phase.
        """
        self.seg_net.eval()
        start_time = time.time()

        for j, data_dict in enumerate(self.val_loader):
            inputs = data_dict['img']
            targets = data_dict['labelmap']

            with torch.no_grad():
                # Change the data type.
                inputs, targets = self.module_utilizer.to_device(
                    inputs, targets)
                # Forward pass.
                outputs = self.seg_net(inputs)
                # Compute the loss of the val batch.
                loss = self.pixel_loss(outputs, targets)

                outputs = self.module_utilizer.gather(outputs)
                pred = outputs[0]

            self.val_losses.update(loss.item(), inputs.size(0))
            self.seg_running_score.update(
                pred.max(1)[1].cpu().numpy(),
                targets.cpu().numpy())

            # Update the vars of the val phase.
            self.batch_time.update(time.time() - start_time)
            start_time = time.time()

        self.configer.update_value(['performance'],
                                   self.seg_running_score.get_mean_iou())
        self.configer.update_value(['val_loss'], self.val_losses.avg)
        self.module_utilizer.save_net(self.seg_net, save_mode='performance')
        self.module_utilizer.save_net(self.seg_net, save_mode='val_loss')

        # Print the log info & reset the states.
        Log.info('Test Time {batch_time.sum:.3f}s, ({batch_time.avg:.3f})\t'
                 'Loss {loss.avg:.8f}\n'.format(batch_time=self.batch_time,
                                                loss=self.val_losses))
        Log.info('Mean IOU: {}\n'.format(
            self.seg_running_score.get_mean_iou()))
        self.batch_time.reset()
        self.val_losses.reset()
        self.seg_running_score.reset()
        self.seg_net.train()
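Every __val in these examples follows the same eval/no_grad/train toggling. A standalone sketch of that pattern, reduced to plain torch with a stand-in model and loss:

    import torch
    import torch.nn as nn

    net = nn.Linear(4, 2)
    criterion = nn.CrossEntropyLoss()
    inputs, targets = torch.randn(8, 4), torch.randint(0, 2, (8,))

    net.eval()                     # freeze dropout / BN running statistics
    with torch.no_grad():          # no autograd graph: cheaper forward passes
        loss = criterion(net(inputs), targets)
    print('val loss: {:.8f}'.format(loss.item()))
    net.train()                    # restore train mode before resuming training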
Example #6
    def relabel(self, json_dir, method='mask_rcnn'):
        submission_file = os.path.join(
            json_dir,
            'person_instances_val2017_{}_results.json'.format(method))
        img_id_list = list()
        object_list = list()

        for json_file in os.listdir(json_dir):
            json_path = os.path.join(json_dir, json_file)
            shotname, extensions = os.path.splitext(json_file)
            try:
                img_id = int(shotname)
            except ValueError:
                Log.info('Invalid Json file: {}'.format(json_file))
                continue

            img_id_list.append(img_id)
            with open(json_path, 'r') as json_stream:
                info_tree = json.load(json_stream)
                for obj in info_tree['objects']:
                    object_dict = dict()
                    object_dict['image_id'] = img_id
                    object_dict['category_id'] = int(
                        self.configer.get('data',
                                          'coco_cat_seq')[obj['label']])
                    object_dict['score'] = obj['score']
                    # Convert [x1, y1, x2, y2] to COCO's [x, y, w, h] format.
                    object_dict['bbox'] = [
                        obj['bbox'][0], obj['bbox'][1],
                        obj['bbox'][2] - obj['bbox'][0],
                        obj['bbox'][3] - obj['bbox'][1]
                    ]

                    if isinstance(obj['segm'], dict):
                        object_dict['segmentation'] = obj['segm']
                    else:
                        object_dict['segmentation'] = maskUtils.encode(
                            np.asfortranarray(
                                MaskHelper.polys2mask(obj['segm'],
                                                      info_tree['height'],
                                                      info_tree['width'])))

                    object_list.append(object_dict)

        with open(submission_file, 'w') as write_stream:
            write_stream.write(json.dumps(object_list))

        Log.info('Evaluate {} images...'.format(len(img_id_list)))
        return submission_file, img_id_list
Example #7
    def __test_img(self, image_path, json_path, raw_path, vis_path):

        Log.info('Image Path: {}'.format(image_path))
        ori_image = ImageHelper.read_image(image_path,
                                           tool=self.configer.get('data', 'image_tool'),
                                           mode=self.configer.get('data', 'input_mode'))

        ori_width, ori_height = ImageHelper.get_size(ori_image)
        ori_img_bgr = ImageHelper.get_cv2_bgr(ori_image, mode=self.configer.get('data', 'input_mode'))
        heatmap_avg = np.zeros((ori_height, ori_width, self.configer.get('network', 'heatmap_out')))
        paf_avg = np.zeros((ori_height, ori_width, self.configer.get('network', 'paf_out')))
        multiplier = [scale * self.configer.get('test', 'input_size')[1] / ori_height
                      for scale in self.configer.get('test', 'scale_search')]
        stride = self.configer.get('network', 'stride')
        for i, scale in enumerate(multiplier):
            image, border_hw = self._get_blob(ori_image, scale=scale)
            with torch.no_grad():
                paf_out_list, heatmap_out_list = self.pose_net(image)
                paf_out = paf_out_list[-1]
                heatmap_out = heatmap_out_list[-1]

                # extract outputs, resize, and remove padding
                heatmap = heatmap_out.squeeze(0).cpu().numpy().transpose(1, 2, 0)

                heatmap = cv2.resize(heatmap, None, fx=stride, fy=stride, interpolation=cv2.INTER_CUBIC)
                heatmap = cv2.resize(heatmap[:border_hw[0], :border_hw[1]],
                                     (ori_width, ori_height), interpolation=cv2.INTER_CUBIC)

                paf = paf_out.squeeze(0).cpu().numpy().transpose(1, 2, 0)
                paf = cv2.resize(paf, None, fx=stride, fy=stride, interpolation=cv2.INTER_CUBIC)
                paf = cv2.resize(paf[:border_hw[0], :border_hw[1]],
                                 (ori_width, ori_height), interpolation=cv2.INTER_CUBIC)

                heatmap_avg = heatmap_avg + heatmap / len(multiplier)
                paf_avg = paf_avg + paf / len(multiplier)

        all_peaks = self.__extract_heatmap_info(heatmap_avg)
        special_k, connection_all = self.__extract_paf_info(ori_img_bgr, paf_avg, all_peaks)
        subset, candidate = self.__get_subsets(connection_all, special_k, all_peaks)
        json_dict = self.__get_info_tree(ori_img_bgr, subset, candidate)

        image_canvas = self.pose_parser.draw_points(ori_img_bgr.copy(), json_dict)
        image_canvas = self.pose_parser.link_points(image_canvas, json_dict)

        ImageHelper.save(image_canvas, vis_path)
        ImageHelper.save(ori_img_bgr, raw_path)
        Log.info('Json Save Path: {}'.format(json_path))
        JsonHelper.save_file(json_dict, json_path)
Example #8
    def resize(img, target_size, interpolation=None):
        assert isinstance(target_size, (list, tuple))
        assert isinstance(interpolation, str)

        target_size = tuple(target_size)
        if isinstance(img, Image.Image):
            return ImageHelper.pil_resize(
                img, target_size, interpolation=PIL_INTER_DICT[interpolation])

        elif isinstance(img, np.ndarray):
            return ImageHelper.cv2_resize(
                img, target_size, interpolation=CV2_INTER_DICT[interpolation])

        else:
            Log.error('Image type is invalid.')
            exit(1)
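The dispatch above relies on two lookup tables, PIL_INTER_DICT and CV2_INTER_DICT, that are not shown. A plausible reconstruction (an assumption, not the repo's actual tables) to make the string-keyed interpolation explicit:

    # Hypothetical reconstruction of the interpolation lookup tables.
    import cv2
    import numpy as np
    from PIL import Image

    PIL_INTER_DICT = {'linear': Image.BILINEAR, 'cubic': Image.BICUBIC}
    CV2_INTER_DICT = {'linear': cv2.INTER_LINEAR, 'cubic': cv2.INTER_CUBIC}

    img = np.zeros((240, 320, 3), dtype=np.uint8)
    # cv2.resize expects the target size as (width, height).
    out = cv2.resize(img, (160, 120), interpolation=CV2_INTER_DICT['linear'])

    pil_img = Image.fromarray(img)
    # PIL's Image.resize also takes (width, height).
    pil_out = pil_img.resize((160, 120), PIL_INTER_DICT['cubic'])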
Example #9
    def __read_list(self, root_dir, list_path):
        item_list = []
        with open(list_path, 'r') as f:
            for line in f:
                filename = line.strip().split()[0]
                img_path = os.path.join(root_dir, filename)
                if not os.path.exists(img_path) or not ImageHelper.is_img(
                        img_path):
                    Log.error('Image Path: {} is Invalid.'.format(img_path))
                    exit(1)

                item_list.append(
                    (img_path, '.'.join(filename.split('.')[:-1])))

        Log.info('There are {} images...'.format(len(item_list)))
        return item_list
Example #10
    def caffe_resnet101(self, **kwargs):
        """Constructs a ResNet-101 model.
        Args:
            pretrained (bool): If True, returns a model pre-trained on Places
        """
        model = CaffeResNet(Bottleneck, [3, 4, 23, 3], bn_type=self.configer.get('network', 'bn_type'), **kwargs)
        if self.configer.get('network', 'pretrained') or self.configer.get('network', 'pretrained_model') is not None:
            if self.configer.get('network', 'pretrained_model') is not None:
                Log.info('Loading pretrained model:{}'.format(self.configer.get('network', 'pretrained_model')))
                pretrained_dict = torch.load(self.configer.get('network', 'pretrained_model'))
            else:
                pretrained_dict = self.load_url(model_urls['caffe_resnet101'])

            model.load_state_dict(pretrained_dict)

        return model
Example #11
    def resnet18(self, **kwargs):
        """Constructs a ResNet-18 model.
        Args:
            pretrained (bool): If True, returns a model pre-trained on Places
        """
        model = ResNet(BasicBlock, [2, 2, 2, 2], bn_type=self.configer.get('network', 'bn_type'), **kwargs)
        if self.configer.get('network', 'pretrained') or self.configer.get('network', 'pretrained_model') is not None:
            if self.configer.get('network', 'pretrained_model') is not None:
                Log.info('Loading pretrained model:{}'.format(self.configer.get('network', 'pretrained_model')))
                pretrained_dict = torch.load(self.configer.get('network', 'pretrained_model'))
            else:
                pretrained_dict = self.load_url(model_urls['resnet18'])

            model.load_state_dict(pretrained_dict)

        return model
Example #12
    def pil_resize(img, target_size, interpolation):
        assert isinstance(target_size, (list, tuple))

        target_size = tuple(target_size)

        if isinstance(img, Image.Image):
            return img.resize(target_size, interpolation)

        elif isinstance(img, np.ndarray):
            pil_img = ImageHelper.np2img(img)
            return ImageHelper.img2np(
                pil_img.resize(target_size, interpolation))

        else:
            Log.error('Image type is invalid.')
            exit(1)
Example #13
    def __list_dirs(self, root_dir, dataset):
        img_list = list()
        label_list = list()
        size_list = list()
        image_dir = os.path.join(root_dir, dataset, 'image')
        label_dir = os.path.join(root_dir, dataset, 'label')
        img_extension = os.listdir(image_dir)[0].split('.')[-1]

        for file_name in os.listdir(label_dir):
            image_name = '.'.join(file_name.split('.')[:-1])
            img_path = os.path.join(image_dir,
                                    '{}.{}'.format(image_name, img_extension))
            label_path = os.path.join(label_dir, file_name)
            if not os.path.exists(label_path) or not os.path.exists(img_path):
                Log.error('Label Path: {} does not exist.'.format(label_path))
                continue

            img_list.append(img_path)
            label_list.append(label_path)
            img = ImageHelper.read_image(
                img_path,
                tool=self.configer.get('data', 'image_tool'),
                mode=self.configer.get('data', 'input_mode'))
            size_list.append(ImageHelper.get_size(img))

        if dataset == 'train' and self.configer.get('data', 'include_val'):
            image_dir = os.path.join(root_dir, 'val/image')
            label_dir = os.path.join(root_dir, 'val/label')
            for file_name in os.listdir(label_dir):
                image_name = '.'.join(file_name.split('.')[:-1])
                img_path = os.path.join(
                    image_dir, '{}.{}'.format(image_name, img_extension))
                label_path = os.path.join(label_dir, file_name)
                if not os.path.exists(label_path) or not os.path.exists(
                        img_path):
                    Log.error('Label Path: {} does not exist.'.format(label_path))
                    continue

                img_list.append(img_path)
                label_list.append(label_path)
                img = ImageHelper.read_image(
                    img_path,
                    tool=self.configer.get('data', 'image_tool'),
                    mode=self.configer.get('data', 'input_mode'))
                size_list.append(ImageHelper.get_size(img))

        return img_list, label_list, size_list
Example #14
    def __val(self):
        """
          Validation function during the train phase.
        """
        self.det_net.eval()
        start_time = time.time()
        with torch.no_grad():
            for i, data_dict in enumerate(self.val_loader):
                inputs = data_dict['img']
                batch_gt_bboxes = data_dict['bboxes']
                batch_gt_labels = data_dict['labels']
                input_size = [inputs.size(3), inputs.size(2)]
                # Forward pass.
                inputs = self.module_utilizer.to_device(inputs)
                feat_list, predictions, detections = self.det_net(inputs)

                targets, objmask, noobjmask = self.yolo_target_generator(
                    feat_list, batch_gt_bboxes, batch_gt_labels, input_size)
                targets, objmask, noobjmask = self.module_utilizer.to_device(
                    targets, objmask, noobjmask)

                # Compute the loss of the val batch.
                loss = self.det_loss(predictions, targets, objmask, noobjmask)
                self.val_losses.update(loss.item(), inputs.size(0))

                batch_detections = YOLOv3Test.decode(detections, self.configer)
                batch_pred_bboxes = self.__get_object_list(
                    batch_detections, input_size)

                self.det_running_score.update(batch_pred_bboxes,
                                              batch_gt_bboxes, batch_gt_labels)

                # Update the vars of the val phase.
                self.batch_time.update(time.time() - start_time)
                start_time = time.time()

            self.module_utilizer.save_net(self.det_net, save_mode='iters')
            # Print the log info & reset the states.
            Log.info(
                'Test Time {batch_time.sum:.3f}s, ({batch_time.avg:.3f})\t'
                'Loss {loss.avg:.8f}\n'.format(batch_time=self.batch_time,
                                               loss=self.val_losses))
            Log.info('Val mAP: {}'.format(self.det_running_score.get_mAP()))
            self.det_running_score.reset()
            self.batch_time.reset()
            self.val_losses.reset()
            self.det_net.train()
Example #15
    def load_net(self, net):
        if self.configer.get('gpu') is not None:
            net = self._make_parallel(net)

        net = net.to(
            torch.device(
                'cpu' if self.configer.get('gpu') is None else 'cuda'))
        net.float()
        if self.configer.get('network', 'resume') is not None:
            Log.info('Loading checkpoint from {}...'.format(
                self.configer.get('network', 'resume')))
            resume_dict = torch.load(self.configer.get('network', 'resume'))
            if 'state_dict' in resume_dict:
                checkpoint_dict = resume_dict['state_dict']

            elif 'model' in resume_dict:
                checkpoint_dict = resume_dict['model']

            elif isinstance(resume_dict, OrderedDict):
                checkpoint_dict = resume_dict

            else:
                raise RuntimeError(
                    'No state_dict found in checkpoint file {}'.format(
                        self.configer.get('network', 'resume')))

            if list(checkpoint_dict.keys())[0].startswith('module.'):
                checkpoint_dict = {
                    k[7:]: v
                    for k, v in checkpoint_dict.items()
                }

            # load state_dict
            if hasattr(net, 'module'):
                self.load_state_dict(
                    net.module, checkpoint_dict,
                    self.configer.get('network', 'resume_strict'))
            else:
                self.load_state_dict(
                    net, checkpoint_dict,
                    self.configer.get('network', 'resume_strict'))

            if self.configer.get('network', 'resume_continue'):
                self.configer.resume(resume_dict['config_dict'])

        return net
Example #16
    def update(self, key_tuple, value):
        if not self.exists(*key_tuple):
            Log.error('{} Key: {} does not exist!'.format(
                self._get_caller(), key_tuple))
            exit(1)

        if len(key_tuple) == 1 and not isinstance(
                self.params_root[key_tuple[0]], dict):
            self.params_root[key_tuple[0]] = value

        elif len(key_tuple) == 2:
            self.params_root[key_tuple[0]][key_tuple[1]] = value

        else:
            Log.error('{} Key: {} is invalid.'.format(
                self._get_caller(), key_tuple))
            exit(1)
Example #17
    def _init(self):
        self.configer.add_key_value(['iters'], 0)
        self.configer.add_key_value(['last_iters'], 0)
        self.configer.add_key_value(['epoch'], 0)
        self.configer.add_key_value(['last_epoch'], 0)
        self.configer.add_key_value(['max_performance'], 0.0)
        self.configer.add_key_value(['performance'], 0.0)
        self.configer.add_key_value(['min_val_loss'], 9999.0)
        self.configer.add_key_value(['val_loss'], 9999.0)
        self.configer.add_key_value(['network', 'parallel'], False)
        if self.configer.is_empty('network', 'bn_type'):
            self.configer.add_key_value(['network', 'bn_type'], 'torchbn')

        if self.configer.get('gpu') is not None and len(self.configer.get('gpu')) == 1:
            self.configer.update_value(['network', 'bn_type'], 'torchbn')

        Log.info('BN Type is {}.'.format(self.configer.get('network', 'bn_type')))
Example #18
    def save_net(self, net, save_mode='iters'):
        state = {
            'config_dict': self.configer.to_dict(),
            'state_dict': net.state_dict(),
        }
        if self.configer.get('checkpoints', 'checkpoints_root') is None:
            checkpoints_dir = os.path.join(self.configer.get('project_dir'),
                                           self.configer.get('checkpoints', 'checkpoints_dir'))
        else:
            checkpoints_dir = os.path.join(self.configer.get('checkpoints', 'checkpoints_root'),
                                           self.configer.get('checkpoints', 'checkpoints_dir'))

        if not os.path.exists(checkpoints_dir):
            os.makedirs(checkpoints_dir)

        if save_mode == 'performance':
            if self.configer.get('performance') > self.configer.get('max_performance'):
                latest_name = '{}_max_performance.pth'.format(self.configer.get('checkpoints', 'checkpoints_name'))
                torch.save(state, os.path.join(checkpoints_dir, latest_name))
                self.configer.update_value(['max_performance'], self.configer.get('performance'))

        elif save_mode == 'val_loss':
            if self.configer.get('val_loss') < self.configer.get('min_val_loss'):
                latest_name = '{}_min_loss.pth'.format(self.configer.get('checkpoints', 'checkpoints_name'))
                torch.save(state, os.path.join(checkpoints_dir, latest_name))
                self.configer.update_value(['min_val_loss'], self.configer.get('val_loss'))

        elif save_mode == 'iters':
            if self.configer.get('iters') - self.configer.get('last_iters') >= \
                    self.configer.get('checkpoints', 'save_iters'):
                latest_name = '{}_iters{}.pth'.format(self.configer.get('checkpoints', 'checkpoints_name'),
                                                      self.configer.get('iters'))
                torch.save(state, os.path.join(checkpoints_dir, latest_name))
                self.configer.update_value(['last_iters'], self.configer.get('iters'))

        elif save_mode == 'epoch':
            if self.configer.get('epoch') - self.configer.get('last_epoch') >= \
                    self.configer.get('checkpoints', 'save_epoch'):
                latest_name = '{}_epoch{}.pth'.format(self.configer.get('checkpoints', 'checkpoints_name'),
                                                      self.configer.get('epoch'))
                torch.save(state, os.path.join(checkpoints_dir, latest_name))
                self.configer.update_value(['last_epoch'], self.configer.get('epoch'))

        else:
            Log.error('Metric: {} is invalid.'.format(save_mode))
            exit(1)
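The save_mode branches above all reduce to compare-then-save bookkeeping. A trimmed, runnable sketch of the 'val_loss' branch using plain torch (the path and loss values are hypothetical):

    import os
    import torch
    import torch.nn as nn

    net = nn.Linear(4, 2)
    checkpoints_dir = 'checkpoints'           # hypothetical path
    os.makedirs(checkpoints_dir, exist_ok=True)

    min_val_loss = float('inf')
    for val_loss in [0.9, 0.7, 0.8]:          # stand-in validation losses
        if val_loss < min_val_loss:           # save only on improvement
            state = {'state_dict': net.state_dict()}
            torch.save(state, os.path.join(checkpoints_dir, 'model_min_loss.pth'))
            min_val_loss = val_loss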
Example #19
    def val(self):
        """
          Validation function during the train phase.
        """
        self.det_net.eval()
        start_time = time.time()
        with torch.no_grad():
            for j, data_dict in enumerate(self.val_loader):
                # Forward pass.
                out_dict = self.det_net(data_dict)
                # Compute the loss of the train batch & backward.
                loss = out_dict['loss'].mean()
                self.val_losses.update(loss.item(),
                                       len(DCHelper.tolist(data_dict['meta'])))
                (test_indices_and_rois, test_roi_locs, test_roi_scores,
                 test_rois_num) = out_dict['test_group']
                batch_detections = FastRCNNTest.decode(
                    test_roi_locs, test_roi_scores, test_indices_and_rois,
                    test_rois_num, self.configer,
                    DCHelper.tolist(data_dict['meta']))
                batch_pred_bboxes = self.__get_object_list(batch_detections)
                self.det_running_score.update(batch_pred_bboxes, [
                    item['ori_bboxes']
                    for item in DCHelper.tolist(data_dict['meta'])
                ], [
                    item['ori_labels']
                    for item in DCHelper.tolist(data_dict['meta'])
                ])

                # Update the vars of the val phase.
                self.batch_time.update(time.time() - start_time)
                start_time = time.time()

            RunnerHelper.save_net(self,
                                  self.det_net,
                                  iters=self.runner_state['iters'])
            # Print the log info & reset the states.
            Log.info(
                'Test Time {batch_time.sum:.3f}s, ({batch_time.avg:.3f})\t'
                'Loss {loss.avg:.8f}\n'.format(batch_time=self.batch_time,
                                               loss=self.val_losses))
            Log.info('Val mAP: {}\n'.format(self.det_running_score.get_mAP()))
            self.det_running_score.reset()
            self.batch_time.reset()
            self.val_losses.reset()
            self.det_net.train()
Example #20
    def test(self, test_dir, out_dir):
        for _, data_dict in enumerate(self.test_loader.get_testloader(test_dir=test_dir)):
            data_dict['testing'] = True
            detections = self.det_net(data_dict)
            meta_list = DCHelper.tolist(data_dict['meta'])
            batch_detections = self.decode(detections, self.configer, meta_list)
            for i in range(len(meta_list)):
                ori_img_bgr = ImageHelper.read_image(meta_list[i]['img_path'], tool='cv2', mode='BGR')
                json_dict = self.__get_info_tree(batch_detections[i])
                image_canvas = self.det_parser.draw_bboxes(ori_img_bgr.copy(), json_dict,
                                                           conf_threshold=self.configer.get('res', 'vis_conf_thre'))
                ImageHelper.save(image_canvas,
                                 save_path=os.path.join(out_dir, 'vis/{}.png'.format(meta_list[i]['filename'])))

                Log.info('Json Path: {}'.format(os.path.join(out_dir, 'json/{}.json'.format(meta_list[i]['filename']))))
                JsonHelper.save_file(json_dict,
                                     save_path=os.path.join(out_dir, 'json/{}.json'.format(meta_list[i]['filename'])))
Example #21
    def parse_img_det(self, image_file, json_file):
        if image_file is None or not os.path.exists(image_file):
            Log.error('Image file: {} does not exist.'.format(image_file))
            return

        if json_file is None or not os.path.exists(json_file):
            Log.error('Json file: {} does not exist.'.format(json_file))
            return

        image_canvas = cv2.imread(image_file)  # B, G, R order.

        with open(json_file, 'r') as json_stream:
            info_tree = json.load(json_stream)
            image_canvas = self.draw_bboxes(image_canvas, info_tree)

        cv2.imshow('main', image_canvas)
        cv2.waitKey()
Example #22
    def parse_img_seg(self, image_file, label_file):
        if image_file is None or not os.path.exists(image_file):
            Log.error('Image file: {} does not exist.'.format(image_file))
            return

        if label_file is None or not os.path.exists(label_file):
            Log.error('Label file: {} does not exist.'.format(label_file))
            return

        image_canvas = cv2.imread(image_file)  # B, G, R order.

        mask_canvas = self.colorize(
            np.array(Image.open(label_file).convert('P')))
        image_canvas = cv2.addWeighted(image_canvas, 0.6, mask_canvas, 0.4, 0)

        cv2.imshow('main', image_canvas)
        cv2.waitKey()
Example #23
    def __val(self):
        """
          Validation function during the train phase.
        """
        self.pose_net.eval()
        start_time = time.time()

        with torch.no_grad():
            for j, (inputs, heatmap, maskmap, tagmap,
                    num_objects) in enumerate(self.val_loader):
                # Change the data type.
                (inputs, heatmap, maskmap, tagmap,
                 num_objects) = self.module_utilizer.to_device(
                     inputs, heatmap, maskmap, tagmap, num_objects)

                # Forward pass.
                embed_out, heatmap_out = self.pose_net(inputs)
                # Compute the loss of the val batch.
                loss_heatmap = self.mse_loss(heatmap_out, heatmap, maskmap)
                loss_associate = self.embedding_loss(embed_out, tagmap,
                                                     num_objects)

                loss = loss_heatmap + loss_associate

                self.val_losses.update(loss.item(), inputs.size(0))
                self.val_loss_heatmap.update(loss_heatmap.item(),
                                             inputs.size(0))

                # Update the vars of the val phase.
                self.batch_time.update(time.time() - start_time)
                start_time = time.time()

            self.module_utilizer.save_net(self.pose_net, metric='iters')

            Log.info('Loss Heatmap:{}, Loss Asso: {}'.format(
                self.val_loss_heatmap.avg,
                self.val_losses.avg - self.val_loss_heatmap.avg))
            # Print the log info & reset the states.
            Log.info(
                'Test Time {batch_time.sum:.3f}s, ({batch_time.avg:.3f})\t'
                'Loss {loss.avg:.8f}\n'.format(batch_time=self.batch_time,
                                               loss=self.val_losses))
            self.batch_time.reset()
            self.val_losses.reset()
            self.val_loss_heatmap.reset()
            self.pose_net.train()
Example #24
    def add_key_value(self, key_tuple, value):
        if not self.is_empty(*key_tuple):
            Log.error('{} Key: {} existed!!!'.format(self._get_caller(), key_tuple))
            exit(1)

        if len(key_tuple) == 1:
            self.params_root[key_tuple[0]] = value

        elif len(key_tuple) == 2:
            if key_tuple[0] not in self.params_root:
                self.params_root[key_tuple[0]] = dict()

            self.params_root[key_tuple[0]][key_tuple[1]] = value

        else:
            Log.error('{} KeyError: {}.'.format(self._get_caller(), key_tuple))
            exit(1)
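update (Example #16) and add_key_value (Example #24) treat the config as a two-level dict keyed by tuples. A minimal standalone model of those semantics, a sketch rather than the repo's Configer class:

    params_root = {}

    def add_key_value(key_tuple, value):
        # Two-level keys create the parent dict on demand; deeper keys are invalid.
        if len(key_tuple) == 1:
            params_root[key_tuple[0]] = value
        elif len(key_tuple) == 2:
            params_root.setdefault(key_tuple[0], {})[key_tuple[1]] = value
        else:
            raise KeyError(key_tuple)

    add_key_value(['network', 'bn_type'], 'torchbn')
    add_key_value(['iters'], 0)
    assert params_root == {'network': {'bn_type': 'torchbn'}, 'iters': 0}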
Example #25
    def cls_nms(bboxes, scores=None, labels=None, nms_threshold=0.0,
                iou_mode='union', cls_keep_num=None, nms_mode='nms',
                score_threshold=0.001, soft_sigma=0.5, soft_method='linear'):
        bboxes = bboxes.contiguous().view(-1, 4)
        if scores is not None:
            scores = scores.contiguous().view(-1,)

        if labels is not None:
            labels = labels.contiguous().view(-1,)

        unique_labels = labels.cpu().unique().to(bboxes.device)

        cls_keep_list = list()
        for c in unique_labels:
            cls_index = torch.nonzero(labels == c).squeeze(1)
            if nms_mode == 'nms':
                cls_keep = DetHelper.nms(bboxes[cls_index],
                                         scores=None if scores is None else scores[cls_index],
                                         nms_threshold=nms_threshold,
                                         mode=iou_mode)

            elif nms_mode == 'cython_nms':
                cls_keep = DetHelper.cython_nms(bboxes[cls_index],
                                                scores=None if scores is None else scores[cls_index],
                                                nms_threshold=nms_threshold)

            elif nms_mode == 'cython_soft_nms':
                cls_keep = DetHelper.cython_soft_nms(bboxes[cls_index],
                                                     scores=None if scores is None else scores[cls_index],
                                                     nms_threshold=nms_threshold,
                                                     score_threshold=score_threshold,
                                                     sigma=soft_sigma,
                                                     method=soft_method)

            else:
                Log.error('Not supported NMS mode: {}.'.format(nms_mode))
                exit(1)

            if cls_keep_num is not None:
                cls_keep = cls_keep[:cls_keep_num]

            cls_keep_list.append(cls_index[cls_keep])

        return torch.cat(cls_keep_list, 0)
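The per-class loop in cls_nms can also be expressed with torchvision's batched_nms, which suppresses overlaps only within the same label. A runnable sketch, assuming torchvision is installed; it swaps in a different backend rather than reproducing DetHelper:

    import torch
    from torchvision.ops import batched_nms

    bboxes = torch.tensor([[0., 0., 10., 10.],
                           [1., 1., 10., 10.],    # overlaps box 0, same label
                           [0., 0., 10., 10.]])   # overlaps box 0, other label
    scores = torch.tensor([0.9, 0.8, 0.7])
    labels = torch.tensor([0, 0, 1])

    keep = batched_nms(bboxes, scores, labels, iou_threshold=0.5)
    # -> indices [0, 2]: box 1 is suppressed, box 2 survives (different class)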
Example #26
    def load_state_dict(module, state_dict, strict=False):
        """Load state_dict to a module.
        This method is modified from :meth:`torch.nn.Module.load_state_dict`.
        Default value for ``strict`` is set to ``False`` and the message for
        param mismatch will be shown even if strict is False.
        Args:
            module (Module): Module that receives the state_dict.
            state_dict (OrderedDict): Weights.
            strict (bool): whether to strictly enforce that the keys
                in :attr:`state_dict` match the keys returned by this module's
                :meth:`~torch.nn.Module.state_dict` function. Default: ``False``.
        """
        unexpected_keys = []
        own_state = module.state_dict()
        for name, param in state_dict.items():
            if name not in own_state:
                unexpected_keys.append(name)
                continue
            if isinstance(param, torch.nn.Parameter):
                # backwards compatibility for serialized parameters
                param = param.data

            try:
                own_state[name].copy_(param)
            except Exception:
                raise RuntimeError(
                    'While copying the parameter named {}, '
                    'whose dimensions in the model are {} and '
                    'whose dimensions in the checkpoint are {}.'.format(
                        name, own_state[name].size(), param.size()))
        missing_keys = set(own_state.keys()) - set(state_dict.keys())

        err_msg = []
        if unexpected_keys:
            err_msg.append('unexpected key in source state_dict: {}\n'.format(
                ', '.join(unexpected_keys)))
        if missing_keys:
            err_msg.append('missing keys in source state_dict: {}\n'.format(
                ', '.join(missing_keys)))
        err_msg = '\n'.join(err_msg)
        if err_msg:
            if strict:
                raise RuntimeError(err_msg)
            else:
                Log.warn(err_msg)
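Examples #1 and #15 strip the 'module.' prefix that nn.DataParallel adds before calling this helper. A self-contained sketch of that handoff using torch's own non-strict loading:

    # A DataParallel wrapper prefixes every key with 'module.', so strip it
    # before loading into an unwrapped net; strict=False tolerates mismatches.
    import torch
    import torch.nn as nn

    wrapped = nn.DataParallel(nn.Linear(4, 2))
    checkpoint_dict = wrapped.state_dict()    # keys look like 'module.weight'

    if list(checkpoint_dict.keys())[0].startswith('module.'):
        checkpoint_dict = {k[7:]: v for k, v in checkpoint_dict.items()}

    net = nn.Linear(4, 2)
    missing, unexpected = net.load_state_dict(checkpoint_dict, strict=False)
    assert not missing and not unexpected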
Example #27
    def test(runner):
        Log.info('Testing start...')
        base_dir = os.path.join(
            runner.configer.get('project_dir'), 'out/results',
            runner.configer.get('task'),
            runner.configer.get('checkpoints', 'checkpoints_name'),
            runner.configer.get('test', 'out_dir'))

        test_img = runner.configer.get('test', 'test_img')
        test_dir = runner.configer.get('test', 'test_dir')
        if test_img is None and test_dir is None:
            Log.error('test_img & test_dir not exists.')
            exit(1)

        if test_img is not None and test_dir is not None:
            Log.error('Set either test_img or test_dir, not both.')
            exit(1)

        if test_img is not None:
            base_dir = os.path.join(base_dir, 'test_img')
            filename = test_img.rstrip().split('/')[-1]
            label_path = os.path.join(
                base_dir, 'label',
                '{}.png'.format('.'.join(filename.split('.')[:-1])))
            raw_path = os.path.join(base_dir, 'raw', filename)
            vis_path = os.path.join(
                base_dir, 'vis',
                '{}_vis.png'.format('.'.join(filename.split('.')[:-1])))
            FileHelper.make_dirs(label_path, is_file=True)
            FileHelper.make_dirs(raw_path, is_file=True)
            FileHelper.make_dirs(vis_path, is_file=True)

            runner.test_img(test_img, label_path, vis_path, raw_path)

        else:
            base_dir = os.path.join(base_dir, 'test_dir',
                                    test_dir.rstrip('/').split('/')[-1])
            FileHelper.make_dirs(base_dir)

            for filename in FileHelper.list_dir(test_dir):
                image_path = os.path.join(test_dir, filename)
                label_path = os.path.join(
                    base_dir, 'label',
                    '{}.png'.format('.'.join(filename.split('.')[:-1])))
                raw_path = os.path.join(base_dir, 'raw', filename)
                vis_path = os.path.join(
                    base_dir, 'vis',
                    '{}_vis.png'.format('.'.join(filename.split('.')[:-1])))
                FileHelper.make_dirs(label_path, is_file=True)
                FileHelper.make_dirs(raw_path, is_file=True)
                FileHelper.make_dirs(vis_path, is_file=True)

                runner.test_img(image_path, label_path, vis_path, raw_path)

        Log.info('Testing end...')
Example #28
    def vis_peaks(self,
                  heatmap_in,
                  ori_img_in,
                  name='default',
                  sub_dir='peaks'):
        base_dir = os.path.join(self.configer.get('project_dir'), POSE_DIR,
                                sub_dir)
        if not os.path.exists(base_dir):
            Log.error('Dir: {} does not exist; creating it.'.format(base_dir))
            os.makedirs(base_dir)

        if not isinstance(heatmap_in, np.ndarray):
            if len(heatmap_in.size()) != 3:
                Log.error('Heatmap size is not valid.')
                exit(1)

            heatmap = heatmap_in.clone().data.cpu().numpy().transpose(1, 2, 0)
        else:
            heatmap = heatmap_in.copy()

        if not isinstance(ori_img_in, np.ndarray):
            ori_img = DeNormalize(
                mean=self.configer.get('trans_params', 'mean'),
                std=self.configer.get('trans_params',
                                      'std'))(ori_img_in.clone())
            ori_img = ori_img.data.cpu().squeeze().numpy().transpose(
                1, 2, 0).astype(np.uint8)
            ori_img = cv2.cvtColor(ori_img, cv2.COLOR_RGB2BGR)
        else:
            ori_img = ori_img_in.copy()

        for j in range(self.configer.get('data', 'num_keypoints')):
            peaks = self.__get_peaks(heatmap[:, :, j])

            for peak in peaks:
                ori_img = cv2.circle(ori_img, (peak[0], peak[1]),
                                     self.configer.get('vis', 'circle_radius'),
                                     self.configer.get('details',
                                                       'color_list')[j],
                                     thickness=-1)

            ori_img = cv2.resize(
                ori_img, tuple(self.configer.get('data', 'input_size')))
            cv2.imwrite(os.path.join(base_dir, '{}_{}.jpg'.format(name, j)),
                        ori_img)
Example #29
    def __init__(self, configer):
        self.configer = configer

        if self.configer.get('data', 'image_tool') == 'pil':
            self.aug_test_transform = pil_aug_trans.PILAugCompose(
                self.configer, split='test')
        elif self.configer.get('data', 'image_tool') == 'cv2':
            self.aug_test_transform = cv2_aug_trans.CV2AugCompose(
                self.configer, split='test')
        else:
            Log.error('Image tool {} is not supported.'.format(
                self.configer.get('data', 'image_tool')))
            exit(1)

        self.img_transform = Compose([
            ToTensor(),
            Normalize(**self.configer.get('data', 'normalize')),
        ])
Example #30
    def BatchNorm2d(bn_type='torch', ret_cls=False):
        if bn_type == 'torchbn':
            return nn.BatchNorm2d

        elif bn_type == 'syncbn':
            from extensions.ops.sync_bn.syncbn import BatchNorm2d
            return BatchNorm2d

        # elif bn_type == 'inplace_abn':
        #    from extensions.ops.inplace_abn.bn import InPlaceABNSync
        #    if ret_cls:
        #        return InPlaceABNSync

        #    return functools.partial(InPlaceABNSync, activation='none')

        else:
            Log.error('BN type {} is not supported.'.format(bn_type))
            exit(1)
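The factory returns a BN class so model code can stay agnostic to the BN implementation. A sketch of wiring it into a conv block, with nn.SyncBatchNorm standing in for the repo's syncbn extension (an assumption; the repo ships its own op):

    import torch.nn as nn

    def batchnorm2d(bn_type='torchbn'):
        if bn_type == 'torchbn':
            return nn.BatchNorm2d
        elif bn_type == 'syncbn':
            return nn.SyncBatchNorm   # stand-in for the repo's syncbn extension
        raise ValueError('BN type {} is not supported.'.format(bn_type))

    ModuleBN = batchnorm2d('torchbn')
    block = nn.Sequential(
        nn.Conv2d(3, 16, kernel_size=3, padding=1, bias=False),
        ModuleBN(16),
        nn.ReLU(inplace=True),
    )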