def _get_child_configer_transform(self, child_config_file):
    """Build a per-dataset configer plus its matching CV2 augmentation pipeline.

    Clones the parent configer, overlays the child config file's 'data'
    section, and — when adaptive transforms are enabled or this is the 'val'
    split — also overlays the child's train/val transform settings.
    Returns the merged configer and a CV2AugCompose built from it.
    """
    child_cfg = Configer(configs=child_config_file)
    merged = self.configer.clone()

    # Dataset-specific 'data' keys override the parent's.
    merged.params_root['data'].update(child_cfg.get('data'))

    adopt_child_trans = (self.configer.exists('use_adaptive_transform')
                         or self.dataset == 'val')
    if adopt_child_trans:
        merged.params_root.update({
            'train_trans': child_cfg.params_root['train_trans'],
            'val_trans': child_cfg.params_root['val_trans'],
        })

    transform = CV2AugCompose(split=self.dataset, configer=merged)
    return merged, transform
# Exemplo n.º 2
# 0
    def __init__(self, args, device='cuda'):
        """Restore a deploy model from the checkpoint at ``args.model_path``.

        Falls back to CPU automatically when no CUDA device is visible.
        """
        # No visible GPU: force CPU regardless of the caller's request.
        if torch.cuda.device_count() == 0:
            device = 'cpu'
        self.device = torch.device(device)

        Log.info('Resuming from {}...'.format(args.model_path))
        ckpt = torch.load(args.model_path)

        # Rebuild the configer from the config snapshot embedded in the checkpoint.
        self.configer = Configer(config_dict=ckpt['config_dict'],
                                 args_parser=args,
                                 valid_flag="deploy")

        self.net = ModelManager(self.configer).get_deploy_model()
        RunnerHelper.load_state_dict(self.net, ckpt['state_dict'], False)

        # Wrap for multi-GPU inference only when actually running on CUDA.
        if device == 'cuda':
            self.net = DataParallelModel(self.net, gather_=True)
        self.net = self.net.to(self.device).eval()

        self.test_loader = DataLoader(self.configer)
# Exemplo n.º 3
# 0
            color_dst[label_map == i] = color_list[i % len(color_list)]

        color_img_rgb = np.array(color_dst, dtype=np.uint8)
        color_img_bgr = cv2.cvtColor(color_img_rgb, cv2.COLOR_RGB2BGR)

        if image_canvas is not None:
            image_canvas = cv2.addWeighted(image_canvas, 0.6, color_img_bgr, 0.4, 0)
            return image_canvas

        else:
            return color_img_bgr

if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    # All options are optional strings defaulting to None; dest matches flag.
    for flag, help_text in (
            ('configs', 'The file of the hyper parameters.'),
            ('image_file', 'The image file of Seg Parser.'),
            ('label_file', 'The label file of Seg Parser.'),
            ('image_dir', 'The image directory of Seg Parser.'),
            ('label_dir', 'The label directory of Seg Parser.')):
        parser.add_argument('--' + flag, default=None, type=str,
                            dest=flag, help=help_text)

    args_parser = parser.parse_args()

    # Parse a single image/label pair first, then a whole directory.
    seg_parser = SegParser(Configer(configs=args_parser.configs))
    seg_parser.parse_img_seg(args_parser.image_file, args_parser.label_file)
    seg_parser.parse_dir_seg(args_parser.image_dir, args_parser.label_dir)
                    dest='deploy.norm_type',
                    help='The pool type.')
# NOTE(review): 'parser' is created earlier in this snippet (not shown here).
parser.add_argument('--gpu_id',
                    default=[0, 1],
                    type=int,
                    dest='gpu_id',
                    help='The gpu id.')
# NOTE(review): type=int with a list default and no nargs — supplying
# --gpu_id on the CLI would produce a single int, not a list; this likely
# relies on the default being used. Confirm intended usage.
args = parser.parse_args()

#second
# Ensemble export: load two trained classifiers, merge them, and prepare a
# dummy input on CPU (export/tracing scenario).
model_path1 = '../../checkpoints/cls/resnet152/google_landmark_2020_resnet152_v2cluster_448_GPU8_final.pth'
model_path2 = '../../checkpoints/cls/resnest200/google_landmark_2020_resnest200_v2cluster_448_GPU8_final.pth'

# Rebuild model 1 from the config snapshot embedded in its checkpoint.
checkpoint_dict1 = torch.load(model_path1)
configer1 = Configer(config_dict=checkpoint_dict1['config_dict'],
                     args_parser=args,
                     valid_flag="deploy")
net1 = ModelManager(configer1).get_deploy_model()
RunnerHelper.load_state_dict(net1, checkpoint_dict1['state_dict'], False)

# Rebuild model 2 the same way.
checkpoint_dict2 = torch.load(model_path2)
configer2 = Configer(config_dict=checkpoint_dict2['config_dict'],
                     args_parser=args,
                     valid_flag="deploy")
net2 = ModelManager(configer2).get_deploy_model()
RunnerHelper.load_state_dict(net2, checkpoint_dict2['state_dict'], False)

# Combine both networks and switch to CPU eval mode.
net = MergeClsModel(net1, net2)
device = torch.device('cpu')
net = net.to(device).eval()
# Batch of one 3x512x512 random image, presumably for tracing/ONNX export —
# TODO confirm downstream use (not visible in this chunk).
dummy_input = torch.randn(1, 3, 512, 512).to(device)
# Exemplo n.º 5
# 0
    # NOTE(review): 'parser' is created earlier in this snippet (not shown).
    # Collect any leftover CLI tokens (used by the distributed launcher).
    parser.add_argument('REMAIN', nargs='*')

    args_parser = parser.parse_args()

    # Set up distributed execution before any CUDA/config work.
    from lib.utils.distributed import handle_distributed
    handle_distributed(args_parser,
                       os.path.expanduser(os.path.abspath(__file__)))

    # Optional determinism: seed both Python's and torch's RNGs.
    if args_parser.seed is not None:
        random.seed(args_parser.seed)
        torch.manual_seed(args_parser.seed)

    cudnn.enabled = True
    cudnn.benchmark = args_parser.cudnn

    configer = Configer(args_parser=args_parser)
    # Normalize data_dir to a list of user-expanded paths.
    data_dir = configer.get('data', 'data_dir')
    if isinstance(data_dir, str):
        data_dir = [data_dir]
    abs_data_dir = [os.path.expanduser(x) for x in data_dir]
    configer.update(['data', 'data_dir'], abs_data_dir)

    # Record the project root so downstream code can resolve relative paths.
    project_dir = os.path.dirname(os.path.realpath(__file__))
    configer.add(['project_dir'], project_dir)

    # Timestamp the log file so repeated runs do not clobber each other.
    if configer.get('logging', 'log_to_file'):
        log_file = configer.get('logging', 'log_file')
        new_log_file = '{}_{}'.format(
            log_file, time.strftime("%Y-%m-%d_%X", time.localtime()))
        configer.update(['logging', 'log_file'], new_log_file)
    else:
        Log.info('mIoU (Class-wise)')
        iou_dict = self.seg_running_score.get_cls_iu()
        for cid, miou in iou_dict.items():
            Log.info('\t{}\t{}'.format(cid, miou))
        print(' & '.join('{:.1f}'.format(x * 100)
                         for x in list(iou_dict.values()) +
                         [self.seg_running_score.get_mean_iou()]))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--configs', default=None, type=str,
                        dest='configs', help='The configs file of pose.')
    parser.add_argument('--gt_dir', default=None, type=str,
                        dest='gt_dir', help='The groundtruth annotations.')
    parser.add_argument('--pred_dir', default=None, type=str,
                        dest='pred_dir',
                        help='The label dir of predict annotations.')
    parser.add_argument('--file_list', default='all')
    args = parser.parse_args()

    # Score predicted ADE20K segmentation maps against ground truth.
    ade20k_evaluator = ADE20KEvaluator(Configer(configs=args.configs))
    ade20k_evaluator.evaluate(args.pred_dir, args.gt_dir)
            print(filename)
            pred_path = os.path.join(pred_dir, filename)
            gt_path = os.path.join(gt_dir, filename)
            predmap = ImageHelper.img2np(ImageHelper.read_image(pred_path, tool='pil', mode='P'))
            gtmap = ImageHelper.img2np(ImageHelper.read_image(gt_path, tool='pil', mode='P'))

            predmap = self.relabel(predmap)
            gtmap = self.relabel(gtmap)
            gtmap[gtmap == 0] = 255

            self.seg_running_score.update(predmap[np.newaxis, :, :], gtmap[np.newaxis, :, :])
            img_cnt += 1

        Log.info('Evaluate {} images'.format(img_cnt))
        Log.info('mIOU: {}'.format(self.seg_running_score.get_mean_iou()))
        Log.info('Pixel ACC: {}'.format(self.seg_running_score.get_pixel_acc()))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    # Optional string flags; dest matches the flag name.
    for name, help_msg in (
            ('configs', 'The configs file of pose.'),
            ('gt_dir', 'The groundtruth annotations.'),
            ('pred_dir', 'The label dir of predict annotations.')):
        parser.add_argument('--' + name, default=None, type=str,
                            dest=name, help=help_msg)
    args = parser.parse_args()

    # Score predicted COCO-Stuff segmentation maps against ground truth.
    cocostuff_evaluator = COCOStuffEvaluator(Configer(configs=args.configs))
    cocostuff_evaluator.evaluate(args.pred_dir, args.gt_dir)
            print(filename)
            
            pred_path = os.path.join(pred_dir, filename)
            gt_path = os.path.join(gt_dir, filename)
            predmap = ImageHelper.img2np(ImageHelper.read_image(pred_path, tool='pil', mode='P'))
            gtmap = ImageHelper.img2np(ImageHelper.read_image(gt_path, tool='pil', mode='P'))

            predmap = self.relabel(predmap)
            gtmap = self.relabel(gtmap)

            self.seg_running_score.update(predmap[np.newaxis, :, :], gtmap[np.newaxis, :, :])
            img_cnt += 1

        Log.info('Evaluate {} images'.format(img_cnt))
        Log.info('mIOU: {}'.format(self.seg_running_score.get_mean_iou()))
        Log.info('Pixel ACC: {}'.format(self.seg_running_score.get_pixel_acc()))


if __name__ == "__main__":
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--configs', default=None, type=str,
                            dest='configs',
                            help='The configs file of pose.')
    arg_parser.add_argument('--gt_dir', default=None, type=str,
                            dest='gt_dir',
                            help='The groundtruth annotations.')
    arg_parser.add_argument('--pred_dir', default=None, type=str,
                            dest='pred_dir',
                            help='The label dir of predict annotations.')
    args = arg_parser.parse_args()

    # Score predicted Pascal-Context segmentation maps against ground truth.
    pcontext_evaluator = PascalContextEvaluator(Configer(configs=args.configs))
    pcontext_evaluator.evaluate(args.pred_dir, args.gt_dir)