def init_point_rend(config_path, weights_path):
    """Build a PointRend predictor from a config file and a weights checkpoint.

    :param config_path: path to the PointRend yaml config file
    :param weights_path: path or URL of the model weights
    :return: tuple ``(predictor, cfg)``
    """
    pointrend_cfg = get_cfg()
    # Register the PointRend-specific config keys before merging the yaml.
    add_pointrend_config(pointrend_cfg)
    pointrend_cfg.merge_from_file(config_path)
    pointrend_cfg.MODEL.WEIGHTS = weights_path
    return DefaultPredictor(pointrend_cfg), pointrend_cfg
Example #2
0
def setup(args):
    """Create a frozen detectron2 config and run the default setup hooks.

    Merges the yaml file at ``args.config`` plus the ``args.opts``
    command-line overrides on top of the PointRend-extended defaults.
    """
    config = get_cfg()
    add_pointrend_config(config)
    config.merge_from_file(args.config)
    config.merge_from_list(args.opts)
    config.freeze()
    default_setup(config, args)
    return config
Example #3
0
def get_pointrend_predictor(min_confidence=0.9, image_format="RGB"):
    """Return a DefaultPredictor for the module-level PointRend model.

    :param min_confidence: ROI score threshold below which detections drop
    :param image_format: channel order of input images (e.g. "RGB" or "BGR")
    """
    config = get_cfg()
    point_rend.add_pointrend_config(config)
    config.merge_from_file(POINTREND_CONFIG)
    # These assignments are independent, so order relative to each other
    # does not matter; all must happen before the predictor is built.
    config.MODEL.WEIGHTS = POINTREND_MODEL_WEIGHTS
    config.MODEL.ROI_HEADS.SCORE_THRESH_TEST = min_confidence
    config.INPUT.FORMAT = image_format
    predictor = DefaultPredictor(config)
    return predictor
Example #4
0
def setup(args):
    """Create configs and perform basic setups.

    Builds the PointRend-extended config from ``args.config_file`` and
    ``args.opts``, freezes it, and registers it with detectron2's
    default setup (logging, output dir, seeding).
    """
    config = get_cfg()
    add_pointrend_config(config)
    config.merge_from_file(args.config_file)
    config.merge_from_list(args.opts)
    config.freeze()
    default_setup(config, args)
    return config
Example #5
0
def setup_cfg(args):
    """Load a frozen config from file and command-line arguments.

    Applies ``args.confidence_threshold`` uniformly to every built-in
    model head so the active architecture honors the same cutoff.
    """
    cfg = get_cfg()
    add_pointrend_config(cfg)
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    # One shared score threshold for all built-in model types.
    threshold = args.confidence_threshold
    cfg.MODEL.RETINANET.SCORE_THRESH_TEST = threshold
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = threshold
    cfg.MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH = threshold
    cfg.freeze()
    return cfg
Example #6
0
def setup_cfg(args):
    """Load a frozen config, wiring in the PointRend project when needed.

    PointRend lives outside the core detectron2 package, so when the
    config filename mentions it, the project directory is added to
    ``sys.path`` before its config hook can be imported.
    """
    cfg = get_cfg()
    if 'pointrend' in args.config_file:
        import sys
        sys.path.insert(
            1,
            "/n/pfister_lab2/Lab/donglai/lib/pipeline/detectron2/projects/PointRend"
        )
        import point_rend
        point_rend.add_pointrend_config(cfg)
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    # One shared score threshold for all built-in model types.
    threshold = args.confidence_threshold
    cfg.MODEL.RETINANET.SCORE_THRESH_TEST = threshold
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = threshold
    cfg.MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH = threshold
    cfg.freeze()
    return cfg
Example #7
0
File: pointrend.py  Project: sxyu/segtool
    def __init__(self, filter_class=-1):
        """Load the pretrained PointRend COCO instance-segmentation predictor.

        :param filter_class: output only instances of this class id
            (-1 to disable filtering). Note: class 0 is person.
        """
        self.filter_class = filter_class
        self.coco_metadata = MetadataCatalog.get("coco_2017_val")

        config = get_cfg()
        # PointRend extends the base detectron2 config schema.
        point_rend.add_pointrend_config(config)
        config_path = os.path.join(
            POINTREND_ROOT_PATH,
            "configs/InstanceSegmentation/pointrend_rcnn_R_50_FPN_3x_coco.yaml"
        )
        config.merge_from_file(config_path)
        # Detection confidence cutoff for this model.
        config.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5
        # Pretrained checkpoint from the PointRend model zoo:
        # https://github.com/facebookresearch/detectron2/tree/master/projects/PointRend#pretrained-models
        config.MODEL.WEIGHTS = "detectron2://PointRend/InstanceSegmentation/pointrend_rcnn_R_50_FPN_3x_coco/164955410/model_final_3c3198.pkl"
        self.cfg = config
        self.predictor = DefaultPredictor(self.cfg)
def setup_cfg(args):
    """Build a frozen detectron2 config for the mode selected by ``MODE``.

    Supported modes: instance segmentation, keypoints, panoptic
    segmentation (all via the model zoo), and PointRend (via its
    project config and fbaipublicfiles checkpoint).

    :param args: namespace providing ``confidence_threshold``
    :return: frozen config
    :raises ValueError: if ``MODE`` is not a supported ``DetectionMode``
    """
    cfg = get_cfg()

    mode = MODE
    if mode == DetectionMode.InstanceSegmentation:
        model = 'COCO-InstanceSegmentation/mask_rcnn_X_101_32x8d_FPN_3x.yaml'
        model_file = model_zoo.get_config_file(model)
        model_weights = model_zoo.get_checkpoint_url(model)
    elif mode == DetectionMode.Keypoints:
        model = 'COCO-Keypoints/keypoint_rcnn_X_101_32x8d_FPN_3x.yaml'
        model_file = model_zoo.get_config_file(model)
        model_weights = model_zoo.get_checkpoint_url(model)
    elif mode == DetectionMode.PanopticSegmentation:
        model = 'COCO-PanopticSegmentation/panoptic_fpn_R_101_3x.yaml'
        model_file = model_zoo.get_config_file(model)
        model_weights = model_zoo.get_checkpoint_url(model)
    elif mode == DetectionMode.PointRend:
        # PointRend is a detectron2 "project": it needs extra config keys
        # and is not distributed through the regular model_zoo helpers.
        point_rend.add_pointrend_config(cfg)
        model_file = os.path.join(
            point_rend_path,
            'configs/InstanceSegmentation/pointrend_rcnn_R_50_FPN_3x_coco.yaml'
        )
        model_weights = 'https://dl.fbaipublicfiles.com/detectron2/PointRend/InstanceSegmentation/pointrend_rcnn_R_50_FPN_3x_coco/164955410/model_final_3c3198.pkl'
    else:
        # Previously an unrecognized mode fell through with
        # model_file=None, producing a cryptic failure inside
        # merge_from_file; fail fast with a clear message instead.
        raise ValueError(f"Unsupported detection mode: {mode!r}")

    cfg.merge_from_file(model_file)
    cfg.MODEL.WEIGHTS = model_weights

    # Set the same score threshold on every built-in model head.
    cfg.MODEL.RETINANET.SCORE_THRESH_TEST = args.confidence_threshold
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = args.confidence_threshold
    cfg.MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH = args.confidence_threshold
    cfg.freeze()
    return cfg
Example #9
0
# --- Earlier experiments, kept disabled for reference ---
#cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = arguments_strTreshold  # set threshold for this model
#cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml")
#mask_rcnn_predictor = DefaultPredictor(cfg)
#mask_rcnn_outputs = mask_rcnn_predictor(im)

#panoptic segmentation
#cfg = get_cfg()
#cfg.merge_from_file(model_zoo.get_config_file("COCO-PanopticSegmentation/panoptic_fpn_R_101_3x.yaml"))
#cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = arguments_strTreshold  # set threshold for this model
#cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-PanopticSegmentation/panoptic_fpn_R_101_3x.yaml")
#mask_rcnn_panoptic_predictor = DefaultPredictor(cfg)
#mask_rcnn_panoptic_outputs = mask_rcnn_predictor(im)

# PointRend: build a predictor from the project config + pretrained weights
# and run it on the module-level image `im`.
cfg = get_cfg()
point_rend.add_pointrend_config(cfg)
cfg.merge_from_file(
    "/shared/foss-18/detectron2/projects/PointRend/configs/InstanceSegmentation/pointrend_rcnn_R_50_FPN_3x_coco.yaml"
)
print("model treshold : ", float(arguments_strTreshold))
# arguments_strTreshold arrives as a string (presumably from the CLI —
# TODO confirm), so it must be converted before use.
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = float(
    arguments_strTreshold)  # set threshold for this model
# Use a model from PointRend model zoo: https://github.com/facebookresearch/detectron2/tree/master/projects/PointRend#pretrained-models
cfg.MODEL.WEIGHTS = "https://dl.fbaipublicfiles.com/detectron2/PointRend/InstanceSegmentation/pointrend_rcnn_R_50_FPN_3x_coco/164955410/model_final_3c3198.pkl"
# Once downloaded, the checkpoint is cached at:
# /home/dev18/.torch/fvcore_cache/detectron2/PointRend/InstanceSegmentation/pointrend_rcnn_R_50_FPN_3x_coco/164955410/model_final_3c3198.pkl
predictor = DefaultPredictor(cfg)
outputs = predictor(im)

v = Visualizer(im[:, :, ::-1],
               coco_metadata,
Example #10
0
    def initialize(self, opt):
        """Build the generator/mask networks and frozen auxiliary models.

        Creates the encoder/decoder generators and two mask-prediction
        conv nets; optionally loads a frozen PointRend instance
        segmentation model (when ``opt.saliency`` is set); loads a
        frozen PWC-Net flow estimator and a fixed Gaussian blur kernel;
        finally restores the trainable nets from the ``opt.which_epoch``
        checkpoint.
        """
        BaseModel.initialize(self, opt)

        # Frame offsets — presumably for long-term temporal-consistency
        # terms; TODO confirm against the loss code.
        self.long_term = [0, 1, 10, 20, 40]
        self.alpha1 = opt.alpha1
        self.alpha2 = opt.alpha2
        self.alpha = opt.alpha
        # load/define networks
        self.netG_A_encoder = networks.define_G_encoder(
            opt.input_nc, opt.output_nc, opt.ngf, opt, opt.norm,
            not opt.no_dropout, opt.init_type, self.gpu_ids, opt.saliency,
            opt.multisa)
        self.netG_A_decoder = networks.define_G_decoder(
            opt.input_nc, opt.output_nc, opt.ngf, opt, opt.norm,
            not opt.no_dropout, opt.init_type, self.gpu_ids, opt.multisa)

        # Mask networks take twice the encoder channel size as input and
        # emit a single channel.
        self.netM = networks.define_convs(self.netG_A_encoder.channel_size() *
                                          2,
                                          1,
                                          opt.M_layers,
                                          opt.M_size,
                                          gpu_ids=self.gpu_ids)
        self.netM2 = networks.define_convs(self.netG_A_encoder.channel_size() *
                                           2,
                                           1,
                                           opt.M_layers,
                                           opt.M_size,
                                           gpu_ids=self.gpu_ids)
        # ~~~~~~
        # Optional PointRend instance-segmentation model, built from a
        # hard-coded local config path and checkpoint, then frozen
        # (eval mode, no gradients) for use as a fixed saliency source.
        if opt.saliency:
            cfg = get_cfg()
            point_rend.add_pointrend_config(cfg)
            cfg.merge_from_file(
                "/home/linchpin/Documents/ink_stylize/ChipGAN_release/models/detectron2_repo/projects/PointRend/configs/InstanceSegmentation/pointrend_rcnn_R_50_FPN_3x_coco.yaml"
            )
            cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5
            cfg.MODEL.WEIGHTS = "/home/linchpin/Documents/ink_stylize/ChipGAN_release/pretrained_models/model_final_3c3198.pkl"
            self.NetIS = build_model(cfg)
            checkpointer = DetectionCheckpointer(self.NetIS)
            checkpointer.load(cfg.MODEL.WEIGHTS)
            self.NetIS.eval()
            if len(self.opt.gpu_ids) == 0:
                self.NetIS.cpu()
            for param in self.NetIS.parameters():
                param.requires_grad = False

        # PWC-Net optical-flow estimator, frozen and loaded from a local
        # checkpoint; moved to GPU only when GPUs are configured.
        self.pwc_model = PWCnet.PWCNet().eval()
        if len(self.opt.gpu_ids) != 0:
            self.pwc_model.cuda()
        model_path = './pretrained_models/network-default.pytorch'
        self.pwc_model.load_state_dict(torch.load(model_path))
        for param in self.pwc_model.parameters():
            param.requires_grad = False

        # ~~~~~~
        # Fixed 3x3 Gaussian blur implemented as a non-trainable conv;
        # padding (kw-1)//2 keeps the spatial size unchanged.
        kw = 3
        g_kernel = self.gauss_kernel(kw, 3, 1).transpose((3, 2, 1, 0))
        self.gauss_conv_kw = nn.Conv2d(1,
                                       1,
                                       kernel_size=kw,
                                       stride=1,
                                       padding=(kw - 1) // 2,
                                       bias=False)
        self.gauss_conv_kw.weight.data.copy_(torch.from_numpy(g_kernel))
        self.gauss_conv_kw.weight.requires_grad = False
        if len(self.opt.gpu_ids) != 0:
            self.gauss_conv_kw.cuda()

        # Restore generator and mask networks from the chosen epoch.
        which_epoch = opt.which_epoch
        self.load_network(self.netG_A_encoder, 'G_A_encoder', which_epoch)
        self.load_network(self.netG_A_decoder, 'G_A_decoder', which_epoch)
        self.load_network(self.netM, "M", which_epoch)
        self.load_network(self.netM2, "M2", which_epoch)
        # self.netG_A_decoder.eval()
        # self.netG_A_encoder.eval()
        # self.netM.eval()
        # self.netM2.eval()
        self.pwc_model.eval()

        print('---------- Networks initialized -------------')
        networks.print_network(self.netG_A_encoder)
        networks.print_network(self.netG_A_decoder)
        networks.print_network(self.netM)
        print('-----------------------------------------------')
Example #11
0
    def initialize(self, opt):
        """Build generators, discriminators, losses and auxiliary models.

        Training variant of model setup: creates the encoder/decoder and
        B-side generators plus two mask networks; when training, also
        builds three discriminators, fixed Gaussian/Haar filter convs, and
        a frozen HED edge model; optionally loads a frozen PointRend
        saliency model; loads a frozen PWC-Net; then restores checkpoints
        and constructs optimizers/schedulers as requested by ``opt``.
        """
        BaseModel.initialize(self, opt)
        self.display_param = dict()

        # Frame offsets — presumably for long-term temporal-consistency
        # terms; TODO confirm against the loss code.
        self.long_term = [0, 1, 10, 20, 40]
        self.alpha1 = opt.alpha1
        self.alpha2 = opt.alpha2
        self.alpha = opt.alpha
        # load/define networks
        self.netG_A_encoder = networks.define_G_encoder(opt.input_nc, opt.output_nc,
                                                        opt.ngf, opt, opt.norm, not opt.no_dropout, opt.init_type, self.gpu_ids, opt.saliency, opt.multisa)
        self.netG_A_decoder = networks.define_G_decoder(opt.input_nc, opt.output_nc,
                                                        opt.ngf, opt, opt.norm, not opt.no_dropout, opt.init_type, self.gpu_ids, opt.multisa)

        self.netG_B = networks.define_G(opt.output_nc, opt.input_nc,
                                        opt.ngf, opt, opt.norm, not opt.no_dropout, opt.init_type, self.gpu_ids)

        # Mask nets consume twice the encoder channel size, emit 1 channel.
        chs = self.netG_A_encoder.channel_size()
        self.netM = networks.define_convs(chs * 2, 1, opt.M_layers, opt.M_size, gpu_ids=self.gpu_ids)
        self.netM2 = networks.define_convs(chs * 2, 1, opt.M_layers, opt.M_size, gpu_ids=self.gpu_ids)

        if self.isTrain:
            # Three discriminators: A/B domains plus an "ink" discriminator.
            use_sigmoid = opt.no_lsgan
            self.netD_A = networks.define_D(opt.output_nc, opt.ndf,
                                            opt.which_model_netD,
                                            opt.n_layers_D, opt.norm, use_sigmoid, opt.init_type, self.gpu_ids)
            self.netD_B = networks.define_D(opt.input_nc, opt.ndf,
                                            opt.which_model_netD,
                                            opt.n_layers_D, opt.norm, use_sigmoid, opt.init_type, self.gpu_ids)

            self.netD_ink = networks.define_D(opt.output_nc, opt.ndf,
                                              opt.which_model_netD,
                                              opt.n_layers_D, opt.norm, use_sigmoid, opt.init_type, self.gpu_ids)

            # Fixed 21x21 Gaussian blur as a non-trainable conv.
            # NOTE(review): padding=1 with kernel_size=21 shrinks the
            # feature map by 18 px per side-pair — verify this is intended
            # (the kw=7 blur below pads separately via ReflectionPad2d).
            g_kernel = self.gauss_kernel(21, 3, 1).transpose((3, 2, 1, 0))
            self.gauss_conv = nn.Conv2d(
                1, 1, kernel_size=21, stride=1, padding=1, bias=False)
            self.gauss_conv.weight.data.copy_(torch.from_numpy(g_kernel))
            self.gauss_conv.weight.requires_grad = False
            self.gauss_conv.cuda()

            # Gaussain blur
            # Second, smaller blur; spatial size preserved by the
            # reflection padding module created just below.
            kw = 7
            g_kernel = self.gauss_kernel(kw, 3, 1).transpose((3, 2, 1, 0))
            self.gauss_conv_kw = nn.Conv2d(
                1, 1, kernel_size=kw, stride=1, padding=0, bias=False)
            self.gauss_conv_kw.weight.data.copy_(torch.from_numpy(g_kernel))
            self.gauss_conv_kw.weight.requires_grad = False
            self.gauss_conv_kw.cuda()
            self.gauss_conv_kw_pad = nn.ReflectionPad2d((kw-1)//2)

            # 2x2 Haar wavelet filters (LH, HL, HH) as a fixed stride-2
            # conv producing three detail channels.
            L = np.array([1, 1]).reshape(2, 1)
            H = np.array([-1, 1]).reshape(2, 1)
            haar_kernel = np.stack(
                (L@(H.T), H@(L.T), H@(H.T))).reshape(3, 1, 2, 2) / 2
            self.haar_kernel = nn.Conv2d(
                1, 3, 2, stride=2, padding=0, bias=False)
            self.haar_kernel.weight.data.copy_(torch.from_numpy(haar_kernel))
            self.haar_kernel.weight.requires_grad = False
            self.haar_kernel.cuda()

            # Hed Model
            # Frozen HED edge detector loaded from a local checkpoint.
            self.hed_model = Hed()
            self.hed_model.cuda()
            save_path = './pretrained_models/35.pth'
            self.hed_model.load_state_dict(torch.load(save_path))
            for param in self.hed_model.parameters():
                param.requires_grad = False

        if opt.saliency:
            # detectron2
            # Frozen PointRend instance-segmentation model used as a fixed
            # saliency source (hard-coded local config path).
            cfg = get_cfg()
            point_rend.add_pointrend_config(cfg)
            cfg.merge_from_file(
                "/home/linchpin/Documents/ink_stylize/ChipGAN_release/models/detectron2_repo/projects/PointRend/configs/InstanceSegmentation/pointrend_rcnn_R_50_FPN_3x_coco.yaml")
            cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5
            cfg.MODEL.WEIGHTS = "pretrained_models/model_final_3c3198.pkl"
            self.NetIS = build_model(cfg)
            checkpointer = DetectionCheckpointer(self.NetIS)
            checkpointer.load(cfg.MODEL.WEIGHTS)
            self.NetIS.eval()
            for param in self.NetIS.parameters():
                param.requires_grad = False

        # pwcnet
        # Frozen PWC-Net optical-flow estimator.
        self.pwc_model = PWCnet.PWCNet().cuda().eval()
        model_path = './pretrained_models/network-default.pytorch'
        self.pwc_model.load_state_dict(torch.load(model_path))
        for param in self.pwc_model.parameters():
            param.requires_grad = False

        # ~~~~~~

        # Restore checkpoints. NOTE(review): the continue_train branch
        # touches self.netD_* which only exist when isTrain — this
        # assumes continue_train implies isTrain; confirm in the options.
        if not self.isTrain or opt.continue_train:
            which_epoch = opt.which_epoch
            self.load_network(self.netG_A_encoder, 'G_A_encoder', which_epoch)
            self.load_network(self.netG_A_decoder, 'G_A_decoder', which_epoch)
            self.load_network(self.netG_B, 'G_B', which_epoch)
            self.load_network(self.netM, "M", which_epoch)
            self.load_network(self.netM2, "M2", which_epoch)
        if opt.continue_train:
            self.load_network(self.netD_A, 'D_A', which_epoch)
            self.load_network(self.netD_B, 'D_B', which_epoch)
            self.load_network(self.netD_ink, 'D_ink', which_epoch)

        if self.isTrain:
            self.old_lr = opt.lr
            self.fake_A_pool = ImagePool(opt.pool_size)
            self.fake_B_pool = ImagePool(opt.pool_size)
            self.ink_fake_B_pool = ImagePool(opt.pool_size)
            # define loss functions
            self.criterionGAN = networks.GANLoss(
                use_lsgan=not opt.no_lsgan, tensor=self.Tensor)
            self.criterionCycle = torch.nn.L1Loss()
            self.criterionIdt = torch.nn.L1Loss()
            self.TV_LOSS = networks.TVLoss()
            # initialize optimizers
            self.optimizer_G = torch.optim.Adam(itertools.chain(self.netG_A_encoder.parameters(), self.netG_A_decoder.parameters(), self.netG_B.parameters()),
                                                lr=opt.lr, betas=(opt.beta1, 0.999))
            self.optimizer_M = torch.optim.Adam(itertools.chain(self.netM.parameters(
            ), self.netM2.parameters()), lr=opt.lr, betas=(opt.beta1, 0.999))
            self.optimizer_D_A = torch.optim.Adam(
                self.netD_A.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
            self.optimizer_D_B = torch.optim.Adam(
                self.netD_B.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
            self.optimizer_D_ink = torch.optim.Adam(
                self.netD_ink.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
            if opt.continue_train:
                # Optimizer state is restored alongside the networks.
                self.load_optim(self.optimizer_M, "M", which_epoch)
                self.load_optim(self.optimizer_G, "G", which_epoch)
                self.load_optim(self.optimizer_D_A, "D_A", which_epoch)
                self.load_optim(self.optimizer_D_B, "D_B", which_epoch)
                self.load_optim(self.optimizer_D_ink, "D_ink", which_epoch)
            self.optimizers = []
            self.schedulers = []
            self.optimizers.append(self.optimizer_G)
            self.optimizers.append(self.optimizer_M)
            self.optimizers.append(self.optimizer_D_A)
            self.optimizers.append(self.optimizer_D_B)
            self.optimizers.append(self.optimizer_D_ink)
            for optimizer in self.optimizers:
                self.schedulers.append(networks.get_scheduler(optimizer, opt))

        print('---------- Networks initialized -------------')
        networks.print_network(self.netG_A_encoder)
        networks.print_network(self.netG_A_decoder)
        networks.print_network(self.netG_B)
        networks.print_network(self.netM)
        networks.print_network(self.netM2)
        if self.isTrain:
            networks.print_network(self.netD_A)
            networks.print_network(self.netD_B)
        print('-----------------------------------------------')