def loadModel(self):
        """Load the network named by self.model with self.params and restore its weights."""
        from utils.loader import modelLoader
        self.net = modelLoader(model=self.model, **self.params)

        checkpoint = torch.load(self.weights_path,
                                map_location=lambda storage, loc: storage)
        self.net.load_state_dict(checkpoint['model_state_dict'])

        self.net = self.net.to(self.device)
        logging.info('successfully loaded pretrained model from: %s',
                     self.weights_path)
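The checkpoint consumed above is a dict keyed by 'model_state_dict'. A minimal sketch of how such a file could be produced with a standard torch.save workflow; the stand-in network, the extra keys besides 'model_state_dict', and the file name are illustrative, not the repo's training code:

import torch
import torch.nn as nn

net = nn.Linear(4, 2)  # hypothetical stand-in for the real network
optimizer = torch.optim.Adam(net.parameters(), lr=1e-4)

# Save in the dict layout the loader reads via checkpoint['model_state_dict'].
checkpoint = {
    'model_state_dict': net.state_dict(),
    'optimizer_state_dict': optimizer.state_dict(),  # illustrative extra entry
    'n_iter': 0,                                     # illustrative extra entry
}
torch.save(checkpoint, 'superPointNet_checkpoint.pth.tar')  # illustrative path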

Example #2

    def loadModel(self):
        """
        Load the model given its name and params from the config,
        then create the optimizer or restore a pretrained state.
        """
        model = self.config["model"]["name"]
        params = self.config["model"]["params"]
        print("model: ", model)
        net = modelLoader(model=model, **params).to(self.device)
        logging.info("=> setting adam solver")
        optimizer = self.adamOptim(net,
                                   lr=self.config["model"]["learning_rate"])

        n_iter = 0
        ## new model or load pretrained
        if self.config["retrain"]:
            logging.info("New model")
        else:
            path = self.config["pretrained"]
            mode = "" if path[
                -4:] == ".pth" else "full"  # the suffix is '.pth' or 'tar.gz'
            logging.info("load pretrained model from: %s", path)
            net, optimizer, n_iter = pretrainedLoader(net,
                                                      optimizer,
                                                      n_iter,
                                                      path,
                                                      mode=mode,
                                                      full_path=True)
            logging.info("successfully load pretrained model from: %s", path)

        def setIter(n_iter):
            if self.config["reset_iter"]:
                logging.info("reset iterations to 0")
                n_iter = 0
            return n_iter

        self.net = net
        self.optimizer = optimizer
        self.n_iter = setIter(n_iter)
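Everything this variant reads from self.config is visible in the body above. A minimal config sketch covering just those keys; the model name echoes SuperPointNet_gauss2 from the training script further down, and the values are placeholders rather than the repo's defaults:

config = {
    "model": {
        "name": "SuperPointNet_gauss2",  # resolved by modelLoader; placeholder choice
        "params": {},                    # forwarded to the model as **params
        "learning_rate": 1e-4,           # consumed by self.adamOptim
    },
    "retrain": False,                    # True -> fresh model, skip pretrainedLoader
    "pretrained": "checkpoints/superPointNet_checkpoint.pth.tar",  # placeholder path
    "reset_iter": True,                  # True -> setIter() zeroes n_iter
}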

Example #3

    def loadModel(self):
        """Load the model named in the config and restore pretrained weights unless retraining."""
        model = self.config['model']['name']
        params = self.config['model']['params']
        print("model: ", model)
        net = modelLoader(model=model, **params).to(self.device)
        logging.info('=> setting adam solver')
        optimizer = self.adamOptim(net,
                                   lr=self.config['model']['learning_rate'])

        n_iter = 0
        ## load pretrained
        if self.config['retrain']:
            logging.info("New model")
        else:
            path = self.config['pretrained']
            mode = '' if path[-4:] == '.pth' else 'full'  # suffix is '.pth' or '.pth.tar'
            logging.info('load pretrained model from: %s', path)
            net, optimizer, n_iter = pretrainedLoader(net,
                                                      optimizer,
                                                      n_iter,
                                                      path,
                                                      mode=mode,
                                                      full_path=True)
            logging.info('successfully loaded pretrained model from: %s', path)

        def setIter(n_iter):
            if self.config['reset_iter']:
                logging.info("reset iterations to 0")
                n_iter = 0
            return n_iter

        self.net = net
        self.optimizer = optimizer
        self.n_iter = setIter(n_iter)
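pretrainedLoader itself is imported from the repo's utils and its body isn't shown here, but the call sites above pin down its contract: it takes (net, optimizer, n_iter, path, mode=..., full_path=...) and returns the updated triple. A compatible sketch of that contract, with the checkpoint key names assumed rather than taken from the repo:

import torch

def pretrainedLoader_sketch(net, optimizer, n_iter, path, mode="", full_path=True):
    # Hypothetical re-implementation of the contract; not the repo's code.
    # full_path is accepted for signature parity; path is used as-is here.
    checkpoint = torch.load(path, map_location=lambda storage, loc: storage)
    if mode == "full":
        # assumed layout of a 'full' (.pth.tar) checkpoint
        net.load_state_dict(checkpoint["model_state_dict"])
        optimizer.load_state_dict(checkpoint["optimizer_state_dict"])
        n_iter = checkpoint.get("n_iter", n_iter)
    else:
        net.load_state_dict(checkpoint)  # a bare '.pth' state_dict
    return net, optimizer, n_iter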
Example #4
    def loadModel(self, weights_path):
        # Load the network in inference mode.
        # A '.tar' file is a training checkpoint that wraps the weights in a
        # dict; anything else is treated as a bare state_dict.
        trained = weights_path[-4:] == '.tar'
        if trained:
            model = self.config['model']['name']
            params = self.config['model']['params']
            print("model: ", model)

            from utils.loader import modelLoader
            self.net = modelLoader(model=model, **params)
            checkpoint = torch.load(weights_path,
                                    map_location=lambda storage, loc: storage)
            self.net.load_state_dict(checkpoint['model_state_dict'])
        else:
            from models.SuperPointNet_pretrained import SuperPointNet
            self.net = SuperPointNet()
            self.net.load_state_dict(
                torch.load(weights_path,
                           map_location=lambda storage, loc: storage))
        self.net = self.net.to(self.device)
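The branch above distinguishes two on-disk formats: a '.tar' training checkpoint that wraps the weights in a dict, and a bare state_dict such as the released pretrained SuperPoint weights. A small round-trip sketch of both formats; the stand-in network and file names are illustrative:

import torch
import torch.nn as nn

net = nn.Linear(4, 2)  # hypothetical stand-in for the real network

# Format 1: training checkpoint -- a dict wrapping the weights.
torch.save({'model_state_dict': net.state_dict()}, 'ckpt.pth.tar')
ckpt = torch.load('ckpt.pth.tar', map_location=lambda storage, loc: storage)
net.load_state_dict(ckpt['model_state_dict'])

# Format 2: bare state_dict, loaded directly.
torch.save(net.state_dict(), 'weights.pth')
net.load_state_dict(torch.load('weights.pth',
                               map_location=lambda storage, loc: storage))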
Example #5
    def load_net_deepF(self, name="net_deepF"):
        from train_good_corr_4_vals_goodF_baseline import prepare_model
        from utils.loader import modelLoader

        device = self.device
        config = self.config
        img_zoom_xy = (
            config["data"]["preprocessing"]["resize"][1] /
            config["data"]["image"]["size"][1],
            config["data"]["preprocessing"]["resize"][0] /
            config["data"]["image"]["size"][0],
        )
        model_params = {
            "depth": config["model"]["depth"],
            "img_zoom_xy": img_zoom_xy,
            "image_size": config["data"]["image"]["size"],
            "quality_size": config["model"]["quality_size"],
            "if_quality": config["model"]["if_quality"],
            "if_img_des_to_pointnet":
            config["model"]["if_img_des_to_pointnet"],
            "if_goodCorresArch": config["model"]["if_goodCorresArch"],
            "if_img_feat": config["model"]["if_img_feat"],
            "if_cpu_svd": config["model"]["if_cpu_svd"],
            "if_learn_offsets": config["model"]["if_learn_offsets"],
            "if_tri_depth": config["model"]["if_tri_depth"],
            "if_sample_loss": config["model"]["if_sample_loss"],
        }
        net = modelLoader(config["model"]["name"], **model_params)
        net, optimizer, n_iter, n_iter_val = prepare_model(config,
                                                           net,
                                                           device,
                                                           n_iter=0,
                                                           n_iter_val=0,
                                                           net_postfix="")
        self.net_dict[name] = net
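The img_zoom_xy tuple built above (and again in train_good below) converts coordinates from the original image frame into the resized one; the x factor comes from index [1] and the y factor from index [0], which assumes the config stores sizes as [H, W]. A quick worked example with made-up, KITTI-like sizes:

resize = [192, 640]       # assumed data.preprocessing.resize as [H, W]; made-up values
image_size = [376, 1241]  # assumed data.image.size as [H, W]; made-up values

img_zoom_xy = (
    resize[1] / image_size[1],  # x zoom: width ratio, 640 / 1241 ~= 0.516
    resize[0] / image_size[0],  # y zoom: height ratio, 192 / 376 ~= 0.511
)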
def train_good(config, output_dir, args):
    """
    # training script, controlled by config file and args
    # work with Train_model_pipeline.py
    params:
        config: config file path, contain the settings
        output_dir: the path for results
        args: some setting

    """
    # config
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    logging.info("train on device: %s", device)
    with open(os.path.join(output_dir, "config.yml"), "w") as f:
        yaml.dump(config, f, default_flow_style=False)
    writer = SummaryWriter(
        getWriterPath(task=args.command, exper_name=args.exper_name,
                      date=True))

    logging.info(f"config: {config}")

    ## reproducibility
    if config["training"]["reproduce"]:
        logging.info("reproduce = True")
        torch.manual_seed(0)
        np.random.seed(0)
        print(
            f"test random # : np({np.random.rand(1)}), torch({torch.rand(1)})")
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False

    ## save paths
    save_path = Path(output_dir)
    save_path = save_path / "checkpoints"
    logging.info("+++[Train]+++ will save everything to {}".format(save_path))
    os.makedirs(save_path, exist_ok=True)

    # data loading
    assert (config["data"]["sequence_length"] == 2
            ), "Sorry dude, we are only supporting two-frame setting for now."
    # assert (config['data']['read_what']['with_X'] and config['model']['batch_size']==1) or (not config['data']['read_what']['with_X']), 'We are not supporting batching lidar Xs with batch_size>1 yet!'
    config["data"]["read_what"]["with_quality"] = config["model"]["if_quality"]

    val = "test" if args.test else "val"  # use test or val data
    val_shuffle = False if args.test else True  # not sorting when doing testing
    train = False if args.test else True
    data = dataLoader(
        config,
        dataset=config["data"]["dataset"],
        train=train,
        warp_input=True,
        val=val,
        val_shuffle=val_shuffle,
    )
    train_loader, val_loader = data["train_loader"], data["val_loader"]
    logging.info(
        "+++[Dataset]+++ train split size %d in %d batches, val split size %d in %d batches"
        % (
            len(train_loader) * config["data"]["batch_size"],
            len(train_loader),
            len(val_loader) * config["data"]["batch_size"],
            len(val_loader),
        ))

    # model loading
    if config["model"]["if_SP"]:
        config["model"]["quality_size"] = 1
    img_zoom_xy = (
        config["data"]["preprocessing"]["resize"][1] /
        config["data"]["image"]["size"][1],
        config["data"]["preprocessing"]["resize"][0] /
        config["data"]["image"]["size"][0],
    )
    model_params = {
        "depth": config["model"]["depth"],
        "img_zoom_xy": img_zoom_xy,
        "image_size": config["data"]["image"]["size"],
        "quality_size": config["model"]["quality_size"],
        "if_quality": config["model"]["if_quality"],
        "if_img_des_to_pointnet": config["model"]["if_img_des_to_pointnet"],
        "if_goodCorresArch": config["model"]["if_goodCorresArch"],
        "if_img_feat": config["model"]["if_img_feat"],
        "if_cpu_svd": config["model"]["if_cpu_svd"],
        "if_learn_offsets": config["model"]["if_learn_offsets"],
        "if_tri_depth": config["model"]["if_tri_depth"],
        "if_sample_loss": config["model"]["if_sample_loss"],
    }

    ## load model and weights - deep fundamental matrix network (deepF)
    net = modelLoader(config["model"]["name"], **model_params)
    print(f"deepF net: {net}")
    n_iter = 0
    n_iter_val = n_iter

    ## load model and weights - superpoint (sp)
    if config["model"]["if_SP"]:
        SP_params = {
            "out_num_points": 2000,  ### no use
            "patch_size": 5,
            "device": device,
            "nms_dist": 4,
            "conf_thresh": 0.015,
            "nn_thresh": 0.7,
        }
        params = config["training"].get("SP_params", None)
        if params is not None:
            SP_params.update(params)
        else:
            logging.warning("using default SuperPoint parameters")
        logging.info(f"SP_params: {SP_params}")

        # checkpoint_path_SP = "logs/superpoint_coco_heat2_0/checkpoints/superPointNet_170000_checkpoint.pth.tar"
        checkpoint_path_SP = config["training"]["pretrained_SP"]
        checkpoint_mode_SP = "" if checkpoint_path_SP[-3:] == "pth" else "full"
        # helping modules
        SP_processer = SuperPointNet_process(**SP_params)
        SP_tracker = PointTracker(max_length=2, nn_thresh=SP_params["nn_thresh"])

        # load the network
        net_SP = SuperPointNet_gauss2()
        n_iter_SP = 0
        n_iter_val_SP = 0 + n_iter_SP

        ## load pretrained and create optimizer
        net_SP, optimizer_SP, n_iter_SP, n_iter_val_SP = prepare_model(
            config,
            net_SP,
            device,
            n_iter_SP,
            n_iter_val_SP,
            net_postfix="_SP")
        # net_SP = nn.DataParallel(net_SP)  # AttributeError: 'DataParallel' object has no attribute 'process_output'
        if config["training"].get("train_SP", True):
            logging.info("+++[Train]+++  training superpoint")
        else:
            logging.info("+++[Train]+++  superpoint is used but not trained")

    ## load pretrained and create optimizer
    net, optimizer, n_iter, n_iter_val = prepare_model(config,
                                                       net,
                                                       device,
                                                       n_iter,
                                                       n_iter_val,
                                                       net_postfix="")
    if config["training"].get("train", True):
        logging.info("+++[Train]+++  training deepF model")
    else:
        logging.info("+++[Train]+++  deepF model is used but not trained")

    epoch = n_iter // len(train_loader)

    # set up train_agent
    train_agent = Train_model_pipeline(config,
                                       save_path=save_path,
                                       args=args,
                                       device=device)
    train_agent.writer = writer

    # feed the data into the agent
    train_agent.train_loader = train_loader
    train_agent.val_loader = val_loader
    train_agent.set_params(n_iter=n_iter, epoch=epoch, n_iter_val=n_iter_val)

    if not config["model"]["if_SP"]:
        net_SP = None
        optimizer_SP = None
        SP_processer = None
        SP_tracker = None
    train_agent.set_nets(net, net_SP=net_SP)
    train_agent.set_optimizers(optimizer, optimizer_SP)
    train_agent.set_SP_helpers(SP_processer, SP_tracker)

    while True:
        # Train for one epoch; val occasionally
        epoch_loss, _, n_iter, n_iter_val = train_agent.train_epoch(train=True)
        if n_iter > config["training"]["train_iter"]:
            break

    print("Finished Training")