Example #1
  def __init__(self, ARCH, DATA, datadir, logdir, modeldir):
    # parameters
    self.ARCH = ARCH
    self.DATA = DATA
    self.datadir = datadir
    self.logdir = logdir
    self.modeldir = modeldir

    # get the data
    parserModule = imp.load_source("parserModule",
                                   booger.TRAIN_PATH + '/tasks/semantic/dataset/' +
                                   self.DATA["name"] + '/parser.py')
    if self.ARCH["data_augmentation"]["use"] and self.ARCH["data_augmentation"]["params"]["translate"]["use"]:
      # Hack: we don't want random data augmentations when running inference,
      # so disable every augmentation except a deterministic translate.
      augm_params = {key: {"use": False} for key in self.ARCH["data_augmentation"]["params"]}
      augm_params["translate"]["use"] = True
      augm_params["translate"]["params"] = {"augment": False}
      augm = True
    else:
      augm_params = None
      augm = False
    self.parser = parserModule.Parser(root=self.datadir,
                                      train_sequences=self.DATA["split"]["train"],
                                      valid_sequences=self.DATA["split"]["valid"],
                                      test_sequences=self.DATA["split"]["test"],
                                      labels=self.DATA["labels"],
                                      color_map=self.DATA["color_map"],
                                      learning_map=self.DATA["learning_map"],
                                      learning_map_inv=self.DATA["learning_map_inv"],
                                      sensor=self.ARCH["dataset"]["sensor"],
                                      max_points=self.ARCH["dataset"]["max_points"],
                                      batch_size=1,
                                      workers=self.ARCH["train"]["workers"],
                                      gt=True,
                                      shuffle_train=False,
                                      data_augmentation=augm,
                                      augmentation_params=augm_params)

    # concatenate the encoder and the head
    with torch.no_grad():
      self.model = Segmentator(self.ARCH,
                               self.parser.get_n_classes(),
                               self.modeldir)

    # use knn post processing?
    self.post = None
    if self.ARCH["post"]["KNN"]["use"]:
      self.post = KNN(self.ARCH["post"]["KNN"]["params"],
                      self.parser.get_n_classes())

    # GPU?
    self.gpu = False
    self.model_single = self.model
    self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print("Infering in device: ", self.device)
    if torch.cuda.is_available() and torch.cuda.device_count() > 0:
      cudnn.benchmark = True
      cudnn.fastest = True
      self.gpu = True
      self.model.cuda()
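
A note that applies to all four examples: imp.load_source comes from the long-deprecated imp module, which was removed entirely in Python 3.12. A minimal importlib-based equivalent might look like the sketch below (the helper name load_parser_module is ours, not the project's):

import importlib.util

def load_parser_module(parser_path):
    # Drop-in replacement for imp.load_source("parserModule", parser_path):
    # build a module spec from the file path, then execute parser.py into it.
    spec = importlib.util.spec_from_file_location("parserModule", parser_path)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return module

Calling it with booger.TRAIN_PATH + '/tasks/semantic/dataset/' + self.DATA["name"] + '/parser.py' should then behave like the original imp.load_source call.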
Example #2
    def __init__(self, ARCH, DATA, datadir, logdir, modeldir):
        # parameters
        self.ARCH = ARCH
        self.DATA = DATA
        self.datadir = datadir
        self.logdir = logdir
        self.modeldir = modeldir

        # get the data
        parserModule = imp.load_source(
            "parserModule", booger.TRAIN_PATH + '/tasks/semantic/dataset/' +
            self.DATA["name"] + '/seq_parser.py')
        self.parser = parserModule.Parser(
            root=self.datadir,
            train_sequences=self.DATA["split"]["train"],
            valid_sequences=self.DATA["split"]["valid"],
            test_sequences=self.DATA["split"]["test"],
            labels=self.DATA["labels"],
            color_map=self.DATA["color_map"],
            learning_map=self.DATA["learning_map"],
            learning_map_inv=self.DATA["learning_map_inv"],
            sensor=self.ARCH["dataset"]["sensor"],
            mode='test',
            frame_num=self.ARCH["test"]["frame_num"],
            max_points=self.ARCH["dataset"]["max_points"],
            batch_size=self.ARCH["test"]["batch_size"],
            workers=self.ARCH["train"]["workers"],
            gt=True,
            shuffle_train=False)

        # concatenate the encoder and the head
        with torch.no_grad():
            self.model = Segmentator(self.ARCH, self.parser.get_n_classes(),
                                     self.modeldir)

        # use knn post processing?
        self.post = None
        if self.ARCH["post"]["KNN"]["use"]:
            self.post = KNN(self.ARCH["post"]["KNN"]["params"],
                            self.parser.get_n_classes())

        # GPU?
        self.gpu = False
        self.model_single = self.model
        self.device = torch.device(
            "cuda" if torch.cuda.is_available() else "cpu")
        print("Infering in device: ", self.device)
        if torch.cuda.is_available() and torch.cuda.device_count() > 0:
            cudnn.benchmark = True
            cudnn.fastest = True
            self.gpu = True
            self.model.cuda()
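
For context, the ARCH dictionary these constructors consume is typically parsed from the architecture YAML shipped alongside the checkpoint. Below is a sketch of only the keys Example #2 actually reads, with illustrative placeholder values (assumptions, not the project's defaults):

# Illustrative sketch only: the real schema lives in the repo's architecture config.
ARCH = {
    "dataset": {
        "sensor": {},           # sensor spec, forwarded verbatim to the Parser
        "max_points": 150000,   # placeholder upper bound on points per scan
    },
    "train": {"workers": 4},                       # dataloader worker count
    "test": {"frame_num": 1, "batch_size": 1},     # placeholders
    "post": {"KNN": {"use": True, "params": {}}},  # enables KNN post-processing
}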
Example #3
    def __init__(self,
                 ARCH,
                 DATA,
                 datadir,
                 logdir,
                 modeldir,
                 split,
                 uncertainty,
                 mc=30):
        # parameters
        self.ARCH = ARCH
        self.DATA = DATA
        self.datadir = datadir
        self.logdir = logdir
        self.modeldir = modeldir
        self.uncertainty = uncertainty
        self.split = split
        self.mc = mc

        # get the data
        parserModule = imp.load_source(
            "parserModule", booger.TRAIN_PATH + '/tasks/semantic/dataset/' +
            self.DATA["name"] + '/parser.py')
        self.parser = parserModule.Parser(
            root=self.datadir,
            train_sequences=self.DATA["split"]["train"],
            valid_sequences=self.DATA["split"]["valid"],
            test_sequences=self.DATA["split"]["test"],
            labels=self.DATA["labels"],
            color_map=self.DATA["color_map"],
            learning_map=self.DATA["learning_map"],
            learning_map_inv=self.DATA["learning_map_inv"],
            sensor=self.ARCH["dataset"]["sensor"],
            max_points=self.ARCH["dataset"]["max_points"],
            batch_size=1,
            workers=self.ARCH["train"]["workers"],
            gt=True,
            shuffle_train=False)

        # concatenate the encoder and the head
        with torch.no_grad():
            torch.nn.Module.dump_patches = True
            if self.uncertainty:
                self.model = SalsaNextUncertainty(self.parser.get_n_classes())
                self.model = nn.DataParallel(self.model)
                w_dict = torch.load(modeldir + "/SalsaNext",
                                    map_location=lambda storage, loc: storage)
                self.model.load_state_dict(w_dict['state_dict'], strict=True)
            else:
                self.model = SalsaNext(self.parser.get_n_classes())
                # If parallelization causes problems (a pile of ".module" key errors), comment out the line below.
                self.model = nn.DataParallel(self.model)
                w_dict = torch.load(modeldir + "/SalsaNext_valid_best",
                                    map_location=lambda storage, loc: storage)
                self.model.load_state_dict(w_dict['state_dict'], strict=True)

        # use knn post processing?
        self.post = None
        if self.ARCH["post"]["KNN"]["use"]:
            self.post = KNN(self.ARCH["post"]["KNN"]["params"],
                            self.parser.get_n_classes())

        # GPU?
        self.gpu = False
        self.model_single = self.model
        self.device = torch.device(
            "cuda" if torch.cuda.is_available() else "cpu")
        print("Infering in device: ", self.device)
        if torch.cuda.is_available() and torch.cuda.device_count() > 0:
            cudnn.benchmark = True
            cudnn.fastest = True
            self.gpu = True
            self.model.cuda()
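
The translated comment in Example #3 points at a common pitfall: weights saved from an nn.DataParallel model carry a "module." prefix on every parameter name, so they refuse to load into a plain model. Instead of commenting the DataParallel line in and out, the prefix can be stripped; a minimal sketch (the helper name is ours):

from collections import OrderedDict

def strip_module_prefix(state_dict):
    # Drop the "module." prefix that nn.DataParallel prepends to every key,
    # so the checkpoint loads into a non-parallel model.
    return OrderedDict(
        (k[len("module."):] if k.startswith("module.") else k, v)
        for k, v in state_dict.items())

With this, a plain (unwrapped) SalsaNext instance can load the same checkpoint via self.model.load_state_dict(strip_module_prefix(w_dict['state_dict']), strict=True).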
Example #4
File: user.py  Project: Crowbar97/SalsaNext
    def __init__(self, ARCH, DATA, datadir, logdir, modeldir, modelname,
                 split):
        # parameters
        self.ARCH = ARCH
        self.DATA = DATA
        self.datadir = datadir
        self.logdir = logdir
        self.modeldir = modeldir
        self.modelname = modelname
        self.split = split

        # get the data
        parserModule = imp.load_source(
            'parserModule', booger.TRAIN_PATH + '/tasks/semantic/dataset/' +
            self.DATA['name'] + '/parser.py')
        self.parser = parserModule.Parser(
            root=self.datadir,
            train_sequences=self.DATA['split']['train'],
            valid_sequences=self.DATA['split']['valid'],
            test_sequences=self.DATA['split']['test'],
            labels=self.DATA['labels'],
            color_map=self.DATA['color_map'],
            learning_map=self.DATA['learning_map'],
            learning_map_inv=self.DATA['learning_map_inv'],
            sensor=self.ARCH['dataset']['sensor'],
            max_points=self.ARCH['dataset']['max_points'],
            batch_size=1,
            # workers=1,
            # workers=0 (load in the main process) is important for accurate time measurement
            workers=0,
            gt=True,
            shuffle_train=False)

        # concatenate the encoder and the head
        if self.modelname in ('salsanet', 'salsanext'):
            with torch.no_grad():
                print('modeldir: %s' % self.modeldir)
                model_path = os.path.join(self.modeldir, 'SalsaNet')
                print('model_path: %s' % model_path)

                self.model = SalsaNet(self.ARCH, self.parser.get_n_classes(),
                                      model_path)
                self.model = nn.DataParallel(self.model)
                torch.nn.Module.dump_patches = True

                w_dict = torch.load(model_path,
                                    map_location=lambda storage, loc: storage)
                print(w_dict['state_dict'].keys())

                self.model.module.load_state_dict(w_dict['state_dict'],
                                                  strict=True)
        else:
            with torch.no_grad():
                self.model = Segmentator(self.ARCH,
                                         self.parser.get_n_classes(),
                                         self.modeldir)

        # use knn post processing?
        self.post = None
        if self.ARCH['post']['KNN']['use']:
            self.post = KNN(self.ARCH['post']['KNN']['params'],
                            self.parser.get_n_classes())

        # GPU?
        self.gpu = False
        self.model_single = self.model
        self.device = torch.device(
            'cuda' if torch.cuda.is_available() else 'cpu')
        print('Inferring on device: ', self.device)
        if torch.cuda.is_available() and torch.cuda.device_count() > 0:
            cudnn.benchmark = True
            cudnn.fastest = True
            self.gpu = True
            self.model.cuda()
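
Finally, a hypothetical instantiation of Example #4's constructor, assuming the __init__ above belongs to a User class (the file is named user.py) and that ARCH/DATA come from YAML configs; all file and directory names below are illustrative assumptions:

import yaml

# Assumed config file names; adjust to whatever the checkpoint directory contains.
with open("arch_cfg.yaml") as f:
    ARCH = yaml.safe_load(f)
with open("data_cfg.yaml") as f:
    DATA = yaml.safe_load(f)

user = User(ARCH, DATA,
            datadir="/data/semantic-kitti",  # dataset root
            logdir="/tmp/predictions",       # where predictions get written
            modeldir="/models/salsanext",    # pretrained checkpoint directory
            modelname="salsanext",
            split="valid")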