Example #1
    def __init__(self, option, model_type, dataset, modules):
        # call the initialization method of UnetBasedModel
        UnetBasedModel.__init__(self, option, model_type, dataset, modules)
        self._num_classes = dataset.num_classes
        self._weight_classes = dataset.weight_classes
        self._use_category = getattr(option, "use_category", False)
        if self._use_category:
            if not dataset.class_to_segments:
                raise ValueError(
                    "The dataset needs to specify a class_to_segments property when using category information for segmentation"
                )
            self._num_categories = len(dataset.class_to_segments.keys())
            log.info("Using category information for the predictions with %i categories", self._num_categories)
        else:
            self._num_categories = 0

        # Last MLP
        last_mlp_opt = option.mlp_cls

        self.FC_layer = Seq()
        # Widen the first MLP layer so it can take the one-hot category features as extra input channels
        last_mlp_opt.nn[0] += self._num_categories
        for i in range(1, len(last_mlp_opt.nn)):
            self.FC_layer.append(Conv1D(last_mlp_opt.nn[i - 1], last_mlp_opt.nn[i], bn=True, bias=False))
        if last_mlp_opt.dropout:
            self.FC_layer.append(torch.nn.Dropout(p=last_mlp_opt.dropout))

        self.FC_layer.append(Conv1D(last_mlp_opt.nn[-1], self._num_classes, activation=None, bias=True, bn=False))
        self.loss_names = ["loss_seg"]

        self.visual_names = ["data_visual"]
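The head above relies on torch-points3d's Seq and Conv1D wrappers. As a rough, self-contained sketch, the same final MLP can be written with plain torch.nn, assuming Conv1D amounts to a 1x1 Conv1d followed by BatchNorm and ReLU; the widths, dropout and class count below are placeholder values standing in for option.mlp_cls and dataset.num_classes:

import torch

# Placeholder config values (the real ones come from mlp_cls in the model's YAML config)
mlp_nn = [128, 128]
dropout = 0.5
num_classes = 13

layers = []
for i in range(1, len(mlp_nn)):
    layers += [
        torch.nn.Conv1d(mlp_nn[i - 1], mlp_nn[i], kernel_size=1, bias=False),
        torch.nn.BatchNorm1d(mlp_nn[i]),
        torch.nn.ReLU(),
    ]
if dropout:
    layers.append(torch.nn.Dropout(p=dropout))
# Final 1x1 convolution maps to per-point class logits (no activation, no batch norm)
layers.append(torch.nn.Conv1d(mlp_nn[-1], num_classes, kernel_size=1, bias=True))
fc_head = torch.nn.Sequential(*layers)

# Per-point logits: (batch, channels, num_points) -> (batch, num_classes, num_points)
features = torch.randn(2, mlp_nn[0], 1024)
print(fc_head(features).shape)  # torch.Size([2, 13, 1024])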
Example #2
    def __init__(self, option, model_type, dataset, modules):
        # call the initialization method of UnetBasedModel
        UnetBasedModel.__init__(self, option, model_type, dataset, modules)
        self.mode = option.loss_mode
        self.normalize_feature = option.normalize_feature
        self.out_channels = option.out_channels
        self.loss_names = ["loss_reg", "loss"]
        self.metric_loss_module, self.miner_module = UnetBasedModel.get_metric_loss_and_miner(
            getattr(option, "metric_loss", None),
            getattr(option, "miner", None))
        # Last MLP
        last_mlp_opt = option.mlp_cls

        self.FC_layer = Seq()
        # The first entry of last_mlp_opt.nn is used as-is (no extra category channels in this model)
        for i in range(1, len(last_mlp_opt.nn)):
            self.FC_layer.append(
                Conv1D(last_mlp_opt.nn[i - 1],
                       last_mlp_opt.nn[i],
                       bn=True,
                       bias=False))
        if last_mlp_opt.dropout:
            self.FC_layer.append(torch.nn.Dropout(p=last_mlp_opt.dropout))

        self.FC_layer.append(
            Conv1D(last_mlp_opt.nn[-1],
                   self.out_channels,
                   activation=None,
                   bias=True,
                   bn=False))
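Unlike the segmentation heads, this variant ends in out_channels-dimensional descriptors rather than class logits, and normalize_feature presumably controls whether those descriptors are L2-normalized before the metric loss. A minimal sketch of that normalization step, with placeholder values for option.out_channels and option.normalize_feature (the actual use happens in the model's forward pass, which this snippet does not show):

import torch
import torch.nn.functional as F

# Placeholder stand-ins for option.out_channels and option.normalize_feature
out_channels = 32
normalize_feature = True

# Suppose `descriptors` is the output of FC_layer, shaped (batch, out_channels, num_points)
descriptors = torch.randn(2, out_channels, 1024)
if normalize_feature:
    # L2-normalize each per-point descriptor along the channel dimension
    descriptors = F.normalize(descriptors, p=2, dim=1)

print(descriptors.norm(dim=1)[0, :5])  # each descriptor now has unit norm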
Example #3
    def __init__(self, option, model_type, dataset, modules):
        """Initialize this model class.
        Parameters:
            option -- training/test options
        A few things can be done here.
        - (required) call the initialization function of BaseModel
        - define loss function, visualization images, model names, and optimizers
        """
        UnetBasedModel.__init__(
            self, option, model_type, dataset,
            modules)  # call the initialization method of UnetBasedModel

        self._weight_classes = dataset.weight_classes

        nn = option.mlp_cls.nn
        self.dropout = option.mlp_cls.get("dropout")
        # Three fully connected layers form the classification head;
        # chaining them assumes nn[1] == nn[2] and nn[3] == nn[4] in mlp_cls.nn
        self.lin1 = torch.nn.Linear(nn[0], nn[1])
        self.lin2 = torch.nn.Linear(nn[2], nn[3])
        self.lin3 = torch.nn.Linear(nn[4], dataset.num_classes)

        self.loss_names = ["loss_seg"]
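The snippet only defines the three Linear layers; a plausible forward pass through this head (ReLU and dropout between the layers, in the spirit of the original PointNet++ heads) could look like the sketch below. The widths and class count are placeholders for option.mlp_cls.nn and dataset.num_classes, and the wiring assumes nn[1] == nn[2] and nn[3] == nn[4]:

import torch
import torch.nn.functional as F

# Placeholder widths and class count (the real ones come from mlp_cls.nn and the dataset)
nn_widths = [128, 128, 128, 128, 128]
num_classes = 13
dropout = 0.5

lin1 = torch.nn.Linear(nn_widths[0], nn_widths[1])
lin2 = torch.nn.Linear(nn_widths[2], nn_widths[3])
lin3 = torch.nn.Linear(nn_widths[4], num_classes)

def head(x, training=True):
    # x: per-point features of shape (num_points, nn_widths[0])
    x = F.dropout(F.relu(lin1(x)), p=dropout, training=training)
    x = F.dropout(F.relu(lin2(x)), p=dropout, training=training)
    return lin3(x)  # per-point class logits

print(head(torch.randn(1024, nn_widths[0])).shape)  # torch.Size([1024, 13])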
Example #4
    def __init__(self, option, model_type, dataset, modules):

        # Pointnet++ is UnetBased model, call init method of unet model
        UnetBasedModel.__init__(self, option, model_type, dataset, modules)

        self._num_classes = dataset.num_classes
        self._weight_classes = dataset.weight_classes
        self._use_category = getattr(option, "use_category", False)

        if self._use_category:
            if not dataset.class_to_segments:
                raise ValueError("Dataset does not specify needed "
                                 "class_to_segments property")
            self._num_categories = len(dataset.class_to_segments.keys())
            log.info(f"Using category information for "
                     f"the predictions with ${self._num_categories}")
        else:
            self._num_categories = 0
            log.info(f"Category information is not going to be used")

        # ---------------------------------------------------
        # Specification of last MLP based on
        # mlp_cls opt in "mypointnet2" in "pointnet2.yaml"
        last_mlp_opt = copy.deepcopy(option.mlp_cls)

        # A sequential container: modules are applied in the
        # order in which they are appended below
        # (standard torch Sequential behaviour)
        self.FC_layer = Seq()
        last_mlp_opt.nn[0] += self._num_categories

        # Adding layers specified in pointnet2.yaml - mlp_cls
        for i in range(1, len(last_mlp_opt.nn)):
            self.FC_layer.append(
                Conv1D(last_mlp_opt.nn[i - 1],
                       last_mlp_opt.nn[i],
                       bn=True,
                       bias=False))

        # Add dropout before the final layer if specified in mlp_cls
        if last_mlp_opt.dropout:
            self.FC_layer.append(torch.nn.Dropout(p=last_mlp_opt.dropout))

        self.FC_layer.append(
            Conv1D(last_mlp_opt.nn[-1],
                   self._num_classes,
                   activation=None,
                   bias=True,
                   bn=False))
        # -------------------------------------------------------------------

        # Name specs.
        self.loss_names = ["loss_seg"]
        self.visual_names = ["data_visual"]

        self.input = None
        self.labels = None
        self.batch_idx = None
        self.category = None

        self.loss_seg = None
        self.data_visual = None
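Widening last_mlp_opt.nn[0] by _num_categories only pays off if a one-hot encoding of the object category is concatenated to the per-point features before FC_layer is applied. That concatenation happens elsewhere in the model (presumably in its forward pass, which the snippet does not show); the sketch below illustrates the idea with made-up sizes:

import torch
import torch.nn.functional as F

# Made-up sizes standing in for values read from the dataset/config
num_categories = 16   # len(dataset.class_to_segments)
feat_channels = 128   # last_mlp_opt.nn[0] before it is widened
num_points = 1024

# Per-point decoder features and one object category id per sample
features = torch.randn(2, feat_channels, num_points)
category = torch.tensor([3, 7])

# One-hot encode the category, broadcast it to every point, and concatenate along
# the channel dimension; this is why nn[0] is widened by _num_categories above
one_hot = F.one_hot(category, num_categories).float()        # (2, 16)
one_hot = one_hot.unsqueeze(-1).expand(-1, -1, num_points)   # (2, 16, 1024)
features = torch.cat([features, one_hot], dim=1)             # (2, 144, 1024)
print(features.shape)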