def __init__(
    self,
    backbone,
    input_nc=1,
    grid_size=0.05,
    pointnet_nn=None,
    pre_mlp_nn=None,
    post_mlp_nn=[64, 64, 32],
    add_pos=False,
    add_pre_x=False,
    backend="minkowski",
    aggr=None,
):
    if input_nc is None:
        input_nc = 1
    nn.Module.__init__(self)
    self.unet = SparseConv3d(architecture="unet", input_nc=input_nc, config=backbone, backend=backend)
    if pre_mlp_nn is not None:
        self.pre_mlp = MLP(pre_mlp_nn)
    else:
        self.pre_mlp = torch.nn.Identity()
    if pointnet_nn is not None:
        self.pointnet = MLP(pointnet_nn)
    else:
        self.pointnet = torch.nn.Identity()
    self.post_mlp = MLP(post_mlp_nn)
    self._grid_size = grid_size
    self.add_pos = add_pos
    self.add_pre_x = add_pre_x
    self.aggr = aggr
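A minimal plain-PyTorch sketch of the optional-MLP pattern used above: `pre_mlp` and `pointnet` fall back to `Identity` when their channel lists are left at `None`. The `mlp` helper below is a hypothetical stand-in for the repo's `MLP`, not its actual implementation.

# Hedged sketch; "mlp" is an assumed stand-in for the repo's MLP helper.
import torch
from torch import nn

def mlp(channels):
    layers = []
    for c_in, c_out in zip(channels[:-1], channels[1:]):
        layers += [nn.Linear(c_in, c_out), nn.BatchNorm1d(c_out), nn.ReLU()]
    return nn.Sequential(*layers)

pre_mlp_nn, pointnet_nn = [1, 32], None  # example channel lists
pre_mlp = mlp(pre_mlp_nn) if pre_mlp_nn is not None else nn.Identity()
pointnet = mlp(pointnet_nn) if pointnet_nn is not None else nn.Identity()

x = torch.randn(8, 1)                # 8 points, input_nc=1
print(pointnet(pre_mlp(x)).shape)    # torch.Size([8, 32])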
def __init__(self, option, model_type, dataset, modules):
    super(PointGroup, self).__init__(option)
    backbone_options = option.get("backbone", {"architecture": "unet"})
    self.Backbone = Minkowski(
        backbone_options.get("architecture", "unet"),
        input_nc=dataset.feature_dimension,
        num_layers=4,
        config=backbone_options.get("config", None),
    )
    self.BackboneHead = Seq().append(FastBatchNorm1d(self.Backbone.output_nc)).append(torch.nn.ReLU())

    self._scorer_is_encoder = option.scorer.architecture == "encoder"
    self._activate_scorer = option.scorer.activate
    self.Scorer = Minkowski(
        option.scorer.architecture, input_nc=self.Backbone.output_nc, num_layers=option.scorer.depth
    )
    self.ScorerHead = Seq().append(torch.nn.Linear(self.Scorer.output_nc, 1)).append(torch.nn.Sigmoid())

    self.Offset = Seq().append(MLP([self.Backbone.output_nc, self.Backbone.output_nc], bias=False))
    self.Offset.append(torch.nn.Linear(self.Backbone.output_nc, 3))

    self.Semantic = (
        Seq()
        .append(MLP([self.Backbone.output_nc, self.Backbone.output_nc], bias=False))
        .append(torch.nn.Linear(self.Backbone.output_nc, dataset.num_classes))
        .append(torch.nn.LogSoftmax(dim=-1))
    )
    self.loss_names = ["loss", "offset_norm_loss", "offset_dir_loss", "semantic_loss", "score_loss"]

    stuff_classes = dataset.stuff_classes
    if is_list(stuff_classes):
        stuff_classes = torch.Tensor(stuff_classes).long()
    self._stuff_classes = torch.cat([torch.tensor([IGNORE_LABEL]), stuff_classes])
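For intuition, a self-contained sketch of the semantic head built above (MLP -> Linear -> LogSoftmax), with plain torch.nn layers standing in for the repo's MLP/Seq helpers and made-up sizes:

import torch
from torch import nn

output_nc, num_classes = 64, 13  # assumed sizes, not from the config
semantic = nn.Sequential(
    nn.Linear(output_nc, output_nc, bias=False),  # stands in for MLP([...], bias=False)
    nn.Linear(output_nc, num_classes),
    nn.LogSoftmax(dim=-1),
)
log_probs = semantic(torch.randn(5, output_nc))
print(log_probs.exp().sum(dim=-1))  # each row sums to ~1: valid log-probabilities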
class FeatureRegressor(torch.nn.Module):
    """Allows segregated segmentation when the category of an object is known,
    as is the case in ShapeNet for example.

    Parameters
    ----------
    in_features :
        size of the input channel
    n_feat :
        number of output features
    """

    def __init__(self, in_features, n_feat, dropout_proba=0.5, bn_momentum=0.1):
        super().__init__()
        up_factor = 3
        self.channel_rasing = MLP([in_features, n_feat * up_factor], bn_momentum=bn_momentum, bias=False)
        if dropout_proba:
            self.channel_rasing.add_module("Dropout", torch.nn.Dropout(p=dropout_proba))
        self.final_mlp = MLP([n_feat * up_factor, n_feat], bias=True)

    def forward(self, features, **kwargs):
        assert features.dim() == 2
        features = self.channel_rasing(features)
        features = self.final_mlp(features)
        return features
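A hedged usage sketch, assuming an MLP helper equivalent to Linear + BatchNorm1d + activation per layer (the repo's actual helper may differ) and assuming this runs in the same namespace as the class so the stand-in MLP is the one resolved:

import torch
from torch import nn

def MLP(channels, bn_momentum=0.1, bias=True):  # assumed stand-in, for this sketch only
    layers = []
    for c_in, c_out in zip(channels[:-1], channels[1:]):
        layers += [nn.Linear(c_in, c_out, bias=bias),
                   nn.BatchNorm1d(c_out, momentum=bn_momentum),
                   nn.LeakyReLU(0.2)]
    return nn.Sequential(*layers)

head = FeatureRegressor(in_features=128, n_feat=16)
head.eval()  # deterministic pass: Dropout off, BatchNorm uses running stats
print(head(torch.randn(4, 128)).shape)  # torch.Size([4, 16]), via 128 -> 48 -> 16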
def __init__(self, option, model_type, dataset, modules):
    super(PointGroup, self).__init__(option)
    backbone_options = option.get("backbone", {"architecture": "unet"})
    self.Backbone = Minkowski(
        backbone_options.get("architecture", "unet"),
        input_nc=dataset.feature_dimension,
        num_layers=4,
        config=backbone_options.get("config", {}),
    )

    self._scorer_type = option.get("scorer_type", "encoder")
    cluster_voxel_size = option.get("cluster_voxel_size", 0.05)
    if cluster_voxel_size:
        self._voxelizer = GridSampling3D(cluster_voxel_size, quantize_coords=True, mode="mean")
    else:
        self._voxelizer = None
    self.ScorerUnet = Minkowski("unet", input_nc=self.Backbone.output_nc, num_layers=4, config=option.scorer_unet)
    self.ScorerEncoder = Minkowski(
        "encoder", input_nc=self.Backbone.output_nc, num_layers=4, config=option.scorer_encoder
    )
    self.ScorerMLP = MLP([self.Backbone.output_nc, self.Backbone.output_nc, self.ScorerUnet.output_nc])
    self.ScorerHead = Seq().append(torch.nn.Linear(self.ScorerUnet.output_nc, 1)).append(torch.nn.Sigmoid())

    self.Offset = Seq().append(MLP([self.Backbone.output_nc, self.Backbone.output_nc], bias=False))
    self.Offset.append(torch.nn.Linear(self.Backbone.output_nc, 3))

    self.Semantic = (
        Seq()
        .append(MLP([self.Backbone.output_nc, self.Backbone.output_nc], bias=False))
        .append(torch.nn.Linear(self.Backbone.output_nc, dataset.num_classes))
        .append(torch.nn.LogSoftmax(dim=-1))
    )
    self.loss_names = ["loss", "offset_norm_loss", "offset_dir_loss", "semantic_loss", "score_loss"]

    stuff_classes = dataset.stuff_classes
    if is_list(stuff_classes):
        stuff_classes = torch.Tensor(stuff_classes).long()
    self._stuff_classes = torch.cat([torch.tensor([IGNORE_LABEL]), stuff_classes])
def test_scheduler(self):
    bn_scheduler_config = OmegaConf.load(os.path.join(DIR, "test_config/bn_scheduler_config.yaml"))
    bn_momentum = bn_scheduler_config.bn_scheduler.params.bn_momentum
    bn_scheduler_params = bn_scheduler_config.bn_scheduler.params
    bn_lambda = lambda e: max(
        bn_scheduler_params.bn_momentum
        * bn_scheduler_params.bn_decay ** (int(e // bn_scheduler_params.decay_step)),
        bn_scheduler_params.bn_clip,
    )

    model = MLP([3, 3, 3], bn_momentum=10)
    bn_scheduler = instantiate_bn_scheduler(model, bn_scheduler_config.bn_scheduler)
    self.assertEqual(model[0][1].batch_norm.momentum, bn_momentum)
    for epoch in range(100):
        bn_scheduler.step(epoch)
        self.assertEqual(model[0][1].batch_norm.momentum, bn_lambda(epoch))

    model = MLP2D([3, 3, 3], bn=True)
    bn_scheduler = instantiate_bn_scheduler(model, bn_scheduler_config.bn_scheduler)
    self.assertEqual(model[0][1].momentum, bn_momentum)
    for epoch in range(100):
        bn_scheduler.step(epoch)
        self.assertEqual(model[0][1].momentum, bn_lambda(epoch))
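A worked example of the decay schedule the lambda encodes, with made-up parameter values (the YAML's actual values are not shown here): the momentum is scaled by bn_decay every decay_step epochs and floored at bn_clip.

bn_momentum, bn_decay, decay_step, bn_clip = 0.1, 0.5, 20, 0.01  # assumed values
bn_lambda = lambda e: max(bn_momentum * bn_decay ** int(e // decay_step), bn_clip)
print([bn_lambda(e) for e in (0, 19, 20, 40, 100)])
# [0.1, 0.1, 0.05, 0.025, 0.01] -- halves every 20 epochs; epoch 100's 0.003125 is clipped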
def __init__(self, option, model_type, dataset, modules):
    super(PointGroup, self).__init__(option)
    self.Backbone = Minkowski("unet", input_nc=dataset.feature_dimension, num_layers=4)
    self._scorer_is_encoder = option.scorer.architecture == "encoder"
    self.Scorer = Minkowski(option.scorer.architecture, input_nc=self.Backbone.output_nc, num_layers=2)
    self.ScorerHead = Seq().append(torch.nn.Linear(self.Scorer.output_nc, 1)).append(torch.nn.Sigmoid())

    self.Offset = Seq().append(MLP([self.Backbone.output_nc, self.Backbone.output_nc], bias=False))
    self.Offset.append(torch.nn.Linear(self.Backbone.output_nc, 3))

    self.Semantic = (
        Seq()
        .append(torch.nn.Linear(self.Backbone.output_nc, dataset.num_classes))
        .append(torch.nn.LogSoftmax(dim=-1))
    )
    self.loss_names = ["loss", "offset_norm_loss", "offset_dir_loss", "semantic_loss", "score_loss"]
    self._stuff_classes = torch.cat([torch.tensor([IGNORE_LABEL]), dataset.stuff_classes])
def set_last_mlp(self, last_mlp_opt):
    if len(last_mlp_opt.nn) > 2:
        self.FC_layer = MLP(last_mlp_opt.nn[:-1])
        self.FC_layer.add_module("last", Lin(last_mlp_opt.nn[-2], last_mlp_opt.nn[-1]))
    elif len(last_mlp_opt.nn) == 2:
        self.FC_layer = Seq(Lin(last_mlp_opt.nn[-2], last_mlp_opt.nn[-1]))
    else:
        self.FC_layer = torch.nn.Identity()
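A self-contained sketch of the three branches, with torch.nn stand-ins for MLP/Seq/Lin (the repo's MLP also carries batch norm and an activation): more than two channel sizes build an MLP body plus a bare final Linear, exactly two build a single Linear, and anything shorter is a passthrough.

import torch
from torch import nn

def build_head(channels):  # hypothetical mirror of set_last_mlp, for illustration
    if len(channels) > 2:
        body = []
        for c_in, c_out in zip(channels[:-2], channels[1:-1]):
            body += [nn.Linear(c_in, c_out), nn.ReLU()]
        return nn.Sequential(*body, nn.Linear(channels[-2], channels[-1]))
    elif len(channels) == 2:
        return nn.Linear(channels[0], channels[1])
    return nn.Identity()

print(build_head([64, 32, 10]))  # MLP body, then a bare "last" Linear (no activation)
print(build_head([64, 10]))      # single Linear
print(build_head([64]))          # Identity passthrough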
def __init__(self, nn, aggr="max", *args, **kwargs):
    super(GlobalPartialDenseBaseModule, self).__init__()
    self.nn = MLP(nn)
    self.pool = global_max_pool if aggr == "max" else global_mean_pool
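A minimal illustration of the aggr switch, assuming torch_geometric is installed: global_max_pool / global_mean_pool reduce each cloud to one row using the batch vector.

import torch
from torch_geometric.nn import global_max_pool, global_mean_pool

x = torch.randn(6, 4)                     # 6 points with 4 features
batch = torch.tensor([0, 0, 0, 1, 1, 1])  # two clouds of 3 points each
pool = global_max_pool                    # picked when aggr == "max"
print(pool(x, batch).shape)               # torch.Size([2, 4]): one row per cloud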
def __init__(self, up_k, up_conv_nn, *args, **kwargs):
    super(FPModule_PD, self).__init__()
    self.upsample_op = KNNInterpolate(up_k)
    bn_momentum = kwargs.get("bn_momentum", 0.1)
    self.nn = MLP(up_conv_nn, bn_momentum=bn_momentum, bias=False)
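A hedged sketch of the feature-propagation step this kind of module performs: interpolate coarse features up to the dense points, fuse them with the skip features, then run the MLP. A precomputed 1-NN index stands in here for the real KNNInterpolate.

import torch
from torch import nn

coarse = torch.randn(2, 64)                        # 2 coarse points, 64 features
skip = torch.randn(8, 32)                          # 8 dense points, 32 skip features
nearest = torch.tensor([0, 0, 1, 1, 0, 1, 0, 1])   # assumed 1-NN assignments
upsampled = coarse[nearest]                        # copy coarse features to dense points
fused = torch.cat([upsampled, skip], dim=-1)       # (8, 96)
mlp = nn.Sequential(nn.Linear(96, 64), nn.ReLU())  # stands in for MLP(up_conv_nn)
print(mlp(fused).shape)                            # torch.Size([8, 64])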
def __init__(self, option, model_type, dataset, modules):
    super(VoteNet2, self).__init__(option)
    self._dataset = dataset

    # 1 - CREATE BACKBONE MODEL
    input_nc = dataset.feature_dimension
    backbone_option = option.backbone
    backbone_cls = getattr(models, backbone_option.model_type)
    backbone_extr_options = backbone_option.get("extra_options", {})
    self.backbone_model = backbone_cls(
        architecture="unet",
        input_nc=input_nc,
        num_layers=4,
        config=backbone_option.config,
        **backbone_extr_options,
    )
    self._kpconv_backbone = backbone_cls.__name__ == "KPConv"
    self.is_dense_format = self.conv_type == "DENSE"
    dropout = option.get("dropout", None)
    if dropout is not None:
        self.dropout = torch.nn.Dropout(dropout)
    else:
        self.dropout = None

    # 2 - SEGMENTATION HEAD
    semantic_supervision = option.get("semantic_supervision", False)
    if semantic_supervision:
        self.Semantic = (
            Seq()
            .append(MLP([self.backbone_model.output_nc, self.backbone_model.output_nc], bias=False))
            .append(torch.nn.Linear(self.backbone_model.output_nc, dataset.num_classes))
            .append(torch.nn.LogSoftmax(dim=-1))
        )
    else:
        self.Semantic = None

    # 3 - CREATE VOTING MODEL
    voting_option = option.voting
    self._num_seeds = voting_option.num_points_to_sample
    voting_cls = getattr(votenet_module, voting_option.module_name)
    self.voting_module = voting_cls(
        vote_factor=voting_option.vote_factor, seed_feature_dim=self.backbone_model.output_nc
    )

    # 4 - CREATE PROPOSAL MODULE
    proposal_option = option.proposal
    proposal_option.vote_aggregation.down_conv_nn = [
        [
            self.backbone_model.output_nc + 3,
            self.backbone_model.output_nc,
            self.backbone_model.output_nc,
        ]
    ]
    proposal_cls = getattr(votenet_module, proposal_option.module_name)
    self.proposal_cls_module = proposal_cls(
        num_class=proposal_option.num_class,
        vote_aggregation_config=proposal_option.vote_aggregation,
        num_heading_bin=proposal_option.num_heading_bin,
        mean_size_arr=dataset.mean_size_arr,
        num_proposal=proposal_option.num_proposal,
        sampling=proposal_option.sampling,
    )

    # Loss params
    self.loss_params = option.loss_params
    self.loss_params.num_heading_bin = proposal_option.num_heading_bin
    self.loss_params.mean_size_arr = dataset.mean_size_arr.tolist()

    self.losses_has_been_added = False
    self.loss_names = []
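The "+ 3" in down_conv_nn most likely accounts for the XYZ coordinates concatenated to the vote features during aggregation (an assumption here, in line with how VoteNet-style aggregation uses positions). With an assumed backbone width, the wiring looks like:

output_nc = 128  # assumed backbone output width
down_conv_nn = [[output_nc + 3, output_nc, output_nc]]
print(down_conv_nn)  # [[131, 128, 128]]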