Example #1
    def __init__(self, config):
        is_test = False
        suffix = 'Test' if is_test else ''
        self.experiment_id = "KPConvNet" + time.strftime('%m%d%H%M') + suffix

        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.verbose = True

        # snapshot
        self.snapshot_interval = 5
        snapshot_root = f'snapshot/{config.dataset}_{self.experiment_id}'
        tensorboard_root = f'tensorboard/{config.dataset}_{self.experiment_id}'
        os.makedirs(snapshot_root, exist_ok=True)
        os.makedirs(tensorboard_root, exist_ok=True)
        shutil.copy2(os.path.join('.', 'training_ShapeNetCls.py'), os.path.join(snapshot_root, 'train.py'))
        shutil.copy2(os.path.join('datasets', 'ShapeNet.py'), os.path.join(snapshot_root, 'dataset.py'))
        shutil.copy2(os.path.join('datasets', 'dataloader.py'), os.path.join(snapshot_root, 'dataloader.py'))
        self.save_dir = os.path.join(snapshot_root, 'models/')
        self.result_dir = os.path.join(snapshot_root, 'results/')
        self.tboard_dir = tensorboard_root

        # dataset & dataloader
        self.train_set = ShapeNetDataset(root=config.data_train_dir,
                                         split='train',
                                         first_subsampling_dl=config.first_subsampling_dl,
                                         classification=True,
                                         config=config,
                                         )
        self.test_set = ShapeNetDataset(root=config.data_test_dir,
                                        split='test',
                                        first_subsampling_dl=config.first_subsampling_dl,
                                        classification=True,
                                        config=config,
                                        )
        self.train_loader = get_dataloader(dataset=self.train_set,
                                           batch_size=config.train_batch_size,
                                           shuffle=True,
                                           num_workers=4,
                                           )
        self.test_loader = get_dataloader(dataset=self.test_set,
                                          batch_size=config.test_batch_size,
                                          shuffle=False,
                                          num_workers=4,
                                          )
        print("Training set size:", self.train_loader.dataset.__len__())
        print("Test set size:", self.test_loader.dataset.__len__())

        # model
        self.model = KPCNN(config)
        self.resume = config.resume
        self.start_epoch = 0
        self.epoch = config.max_epoch

        # optimizer & scheduler
        self.optimizer = optim.SGD(self.model.parameters(), lr=config.learning_rate, momentum=config.momentum, weight_decay=1e-6)
        self.scheduler = optim.lr_scheduler.ExponentialLR(self.optimizer, gamma=config.exp_gamma)
        self.scheduler_interval = config.exp_interval

        # evaluate
        self.evaluate_interval = 1
        self.evaluate_metric = nn.CrossEntropyLoss(reduction='mean')

        self.check_args()
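
A minimal driver for this trainer might look like the sketch below. The class name Trainer and the train() entry point are assumptions, since the snippet omits the class header; the config fields mirror the attributes read in __init__, with placeholder values, and KPCNN(config) will additionally read architecture fields not shown here.

from types import SimpleNamespace

# Hypothetical config: the field names are the ones __init__ reads above,
# but the concrete values are placeholders, not the repository's defaults.
config = SimpleNamespace(
    dataset='ShapeNet',
    data_train_dir='data/shapenet/train',
    data_test_dir='data/shapenet/test',
    first_subsampling_dl=0.02,
    train_batch_size=16,
    test_batch_size=16,
    resume=False,
    max_epoch=200,
    learning_rate=1e-2,
    momentum=0.98,
    exp_gamma=0.99,
    exp_interval=1,
)

trainer = Trainer(config)  # Trainer stands in for the unnamed class above
trainer.train()            # assuming the class also defines the training loop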
Example #2
class KPFCNN(nn.Module):
    def __init__(self, config):
        super(KPFCNN, self).__init__()
        self.encoder = KPCNN(config)
        self.config = config
        self.blocks = nn.ModuleDict()

        # Feature Extraction Module
        # Find first upsampling block
        start_i = 0
        for block_i, block in enumerate(config.architecture):
            if 'upsample' in block:
                start_i = block_i
                break

        layer = config.num_layers - 1
        r = config.first_subsampling_dl * config.density_parameter * 2 ** layer
        in_fdim = config.first_features_dim * 2 ** layer
        out_fdim = in_fdim
        block_in_layer = 0
        for block_i, block in enumerate(config.architecture[start_i:]):

            is_strided = 'strided' in block
            self.blocks[f'layer{layer}/{block}'] = get_block(
                block, config, int(1.5 * in_fdim), out_fdim, radius=r, strided=is_strided)

            # update feature dimension
            in_fdim = out_fdim
            block_in_layer += 1

            # Detect change to a subsampled layer
            if 'upsample' in block:
                # Update radius and feature dimension for next layer
                out_fdim = out_fdim // 2
                r *= 0.5
                layer -= 1
                block_in_layer = 0

        # Segmentation Head
        self.blocks['segmentation_head'] = nn.Sequential(
            nn.Linear(out_fdim, config.first_features_dim),
            nn.BatchNorm1d(config.first_features_dim, momentum=config.batch_norm_momentum, eps=1e-6),
            nn.LeakyReLU(negative_slope=0.2),
            # nn.Dropout(p=config.dropout),
            nn.Linear(config.first_features_dim, config.num_classes)
        )
        # print(list(self.named_parameters()))

    def forward(self, inputs):
        features = self.feature_extraction(inputs)
        logits = self.segmentation_head(features)
        return logits

    def feature_extraction(self, inputs):
        # The encoder returns a list of per-layer feature maps; start decoding
        # from the coarsest one
        F = self.encoder.feature_extraction(inputs)
        features = F[-1]

        # Current radius of convolution and feature dimension
        layer = self.config.num_layers - 1
        r = self.config.first_subsampling_dl * self.config.density_parameter * 2 ** layer
        fdim = self.config.first_features_dim * 2 ** layer

        # Find first upsampling block
        start_i = 0
        for block_i, block in enumerate(self.config.architecture):
            if 'upsample' in block:
                start_i = block_i
                break

        # Loop over upsampling blocks
        for block_i, block in enumerate(self.config.architecture[start_i:]):

            # Get the function for this layer
            block_ops = self.blocks[f'layer{layer}/{block}']

            # Apply the block operations
            if 'upsample' in block:
                if block == 'nearest_upsample':
                    upsample_indices = inputs['upsamples'][layer - 1]
                else:
                    raise ValueError(f"Unknown block type. {block}")
                features = block_ops(upsample_indices, features)
            else:
                if block in ['unary', 'simple', 'resnet', 'resnetb']:
                    query_points = inputs['points'][layer]
                    support_points = inputs['points'][layer]
                    neighbors_indices = inputs['neighbors'][layer]
                elif block in ['simple_strided', 'resnetb_strided', 'resnetb_deformable_strided']:
                    query_points = inputs['points'][layer + 1]
                    support_points = inputs['points'][layer]
                    neighbors_indices = inputs['pools'][layer]
                else:
                    raise ValueError(f"Unknown block type. {block}")
                features = block_ops(query_points, support_points, neighbors_indices, features)

            # Detect change to a subsampled layer
            if 'upsample' in block:
                # Update radius and feature dimension for next layer
                layer -= 1
                r *= 0.5
                fdim = fdim // 2

                # Concatenate with CNN feature map
                features = torch.cat((features, F[layer]), dim=1)

        return features

    def segmentation_head(self, features):
        logits = self.blocks['segmentation_head'](features)
        return logits
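
KPFCNN.forward consumes a dictionary of multiscale tensors prepared by the dataloader. The sketch below shows the layout implied by the indexing in feature_extraction; the two-layer setup, the shapes, and the pre-built model are assumptions, and the encoder will typically expect input features in the same dictionary as well.

import torch

# Hypothetical two-layer input; N0/N1 point counts and K neighbors are placeholders.
N0, N1, K = 1024, 256, 16
inputs = {
    'points':    [torch.rand(N0, 3), torch.rand(N1, 3)],  # one tensor per layer
    'neighbors': [torch.randint(N0, (N0, K)), torch.randint(N1, (N1, K))],
    'pools':     [torch.randint(N0, (N1, K))],   # strided blocks: layer l -> l+1
    'upsamples': [torch.randint(N1, (N0, K))],   # nearest_upsample: layer l+1 -> l
}
logits = model(inputs)  # model is an already-built KPFCNN; shape (N0, num_classes)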
Example #3
def train():

    logger.info("test Dataloader")
    cfg = get_cfg_defaults()
    cfg.merge_from_file("./yaml/modelnet.yaml")

    writer = SummaryWriter()

    v = cfg.NETWORK.FIRST_SUBSAMPLING_DL
    mode_neigh = 0  # 0: neighbors, otherwise: edge
    (architecture, list_voxel_size, list_radius,
     list_radius_conv, list_size) = get_list_constants(
        cfg.NETWORK.FIRST_SUBSAMPLING_DL, cfg.NETWORK.DENSITY_PARAMETER / 2,
        cfg.NETWORK.ARCHITECTURE, cfg.NETWORK.FIRST_DIM,
        cfg.INPUT.IN_FEATURES_DIM,
        cfg.INPUT.NUM_CLASSES)

    list_transfo = [DataSubsampling(list_voxel_size)]
    neigh = Neighbors(list_radius,
                      max_num_neighbors=cfg.INPUT.MAX_NUM_NEIGHBORS,
                      is_pool=True,
                      is_upsample=False,
                      mode=mode_neigh)
    list_transfo.append(neigh)

    transfo = Compose(list_transfo)
    # transfo = DataSubsampling(list_voxel_size)

    dataset = ModelNet("./Data/ModelNet40",
                       subsampling_parameter=v,
                       transforms=transfo)

    dataloader = MultiScaleDataLoader(dataset,
                                      batch_size=cfg.NETWORK.BATCH_NUM,
                                      shuffle=True,
                                      num_workers=cfg.SYSTEM.NUM_WORKERS,
                                      pin_memory=False)

    model = KPCNN(architecture,
                  list_radius_conv,
                  list_size,
                  cfg,
                  mode=mode_neigh)
    optimizer = torch.optim.SGD(model.parameters(),
                                lr=cfg.TRAIN.LEARNING_RATE,
                                momentum=cfg.TRAIN.MOMENTUM)
    model.cuda()
    for e in range(1):  # single epoch: this routine only smoke-tests the dataloader
        logger.info("epoch {:d}", e)
        model.train()
        for i, batch in enumerate(dataloader):
            if mode_neigh == 0:
                batch = shadow_neigh(batch).to('cuda')
            else:
                batch = batch.to('cuda')
            # print(batch.list_neigh[3][:20])
            pred = model(batch)
            loss = compute_classification_loss(pred, batch.y, model,
                                               cfg.TRAIN.WEIGHTS_DECAY)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if i % cfg.TRAIN.LOG_INTERVAL == 0:
                accuracy = compute_classification_accuracy(pred, batch.y)
                global_step = e * len(dataloader) + i
                logger.info("Epoch: {} Step {}, loss {:.3f}, accuracy: {:.3f}",
                            e, i, loss.item(), accuracy.item())
                writer.add_scalar('Loss/train', loss.item(), global_step)
                writer.add_scalar('Accuracy/train', accuracy.item(),
                                  global_step)
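    writer.close()  # flush pending TensorBoard events once the loop finishes

A matching evaluation pass could reuse the same helpers. The sketch below is an assumption: no test_loader is built in this snippet and evaluate() is a hypothetical addition, but shadow_neigh and compute_classification_accuracy are the functions already used in the training loop above.

import torch

@torch.no_grad()
def evaluate(model, test_loader, mode_neigh=0):
    # Mirrors the training loop, without gradient updates
    model.eval()
    total_acc, n_batches = 0.0, 0
    for batch in test_loader:
        if mode_neigh == 0:
            batch = shadow_neigh(batch).to('cuda')
        else:
            batch = batch.to('cuda')
        pred = model(batch)
        total_acc += compute_classification_accuracy(pred, batch.y).item()
        n_batches += 1
    model.train()  # restore training mode for the caller
    return total_acc / max(n_batches, 1)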