Example #1
import torch
import torch.nn as nn
import sparseconvnet as scn

# NOTE: the imports above are assumed for this snippet; res, UNet6, spatialGroupConv,
# SUNCG_DATA, getIterators, SUNCGTestDataset, TrainValidate and config are expected
# to come from the example's accompanying project modules.


def main():
    # Use the GPU if there is one, otherwise CPU
    dtype = 'torch.cuda.FloatTensor' if torch.cuda.is_available() else 'torch.FloatTensor'
    nClasses = 12
    # three-dimensional SparseConvNet

    class Model(nn.Module):
        def __init__(self, sgc_config):
            nn.Module.__init__(self)
            self.stage1 = scn.Sequential().add(
                scn.ValidConvolution(3, 1, 16, 3, False))
            self.stage1_2 = scn.MaxPooling(3, 2, 2)
            self.stage2 = scn.Sequential()
            res(self.stage2, 3, 16, 64)
            self.stage2_2 = scn.MaxPooling(3, 2, 2)
            self.stage3 = UNet6(3, nClasses, sgc_config=sgc_config)
            self.densePred = scn.SparseToDense(3, nClasses)
            self.sgc_config = sgc_config

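        # Forward pass: the first two stages run through spatialGroupConv using the
        # precomputed groups in group_x, each followed by max pooling; the UNet stage
        # then returns two sparse outputs that are converted to dense predictions.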
        def forward(self, x, group_x):
            x1 = spatialGroupConv(x, self.stage1, self.sgc_config[0], self.sgc_config[1], group_x[0])
            x1 = self.stage1_2(x1)

            x2 = spatialGroupConv(x1, self.stage2, self.sgc_config[0], self.sgc_config[1], group_x[1])
            x2 = self.stage2_2(x2)

            x3 = self.stage3(x2, group_x[2:])
            o1 = self.densePred(x3[0])
            o2 = self.densePred(x3[1])
            
            return [o1, o2]

    model = Model([config.abc, config.group_num])
    model.type(dtype)
    print(model)


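    # Load the SUNCG dataset and copy its output offsets and sizes into the shared config.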
    SUNCG = SUNCG_DATA()
    config.output_offset = SUNCG['output_offset'].tolist()
    config.dataset_outputSize = SUNCG['dataset_outputSize'].tolist()
    config.output_offset2 = SUNCG['output_offset2'].tolist()
    config.dataset_outputSize2 = SUNCG['dataset_outputSize2'].tolist()

    train_dataset = getIterators(SUNCG, config)
    test_dataset = SUNCGTestDataset(SUNCG, config)

    TrainValidate(model, train_dataset, test_dataset, config)
Example #2
import torch
import torch.nn as nn
import sparseconvnet as scn

# NOTE: the imports above are assumed for this snippet; res, UNet6, SUNCG_DATA,
# getIterators, SUNCGTestDataset, TrainValidate and config are expected to come
# from the example's accompanying project modules.


def main():
    # Use the GPU if there is one, otherwise CPU
    dtype = 'torch.cuda.FloatTensor' if torch.cuda.is_available() else 'torch.FloatTensor'
    nClasses = 12

    # three-dimensional SparseConvNet

    class Model(nn.Module):
        def __init__(self):
            nn.Module.__init__(self)
            self.stage1 = scn.Sequential().add(
                scn.ValidConvolution(3, 1, 16, 3, False))
            self.stage1.add(scn.MaxPooling(3, 2, 2))
            res(self.stage1, 3, 16, 64)
            self.stage1.add(scn.MaxPooling(3, 2, 2))
            self.stage2 = UNet6(3, nClasses)
            self.densePred = scn.SparseToDense(3, nClasses)

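        # Forward pass: stage1 extracts and downsamples sparse features, the UNet
        # stage returns two sparse outputs, and each is converted to a dense prediction.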
        def forward(self, x):
            x1 = self.stage1(x)
            x2 = self.stage2(x1)
            o1 = self.densePred(x2[0])
            o2 = self.densePred(x2[1])
            return [o1, o2]

    model = Model()
    model.type(dtype)
    print(model)

    SUNCG = SUNCG_DATA()
    config.output_offset = SUNCG['output_offset'].tolist()
    config.dataset_outputSize = SUNCG['dataset_outputSize'].tolist()
    config.output_offset2 = SUNCG['output_offset2'].tolist()
    config.dataset_outputSize2 = SUNCG['dataset_outputSize2'].tolist()

    train_dataset = getIterators(SUNCG, config.train_num)
    test_dataset = SUNCGTestDataset(SUNCG)

    TrainValidate(model, train_dataset, test_dataset, config)
Example #3
# NOTE: the imports below are assumed for this snippet; the .add()-style containers
# and nn.View point to the legacy torch / SparseConvNet interfaces used by the
# original examples, and getIterators is expected to come from the example's data module.
import torch
import torch.legacy.nn as nn
import sparseconvnet.legacy as scn
from data import getIterators

# Use the GPU if there is one, otherwise CPU
dtype = 'torch.cuda.FloatTensor' if torch.cuda.is_available() else 'torch.FloatTensor'

# two-dimensional SparseConvNet
model = nn.Sequential()
sparseModel = scn.Sequential()
denseModel = nn.Sequential()
model.add(sparseModel).add(denseModel)
sparseModel.add(scn.ValidConvolution(2, 3, 16, 3, False))
sparseModel.add(scn.MaxPooling(2, 3, 2))
sparseModel.add(
    scn.SparseResNet(
        2, 16,
        [['b', 16, 2, 1], ['b', 32, 2, 2], ['b', 48, 2, 2], ['b', 96, 2, 2]]))
sparseModel.add(scn.Convolution(2, 96, 128, 4, 1, False))
sparseModel.add(scn.BatchNormReLU(128))
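# Convert the sparse feature map to a dense tensor so the classifier head can use
# ordinary dense layers.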
sparseModel.add(scn.SparseToDense(2))
denseModel.add(nn.View(-1, 128))
denseModel.add(nn.Linear(128, 3755))
model.type(dtype)
print(model)

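# Work out the input spatial size that the network reduces to a 1x1 output, then
# build the data iterators for that size.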
spatial_size = sparseModel.suggestInputSize(torch.LongTensor([1, 1]))
print('input spatial size', spatial_size)
dataset = getIterators(spatial_size, 63, 3)
scn.ClassificationTrainValidate(model, dataset, {
    'nEpochs': 100,
    'initial_LR': 0.1,
    'LR_decay': 0.05,
    'weightDecay': 1e-4
})
Example #4
# NOTE: the imports below are partly assumed for this snippet; the .add()-style
# containers and nn.View point to the legacy torch / SparseConvNet interfaces used
# by the original examples.
import torch
import torch.legacy.nn as nn
import sparseconvnet.legacy as scn
from data import getIterators

# Use the GPU if there is one, otherwise CPU
dtype = 'torch.cuda.FloatTensor' if torch.cuda.is_available() else 'torch.FloatTensor'

# two-dimensional SparseConvNet
model = nn.Sequential()
sparseModel = scn.Sequential()
denseModel = nn.Sequential()
model.add(sparseModel).add(denseModel)
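# VGG-style sparse feature extractor: blocks of convolutions ('C', planes)
# separated by max-pooling ('MP') layers.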
sparseModel.add(scn.SparseVggNet(2, 3, [
    ['C', 16], ['C', 16], 'MP',
    ['C', 32], ['C', 32], 'MP',
    ['C', 48], ['C', 48], 'MP',
    ['C', 64], ['C', 64], 'MP',
    ['C', 96], ['C', 96]]))
sparseModel.add(scn.Convolution(2, 96, 128, 3, 2, False))
sparseModel.add(scn.BatchNormReLU(128))
sparseModel.add(scn.SparseToDense(2))
denseModel.add(nn.View(-1, 128))
denseModel.add(nn.Linear(128, 3755))
model.type(dtype)
print(model)

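# As in the previous example, determine the input spatial size needed for a 1x1
# output, then train with the library's ClassificationTrainValidate helper.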
spatial_size = sparseModel.suggestInputSize(torch.LongTensor([1, 1]))
print('input spatial size', spatial_size)
dataset = getIterators(spatial_size, 63, 3)
scn.ClassificationTrainValidate(
    model, dataset,
    {'nEpochs': 100, 'initial_LR': 0.1, 'LR_decay': 0.05, 'weightDecay': 1e-4})