Example 1
def submanifold_resnet_block(inout_chan, bottleneck_chan):
    # relu is fused in this implementation
    dtype = 'torch.cuda.FloatTensor'

    bn1 = scn.BatchNormReLU(inout_chan)
    bn2 = scn.BatchNormReLU(bottleneck_chan)
    bn3 = scn.BatchNormReLU(bottleneck_chan)
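    # put the fused BatchNorm-ReLU layers in inference mode
    # (torch.legacy.nn modules track their mode with the boolean `train` attribute)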
    bn1.train = False
    bn2.train = False
    bn3.train = False
    model = scn.Sequential() \
        .add(bn1) \
        .add(scn.ValidConvolution(2, inout_chan, bottleneck_chan, 3, False)) \
        .add(bn2) \
        .add(scn.ValidConvolution(2, bottleneck_chan, bottleneck_chan, 3, False)) \
        .add(bn3) \
        .add(scn.ValidConvolution(2, bottleneck_chan, inout_chan, 3, False))
    model.type(dtype)
    model.cuda()

    return model
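A minimal usage sketch for the block above, not taken from the original source: it builds a toy two-dimensional InputBatch with two active sites and runs it through the block. It assumes a CUDA-capable machine (the function moves the model to the GPU) and that the legacy InputBatch exposes a type() conversion like the other legacy modules.

import torch
import sparseconvnet.legacy as scn

block = submanifold_resnet_block(16, 8)

# a 32x32 grid with two active sites, each carrying a 16-component feature vector
batch = scn.InputBatch(2, torch.LongTensor([32, 32]))
batch.addSample()
batch.setLocation(torch.LongTensor([5, 7]), torch.FloatTensor(16).fill_(1), 0)
batch.setLocation(torch.LongTensor([6, 7]), torch.FloatTensor(16).fill_(1), 0)

batch.type('torch.cuda.FloatTensor')  # assumption: InputBatch converts its features like the other legacy modules
output = block.forward(batch)         # sparse output with inout_chan (= 16) feature planes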
Example 2
import torch
import torch.legacy.nn as nn
import sparseconvnet.legacy as scn

dtype = 'torch.cuda.FloatTensor' if torch.cuda.is_available() else 'torch.FloatTensor'

# two-dimensional SparseConvNet
model = nn.Sequential()
sparseModel = scn.Sequential()
denseModel = nn.Sequential()
model.add(sparseModel).add(denseModel)
sparseModel.add(scn.ValidConvolution(2, 3, 16, 3, False))
sparseModel.add(scn.MaxPooling(2, 3, 2))
sparseModel.add(
    scn.SparseResNet(
        2, 16,
        [['b', 16, 2, 1], ['b', 32, 2, 2], ['b', 48, 2, 2], ['b', 96, 2, 2]]))
sparseModel.add(scn.Convolution(2, 96, 128, 4, 1, False))
sparseModel.add(scn.BatchNormReLU(128))
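# SparseToDense converts the sparse feature planes to an ordinary dense tensor so
# the fully connected classifier head (View + Linear) can consume them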
sparseModel.add(scn.SparseToDense(2))
denseModel.add(nn.View(-1, 128))
denseModel.add(nn.Linear(128, 3755))
model.type(dtype)
print(model)

spatial_size = sparseModel.suggestInputSize(torch.LongTensor([1, 1]))
print('input spatial size', spatial_size)
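# getIterators is defined in the example's data-loading code (not shown here); it
# is expected to return the train/validation iterators that
# scn.ClassificationTrainValidate consumes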
dataset = getIterators(spatial_size, 63, 3)
scn.ClassificationTrainValidate(model, dataset, {
    'nEpochs': 100,
    'initial_LR': 0.1,
    'LR_decay': 0.05,
    'weightDecay': 1e-4
})
Example 3

import torch
import torch.legacy.nn as nn
import sparseconvnet.legacy as scn

# Use the GPU if there is one, otherwise CPU
dtype = 'torch.cuda.FloatTensor' if torch.cuda.is_available() else 'torch.FloatTensor'

model = scn.Sequential().add(
    scn.SparseVggNet(2, 1, [['C', 8], ['C', 8], ['MP', 3, 2],
                            ['C', 16], ['C', 16], ['MP', 3, 2],
                            ['C', 24], ['C', 24], ['MP', 3, 2]])
).add(
    scn.ValidConvolution(2, 24, 32, 3, False)
).add(
    scn.BatchNormReLU(32)
).add(
    scn.SparseToDense(2)
).type(dtype)

# output will be 10x10
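# (suggestInputSize works backwards through the strided pooling layers to find an
# input spatial size that yields the requested output size)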
inputSpatialSize = model.suggestInputSize(torch.LongTensor([10, 10]))
input = scn.InputBatch(2, inputSpatialSize)

msg = [
    " X   X  XXX  X    X    XX     X       X   XX   XXX   X    XXX   ",
    " X   X  X    X    X   X  X    X       X  X  X  X  X  X    X  X  ",
    " XXXXX  XX   X    X   X  X    X   X   X  X  X  XXX   X    X   X ",
    " X   X  X    X    X   X  X     X X X X   X  X  X  X  X    X  X  ",
    " X   X  XXX  XXX  XXX  XX       X   X     XX   X  X  XXX  XXX   "
]
input.addSample()
for y, line in enumerate(msg):
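    # (the snippet is truncated here; a plausible completion, following the
    # ASCII-art input above, marks each 'X' as an active site carrying a
    # one-component feature vector -- an assumption, not the original file)
    for x, c in enumerate(line):
        if c == 'X':
            location = torch.LongTensor([x, y])
            featureVector = torch.FloatTensor([1])
            input.setLocation(location, featureVector, 0)

# run the network on the sparse batch (also part of the assumed completion)
input.type(dtype)              # assumption: InputBatch supports .type() like the legacy modules
output = model.forward(input)  # SparseToDense yields a dense tensor, roughly 1 x 32 x 10 x 10
print(output.size())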
Example 4
import torch
import torch.legacy.nn as nn
import sparseconvnet.legacy as scn

dtype = 'torch.cuda.FloatTensor' if torch.cuda.is_available() else 'torch.FloatTensor'

model = nn.Sequential()
sparseModel = scn.Sequential()
denseModel = nn.Sequential()
model.add(sparseModel).add(denseModel)
sparseModel.add(scn.ValidConvolution(2, 3, 16, 3, False))\
    .add(scn.SparseDenseNet(2, 16, [
        {'pool': 'MP', 'compression': 0},
        {'nExtraLayers': 2, 'growthRate': 16},
        {'pool': 'BN-R-C-AP', 'compression': 0},
        {'nExtraLayers': 2, 'growthRate': 16},
        {'pool': 'BN-R-C-AP', 'compression': 0},
        {'nExtraLayers': 2, 'growthRate': 16},
        {'pool': 'BN-R-C-AP', 'compression': 0},
        {'nExtraLayers': 2, 'growthRate': 16}]))
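# the SparseDenseNet block records how many feature planes it produces in nOutputPlanes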
n_out = sparseModel.modules[-1].nOutputPlanes
sparseModel.add(scn.Convolution(2, n_out, 256, 4, 1, False))
sparseModel.add(scn.BatchNormReLU(256))
sparseModel.add(scn.SparseToDense(2))
denseModel.add(nn.View(-1, 256))
denseModel.add(nn.Linear(256, 3755))
model.type(dtype)
print(model)

spatial_size = sparseModel.suggestInputSize(torch.LongTensor([1, 1]))
print('input spatial size', spatial_size)
dataset = getIterators(spatial_size, 64, 2)
scn.ClassificationTrainValidate(model, dataset, {
    'nEpochs': 100,
    'initial_LR': 0.1,
    'LR_decay': 0.05,
    'weightDecay': 1e-4
})
Example 5
import torch
import torch.legacy.nn as nn
import sparseconvnet.legacy as scn

dtype = 'torch.FloatTensor'
# dtype = 'torch.cuda.FloatTensor'  # Uncomment this line to run on GPU

# two-dimensional SparseConvNet
model = nn.Sequential()
sparseModel = scn.Sequential()
denseModel = nn.Sequential()
model.add(sparseModel).add(denseModel)
sparseModel.add(
    scn.SparseVggNet(2, 3, [
        ['C', 8], ['C', 8], 'MP',
        ['C', 16], ['C', 16], 'MP',
        ['C', 16, 8], ['C', 16, 8], 'MP',
        ['C', 24, 8], ['C', 24, 8], 'MP'
    ]))
sparseModel.add(scn.Convolution(2, 32, 64, 5, 1, False))
sparseModel.add(scn.BatchNormReLU(64))
sparseModel.add(scn.SparseToDense(2))
denseModel.add(nn.View(-1, 64))
denseModel.add(nn.Linear(64, 183))
model.type(dtype)
print(model)

spatial_size = sparseModel.suggestInputSize(torch.LongTensor([1, 1]))
print('input spatial size', spatial_size)
dataset = getIterators(spatial_size, 63, 3)
scn.ClassificationTrainValidate(model, dataset, {
    'nEpochs': 100,
    'initial_LR': 0.1,
    'LR_decay': 0.05,
    'weightDecay': 1e-4
})