# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

import torch
import torch.legacy.nn as nn
import sparseconvnet.legacy as scn

# Use the GPU if there is one, otherwise CPU
dtype = 'torch.cuda.FloatTensor' if torch.cuda.is_available() else 'torch.FloatTensor'

model = scn.Sequential().add(
    scn.SparseVggNet(2, 1, [
        ['C', 8], ['C', 8], ['MP', 3, 2],
        ['C', 16], ['C', 16], ['MP', 3, 2],
        ['C', 24], ['C', 24], ['MP', 3, 2]])).add(
    scn.ValidConvolution(2, 24, 32, 3, False)).add(
    scn.BatchNormReLU(32)).add(
    scn.SparseToDense(2)).type(dtype)

# output will be 10x10
inputSpatialSize = model.suggestInputSize(torch.LongTensor([10, 10]))
input = scn.InputBatch(2, inputSpatialSize)

msg = [
    " X X XXX X X XX X X XX XXX X XXX ",
    " X X X X X X X X X X X X X X X X ",
    " XXXXX XX X X X X X X X X X XXX X X X ",
    " X X X X X X X X X X X X X X X X X X ",
    " X X XXX XXX XXX XX X X XX X X XXX XXX "]
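
# The hello-world script would normally continue by writing the active sites of
# the ASCII-art message into the InputBatch and running a forward pass. The
# lines below are a minimal illustrative sketch, assuming the legacy InputBatch
# API (addSample / setLocation) and the legacy-nn evaluate() call; they are not
# part of the original file.
input.addSample()
for y, line in enumerate(msg):
    for x, c in enumerate(line):
        if c == 'X':
            location = torch.LongTensor([y, x])     # 2D site that is "active"
            featureVector = torch.FloatTensor([1])  # one input feature plane
            input.setLocation(location, featureVector, 0)

model.evaluate()               # legacy-nn equivalent of eval mode
output = model.forward(input)  # dense output: batchSize x 32 x 10 x 10
print(output.size())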
import torch
import torch.legacy.nn as nn
import sparseconvnet.legacy as scn
from data import getIterators

# Use the GPU if there is one, otherwise CPU
dtype = 'torch.cuda.FloatTensor' if torch.cuda.is_available() else 'torch.FloatTensor'

# two-dimensional SparseConvNet
model = nn.Sequential()
sparseModel = scn.Sequential()
denseModel = nn.Sequential()
model.add(sparseModel).add(denseModel)
sparseModel.add(
    scn.SparseVggNet(2, 3, [
        ['C', 16], ['C', 16], 'MP',
        ['C', 32], ['C', 32], 'MP',
        ['C', 48], ['C', 48], 'MP',
        ['C', 64], ['C', 64], 'MP',
        ['C', 96], ['C', 96]]))
sparseModel.add(scn.Convolution(2, 96, 128, 3, 2, False))
sparseModel.add(scn.BatchNormReLU(128))
sparseModel.add(scn.SparseToDense(2))
denseModel.add(nn.View(-1, 128))
denseModel.add(nn.Linear(128, 3755))
model.type(dtype)
print(model)

spatial_size = sparseModel.suggestInputSize(torch.LongTensor([1, 1]))
print('input spatial size', spatial_size)
dataset = getIterators(spatial_size, 63, 3)
scn.ClassificationTrainValidate(model, dataset, {
    'nEpochs': 100,
    'initial_LR': 0.1})
import torch
import torch.legacy.nn as nn
import sparseconvnet.legacy as scn
from data import getIterators

dtype = 'torch.FloatTensor'
# dtype = 'torch.cuda.FloatTensor'  # Uncomment this to run on GPU

# two-dimensional SparseConvNet
model = nn.Sequential()
sparseModel = scn.Sequential()
denseModel = nn.Sequential()
model.add(sparseModel).add(denseModel)
sparseModel.add(
    scn.SparseVggNet(2, 3, [
        ['C', 8], ['C', 8], 'MP',
        ['C', 16], ['C', 16], 'MP',
        ['C', 16, 8], ['C', 16, 8], 'MP',
        ['C', 24, 8], ['C', 24, 8], 'MP']))
sparseModel.add(scn.Convolution(2, 32, 64, 5, 1, False))
sparseModel.add(scn.BatchNormReLU(64))
sparseModel.add(scn.SparseToDense(2))
denseModel.add(nn.View(-1, 64))
denseModel.add(nn.Linear(64, 183))
model.type(dtype)
print(model)

spatial_size = sparseModel.suggestInputSize(torch.LongTensor([1, 1]))
print('input spatial size', spatial_size)
dataset = getIterators(spatial_size, 63, 3)
scn.ClassificationTrainValidate(model, dataset, {
    'nEpochs': 100})
import torch
import torch.legacy.nn as nn
import sparseconvnet.legacy as scn
from data import getIterators

# Use the GPU if there is one, otherwise CPU
dtype = 'torch.cuda.FloatTensor' if torch.cuda.is_available() else 'torch.FloatTensor'

# two-dimensional SparseConvNet
model = nn.Sequential()
sparseModel = scn.Sequential()
denseModel = nn.Sequential()
model.add(sparseModel).add(denseModel)
sparseModel.add(scn.SparseVggNet(2, 3, [
    ['C', 16, 8], ['C', 16, 8], 'MP',
    ['C', 32, 8], ['C', 32, 8], 'MP',
    ['C', 48, 16], ['C', 48, 16], 'MP',
    ['C', 64, 16], ['C', 64, 16], 'MP',
    ['C', 96, 16], ['C', 96, 16]]))
sparseModel.add(scn.Convolution(2, 96 + 16, 128, 3, 2, False))
sparseModel.add(scn.BatchNormReLU(128))
sparseModel.add(scn.SparseToDense(2))
denseModel.add(nn.View(-1, 128))
denseModel.add(nn.Linear(128, 3755))
model.type(dtype)
print(model)

spatial_size = sparseModel.suggestInputSize(torch.LongTensor([1, 1]))
print('input spatial size', spatial_size)
dataset = getIterators(spatial_size, 63, 3)
scn.ClassificationTrainValidate(
    model, dataset,