Example #1
    def build_net(self):
        # [1.0] first layer
        first_layer = nn.ConcatTable()

        # [1.1] feed forward neural net, produce v1, v2
        feedforward = nn.Sequential()
        feedforward.add(nn.Linear(self.input_size, self.hidden_layer_size))
        feedforward.add(nn.PReLU())

        # add hidden layers
        for i in range(self.hidden_layer_count - 1):
            feedforward.add(
                nn.Linear(self.hidden_layer_size, self.hidden_layer_size))
            feedforward.add(nn.PReLU())

        feedforward.add(nn.Linear(self.hidden_layer_size, self.output_size))

        # [1.2] right part, discard pot_size, produce r1, r2
        right_part = nn.Sequential()
        right_part.add(nn.Narrow(1, 0, self.output_size))

        first_layer.add(feedforward)
        first_layer.add(right_part)

        # [2.0] outer net forces counterfactual values to satisfy the zero-sum property
        second_layer = nn.ConcatTable()

        # accept v1,v2; ignore r1, r2
        left_part2 = nn.Sequential()
        left_part2.add(nn.SelectTable(0))

        # accept r1,r2 and v1,v2; produce -0.5k = -0.5(r1*v1 + r2*v2)
        right_part2 = nn.Sequential()
        right_part2.add(nn.DotProduct())
        right_part2.add(nn.Unsqueeze(1))
        right_part2.add(nn.Replicate(self.output_size, 1))
        right_part2.add(nn.Squeeze(2))
        right_part2.add(nn.MulConstant(-0.5))

        second_layer.add(left_part2)
        second_layer.add(right_part2)

        final_mlp = nn.Sequential()
        final_mlp.add(first_layer)
        final_mlp.add(second_layer)
        # accept v1,v2 and -0.5k; produce v1-0.5k, v2-0.5k
        final_mlp.add(nn.CAddTable())

        return final_mlp
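Taken together, the ConcatTable/CAddTable wiring computes v - 0.5*(r·v): the feed-forward values are shifted by half their dot product with the input ranges, which is how the comment's zero-sum constraint is enforced. Below is a minimal sketch of the same graph rewritten with modern torch.nn; the class name ZeroSumNet and the assumption that the first output_size input features are the ranges r are illustrative, not from the original.

import torch
import torch.nn as nn

class ZeroSumNet(nn.Module):
    # Sketch only: mirrors the legacy ConcatTable/CAddTable graph above.
    def __init__(self, input_size, hidden_size, hidden_count, output_size):
        super().__init__()
        layers = [nn.Linear(input_size, hidden_size), nn.PReLU()]
        for _ in range(hidden_count - 1):
            layers += [nn.Linear(hidden_size, hidden_size), nn.PReLU()]
        layers.append(nn.Linear(hidden_size, output_size))
        self.feedforward = nn.Sequential(*layers)
        self.output_size = output_size

    def forward(self, x):
        v = self.feedforward(x)                # v1, v2 (concatenated)
        r = x[:, :self.output_size]            # plays the role of nn.Narrow(1, 0, output_size)
        k = (r * v).sum(dim=1, keepdim=True)   # nn.DotProduct: r1*v1 + r2*v2
        return v - 0.5 * k                     # broadcast replaces Replicate/MulConstant/CAddTable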
Example #2
    def _build_net(self):
        return (nn.Sequential()
                .add(nn.Concat(0)
                     .add(nn.Linear(2, 5))
                     .add(nn.Linear(2, 5)))
                .add(nn.ReLU())
                .add(nn.Linear(10, 20)))
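Note that nn.Concat(0) concatenates the two Linear(2, 5) outputs along dimension 0, so the final nn.Linear(10, 20) only lines up when the net is fed a single unbatched 2-vector. A hypothetical call, assuming a PyTorch version that still ships torch.legacy.nn (builder stands for an instance of the class defining _build_net):

net = builder._build_net()
out = net.forward(torch.randn(2))  # two 5-vectors joined into 10 features
print(out.size())                  # expected: torch.Size([20])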
Example #3
def build_conv_block(dim, padding_type, use_instance_norm):
    conv_block = nn.Sequential()
    p = 0
    if padding_type == 'reflect':
        conv_block.add(nn.SpatialReflectionPadding(1,1,1,1))
    elif padding_type == 'replicate':
        conv_block.add(nn.SpatialReplicationPadding(1,1,1,1))
    elif padding_type == 'zero':
        p = 1

    conv_block.add(nn.SpatialConvolution(dim, dim, 3, 3, 1, 1, p, p))

    if use_instance_norm == 1:
        conv_block.add(nn.InstanceNormalization(dim))
    else:
        conv_block.add(nn.SpatialBatchNormalization(dim))

    conv_block.add(nn.ReLU(True))

    if padding_type == 'reflect':
        conv_block.add(nn.SpatialReflectionPadding(1, 1, 1, 1))
    elif padding_type == 'replicate':
        conv_block.add(nn.SpatialReplicationPadding(1, 1, 1, 1))

    conv_block.add(nn.SpatialConvolution(dim, dim, 3, 3, 1, 1, p, p))

    if use_instance_norm == 1:
        conv_block.add(nn.InstanceNormalization(dim))
    else:
        conv_block.add(nn.SpatialBatchNormalization(dim))

    return conv_block
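Each branch pads by one pixel (reflection, replication, or zero) before a 3x3 stride-1 convolution, so the block preserves spatial size. A hypothetical smoke test with made-up sizes; note that nn.InstanceNormalization is a custom module from the style-transfer ports, not part of torch.legacy.nn, so the batch-norm path is exercised here:

block = build_conv_block(dim=64, padding_type='reflect', use_instance_norm=0)
x = torch.randn(1, 64, 32, 32)
y = block.forward(x)
print(y.size())  # expected: torch.Size([1, 64, 32, 32])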
Example #4
def torch_to_pytorch(t7_filename, outputname=None):
    model = load_lua(t7_filename, unknown_classes=True)
    if type(model).__name__ == 'hashable_uniq_dict':
        model = model.model
    model.gradInput = None

    cvt = Convertor(model)
    s = cvt.lua_recursive_source(lnn.Sequential().add(model))
    s = cvt.simplify_source(s)

    varname = os.path.basename(t7_filename).replace('.t7', '').replace(
        '.', '_').replace('-', '_')

    with open("header.py") as f:
        header = f.read()
    s = '{}\n{}\n\n{} = {}'.format(header, '\n'.join(cvt.prefix_code), varname,
                                   s[:-2])

    if outputname is None:
        outputname = os.path.join('/tmp', varname)

    with open(outputname + '.py', "w") as pyfile:
        pyfile.write(s)

    n = nn.Sequential()
    cvt.lua_recursive_model(model, n)
    torch.save(n.state_dict(), outputname + '.pth')
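A hypothetical invocation (the .t7 filename is made up): with outputname left as None, converting model.t7 writes the generated module source to /tmp/model.py and the converted weights to /tmp/model.pth.

torch_to_pytorch('model.t7')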
Example #5
def torch_to_pytorch(t7_filename, outputname=None):
    model = load_lua(t7_filename, unknown_classes=True)
    if type(model).__name__ == 'hashable_uniq_dict': model = model.model
    model.gradInput = None
    slist = lua_recursive_source(lnn.Sequential().add(model))
    s = simplify_source(slist)
    header = '''
import torch
import torch.nn as nn
import torch.legacy.nn as lnn

from functools import reduce
from torch.autograd import Variable

class LambdaBase(nn.Sequential):
    def __init__(self, fn, *args):
        super(LambdaBase, self).__init__(*args)
        self.lambda_func = fn

    def forward_prepare(self, input):
        output = []
        for module in self._modules.values():
            output.append(module(input))
        return output if output else input

class Lambda(LambdaBase):
    def forward(self, input):
        return self.lambda_func(self.forward_prepare(input))

class LambdaMap(LambdaBase):
    def forward(self, input):
        return list(map(self.lambda_func,self.forward_prepare(input)))

class LambdaReduce(LambdaBase):
    def forward(self, input):
        return reduce(self.lambda_func,self.forward_prepare(input))
'''
    varname = t7_filename.replace('.t7', '').replace('.',
                                                     '_').replace('-', '_')
    s = '{}\n\n{} = {}'.format(header, varname, s[:-2])

    if outputname is None: outputname = varname
    with open(outputname + '.py', "w") as pyfile:
        pyfile.write(s)

    n = nn.Sequential()
    lua_recursive_model(model, n)

    #print("numpy", np.load('vgg_places.npy') )

    np_file = np.load('vgg_places.npy')
    print(np_file.shape)
    print(np_file[0])

    torch.save(n.state_dict(), outputname + '.pth')
Example #6
    def torch_to_keras(self, t7_filename, outputname=None):
        model = load_lua(t7_filename, unknown_classes=True)
        if type(model).__name__ == 'hashable_uniq_dict':
            model = model.model
        model.gradInput = None

        slist = self.lua_recursive_source(lnn.Sequential().add(model),
                                          isFirst=True)
        s = self.simplify_source(slist)
        header = '''from keras.models import Model
from keras.layers import Convolution2D
from keras.layers import Input
from keras.layers import ZeroPadding2D
from keras.layers import Activation
from keras.layers import BatchNormalization
from keras.layers import MaxPooling2D
from keras.layers import Lambda
from keras.layers import AveragePooling2D
from keras.layers import Dense
from keras.layers import Reshape
from keras.layers import concatenate
from Torch2KerasConverter.utils import lrn, sqrt, square, mulConstant, l2Normalize
'''

        if outputname is None:
            outputname = t7_filename

        varname = outputname.replace('.t7', '').replace('.',
                                                        '_').replace('-', '_')
        s = '{}\n\n{}\n{} = Model(inputs=[inp], outputs=x)\n{}.summary()\n'.format(
            header, s[:], varname, varname)

        with open(outputname + '.py', "w") as pyfile:
            pyfile.write(s)

        inp = Input(shape=self.inputShape)
        layers = self.lua_recursive_model(lnn.Sequential().add(model),
                                          isFirst=True,
                                          inp=inp)
        model = Model(inputs=[inp], outputs=layers)
        print(model.summary())
        model.save_weights(outputname + '.h5')
Example #7
def torch_to_pytorch(t7_filename, out_py, out_pth):
    model = load_lua(t7_filename, unknown_classes=True)
    if type(model).__name__ == 'hashable_uniq_dict':
        model = model.model
    model.gradInput = None
    slist = lua_recursive_source(lnn.Sequential().add(model))
    s = simplify_source(slist)
    header = """
import torch
import torch.nn as nn
import torch.legacy.nn as lnn

from functools import reduce
from torch.autograd import Variable

class LambdaBase(nn.Sequential):
    def __init__(self, fn, *args):
        super(LambdaBase, self).__init__(*args)
        self.lambda_func = fn

    def forward_prepare(self, input):
        output = []
        for module in self._modules.values():
            output.append(module(input))
        return output if output else input

class Lambda(LambdaBase):
    def forward(self, input):
        return self.lambda_func(self.forward_prepare(input))

class LambdaMap(LambdaBase):
    def forward(self, input):
        return list(map(self.lambda_func, self.forward_prepare(input)))

class LambdaReduce(LambdaBase):
    def forward(self, input):
        return reduce(self.lambda_func, self.forward_prepare(input))
"""

    varname = t7_filename.split("/")[-1].replace('.t7',
                                                 '').replace('.', '_').replace(
                                                     '-', '_')
    s = '{}\n\n{} = {}'.format(header, varname, s[:-2])

    # save py file
    with open(out_py + ".py", "w") as pyfile:
        pyfile.write(s)

    # save pytorch model
    n = nn.Sequential()
    lua_recursive_model(model, n)
    torch.save(n.state_dict(), out_pth + '.pth')
Example #8
def torch_to_pytorch(t7_filename, outputname=None):
    model = load_lua(t7_filename, unknown_classes=True)
    if type(model).__name__ == 'hashable_uniq_dict': model = model.model
    model.gradInput = None
    slist = lua_recursive_source(lnn.Sequential().add(model))
    s = simplify_source(slist)
    header = ''
    varname = t7_filename.replace('.t7', '').replace('.',
                                                     '_').replace('-', '_')
    s = '{}\n\n{} = {}'.format(header, varname, s[:-2])

    if outputname is None: outputname = varname
    with open(outputname + '.py', "w") as pyfile:
        pyfile.write(s)

    n = nn.Sequential()
    lua_recursive_model(model, n)
    torch.save(n.state_dict(), outputname + '.pth')
Example #9
    def _test_single_layer(self, layer, decimal=7):
        torch_model = nn.Sequential()
        torch_model.add(layer)

        coreml_output = self._forward_coreml(torch_model)
        if not isinstance(coreml_output, list):
            coreml_output = coreml_output.copy()

        # XXX: pytorch legacy.nn has problem with state clearing, so we need to
        #      do it manually
        for l in torch_model.modules:
            if isinstance(l.output, torch.Tensor):
                l.output = l.output.new()

        torch_output = self._forward_torch(torch_model)
        if not isinstance(torch_output, list):
            torch_output = torch_output.copy()

        self._assert_outputs(torch_output, coreml_output, decimal)
Example #10
def torch_to_pytorch(t7_filename, varname=None):
    if varname is None:
        varname = os.path.splitext(os.path.basename(t7_filename))[0].replace(
            '.', '_').replace('-', '_')
    outputname = os.path.join(os.path.dirname(t7_filename), varname)

    model = load_lua(t7_filename, unknown_classes=True)
    if type(model).__name__ == 'hashable_uniq_dict': model = model.model
    model.gradInput = None

    slist, pooling_code = lua_recursive_source(lnn.Sequential().add(model),
                                               varname)
    s = simplify_source(slist)
    with open("header.py") as f:
        header = f.read()
    s = '\n'.join([header] + pooling_code + ['', s])

    with open(outputname + '.py', "w") as pyfile:
        pyfile.write(s)

    n = lua_recursive_model(model)
    torch.save(n.state_dict(), outputname + '.pth')
Example #11
    def test_ParallelTable(self):
        input = torch.randn(3, 4, 5)
        p = nn.ParallelTable()
        p.add(nn.View(4, 5, 1))
        p.add(nn.View(4, 5, 1))
        p.add(nn.View(4, 5, 1))
        m = nn.Sequential()
        m.add(nn.SplitTable(0))
        m.add(p)
        m.add(nn.JoinTable(2))
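        # SplitTable(0) yields three (4, 5) tensors; each View appends a
        # singleton dim and JoinTable(2) stacks them there, giving a (4, 5, 3)
        # tensor equal to input.transpose(0, 2).transpose(0, 1)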

        # Check that these don't raise errors
        p.__repr__()
        str(p)

        output = m.forward(input)
        output2 = input.transpose(0, 2).transpose(0, 1)
        self.assertEqual(output2, output)

        gradInput = m.backward(input, output2)
        self.assertEqual(gradInput, input)
Example #12
def build_volumetric_unpooling_net():
    pool = nn.VolumetricMaxPooling(2, 2, 2, 2)
    unpool = nn.VolumetricMaxUnpooling(pool)
    return nn.Sequential().add(pool).add(unpool)
Example #13
def build_spatial_unpooling_net():
    pool = nn.SpatialMaxPooling(2, 2, 2, 2)
    unpool = nn.SpatialMaxUnpooling(pool)
    return nn.Sequential().add(pool).add(unpool)
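In both helpers the unpooling module is constructed from its pooling module, so it reuses the recorded max indices to scatter values back to their original positions. A quick shape check for the spatial version, with illustrative sizes:

net = build_spatial_unpooling_net()
x = torch.randn(1, 1, 4, 4)
y = net.forward(x)
print(y.size())  # expected: torch.Size([1, 1, 4, 4]); non-max positions are zero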
Example #14
               desc='split_dim'),
 OldModuleTest(nn.View, (2, -1, 2, 5),
               input_size=(2, 4, 5),
               reference_fn=lambda i, _: i.view(2, -1, 2, 5),
               desc='infer_middle'),
 OldModuleTest(nn.Sum, (1, ),
               input_size=(2, 4, 5),
               reference_fn=lambda i, _: i.sum(1)),
 OldModuleTest(nn.Sum, (1, True),
               input_size=(2, 4, 5),
               reference_fn=lambda i, _: i.sum(1).div(i.size(1)),
               desc='sizeAverage'),
 OldModuleTest(nn.Mean, (1, ),
               input_size=(2, 4, 5),
               reference_fn=lambda i, _: torch.mean(i, 1)),
 OldModuleTest(lambda: nn.Sequential().add(nn.GradientReversal()).add(
     nn.GradientReversal()),
               input_size=(4, 3, 2, 2),
               fullname='GradientReversal'),
 OldModuleTest(nn.Identity,
               input_size=(4, 3, 2, 4),
               reference_fn=lambda i, _: i),
 OldModuleTest(nn.DotProduct,
               input_size=[(10, 4), (10, 4)],
               reference_fn=lambda i, _: torch.Tensor(
                   list(a.dot(b) for a, b in zip(i[0], i[1])))),
 OldModuleTest(nn.CosineDistance,
               input_size=[(10, 4), (10, 4)],
               reference_fn=lambda i, _: torch.Tensor(
                   list(
                       a.dot(b) / (a.norm(2) * b.norm(2))
                       for a, b in zip(i[0], i[1])))),
Example #15
def FCNN():
    num_classes = 2
    n_layers_enc = 32
    n_layers_ctx = 128
    n_input = 5
    prob_drop = 0.25
    layers = []
    # Encoder
    model = nn.Sequential()
    pool = nn.SpatialMaxPooling(2, 2, 2, 2)
    model.add(nn.SpatialConvolution(n_input, n_layers_enc, 3, 3, 1, 1, 1, 1))
    model.add(nn.ELU())
    model.add(
        nn.SpatialConvolution(n_layers_enc, n_layers_enc, 3, 3, 1, 1, 1, 1))
    model.add(nn.ELU())
    model.add(pool)
    # Context Module
    model.add(
        nn.SpatialDilatedConvolution(n_layers_enc, n_layers_ctx, 3, 3, 1, 1, 1,
                                     1, 1, 1))
    model.add(nn.ELU())
    model.add(nn.SpatialDropout(prob_drop))
    model.add(
        nn.SpatialDilatedConvolution(n_layers_ctx, n_layers_ctx, 3, 3, 1, 1, 2,
                                     2, 2, 2))
    model.add(nn.ELU())
    model.add(nn.SpatialDropout(prob_drop))
    model.add(
        nn.SpatialDilatedConvolution(n_layers_ctx, n_layers_ctx, 3, 3, 1, 1, 4,
                                     4, 4, 4))
    model.add(nn.ELU())
    model.add(nn.SpatialDropout(prob_drop))
    model.add(
        nn.SpatialDilatedConvolution(n_layers_ctx, n_layers_ctx, 3, 3, 1, 1, 8,
                                     8, 8, 8))
    model.add(nn.ELU())
    model.add(nn.SpatialDropout(prob_drop))
    model.add(
        nn.SpatialDilatedConvolution(n_layers_ctx, n_layers_ctx, 3, 3, 1, 1,
                                     16, 16, 16, 16))
    model.add(nn.ELU())
    model.add(nn.SpatialDropout(prob_drop))
    model.add(
        nn.SpatialDilatedConvolution(n_layers_ctx, n_layers_ctx, 3, 3, 1, 1,
                                     32, 32, 32, 32))
    model.add(nn.ELU())
    model.add(nn.SpatialDropout(prob_drop))
    model.add(
        nn.SpatialDilatedConvolution(n_layers_ctx, n_layers_ctx, 3, 3, 1, 1,
                                     64, 64, 64, 64))
    model.add(nn.ELU())
    model.add(nn.SpatialDropout(prob_drop))
    model.add(nn.SpatialDilatedConvolution(n_layers_ctx, n_layers_enc, 1, 1))
    model.add(nn.ELU())  # Not in the paper
    # Decoder
    model.add(nn.SpatialMaxUnpooling(pool))
    model.add(
        nn.SpatialConvolution(n_layers_enc, n_layers_enc, 3, 3, 1, 1, 1, 1))
    model.add(nn.ELU())
    model.add(
        nn.SpatialConvolution(n_layers_enc, num_classes, 3, 3, 1, 1, 1, 1))
    model.add(nn.ELU())
    model.add(nn.SoftMax())  # Not in the paper
    return model
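The encoder halves the resolution once through the shared pool, the context module keeps it fixed (each dilated 3x3 conv pads by exactly its dilation), and SpatialMaxUnpooling(pool) restores it, so the output should match the input spatially with num_classes channels. A hypothetical shape check:

model = FCNN()
x = torch.randn(1, 5, 64, 64)  # 5 channels, matching n_input
y = model.forward(x)
print(y.size())  # expected: torch.Size([1, 2, 64, 64])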
Example #16
    def setUp(self):
        self.input = np.random.ranf(_INPUT_SHAPE)
        self.model = nn.Sequential()
        self.model.add(nn.MulConstant(1.0))
Example #17
def torch_to_pytorch(t7_filename, outputname=None):
    model = load_lua(t7_filename, unknown_classes=True)
    if type(model).__name__ == 'hashable_uniq_dict': model = model.model
    model.gradInput = None
    slist = lua_recursive_source(lnn.Sequential().add(model))
    s = simplify_source(slist)
    header = '''import torch
import torch.nn as nn

from functools import reduce


class LambdaBase(nn.Sequential):
    def __init__(self, fn, *args):
        super(LambdaBase, self).__init__(*args)
        self.lambda_func = fn

    def forward_prepare(self, input):
        output = []
        for module in self._modules.values():
            output.append(module(input))
        return output if output else input


class Lambda(LambdaBase):
    def forward(self, input):
        return self.lambda_func(self.forward_prepare(input))


class LambdaMap(LambdaBase):
    def forward(self, input):
        return list(map(self.lambda_func, self.forward_prepare(input)))


class LambdaReduce(LambdaBase):
    def forward(self, input):
        return reduce(self.lambda_func, self.forward_prepare(input))


class TVLoss(nn.Module):
    def __init__(self, TVLoss_weight=1):
        super(TVLoss, self).__init__()
        self.TVLoss_weight = TVLoss_weight

    def forward(self, x):
        batch_size = x.size()[0]
        h_x = x.size()[2]
        w_x = x.size()[3]
        count_h = self._tensor_size(x[:, :, 1:, :])
        count_w = self._tensor_size(x[:, :, :, 1:])
        h_tv = torch.pow((x[:, :, 1:, :] - x[:, :, :h_x - 1, :]), 2).sum()
        w_tv = torch.pow((x[:, :, :, 1:] - x[:, :, :, :w_x - 1]), 2).sum()
        return self.TVLoss_weight * 2 * (h_tv / count_h + w_tv / count_w) / batch_size + x

    def _tensor_size(self, t):
        return t.size()[1] * t.size()[2] * t.size()[3]


class TransformerNetwork(nn.Module):
    def __init__(self):
        super(TransformerNetwork, self).__init__()
'''

    footer = '''

    def forward(self, input):
        return self.style.forward(input)
'''
    varname = t7_filename.replace('.t7', '').replace('.',
                                                     '_').replace('-', '_')
    s = '{}        self.style = {}{}'.format(header, s[:-2], footer)

    if outputname is None: outputname = varname
    with open(outputname + '.py', "w") as pyfile:
        pyfile.write(s)

    n = nn.Sequential()
    lua_recursive_model(model, n)
    model = TransformerNetwork()
    model.style = n
    torch.save(model.state_dict(), outputname + '.pth')
Example #18
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

import torch
import torch.legacy.nn as nn
import sparseconvnet.legacy as scn
from data import getIterators

# Use the GPU if there is one, otherwise CPU
dtype = ('torch.cuda.FloatTensor'
         if torch.cuda.is_available() else 'torch.FloatTensor')

# two-dimensional SparseConvNet
model = nn.Sequential()
sparseModel = scn.Sequential()
denseModel = nn.Sequential()
model.add(sparseModel).add(denseModel)
sparseModel.add(scn.ValidConvolution(2, 3, 16, 3, False))
sparseModel.add(scn.MaxPooling(2, 3, 2))
sparseModel.add(
    scn.SparseResNet(
        2, 16,
        [['b', 16, 2, 1], ['b', 32, 2, 2], ['b', 48, 2, 2], ['b', 96, 2, 2]]))
sparseModel.add(scn.Convolution(2, 96, 128, 4, 1, False))
sparseModel.add(scn.BatchNormReLU(128))
sparseModel.add(scn.SparseToDense(2))
denseModel.add(nn.View(-1, 128))
denseModel.add(nn.Linear(128, 3755))
model.type(dtype)
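# NOTE: the snippet below appears to use modern torch.nn modules (Conv2d, ReLU,
# MaxPool2d) rather than the legacy `nn` imported above; `Lambda` and `Variable`
# are assumed to come from a converter header like the one in Example #5.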
googlenet = nn.Sequential(  # Sequential,
    nn.Conv2d(3, 64, (7, 7), (2, 2), (3, 3)),
    nn.ReLU(),
    nn.MaxPool2d((3, 3), (2, 2), (0, 0), ceil_mode=True),
    Lambda(lambda x, lrn=torch.legacy.nn.SpatialCrossMapLRN(*(
        5, 0.0001, 0.75, 1)): Variable(lrn.forward(x.data))),
    nn.Conv2d(64, 64, (1, 1)),
    nn.ReLU(),
    nn.Conv2d(64, 192, (3, 3), (1, 1), (1, 1)),
    nn.ReLU(),
    Lambda(lambda x, lrn=torch.legacy.nn.SpatialCrossMapLRN(*(
        5, 0.0001, 0.75, 1)): Variable(lrn.forward(x.data))),
    nn.MaxPool2d((3, 3), (2, 2), (0, 0), ceil_mode=True),
    nn.Conv2d(192, 64, (1, 1)),
    nn.ReLU(),
    nn.Conv2d(192, 96, (1, 1)),
    nn.ReLU(),
    nn.Conv2d(96, 128, (3, 3), (1, 1), (1, 1)),
    nn.ReLU(),
    nn.Conv2d(192, 16, (1, 1)),
    nn.ReLU(),
    nn.Conv2d(16, 32, (5, 5), (1, 1), (2, 2)),
    nn.ReLU(),
    nn.MaxPool2d((3, 3), (1, 1), (1, 1), ceil_mode=True),
    nn.Conv2d(192, 32, (1, 1)),
    nn.ReLU(),
    nn.Conv2d(256, 128, (1, 1)),
    nn.ReLU(),
    nn.Conv2d(256, 128, (1, 1)),
    nn.ReLU(),
    nn.Conv2d(128, 192, (3, 3), (1, 1), (1, 1)),
    nn.ReLU(),
    nn.Conv2d(256, 32, (1, 1)),
    nn.ReLU(),
    nn.Conv2d(32, 96, (5, 5), (1, 1), (2, 2)),
    nn.ReLU(),
    nn.MaxPool2d((3, 3), (1, 1), (1, 1), ceil_mode=True),
    nn.Conv2d(256, 64, (1, 1)),
    nn.ReLU(),
    nn.MaxPool2d((3, 3), (2, 2), (0, 0), ceil_mode=True),
    nn.Conv2d(480, 192, (1, 1)),
    nn.ReLU(),
    nn.Conv2d(480, 96, (1, 1)),
    nn.ReLU(),
    nn.Conv2d(96, 208, (3, 3), (1, 1), (1, 1)),
    nn.ReLU(),
    nn.Conv2d(480, 16, (1, 1)),
    nn.ReLU(),
    nn.Conv2d(16, 48, (5, 5), (1, 1), (2, 2)),
    nn.ReLU(),
    nn.MaxPool2d((3, 3), (1, 1), (1, 1), ceil_mode=True),
    nn.Conv2d(480, 64, (1, 1)),
    nn.ReLU(),
    nn.AvgPool2d((5, 5), (3, 3), (0, 0), ceil_mode=True),  #AvgPool2d,
    nn.Conv2d(512, 128, (1, 1)),
    nn.ReLU(),
    Lambda(lambda x: x.view(x.size(0), -1)),  # View,
    nn.Sequential(Lambda(lambda x: x.view(1, -1) if 1 == len(x.size()) else x),
                  nn.Linear(2048, 1024)),  # Linear,
    nn.ReLU(),
    nn.Dropout(0.7),
    nn.Sequential(Lambda(lambda x: x.view(1, -1) if 1 == len(x.size()) else x),
                  nn.Linear(1024, 365)),  # Linear,
)