Example no. 1
0
    def __init__(self,
                 enc_size,
                 n_classes,
                 ae_model,
                 batch_size,
                 name="EncSVM"):
        """Linear classifier (SVM-style head) over pooled autoencoder encodings.

        Args:
            enc_size: dimensionality of the flattened encoding fed to the
                final linear layer.
            n_classes: number of output classes.
            ae_model: autoencoder used to produce encodings (stored, not
                modified here).
            batch_size: expected mini-batch size.
            name: model name forwarded to the base class.
        """
        super(EncodingSVM, self).__init__(name)

        self.batch_size = batch_size
        self.enc_size = enc_size
        self.n_classes = n_classes
        self.ae_model = ae_model

        self.upsample = Ops.NNUpsample1d()

        # Multi-scale max-pool pyramid over the encoding. Integer division
        # is required: under Python 3, `alpha / 2` is a float, and
        # nn.MaxPool1d needs an int kernel_size/stride.
        alpha = 32
        self.pools = []
        self.pools.append(nn.MaxPool1d(kernel_size=alpha, stride=alpha))
        self.pools.append(nn.MaxPool1d(kernel_size=alpha // 2,
                                       stride=alpha // 2))
        self.pools.append(nn.MaxPool1d(kernel_size=alpha // 4,
                                       stride=alpha // 4))
        #self.pools.append(nn.MaxPool1d(kernel_size=alpha // 8, stride=alpha // 8))

        self.fc = nn.Linear(self.enc_size, self.n_classes)
Example no. 2
0
    def __init__(self,
                 size,
                 dim,
                 batch_size=64,
                 kernel_size=2,
                 name="MRI2S",
                 pretrained=False,
                 arch='vgg'):
        """Image-to-shape model: a CNN image encoder followed by a stack of
        multi-resolution 1-D transposed-conv decoder blocks.

        Args:
            size: target output length; decoding doubles from ``base_size``
                (16) until this is reached.
            dim: dimensionality of the shape representation (stored).
            batch_size: expected mini-batch size.
            kernel_size: nominal decoder kernel size (stored).
            name: model name forwarded to the base class.
            pretrained: whether to load pretrained ImageNet weights for the
                image encoder.
            arch: image-encoder architecture, ``'vgg'`` or ``'alexnet'``.
                NOTE: the previous default (``True``) matched neither branch,
                leaving ``self.encoder`` unset and crashing immediately below;
                the default is now ``'vgg'``.

        Raises:
            ValueError: if ``arch`` is not a recognized architecture.
        """
        super(MultiResImageToShape, self).__init__(name)

        self.size = size
        self.dim = dim
        self.kernel_size = kernel_size
        self.batch_size = batch_size
        if arch == 'vgg':
            self.encoder = torchvision.models.vgg11(pretrained=pretrained)
        elif arch == 'alexnet':
            self.encoder = torchvision.models.alexnet(pretrained=pretrained)
        else:
            # Fail loudly instead of hitting an AttributeError on the next line.
            raise ValueError("arch must be 'vgg' or 'alexnet', got %r" % (arch,))
        # Replace the classifier head so the encoder emits a 16*1024 code.
        self.encoder.classifier._modules['6'] = nn.Linear(4096, 16 * 1024)
        self.dec_modules = nn.ModuleList()
        self.base_size = 16

        self.upsample = Ops.NNUpsample1d()
        self.pool = nn.MaxPool1d(kernel_size=4, stride=4)

        # Decoder channel schedule, widest first (list is reversed).
        custom_nfilters = [128, 128, 128, 256, 512, 512, 1024, 1024, 1024]
        custom_nfilters.reverse()
        custom_nfilters = np.array(custom_nfilters)

        # Stack decoder blocks, doubling the temporal resolution each step.
        # (The original Py2 `print in_channels` debug statement was removed;
        # it is a SyntaxError under Python 3.)
        current_size = self.base_size
        layer_num = 1
        while current_size < self.size:
            in_channels = custom_nfilters[layer_num - 1]
            out_channels = custom_nfilters[layer_num]
            conv_dec = MultiResConvTranspose1d('up{}'.format(layer_num),
                                               in_channels, out_channels)
            current_size *= 2
            layer_num += 1

            self.dec_modules.append(conv_dec)

        # 1x1 transposed convs fuse the 3 resolution streams down to xyz coords.
        self.final_conv = nn.Sequential()
        self.final_conv.add_module(
            'final_conv1',
            nn.ConvTranspose1d(custom_nfilters[-1] * 3,
                               128,
                               1,
                               stride=1,
                               padding=0))
        self.final_conv.add_module('bn_final', nn.BatchNorm1d(128))
        self.final_conv.add_module('relu_final', nn.ReLU(inplace=True))
        self.final_conv.add_module(
            'final_conv2', nn.ConvTranspose1d(128, 3, 1, stride=1, padding=0))
        self.final_conv.add_module('tanh_final', nn.Tanh())
Example no. 3
0
    def __init__(self,
                 size,
                 dim,
                 batch_size=64,
                 kernel_size=2,
                 name="MRTDecoder"):
        """Multi-resolution transposed-conv decoder that optimizes a learned
        latent code ``self.z`` directly (no encoder).

        Args:
            size: target output length; decoding doubles from ``base_size``
                (16) until this is reached.
            dim: dimensionality of the shape representation (stored).
            batch_size: expected mini-batch size.
            kernel_size: nominal decoder kernel size (stored).
            name: model name forwarded to the base class.
        """
        super(MRTDecoder, self).__init__(name)

        self.size = size
        self.dim = dim
        self.kernel_size = kernel_size
        self.batch_size = batch_size

        # Learned latent code, optimized jointly with the decoder weights.
        self.z = nn.Parameter(torch.randn(16 * 1024))
        self.dec_modules = nn.ModuleList()
        self.base_size = 16

        self.upsample = Ops.NNUpsample1d()
        self.pool = nn.MaxPool1d(kernel_size=4, stride=4)

        # Decoder channel schedule, widest first; halve every width after the
        # first. Floor division is required: `/= 2` on an integer NumPy array
        # raises TypeError under Python 3 (true_divide cannot cast its float
        # output back into the int array in place).
        custom_nfilters = [128, 128, 128, 256, 512, 512, 1024, 1024, 1024]
        custom_nfilters.reverse()
        custom_nfilters = np.array(custom_nfilters)
        custom_nfilters[1:] //= 2

        # Stack decoder blocks, doubling the temporal resolution each step.
        current_size = self.base_size
        layer_num = 1
        while current_size < self.size:
            in_channels = custom_nfilters[layer_num - 1]
            out_channels = custom_nfilters[layer_num]
            conv_dec = MultiResConvTranspose1d('up{}'.format(layer_num),
                                               in_channels, out_channels)
            current_size *= 2
            layer_num += 1

            self.dec_modules.append(conv_dec)

        # 1x1 transposed convs fuse the 3 resolution streams down to xyz coords.
        self.final_conv = nn.Sequential()
        self.final_conv.add_module(
            'final_conv1',
            nn.ConvTranspose1d(custom_nfilters[-1] * 3,
                               128,
                               1,
                               stride=1,
                               padding=0))
        self.final_conv.add_module('bn_final', nn.BatchNorm1d(128))
        self.final_conv.add_module('relu_final', nn.ReLU(inplace=True))
        self.final_conv.add_module(
            'final_conv2', nn.ConvTranspose1d(128, 3, 1, stride=1, padding=0))
        self.final_conv.add_module('tanh_final', nn.Tanh())
Example no. 4
0
    def __init__(self, name, in_channels, out_channels, blocktype, activation):
        """Multi-resolution 1-D block with three parallel conv branches.

        Each branch is ``blocktype -> BatchNorm1d -> activation`` with
        kernel 2 / stride 2; the branches differ only in how many input
        streams they concatenate (2x, 3x, 2x ``in_channels``).

        Args:
            name: prefix used for the submodule names.
            in_channels: per-stream input channel count.
            out_channels: output channel count of every branch.
            blocktype: conv-layer class (e.g. nn.Conv1d / nn.ConvTranspose1d).
            activation: activation module shared by all three branches.
        """
        super(MultiResBlock1d, self).__init__()

        self.upsample = Ops.NNUpsample1d()
        self.pool = nn.MaxPool1d(kernel_size=4, stride=4)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.name = name

        def build_branch(idx, width_mult):
            # One conv/bn/activation branch; kernel 2, stride 2 halves the
            # temporal resolution.
            branch = nn.Sequential()
            branch.add_module(
                '{}_conv{}'.format(self.name, idx),
                blocktype(self.in_channels * width_mult,
                          self.out_channels,
                          kernel_size=2,
                          stride=2,
                          padding=0))
            branch.add_module('{}_bn{}'.format(self.name, idx),
                              nn.BatchNorm1d(self.out_channels))
            branch.add_module('{}_activation{}'.format(self.name, idx),
                              activation)
            return branch

        # Middle branch sees one extra input stream (3x vs 2x channels).
        self.conv0 = build_branch(0, 2)
        self.conv1 = build_branch(1, 3)
        self.conv2 = build_branch(2, 2)
Example no. 5
0
    def __init__(self,
                 size,
                 dim,
                 batch_size=64,
                 enc_size=100,
                 kernel_size=2,
                 axis_file='rpt_axis.npy',
                 name="PointSeg"):
        """U-Net-style multi-resolution encoder/decoder over 1-D point data.

        Builds a MultiResConv1d encoder that downsamples from ``size`` to 64,
        then a MultiResConvTranspose1d decoder (with doubled input channels
        for skip connections after the first layer) that upsamples back, and
        a final 1x1 fusion head producing 3 output channels.

        Args:
            size: input/output sequence length (assumed a power of two times
                64 -- TODO confirm).
            dim: dimensionality of the representation (stored).
            batch_size: mini-batch size; also fixes the shape of ``self.z``.
            enc_size: latent encoding size (stored; not used in this ctor).
            kernel_size: nominal conv kernel size (stored; the derived
                ``padding`` below is never used).
            axis_file: filename of an axis array (stored nowhere here;
                presumably consumed elsewhere -- verify against callers).
            name: model name forwarded to the base class.

        NOTE(review): ``self.z`` is a fixed random Variable moved to CUDA at
        construction time -- this requires a GPU and is not a learnable
        nn.Parameter (the commented-out line above it was). ``Variable`` is
        deprecated in modern PyTorch. Division below uses Py2-style ``/``;
        under Python 3, ``padding`` and ``current_size`` become floats.
        """
        super(UNetMRTDecoder, self).__init__(name)

        self.init_channels = 128
        self.size = size
        self.dim = dim
        self.batch_size = batch_size
        self.kernel_size = kernel_size
        self.enc_size = enc_size
        self.enc_modules = nn.ModuleList()
        self.dec_modules = nn.ModuleList()
        self.upsample = Ops.NNUpsample1d()
        self.pool = nn.AvgPool1d(kernel_size=4, stride=4)
        #self.z = nn.Parameter(torch.randn(self.size*3).view(batch_size, 3, -1))
        # Fixed random input of shape (batch_size, 3, size) on the GPU.
        self.z = Variable(torch.randn(self.size * 3).view(batch_size, 3,
                                                          -1)).cuda()

        #custom_nfilters = [3, 128, 128, 128, 256, 265, 256,
        #        512, 512, 512, 1024, 1024, 2048]
        # Encoder channel schedule, indexed by encoder layer.
        custom_nfilters = [
            3, 4, 8, 16, 32, 64, 128, 128, 128, 128, 256, 256, 256
        ]
        custom_nfilters = np.array(custom_nfilters)
        #custom_nfilters[1:] /= 4

        # ---- Encoder: halve the resolution until it reaches 64. ----
        current_size = self.size
        layer_num = 1
        # NOTE(review): `padding` is computed but never used below (and is a
        # float under Python 3).
        padding = (self.kernel_size - 1) / 2
        n_channels = []
        while current_size > 64:
            in_channels = custom_nfilters[layer_num - 1]
            out_channels = custom_nfilters[layer_num]
            conv_enc = MultiResConv1d('down{}'.format(layer_num), in_channels,
                                      out_channels)
            #            conv_enc = nn.Sequential()
            #            conv_enc.add_module('conv{}'.format(layer_num),
            #                    nn.Conv1d(in_channels, out_channels, self.kernel_size,
            #                        stride=2,
            #                        padding=padding))
            #                        #axis=self.axis_on_level(layer_num-1)))
            #            conv_enc.add_module('bn{}'.format(layer_num),
            #                    nn.BatchNorm1d(out_channels))
            #            conv_enc.add_module('lrelu{}'.format(layer_num),
            #                    nn.LeakyReLU(0.2, inplace=True))
            current_size /= 2
            in_channels = out_channels
            # Record widths so the decoder can mirror them (reversed below).
            n_channels.append(out_channels)
            if out_channels < 1024:
                out_channels *= 2
            layer_num += 1

            self.enc_modules.append(conv_enc)

        # ---- Decoder: mirror the encoder widths, doubling resolution. ----
        n_channels.reverse()
        current_size = 64
        layer_num = 1
        padding = (self.kernel_size - 1) / 2
        while current_size < self.size // 2:
            if layer_num == 1:
                in_channels = n_channels[layer_num - 1]
            else:
                # Doubled input width: skip connection concatenated with the
                # previous decoder output (presumably in forward() -- verify).
                in_channels = n_channels[layer_num - 1] * 2
            out_channels = n_channels[layer_num]

            conv_dec = MultiResConvTranspose1d('up{}'.format(layer_num),
                                               in_channels, out_channels)
            #            conv_dec = nn.Sequential()
            #            conv_dec.add_module('conv{}'.format(layer_num),
            #                    nn.ConvTranspose1d(in_channels,
            #                        out_channels,
            #                        self.kernel_size,
            #                        stride=2,
            #                        padding=padding))
            #            conv_dec.add_module('bn{}'.format(layer_num),
            #                    nn.BatchNorm1d(out_channels))
            #            conv_dec.add_module('relu{}'.format(layer_num),
            #                    nn.ReLU(inplace=True))

            current_size *= 2
            in_channels = out_channels
            layer_num += 1

            self.dec_modules.append(conv_dec)

        # Final upsampling block fixed at 256 output channels.
        conv_dec = MultiResConvTranspose1d('up{}'.format(layer_num),
                                           in_channels, 256)
        self.dec_modules.append(conv_dec)

        #        conv_dec = nn.Sequential()
        #        conv_dec.add_module('conv{}'.format(layer_num),
        #                nn.ConvTranspose1d(in_channels, 256, self.kernel_size,
        #                    stride=2,
        #                    padding=padding))
        #        conv_dec.add_module('bn{}'.format(layer_num),
        #                nn.BatchNorm1d(256))
        #        conv_dec.add_module('relu{}'.format(layer_num),
        #                nn.ReLU(inplace=True))

        # 1x1 fusion head: 3 resolution streams (256*3) -> 128 -> 3 channels.
        self.final_conv = nn.Sequential()
        self.final_conv.add_module(
            'final_conv1',
            nn.ConvTranspose1d(256 * 3, 128, 1, stride=1, padding=0))
        self.final_conv.add_module('bn_final', nn.BatchNorm1d(128))
        self.final_conv.add_module('relu_final', nn.ReLU(inplace=True))
        self.final_conv.add_module(
            'final_conv2', nn.ConvTranspose1d(128, 3, 1, stride=1, padding=0))
        self.final_conv.add_module('tanh_final', nn.Tanh())
Example no. 6
0
    def __init__(self,
                 size,
                 dim,
                 batch_size=64,
                 enc_size=100,
                 kernel_size=2,
                 reg_fn=NormalReg(),
                 noise=0,
                 name="MLVAE"):
        """Multi-resolution VAE: MultiResConv1d encoder down to ``last_size``,
        FC bottleneck of ``enc_size``, MultiResConvTranspose1d decoder back
        to ``size``, and a 1x1 fusion head emitting 3 channels.

        Args:
            size: input/output sequence length (assumed ``last_size`` times a
                power of two -- TODO confirm).
            dim: dimensionality of the representation (stored).
            batch_size: expected mini-batch size.
            enc_size: latent bottleneck dimensionality.
            kernel_size: nominal conv kernel size (stored).
            reg_fn: latent regularizer (stored; presumably applied in the
                training loop).
            noise: scale factor for encoder noise injection.
            name: model name forwarded to the base class.
        """
        super(MultiResVAE, self).__init__(name)

        self.reg_fn = reg_fn

        self.size = size
        self.dim = dim
        self.enc_size = enc_size
        self.batch_size = batch_size
        self.kernel_size = kernel_size
        self.enc_modules = nn.ModuleList()
        self.dec_modules = nn.ModuleList()
        self.upsample = Ops.NNUpsample1d()
        self.pool = nn.MaxPool1d(kernel_size=4, stride=4)
        self.noise_factor = noise

        # NOTE(review): uninitialized buffer -- presumably filled (e.g. via
        # normal_()) in forward(); verify before relying on its contents.
        self.enc_noise = torch.FloatTensor(self.batch_size, self.enc_size)

        # Encoder channel schedule; halve every width after the input width.
        # Floor division is required: `/= 2` on an integer NumPy array raises
        # TypeError under Python 3 (true_divide cannot cast its float output
        # back into the int array in place).
        custom_nfilters = [3, 32, 64, 128, 256, 512, 512, 1024, 1024, 1024]
        custom_nfilters = np.array(custom_nfilters)
        custom_nfilters[1:] //= 2
        self.last_size = 16

        # NOTE(review): second uninitialized noise buffer; appears redundant
        # with self.enc_noise -- confirm which one forward() uses.
        self.noise = torch.FloatTensor(self.batch_size, self.enc_size)

        # ---- Encoder: halve the resolution until it reaches last_size. ----
        current_size = self.size
        layer_num = 1
        n_channels = []
        n_channels.append(custom_nfilters[layer_num - 1])
        while current_size > self.last_size:
            in_channels = custom_nfilters[layer_num - 1]
            out_channels = custom_nfilters[layer_num]
            conv_enc = MultiResConv1d('down{}'.format(layer_num), in_channels,
                                      out_channels)
            current_size //= 2
            in_channels = out_channels
            n_channels.append(out_channels)
            layer_num += 1

            self.enc_modules.append(conv_enc)

        # FC bottleneck: 3 resolution streams of (last_size x in_channels)
        # flattened down to enc_size, and back up for the decoder.
        self.enc_fc = nn.Linear(3 * self.last_size * in_channels,
                                self.enc_size)
        #self.enc_fc_mean = nn.Linear(3*self.last_size*in_channels, self.enc_size)
        #self.enc_fc_var = nn.Linear(3*self.last_size*in_channels, self.enc_size)
        self.dec_fc = nn.Linear(self.enc_size, self.last_size * n_channels[-1])

        # ---- Decoder: mirror the encoder widths, doubling resolution. ----
        self.final_feature = 128
        n_channels.reverse()
        n_channels[-1] = self.final_feature
        current_size = self.last_size
        layer_num = 1
        while current_size < self.size:
            in_channels = n_channels[layer_num - 1]
            out_channels = n_channels[layer_num]
            conv_dec = MultiResConvTranspose1d('up{}'.format(layer_num),
                                               in_channels, out_channels)
            current_size *= 2
            layer_num += 1

            self.dec_modules.append(conv_dec)

        # 1x1 fusion head: 3 resolution streams -> 128 -> 3 output channels.
        self.final_conv = nn.Sequential()
        self.final_conv.add_module(
            'final_conv1',
            nn.ConvTranspose1d(self.final_feature * 3,
                               128,
                               1,
                               stride=1,
                               padding=0))
        self.final_conv.add_module('bn_final', nn.BatchNorm1d(128))
        self.final_conv.add_module('relu_final', nn.ReLU(inplace=True))
        self.final_conv.add_module(
            'final_conv2', nn.ConvTranspose1d(128, 3, 1, stride=1, padding=0))
        self.final_conv.add_module('tanh_final', nn.Tanh())