Example #1
    def __init__(self, class_num, full_scale, m=16, dimension=3):
        nn.Module.__init__(self)
        self.dimension = dimension
        self.input = scn.InputLayer(dimension, full_scale, mode=4)
        self.input_s = scn.InputLayer(dimension, full_scale, mode=4)
        self.down_in = scn.SubmanifoldConvolution(dimension, 1, m, 3, False)
        self.down_in_s = scn.SubmanifoldConvolution(dimension, 1, m, 3, False)
        self.main_block1 = self.block(m, m, 2, 1)
        self.main_block2 = self.block(m, 2 * m, 1, 2)
        self.main_block3 = self.block(2 * m, 3 * m, 1, 2)
        self.main_block4 = self.block(3 * m, 4 * m, 1, 2)
        self.main_block5 = self.block(4 * m, 5 * m, 1, 2)
        self.main_block6 = self.block(5 * m, 6 * m, 1, 2)
        self.main_block7 = self.block(6 * m, 7 * m, 2, 2)
        self.main_block8 = self.block(7 * m, 7 * m, 2, 1)

        self.support_block1 = self.block(m, m, 2, 1)
        self.support_block2 = self.block(m, 2 * m, 1, 2)
        self.support_block3 = self.block(2 * m, 3 * m, 1, 2)
        self.support_block4 = self.block(3 * m, 4 * m, 1, 2)
        self.support_block5 = self.block(4 * m, 5 * m, 1, 2)
        self.support_block6 = self.block(5 * m, 6 * m, 1, 2)
        self.support_block7 = self.block(6 * m, 7 * m, 1, 2)
        self.support_block8 = self.block(7 * m, 7 * m, 1, 1)

        self.support_block2_tune = self.guide_tune(dimension, 2 * m, 2 * m, 1,
                                                   False)
        self.support_block2_out = GlobalMeanAttentionPooling(dimension)
        self.support_block3_tune = self.guide_tune(dimension, 4 * m, 4 * m, 1,
                                                   False)
        self.support_block3_out = GlobalMeanAttentionPooling(dimension)
        self.support_block4_tune = self.guide_tune(dimension, 7 * m, 7 * m, 1,
                                                   False)
        self.support_block4_out = GlobalMeanAttentionPooling(dimension)

        self.global_add2 = GlobalMaskLayer(dimension)
        self.global_add3 = GlobalMaskLayer(dimension)
        self.global_add4 = GlobalMaskLayer(dimension)

        self.spatial_pick = DistMatchLayer_v4(dimension, 7 * m, topk=3)
        # self.join_sub = scn.JoinTable()
        self.tune_sub = self.guide_tune(dimension, 14 * m, 7 * m, 1, False)

        self.deconv7 = self.decoder(7 * m, 6 * m)
        self.join6 = scn.JoinTable()
        self.deconv6 = self.decoder(12 * m, 5 * m)
        self.deconv5 = self.decoder(5 * m, 4 * m)
        self.join4 = scn.JoinTable()
        self.deconv4 = self.decoder(8 * m, 3 * m)
        self.deconv3 = self.decoder(3 * m, 2 * m)
        self.join2 = scn.JoinTable()
        self.deconv2 = self.decoder(4 * m, 2 * m)
        # self.deconv1 = self.decoder(2 * m, m)

        self.output = scn.OutputLayer(dimension)
        self.linear = nn.Linear(2 * m, class_num)
Example #2
def MultiscaleShapeContext(dimension,
                           n_features=1,
                           n_layers=3,
                           shape_context_size=3,
                           downsample_size=2,
                           downsample_stride=2,
                           bn=True):
    m = sparseconvnet.Sequential()
    if n_layers == 1:
        m.add(
            sparseconvnet.ShapeContext(dimension, n_features,
                                       shape_context_size))
    else:
        m.add(sparseconvnet.ConcatTable().add(
            sparseconvnet.ShapeContext(
                dimension, n_features, shape_context_size)).add(
                    sparseconvnet.Sequential(
                        sparseconvnet.AveragePooling(dimension,
                                                     downsample_size,
                                                     downsample_stride),
                        MultiscaleShapeContext(dimension, n_features,
                                               n_layers - 1,
                                               shape_context_size,
                                               downsample_size,
                                               downsample_stride, False),
                        sparseconvnet.UnPooling(dimension, downsample_size,
                                                downsample_stride)))).add(
                                                    sparseconvnet.JoinTable())
    if bn:
        m.add(
            sparseconvnet.BatchNormalization(shape_context_size**dimension *
                                             n_features * n_layers))
    return m
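A minimal usage sketch for the function above; the 2D spatial size, coordinates, and feature values are illustrative assumptions, and the forward call assumes the usual scn input convention (batch index in the last coordinate column):

import torch
import sparseconvnet

inp = sparseconvnet.InputLayer(2, 64, mode=3)
coords = torch.LongTensor([[10, 10, 0], [20, 30, 0]])  # (y, x, batch_idx)
feats = torch.ones(2, 1)

msc = MultiscaleShapeContext(dimension=2, n_features=1, n_layers=3,
                             shape_context_size=3)
out = msc(inp([coords, feats]))
# Per-site output width: shape_context_size**dimension * n_features * n_layers
# = 3**2 * 1 * 3 = 27, which is what the final BatchNormalization expects.
print(out.features.shape)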
Example #3
 def U(nPlanes, n_input_planes=-1):  # Recursive function
     m = scn.Sequential()
     for i in range(reps):
         block(m, n_input_planes if n_input_planes != -1 else nPlanes[0],
               nPlanes[0])
         n_input_planes = -1
     if len(nPlanes) > 1:
         m.add(scn.ConcatTable().add(scn.Identity()).add(
             scn.Sequential().add(
                 scn.BatchNormLeakyReLU(
                     nPlanes[0], leakiness=leakiness)).add(
                         scn.Convolution(dimension, nPlanes[0], nPlanes[1],
                                         downsample[0], downsample[1],
                                         False)).add(U(nPlanes[1:])).add(
                                             scn.BatchNormLeakyReLU(
                                                 nPlanes[1],
                                                 leakiness=leakiness)).add(
                                                     scn.Deconvolution(
                                                         dimension,
                                                         nPlanes[1],
                                                         nPlanes[0],
                                                         downsample[0],
                                                         downsample[1],
                                                         False))))
         m.add(scn.JoinTable())
         for i in range(reps):
             block(m, nPlanes[0] * (2 if i == 0 else 1), nPlanes[0])
     return m
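The ConcatTable/Identity pair capped by scn.JoinTable is the idiom that gives this U its skip connections: both branches keep the same set of active sites, so JoinTable can concatenate their feature channels. A stripped-down sketch of that idiom, with illustrative sizes and coordinates:

import torch
import sparseconvnet as scn

dim, n = 2, 8
skip = scn.Sequential().add(
    scn.ConcatTable().add(scn.Identity()).add(
        scn.SubmanifoldConvolution(dim, n, n, 3, False))).add(
    scn.JoinTable())  # concatenates the two branches -> 2*n features per site

inp = scn.InputLayer(dim, 32, mode=3)
stem = scn.SubmanifoldConvolution(dim, 1, n, 3, False)
coords = torch.LongTensor([[5, 5, 0], [6, 7, 0]])  # batch index in last column
x = stem(inp([coords, torch.ones(2, 1)]))
y = skip(x)
print(y.features.shape)  # torch.Size([2, 16])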
Example #4
 def v(depth, nPlanes):
     m = scn.Sequential()
     if depth == 1:
         for _ in range(reps):
             res(m, nPlanes, nPlanes, dropout_p)
     else:
         m = scn.Sequential()
         for _ in range(reps):
             res(m, nPlanes, nPlanes, dropout_p)
         if dropout_width:
             m.add(scn.ConcatTable().add(scn.Identity()).add(
                 scn.Sequential().add(scn.BatchNormReLU(nPlanes)).add(
                     #In place of Maxpooling
                     scn.Convolution(
                         dimension, nPlanes, nPlanes, 2, 2,
                         False)).add(scn.Dropout(dropout_p)).add(
                             v(depth - 1, nPlanes)).add(
                                 scn.BatchNormReLU(nPlanes)).add(
                                     scn.Deconvolution(
                                         dimension, nPlanes, nPlanes, 2, 2,
                                         False))))
         else:
             m.add(scn.ConcatTable().add(scn.Identity()).add(
                 scn.Sequential().add(scn.BatchNormReLU(nPlanes)).add(
                     scn.Convolution(dimension, nPlanes, nPlanes, 2, 2,
                                     False)).add(v(depth - 1, nPlanes)).add(
                                         scn.BatchNormReLU(nPlanes)).add(
                                             scn.Deconvolution(
                                                 dimension, nPlanes,
                                                 nPlanes, 2, 2, False))))
         m.add(scn.JoinTable())
         for i in range(reps):
             res(m, 2 * nPlanes if i == 0 else nPlanes, nPlanes, dropout_p)
     return m
Example #5
    def __init__(self, cfg, name='yresnet_decoder'):
        super(YResNetDecoder, self).__init__(cfg, name='network_base')
        self.model_config = cfg[name]

        self.reps = self.model_config.get('reps',
                                          2)  # Conv block repetition factor
        self.kernel_size = self.model_config.get('kernel_size', 2)
        self.num_strides = self.model_config.get('num_strides', 5)
        self.num_filters = self.model_config.get('filters', 16)
        self.nPlanes = [
            i * self.num_filters for i in range(1, self.num_strides + 1)
        ]
        self.downsample = [self.kernel_size, 2]  # [filter size, filter stride]
        self.concat = scn.JoinTable()
        self.add = scn.AddTable()
        dropout_prob = self.model_config.get('dropout_prob', 0.5)

        self.encoder_num_filters = self.model_config.get(
            'encoder_num_filters', None)
        if self.encoder_num_filters is None:
            self.encoder_num_filters = self.num_filters
        self.encoder_nPlanes = [
            i * self.encoder_num_filters
            for i in range(1, self.num_strides + 1)
        ]

        # Define Sparse YResNet Decoder.
        self.decoding_block = scn.Sequential()
        self.decoding_conv = scn.Sequential()
        for idx, i in enumerate(list(range(self.num_strides - 2, -1, -1))):
            if idx == 0:
                m = scn.Sequential().add(
                    scn.BatchNormLeakyReLU(self.encoder_nPlanes[i + 1],
                                           leakiness=self.leakiness)).add(
                                               scn.Deconvolution(
                                                   self.dimension,
                                                   self.encoder_nPlanes[i + 1],
                                                   self.nPlanes[i],
                                                   self.downsample[0],
                                                   self.downsample[1],
                                                   self.allow_bias))
            else:
                m = scn.Sequential().add(
                    scn.BatchNormLeakyReLU(
                        self.nPlanes[i + 1], leakiness=self.leakiness)).add(
                            scn.Deconvolution(self.dimension,
                                              self.nPlanes[i + 1],
                                              self.nPlanes[i],
                                              self.downsample[0],
                                              self.downsample[1],
                                              self.allow_bias)).add(
                                                  scn.Dropout(p=dropout_prob))
            self.decoding_conv.add(m)
            m = scn.Sequential()
            for j in range(self.reps):
                self._resnet_block(m, self.nPlanes[i] + (self.encoder_nPlanes[i] \
                    if j == 0 else 0), self.nPlanes[i])
            self.decoding_block.add(m)
Example #6
    def iter_unet(self, n_input_planes):
        # different from scn implementation, which is a recursive function
        enc_convs = scn.Sequential()
        dec_convs = scn.Sequential()
        for n_planes_in, n_planes_out in zip(self.n_planes[:-1],
                                             self.n_planes[1:]):
            # encode
            conv1x1 = scn.Sequential()
            for i in range(self.block_reps):
                conv1x1.add(
                    self.block(
                        n_input_planes
                        if n_input_planes != -1 else n_planes_in, n_planes_in))
                n_input_planes = -1

            conv = scn.Sequential()
            conv.add(
                scn.BatchNormLeakyReLU(n_planes_in, leakiness=self.leakiness))
            conv.add(
                scn.Convolution(self.dimension, n_planes_in, n_planes_out,
                                self.downsample[0], self.downsample[1], False))
            enc_conv = scn.Sequential()
            enc_conv.add(conv1x1)
            enc_conv.add(conv)
            enc_convs.add(enc_conv)

            # decode(corresponding stage of encode; symmetric with U)
            b_join = scn.Sequential()  # before_join
            b_join.add(
                scn.BatchNormLeakyReLU(n_planes_out, leakiness=self.leakiness))
            b_join.add(
                scn.Deconvolution(self.dimension, n_planes_out, n_planes_in,
                                  self.downsample[0], self.downsample[1],
                                  False))
            join_table = scn.JoinTable()
            a_join = scn.Sequential()  # after_join
            for i in range(self.block_reps):
                a_join.add(
                    self.block(n_planes_in * (2 if i == 0 else 1),
                               n_planes_in))
            dec_conv = scn.Sequential()
            dec_conv.add(b_join)
            dec_conv.add(join_table)
            dec_conv.add(a_join)
            dec_convs.add(dec_conv)

        middle_conv = scn.Sequential()
        for i in range(self.block_reps):
            middle_conv.add(
                self.block(
                    n_input_planes if n_input_planes != -1 else
                    self.n_planes[-1], self.n_planes[-1]))
            n_input_planes = -1

        return enc_convs, middle_conv, dec_convs
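Only the module construction is shown above; a hedged sketch of how the three returned containers might be chained in a forward pass follows (the repository's real forward() is not included on this page, so the stack handling and indexing are assumptions):

def unet_forward(enc_convs, middle_conv, dec_convs, x):
    # x is a SparseConvNetTensor produced by the network's input/stem layers.
    skips = []
    for enc_conv in enc_convs:
        x = enc_conv[0](x)          # conv1x1: the repeated residual blocks
        skips.append(x)             # keep pre-downsample features for the skip
        x = enc_conv[1](x)          # conv: BatchNormLeakyReLU + strided Convolution
    x = middle_conv(x)
    for i in range(len(dec_convs) - 1, -1, -1):   # deepest stage first
        dec_conv = dec_convs[i]
        x = dec_conv[0](x)                  # b_join: BatchNormLeakyReLU + Deconvolution
        x = dec_conv[1]([x, skips[i]])      # join_table: scn.JoinTable on the skip pair
        x = dec_conv[2](x)                  # a_join: post-concat blocks
    return x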
Example #7
    def __init__(self):
        super(CNN, self).__init__()
        ###############################
        # Hardcoded settings
        ###############################
        self._dimension = 3
        reps = 2
        kernel_size = 2
        num_strides = 7
        init_num_features = 8
        nInputFeatures = 1
        spatial_size = 128  #padding the rest for 169 PMTs
        num_classes = 2  # good versus ghost

        nPlanes = [(2**i) * init_num_features for i in range(0, num_strides)
                   ]  # every layer doubles the number of features
        downsample = [kernel_size, 2]
        leakiness = 0

        #################################
        # Input layer
        #################################
        self.input = scn.Sequential().add(
            scn.InputLayer(self._dimension, spatial_size, mode=3)).add(
                scn.SubmanifoldConvolution(self._dimension, nInputFeatures,
                                           init_num_features, 3,
                                           False))  # Kernel size 3, no bias
        self.concat = scn.JoinTable()
        #################################
        # Encode layers
        #################################
        self.encoding_conv = scn.Sequential()
        for i in range(num_strides):
            if i < 4:  #hardcoded
                self.encoding_conv.add(
                    scn.BatchNormLeakyReLU(
                        nPlanes[i], leakiness=leakiness)).add(
                            scn.Convolution(self._dimension, nPlanes[i],
                                            nPlanes[i + 1], downsample[0],
                                            downsample[1], False))
            elif i < num_strides - 1:
                self.encoding_conv.add(scn.MaxPooling(self._dimension, 2, 2))

        self.output = scn.Sequential().add(
            scn.SparseToDense(self._dimension, nPlanes[-1]))
        ###################################
        # Final linear layer
        ###################################
        self.deepest_layer_num_features = int(
            nPlanes[-1] * np.power(spatial_size / (2**(num_strides - 1)), 3.))
        self.classifier = torch.nn.Sequential(
            torch.nn.ReLU(),
            torch.nn.Linear(self.deepest_layer_num_features, 2),
        )
Example #8
    def make_decoder_layer(self,
                           ilayer,
                           ninputchs,
                           noutputchs,
                           nreps,
                           leakiness=0.01,
                           downsample=[2, 2],
                           islast=False):
        """
        defines two layers:
          1) the deconv layer pre-concat
          2) residual blocks post-concat

        inputs
        ------
        ilayer     [int]: layer ID 
        ninputchs  [int]: number of features going into layer
        noutputchs [int]: number of features output by layer
        nreps      [int]: number of times residual modules should repeat
        leakiness  [float]: leakiness of LeakyReLU activation functions
        downsample [list of ints size 2]: upsampling factor in width and height
        islast     [bool]: last decoder layer does not have skip connection
        """

        # resnet block
        decode_blocks = create_resnet_layer(nreps,
                                            ninputchs,
                                            2 * noutputchs,
                                            downsample=downsample)

        # deconv
        decode_blocks.add(
            scn.BatchNormLeakyReLU(2 * noutputchs, leakiness=leakiness))
        decode_blocks.add(
            scn.Deconvolution(self.dimension, 2 * noutputchs, noutputchs,
                              downsample[0], downsample[1], False))
        setattr(self, "deconv%d" % (ilayer), decode_blocks)
        if self._verbose:
            print "DecoderLayer[", ilayer, "] inputchs[", ninputchs,
            print " -> resout[", 2 * noutputchs, "] -> deconv output[", noutputchs, "]"

        if not islast:
            # joiner for skip connections
            joiner = scn.JoinTable()
            setattr(self, "skipjoin%d" % (ilayer), joiner)
        else:
            joiner = None
        return decode_blocks, joiner
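For orientation, the returned pair is presumably consumed once per decoder stage roughly as sketched below; x is the running decoder tensor and skip_feats the matching encoder-stage tensor (these names are illustrative, not from the source):

def decode_step(decode_blocks, joiner, x, skip_feats):
    x = decode_blocks(x)              # residual blocks, BN/LeakyReLU, then Deconvolution
    if joiner is not None:            # islast=True stages return joiner=None
        x = joiner([x, skip_feats])   # scn.JoinTable concatenates the feature channels
    return x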
Example #9
 def U(nPlanes):  # Recursive function
     m = scn.Sequential()
     if len(nPlanes) == 1:
         for _ in range(reps):
             block(m, nPlanes[0], nPlanes[0])
     else:
         m = scn.Sequential()
         for _ in range(reps):
             block(m, nPlanes[0], nPlanes[0])
         m.add(scn.ConcatTable().add(scn.Identity()).add(
             scn.Sequential().add(scn.BatchNormReLU(nPlanes[0])).add(
                 scn.Convolution(dimension, nPlanes[0], nPlanes[1],
                                 downsample[0], downsample[1],
                                 False)).add(U(nPlanes[1:])).add(
                                     scn.UnPooling(dimension, downsample[0],
                                                   downsample[1]))))
         m.add(scn.JoinTable())
     return m
Example #10
    def make_decoder_layer(self,
                           ilayer,
                           ninputchs,
                           noutputchs,
                           nreps,
                           leakiness=0.01,
                           downsample=[2, 2],
                           islast=False):
        """
        defines two layers:
          1) the deconv layer pre-concat
          2) residual blocks post-concat

        inputs
        ------
        ninputchs: number of features going into layer
        noutputchs: number of features output by layer
        """

        # resnet block
        decode_blocks = create_resnet_layer(nreps,
                                            ninputchs,
                                            2 * noutputchs,
                                            downsample=downsample)

        # deconv
        decode_blocks.add(
            scn.BatchNormLeakyReLU(2 * noutputchs, leakiness=leakiness))
        decode_blocks.add(
            scn.Deconvolution(self.dimension, 2 * noutputchs, noutputchs,
                              downsample[0], downsample[1], False))
        setattr(self, "deconv%d" % (ilayer), decode_blocks)
        if self._verbose:
            print "DecoderLayer[", ilayer, "] inputchs[", ninputchs,
            print " -> resout[", 2 * noutputchs, "] -> deconv output[", noutputchs, "]"

        if not islast:
            # joiner for skip connections
            joiner = scn.JoinTable()
            setattr(self, "skipjoin%d" % (ilayer), joiner)
        else:
            joiner = None
        return decode_blocks, joiner
Example #11
def get_channels_combiner(
        num_dims, sparse, input_channels_list, concat=True,
        input_strideprod_list=None):

    if input_strideprod_list is not None:
        assert (
            input_strideprod_list[0] == np.array(input_strideprod_list)).all()

    stride = np.full(num_dims, 1)
    if concat:
        output_channels = sum(input_channels_list)
        if sparse:
            layer = scn.JoinTable()
        else:
            layer = DenseJoinTable()
    else:
        output_channels = input_channels_list[0]
        assert (np.array(input_channels_list) == output_channels).all()
        if sparse:
            layer = scn.AddTable()
        else:
            layer = DenseAddTable()

    return sparse, stride, output_channels, layer
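A brief usage sketch (assuming numpy, sparseconvnet, and the function above are in scope): concat=True returns an scn.JoinTable and the summed channel count, while concat=False returns an scn.AddTable and requires all inputs to share the same width.

sparse, stride, out_ch, layer = get_channels_combiner(
    num_dims=3, sparse=True, input_channels_list=[16, 16], concat=True)
print(out_ch, type(layer).__name__)   # 32 JoinTable  (channels are concatenated)

sparse, stride, out_ch, layer = get_channels_combiner(
    num_dims=3, sparse=True, input_channels_list=[16, 16], concat=False)
print(out_ch, type(layer).__name__)   # 16 AddTable   (features are summed elementwise)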
Example #12
    def __init__(self, inputshape, reps, nin_features, nout_features, nplanes):
        nn.Module.__init__(self)
        """
        inputs
        ------
        inputshape [list of int]: dimensions of the matrix or image
        reps [int]: number of residual modules per layer (for both encoder and decoder)
        nin_features [int]: number of features in the first convolutional layer
        nout_features [int]: number of features that feed into the regression layer
        nplanes [int]: the depth of the U-Net
        """
        # set parameters
        self.dimensions = 2  # not playing with 3D for now

        # input shape: LongTensor, tuple, or list. Handled by InputLayer
        # size of each spatial dimesion
        self.inputshape = inputshape
        if len(self.inputshape) != self.dimensions:
            raise ValueError(
                "expected inputshape to contain size of 2 dimensions only." +
                "given %d values" % (len(self.inputshape)))

        # mode variable: how to deal with repeated data
        self.mode = 0

        # nfeatures
        self.nfeatures = nin_features
        self.nout_features = nout_features

        # plane structure
        self.nPlanes = [self.nfeatures * 2**(n + 1) for n in range(nplanes)]
        print(self.nPlanes)

        # repetitions (per plane)
        self.reps = reps

        # residual blocks
        self.residual_blocks = True

        # need encoder for both source and target
        # then cat tensor
        # and produce one decoder for flow, another decoder for visibility

        # model:
        # input
        self.src_inputlayer = scn.InputLayer(self.dimensions,
                                             self.inputshape,
                                             mode=self.mode)
        self.tar1_inputlayer = scn.InputLayer(self.dimensions,
                                              self.inputshape,
                                              mode=self.mode)
        self.tar2_inputlayer = scn.InputLayer(self.dimensions,
                                              self.inputshape,
                                              mode=self.mode)

        # stem
        self.src_stem = scn.SubmanifoldConvolution(self.dimensions, 1,
                                                   self.nfeatures, 3, False)
        self.tar1_stem = scn.SubmanifoldConvolution(self.dimensions, 1,
                                                    self.nfeatures, 3, False)
        self.tar2_stem = scn.SubmanifoldConvolution(self.dimensions, 1,
                                                    self.nfeatures, 3, False)

        # encoders
        self.source_encoder = SparseEncoder("src", self.reps, self.nfeatures,
                                            self.nPlanes)
        self.target1_encoder = SparseEncoder("tar1", self.reps, self.nfeatures,
                                             self.nPlanes)
        self.target2_encoder = SparseEncoder("tar2", self.reps, self.nfeatures,
                                             self.nPlanes)

        # concat
        self.join_enclayers = []
        for ilayer in range(len(self.nPlanes)):
            self.join_enclayers.append(scn.JoinTable())
            setattr(self, "join_enclayers%d" % (ilayer),
                    self.join_enclayers[ilayer])

        # calculate decoder planes
        self.decode_layers_inchs = []
        self.decode_layers_outchs = []
        for ilayer, enc_outchs in enumerate(reversed(self.nPlanes)):
            self.decode_layers_inchs.append(4 *
                                            enc_outchs if ilayer > 0 else 3 *
                                            enc_outchs)
            self.decode_layers_outchs.append(self.nPlanes[-(1 + ilayer)] // 2)
        print("decode in chs: ", self.decode_layers_inchs)
        print("decode out chs: ", self.decode_layers_outchs)

        # decoders
        self.flow1_decoder = SparseDecoder("flow1", self.reps,
                                           self.decode_layers_inchs,
                                           self.decode_layers_outchs)
        self.flow2_decoder = SparseDecoder("flow2", self.reps,
                                           self.decode_layers_inchs,
                                           self.decode_layers_outchs)

        # last deconv concat
        self.flow1_concat = scn.JoinTable()
        self.flow2_concat = scn.JoinTable()

        # final feature set convolution
        flow_resblock_inchs = 3 * self.nfeatures + self.decode_layers_outchs[-1]
        self.flow1_resblock = create_resnet_layer(self.reps,
                                                  flow_resblock_inchs,
                                                  self.nout_features)
        self.flow2_resblock = create_resnet_layer(self.reps,
                                                  flow_resblock_inchs,
                                                  self.nout_features)

        # regression layer
        self.flow1_out = scn.SubmanifoldConvolution(self.dimensions,
                                                    self.nout_features, 1, 1,
                                                    True)
        self.flow2_out = scn.SubmanifoldConvolution(self.dimensions,
                                                    self.nout_features, 1, 1,
                                                    True)
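Worked channel arithmetic for the decoder bookkeeping above, using the illustrative values nin_features=16 and nplanes=3 (not taken from the source): the factor 3 accounts for the three encoder streams (src/tar1/tar2) joined with scn.JoinTable, and the factor 4 adds the previous decoder layer's output, whose width (half of the level above) equals the current level's encoder width.

nfeatures = 16
nPlanes = [nfeatures * 2 ** (n + 1) for n in range(3)]   # [32, 64, 128]
inchs, outchs = [], []
for ilayer, enc_outchs in enumerate(reversed(nPlanes)):
    inchs.append(4 * enc_outchs if ilayer > 0 else 3 * enc_outchs)
    outchs.append(nPlanes[-(1 + ilayer)] // 2)
print(inchs)    # [384, 256, 128]
print(outchs)   # [64, 32, 16]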
Example #13
def SparseVggNet(dimension, nInputPlanes, layers):
    """
    VGG style nets
    Use submanifold convolutions
    Also implements 'Plus'-augmented nets
    """
    nPlanes = nInputPlanes
    m = scn.Sequential()
    for x in layers:
        if x == 'MP':
            m.add(scn.MaxPooling(dimension, 3, 2))
        elif x[0] == 'MP':
            m.add(scn.MaxPooling(dimension, x[1], x[2]))
        elif x == 'C3/2':
            m.add(scn.Convolution(dimension, nPlanes, nPlanes, 3, 2, False))
            m.add(scn.BatchNormReLU(nPlanes))
        elif x[0] == 'C3/2':
            m.add(scn.Convolution(dimension, nPlanes, x[1], 3, 2, False))
            nPlanes = x[1]
            m.add(scn.BatchNormReLU(nPlanes))
        elif x[0] == 'C' and len(x) == 2:
            m.add(
                scn.SubmanifoldConvolution(dimension, nPlanes, x[1], 3, False))
            nPlanes = x[1]
            m.add(scn.BatchNormReLU(nPlanes))
        elif x[0] == 'C' and len(x) == 3:
            m.add(scn.ConcatTable().add(
                scn.SubmanifoldConvolution(
                    dimension, nPlanes, x[1], 3,
                    False)).add(scn.Sequential().add(
                        scn.Convolution(
                            dimension, nPlanes, x[2], 3, 2,
                            False)).add(scn.BatchNormReLU(x[2])).add(
                                scn.SubmanifoldConvolution(
                                    dimension, x[2], x[2], 3,
                                    False)).add(scn.BatchNormReLU(x[2])).add(
                                        scn.Deconvolution(
                                            dimension, x[2], x[2], 3, 2,
                                            False)))).add(scn.JoinTable())
            nPlanes = x[1] + x[2]
            m.add(scn.BatchNormReLU(nPlanes))
        elif x[0] == 'C' and len(x) == 4:
            m.add(scn.ConcatTable().add(
                scn.SubmanifoldConvolution(
                    dimension, nPlanes, x[1], 3,
                    False)).add(scn.Sequential().add(
                        scn.Convolution(
                            dimension, nPlanes, x[2], 3, 2,
                            False)).add(scn.BatchNormReLU(x[2])).add(
                                scn.SubmanifoldConvolution(
                                    dimension, x[2], x[2], 3,
                                    False)).add(scn.BatchNormReLU(x[2])).add(
                                        scn.Deconvolution(
                                            dimension, x[2], x[2], 3, 2,
                                            False))).
                  add(scn.Sequential().add(
                      scn.Convolution(
                          dimension, nPlanes, x[3],
                          3, 2, False)).add(scn.BatchNormReLU(x[3])).add(
                              scn.SubmanifoldConvolution(
                                  dimension, x[3], x[3], 3,
                                  False)).add(scn.BatchNormReLU(x[3])).add(
                                      scn.Convolution(dimension, x[3], x[3], 3,
                                                      2, False)).
                      add(scn.BatchNormReLU(x[3])).add(
                          scn.SubmanifoldConvolution(
                              dimension, x[3], x[3],
                              3, False)).add(scn.BatchNormReLU(x[3])).add(
                                  scn.Deconvolution(
                                      dimension, x[3], x[3], 3, 2,
                                      False)).add(scn.BatchNormReLU(x[3])).add(
                                          scn.SubmanifoldConvolution(
                                              dimension, x[3], x[3], 3,
                                              False)).add(
                                                  scn.BatchNormReLU(x[3])).add(
                                                      scn.Deconvolution(
                                                          dimension, x[3],
                                                          x[3], 3, 2,
                                                          False)))).add(
                                                              scn.JoinTable())
            nPlanes = x[1] + x[2] + x[3]
            m.add(scn.BatchNormReLU(nPlanes))
        elif x[0] == 'C' and len(x) == 5:
            m.add(scn.ConcatTable().add(
                scn.SubmanifoldConvolution(
                    dimension, nPlanes, x[1], 3,
                    False)).add(scn.Sequential().add(
                        scn.Convolution(
                            dimension, nPlanes, x[2], 3, 2,
                            False)).add(scn.BatchNormReLU(x[2])).add(
                                scn.SubmanifoldConvolution(
                                    dimension, x[2], x[2], 3,
                                    False)).add(scn.BatchNormReLU(x[2])).add(
                                        scn.Deconvolution(
                                            dimension, x[2], x[2], 3, 2,
                                            False))).
                  add(scn.Sequential().add(
                      scn.Convolution(
                          dimension, nPlanes, x[3],
                          3, 2, False)).add(scn.BatchNormReLU(x[3])).add(
                              scn.SubmanifoldConvolution(
                                  dimension, x[3], x[3], 3,
                                  False)).add(scn.BatchNormReLU(x[3])).add(
                                      scn.Convolution(dimension, x[3], x[3], 3,
                                                      2, False)).
                      add(scn.BatchNormReLU(x[3])).add(
                          scn.SubmanifoldConvolution(
                              dimension, x[3], x[3],
                              3, False)).add(scn.BatchNormReLU(x[3])).add(
                                  scn.Deconvolution(
                                      dimension, x[3], x[3], 3, 2,
                                      False)).add(scn.BatchNormReLU(x[3])).add(
                                          scn.SubmanifoldConvolution(
                                              dimension, x[3], x[3], 3,
                                              False)).add(
                                                  scn.BatchNormReLU(x[3])).add(
                                                      scn.Deconvolution(
                                                          dimension, x[3],
                                                          x[3], 3, 2, False))).
                  add(scn.Sequential().add(
                      scn.Convolution(
                          dimension, nPlanes,
                          x[4], 3, 2,
                          False)).add(scn.BatchNormReLU(x[4])).add(
                              scn.SubmanifoldConvolution(
                                  dimension, x[4], x[4], 3,
                                  False)).add(scn.BatchNormReLU(x[4])).add(
                                      scn.Convolution(
                                          dimension, x[4], x[4], 3,
                                          2, False)).add(
                                              scn.BatchNormReLU(x[4])).add(
                                                  scn.SubmanifoldConvolution(
                                                      dimension, x[4], x[4], 3,
                                                      False)).add(
                                                          scn.BatchNormReLU(
                                                              x[4])).
                      add(scn.Convolution(
                          dimension, x[4], x[4], 3, 2,
                          False)).add(scn.BatchNormReLU(x[4])).add(
                              scn.SubmanifoldConvolution(
                                  dimension, x[4], x[4], 3,
                                  False)).add(scn.BatchNormReLU(x[4])).add(
                                      scn.Deconvolution(
                                          dimension, x[4], x[4], 3,
                                          2, False)).add(
                                              scn.BatchNormReLU(x[4])).add(
                                                  scn.SubmanifoldConvolution(
                                                      dimension, x[4], x[4], 3,
                                                      False)).
                      add(scn.BatchNormReLU(x[4])).add(
                          scn.Deconvolution(
                              dimension, x[4], x[4], 3,
                              2, False)).add(scn.BatchNormReLU(x[4])).add(
                                  scn.SubmanifoldConvolution(
                                      dimension, x[4], x[4], 3,
                                      False)).add(scn.BatchNormReLU(x[4])).add(
                                          scn.Deconvolution(
                                              dimension, x[4], x[4], 3, 2,
                                              False)))).add(scn.JoinTable())
            nPlanes = x[1] + x[2] + x[3] + x[4]
            m.add(scn.BatchNormReLU(nPlanes))
    return m
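A hedged sketch of the layer-spec format the function accepts (the widths below are illustrative): plain 'MP' or 'C3/2' strings, ['C', n] for a submanifold convolution, and ['C', n1, n2, ...] for the 'Plus'-augmented blocks whose extra strided branches are joined back with scn.JoinTable.

vgg = SparseVggNet(2, 3, [
    ['C', 8], ['C', 8], 'MP',      # 3x3 submanifold convs + BatchNormReLU, then 3/2 max-pool
    ['C', 16], ['C', 16], 'MP',
    ['C', 16, 8], ['C', 16, 8],    # 'Plus' blocks: parallel downsampled branch, joined back
])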
Example #14
    def __init__(self, cfg, name="uresnet_clustering"):
        super(UResNet, self).__init__()
        import sparseconvnet as scn
        self._model_config = cfg['modules'][name]

        # Whether to compute ghost mask separately or not
        self._ghost = self._model_config.get('ghost', False)
        self._dimension = self._model_config.get('data_dim', 3)
        reps = self._model_config.get('reps',
                                      2)  # Conv block repetition factor
        kernel_size = self._model_config.get('kernel_size', 2)
        num_strides = self._model_config.get('num_strides', 5)
        m = self._model_config.get('filters', 16)  # Unet number of features
        nInputFeatures = self._model_config.get('features', 1)
        spatial_size = self._model_config.get('spatial_size', 512)
        num_classes = self._model_config.get('num_classes', 5)
        self._N = self._model_config.get('num_cluster_conv', 0)
        self._simpleN = self._model_config.get('simple_conv', True)
        self._add_coordinates = self._model_config.get('cluster_add_coords',
                                                       False)
        self._density_estimate = self._model_config.get(
            'density_estimate', False)

        nPlanes = [i * m for i in range(1, num_strides + 1)
                   ]  # UNet number of features per level
        downsample = [kernel_size, 2]  # [filter size, filter stride]
        self.last = None
        leakiness = 0

        def block(m, a, b):  # ResNet style blocks
            m.add(scn.ConcatTable().add(scn.Identity(
            ) if a == b else scn.NetworkInNetwork(a, b, False)).add(
                scn.Sequential().add(
                    scn.BatchNormLeakyReLU(a, leakiness=leakiness)).add(
                        scn.SubmanifoldConvolution(
                            self._dimension, a, b, 3, False)).add(
                                scn.BatchNormLeakyReLU(
                                    b, leakiness=leakiness)).add(
                                        scn.SubmanifoldConvolution(
                                            self._dimension, b, b, 3,
                                            False)))).add(scn.AddTable())

        self.input = scn.Sequential().add(
            scn.InputLayer(self._dimension, spatial_size, mode=3)).add(
                scn.SubmanifoldConvolution(self._dimension, nInputFeatures, m,
                                           3, False))  # Kernel size 3, no bias
        self.concat = scn.JoinTable()
        # Encoding
        self.bn = scn.BatchNormLeakyReLU(nPlanes[0], leakiness=leakiness)
        self.encoding_block = scn.Sequential()
        self.encoding_conv = scn.Sequential()
        module = scn.Sequential()
        for i in range(num_strides):
            module = scn.Sequential()
            for _ in range(reps):
                block(module, nPlanes[i], nPlanes[i])
            self.encoding_block.add(module)
            module2 = scn.Sequential()
            if i < num_strides - 1:
                module2.add(
                    scn.BatchNormLeakyReLU(
                        nPlanes[i], leakiness=leakiness)).add(
                            scn.Convolution(self._dimension, nPlanes[i],
                                            nPlanes[i + 1], downsample[0],
                                            downsample[1], False))
            self.encoding_conv.add(module2)
        self.encoding = module

        # Decoding
        self.decoding_conv, self.decoding_blocks = scn.Sequential(
        ), scn.Sequential()
        for i in range(num_strides - 2, -1, -1):
            inFeatures = nPlanes[i + 1] * (2 if
                                           (self._N > 0
                                            and i < num_strides - 2) else 1)
            module1 = scn.Sequential().add(
                scn.BatchNormLeakyReLU(inFeatures, leakiness=leakiness)).add(
                    scn.Deconvolution(self._dimension, inFeatures, nPlanes[i],
                                      downsample[0], downsample[1], False))
            self.decoding_conv.add(module1)
            module2 = scn.Sequential()
            for j in range(reps):
                block(module2, nPlanes[i] * (2 if j == 0 else 1), nPlanes[i])
            self.decoding_blocks.add(module2)

        # Clustering convolutions
        if self._N > 0:
            self.clustering_conv = scn.Sequential()
            for i in range(num_strides - 2, -1, -1):
                conv = scn.Sequential()
                for j in range(self._N):
                    if self._simpleN:
                        conv.add(
                            scn.SubmanifoldConvolution(
                                self._dimension, nPlanes[i] +
                                (4 if j == 0 and self._add_coordinates else 0),
                                nPlanes[i], 3, False))
                        conv.add(
                            scn.BatchNormLeakyReLU(nPlanes[i],
                                                   leakiness=leakiness))
                    else:
                        block(
                            conv, nPlanes[i] +
                            (4 if j == 0 and self._add_coordinates else 0),
                            nPlanes[i])
                self.clustering_conv.add(conv)

        outFeatures = m * (2 if self._N > 0 else 1)
        self.output = scn.Sequential().add(scn.BatchNormReLU(outFeatures)).add(
            scn.OutputLayer(self._dimension))

        self.linear = torch.nn.Linear(outFeatures, num_classes)
        if self._density_estimate:
            self._density_layer = []
            for i in range(num_strides - 2, -1, -1):
                self._density_layer.append(torch.nn.Linear(nPlanes[i], 2))
            self._density_layer = torch.nn.Sequential(*self._density_layer)
Example #15
    def __init__(self, in_channel, n_classes, batchnorm=True, dropout=0.0):
        self.in_channel = in_channel
        self.n_classes = n_classes
        super(UNet3D, self).__init__()

        self.conv1_1 = self.encoder(in_channel,
                                    32,
                                    bias=False,
                                    batchnorm=batchnorm)
        self.conv1_2 = self.encoder(32, 64, bias=False, batchnorm=batchnorm)
        self.pool1 = scn.MaxPooling(3, 2, 2)

        self.conv2_1 = self.encoder(64, 64, bias=False, batchnorm=batchnorm)
        self.conv2_2 = self.encoder(64, 128, bias=False, batchnorm=batchnorm)
        self.pool2 = scn.MaxPooling(3, 2, 2)

        self.conv3_1 = self.encoder(128,
                                    128,
                                    bias=False,
                                    filter_stride=1,
                                    filter_size=3,
                                    batchnorm=batchnorm)
        self.conv3_2 = self.encoder(128,
                                    256,
                                    bias=False,
                                    filter_stride=1,
                                    filter_size=3,
                                    batchnorm=batchnorm)
        self.pool3 = scn.MaxPooling(3, 2, 2)

        self.conv4_1 = self.encoder(256, 256, bias=False, batchnorm=batchnorm)
        self.conv4_2 = self.encoder(256, 512, bias=False, batchnorm=batchnorm)

        self.up5_1 = self.decoder(512,
                                  512,
                                  filter_size=2,
                                  filter_stride=2,
                                  bias=False)
        self.up5_2 = scn.JoinTable()
        self.conv5_1 = self.encoder(256 + 512,
                                    256,
                                    bias=False,
                                    batchnorm=batchnorm)
        self.conv5_2 = self.encoder(256, 256, bias=False, batchnorm=batchnorm)

        self.up6_1 = self.decoder(256,
                                  256,
                                  filter_size=2,
                                  filter_stride=2,
                                  bias=False)
        self.up6_2 = scn.JoinTable()
        self.conv6_1 = self.encoder(128 + 256, 128, bias=False)
        self.conv6_2 = self.encoder(128, 128, bias=False)

        self.up7_1 = self.decoder(128,
                                  128,
                                  filter_size=2,
                                  filter_stride=2,
                                  bias=False)
        self.up7_2 = scn.JoinTable()
        self.conv7_1 = self.encoder(64 + 128,
                                    64,
                                    bias=False,
                                    batchnorm=batchnorm)
        self.conv7_2 = self.encoder(64, 64, bias=False, batchnorm=batchnorm)

        self.conv8 = self.encoder(64,
                                  n_classes,
                                  filter_size=1,
                                  bias=False,
                                  batchnorm=False)
        self.act = scn.Sigmoid()

        self.log_level = 0
Example #16
    def __init__(self, cfg):
        super(UResNet, self).__init__()
        import sparseconvnet as scn
        model_config = cfg['modules']['uresnet_lonely']
        self._model_config = model_config
        dimension = model_config['data_dim']
        reps = 2  # Conv block repetition factor
        kernel_size = 2  # Use input_spatial_size method for other values?
        m = model_config['filters']  # Unet number of features
        nPlanes = [i * m for i in range(1, model_config['num_strides'] + 1)
                   ]  # UNet number of features per level
        # nPlanes = [(2**i) * m for i in range(1, num_strides+1)]  # UNet number of features per level
        nInputFeatures = 1

        downsample = [kernel_size,
                      2]  # downsample = [filter size, filter stride]
        self.last = None
        leakiness = 0

        def block(m, a, b):
            # ResNet style blocks
            m.add(scn.ConcatTable().add(scn.Identity(
            ) if a == b else scn.NetworkInNetwork(a, b, False)).add(
                scn.Sequential().add(
                    scn.BatchNormLeakyReLU(a, leakiness=leakiness)).add(
                        scn.SubmanifoldConvolution(
                            dimension, a, b, 3, False)).add(
                                scn.BatchNormLeakyReLU(
                                    b, leakiness=leakiness)).add(
                                        scn.SubmanifoldConvolution(
                                            dimension, b, b, 3,
                                            False)))).add(scn.AddTable())

        self.input = scn.Sequential().add(
            scn.InputLayer(dimension, model_config['spatial_size'],
                           mode=3)).add(
                               scn.SubmanifoldConvolution(
                                   dimension, nInputFeatures, m, 3,
                                   False))  # Kernel size 3, no bias
        self.concat = scn.JoinTable()
        # Encoding
        self.bn = scn.BatchNormLeakyReLU(nPlanes[0], leakiness=leakiness)
        # self.encoding = []
        self.encoding_block = scn.Sequential()
        self.encoding_conv = scn.Sequential()
        module = scn.Sequential()
        for i in range(model_config['num_strides']):
            module = scn.Sequential()
            for _ in range(reps):
                block(module, nPlanes[i], nPlanes[i])
            self.encoding_block.add(module)
            module2 = scn.Sequential()
            if i < model_config['num_strides'] - 1:
                module2.add(
                    scn.BatchNormLeakyReLU(
                        nPlanes[i], leakiness=leakiness)).add(
                            scn.Convolution(dimension, nPlanes[i],
                                            nPlanes[i + 1], downsample[0],
                                            downsample[1], False))
            # self.encoding.append(module)
            self.encoding_conv.add(module2)
        self.encoding = module

        # Decoding
        self.decoding_conv, self.decoding_blocks = scn.Sequential(
        ), scn.Sequential()
        for i in range(model_config['num_strides'] - 2, -1, -1):
            module1 = scn.Sequential().add(
                scn.BatchNormLeakyReLU(nPlanes[i + 1],
                                       leakiness=leakiness)).add(
                                           scn.Deconvolution(
                                               dimension, nPlanes[i + 1],
                                               nPlanes[i], downsample[0],
                                               downsample[1], False))
            self.decoding_conv.add(module1)
            module2 = scn.Sequential()
            for j in range(reps):
                block(module2, nPlanes[i] * (2 if j == 0 else 1), nPlanes[i])
            self.decoding_blocks.add(module2)

        self.output = scn.Sequential().add(scn.BatchNormReLU(m)).add(
            scn.OutputLayer(dimension))

        self.linear = torch.nn.Linear(m, model_config['num_classes'])
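Only the constructor appears above; a hedged sketch of the forward pass these containers support is given below (the real uresnet_lonely forward() may differ in input unpacking and concatenation order, so treat the details as assumptions):

def uresnet_forward(net, coords, features):
    # coords: (N, dim+1) LongTensor with the batch index last; features: (N, 1) FloatTensor
    x = net.input((coords, features))
    feature_maps = []
    for i in range(len(net.encoding_block)):
        x = net.encoding_block[i](x)
        feature_maps.append(x)
        x = net.encoding_conv[i](x)         # strided conv (empty module at the deepest level)
    for i in range(len(net.decoding_conv)):
        x = net.decoding_conv[i](x)         # BatchNormLeakyReLU + Deconvolution
        x = net.concat([x, feature_maps[-(i + 2)]])   # scn.JoinTable skip connection
        x = net.decoding_blocks[i](x)
    x = net.output(x)                       # BatchNormReLU + OutputLayer
    return net.linear(x)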
Example #17
    def __init__(self,
                 output_shape,
                 use_norm=True,
                 num_filters_down1=[32, 64, 96, 128],
                 num_filters_down2=[32, 64, 96, 128],
                 name='tDBN_bv_2'):
        super(tDBN_bv_2, self).__init__()
        self.name = name
        if use_norm:
            BatchNorm1d = change_default_args(eps=1e-3,
                                              momentum=0.01)(nn.BatchNorm1d)
            Linear = change_default_args(bias=False)(nn.Linear)
        else:
            BatchNorm1d = Empty
            Linear = change_default_args(bias=True)(nn.Linear)
        sparse_shape = np.array(output_shape[1:4])  # + [1, 0, 0]
        # sparse_shape[0] = 11
        # print(sparse_shape)
        self.scn_input = scn.InputLayer(3, sparse_shape.tolist())
        self.voxel_output_shape = output_shape

        To_use_bias = False
        residual_use = True  # using residual block or not
        dimension = 3

        reps = 2
        dimension = 3
        leakiness = 0
        input_filters_layers = num_filters_down1[:4]  # feature channels in the raw data.

        num_filter_fpn = num_filters_down1[
            3:]  # [ 64, 128, 256, 512] #dimension of feature maps, num_filter_fpn[3] == dimension_feature_map[3]
        dimension_feature_map = num_filters_down2  # [ 64, 128, 256, 512] # dimensions of output into 2D feature map
        dimension_kernel_size = [15, 7, 3, 1]

        filters_input_pairs = [[
            input_filters_layers[i], input_filters_layers[i + 1]
        ] for i in range(len(input_filters_layers) - 1)]

        m = None
        m = scn.Sequential()
        # -----------------------------------------------------------------
        ## block1 and feature map 0, convert from voxel into 3D tensor
        # -----------------------------------------------------------------
        for i, o in [[1, input_filters_layers[0]]]:
            m.add(scn.SubmanifoldConvolution(3, i, o, 3, False))

        for i, o in filters_input_pairs:  # , [num_filter_fpn[0], num_filter_fpn[0]]]:
            for _ in range(reps):
                self.block(m, i, i, residual_blocks=residual_use)

            m.add(scn.BatchNormLeakyReLU(i, leakiness=leakiness)).add(
                scn.Convolution(dimension, i, o, 3, 2, False))

        self.block_input = m
        middle_layers = []

        m = None
        m = scn.Sequential()
        for _ in range(reps):
            self.block(m,
                       num_filter_fpn[0],
                       num_filter_fpn[0],
                       residual_blocks=residual_use)
        self.x0_in = m

        for k in range(1, 4):
            m = None
            m = scn.Sequential()
            # downsample
            m.add(
                scn.BatchNormLeakyReLU(
                    num_filter_fpn[k - 1], leakiness=leakiness)).add(
                        scn.Convolution(dimension, num_filter_fpn[k - 1],
                                        num_filter_fpn[k], 3, 2, False))
            # cnn
            for _ in range(reps):
                if k == 4:
                    self.block(m,
                               num_filter_fpn[k],
                               num_filter_fpn[k],
                               dimension=2,
                               residual_blocks=residual_use
                               )  # it has to be compressed into 2 dimensions
                else:
                    self.block(m,
                               num_filter_fpn[k],
                               num_filter_fpn[k],
                               dimension=3,
                               residual_blocks=residual_use)
            if k == 1:
                self.x1_in = m
            if k == 2:
                self.x2_in = m
            if k == 3:
                self.x3_in = m

        #self.feature_map3 = Sequential(*middle_layers)  # XXX
        self.feature_map3 = scn.Sequential(
            scn.BatchNormLeakyReLU(num_filter_fpn[3],
                                   leakiness=leakiness)).add(
                                       scn.SparseToDense(3, num_filter_fpn[3])
                                   )  ## last one is the 2D instead of 3D

        for k in range(2, -1, -1):
            m = None
            m = scn.Sequential()
            # upsample
            m.add(
                scn.BatchNormLeakyReLU(
                    num_filter_fpn[k + 1], leakiness=leakiness)).add(
                        scn.Deconvolution(dimension, num_filter_fpn[k + 1],
                                          num_filter_fpn[k], 3, 2, False))
            if k == 2:
                self.upsample32 = m
            if k == 1:
                self.upsample21 = m
            if k == 0:
                self.upsample10 = m

            m = None
            m = scn.Sequential()
            m.add(scn.JoinTable())
            for i in range(reps):
                self.block(m,
                           num_filter_fpn[k] * (2 if i == 0 else 1),
                           num_filter_fpn[k],
                           residual_blocks=residual_use)

            if k == 2:
                self.concate2 = m

            if k == 1:
                self.concate1 = m

            if k == 0:
                self.concate0 = m

            m = None
            m = scn.Sequential()
            m.add(
                scn.BatchNormLeakyReLU(
                    num_filter_fpn[k], leakiness=leakiness)).add(
                        scn.Convolution(
                            3,
                            num_filter_fpn[k],
                            dimension_feature_map[k],
                            (dimension_kernel_size[k], 1, 1), (1, 1, 1),
                            bias=False)).add(
                                scn.BatchNormReLU(
                                    dimension_feature_map[k],
                                    eps=1e-3,
                                    momentum=0.99)).add(
                                        scn.SparseToDense(
                                            3, dimension_feature_map[k]))
            if k == 2:
                self.feature_map2 = m

            if k == 1:
                self.feature_map1 = m

            if k == 0:
                self.feature_map0 = m
Example #18
    def __init__(self, cfg, name='unet_full'):
        super().__init__(cfg, name)

        self.model_config = cfg[name]
        self.num_filters = self.model_config.get('filters', 16)
        self.ghost = self.model_config.get('ghost', False)
        self.seed_dim = self.model_config.get('seed_dim', 1)
        self.sigma_dim = self.model_config.get('sigma_dim', 1)
        self.embedding_dim = self.model_config.get('embedding_dim', 3)
        self.num_classes = self.model_config.get('num_classes', 5)
        self.num_gnn_features = self.model_config.get('num_gnn_features', 16)
        self.inputKernel = self.model_config.get('input_kernel_size', 3)
        self.coordConv = self.model_config.get('coordConv', False)

        # Network Freezing Options
        self.encoder_freeze = self.model_config.get('encoder_freeze', False)
        self.ppn_freeze = self.model_config.get('ppn_freeze', False)
        self.segmentation_freeze = self.model_config.get('segmentation_freeze', False)
        self.embedding_freeze = self.model_config.get('embedding_freeze', False)
        self.seediness_freeze = self.model_config.get('seediness_freeze', False)

        # Input Layer Configurations and commonly used scn operations.
        self.input = scn.Sequential().add(
            scn.InputLayer(self.dimension, self.spatial_size, mode=3)).add(
            scn.SubmanifoldConvolution(self.dimension, self.nInputFeatures, \
            self.num_filters, self.inputKernel, self.allow_bias)) # Kernel size 3, no bias
        self.concat = scn.JoinTable()
        self.add = scn.AddTable()

        # Backbone UResNet. Do NOT change namings!
        self.encoder = UResNetEncoder(cfg, name='uresnet_encoder')

        # self.seg_net = UResNetDecoder(cfg, name='segmentation_decoder')
        self.seed_net = UResNetDecoder(cfg, name='seediness_decoder')
        self.cluster_net = UResNetDecoder(cfg, name='embedding_decoder')

        # Encoder-Decoder 1x1 Connections
        encoder_planes = [i for i in self.encoder.nPlanes]
        # seg_planes = [i for i in self.seg_net.nPlanes]
        cluster_planes = [i for i in self.cluster_net.nPlanes]
        seed_planes = [i for i in self.seed_net.nPlanes]

        # print("Encoder Planes: ", encoder_planes)
        # print("Seg Planes: ", seg_planes)
        # print("Cluster Planes: ", cluster_planes)
        # print("Seediness Planes: ", seed_planes)

        self.skip_mode = self.model_config.get('skip_mode', 'default')

        # self.seg_skip = scn.Sequential()
        self.cluster_skip = scn.Sequential()
        self.seed_skip = scn.Sequential()

        # print(self.seg_skip)
        # print(self.cluster_skip)
        # print(self.seed_skip)

        # Output Layers
        self.output_cluster = scn.Sequential()
        self._nin_block(self.output_cluster, self.cluster_net.num_filters, 4)
        self.output_cluster.add(scn.OutputLayer(self.dimension))

        self.output_seediness = scn.Sequential()
        self._nin_block(self.output_seediness, self.seed_net.num_filters, 1)
        self.output_seediness.add(scn.OutputLayer(self.dimension))

        '''
        self.output_segmentation = scn.Sequential()
        self._nin_block(self.output_segmentation, self.seg_net.num_filters, self.num_classes)
        self.output_segmentation.add(scn.OutputLayer(self.dimension))
        '''

        '''
        self.output_gnn_features = scn.Sequential()
        sum_filters = self.seg_net.num_filters + self.seed_net.num_filters + self.cluster_net.num_filters
        self._resnet_block(self.output_gnn_features, sum_filters, self.num_gnn_features)
        self._nin_block(self.output_gnn_features, self.num_gnn_features, self.num_gnn_features)
        self.output_gnn_features.add(scn.OutputLayer(self.dimension))
        '''

        if self.ghost:
            self.linear_ghost = scn.Sequential()
            self._nin_block(self.linear_ghost, self.num_filters, 2)
            # self.linear_ghost.add(scn.OutputLayer(self.dimension))

        # PPN
        # self.ppn  = PPN(cfg)

        if self.skip_mode == 'default':

            '''
            for p1, p2 in zip(encoder_planes, seg_planes):
                self.seg_skip.add(scn.Identity())
            '''

            for p1, p2 in zip(encoder_planes, cluster_planes):
                self.cluster_skip.add(scn.Identity())
            for p1, p2 in zip(encoder_planes, seed_planes):
                self.seed_skip.add(scn.Identity())
            '''
            self.ppn_transform = scn.Sequential()
            ppn1_num_filters = seg_planes[self.ppn.ppn1_stride-self.ppn._num_strides]
            self._nin_block(self.ppn_transform, encoder_planes[-1], ppn1_num_filters)
            '''

        elif self.skip_mode == '1x1':

            '''
            for p1, p2 in zip(encoder_planes, seg_planes):
                self._nin_block(self.seg_skip, p1, p2)
            '''

            for p1, p2 in zip(encoder_planes, cluster_planes):
                self._nin_block(self.cluster_skip, p1, p2)

            for p1, p2 in zip(encoder_planes, seed_planes):
                self._nin_block(self.seed_skip, p1, p2)

            # self.ppn_transform = scn.Identity()

        else:
            raise ValueError('Invalid skip connection mode!')

        # Freeze Layers
        if self.encoder_freeze:
            for p in self.encoder.parameters():
                p.requires_grad = False
            print('Encoder Frozen')

        '''
        if self.ppn_freeze:
            for p in self.ppn.parameters():
                p.requires_grad = False
            print('PPN Frozen')
        '''

        '''
        if self.segmentation_freeze:
            for p in self.seg_net.parameters():
                p.requires_grad = False
            for p in self.output_segmentation.parameters():
                p.requires_grad = False
            print('Segmentation Branch Frozen')
        '''

        if self.embedding_freeze:
            for p in self.cluster_net.parameters():
                p.requires_grad = False
            for p in self.output_cluster.parameters():
                p.requires_grad = False
            print('Clustering Branch Frozen')

        if self.seediness_freeze:
            for p in self.seed_net.parameters():
                p.requires_grad = False
            for p in self.output_seediness.parameters():
                p.requires_grad = False
            print('Seediness Branch Frozen')

        # Pytorch Activations
        self.tanh = nn.Tanh()
        self.sigmoid = nn.Sigmoid()
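
The 'default' and '1x1' skip modes above differ only in whether encoder features are passed through unchanged or channel-adapted first. A small sketch of that contrast; the nin_block helper and the plane counts are assumptions standing in for the class's _nin_block, which is not shown in this listing.

import sparseconvnet as scn

def nin_block(module, a, b, leakiness=0.0):
    # hypothetical 1x1 adapter: normalize, then map a -> b channels pointwise
    module.add(scn.BatchNormLeakyReLU(a, leakiness=leakiness))
    module.add(scn.NetworkInNetwork(a, b, False))
    return module

encoder_planes = [16, 32, 48, 64]   # assumed plane counts
decoder_planes = [16, 32, 48, 64]

skip_default, skip_1x1 = scn.Sequential(), scn.Sequential()
for p1, p2 in zip(encoder_planes, decoder_planes):
    skip_default.add(scn.Identity())   # 'default': pass encoder features through
    nin_block(skip_1x1, p1, p2)        # '1x1': adapt channel counts first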
Exemple #19
    def __init__(self, cfg, name="uresnet_lonely"):
        super(UResNet, self).__init__()
        import sparseconvnet as scn

        if 'modules' in cfg:
            self.model_config = cfg['modules'][name]
        else:
            self.model_config = cfg

        # Whether to compute ghost mask separately or not

        self._dimension = self.model_config.get('data_dim', 3)
        reps = self.model_config.get('reps', 2)  # Conv block repetition factor
        kernel_size = self.model_config.get('kernel_size', 2)
        num_strides = self.model_config.get('num_strides', 5)
        m = self.model_config.get('filters', 16)  # Unet number of features
        nInputFeatures = self.model_config.get('features', 1)
        spatial_size = self.model_config.get('spatial_size', 512)
        leakiness = self.model_config.get('leak', 0.0)

        nPlanes = [i * m for i in range(1, num_strides + 1)
                   ]  # UNet number of features per level
        print("nPlanes: ", nPlanes)
        downsample = [kernel_size, 2]  # [filter size, filter stride]
        self.last = None

        def block(m, a, b):  # ResNet style blocks
            m.add(scn.ConcatTable().add(scn.Identity(
            ) if a == b else scn.NetworkInNetwork(a, b, False)).add(
                scn.Sequential().add(
                    scn.BatchNormLeakyReLU(a, leakiness=leakiness)).add(
                        scn.SubmanifoldConvolution(
                            self._dimension, a, b, 3, False)).add(
                                scn.BatchNormLeakyReLU(
                                    b, leakiness=leakiness)).add(
                                        scn.SubmanifoldConvolution(
                                            self._dimension, b, b, 3,
                                            False)))).add(scn.AddTable())

        self.input = scn.Sequential().add(
            scn.InputLayer(self._dimension, spatial_size, mode=3)).add(
                scn.SubmanifoldConvolution(self._dimension, nInputFeatures, m,
                                           3, False))  # Kernel size 3, no bias
        self.concat = scn.JoinTable()
        # Encoding
        self.bn = scn.BatchNormLeakyReLU(nPlanes[0], leakiness=leakiness)
        self.encoding_block = scn.Sequential()
        self.encoding_conv = scn.Sequential()
        module = scn.Sequential()
        for i in range(num_strides):
            module = scn.Sequential()
            for _ in range(reps):
                block(module, nPlanes[i], nPlanes[i])
            self.encoding_block.add(module)
            module2 = scn.Sequential()
            if i < num_strides - 1:
                module2.add(
                    scn.BatchNormLeakyReLU(
                        nPlanes[i], leakiness=leakiness)).add(
                            scn.Convolution(self._dimension, nPlanes[i],
                                            nPlanes[i + 1], downsample[0],
                                            downsample[1], False))
            self.encoding_conv.add(module2)
        self.encoding = module

        # Decoding
        self.decoding_conv, self.decoding_blocks = scn.Sequential(
        ), scn.Sequential()
        for i in range(num_strides - 2, -1, -1):
            module1 = scn.Sequential().add(
                scn.BatchNormLeakyReLU(nPlanes[i + 1],
                                       leakiness=leakiness)).add(
                                           scn.Deconvolution(
                                               self._dimension, nPlanes[i + 1],
                                               nPlanes[i], downsample[0],
                                               downsample[1], False))
            self.decoding_conv.add(module1)
            module2 = scn.Sequential()
            for j in range(reps):
                block(module2, nPlanes[i] * (2 if j == 0 else 1), nPlanes[i])
            self.decoding_blocks.add(module2)

        self.output = scn.Sequential().add(scn.BatchNormReLU(m)).add(
            scn.OutputLayer(self._dimension))
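
The listing stops at the constructor; the forward pass is not shown. A minimal sketch, assuming the usual UResNet wiring in which encoder outputs are cached and concatenated back in during decoding (a hypothetical method, not part of the listing):

    def forward(self, point_cloud):
        # point_cloud: (N, dim + 1 + nInputFeatures) tensor of coordinates,
        # batch id and features, as expected by scn.InputLayer in mode=3
        coords = point_cloud[:, :self._dimension + 1].float()
        features = point_cloud[:, self._dimension + 1:].float()

        x = self.input((coords, features))
        feature_maps = []
        for i in range(len(self.encoding_block)):
            x = self.encoding_block[i](x)
            feature_maps.append(x)
            x = self.encoding_conv[i](x)   # last entry is an empty Sequential (no-op)

        for i in range(len(self.decoding_conv)):
            skip = feature_maps[-i - 2]
            x = self.decoding_conv[i](x)
            x = self.concat([skip, x])
            x = self.decoding_blocks[i](x)

        return self.output(x)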
Exemple #20
    def __init__(self, cfg):
        super(PPNUResNet, self).__init__()
        import sparseconvnet as scn
        self._model_config = cfg['modules']['uresnet_ppn_type']

        self._dimension = self._model_config.get('data_dim', 3)
        nInputFeatures = self._model_config.get('features', 1)
        spatial_size = self._model_config.get('spatial_size', 512)
        num_classes = self._model_config.get('num_classes', 5)
        m = self._model_config.get('filters', 16)  # Unet number of features
        num_strides = self._model_config.get('num_strides', 5)

        reps = 2  # Conv block repetition factor
        kernel_size = 2  # Use input_spatial_size method for other values?
        nPlanes = [i * m for i in range(1, num_strides + 1)
                   ]  # UNet number of features per level
        # nPlanes = [(2**i) * m for i in range(1, num_strides+1)]  # UNet number of features per level

        downsample = [kernel_size,
                      2]  # downsample = [filter size, filter stride]
        self.last = None
        leakiness = 0

        def block(m, a, b):
            # ResNet style blocks
            m.add(scn.ConcatTable().add(scn.Identity(
            ) if a == b else scn.NetworkInNetwork(a, b, False)).add(
                scn.Sequential().add(
                    scn.BatchNormLeakyReLU(a, leakiness=leakiness)).add(
                        scn.SubmanifoldConvolution(
                            self._dimension, a, b, 3, False)).add(
                                scn.BatchNormLeakyReLU(
                                    b, leakiness=leakiness)).add(
                                        scn.SubmanifoldConvolution(
                                            self._dimension, b, b, 3,
                                            False)))).add(scn.AddTable())

        self.input = scn.Sequential().add(
            scn.InputLayer(self._dimension, spatial_size, mode=3)).add(
                scn.SubmanifoldConvolution(self._dimension, nInputFeatures, m,
                                           3, False))  # Kernel size 3, no bias
        self.concat = scn.JoinTable()
        # Encoding
        self.bn = scn.BatchNormLeakyReLU(nPlanes[0], leakiness=leakiness)
        # self.encoding = []
        self.encoding_block = scn.Sequential()
        self.encoding_conv = scn.Sequential()
        module = scn.Sequential()
        for i in range(num_strides):
            module = scn.Sequential()
            for _ in range(reps):
                block(module, nPlanes[i], nPlanes[i])
            self.encoding_block.add(module)
            module2 = scn.Sequential()
            if i < num_strides - 1:
                module2.add(
                    scn.BatchNormLeakyReLU(
                        nPlanes[i], leakiness=leakiness)).add(
                            scn.Convolution(self._dimension, nPlanes[i],
                                            nPlanes[i + 1], downsample[0],
                                            downsample[1], False))
            # self.encoding.append(module)
            self.encoding_conv.add(module2)
        self.encoding = module

        # Decoding
        self.decoding_conv, self.decoding_blocks = scn.Sequential(
        ), scn.Sequential()
        for i in range(num_strides - 2, -1, -1):
            module1 = scn.Sequential().add(
                scn.BatchNormLeakyReLU(nPlanes[i + 1],
                                       leakiness=leakiness)).add(
                                           scn.Deconvolution(
                                               self._dimension, nPlanes[i + 1],
                                               nPlanes[i], downsample[0],
                                               downsample[1], False))
            self.decoding_conv.add(module1)
            module2 = scn.Sequential()
            for j in range(reps):
                block(module2, nPlanes[i] * (2 if j == 0 else 1), nPlanes[i])
            self.decoding_blocks.add(module2)

        self.output = scn.Sequential().add(scn.BatchNormReLU(m)).add(
            scn.OutputLayer(self._dimension))

        self.linear = torch.nn.Linear(m, num_classes)

        # PPN stuff
        self.half_stride = int(num_strides / 2.0)
        self.ppn1_conv = scn.SubmanifoldConvolution(self._dimension,
                                                    nPlanes[-1], nPlanes[-1],
                                                    3, False)
        self.ppn1_scores = scn.SubmanifoldConvolution(self._dimension,
                                                      nPlanes[-1], 2, 3, False)

        self.selection1 = Selection()
        self.selection2 = Selection()
        self.unpool1 = scn.Sequential()
        for i in range(num_strides - self.half_stride - 1):
            self.unpool1.add(
                scn.UnPooling(self._dimension, downsample[0], downsample[1]))

        self.unpool2 = scn.Sequential()
        for i in range(self.half_stride):
            self.unpool2.add(
                scn.UnPooling(self._dimension, downsample[0], downsample[1]))

        middle_filters = int(m * self.half_stride * (self.half_stride + 1) /
                             2.0)
        self.ppn2_conv = scn.SubmanifoldConvolution(self._dimension,
                                                    middle_filters,
                                                    middle_filters, 3, False)
        self.ppn2_scores = scn.SubmanifoldConvolution(self._dimension,
                                                      middle_filters, 2, 3,
                                                      False)
        self.multiply1 = Multiply()
        self.multiply2 = Multiply()

        self.ppn3_conv = scn.SubmanifoldConvolution(self._dimension,
                                                    nPlanes[0], nPlanes[0], 3,
                                                    False)
        self.ppn3_pixel_pred = scn.SubmanifoldConvolution(
            self._dimension, nPlanes[0], self._dimension, 3, False)
        self.ppn3_scores = scn.SubmanifoldConvolution(self._dimension,
                                                      nPlanes[0], 2, 3, False)
        self.ppn3_type = scn.SubmanifoldConvolution(self._dimension,
                                                    nPlanes[0], num_classes, 3,
                                                    False)

        self.add_labels1 = AddLabels()
        self.add_labels2 = AddLabels()
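
For the default configuration (num_strides=5, m=16) the PPN bookkeeping above works out as follows; a plain-Python sketch of the arithmetic, with all values assumed from the defaults in this listing:

num_strides, m = 5, 16
half_stride = int(num_strides / 2.0)                              # 2: the intermediate PPN level
unpool1_steps = num_strides - half_stride - 1                     # 2 UnPooling layers: deepest level -> ppn2 level
unpool2_steps = half_stride                                       # 2 UnPooling layers: ppn2 level -> full resolution
middle_filters = int(m * half_stride * (half_stride + 1) / 2.0)   # 48 features expected at the ppn2 level
print(half_stride, unpool1_steps, unpool2_steps, middle_filters)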
Exemple #21
    def __init__(self,
                 inputshape,
                 reps,
                 nin_features,
                 nout_features,
                 nplanes,
                 flowdirs=['y2u', 'y2v'],
                 predict_classvec=False,
                 home_gpu=None,
                 features_per_layer=None,
                 show_sizes=False,
                 share_encoder_weights=False):
        nn.Module.__init__(self)
        """
        inputs
        ------
        inputshape [list of int]: dimensions of the matrix or image
        reps [int]: number of residual modules per layer (for both encoder and decoder)
        nin_features [int]: number of features in the first convolutional layer
        nout_features [int]: number of features that feed into the classification/regression layer
        nplanes [int]: the depth of the U-Net
        flowdirs [list of str]: which flow directions to implement. If two ('y2u'+'y2v'),
                       all three planes are processed and two flow predictions are produced.
                       If one, only two planes are processed by the encoder and one flow is predicted.
        share_encoder_weights [bool]: if True, share the weights for the encoder
        features_per_layer [list of int]: if provided, defines the feature size of each layer depth. 
                       if None, calculated automatically.
        show_sizes [bool]: if True, print sizes while running forward
        predict_classvec [bool]: if True, predict a class vector indicating which target column matches
        """
        # set parameters
        self.dimensions = 2  # not playing with 3D for now
        self.home_gpu = home_gpu
        self.predict_classvec = predict_classvec

        # input shape: LongTensor, tuple, or list. Handled by InputLayer
        # size of each spatial dimension
        self.inputshape = inputshape
        if len(self.inputshape) != self.dimensions:
            raise ValueError(
                "expected inputshape to contain size of 2 dimensions only." +
                "given %d values" % (len(self.inputshape)))

        # mode variable: how to deal with repeated data
        self.mode = 0

        # for debug, show sizes of layers/planes
        self._show_sizes = show_sizes

        # nfeatures
        self.nfeatures = nin_features
        self.nout_features = nout_features

        # plane structure
        if features_per_layer is None:
            self.nPlanes = [
                self.nfeatures * 2**(n + 1) for n in range(nplanes)
            ]
        else:
            if (type(features_per_layer) is list
                    and len(features_per_layer) == nplanes
                    and type(features_per_layer[0]) is int):
                self.nPlanes = features_per_layer
            else:
                raise ValueError(
                    "features_per_layer should be a list of int with number of elements equalling 'nplanes' argument"
                )
        if self._show_sizes:
            print "Features per plane/layer: ", self.nPlanes

        # repetitions (per plane)
        self.reps = reps

        # residual blocks
        self.residual_blocks = True

        # need encoder for both source and target
        # then cat tensor
        # and produce one decoder for flow, another decoder for visibility

        # set which flows to run
        for flowdir in flowdirs:
            if flowdir not in SparseLArFlow.avail_flows:
                raise ValueError(
                    "flowdir={} not available. Allowed flows: {}".format(
                        flowdir, SparseLArFlow.avail_flows))
        self.flowdirs = flowdirs
        self.nflows = len(self.flowdirs)

        # do we share weights
        self._share_encoder_weights = share_encoder_weights

        # model:
        # input
        self.src_inputlayer = scn.InputLayer(self.dimensions,
                                             self.inputshape,
                                             mode=self.mode)

        if 'y2u' in self.flowdirs:
            self.tar1_inputlayer = scn.InputLayer(self.dimensions,
                                                  self.inputshape,
                                                  mode=self.mode)
        else:
            self.tar1_inputlayer = None

        if 'y2v' in self.flowdirs:
            self.tar2_inputlayer = scn.InputLayer(self.dimensions,
                                                  self.inputshape,
                                                  mode=self.mode)
        else:
            self.tar2_inputlayer = None

        # stem
        self.src_stem = scn.SubmanifoldConvolution(self.dimensions, 1,
                                                   self.nfeatures, 3, False)
        if not self._share_encoder_weights:
            # if not sharing weights, produce separate stems
            if 'y2u' in self.flowdirs:
                self.tar1_stem = scn.SubmanifoldConvolution(
                    self.dimensions, 1, self.nfeatures, 3, False)
            else:
                self.tar1_stem = None

            if 'y2v' in self.flowdirs:
                self.tar2_stem = scn.SubmanifoldConvolution(
                    self.dimensions, 1, self.nfeatures, 3, False)
            else:
                self.tar2_stem = None

        # encoders
        self.source_encoder = SparseEncoder("src", self.reps, self.nfeatures,
                                            self.nPlanes)
        if not self._share_encoder_weights:
            # if not sharing weights, add additional encoders
            if 'y2u' in self.flowdirs:
                self.target1_encoder = SparseEncoder("tar1", self.reps,
                                                     self.nfeatures,
                                                     self.nPlanes)
            else:
                self.target1_encoder = None

            if 'y2v' in self.flowdirs:
                self.target2_encoder = SparseEncoder("tar2", self.reps,
                                                     self.nfeatures,
                                                     self.nPlanes)
            else:
                self.target2_encoder = None

        if self._show_sizes:
            self.source_encoder.set_verbose(True)

        # concat
        self.join_enclayers = []
        for ilayer in range(len(self.nPlanes)):
            self.join_enclayers.append(scn.JoinTable())
            setattr(self, "join_enclayers%d" % (ilayer),
                    self.join_enclayers[ilayer])

        # calculate decoder planes
        self.decode_layers_inchs = []
        self.decode_layers_outchs = []
        for ilayer, enc_outchs in enumerate(reversed(self.nPlanes)):
            # input is the concatenated encoder features (source + targets) plus the previous decoder layer's output
            enc_nfeatures = (self.nflows + 1) * enc_outchs
            dec_nfeatures = 0 if ilayer == 0 else self.decode_layers_outchs[-1]
            self.decode_layers_inchs.append(enc_nfeatures + dec_nfeatures)
            self.decode_layers_outchs.append(enc_nfeatures)
        if self._show_sizes:
            print "decoder layers input  chs: ", self.decode_layers_inchs
            print "decoder layers output chs: ", self.decode_layers_outchs

        # decoders
        if 'y2u' in self.flowdirs:
            self.flow1_decoder = SparseDecoder("flow1", self.reps,
                                               self.decode_layers_inchs,
                                               self.decode_layers_outchs)
        else:
            self.flow1_decoder = None

        if 'y2v' in self.flowdirs:
            self.flow2_decoder = SparseDecoder("flow2", self.reps,
                                               self.decode_layers_inchs,
                                               self.decode_layers_outchs)
        else:
            self.flow2_decoder = None

        if self._show_sizes:
            for fd in [self.flow1_decoder, self.flow2_decoder]:
                if fd is not None:
                    fd.set_verbose(True)

        # last deconv concat
        if 'y2u' in self.flowdirs:
            self.flow1_concat = scn.JoinTable()
        else:
            self.flow1_concat = None

        if 'y2v' in self.flowdirs:
            self.flow2_concat = scn.JoinTable()
        else:
            self.flow2_concat = None

        # final feature set convolution
        flow_resblock_inchs = (
            self.nflows + 1) * self.nfeatures + self.decode_layers_outchs[-1]
        if 'y2u' in self.flowdirs:
            self.flow1_resblock = create_resnet_layer(self.reps,
                                                      flow_resblock_inchs,
                                                      self.nout_features)
        else:
            self.flow1_resblock = None

        if 'y2v' in self.flowdirs:
            self.flow2_resblock = create_resnet_layer(self.reps,
                                                      flow_resblock_inchs,
                                                      self.nout_features)
        else:
            self.flow2_resblock = None

        # OUTPUT LAYER
        if self.predict_classvec:
            if 'y2u' in self.flowdirs:
                self.flow1_out = scn.SubmanifoldConvolution(
                    self.dimensions, self.nout_features, self.inputshape[1], 1,
                    True)
            else:
                self.flow1_out = None

            if 'y2v' in self.flowdirs:
                self.flow2_out = scn.SubmanifoldConvolution(
                    self.dimensions, self.nout_features, self.inputshape[1], 1,
                    True)
            else:
                self.flow2_out = None
        else:
            # regression layer
            if 'y2u' in self.flowdirs:
                self.flow1_out = scn.SubmanifoldConvolution(
                    self.dimensions, self.nout_features, 1, 1, True)
            else:
                self.flow1_out = None

            if 'y2v' in self.flowdirs:
                self.flow2_out = scn.SubmanifoldConvolution(
                    self.dimensions, self.nout_features, 1, 1, True)
            else:
                self.flow2_out = None
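
The output stage above switches between a per-column classification head and a single-channel regression head. A minimal sketch of that choice in isolation, with assumed shapes and feature counts; only the scn calls already used in the listing appear here.

import sparseconvnet as scn

dimensions, nout_features = 2, 64        # assumed values
inputshape = [512, 832]                  # assumed (rows, target columns)
predict_classvec = True

# classvec mode scores every target column; regression mode predicts one flow value per pixel
out_channels = inputshape[1] if predict_classvec else 1
flow_out = scn.SubmanifoldConvolution(dimensions, nout_features, out_channels, 1, True)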