Example #1
0
    def create_block(self):
        """Assemble the dense-stack / skip / downsample triple.

        :return: namedtuple ``DenseSDBlock`` holding the dense feature
            stack, the skip convolution, and the (optional) downsampling
            convolution
        """
        BlockDesc = namedtuple('DenseSDBlock',
                               ['dfs_block', 'skip_conv', 'down_conv'])

        stack = DenseFeatureStackBlock(self.n_dense_channels,
                                       self.kernel_size,
                                       self.dilation_rates,
                                       self.use_bdo,
                                       **self.kwargs)

        skip = ConvolutionalLayer(self.n_seg_channels,
                                  kernel_size=self.kernel_size,
                                  **self.kwargs)

        # the downsampling conv is only built when a channel count is set
        down = None
        if self.n_down_channels is not None:
            down = ConvolutionalLayer(self.n_down_channels,
                                      kernel_size=self.kernel_size,
                                      stride=2,
                                      **self.kwargs)

        return BlockDesc(dfs_block=stack, skip_conv=skip, down_conv=down)
Example #2
0
    def layer_op(self, input_tensor, is_training, layer_id=-1):
        """Apply (n_layers - 1) BN+conv3x3x3+activation layers followed by a
        final linear 1x1x1 conv mapping to ``self.num_classes`` channels.

        :param input_tensor: tensor, input to the layer
        :param is_training: boolean, True if network is in training mode
        :param layer_id: not in use
        :return: tensor with ``num_classes`` output channels

        Fix: removed dead code — ``rank``/``perm`` (a last-two-axes swap
        permutation) were computed but never used anywhere in the method.
        """
        output_tensor = input_tensor
        # All layers except the last one consist of:
        # BN + Conv_3x3x3 + Activation
        for layer in range(self.n_layers - 1):
            layer_to_add = ConvolutionalLayer(
                n_output_chns=self.num_features[layer + 1],
                with_bn=True,
                kernel_size=3,
                w_initializer=self.initializers['w'],
                w_regularizer=self.regularizers['w'],
                acti_func=self.acti_func,
                name='conv_fc_%d' % layer)
            output_tensor = layer_to_add(output_tensor, is_training)
        # final projection to class scores: plain 1x1x1 conv, no BN/activation
        last_layer = ConvolutionalLayer(n_output_chns=self.num_classes,
                                        kernel_size=1)
        output_tensor = last_layer(output_tensor, is_training)
        return output_tensor
Example #3
0
    def layer_op(self, input_tensor, is_training, bn_momentum=0.9):
        """Densely-connected triple convolution.

        Each conv consumes the channel-wise concatenation of the input and
        all previous conv outputs; returns the last conv's output.
        """
        def make_conv(suffix):
            # the three convs share identical hyper-parameters
            return ConvolutionalLayer(n_output_chns=self.n_chn,
                                      kernel_size=self.kernel,
                                      w_initializer=self.initializers['w'],
                                      w_regularizer=self.regularizers['w'],
                                      acti_func=self.acti_func,
                                      name='{}_{}'.format(self.n_chn, suffix))

        conv_a = make_conv(1)
        conv_b = make_conv(2)
        conv_c = make_conv(3)

        out_a = conv_a(input_tensor, is_training, bn_momentum)
        out_b = conv_b(tf.concat((input_tensor, out_a), axis=-1),
                       is_training, bn_momentum)
        out_c = conv_c(tf.concat((input_tensor, out_a, out_b), axis=-1),
                       is_training, bn_momentum)
        return out_c
Example #4
0
    def layer_op(self, images, is_training=True, **unused_kwargs):
        """Two-stage head: a 3x3 relu conv followed by a 1x1 linear conv.

        :param images: tensor, input to the network
        :param is_training: boolean, True if network is in training mode
        :param unused_kwargs: other arguments, not in use
        :return: tensor, network output
        """
        shared = dict(w_initializer=self.initializers['w'],
                      w_regularizer=self.regularizers['w'],
                      b_initializer=self.initializers['b'],
                      b_regularizer=self.regularizers['b'])

        hidden_conv = ConvolutionalLayer(self.hidden_features,
                                         kernel_size=3,
                                         acti_func='relu',
                                         name='conv_input',
                                         **shared)
        output_conv = ConvolutionalLayer(self.num_classes,
                                         kernel_size=1,
                                         acti_func=None,
                                         name='conv_output',
                                         **shared)

        flow = hidden_conv(images, is_training)
        flow = output_conv(flow, is_training)
        return flow
Example #5
0
 def layer_op(self, input_tensor, is_training=None, keep_prob=None):
     """Run the dense feature stack, then derive the skip (segmentation)
     output and an optional stride-2 downsampled output from the
     concatenated stack features."""
     channel_axis = len(input_tensor.get_shape()) - 1
     dense_stack = DenseFeatureStackBlock(
         self.n_dense_channels,
         self.kernel_size,
         self.dilation_rates,
         self.use_bdo,
         **self.kwargs)
     stack_outputs = dense_stack(input_tensor,
                                 is_training=is_training,
                                 keep_prob=keep_prob)
     all_features = tf.concat(stack_outputs, channel_axis)

     seg_conv = ConvolutionalLayer(
         self.n_seg_channels,
         kernel_size=self.kernel_size,
         **self.kwargs)
     seg = seg_conv(all_features,
                    is_training=is_training,
                    keep_prob=keep_prob)

     # the downsampling path is optional
     down = None
     if self.n_downsample_channels is not None:
         down_conv = ConvolutionalLayer(
             self.n_downsample_channels,
             kernel_size=self.kernel_size,
             stride=2,
             **self.kwargs)
         down = down_conv(all_features,
                          is_training=is_training,
                          keep_prob=keep_prob)
     return seg, down
Example #6
0
    def layer_op(self, input_tensor, is_training, layer_id=-1):
        """Apply (n_layers - 1) batch-normalised conv3x3x3+activation layers,
        then a final linear 1x1x1 conv to ``self.num_classes`` channels.

        :param input_tensor: tensor, input to the layer
        :param is_training: boolean, True if network is in training mode
        :param layer_id: not in use
        :return: tensor with number of channels equal to ``num_classes``

        Fix: removed dead code — ``rank``/``perm`` (a last-two-axes swap
        permutation) were computed but never used anywhere in the method.
        """
        output_tensor = input_tensor
        # All layers except the last one consist of:
        # BN + Conv_3x3x3 + Activation
        for layer in range(self.n_layers - 1):
            layer_to_add = ConvolutionalLayer(
                n_output_chns=self.num_features[layer + 1],
                feature_normalization='batch',
                kernel_size=3,
                w_initializer=self.initializers['w'],
                w_regularizer=self.regularizers['w'],
                acti_func=self.acti_func,
                name='conv_fc_%d' % layer)
            output_tensor = layer_to_add(output_tensor, is_training)
        # final projection: plain 1x1x1 conv, no normalisation/activation
        last_layer = ConvolutionalLayer(n_output_chns=self.num_classes,
                                        kernel_size=1)
        output_tensor = last_layer(output_tensor, is_training)
        return output_tensor
Example #7
0
    def layer_op(self, images, is_training=True, layer_id=-1, **unused_kwargs):
        """Residual stack: first conv, repeated middle convs, a linear conv,
        then an elementwise sum with the network input.

        :param images: tensor, input; spatial dims must be divisible by 8
        :param is_training: boolean, True if network is in training mode
        :param layer_id: not in use
        :return: tensor, input plus the learned residual
        """
        assert (layer_util.check_spatial_dims(images, lambda x: x % 8 == 0))
        layer_instances = []
        input_tensor_res = images

        def make_conv(params, acti, name):
            # every conv in this network: bias on, batch-norm off
            return ConvolutionalLayer(
                n_output_chns=params['n_features'],
                kernel_size=params['kernel_size'],
                with_bias=True,
                with_bn=False,
                acti_func=acti,
                w_initializer=self.initializers['w'],
                w_regularizer=self.regularizers['w'],
                name=name)

        ### first convolution layer
        params = self.layers[0]
        first_conv_layer = make_conv(params, self.acti_func, params['name'])
        flow = first_conv_layer(images, is_training)
        layer_instances.append((first_conv_layer, flow))

        ### repeated middle convolutions
        params = self.layers[1]
        for j in range(params['repeat']):
            conv_layer = make_conv(params, self.acti_func,
                                   '%s_%d' % (params['name'], j))
            flow = conv_layer(flow, is_training)
            layer_instances.append((conv_layer, flow))

        ### final linear convolution
        params = self.layers[2]
        fc_layer = make_conv(params, None, params['name'])
        flow = fc_layer(flow, is_training)
        layer_instances.append((fc_layer, flow))

        # global residual connection
        output_tensor_res = ElementwiseLayer('SUM')(input_tensor_res, flow)

        # in training mode, additionally print the layer summary
        if is_training:
            self._print(layer_instances)
        return output_tensor_res
Example #8
0
 def layer_op(self, noise):
     """Map a noise tensor through two selu convs and a linear conv that
     restores the original channel count."""
     n_chns = noise.shape[-1]
     hidden = noise
     # two batch-normalised selu hidden layers (separate weights each)
     for _ in range(2):
         hidden_conv = ConvolutionalLayer(
             20, 10, with_bn=True, acti_func='selu', with_bias=True)
         hidden = hidden_conv(hidden, is_training=True)
     # linear output layer, no batch norm
     out_conv = ConvolutionalLayer(n_chns, 10, with_bn=False, with_bias=True)
     return out_conv(hidden, is_training=True)
Example #9
0
    def layer_op(self, thru_tensor, is_training):
        """Conv stack followed by an optional resampling step.

        Returns the (possibly resampled) main tensor and, when
        ``with_downsample_branch`` is set, the pre-resampling features.
        """
        for kernel_size, n_features in zip(self.kernels, self.n_chns):
            # the final 1x1x1 conv layer gets no activation and, therefore,
            # no feature normalisation either
            acti = self.acti_func if kernel_size > 1 else None
            norm = None if acti is None else 'instance'

            conv_op = ConvolutionalLayer(n_output_chns=n_features,
                                         kernel_size=kernel_size,
                                         w_initializer=self.initializers['w'],
                                         w_regularizer=self.regularizers['w'],
                                         acti_func=acti,
                                         name='{}'.format(n_features),
                                         feature_normalization=norm)
            thru_tensor = conv_op(thru_tensor, is_training)

        branch_output = thru_tensor if self.with_downsample_branch else None

        if self.func == 'DOWNSAMPLE':
            pool = DownSampleLayer('MAX', kernel_size=2, stride=2,
                                   name='down_2x2')
            thru_tensor = pool(thru_tensor)
        elif self.func == 'UPSAMPLE':
            # double every spatial dimension via linear resizing
            up_shape = [2 * int(thru_tensor.shape[i]) for i in (1, 2, 3)]
            thru_tensor = LinearResizeLayer(up_shape)(thru_tensor)
        # self.func == 'NONE': leave the tensor unchanged
        return thru_tensor, branch_output
Example #10
0
    def test_no_restores(self):
        """A variable with no restore entry is initialised from scratch.

        Fix: tensor shapes passed to ``tf.ones`` were floats
        (``[1., 5., 5., 1.]``); shapes must be integers.
        """
        tf.reset_default_graph()
        block1 = ConvolutionalLayer(4, 3, name='bar', with_bn=False,
                                    w_initializer=tf.constant_initializer(1.))
        b2 = block1(tf.ones([1, 5, 5, 1]))
        init_op = global_vars_init_or_restore()
        all_vars = tf.global_variables()
        with self.test_session() as sess:
            sess.run(init_op)

            def getvar(x):
                return [v for v in all_vars if v.name == x][0]

            bar_w_var = getvar(block1.layer_scope().name + '/conv_/w:0')
            [bar_w] = sess.run([bar_w_var])
            # constant initializer of 1. -> an all-ones 3x3x1x4 kernel
            self.assertAllClose(bar_w, np.ones([3, 3, 1, 4]))
Example #11
0
    def layer_op(self, images, is_training, keep_prob=0.5, layer_id=-1, **unused_kwargs):
        """T1 pathway: valid-padded 3x3 convs, each followed by channel
        squeeze-and-excitation recalibration.

        :param images: tensor, input image batch
        :param is_training: boolean, True if network is in training mode
        :param keep_prob: dropout keep probability (not used here)
        :param layer_id: not in use
        :return: tensor, output of the T1 pathway

        Fixes: ``c-SE = ChannelSELayer()`` was a SyntaxError (``c-SE`` is
        not a valid identifier), ``print(crop_op)`` referenced a name whose
        definition was commented out (NameError), and the method had no
        return statement; debug prints removed.
        """
        T1_path = images

        # T1 pathway
        for n_features in self.T1_features:
            # T1 pathway convolutions
            conv_layer = ConvolutionalLayer(
                n_output_chns=n_features,
                kernel_size=3,
                padding='VALID',
                w_initializer=self.initializers['w'],
                w_regularizer=self.regularizers['w'],
                acti_func=self.acti_func,
                name='normal_conv_{}'.format(n_features))

            # channel squeeze-and-excitation after every conv
            cse_layer = ChannelSELayer()
            T1_path = cse_layer(conv_layer(T1_path, is_training))

        return T1_path
Example #12
0
    def layer_op(self, input_tensor, is_training):
        """Conv stack, then optional max-pool downsampling or deconvolution
        upsampling; also exposes the pre-resampling branch if requested."""
        flow = input_tensor
        for kernel_size, n_features in zip(self.kernels, self.n_chns):
            conv_op = ConvolutionalLayer(n_output_chns=n_features,
                                         kernel_size=kernel_size,
                                         w_initializer=self.initializers['w'],
                                         w_regularizer=self.regularizers['w'],
                                         acti_func=self.acti_func,
                                         name='{}'.format(n_features))
            flow = conv_op(flow, is_training)

        # keep the pre-resampling features for skip connections if asked
        branch_output = flow if self.with_downsample_branch else None

        if self.func == 'DOWNSAMPLE':
            pool = DownSampleLayer('MAX',
                                   kernel_size=2,
                                   stride=2,
                                   name='down_2x2')
            flow = pool(flow)
        elif self.func == 'UPSAMPLE':
            deconv = DeconvolutionalLayer(n_output_chns=self.n_chns[-1],
                                          kernel_size=2,
                                          stride=2,
                                          name='up_2x2')
            flow = deconv(flow, is_training)
        # 'NONE': no resampling
        return flow, branch_output
Example #13
0
    def layer_op(self, images, is_training=True, layer_id=-1, **unused_kwargs):
        """Per-modality conv branches, merged and fed to HighRes3DNet.

        :param images: tensor, concatenation of multiple input modalities
        :param is_training: boolean, True if network is in training mode
        :param layer_id: not in use
        :param unused_kwargs:
        :return: predicted tensor
        """
        n_modality = images.shape.as_list()[-1]
        rank = images.shape.ndims
        assert n_modality > 1
        # one conv branch per input modality
        branches = tf.split(images, n_modality, axis=rank - 1)
        branch_outputs = []
        for idx, branch in enumerate(branches):
            conv_layer = ConvolutionalLayer(
                n_output_chns=self.n_features,
                kernel_size=3,
                w_initializer=self.initializers['w'],
                w_regularizer=self.regularizers['w'],
                acti_func=self.acti_func,
                name='conv_{}'.format(idx))
            branch_outputs.append(conv_layer(branch, is_training))
        merged = tf.stack(branch_outputs, axis=-1)

        # fuse the modality branches, then run the segmentation front end
        output_tensor = ScaleBlock('AVERAGE', n_layers=1)(merged, is_training)
        output_tensor = HighRes3DNet(self.num_classes)(output_tensor,
                                                       is_training)
        return output_tensor
Example #14
0
        def final_image(n_chns, x):
            """Produce the generated image from the last feature map.

            :param n_chns: int, number of output channels
            :param x: tensor, input tensor to layers
            :return: tensor, generated image
            """
            with tf.name_scope('final_image'):
                # optionally concatenate extra noise channels
                # (closure: `add_noise` is the number of noise channels)
                if add_noise > 0:
                    feature_shape = x.shape.as_list()[0:-1]
                    noise_shape = feature_shape + [add_noise]
                    noise = tf.random_normal(noise_shape, 0, .1)
                    x = tf.concat([x, noise], axis=3)
                # tanh conv maps features to intensities in [-1, 1];
                # no feature normalisation on the output layer
                conv_layer = ConvolutionalLayer(
                    n_output_chns=n_chns,
                    kernel_size=3,
                    acti_func='tanh',
                    feature_normalization=None,
                    with_bias=True,
                    w_initializer=self.initializers['w'],
                    b_initializer=self.initializers['b'])
                # closure: `is_training` / `keep_prob_ph` come from layer_op
                x_sample = conv_layer(x,
                                      is_training=is_training,
                                      keep_prob=keep_prob_ph)
                # resize to the target spatial size (closure: `image_size`)
                return tf.image.resize_images(x_sample, image_size[:-1])
Example #15
0
 def down(ch, x):
     """Stride-2 conv, external batch norm, then leaky relu."""
     with tf.name_scope('downsample'):
         strided_conv = ConvolutionalLayer(ch, 3, stride=2, with_bn=False,
                                           w_initializer=w_init)
         out = strided_conv(x, is_training=is_training)
         out = tf.contrib.layers.batch_norm(out)
         return leaky_relu(out)
Example #16
0
 def conv(ch, x, s):
     """Conv + batch norm, residual-added to `s`, then leaky relu."""
     conv_layer = ConvolutionalLayer(ch,
                                     3,
                                     feature_normalization=None,
                                     w_initializer=w_init)
     out = conv_layer(x, is_training=is_training)
     # residual addition happens before the activation
     return leaky_relu(tf.contrib.layers.batch_norm(out) + s)
Example #17
0
 def layer_op(self, input_tensor, is_training=None, keep_prob=None):
     """Dense feature stack: each conv consumes the channel-concatenation
     of the input and all previous conv outputs; returns the whole stack
     (input first, then one entry per dilation rate)."""
     channel_dim = len(input_tensor.get_shape()) - 1
     stack = [input_tensor]
     input_mask = tf.ones([input_tensor.get_shape().as_list()[-1]]) > 0
     last_idx = len(self.dilation_rates) - 1
     for idx, d in enumerate(self.dilation_rates):
         if idx == last_idx:
             # no dropout on the last layer of the stack
             keep_prob = None
         features = tf.concat(stack, channel_dim)
         if self.use_bdo:
             # channel-sparse conv tracks which channels are active
             sparse_conv = ChannelSparseConvolutionalLayer(
                 self.n_dense_channels,
                 kernel_size=self.kernel_size,
                 **self.kwargs)
             new_features, new_input_mask = sparse_conv(
                 features,
                 input_mask=input_mask,
                 is_training=is_training,
                 keep_prob=keep_prob)
             input_mask = tf.concat([input_mask, new_input_mask], 0)
         else:
             dense_conv = ConvolutionalLayer(self.n_dense_channels,
                                             kernel_size=self.kernel_size,
                                             **self.kwargs)
             new_features = dense_conv(features,
                                       is_training=is_training,
                                       keep_prob=keep_prob)
         stack.append(new_features)
     return stack
Example #18
0
 def convr(ch, x):
     """3x3 conv with batch norm and selu activation."""
     selu_conv = ConvolutionalLayer(
         n_output_chns=ch,
         kernel_size=3,
         with_bn=True,
         acti_func='selu',
         w_initializer=self.initializers['w'])
     return selu_conv(x, is_training=is_training)
Example #19
0
 def conv(ch, x):
     """3x3 conv followed by external batch norm and relu."""
     with tf.name_scope('conv'):
         plain_conv = ConvolutionalLayer(ch,
                                         3,
                                         with_bn=False,
                                         w_initializer=w_init)
         out = plain_conv(x, is_training=is_training)
         return tf.nn.relu(tf.contrib.layers.batch_norm(out))
    def layer_op(self, lr_images, is_training=True, keep_prob=1.0):
        """Sub-pixel super-resolution: conv feature maps followed by a
        periodic resample (pixel shuffle) that trades channels for
        spatial resolution."""
        full_shape = lr_images.shape.as_list()
        batch_size = full_shape[0]
        input_shape = full_shape[1:]
        n_of_dims = len(input_shape) - 1

        if batch_size is None:
            raise ValueError('The batch size must be known and fixed.')
        if any(i is None or i <= 0 for i in input_shape):
            raise ValueError('The image shape must be known in advance.')

        n_of_channels = input_shape[-1]

        features = lr_images
        for i, (ksize, n_of_features) in enumerate(self.layer_configurations):
            name = 'fmap_{}'.format(i)
            if n_of_features > 0:
                acti = self.acti_func
            else:
                # last layer: linear, with enough channels for the shuffle
                n_of_features = (n_of_channels
                                 * self.upsample_factor ** n_of_dims)
                acti = None
            conv = ConvolutionalLayer(n_of_features,
                                      kernel_size=ksize,
                                      acti_func=acti,
                                      name=name,
                                      **self.conv_layer_params)
            features = conv(features,
                            is_training=is_training,
                            keep_prob=keep_prob)

        # Setting the number of output features to the known value
        # obtained from the input shape results in a ValueError as
        # of TF 1.12 — hence the trailing None.
        output_shape = ([batch_size]
                        + [self.upsample_factor * i
                           for i in input_shape[:-1]]
                        + [None])

        return tf.contrib.periodic_resample.periodic_resample(features,
                                                              output_shape,
                                                              name='shuffle')
Example #21
0
    def create_network(self):
        """Build the DenseVNet parts.

        :return: namedtuple ``DenseVNet`` with fields ``initial_conv``,
            ``dense_vblocks`` and ``final_conv``
        """
        hyperparams = self.hyperparams

        # Initial strided convolution
        initial_conv = ConvolutionalLayer(
            hyperparams['n_initial_conv_channels'], kernel_size=5, stride=2)

        # One dense vblock per entry in the dense-channel configuration
        dense_ch = hyperparams["n_dense_channels"]
        seg_ch = hyperparams["n_seg_channels"]
        down_ch = hyperparams["n_down_channels"]
        dil_rate = hyperparams["dilation_rates"]
        use_bdo = hyperparams['use_bdo']

        dense_vblocks = [
            DenseFeatureStackBlockWithSkipAndDownsample(
                n_dense_channels=dense_ch[i],
                kernel_size=3,
                dilation_rates=dil_rate[i],
                n_seg_channels=seg_ch[i],
                n_down_channels=down_ch[i],
                use_bdo=use_bdo,
                acti_func='relu')
            for i in range(len(dense_ch))]

        # Final segmentation convolution: bias, no feature normalisation
        final_conv = ConvolutionalLayer(
            self.num_classes,
            kernel_size=hyperparams['seg_kernel_size'],
            feature_normalization=None,
            with_bias=True)

        DenseVNet = namedtuple(
            'DenseVNet', ['initial_conv', 'dense_vblocks', 'final_conv'])
        return DenseVNet(initial_conv=initial_conv,
                         dense_vblocks=dense_vblocks,
                         final_conv=final_conv)
Example #22
0
    def create_network(self):
        """Assemble the DenseVNet description: initial BN and conv, the
        dense vblocks, and the final segmentation conv."""
        hyper = self.hyperparameters

        # Initial strided convolution
        net_initial_conv = ConvolutionalLayer(hyper['n_input_channels'][0],
                                              kernel_size=5,
                                              stride=2)

        # Per-block configuration; the last block has no downsampling
        downsample_channels = list(hyper['n_input_channels'][1:]) + [None]
        num_blocks = len(hyper["n_dense_channels"])
        use_bdo = self.architecture_parameters['use_bdo']

        net_dense_vblocks = []
        for idx in range(num_blocks):
            dblock = DenseFeatureStackBlockWithSkipAndDownsample(
                hyper["n_dense_channels"][idx],  # dense channels
                3,                               # kernel size
                hyper["dilation_rates"][idx],    # dilation rate
                hyper["n_seg_channels"][idx],    # segmentation channels
                downsample_channels[idx],        # downsampling channels
                use_bdo,
                acti_func='relu')
            net_dense_vblocks.append(dblock)

        # Final segmentation convolution: bias on, batch norm off
        net_seg_layer = ConvolutionalLayer(self.num_classes,
                                           kernel_size=hyper['final_kernel'],
                                           with_bn=False,
                                           with_bias=True)

        return DenseVNetDesc(initial_bn=BNLayer(),
                             initial_conv=net_initial_conv,
                             dense_vblocks=net_dense_vblocks,
                             seg_layer=net_seg_layer)
Example #23
0
 def down(ch, x):
     """Halve the spatial resolution with a stride-2 selu conv."""
     with tf.name_scope('downsample'):
         strided_conv = ConvolutionalLayer(
             n_output_chns=ch,
             kernel_size=3,
             stride=2,
             with_bn=True,
             acti_func='selu',
             w_initializer=self.initializers['w'])
         return strided_conv(x, is_training=is_training)
Example #24
0
 def conv(ch, x):
     """3x3 relu conv with batch norm and no bias."""
     with tf.name_scope('conv'):
         relu_conv = ConvolutionalLayer(
             n_output_chns=ch,
             kernel_size=3,
             with_bn=True,
             with_bias=False,
             acti_func='relu',
             w_initializer=self.initializers['w'])
         return relu_conv(x, is_training=is_training)
Example #25
0
    def layer_op(self, images, is_training=True, layer_id=-1, **unused_kwargs):
        """Three-block U-Net: downsample, upsample, merge, then a 1x1
        output convolution.

        :param images: tensor, network input; spatial dims must be
            divisible by 4 and at least 21
        :param is_training: boolean, True if network is in training mode
        :param layer_id: not in use
        :return: tensor with ``num_classes`` channels
        """
        # image_size  should be divisible by 4
        assert layer_util.check_spatial_dims(images, lambda x: x % 4 == 0)
        assert layer_util.check_spatial_dims(images, lambda x: x >= 21)
        # contracting path: conv block + pooling, keeping the skip branch
        block_layer = UNetBlock('DOWNSAMPLE',
                                (self.n_features[0], self.n_features[1]),
                                (3, 3),
                                with_downsample_branch=True,
                                w_initializer=self.initializers['w'],
                                w_regularizer=self.regularizers['w'],
                                acti_func=self.acti_func,
                                name='d0')
        pool_1, conv_1 = block_layer(images, is_training)
        print(block_layer)

        # bottom block: conv then upsample back; skip branch not needed
        block_layer = UNetBlock('UPSAMPLE',
                                (self.n_features[1], self.n_features[2]),
                                (3, 3),
                                with_downsample_branch=False,
                                w_initializer=self.initializers['w'],
                                w_regularizer=self.regularizers['w'],
                                acti_func=self.acti_func,
                                name='d1')
        up_1, _ = block_layer(pool_1, is_training)
        print(block_layer)

        # expanding path: 'NONE' means conv block with no resampling
        block_layer = UNetBlock(
            'NONE', (self.n_features[1], self.n_features[1], self.num_classes),
            (3, 3),
            with_downsample_branch=True,
            w_initializer=self.initializers['w'],
            w_regularizer=self.regularizers['w'],
            acti_func=self.acti_func,
            name='u0')
        # crop the skip connection so it matches the upsampled tensor
        crop_layer = CropLayer(border=4, name='crop-8')
        concat_1 = ElementwiseLayer('CONCAT')(crop_layer(conv_1), up_1)
        print(block_layer)

        # for the last layer, upsampling path is not used
        _, output_tensor = block_layer(concat_1, is_training)

        # final 1x1 linear conv mapping features to `num_classes` channels
        output_conv_op = ConvolutionalLayer(
            n_output_chns=self.num_classes,
            kernel_size=1,
            w_initializer=self.initializers['w'],
            w_regularizer=self.regularizers['w'],
            acti_func=None,
            name='{}'.format(self.num_classes),
            padding='VALID',
            with_bn=False,
            with_bias=True)
        final_output_tensor = output_conv_op(output_tensor, is_training)
        print(output_conv_op)

        return final_output_tensor
Example #26
0
        def conv(ch, x, s):
            """Conv + BN, residual addition with `s`, then selu activation."""
            conv_layer = ConvolutionalLayer(
                n_output_chns=ch,
                kernel_size=3,
                with_bn=True,
                w_initializer=self.initializers['w'])
            selu = ActiLayer(func='selu')
            # combine the two flows before activating
            return selu(conv_layer(x, is_training=is_training) + s)
Example #27
0
    def create_block(self):
        """Create the dense stack, the skip conv, and the optional
        downsampling conv for this block."""
        stack = DenseFeatureStackBlock(self.n_dense_channels,
                                       self.kernel_size,
                                       self.dilation_rates,
                                       self.use_bdo,
                                       **self.kwargs)

        skip = ConvolutionalLayer(self.n_seg_channels,
                                  kernel_size=self.kernel_size,
                                  **self.kwargs)

        # downsampling conv exists only when its channel count is configured
        if self.n_downsample_channels is None:
            down = None
        else:
            down = ConvolutionalLayer(self.n_downsample_channels,
                                      kernel_size=self.kernel_size,
                                      stride=2,
                                      **self.kwargs)

        return DenseSDBlockDesc(dense_fstack=stack, conv=skip, down=down)
Example #28
0
    def layer_op(self, input_tensor, is_training, bn_momentum=0.9):
        """Apply the configured sequence of convolutions in order."""
        flow = input_tensor
        for kernel_size, n_features in zip(self.kernels, self.n_chns):
            conv = ConvolutionalLayer(n_output_chns=n_features,
                                      kernel_size=kernel_size,
                                      w_initializer=self.initializers['w'],
                                      w_regularizer=self.regularizers['w'],
                                      acti_func=self.acti_func,
                                      name='{}'.format(n_features))
            flow = conv(flow, is_training, bn_momentum)
        return flow
Example #29
0
 def feature_block(ch, image):
     """5x5 selu conv over the image, then a convr/conv residual pair."""
     with tf.name_scope('feature'):
         first = ConvolutionalLayer(
             n_output_chns=ch,
             kernel_size=5,
             with_bias=True,
             with_bn=False,
             acti_func='selu',
             w_initializer=self.initializers['w'],
             b_initializer=self.initializers['b'])
         d_h1s = first(image, is_training=is_training)
         # refinement via the sibling helpers `convr` and `conv`
         return conv(ch, convr(ch, d_h1s), d_h1s)
Example #30
0
    def layer_op(self, images, is_training):
        """3x3 relu conv followed by a 1x1 linear conv."""
        shared = dict(w_initializer=self.initializers['w'],
                      w_regularizer=self.regularizers['w'],
                      b_initializer=self.initializers['b'],
                      b_regularizer=self.regularizers['b'])

        hidden_conv = ConvolutionalLayer(self.hidden_features,
                                         kernel_size=3,
                                         acti_func='relu',
                                         name='conv_input',
                                         **shared)
        output_conv = ConvolutionalLayer(self.num_classes,
                                         kernel_size=1,
                                         acti_func=None,
                                         name='conv_output',
                                         **shared)

        flow = hidden_conv(images, is_training)
        return output_conv(flow, is_training)
Example #31
0
    def test_restore_block(self):
        """Blocks with a restore entry load checkpoint weights; others keep
        their own initialisers.

        Fixes: ``tf.ones`` was given float-valued shapes
        (``[1., 5., 5., 1.]``) — shapes must be ints; local ``vars``
        renamed to ``var_list`` so it no longer shadows the builtin.
        """
        definition = {'foo': [1], 'bar/conv_/w': np.random.randn(3, 3, 1, 3),
                      'bar2/conv_/w': np.random.randn(3, 3, 1, 3),
                      'foo3/conv_/w': np.random.randn(3, 3, 1, 3),
                      'bar/bing/boffin': [2]}
        checkpoint_name = self.make_checkpoint('chk1', definition)
        tf.reset_default_graph()
        # 'foo' restores from checkpoint scope 'bar' via the collection
        block1 = ConvolutionalLayer(3, 3, with_bn=False, name='foo')
        b1 = block1(tf.ones([1, 5, 5, 1]))
        tf.add_to_collection(RESTORABLE,
                             ('foo', checkpoint_name, 'bar'))
        # 'bar' has no restore entry: keeps its constant initializer
        block2 = ConvolutionalLayer(4, 3, name='bar', with_bn=False,
                                    w_initializer=tf.constant_initializer(1.))
        b2 = block2(tf.ones([1, 5, 5, 1]))
        # 'foo2' restores from scope 'bar2' via restore_from_checkpoint
        block3 = ConvolutionalLayer(3, 3, with_bn=False, name='foo2')
        block3.restore_from_checkpoint(checkpoint_name, 'bar2')
        b3 = block3(tf.ones([1, 5, 5, 1]))
        # 'foo3' restores from its own scope name
        block4 = ConvolutionalLayer(3, 3, with_bn=False, name='foo3')
        block4.restore_from_checkpoint(checkpoint_name)
        b4 = block4(tf.ones([1, 5, 5, 1]))
        # NOTE(review): duplicate RESTORABLE entry — presumably checking
        # that a repeated registration is harmless; confirm intent
        tf.add_to_collection(RESTORABLE,
                             ('foo', checkpoint_name, 'bar'))
        init_op = global_vars_init_or_restore()
        all_vars = tf.global_variables()
        with self.test_session() as sess:
            sess.run(init_op)

            def getvar(x):
                return [v for v in all_vars if v.name == x][0]

            foo_w_var = getvar(block1.layer_scope().name + '/conv_/w:0')
            bar_w_var = getvar(block2.layer_scope().name + '/conv_/w:0')
            foo2_w_var = getvar(block3.layer_scope().name + '/conv_/w:0')
            foo3_w_var = getvar(block4.layer_scope().name + '/conv_/w:0')
            var_list = [foo_w_var, bar_w_var, foo2_w_var, foo3_w_var]
            [foo_w, bar_w, foo2_w, foo3_w] = sess.run(var_list)
            self.assertAllClose(foo_w, definition['bar/conv_/w'])
            self.assertAllClose(bar_w, np.ones([3, 3, 1, 4]))
            self.assertAllClose(foo2_w, definition['bar2/conv_/w'])
            self.assertAllClose(foo3_w, definition['foo3/conv_/w'])