Example #1
    def layer_op(self, input_tensor, is_training):
        output_tensor = input_tensor
        for (kernel_size, n_features) in zip(self.kernels, self.n_chns):
            conv_op = ConvolutionalLayer(n_output_chns=n_features,
                                         kernel_size=kernel_size,
                                         w_initializer=self.initializers['w'],
                                         w_regularizer=self.regularizers['w'],
                                         acti_func=self.acti_func,
                                         name='{}'.format(n_features),
                                         padding='VALID',
                                         with_bn=False,
                                         with_bias=True)
            output_tensor = conv_op(output_tensor, is_training)

        if self.with_downsample_branch:
            branch_output = output_tensor
        else:
            branch_output = None

        if self.func == 'DOWNSAMPLE':
            downsample_op = DownSampleLayer('MAX',
                                            kernel_size=2,
                                            stride=2,
                                            name='down_2x_isotropic')
            output_tensor = downsample_op(output_tensor)
        elif self.func == 'DOWNSAMPLE_ANISOTROPIC':
            downsample_op = DownSampleLayer('MAX',
                                            kernel_size=[2, 2, 1],
                                            stride=[2, 2, 1],
                                            name='down_2x2x1')
            output_tensor = downsample_op(output_tensor)
        elif self.func == 'UPSAMPLE':
            upsample_op = DeconvolutionalLayer(n_output_chns=self.n_chns[-1],
                                               kernel_size=2,
                                               stride=2,
                                               name='up_2x_isotropic',
                                               with_bn=False,
                                               with_bias=True)
            output_tensor = upsample_op(output_tensor, is_training)
        elif self.func == 'UPSAMPLE_ANISOTROPIC':
            upsample_op = DeconvolutionalLayer(n_output_chns=self.n_chns[-1],
                                               kernel_size=[2, 2, 1],
                                               stride=[2, 2, 1],
                                               name='up_2x2x1',
                                               with_bn=False,
                                               with_bias=True)
            output_tensor = upsample_op(output_tensor, is_training)
        elif self.func == 'NONE':
            pass  # do nothing
        return output_tensor, branch_output
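
A quick sketch of why the anisotropic branches above use [2, 2, 1] kernels and strides: with stride [2, 2, 1] only the in-plane dimensions change, the usual choice for volumes whose slice spacing is much coarser than the in-plane resolution. Plain Python, for illustration only (the sizes assume exact division):

    def strided_shape(spatial_shape, stride):
        # shape change produced by an exactly-dividing stride
        return [s // st for s, st in zip(spatial_shape, stride)]

    assert strided_shape([64, 64, 24], [2, 2, 1]) == [32, 32, 24]  # 'DOWNSAMPLE_ANISOTROPIC'
    assert strided_shape([64, 64, 24], [2, 2, 2]) == [32, 32, 12]  # 'DOWNSAMPLE'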
Example #2
    def layer_op(self, thru_tensor, is_training):
        for (kernel_size, n_features) in zip(self.kernels, self.n_chns):
            # no activation before final 1x1x1 conv layer 
            acti_func = self.acti_func if kernel_size > 1 else None
            feature_normalization = 'instance' if acti_func is not None else None

            conv_op = ConvolutionalLayer(n_output_chns=n_features,
                                         kernel_size=kernel_size,
                                         w_initializer=self.initializers['w'],
                                         w_regularizer=self.regularizers['w'],
                                         acti_func=acti_func,
                                         name='{}'.format(n_features),
                                         feature_normalization=feature_normalization)
            thru_tensor = conv_op(thru_tensor, is_training)

        if self.with_downsample_branch:
            branch_output = thru_tensor
        else:
            branch_output = None

        if self.func == 'DOWNSAMPLE':
            downsample_op = DownSampleLayer('MAX', kernel_size=2, stride=2, name='down_2x2')
            thru_tensor = downsample_op(thru_tensor)
        elif self.func == 'UPSAMPLE':
            up_shape = [2 * int(thru_tensor.shape[i]) for i in (1, 2, 3)]
            upsample_op = LinearResizeLayer(up_shape)
            thru_tensor = upsample_op(thru_tensor)

        elif self.func == 'NONE':
            pass  # do nothing
        return thru_tensor, branch_output
Example #3
    def _test_nd_downsample_output_shape(self, rank, param_dict, output_shape):
        if rank == 2:
            input_data = self.get_2d_input()
        elif rank == 3:
            input_data = self.get_3d_input()
        downsample_layer = DownSampleLayer(**param_dict)
        output_data = downsample_layer(input_data)
        print(downsample_layer)
        with self.cached_session() as sess:
            sess.run(tf.global_variables_initializer())
            out = sess.run(output_data)
            self.assertAllClose(output_shape, out.shape)
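
The test above only checks output shapes, and the expected values follow the standard pooling arithmetic. A minimal, framework-free sketch; whether a given DownSampleLayer instance uses 'SAME' or 'VALID' padding is an assumption here, so check the constructor arguments in your own code:

    import math

    def pooled_size(input_size, kernel_size, stride, padding='SAME'):
        # expected size of one spatial dimension after pooling
        if padding == 'SAME':
            return math.ceil(input_size / stride)
        # 'VALID': only complete windows are counted
        return (input_size - kernel_size) // stride + 1

    assert pooled_size(16, kernel_size=2, stride=2) == 8  # 2x pooling halves the size
    assert pooled_size(9, 2, 2, 'SAME') == 5              # odd sizes round up under SAME
    assert pooled_size(9, 2, 2, 'VALID') == 4             # ...and down under VALID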
Example #4
File: vgg21.py  Project: taigw/Demic
    def layer_op(self, input_tensor, is_training, bn_momentum=0.9):
        output_tensor = input_tensor
        for (n_chn, dilation) in zip(self.n_chns, self.dilations):
            conv_op = ConvolutionalLayer(n_output_chns=n_chn,
                                         kernel_size=[1, 3, 3],
                                         dilation=[1, dilation, dilation],
                                         w_initializer=self.initializers['w'],
                                         w_regularizer=self.regularizers['w'],
                                         moving_decay=self.moving_decay,
                                         acti_func=self.acti_func,
                                         name='{}'.format(dilation))
            pool_op = DownSampleLayer('MAX', kernel_size=2, stride=2)
            output_tensor = conv_op(output_tensor, is_training, bn_momentum)
            output_tensor = pool_op(output_tensor)

        return output_tensor
Example #5
    def layer_op(self, input_tensor, is_training, bn_momentum):
        output_tensor = input_tensor
        for i in range(len(self.n_chns)):
            conv_op = ConvolutionalLayer(n_output_chns=self.n_chns[i],
                                         kernel_size=[1, 3, 3],
                                         with_bias=True,
                                         w_initializer=self.initializers['w'],
                                         w_regularizer=self.regularizers['w'],
                                         acti_func=self.acti_func,
                                         name='{}'.format(i))
            output_tensor = conv_op(output_tensor, is_training, bn_momentum)
        pooling = DownSampleLayer('MAX',
                                  kernel_size=(1, 2, 2),
                                  stride=(1, 2, 2),
                                  name='down1')
        output_tensor = pooling(output_tensor)
        return output_tensor
Example #6
    def layer_op(self, input_tensor, is_training, bn_momentum=0.9):
        output_tensor = input_tensor
        for (kernel_size, n_features) in zip(self.kernels, self.n_chns):
            conv_op = ConvolutionalLayer(n_output_chns=n_features,
                                         kernel_size=kernel_size,
                                         padding=self.padding,
                                         w_initializer=self.initializers['w'],
                                         w_regularizer=self.regularizers['w'],
                                         acti_func=self.acti_func,
                                         name='{}'.format(n_features))
            output_tensor = conv_op(output_tensor, is_training, bn_momentum)
        if self.pooling:
            pooling_op = DownSampleLayer('MAX',
                                         kernel_size=2,
                                         stride=2,
                                         name='down_2x2')
            output_tensor = pooling_op(output_tensor)
        return output_tensor
Example #7
    def layer_op(self, lr_image, is_training=True, keep_prob=1.0):
        input_shape = lr_image.get_shape().as_list()
        batch_size = input_shape.pop(0)

        if batch_size is None:
            raise ValueError("The batch size must be known and fixed.")
        if any(i is None or i <= 0 for i in input_shape):
            raise ValueError("The image shape must be known in advance.")

        # Make sure there are enough feature channels for the
        # periodic shuffling
        features = ConvolutionalLayer(
            n_output_chns=(self.n_output_chns *
                           self.upsample_factor**(len(input_shape) - 1)),
            kernel_size=self.kernel_size,
            acti_func=None,
            name="subpixel_conv",
            **self.conv_layer_params)(input_tensor=lr_image,
                                      is_training=is_training,
                                      keep_prob=keep_prob)

        # Setting the number of output features to the known value
        # obtained from the input shape results in a ValueError as
        # of TF 1.12
        sr_image = tf.contrib.periodic_resample.periodic_resample(
            values=features,
            shape=([batch_size] +
                   [self.upsample_factor * i
                    for i in input_shape[:-1]] + [None]),
            name="periodic_shuffle",
        )

        # Averaging out the values without downsampling to counteract
        # the periodicity of periodic shuffling as per Sugawara et al.
        if self.use_avg:
            sr_image = DownSampleLayer(
                func="AVG",
                kernel_size=self.upsample_factor,
                stride=1,
                padding="SAME",
                name="averaging",
            )(input_tensor=sr_image)

        return sr_image
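
To make the channel arithmetic above concrete: for a 2-D image the convolution emits n_output_chns * upsample_factor**2 channels (upsample_factor**(len(input_shape) - 1) in the snippet), and the periodic shuffle moves each group of upsample_factor**2 channels into an upsample_factor x upsample_factor spatial block. A NumPy sketch of 2-D pixel shuffling, assuming NHWC layout; this is an illustration, not the NiftyNet or TF API:

    import numpy as np

    def pixel_shuffle_2d(x, r):
        # rearrange (N, H, W, C*r*r) -> (N, H*r, W*r, C)
        n, h, w, c = x.shape
        assert c % (r * r) == 0
        c_out = c // (r * r)
        x = x.reshape(n, h, w, r, r, c_out)  # split channels into r x r blocks
        x = x.transpose(0, 1, 3, 2, 4, 5)    # interleave the blocks with pixels
        return x.reshape(n, h * r, w * r, c_out)

    # upsample_factor=2 with 3 output channels needs 3 * 2**2 = 12 conv channels
    features = np.zeros((1, 8, 8, 12))
    assert pixel_shuffle_2d(features, r=2).shape == (1, 16, 16, 3)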
Example #8
    def layer_op(self, input_tensor, is_training):
        """

        :param input_tensor: tensor, input to the UNet block
        :param is_training: boolean, True if network is in training mode
        :return: output tensor of the UNet block and branch before downsampling (if required)
        """
        output_tensor = input_tensor
        for (kernel_size, n_features) in zip(self.kernels, self.n_chns):
            conv_op = ConvolutionalLayer(n_output_chns=n_features,
                                         kernel_size=kernel_size,
                                         w_initializer=self.initializers['w'],
                                         w_regularizer=self.regularizers['w'],
                                         acti_func=self.acti_func,
                                         name='{}'.format(n_features))
            output_tensor = conv_op(output_tensor, is_training)

        if self.with_downsample_branch:
            branch_output = output_tensor
        else:
            branch_output = None

        if self.func == 'DOWNSAMPLE':
            downsample_op = DownSampleLayer('MAX',
                                            kernel_size=2,
                                            stride=2,
                                            name='down_2x2')
            output_tensor = downsample_op(output_tensor)
        elif self.func == 'UPSAMPLE':
            upsample_op = DeconvolutionalLayer(n_output_chns=self.n_chns[-1],
                                               kernel_size=2,
                                               stride=2,
                                               name='up_2x2')
            output_tensor = upsample_op(output_tensor, is_training)
        elif self.func == 'NONE':
            pass  # do nothing
        return output_tensor, branch_output
Example #9
File: unet2d.py  Project: taigw/Demic
    def layer_op(self, images, is_training, bn_momentum=0.9, layer_id=-1):
        # in-plane image size should be divisible by 16 (four 2x2 poolings)
        # spatial_dims = images.get_shape()[1:-1].as_list()
        # assert (spatial_dims[-2] % 16 == 0)
        # assert (spatial_dims[-1] % 16 == 0)

        block1 = UNetBlock((self.n_features[0], self.n_features[0]),
                           ((1, 3, 3), (1, 3, 3)),
                           w_initializer=self.initializers['w'],
                           w_regularizer=self.regularizers['w'],
                           acti_func=self.acti_func,
                           name='B1')

        block2 = UNetBlock((self.n_features[1], self.n_features[1]),
                           ((1, 3, 3), (1, 3, 3)),
                           w_initializer=self.initializers['w'],
                           w_regularizer=self.regularizers['w'],
                           acti_func=self.acti_func,
                           name='B2')

        block3 = UNetBlock((self.n_features[2], self.n_features[2]),
                           ((1, 3, 3), (1, 3, 3)),
                           w_initializer=self.initializers['w'],
                           w_regularizer=self.regularizers['w'],
                           acti_func=self.acti_func,
                           name='B3')

        block4 = UNetBlock((self.n_features[3], self.n_features[3]),
                           ((1, 3, 3), (1, 3, 3)),
                           w_initializer=self.initializers['w'],
                           w_regularizer=self.regularizers['w'],
                           acti_func=self.acti_func,
                           name='B4')

        block5 = UNetBlock((self.n_features[4], self.n_features[4]),
                           ((1, 3, 3), (1, 3, 3)),
                           w_initializer=self.initializers['w'],
                           w_regularizer=self.regularizers['w'],
                           acti_func=self.acti_func,
                           name='B5')

        block6 = UNetBlock((self.n_features[3], self.n_features[3]),
                           ((1, 3, 3), (1, 3, 3)),
                           w_initializer=self.initializers['w'],
                           w_regularizer=self.regularizers['w'],
                           acti_func=self.acti_func,
                           name='B6')

        block7 = UNetBlock((self.n_features[2], self.n_features[2]),
                           ((1, 3, 3), (1, 3, 3)),
                           w_initializer=self.initializers['w'],
                           w_regularizer=self.regularizers['w'],
                           acti_func=self.acti_func,
                           name='B7')

        block8 = UNetBlock((self.n_features[1], self.n_features[1]),
                           ((1, 3, 3), (1, 3, 3)),
                           w_initializer=self.initializers['w'],
                           w_regularizer=self.regularizers['w'],
                           acti_func=self.acti_func,
                           name='B8')

        block9 = UNetBlock((self.n_features[0], self.n_features[0]),
                           ((1, 3, 3), (1, 3, 3)),
                           w_initializer=self.initializers['w'],
                           w_regularizer=self.regularizers['w'],
                           acti_func=self.acti_func,
                           name='B9')

        conv = ConvLayer(n_output_chns=self.num_classes,
                         kernel_size=(1, 1, 1),
                         w_initializer=self.initializers['w'],
                         w_regularizer=self.regularizers['w'],
                         with_bias=True,
                         name='conv')
        down1 = DownSampleLayer('MAX',
                                kernel_size=(1, 2, 2),
                                stride=(1, 2, 2),
                                name='down1')
        down2 = DownSampleLayer('MAX',
                                kernel_size=(1, 2, 2),
                                stride=(1, 2, 2),
                                name='down2')
        down3 = DownSampleLayer('MAX',
                                kernel_size=(1, 2, 2),
                                stride=(1, 2, 2),
                                name='down3')
        down4 = DownSampleLayer('MAX',
                                kernel_size=(1, 2, 2),
                                stride=(1, 2, 2),
                                name='down4')

        up1 = DeconvolutionalLayer(n_output_chns=self.n_features[3],
                                   kernel_size=(1, 2, 2),
                                   stride=(1, 2, 2),
                                   name='up1')
        up2 = DeconvolutionalLayer(n_output_chns=self.n_features[2],
                                   kernel_size=(1, 2, 2),
                                   stride=(1, 2, 2),
                                   name='up2')
        up3 = DeconvolutionalLayer(n_output_chns=self.n_features[1],
                                   kernel_size=(1, 2, 2),
                                   stride=(1, 2, 2),
                                   name='up3')
        up4 = DeconvolutionalLayer(n_output_chns=self.n_features[0],
                                   kernel_size=(1, 2, 2),
                                   stride=(1, 2, 2),
                                   name='up4')

        f1 = block1(images, is_training, bn_momentum)
        d1 = down1(f1)
        f2 = block2(d1, is_training, bn_momentum)
        d2 = down2(f2)
        f3 = block3(d2, is_training, bn_momentum)
        d3 = down3(f3)
        f4 = block4(d3, is_training, bn_momentum)
        d4 = down4(f4)
        f5 = block5(d4, is_training, bn_momentum)
        # dropout added relative to the original U-Net
        f5 = tf.nn.dropout(f5, self.dropout)

        f5up = up1(f5, is_training, bn_momentum)
        f4cat = tf.concat((f4, f5up), axis=-1)
        f6 = block6(f4cat, is_training, bn_momentum)
        # dropout added relative to the original U-Net
        f6 = tf.nn.dropout(f6, self.dropout)

        f6up = up2(f6, is_training, bn_momentum)
        f3cat = tf.concat((f3, f6up), axis=-1)
        f7 = block7(f3cat, is_training, bn_momentum)
        # dropout added relative to the original U-Net
        f7 = tf.nn.dropout(f7, self.dropout)

        f7up = up3(f7, is_training, bn_momentum)
        f2cat = tf.concat((f2, f7up), axis=-1)
        f8 = block8(f2cat, is_training, bn_momentum)
        # dropout added relative to the original U-Net
        f8 = tf.nn.dropout(f8, self.dropout)

        f8up = up4(f8, is_training, bn_momentum)
        f1cat = tf.concat((f1, f8up), axis=-1)
        f9 = block9(f1cat, is_training, bn_momentum)
        # dropout added relative to the original U-Net
        f9 = tf.nn.dropout(f9, self.dropout)
        output = conv(f9)
        return output
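
The commented-out assertions at the top of this snippet hint at the input constraint: the four (1, 2, 2) max-poolings halve the in-plane dimensions four times, so the skip concatenations only line up if height and width are divisible by 2**4 = 16. A minimal check, assuming an N x D x H x W x C tensor (hypothetical helper, for illustration):

    def inplane_dims_ok(images, n_poolings=4):
        # height and width must survive n_poolings exact halvings
        height, width = images.get_shape().as_list()[2:4]
        factor = 2 ** n_poolings
        return all(d is not None and d % factor == 0 for d in (height, width))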
Example #10
    def layer_op(self, input_tensor, is_training, layer_id=-1):
        layer_instances = []
        scores_instances = []
        first_conv_layer = ConvolutionalLayer(
            n_output_chns=self.num_features[0],
            with_bn=True,
            kernel_size=3,
            w_initializer=self.initializers['w'],
            w_regularizer=self.regularizers['w'],
            acti_func=self.acti_func,
            name='conv_1_1')
        flow = first_conv_layer(input_tensor, is_training)
        layer_instances.append((first_conv_layer, flow))

        # SCALE 1
        with DilatedTensor(flow, dilation_factor=1) as dilated:
            for j in range(self.num_res_blocks[0]):
                res_block = HighResBlock(self.num_features[0],
                                         acti_func=self.acti_func,
                                         w_initializer=self.initializers['w'],
                                         w_regularizer=self.regularizers['w'],
                                         name='%s_%d' % ('res_1', j))
                dilated.tensor = res_block(dilated.tensor, is_training)
                layer_instances.append((res_block, dilated.tensor))
        flow = dilated.tensor

        score_layer_scale1 = ScoreLayer(
            num_features=self.num_fea_score_layers[0],
            num_classes=self.num_classes)
        score_1 = score_layer_scale1(flow, is_training)
        scores_instances.append(score_1)
        # if is_training:
        #     loss_s1 = WGDL(score_1, labels)
        #     tf.add_to_collection('multiscale_loss', loss_s1/num_scales)

        # SCALE 2
        with DilatedTensor(flow, dilation_factor=2) as dilated:
            for j in range(self.num_res_blocks[1]):
                res_block = HighResBlock(self.num_features[1],
                                         acti_func=self.acti_func,
                                         w_initializer=self.initializers['w'],
                                         w_regularizer=self.regularizers['w'],
                                         name='%s_%d' % ('res_2', j))
                dilated.tensor = res_block(dilated.tensor, is_training)
                layer_instances.append((res_block, dilated.tensor))
        flow = dilated.tensor

        score_layer_scale2 = ScoreLayer(
            num_features=self.num_fea_score_layers[1],
            num_classes=self.num_classes)
        score_2 = score_layer_scale2(flow, is_training)

        # score_2 = self.score_layer(flow, self.num_fea_score_layers[1])
        up_score_2 = score_2
        scores_instances.append(up_score_2)
        # if is_training:
        #     loss_s2 =  self.WGDL(score_2, labels)
        #     # loss_s2 = self.new_dice_loss(score_2, labels)
        #     tf.add_to_collection('multiscale_loss', loss_s2/num_scales)

        # SCALE 3
        # downsampling factor = 2
        downsample_scale3 = DownSampleLayer(func='AVG',
                                            kernel_size=2,
                                            stride=2)
        flow = downsample_scale3(flow)
        layer_instances.append((downsample_scale3, flow))
        with DilatedTensor(flow, dilation_factor=1) as dilated:
            for j in range(self.num_res_blocks[2]):
                res_block = HighResBlock(self.num_features[2],
                                         acti_func=self.acti_func,
                                         w_initializer=self.initializers['w'],
                                         w_regularizer=self.regularizers['w'],
                                         name='%s_%d' % ('res_3', j))
                dilated.tensor = res_block(dilated.tensor, is_training)
                layer_instances.append((res_block, dilated.tensor))
        flow = dilated.tensor

        score_layer_scale3 = ScoreLayer(
            num_features=self.num_fea_score_layers[2],
            num_classes=self.num_classes)
        score_3 = score_layer_scale3(flow, is_training)

        upsample_indep_scale3 = UpSampleLayer(
            func='CHANNELWISE_DECONV',
            kernel_size=2,
            stride=2,
            w_initializer=tf.constant_initializer(1.0, dtype=tf.float32))
        up_score_3 = upsample_indep_scale3(score_3)
        scores_instances.append(up_score_3)

        # up_score_3 = self.feature_indep_upsample_conv(score_3, factor=2)
        # if is_training:
        #     loss_s3 = self.WGDL(up_score_3, labels)
        #     # loss_s3 = self.new_dice_loss(up_score_3, labels)
        #     tf.add_to_collection('multiscale_loss', loss_s3/num_scales)

        # SCALE 4
        with DilatedTensor(flow, dilation_factor=2) as dilated:
            for j in range(self.num_res_blocks[3]):
                res_block = HighResBlock(self.num_features[3],
                                         acti_func=self.acti_func,
                                         w_initializer=self.initializers['w'],
                                         w_regularizer=self.regularizers['w'],
                                         name='%s_%d' % ('res_4', j))
                dilated.tensor = res_block(dilated.tensor, is_training)
                layer_instances.append((res_block, dilated.tensor))
        flow = dilated.tensor

        score_layer_scale4 = ScoreLayer(
            num_features=self.num_fea_score_layers[3],
            num_classes=self.num_classes)
        score_4 = score_layer_scale4(flow, is_training)

        upsample_indep_scale4 = UpSampleLayer(
            func='CHANNELWISE_DECONV',
            kernel_size=1,
            stride=2,
            w_initializer=tf.constant_initializer(1.0, dtype=tf.float32))
        up_score_4 = upsample_indep_scale4(score_4)
        scores_instances.append(up_score_4)

        # if is_training:
        #     loss_s4 = self.WGDL(up_score_4, labels)
        #     # loss_s4 = self.new_dice_loss(up_score_4, labels)
        #     tf.add_to_collection('multiscale_loss', loss_s4/num_scales)

        # FUSED SCALES
        merge_layer = MergeLayer('WEIGHTED_AVERAGE')
        soft_scores = []
        for s in scores_instances:
            soft_scores.append(tf.nn.softmax(s))
        fused_score = merge_layer(soft_scores)
        scores_instances.append(fused_score)
        if is_training:
            return scores_instances
        return fused_score
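
The fusion step softmaxes each scale's score map and merges them with MergeLayer('WEIGHTED_AVERAGE'). That layer's implementation is not shown here, so the following is only a hedged sketch of what a weighted average with learnable, normalized weights could look like in TF 1.x:

    import tensorflow as tf

    def weighted_average(tensors, name='merge'):
        # convex combination of same-shaped tensors with learnable weights
        with tf.variable_scope(name):
            logits = tf.get_variable('merge_logits', shape=[len(tensors)],
                                     initializer=tf.zeros_initializer())
            weights = tf.nn.softmax(logits)      # weights sum to one
            stacked = tf.stack(tensors, axis=0)  # shape: [k, ...]
            return tf.tensordot(weights, stacked, axes=1)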
Example #11
    def layer_op(self,
                 input_tensor,
                 is_training=True,
                 layer_id=-1,
                 keep_prob=0.5,
                 **unused_kwargs):
        """

        :param input_tensor: tensor to input to the network; each spatial dimension has to be divisible by 2**len(dilation_rates)
        :param is_training: boolean, True if network is in training mode
        :param layer_id: not in use
        :param keep_prob: double, probability of keeping a unit during drop-out
        :param unused_kwargs:
        :return: network prediction
        """
        hyperparams = self.hyperparams

        # Validate that dilation rates are compatible with input dimensions
        modulo = 2**(len(hyperparams['dilation_rates']))
        assert layer_util.check_spatial_dims(input_tensor,
                                             lambda x: x % modulo == 0)

        # Perform on the fly data augmentation
        if is_training and hyperparams['augmentation_scale'] > 0:
            augment_layer = AffineAugmentationLayer(
                hyperparams['augmentation_scale'], 'LINEAR', 'ZERO')
            input_tensor = augment_layer(input_tensor)

        ###################
        ### Feedforward ###
        ###################

        # Initialize network components
        dense_vnet = self.create_network()

        # Store output feature maps from each component
        feature_maps = []

        # Downsample input to the network
        downsample_layer = DownSampleLayer(func='AVG', kernel_size=3, stride=2)
        downsampled_tensor = downsample_layer(input_tensor)
        bn_layer = BNLayer()
        downsampled_tensor = bn_layer(downsampled_tensor,
                                      is_training=is_training)
        feature_maps.append(downsampled_tensor)

        # All feature maps should match the downsampled tensor's shape
        feature_map_shape = downsampled_tensor.shape.as_list()[1:-1]

        # Prepare initial input to dense_vblocks
        initial_features = dense_vnet.initial_conv(input_tensor,
                                                   is_training=is_training)
        channel_dim = len(input_tensor.shape) - 1
        down = tf.concat([downsampled_tensor, initial_features], channel_dim)

        # Feed downsampled input through dense_vblocks
        for dblock in dense_vnet.dense_vblocks:
            # Get skip layer and activation output
            skip, down = dblock(down,
                                is_training=is_training,
                                keep_prob=keep_prob)
            # Resize skip layer to original shape and add to feature maps
            skip = LinearResizeLayer(feature_map_shape)(skip)
            feature_maps.append(skip)

        # Merge feature maps
        all_features = tf.concat(feature_maps, channel_dim)

        # Perform final convolution to segment structures
        output = dense_vnet.final_conv(all_features, is_training=is_training)

        ######################
        ### Postprocessing ###
        ######################

        # Get the number of spatial dimensions of input tensor
        n_spatial_dims = input_tensor.shape.ndims - 2

        # Refine segmentation with prior
        if hyperparams['use_prior']:
            spatial_prior_shape = [hyperparams['prior_size']] * n_spatial_dims
            # The prior shape must be 4- or 5-D to work with the linear resize
            # layer, i.e. to conform to shape=[batch, X, Y, Z, channels]
            prior_shape = [1] + spatial_prior_shape + [1]
            spatial_prior = SpatialPriorBlock(prior_shape, feature_map_shape)
            output += spatial_prior()

        # Invert augmentation
        if is_training and hyperparams['augmentation_scale'] > 0:
            inverse_aug = augment_layer.inverse()
            output = inverse_aug(output)

        # Resize output to original size
        input_tensor_spatial_size = input_tensor.shape.as_list()[1:-1]
        output = LinearResizeLayer(input_tensor_spatial_size)(output)

        # Segmentation summary
        seg_argmax = tf.to_float(tf.expand_dims(tf.argmax(output, -1), -1))
        seg_summary = seg_argmax * (255. / self.num_classes - 1)

        # Image Summary
        norm_axes = list(range(1, n_spatial_dims + 1))
        mean, var = tf.nn.moments(input_tensor, axes=norm_axes, keep_dims=True)
        timg = tf.to_float(input_tensor - mean) / (tf.sqrt(var) * 2.)
        timg = (timg + 1.) * 127.
        single_channel = tf.reduce_mean(timg, -1, True)
        img_summary = tf.minimum(255., tf.maximum(0., single_channel))

        if n_spatial_dims == 2:
            tf.summary.image(tf.get_default_graph().unique_name('imgseg'),
                             tf.concat([img_summary, seg_summary], 1), 5,
                             [tf.GraphKeys.SUMMARIES])
        elif n_spatial_dims == 3:
            image3_axial(tf.get_default_graph().unique_name('imgseg'),
                         tf.concat([img_summary, seg_summary], 1), 5,
                         [tf.GraphKeys.SUMMARIES])
        else:
            raise NotImplementedError(
                'Image Summary only supports 2D and 3D images')

        return output
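
The assertion near the top requires every spatial dimension to be divisible by 2**len(dilation_rates), presumably because the resolution is halved once per dilation level. A minimal sketch of that check, assuming a batch-first, channels-last tensor; this is a hypothetical helper, not layer_util's actual implementation:

    def spatial_dims_divisible(tensor, n_levels):
        modulo = 2 ** n_levels
        spatial = tensor.shape.as_list()[1:-1]
        return all(dim is not None and dim % modulo == 0 for dim in spatial)

    # with three dilation rates, a 144-voxel edge passes (144 % 8 == 0)
    # while a 100-voxel edge fails (100 % 8 == 4)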
Example #12
    def layer_op(self, images, is_training, layer_id=-1, **unused_kwargs):
        """

        :param images: tensor, input to the network, size should be divisible by d_factor
        :param is_training: boolean, True if network is in training mode
        :param layer_id: not in use
        :param unused_kwargs:
        :return: tensor, network output
        """
        # image_size is defined as the largest context, then:
        #   downsampled path size: image_size / d_factor
        #   downsampled path output: image_size / d_factor - 16

        # to make sure same size of feature maps from both pathways:
        #   normal path size: (image_size / d_factor - 16) * d_factor + 16
        #   normal path output: (image_size / d_factor - 16) * d_factor

        # where 16 is fixed by the receptive field of conv layers
        # TODO: make sure label_size = (image_size / d_factor - 16) * d_factor

        # image_size has to be an odd number and divisible by 3 and
        # smaller than the smallest image size of the input volumes

        # label_size should be (image_size/d_factor - 16) * d_factor

        assert self.d_factor % 2 == 1  # to make the downsampling centered
        assert layer_util.check_spatial_dims(
            images, lambda x: x % self.d_factor == 0)
        # to make the crop centered
        assert layer_util.check_spatial_dims(images, lambda x: x % 2 == 1)
        # required by the receptive field
        assert layer_util.check_spatial_dims(
            images, lambda x: x > self.d_factor * 16)

        # crop 25x25x25 from 57x57x57
        crop_op = CropLayer(border=self.crop_diff, name='cropping_input')
        normal_path = crop_op(images)
        print(crop_op)

        # downsample 19x19x19 from 57x57x57
        downsample_op = DownSampleLayer(func='CONSTANT',
                                        kernel_size=self.d_factor,
                                        stride=self.d_factor,
                                        padding='VALID',
                                        name='downsample_input')
        downsample_path = downsample_op(images)
        print(downsample_op)

        # convolutions for both pathways
        for n_features in self.conv_features:
            # normal pathway convolutions
            conv_path_1 = ConvolutionalLayer(
                n_output_chns=n_features,
                kernel_size=3,
                padding='VALID',
                w_initializer=self.initializers['w'],
                w_regularizer=self.regularizers['w'],
                acti_func=self.acti_func,
                name='normal_conv')
            normal_path = conv_path_1(normal_path, is_training)
            print(conv_path_1)

            # downsampled pathway convolutions
            conv_path_2 = ConvolutionalLayer(
                n_output_chns=n_features,
                kernel_size=3,
                padding='VALID',
                w_initializer=self.initializers['w'],
                w_regularizer=self.regularizers['w'],
                acti_func=self.acti_func,
                name='downsample_conv')
            downsample_path = conv_path_2(downsample_path, is_training)
            print(conv_path_2)

        # upsampling the downsampled pathway
        downsample_path = UpSampleLayer('REPLICATE',
                                        kernel_size=self.d_factor,
                                        stride=self.d_factor)(downsample_path)

        # concatenate both pathways
        output_tensor = ElementwiseLayer('CONCAT')(normal_path,
                                                   downsample_path)

        # 1x1x1 convolution layer
        for n_features in self.fc_features:
            conv_fc = ConvolutionalLayer(
                n_output_chns=n_features,
                kernel_size=1,
                acti_func=self.acti_func,
                w_initializer=self.initializers['w'],
                w_regularizer=self.regularizers['w'],
                name='conv_1x1x1_{}'.format(n_features))
            output_tensor = conv_fc(output_tensor, is_training)
            print(conv_fc)

        return output_tensor
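
A small, framework-free sketch of the size arithmetic spelled out in the comments above, so the 57 -> 25/19 -> 9 numbers are easy to verify; the receptive-field constant 16 comes from the stack of VALID 3x3x3 convolutions:

    def pathway_sizes(image_size, d_factor, receptive_field=16):
        down_in = image_size // d_factor                    # downsampled pathway input
        down_out = down_in - receptive_field                # after the VALID convolutions
        normal_in = down_out * d_factor + receptive_field   # crop size for the normal pathway
        normal_out = down_out * d_factor                    # matches the upsampled branch
        return normal_in, down_in, normal_out

    # a 57x57x57 input with d_factor=3: crop 25x25x25, downsample to 19x19x19,
    # and both pathways emit 9x9x9 feature maps before concatenation
    assert pathway_sizes(57, 3) == (25, 19, 9)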
Example #13
    def layer_op(self,
                 input_tensor,
                 is_training=True,
                 layer_id=-1,
                 keep_prob=0.5,
                 **unused_kwargs):
        hyper = self.hyperparameters

        # Initialize DenseVNet network layers
        net = self.create_network()

        #
        # Parameter handling
        #

        # Shape and dimension variable shortcuts
        channel_dim = len(input_tensor.shape) - 1
        input_size = input_tensor.shape.as_list()
        spatial_size = input_size[1:-1]
        n_spatial_dims = input_tensor.shape.ndims - 2

        # Validate input dimension with dilation rates
        modulo = 2**(len(hyper['dilation_rates']))
        assert layer_util.check_spatial_dims(input_tensor,
                                             lambda x: x % modulo == 0)

        #
        # Augmentation + Downsampling + Initial Layers
        #

        # On the fly data augmentation
        augment_layer = None
        if is_training and hyper['augmentation_scale'] > 0:
            augmentation_class = AffineAugmentationLayer
            augment_layer = augmentation_class(hyper['augmentation_scale'],
                                               'LINEAR', 'ZERO')
            input_tensor = augment_layer(input_tensor)

        # Variable storing all intermediate results -- VLinks
        all_segmentation_features = []

        # Downsample input to the network
        ave_downsample_layer = DownSampleLayer(func='AVG',
                                               kernel_size=3,
                                               stride=2)
        down_tensor = ave_downsample_layer(input_tensor)
        downsampled_img = net.initial_bn(down_tensor, is_training=is_training)

        # Add initial downsampled image VLink
        all_segmentation_features.append(downsampled_img)

        # All results should match the downsampled input's shape
        output_shape = downsampled_img.shape.as_list()[1:-1]

        init_features = net.initial_conv(input_tensor, is_training=is_training)

        #
        # Dense VNet Main Block
        #

        # `down` will handle the input of each Dense VNet block
        # Initialize it by stacking downsampled image and initial conv features
        down = tf.concat([downsampled_img, init_features], channel_dim)

        # Process Dense VNet Blocks
        for dblock in net.dense_vblocks:
            # Get skip layer and activation output
            skip, down = dblock(down,
                                is_training=is_training,
                                keep_prob=keep_prob)

            # Resize skip layer to original shape and add VLink
            skip = LinearResizeLayer(output_shape)(skip)
            all_segmentation_features.append(skip)

        # Concatenate all intermediate skip layers
        inter_results = tf.concat(all_segmentation_features, channel_dim)

        # Initial segmentation output
        seg_output = net.seg_layer(inter_results, is_training=is_training)

        #
        # Dense VNet End - Now postprocess outputs
        #

        # Refine segmentation with prior if any
        if self.architecture_parameters['use_prior']:
            xyz_prior = SpatialPriorBlock([12] * n_spatial_dims, output_shape)
            seg_output += xyz_prior()  # call the block to obtain the prior tensor

        # Invert augmentation if any
        if is_training and hyper['augmentation_scale'] > 0 \
                and augment_layer is not None:
            inverse_aug = augment_layer.inverse()
            seg_output = inverse_aug(seg_output)

        # Resize output to original size
        seg_output = LinearResizeLayer(spatial_size)(seg_output)

        # Segmentation results
        seg_argmax = tf.to_float(tf.expand_dims(tf.argmax(seg_output, -1), -1))
        seg_summary = seg_argmax * (255. / self.num_classes - 1)

        # Image Summary
        norm_axes = list(range(1, n_spatial_dims + 1))
        mean, var = tf.nn.moments(input_tensor, axes=norm_axes, keep_dims=True)
        timg = tf.to_float(input_tensor - mean) / (tf.sqrt(var) * 2.)
        timg = (timg + 1.) * 127.
        single_channel = tf.reduce_mean(timg, -1, True)
        img_summary = tf.minimum(255., tf.maximum(0., single_channel))
        if n_spatial_dims == 2:
            tf.summary.image(tf.get_default_graph().unique_name('imgseg'),
                             tf.concat([img_summary, seg_summary], 1), 5,
                             [tf.GraphKeys.SUMMARIES])
        elif n_spatial_dims == 3:
            # Show summaries
            image3_axial(tf.get_default_graph().unique_name('imgseg'),
                         tf.concat([img_summary, seg_summary], 1), 5,
                         [tf.GraphKeys.SUMMARIES])
        else:
            raise NotImplementedError(
                'Image Summary only supports 2D and 3D images')

        return seg_output
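
The image-summary normalization in both DenseVNet snippets maps intensities so that roughly +/- 2 standard deviations span the displayable range: a value at mean - 2*std maps to 0 and one at mean + 2*std maps to 254, with anything outside that range clipped by the tf.minimum/tf.maximum pair. A NumPy check of the arithmetic:

    import numpy as np

    x = np.array([-2.0, 0.0, 2.0])  # values already standardized to std = 1
    timg = x / 2.0                   # the tensor code divides by 2 * sqrt(var)
    print((timg + 1.0) * 127.0)      # -> [  0. 127. 254.]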