Example #1
    def _test_deconv_output_shape(self, rank, param_dict, output_shape):
        # build a 2D or 3D test input depending on the requested rank
        if rank == 2:
            input_data = self.get_2d_input()
        elif rank == 3:
            input_data = self.get_3d_input()
        else:
            raise ValueError('rank must be 2 or 3')

        deconv_layer = DeconvLayer(**param_dict)
        output_data = deconv_layer(input_data)
        print(deconv_layer)
        with self.cached_session() as sess:
            sess.run(tf.global_variables_initializer())
            output_value = sess.run(output_data)
            self.assertAllClose(output_shape, output_value.shape)
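
A hedged sketch of how this helper might be driven from a concrete test method in the same test class; the keyword arguments mirror the DeconvLayer parameters used elsewhere on this page, while the fixture shape and expected output shape are illustrative assumptions only.

    def test_2d_deconv_shape(self):
        # Illustrative only: assumes get_2d_input() yields a (2, 16, 16, 8)
        # tensor and that stride-2 'SAME' deconvolution doubles the spatial dims.
        params = {'n_output_chns': 10, 'kernel_size': 3, 'stride': 2}
        self._test_deconv_output_shape(rank=2,
                                       param_dict=params,
                                       output_shape=(2, 32, 32, 10))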
Example #2
    def layer_op(self, main_flow, bypass_flow):
        """

        :param main_flow: tensor, input to the VNet block
        :param bypass_flow: tensor, input from skip connection
        :return: res_flow is tensor before final block operation (for residual connections),
            main_flow is final output tensor
        """
        for i in range(self.n_conv):
            main_flow = ConvLayer(name='conv_{}'.format(i),
                                  n_output_chns=self.n_feature_chns,
                                  w_initializer=self.initializers['w'],
                                  w_regularizer=self.regularizers['w'],
                                  kernel_size=5)(main_flow)
            if i < self.n_conv - 1:  # no activation for the last conv layer
                main_flow = ActiLayer(
                    func=self.acti_func,
                    regularizer=self.regularizers['w'])(main_flow)
        res_flow = ElementwiseLayer('SUM')(main_flow, bypass_flow)

        if self.func == 'DOWNSAMPLE':
            main_flow = ConvLayer(name='downsample',
                                  n_output_chns=self.n_output_chns,
                                  w_initializer=self.initializers['w'],
                                  w_regularizer=self.regularizers['w'],
                                  kernel_size=2,
                                  stride=2,
                                  with_bias=True)(res_flow)
        elif self.func == 'UPSAMPLE':
            main_flow = DeconvLayer(name='upsample',
                                    n_output_chns=self.n_output_chns,
                                    w_initializer=self.initializers['w'],
                                    w_regularizer=self.regularizers['w'],
                                    kernel_size=2,
                                    stride=2,
                                    with_bias=True)(res_flow)
        elif self.func == 'SAME':
            main_flow = ConvLayer(name='conv_1x1x1',
                                  n_output_chns=self.n_output_chns,
                                  w_initializer=self.initializers['w'],
                                  w_regularizer=self.regularizers['w'],
                                  b_initializer=self.initializers['b'],
                                  b_regularizer=self.regularizers['b'],
                                  kernel_size=1,
                                  with_bias=True)(res_flow)
        main_flow = ActiLayer(self.acti_func)(main_flow)
        print(self)
        return res_flow, main_flow
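
As a usage note, the two returned tensors serve different purposes: res_flow is kept for the long-range residual/skip path, while main_flow continues to the next block. Below is a minimal sketch, assuming this layer_op belongs to a block class (called VNetBlock here) whose constructor accepts the attributes read above; the class name, constructor arguments, and input shape are assumptions, not the library's confirmed API.

import tensorflow as tf  # TF 1.x, as in the surrounding examples

# Hypothetical input: a batch of one 32^3 volume with 16 channels.
features = tf.placeholder(tf.float32, shape=[1, 32, 32, 32, 16])

# Assumed constructor arguments, chosen to match the attributes that
# layer_op reads above (func, n_conv, n_feature_chns, n_output_chns, ...).
down_block = VNetBlock(func='DOWNSAMPLE',
                       n_conv=2,
                       n_feature_chns=16,
                       n_output_chns=32,
                       acti_func='prelu')

# res_flow is kept for the long-range skip connection;
# main_flow (downsampled by the stride-2 conv) feeds the next block.
res_flow, main_flow = down_block(features, features)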
Example #3
    def layer_op(self, main_flow, bypass_flow):
        for i in range(self.n_conv):
            main_flow = ConvLayer(name='conv_{}'.format(i),
                                  n_output_chns=self.n_feature_chns,
                                  w_initializer=self.initializers['w'],
                                  w_regularizer=self.regularizers['w'],
                                  kernel_size=5)(main_flow)
            if i < self.n_conv - 1:  # no activation for the last conv layer
                main_flow = ActiLayer(
                    func=self.acti_func,
                    regularizer=self.regularizers['w'])(main_flow)
        res_flow = ElementwiseLayer('SUM')(main_flow, bypass_flow)

        if self.func == 'DOWNSAMPLE':
            main_flow = ConvLayer(name='downsample',
                                  n_output_chns=self.n_output_chns,
                                  w_initializer=self.initializers['w'],
                                  w_regularizer=self.regularizers['w'],
                                  kernel_size=2,
                                  stride=2,
                                  with_bias=True)(res_flow)
        elif self.func == 'UPSAMPLE':
            main_flow = DeconvLayer(name='upsample',
                                    n_output_chns=self.n_output_chns,
                                    w_initializer=self.initializers['w'],
                                    w_regularizer=self.regularizers['w'],
                                    kernel_size=2,
                                    stride=2,
                                    with_bias=True)(res_flow)
        elif self.func == 'SAME':
            main_flow = ConvLayer(name='conv_1x1x1',
                                  n_output_chns=self.n_output_chns,
                                  w_initializer=self.initializers['w'],
                                  w_regularizer=self.regularizers['w'],
                                  b_initializer=self.initializers['b'],
                                  b_regularizer=self.regularizers['b'],
                                  kernel_size=1,
                                  with_bias=True)(res_flow)
        main_flow = ActiLayer(self.acti_func)(main_flow)
        print(self)
        print('VNet is running')
        return res_flow, main_flow
Example #4
    def layer_op(self, input_tensor):
        spatial_rank = layer_util.infer_spatial_rank(input_tensor)
        output_tensor = input_tensor
        if self.func == 'REPLICATE':
            if self.kernel_size != self.stride:
                raise ValueError(
                    "`kernel_size` != `stride` is currently not "
                    "supported in `REPLICATE` mode. Please "
                    "consider using the `CHANNELWISE_DECONV` operation.")
            # simply replicate each input value into a
            # local region of (kernel_size ** spatial_rank) elements
            kernel_size_all_dims = layer_util.expand_spatial_params(
                self.kernel_size, spatial_rank)
            pixel_num = np.prod(kernel_size_all_dims)
            repmat = np.hstack((pixel_num, [1] * spatial_rank, 1)).flatten()
            output_tensor = tf.tile(input=input_tensor, multiples=repmat)
            output_tensor = tf.batch_to_space_nd(
                input=output_tensor,
                block_shape=kernel_size_all_dims,
                crops=[[0, 0]] * spatial_rank)

        elif self.func == 'CHANNELWISE_DECONV':
            output_tensor = [
                tf.expand_dims(x, -1)
                for x in tf.unstack(input_tensor, axis=-1)
            ]
            output_tensor = [
                DeconvLayer(n_output_chns=1,
                            kernel_size=self.kernel_size,
                            stride=self.stride,
                            padding='SAME',
                            with_bias=self.with_bias,
                            w_initializer=self.initializers['w'],
                            w_regularizer=self.regularizers['w'],
                            b_initializer=self.initializers['b'],
                            b_regularizer=self.regularizers['b'],
                            name='deconv_{}'.format(i))(x)
                for (i, x) in enumerate(output_tensor)
            ]
            output_tensor = tf.concat(output_tensor, axis=-1)
        return output_tensor
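
The REPLICATE branch is the less obvious of the two: tiling along the batch axis and then applying batch_to_space_nd amounts to nearest-neighbour upsampling, with every input value copied into a kernel_size-sized block. A small standalone demonstration of that trick, assuming a TF 1.x session-based environment as in the examples above:

import numpy as np
import tensorflow as tf  # TF 1.x API (tf.batch_to_space_nd, tf.Session)

# A (batch=1, 2, 2, channels=1) input holding the values 0..3.
x = tf.constant(np.arange(4, dtype=np.float32).reshape(1, 2, 2, 1))

# REPLICATE with kernel_size = stride = 2 on a 2D input:
# make 4 copies along the batch axis, then fold them back into space.
kernel_size_all_dims = [2, 2]
pixel_num = int(np.prod(kernel_size_all_dims))
tiled = tf.tile(input=x, multiples=[pixel_num, 1, 1, 1])    # (4, 2, 2, 1)
upsampled = tf.batch_to_space_nd(input=tiled,
                                 block_shape=kernel_size_all_dims,
                                 crops=[[0, 0], [0, 0]])    # (1, 4, 4, 1)

with tf.Session() as sess:
    print(sess.run(upsampled)[0, :, :, 0])
    # each input value fills a 2x2 block:
    # [[0. 0. 1. 1.]
    #  [0. 0. 1. 1.]
    #  [2. 2. 3. 3.]
    #  [2. 2. 3. 3.]]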