def __init__(self):
        super(DenseNet, self).__init__()
        input_channels = 1
        conv_channels = 32
        down_structure = [2, 2, 2]
        output_channels = 4  # 2
        act_fn = config["act_fn"]
        norm_fn = config["norm_fn"]
        self.features = nn.Sequential()
        self.features.add_module("init_conv", nn.Conv3d(input_channels, conv_channels,
                                                        kernel_size=3, stride=1, padding=1, bias=True))
        self.features.add_module("init_norm", norm_fn(conv_channels))
        self.features.add_module("init_act", act_fn())
        self.dropblock = LinearScheduler(
            DropBlock3D(drop_prob=0., block_size=5),
            start_value=0.,
            stop_value=0.5,
            nr_steps=5000
        )

        channels = conv_channels
        self.features.add_module('drop_block', DropBlock3D(drop_prob=0.1, block_size=5))

        for i, num_layers in enumerate(down_structure):
            for j in range(num_layers):
                conv_layer = ConvBlock(channels)
                self.features.add_module("block{}_layer{}".format(i + 1, j + 1), conv_layer)
                channels = conv_layer.out_channels

            # down-sample
            trans_layer = TransmitBlock(channels, is_last_layer=(i == len(down_structure) - 1))
            self.features.add_module("transition{}".format(i + 1), trans_layer)
            channels = trans_layer.out_channels

        self.classifier = nn.Linear(channels, output_channels)
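The snippet above shows only the constructor. For orientation, here is a minimal sketch of one plausible matching forward pass, assuming the LinearScheduler/DropBlock3D API from the dropblock package; the step() placement and the global-pooling head are assumptions, not the author's code:

import torch.nn.functional as F

def forward(self, x):
    # Assumption: forward is called once per training batch, so stepping here
    # anneals drop_prob from 0.0 toward 0.5 over the scheduler's 5000 steps.
    if self.training:
        self.dropblock.step()
    x = self.dropblock(self.features(x))        # scheduler forwards to DropBlock3D
    x = F.adaptive_avg_pool3d(x, 1).flatten(1)  # global average pool -> (N, C)
    return self.classifier(x)
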
def get_drop_block():
    return LinearScheduler(
        DropBlock3D(block_size=5, drop_prob=0.0),
        start_value=0.0,
        stop_value=0.1,
        nr_steps=10000,
    )
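Both snippets wrap DropBlock3D in a LinearScheduler, which ramps drop_prob linearly from start_value to stop_value and only advances when step() is called. A minimal usage sketch, assuming the dropblock package's semantics (the tensor shape and loop count are illustrative):

import torch

db = get_drop_block()
db.train()                        # DropBlock is a no-op in eval mode
x = torch.randn(2, 8, 16, 16, 16)
for _ in range(100):
    db.step()                     # advances drop_prob along the 0.0 -> 0.1 ramp
    out = db(x)                   # spatial shape is preserved
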
Example No. 3
import torch
from dropblock import DropBlock3D


def test_block_mask_cube_odd():
    db = DropBlock3D(block_size=3, drop_prob=0.1)
    mask = torch.tensor([[[[0., 0., 0., 0., 0.], [0., 0., 0., 0., 0.],
                           [0., 0., 0., 0., 0.], [0., 0., 0., 0., 0.],
                           [0., 0., 0., 0., 0.]],
                          [[1., 0., 0., 0., 0.], [0., 0., 0., 1., 0.],
                           [0., 0., 0., 0., 0.], [0., 0., 0., 0., 0.],
                           [0., 0., 0., 0., 0.]],
                          [[0., 0., 0., 0., 0.], [0., 0., 0., 0., 0.],
                           [0., 0., 0., 0., 0.], [0., 0., 0., 0., 0.],
                           [0., 0., 0., 0., 0.]],
                          [[0., 0., 0., 0., 0.], [0., 0., 0., 0., 0.],
                           [0., 0., 0., 0., 0.], [0., 0., 0., 0., 0.],
                           [0., 0., 0., 0., 0.]],
                          [[0., 0., 0., 0., 0.], [0., 0., 0., 0., 0.],
                           [0., 0., 0., 0., 0.], [0., 0., 0., 0., 0.],
                           [0., 0., 0., 0., 0.]]]])

    expected = torch.tensor([[[[1., 1., 1., 1., 1., 1.],
                               [1., 1., 1., 1., 1., 1.],
                               [1., 1., 1., 1., 1., 1.],
                               [1., 1., 1., 1., 1., 1.],
                               [1., 1., 1., 1., 1., 1.],
                               [1., 1., 1., 1., 1., 1.]],
                              [[0., 0., 0., 1., 1., 1.],
                               [0., 0., 0., 0., 0., 0.],
                               [0., 0., 0., 0., 0., 0.],
                               [1., 1., 1., 0., 0., 0.],
                               [1., 1., 1., 1., 1., 1.],
                               [1., 1., 1., 1., 1., 1.]],
                              [[0., 0., 0., 1., 1., 1.],
                               [0., 0., 0., 0., 0., 0.],
                               [0., 0., 0., 0., 0., 0.],
                               [1., 1., 1., 0., 0., 0.],
                               [1., 1., 1., 1., 1., 1.],
                               [1., 1., 1., 1., 1., 1.]],
                              [[0., 0., 0., 1., 1., 1.],
                               [0., 0., 0., 0., 0., 0.],
                               [0., 0., 0., 0., 0., 0.],
                               [1., 1., 1., 0., 0., 0.],
                               [1., 1., 1., 1., 1., 1.],
                               [1., 1., 1., 1., 1., 1.]],
                              [[1., 1., 1., 1., 1., 1.],
                               [1., 1., 1., 1., 1., 1.],
                               [1., 1., 1., 1., 1., 1.],
                               [1., 1., 1., 1., 1., 1.],
                               [1., 1., 1., 1., 1., 1.],
                               [1., 1., 1., 1., 1., 1.]],
                              [[1., 1., 1., 1., 1., 1.],
                               [1., 1., 1., 1., 1., 1.],
                               [1., 1., 1., 1., 1., 1.],
                               [1., 1., 1., 1., 1., 1.],
                               [1., 1., 1., 1., 1., 1.],
                               [1., 1., 1., 1., 1., 1.]]]])

    block_mask = db._compute_block_mask(mask)
    assert torch.equal(block_mask, expected)
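The test illustrates the block-mask semantics: each nonzero entry of the sampled mask is a block seed, and _compute_block_mask zeroes a block_size**3 neighborhood around it in the returned mask (1 = keep, 0 = drop). A rough equivalent, ignoring the exact edge padding this version of the library applies (which is why the 5x5x5 mask above yields a 6x6x6 block mask), is to dilate the seeds with max-pooling and invert:

import torch.nn.functional as F

def block_mask_sketch(mask, block_size):
    # Dilate each seed into a block_size^3 cube of ones, then invert so
    # dropped voxels become 0 and kept voxels stay 1.
    pooled = F.max_pool3d(mask[:, None], kernel_size=block_size,
                          stride=1, padding=block_size // 2)
    return 1 - pooled.squeeze(1)
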
Example No. 4
import torch
from dropblock import DropBlock3D


def test_forward_pass2():
    block_sizes = [2, 3, 4, 5, 6, 7, 8]
    depths = [5, 6, 8, 10, 11, 14, 15]
    heights = [5, 6, 8, 10, 11, 14, 15]
    widths = [5, 7, 8, 10, 15, 14, 15]

    for block_size, depth, height, width in zip(block_sizes, depths, heights,
                                                widths):
        dropout = DropBlock3D(0.2, block_size=block_size)
        input = torch.randn((5, 20, depth, height, width))
        output = dropout(input)

        assert tuple(input.shape) == tuple(output.shape)
Example No. 5
def init_dropblock(self, start_value, stop_value, nr_steps, block_size):
    self.dropblock = LinearScheduler(DropBlock3D(drop_prob=stop_value,
                                                 block_size=block_size),
                                     start_value=start_value,
                                     stop_value=stop_value,
                                     nr_steps=nr_steps)
Example No. 6
import torch
from unittest import mock

from dropblock import DropBlock3D


def test_forward_pass():
    db = DropBlock3D(block_size=3, drop_prob=0.1)
    block_mask = torch.tensor([[[[1., 1., 1., 1., 1., 1., 1.],
                                 [1., 1., 1., 1., 1., 1., 1.],
                                 [1., 1., 1., 1., 1., 1., 1.],
                                 [1., 1., 1., 1., 1., 1., 1.],
                                 [1., 1., 1., 1., 1., 1., 1.],
                                 [1., 1., 1., 1., 1., 1., 1.],
                                 [1., 1., 1., 1., 1., 1., 1.]],
                                [[0., 0., 0., 1., 1., 1., 1.],
                                 [0., 0., 0., 0., 0., 0., 1.],
                                 [0., 0., 0., 0., 0., 0., 1.],
                                 [1., 1., 1., 0., 0., 0., 1.],
                                 [1., 1., 1., 1., 1., 1., 1.],
                                 [1., 1., 1., 1., 1., 1., 1.],
                                 [1., 1., 1., 1., 1., 1., 1.]],
                                [[0., 0., 0., 1., 1., 1., 1.],
                                 [0., 0., 0., 0., 0., 0., 1.],
                                 [0., 0., 0., 0., 0., 0., 1.],
                                 [1., 1., 1., 0., 0., 0., 1.],
                                 [1., 1., 1., 1., 1., 1., 1.],
                                 [1., 1., 1., 1., 1., 1., 1.],
                                 [1., 1., 1., 1., 1., 1., 1.]],
                                [[0., 0., 0., 1., 1., 1., 1.],
                                 [0., 0., 0., 0., 0., 0., 1.],
                                 [0., 0., 0., 0., 0., 0., 1.],
                                 [1., 1., 1., 0., 0., 0., 1.],
                                 [1., 1., 1., 1., 1., 1., 1.],
                                 [1., 1., 1., 1., 1., 1., 1.],
                                 [1., 1., 1., 1., 1., 1., 1.]],
                                [[1., 1., 1., 1., 1., 1., 1.],
                                 [1., 1., 1., 1., 1., 1., 1.],
                                 [1., 1., 1., 1., 1., 1., 1.],
                                 [1., 1., 1., 1., 1., 1., 1.],
                                 [1., 1., 1., 1., 1., 1., 1.],
                                 [1., 1., 1., 1., 1., 1., 1.],
                                 [1., 1., 1., 1., 1., 1., 1.]],
                                [[1., 1., 1., 1., 1., 1., 1.],
                                 [1., 1., 1., 1., 1., 1., 1.],
                                 [1., 1., 1., 1., 1., 1., 1.],
                                 [1., 1., 1., 1., 1., 1., 1.],
                                 [1., 1., 1., 1., 1., 1., 1.],
                                 [1., 1., 1., 1., 1., 1., 1.],
                                 [1., 1., 1., 1., 1., 1., 1.]],
                                [[1., 1., 1., 1., 1., 1., 1.],
                                 [1., 1., 1., 1., 1., 1., 1.],
                                 [1., 1., 1., 1., 1., 1., 1.],
                                 [1., 1., 1., 1., 1., 1., 1.],
                                 [1., 1., 1., 1., 1., 1., 1.],
                                 [1., 1., 1., 1., 1., 1., 1.],
                                 [1., 1., 1., 1., 1., 1., 1.]]]])

    db._compute_block_mask = mock.MagicMock(return_value=block_mask)

    x = torch.ones(10, 10, 7, 7, 7)
    h = db(x)

    expected = block_mask * block_mask.numel() / block_mask.sum()
    expected = expected[:, None, :, :, :].expand_as(x)

    assert tuple(h.shape) == (10, 10, 7, 7, 7)
    assert torch.equal(h, expected)
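The expected tensor encodes DropBlock's normalization: surviving activations are rescaled by count(M) / sum(M) so the expected magnitude of the output matches the input. In code, following the same convention as the test:

# out = x * M * |M| / sum(M), with M broadcast over the channel dimension
out = x * block_mask[:, None] * (block_mask.numel() / block_mask.sum())
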
Example No. 7
import torch
from dropblock import DropBlock3D


def test_forward_pass_with_cuda():
    dropout = DropBlock3D(0.2, block_size=5).to('cuda')
    input = torch.randn((5, 20, 16, 16, 16)).to('cuda')
    output = dropout(input)

    assert tuple(input.shape) == tuple(output.shape)
Example No. 8
import torch
from dropblock import DropBlock3D


def test_forward_pass2():
    dropout = DropBlock3D(0.2, block_size=3)
    input = torch.randn((1, 1, 8, 8, 8))
    output = dropout(input)

    assert tuple(input.shape) == tuple(output.shape)
Example No. 9
import tensorflow as tf  # TF 1.x API (tf.layers, tf.variable_scope)

# Note: DropBlock3D here is a TensorFlow implementation parameterized by
# keep_prob, unlike the PyTorch DropBlock3D (drop_prob) in the examples above.


def propagate_dropblock(l,
                        num_layer_same_scale,
                        input_prev_layer,
                        num_stride,
                        dim_filter,
                        num_filters,
                        padding,
                        unet_type,
                        mode,
                        keep_prob,
                        convolution_type,
                        deconvolution_shape=None):

    #########################################################
    ###### this layer uses DropBlock for regularization #####
    #########################################################

    with tf.variable_scope(str(convolution_type) + '_layer_' + str(l) +
                           '_dim_' + str(num_layer_same_scale),
                           reuse=tf.AUTO_REUSE):

        if unet_type == '3D':

            if convolution_type == 'upsampling':

                input_prev_layer = tf.layers.conv3d_transpose(
                    inputs=input_prev_layer,
                    filters=num_filters,
                    kernel_size=(dim_filter, dim_filter, dim_filter),
                    strides=(num_stride, num_stride, num_stride),
                    padding=padding,
                    data_format='channels_last',
                    activation=None,
                    use_bias=True,
                    kernel_initializer=tf.initializers.variance_scaling(),
                    bias_initializer=tf.zeros_initializer(),
                    kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-4),
                    bias_regularizer=None,
                    activity_regularizer=None,
                    kernel_constraint=None,
                    bias_constraint=None,
                    trainable=True,
                    name='deconv3d_layer',
                    reuse=tf.AUTO_REUSE)

            else:

                input_prev_layer = tf.layers.conv3d(
                    inputs=input_prev_layer,
                    filters=num_filters,
                    kernel_size=(dim_filter, dim_filter, dim_filter),
                    strides=(num_stride, num_stride, num_stride),
                    padding=padding,
                    data_format='channels_last',
                    dilation_rate=(1, 1, 1),
                    activation=None,
                    use_bias=False,
                    kernel_initializer=tf.initializers.variance_scaling(),
                    bias_initializer=tf.zeros_initializer(),
                    kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-4),
                    bias_regularizer=None,
                    activity_regularizer=None,
                    kernel_constraint=None,
                    bias_constraint=None,
                    trainable=True,
                    name='conv3d_layer',
                    reuse=tf.AUTO_REUSE)

        else:

            if convolution_type == 'upsampling':

                input_prev_layer = tf.layers.conv2d_transpose(
                    inputs=input_prev_layer,
                    filters=num_filters,
                    kernel_size=(dim_filter, dim_filter),
                    strides=(num_stride, num_stride),
                    padding=padding,
                    data_format='channels_last',
                    activation=None,
                    use_bias=False,
                    kernel_initializer=tf.initializers.variance_scaling(),
                    bias_initializer=tf.zeros_initializer(),
                    kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-4),
                    bias_regularizer=None,
                    activity_regularizer=None,
                    kernel_constraint=None,
                    bias_constraint=None,
                    trainable=True,
                    name='deconv2d_layer',
                    reuse=tf.AUTO_REUSE)

            else:

                input_prev_layer = tf.layers.conv2d(
                    inputs=input_prev_layer,
                    filters=num_filters,
                    kernel_size=(dim_filter, dim_filter),
                    strides=(num_stride, num_stride),
                    padding=padding,
                    data_format='channels_last',
                    dilation_rate=(1, 1),
                    activation=None,
                    use_bias=False,
                    kernel_initializer=tf.initializers.variance_scaling(),
                    bias_initializer=tf.zeros_initializer(),
                    kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-4),
                    bias_regularizer=None,
                    activity_regularizer=None,
                    kernel_constraint=None,
                    bias_constraint=None,
                    trainable=True,
                    name='conv2d_layer',
                    reuse=tf.AUTO_REUSE)
        '''
        input_prev_layer = tf.layers.batch_normalization(
            inputs=input_prev_layer,
            axis=-1,
            momentum=0.99,
            epsilon=0.001,
            center=True,
            scale=True,
            beta_initializer=tf.zeros_initializer(),
            gamma_initializer=tf.ones_initializer(),
            moving_mean_initializer=tf.zeros_initializer(),
            moving_variance_initializer=tf.ones_initializer(),
            beta_regularizer=None,
            gamma_regularizer=None,
            beta_constraint=None,
            gamma_constraint=None,
            training=mode,
            trainable=True,
            name='batch_norm',
            reuse=tf.AUTO_REUSE,
            renorm=False,
            renorm_clipping=None,
            renorm_momentum=0.99,
            fused=None,
            virtual_batch_size=None,
            adjustment=None
        )
        '''
        input_prev_layer = tf.nn.leaky_relu(features=input_prev_layer,
                                            alpha=0.1,
                                            name='leaky_relu')
        drop_block = DropBlock3D(keep_prob=keep_prob, block_size=3)
        input_prev_layer = drop_block(input_prev_layer, True)
        #input_prev_layer = DropBlock(inputul = input_prev_layer, keep_prob=keep_prob, block_size=3)

    return input_prev_layer
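A hypothetical call, assuming a TensorFlow 1.x graph (tf.layers and tf.variable_scope are TF1-only APIs) and a plain '3D' convolution path; every argument value below is illustrative:

import tensorflow as tf  # TF 1.x

x = tf.placeholder(tf.float32, [None, 32, 32, 32, 1])
# Any convolution_type other than 'upsampling' selects the conv3d branch.
h = propagate_dropblock(l=0, num_layer_same_scale=0, input_prev_layer=x,
                        num_stride=1, dim_filter=3, num_filters=16,
                        padding='same', unet_type='3D', mode=True,
                        keep_prob=0.9, convolution_type='standard',
                        deconvolution_shape=None)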