Example #1
import numpy as np
from keras.layers import Add, Conv3D, LeakyReLU, MaxPooling3D


def encoder3D(x,
              img_shape,
              conv_chans=None,
              n_convs_per_stage=1,
              min_h=5,
              min_c=None,
              prefix='vte',
              ks=3,
              return_skips=False,
              use_residuals=False,
              use_maxpool=False,
              max_time_downsample=None):
    skip_layers = []
    concat_skip_sizes = []

    if max_time_downsample is None:
        # by default, stop one downsample short of collapsing the time axis to size 1
        max_time_downsample = int(np.floor(np.log2(img_shape[-2]))) - 1
        print('Max downsamples in time: {}'.format(max_time_downsample))

    if conv_chans is None:
        n_convs = int(np.floor(np.log2(img_shape[0] / min_h)))
        conv_chans = [min_c * 2] * (n_convs - 1) + [min_c]
    elif not isinstance(conv_chans, list):
        n_convs = int(np.floor(np.log2(img_shape[0] / min_h)))
        conv_chans = [conv_chans] * (n_convs - 1) + [min_c]

    for i in range(len(conv_chans)):
        if n_convs_per_stage is not None and (n_convs_per_stage > 1 or use_maxpool):
            for ci in range(n_convs_per_stage):
                x = Conv3D(conv_chans[i],
                           kernel_size=ks,
                           padding='same',
                           name='{}_enc_conv3D_{}_{}'.format(
                               prefix, i, ci + 1))(x)
                if ci == 0 and use_residuals:
                    residual_input = x
                elif ci == n_convs_per_stage - 1 and use_residuals:
                    x = Add(name='{}_enc_{}_add_residual'.format(prefix, i))(
                        [residual_input, x])

                x = LeakyReLU(0.2,
                              name='{}_enc_leakyrelu_{}_{}'.format(
                                  prefix, i, ci + 1))(x)

        if return_skips:
            skip_layers.append(x)
            concat_skip_sizes.append(x.get_shape().as_list()[1:-1])

        # only downsample if we are below the max number of downsamples in time
        if i < max_time_downsample:
            strides = (2, 2, 2)
        else:
            strides = (2, 2, 1)

        if use_maxpool:
            x = MaxPooling3D(pool_size=strides,
                             name='{}_enc_maxpool_{}'.format(prefix, i))(x)
        else:
            x = Conv3D(conv_chans[i],
                       kernel_size=ks,
                       strides=strides,
                       padding='same',
                       name='{}_enc_conv3D_{}'.format(prefix, i))(x)

        if i < len(conv_chans) - 1:  # no activation on last convolution
            x = LeakyReLU(0.2, name='{}_enc_leakyrelu_{}'.format(prefix, i))(x)

    if min_c is not None and min_c > 0:
        if n_convs_per_stage is not None and n_convs_per_stage > 1:
            for ci in range(n_convs_per_stage):
                x = Conv3D(min_c,
                           kernel_size=ks,
                           padding='same',
                           name='{}_enc_conv3D_last_{}'.format(prefix,
                                                               ci + 1))(x)
                if ci == 0 and use_residuals:
                    residual_input = x
                elif ci == n_convs_per_stage - 1 and use_residuals:
                    x = Add(
                        name='{}_enc_{}_add_residual'.format(prefix, 'last'))(
                            [residual_input, x])
                x = LeakyReLU(0.2,
                              name='{}_enc_leakyrelu_last'.format(prefix))(x)
        x = Conv3D(min_c,
                   kernel_size=ks,
                   strides=(1, 1, 1),
                   padding='same',
                   name='{}_enc_conv3D_last'.format(prefix))(x)
        if return_skips:
            skip_layers.append(x)
            concat_skip_sizes.append(x.get_shape().as_list()[1:-1])

    if return_skips:
        return x, skip_layers, concat_skip_sizes
    else:
        return x
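A minimal usage sketch for encoder3D (the volume shape and the min_h/min_c
values are illustrative assumptions, and a TF1-era Keras is assumed to match
the get_shape().as_list() calls above):

from keras.layers import Input
from keras.models import Model

# Hypothetical 64x64 volume with 8 time frames and 1 channel: (H, W, T, C).
vol_shape = (64, 64, 8, 1)
inp = Input(shape=vol_shape)
# With these settings: n_convs = floor(log2(64 / 8)) = 3 stages, and the
# default channel schedule becomes [8, 8, 4].
z = encoder3D(inp, img_shape=vol_shape, min_h=8, min_c=4)
enc_model = Model(inputs=inp, outputs=z)
enc_model.summary()  # final feature map: (8, 8, 2, 4)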
Example #2
import numpy as np
from keras.layers import Add, BatchNormalization, LeakyReLU

# NOTE: myConv and myPool are project-specific wrappers that pick 2D or 3D
# conv/pooling layers based on n_dims; a reconstruction is sketched after
# Example #3.
def encoder(x, img_shape,
            conv_chans=None,
            n_convs_per_stage=1,
            min_h=5, min_c=None,
            prefix='',
            ks=3,
            return_skips=False, use_residuals=False, use_maxpool=False, use_batchnorm=False):
    skip_layers = []
    concat_skip_sizes = []
    n_dims = len(img_shape) - 1  # assume img_shape includes spatial dims, followed by channels

    if conv_chans is None:
        n_convs = int(np.floor(np.log2(img_shape[0] / min_h)))
        conv_chans = [min_c * 2] * (n_convs - 1) + [min_c]
    elif not isinstance(conv_chans, list):
        n_convs = int(np.floor(np.log2(img_shape[0] / min_h)))
        conv_chans = [conv_chans] * (n_convs - 1) + [min_c]

    for i in range(len(conv_chans)):
        for ci in range(n_convs_per_stage):
            x = myConv(nf=conv_chans[i], ks=ks, strides=1, n_dims=n_dims,
                       prefix='{}_enc'.format(prefix),
                       suffix='{}_{}'.format(i, ci + 1))(x)

            if ci == 0 and use_residuals:
                residual_input = x
            elif ci == n_convs_per_stage - 1 and use_residuals:
                x = Add(name='{}_enc_{}_add_residual'.format(prefix, i))([residual_input, x])

            if use_batchnorm:
                x = BatchNormalization()(x)
            x = LeakyReLU(0.2, name='{}_enc_leakyrelu_{}_{}'.format(prefix, i, ci + 1))(x)

        if return_skips:
            skip_layers.append(x)
            concat_skip_sizes.append(np.asarray(x.get_shape().as_list()[1:-1]))

        if use_maxpool and i < len(conv_chans) - 1:
            # changed 5/30/19, don't pool after our last conv
            x = myPool(n_dims=n_dims, prefix=prefix, suffix=i)(x)
        else:
            x = myConv(conv_chans[i], ks=ks, strides=2, n_dims=n_dims,
                       prefix='{}_enc'.format(prefix), suffix=i)(x)

            # don't activate right after a maxpool, it makes no sense
            if i < len(conv_chans) - 1:  # no activation on last convolution
                x = LeakyReLU(0.2, name='{}_enc_leakyrelu_{}'.format(prefix, i))(x)

    if min_c is not None and min_c > 0:
        # if the last number of channels is specified, convolve to that
        if n_convs_per_stage is not None and n_convs_per_stage > 1:
            for ci in range(n_convs_per_stage):
                x = myConv(min_c, ks=ks, n_dims=n_dims, strides=1,
                           prefix='{}_enc'.format(prefix), suffix='last_{}'.format(ci + 1))(x)

                if ci == 0 and use_residuals:
                    residual_input = x
                elif ci == n_convs_per_stage - 1 and use_residuals:
                    x = Add(name='{}_enc_{}_add_residual'.format(prefix, 'last'))([residual_input, x])
                x = LeakyReLU(0.2, name='{}_enc_leakyrelu_last'.format(prefix))(x)

        x = myConv(min_c, ks=ks, strides=1, n_dims=n_dims,
                   prefix='{}_enc'.format(prefix),
                   suffix='_last')(x)

        if return_skips:
            skip_layers.append(x)
            concat_skip_sizes.append(np.asarray(x.get_shape().as_list()[1:-1]))

    if return_skips:
        return x, skip_layers, concat_skip_sizes
    else:
        return x
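One plausible way to consume return_skips in a U-Net-style setup (shapes and
channel counts here are illustrative; myConv/myPool come from the project's
helper module):

from keras.layers import Input

inp = Input(shape=(128, 128, 1))
z, skips, skip_sizes = encoder(
    inp, img_shape=(128, 128, 1),
    conv_chans=[32, 64], min_c=64,
    n_convs_per_stage=2,
    return_skips=True)
# skips[k] is the activation captured just before the k-th downsample;
# skip_sizes[k] records its spatial dims so a matching decoder stage can
# upsample and concatenate with it.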
Example #3
import numpy as np
from keras.layers import Add, BatchNormalization, LeakyReLU

# NOTE: myConv and myPool are the same project-specific wrappers used in
# Example #2; a reconstruction is sketched after this listing.
def encoder(x,
            img_shape,
            conv_chans=None,
            n_convs_per_stage=1,
            min_h=5,
            min_c=None,
            prefix='',
            ks=3,
            return_skips=False,
            use_residuals=False,
            use_maxpool=False,
            use_batchnorm=False,
            kernel_initializer=None,
            bias_initializer=None):
    skip_layers = []
    concat_skip_sizes = []
    n_dims = len(img_shape) - 1  # assume img_shape includes spatial dims, followed by channels

    if conv_chans is None:
        n_convs = int(np.floor(np.log2(img_shape[0] / min_h)))
        conv_chans = [min_c * 2] * (n_convs - 1) + [min_c]
    elif not isinstance(conv_chans, list):
        n_convs = int(np.floor(np.log2(img_shape[0] / min_h)))
        conv_chans = [conv_chans] * (n_convs - 1) + [min_c]
    else:
        n_convs = len(conv_chans)

    if isinstance(ks, list):
        # one kernel size per conv stage, plus one for the final conv
        assert len(ks) == n_convs + 1
    else:
        ks = [ks] * (n_convs + 1)

    for i in range(len(conv_chans)):
        for ci in range(n_convs_per_stage):
            x = myConv(nf=conv_chans[i],
                       ks=ks[i],
                       strides=1,
                       n_dims=n_dims,
                       kernel_initializer=kernel_initializer,
                       bias_initializer=bias_initializer,
                       prefix='{}_enc'.format(prefix),
                       suffix='{}_{}'.format(i, ci + 1))(x)

            if ci == 0 and use_residuals:
                residual_input = x
            elif ci == n_convs_per_stage - 1 and use_residuals:
                x = Add(name='{}_enc_{}_add_residual'.format(prefix, i))(
                    [residual_input, x])

            if use_batchnorm:
                x = BatchNormalization()(x)
            x = LeakyReLU(0.2,
                          name='{}_enc_leakyrelu_{}_{}'.format(
                              prefix, i, ci + 1))(x)

        if return_skips:
            skip_layers.append(x)
            concat_skip_sizes.append(np.asarray(x.get_shape().as_list()[1:-1]))

        if use_maxpool and i < len(conv_chans) - 1:
            # changed 5/30/19, don't pool after our last conv
            x = myPool(n_dims=n_dims, prefix=prefix, suffix=i)(x)
        else:
            x = myConv(conv_chans[i],
                       ks=ks[i],
                       strides=2,
                       n_dims=n_dims,
                       kernel_initializer=kernel_initializer,
                       bias_initializer=bias_initializer,
                       prefix='{}_enc'.format(prefix),
                       suffix=i)(x)

            # don't activate right after a maxpool, it makes no sense
            if i < len(conv_chans) - 1:  # no activation on last convolution
                x = LeakyReLU(0.2,
                              name='{}_enc_leakyrelu_{}'.format(prefix, i))(x)

    if min_c is not None and min_c > 0:
        # if the last number of channels is specified, convolve to that
        if n_convs_per_stage is not None and n_convs_per_stage > 1:
            for ci in range(n_convs_per_stage):
                # TODO: we might not have enough ks for this
                x = myConv(min_c,
                           ks=ks[-1],
                           n_dims=n_dims,
                           strides=1,
                           kernel_initializer=kernel_initializer,
                           bias_initializer=bias_initializer,
                           prefix='{}_enc'.format(prefix),
                           suffix='last_{}'.format(ci + 1))(x)

                if ci == 0 and use_residuals:
                    residual_input = x
                elif ci == n_convs_per_stage - 1 and use_residuals:
                    x = Add(
                        name='{}_enc_{}_add_residual'.format(prefix, 'last'))(
                            [residual_input, x])
                x = LeakyReLU(0.2,
                              name='{}_enc_leakyrelu_last'.format(prefix))(x)

        x = myConv(min_c,
                   ks=ks[-1],
                   strides=1,
                   n_dims=n_dims,
                   kernel_initializer=kernel_initializer,
                   bias_initializer=bias_initializer,
                   prefix='{}_enc'.format(prefix),
                   suffix='_last')(x)

        if return_skips:
            skip_layers.append(x)
            concat_skip_sizes.append(np.asarray(x.get_shape().as_list()[1:-1]))

    if return_skips:
        return x, skip_layers, concat_skip_sizes
    else:
        return x
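Examples #2 and #3 depend on myConv and myPool, which these listings do not
include. A minimal sketch of what they plausibly look like, assuming they
simply dispatch on n_dims (a reconstruction, not the original implementation):

from keras.layers import Conv2D, Conv3D, MaxPooling2D, MaxPooling3D


def myConv(nf, ks=3, strides=1, n_dims=2,
           kernel_initializer=None, bias_initializer=None,
           prefix=None, suffix=None):
    # Pick the 2D or 3D convolution; 'same' padding keeps spatial sizes
    # unchanged, so only the strides perform downsampling.
    conv_layer = Conv2D if n_dims == 2 else Conv3D
    return conv_layer(nf, kernel_size=ks, strides=strides, padding='same',
                      kernel_initializer=kernel_initializer or 'glorot_uniform',
                      bias_initializer=bias_initializer or 'zeros',
                      name='{}_conv{}D_{}'.format(prefix, n_dims, suffix))


def myPool(n_dims=2, prefix=None, suffix=None):
    # Pick the matching max-pooling layer with a stride-2 window.
    pool_layer = MaxPooling2D if n_dims == 2 else MaxPooling3D
    return pool_layer(pool_size=2, name='{}_maxpool_{}'.format(prefix, suffix))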
Example #4
import numpy as np
from keras.layers import Add, Conv3D, LeakyReLU, MaxPooling3D


def encoder3D(x, img_shape,
            conv_chans=None,
            n_convs_per_stage=1,
            min_h=5, min_c=None,
            prefix='vte',
            ks=3,
            return_skips=False, use_residuals=False, use_maxpool=False,
            max_time_downsample=None):
    skip_layers = []
    concat_skip_sizes = []

    if max_time_downsample is None:
        # by default, stop one downsample short of collapsing the time axis to size 1
        max_time_downsample = int(np.floor(np.log2(img_shape[-2]))) - 1
        print('Max downsamples in time: {}'.format(max_time_downsample))

    if conv_chans is None:
        n_convs = int(np.floor(np.log2(img_shape[0] / min_h)))
        conv_chans = [min_c * 2] * (n_convs - 1) + [min_c]
    elif not isinstance(conv_chans, list):
        n_convs = int(np.floor(np.log2(img_shape[0] / min_h)))
        conv_chans = [conv_chans] * (n_convs - 1) + [min_c]

    for i in range(len(conv_chans)):
        if n_convs_per_stage is not None and (n_convs_per_stage > 1 or use_maxpool):
            for ci in range(n_convs_per_stage):
                x = Conv3D(conv_chans[i], kernel_size=ks, padding='same',
                           name='{}_enc_conv3D_{}_{}'.format(prefix, i, ci + 1))(x)
                if ci == 0 and use_residuals:
                    residual_input = x
                elif ci == n_convs_per_stage - 1 and use_residuals:
                    x = Add(name='{}_enc_{}_add_residual'.format(prefix, i))([residual_input, x])

                x = LeakyReLU(0.2, name='{}_enc_leakyrelu_{}_{}'.format(prefix, i, ci + 1))(x)

        if return_skips:
            skip_layers.append(x)
            concat_skip_sizes.append(x.get_shape().as_list()[1:-1])

        # only downsample if we are below the max number of downsamples in time
        if i < max_time_downsample:
            strides = (2, 2, 2)
        else:
            strides = (2, 2, 1)

        if use_maxpool:
            x = MaxPooling3D(pool_size=strides,
                             name='{}_enc_maxpool_{}'.format(prefix, i))(x)
        else:
            x = Conv3D(conv_chans[i], kernel_size=ks, strides=strides, padding='same',
                       name='{}_enc_conv3D_{}'.format(prefix, i))(x)

        if i < len(conv_chans) - 1:  # no activation on last convolution
            x = LeakyReLU(0.2, name='{}_enc_leakyrelu_{}'.format(prefix, i))(x)

    if min_c is not None and min_c > 0:
        if n_convs_per_stage is not None and n_convs_per_stage > 1:
            for ci in range(n_convs_per_stage):
                x = Conv3D(min_c, kernel_size=ks, padding='same',
                           name='{}_enc_conv3D_last_{}'.format(prefix, ci + 1))(x)
                if ci == 0 and use_residuals:
                    residual_input = x
                elif ci == n_convs_per_stage - 1 and use_residuals:
                    x = Add(name='{}_enc_{}_add_residual'.format(prefix, 'last'))([residual_input, x])
                x = LeakyReLU(0.2, name='{}_enc_leakyrelu_last'.format(prefix))(x)
        x = Conv3D(min_c, kernel_size=ks, strides=(1, 1, 1), padding='same',
                   name='{}_enc_conv3D_last'.format(prefix))(x)
        if return_skips:
            skip_layers.append(x)
            concat_skip_sizes.append(x.get_shape().as_list()[1:-1])

    if return_skips:
        return x, skip_layers, concat_skip_sizes
    else:
        return x
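A quick check of the default max_time_downsample arithmetic used above (the
input shape is an assumed example):

import numpy as np

img_shape = (64, 64, 8, 1)  # (H, W, T, C); img_shape[-2] is the time axis
max_time_downsample = int(np.floor(np.log2(img_shape[-2]))) - 1  # = 2
# Stages 0 and 1 downsample with strides (2, 2, 2); every later stage falls
# back to (2, 2, 1), so the time axis goes 8 -> 4 -> 2 and stops one halving
# short of collapsing to a single frame.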