Example #1
 def __init__(self,
              _in_channel,
              output_dim,
              stride=1,
              downsample=None,
              **kwargs):
     super(Bottleneck, self).__init__(**kwargs)
     self.downsample = downsample
     self.conv1 = Conv2D(filters=output_dim,
                         kernel_size=1,
                         padding="same",
                         use_bias=False)
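     # Note: GN_GROUPS is a module-level constant and self.expansion a class
     # attribute (typically 4 for a Bottleneck); both are defined outside this excerpt.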
     self.bn1 = GroupNormalization(groups=GN_GROUPS)
     self.conv2 = Conv2D(filters=output_dim,
                         kernel_size=3,
                         strides=stride,
                         padding="same",
                         use_bias=False)
     self.bn2 = GroupNormalization(groups=GN_GROUPS)
     self.conv3 = Conv2D(filters=output_dim * self.expansion,
                         kernel_size=1,
                         padding="same",
                         use_bias=False)
     self.bn3 = GroupNormalization(groups=GN_GROUPS)
     self.relu = ReLU()
Example #2
def single_hourglass(output_channels,
                     n_levels=4,
                     channels=32,
                     channels_growth=2,
                     spatial_dims=2,
                     spacing=1,
                     norm_groups=4):
    '''Combines an hourglass block with input/output blocks to
    increase/decrease the number of channels.

    Notes:
    Expects the input to be already padded.
    '''

    Conv = get_nd_conv(spatial_dims)

    hglass = _stack_layers([
        Conv(channels, kernel_size=1, padding='same'),
        bottleneck_conv_block(channels, spatial_dims, norm_groups),
        hourglass_block(n_levels, channels, channels_growth, spatial_dims,
                        spacing, norm_groups),
        bottleneck_conv_block(channels, spatial_dims, norm_groups),
        Activation(),
        GroupNormalization(groups=norm_groups, axis=-1),
        Conv(channels, kernel_size=3, padding='same'),
        Activation(),
        GroupNormalization(groups=norm_groups, axis=-1),
        Conv(output_channels, kernel_size=1, padding='same'),
    ])

    return hglass
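A minimal usage sketch (assuming the module-level helpers used above, get_nd_conv, _stack_layers, hourglass_block and bottleneck_conv_block, are importable alongside single_hourglass; the shapes are illustrative):

import tensorflow as tf

hglass = single_hourglass(output_channels=1, n_levels=4, channels=32)
x = tf.random.normal((2, 64, 64, 3))  # already padded to a multiple of 2**n_levels
y = hglass(x)                         # 'same' convolutions preserve H and W: (2, 64, 64, 1)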
Example #3
def conv_block_3d(x, growth_rate, name, GN=True):
    """A building block for a dense block.

    Arguments:
    x: input tensor.
    growth_rate: float, growth rate at dense layers.
    name: string, block label.
    GN: bool, if True use GroupNormalization, otherwise BatchNormalization.

    Returns:
    Output tensor for the block.
    """
    if GN:
        x1 = GroupNormalization(groups=2, axis=-1, name=name + '_0_gn')(x)
    else:
        x1 = layers.BatchNormalization(name=name + '_0_bn')(x)
    x1 = layers.Activation('selu', name=name + '_0_selu')(x1)
    x1 = layers.Conv3D(2 * growth_rate,
                       1,
                       use_bias=False,
                       name=name + '_1_conv',
                       padding='same')(x1)
    if GN:
        x1 = GroupNormalization(groups=2, axis=-1, name=name + '_1_gn')(x1)
    else:
        x1 = layers.BatchNormalization(name=name + '_1_bn')(x1)
    x1 = layers.Activation('selu', name=name + '_1_selu')(x1)
    x1 = layers.Conv3D(growth_rate,
                       3,
                       padding='same',
                       use_bias=False,
                       name=name + '_2_conv')(x1)
    x = layers.Concatenate(axis=-1, name=name + '_concat')([x, x1])
    return x
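Since each block ends with a Concatenate, the channel count grows by growth_rate per call; a small shape check (channels-last 5D input, layer counts are illustrative):

import tensorflow as tf

x = tf.zeros((1, 8, 16, 16, 4))                      # 4 input channels
x = conv_block_3d(x, growth_rate=16, name='demo_a')  # -> 20 channels
x = conv_block_3d(x, growth_rate=16, name='demo_b')  # -> 36 channels
print(x.shape)                                       # (1, 8, 16, 16, 36)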
Example #4
def transition_layer1(x, out_filters_list=[32, 64], GROUPS=18):
    x0 = Conv2D(out_filters_list[0], 3, padding='same', use_bias=False, kernel_initializer='he_normal')(x)
    x0 = GroupNormalization(axis=3, groups=GROUPS)(x0)
    x0 = Activation('relu')(x0)

    x1 = Conv2D(out_filters_list[1], 3, strides=(2, 2), padding='same', use_bias=False, kernel_initializer='he_normal')(x)
    x1 = GroupNormalization(axis=3, groups=GROUPS)(x1)
    x1 = Activation('relu')(x1)

    return [x0, x1]
Example #5
    def _make_fuse_layers(self):
        if self.num_branches == 1:
            return None

        num_branches = self.num_branches
        num_inchannels = self.num_inchannels
        fuse_layers = []
        for i in range(num_branches if self.multi_scale_output else 1):
            fuse_layer = []
            for j in range(num_branches):
                if j > i:
                    fuse_layer.append(Sequential([
                        Conv2D(
                            filters=num_inchannels[i], 
                            kernel_size=(1, 1), 
                            strides=(1, 1), 
                            padding="same", 
                            use_bias=False
                        ),
                        GroupNormalization(groups=self.GN_GROUPS),
                        UpSampling2D(size=(2 ** (j - i), 2 ** (j - i)), interpolation='bilinear')
                    ]))
                elif j == i:
                    fuse_layer.append(None)
                else:
                    conv3x3s = []
                    for k in range(i - j):
                        if k == i - j - 1:
                            num_outchannels_conv3x3 = num_inchannels[i]
                            conv3x3s.append(Sequential([
                                Conv2D(
                                    filters=num_outchannels_conv3x3,
                                    kernel_size=(3,3),
                                    strides=(2,2),
                                    padding="same",
                                    use_bias=False),
                                GroupNormalization(groups=self.GN_GROUPS)
                            ]))
                        else:
                            num_outchannels_conv3x3 = num_inchannels[j]
                            conv3x3s.append(Sequential([
                                Conv2D(
                                    filters=num_outchannels_conv3x3,
                                    kernel_size=(3,3),
                                    strides=(2,2),
                                    padding="same",
                                    use_bias=False),
                                GroupNormalization(groups=self.GN_GROUPS),
                                ReLU()
                            ]))
                    fuse_layer.append(Sequential(conv3x3s))
            fuse_layers.append(fuse_layer)

        return fuse_layers
Example #6
 def __init__(self, _in_channel, output_dim, stride=1, downsample=None, GN_GROUPS=32, after_stem=False, **kwargs):
     super(BasicBlock, self).__init__(**kwargs)
     self.downsample = downsample
     self.conv1 = conv3x3(output_dim, stride)
     if after_stem:
         self.gn1 = GroupNormalization(groups=output_dim)
     else:
         self.gn1 = GroupNormalization(groups=GN_GROUPS)
     self.relu = ReLU()
     self.conv2 = conv3x3(output_dim)
     self.gn2 = GroupNormalization(groups=GN_GROUPS)
Example #7
def fuse_layer1(x, out_filters_list=[32, 64], GROUPS=18):
    x0_0 = x[0]
    x0_1 = Conv2D(out_filters_list[0], 1, use_bias=False, kernel_initializer='he_normal')(x[1])
    x0_1 = GroupNormalization(axis=3, groups=GROUPS)(x0_1)
    x0_1 = UpSampling2D(size=(2, 2), interpolation="bilinear")(x0_1)
    x0 = add([x0_0, x0_1])

    x1_0 = Conv2D(out_filters_list[1], 3, strides=(2, 2), padding='same', use_bias=False, kernel_initializer='he_normal')(x[0])
    x1_0 = GroupNormalization(axis=3, groups=GROUPS)(x1_0)
    x1_1 = x[1]
    x1 = add([x1_0, x1_1])
    return [x0, x1]
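A hedged wiring sketch for the two-branch stage: transition_layer1 opens the half-resolution branch, then fuse_layer1 exchanges information between the branches. GROUPS=16 is chosen here because GroupNormalization requires the group count to divide the channel count, which the default of 18 does not for 32/64 channels:

import tensorflow as tf

inp = tf.keras.Input(shape=(64, 64, 32))
branches = fuse_layer1(transition_layer1(inp, GROUPS=16), GROUPS=16)
# branches[0]: (None, 64, 64, 32), branches[1]: (None, 32, 32, 64)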
Example #8
 def __init__(self, filters, gp_num=48, **kwargs):
     super(BasicShuffleUnitV2, self).__init__(**kwargs)
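     # ShuffleNetV2 channel split: each branch of the unit carries half the channels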
     filters //= 2
     self.model = tf.keras.models.Sequential([
         GroupNormalization(groups=gp_num, axis=-1),
         Conv2D(filters, 1, use_bias=False),
         ReLU(),
         GroupNormalization(groups=gp_num, axis=-1),
         DepthwiseConv2D(3, padding='same', use_bias=False),
         GroupNormalization(groups=gp_num, axis=-1),
         Conv2D(filters, 1, use_bias=False),
         ReLU(),
     ])
Example #9
 def __init__(self,
              _in_channel,
              output_dim,
              stride=1,
              downsample=None,
              **kwargs):
     super(BasicBlock, self).__init__(**kwargs)
     self.downsample = downsample
     self.conv1 = conv3x3(output_dim, stride)
     self.bn1 = GroupNormalization(groups=GN_GROUPS)
     self.relu = ReLU()
     self.conv2 = conv3x3(output_dim)
     self.bn2 = GroupNormalization(groups=GN_GROUPS)
Example #10
    def _make_head(self, pre_stage_channels, GN_GROUPS):
        
        head_block = Bottleneck
        head_channels = [32, 64, 128, 256]

        # Increasing the #channels on each resolution 
        # from C, 2C, 4C, 8C to 128, 256, 512, 1024
        incre_modules = []
        for i, channels in enumerate(pre_stage_channels):
            incre_module = self._make_layer(
                block=head_block, 
                planes=head_channels[i], 
                blocks=1, 
                stride=1,
                head_layer=True
            )
            incre_modules.append(incre_module)

        # downsampling modules
        downsamp_modules = []
        for i in range(len(pre_stage_channels)-1):
            out_channels = head_channels[i+1] * head_block.expansion

            downsamp_module = Sequential([
                Conv2D(
                    filters=out_channels,
                    kernel_size=(3,3),
                    strides=(2,2),
                    padding='same'
                ),
                GroupNormalization(groups=GN_GROUPS),
                ReLU()
            ])

            downsamp_modules.append(downsamp_module)

        final_layer = Sequential([
            Conv2D(
                filters=2048,
                kernel_size=(1,1),
                strides=(1,1),
                padding='valid'
            ),
            GroupNormalization(groups=GN_GROUPS),
            ReLU(),
            GlobalAveragePooling2D(),
            Dense(self.NUM_CLASSES, dtype='float32')
        ])

        return incre_modules, downsamp_modules, final_layer
Example #11
def _normalization(inputs, norm='bn', name=None):
    if norm == 'bn':
        return BatchNormalization(name=name)(inputs)
    elif norm == 'syncbn':
        return SyncBatchNormalization(name=name)(inputs)
    elif norm == 'in':
        return InstanceNormalization(name=name)(inputs)
    elif norm == 'ln':
        return LayerNormalization(name=name)(inputs)
    elif 'gn' in norm:
        if len(norm) == 2:
            return GroupNormalization(name=name)(inputs)
        else:
            return GroupNormalization(groups=int(norm[2:]), name=name)(inputs)
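The norm string works as a tiny config protocol: a bare 'gn' falls back to the layer's default group count (32 in tensorflow_addons), while a suffix such as 'gn8' sets groups explicitly. A quick sketch:

import tensorflow as tf

x = tf.keras.Input(shape=(32, 32, 16))
y = _normalization(x, norm='gn8', name='block1_norm')  # GroupNormalization(groups=8)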
Example #12
    def _make_layer(self, block, planes, blocks, stride=1):
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = Sequential([
                Conv2D(planes * block.expansion,
                       kernel_size=(1, 1),
                       strides=(stride, stride),
                       padding="same",
                       use_bias=False),
                GroupNormalization(groups=self.GN_GROUPS)
            ])

        layers = [
            block(self.inplanes,
                  planes,
                  stride,
                  downsample,
                  GN_GROUPS=self.GN_GROUPS)
        ]
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(
                block(self.inplanes, planes, GN_GROUPS=self.GN_GROUPS))

        return Sequential(layers)
Example #13
    def _make_layer(self, block, planes, blocks, stride=1, after_stem=False, head_layer=False):
        downsample = None
        if stride != 1 or self.inplanes != planes*block.expansion:
            # TODO: make it so this if statement = True when C = 32
            if head_layer:
                GN_GROUPS = 32
            else:
                GN_GROUPS = self.GN_GROUPS
            
            downsample = Sequential([
                Conv2D(
                    planes * block.expansion, 
                    kernel_size=(1,1), 
                    strides=(stride,stride), 
                    padding="same",
                    use_bias=False),
                GroupNormalization(groups=GN_GROUPS)
            ])

        if after_stem:
            layers = [block(self.inplanes, planes, stride, downsample, GN_GROUPS=self.GN_GROUPS, after_stem=True)]
        elif head_layer:
            layers = [block(self.inplanes, planes, stride, downsample, GN_GROUPS=32, after_stem=False)]
        else:
            layers = [block(self.inplanes, planes, stride, downsample)]
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            if after_stem:
                layers.append(block(self.inplanes, planes, GN_GROUPS=self.GN_GROUPS, after_stem=True))
            elif head_layer:
                layers.append(block(self.inplanes, planes, GN_GROUPS=32, after_stem=False))
            else:
                layers.append(block(self.inplanes, planes))

        return Sequential(layers)
Example #14
    def test_group_normalization(self, use_cpu_only, backend, rank, groups,
                                 axis, epsilon, center, scale):
        tensorflow_addons = pytest.importorskip("tensorflow_addons")
        from tensorflow_addons.layers import GroupNormalization

        shape = np.random.randint(low=2, high=4, size=rank)
        shape[-1] = shape[-1] * groups  # make the channel count a multiple of groups
        model = tf.keras.Sequential([
            GroupNormalization(
                batch_input_shape=shape,
                groups=groups,
                axis=axis,
                epsilon=epsilon,
                center=center,
                scale=scale,
            )
        ])
        run_compare_tf_keras(
            model,
            [random_gen(shape, rand_min=-1, rand_max=1)],
            use_cpu_only=use_cpu_only,
            backend=backend,
            atol=1e-3,
            rtol=1e-4,
        )
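The shape[-1] adjustment above exists because GroupNormalization requires the channel count to be divisible by groups; building with an incompatible pair raises a ValueError. A quick illustration:

from tensorflow_addons.layers import GroupNormalization

GroupNormalization(groups=4).build(input_shape=(None, 8, 8, 12))      # 12 % 4 == 0: fine
try:
    GroupNormalization(groups=5).build(input_shape=(None, 8, 8, 12))  # 12 % 5 != 0
except ValueError as err:
    print(err)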
Example #15
    def _make_one_branch(self,
                         branch_index,
                         block,
                         num_blocks,
                         num_channels,
                         stride=1):
        downsample = None
        if stride != 1 or self.num_inchannels[
                branch_index] != num_channels[branch_index] * block.expansion:
            downsample = Sequential([
                Conv2D(filters=num_channels[branch_index] * block.expansion,
                       kernel_size=1,
                       strides=stride,
                       use_bias=False),
                GroupNormalization(groups=self.GN_GROUPS),
            ])

        layers = [
            block(self.num_inchannels[branch_index],
                  num_channels[branch_index], stride, downsample)
        ]
        self.num_inchannels[
            branch_index] = num_channels[branch_index] * block.expansion
        for i in range(1, num_blocks[branch_index]):
            layers.append(
                block(self.num_inchannels[branch_index],
                      num_channels[branch_index]))

        return Sequential(layers)
Example #16
    def __init__(self, c, name=None, drop_rate=0.0, groups=32):
        super().__init__(name=name)
        self.c = c
        self.drop_rate = drop_rate

        self.conv1 = Conv2D(c, 3, padding='same')
        self.conv2 = Conv2D(c, 3, padding='same')

        self.norm1 = GroupNormalization(groups=groups)
        self.norm2 = GroupNormalization(groups=groups)
        self.temb_proj = Dense(c)

        if drop_rate > 0.01:
            self.drop_fn = Dropout(drop_rate)
        else:
            self.drop_fn = tf.identity
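The snippet shows only the constructor; in blocks of this style the timestep embedding is projected by temb_proj and added between the two convolutions. A hypothetical call(), illustrating the common pattern rather than the author's code:

    def call(self, x, temb):
        h = tf.nn.swish(self.norm1(x))
        h = self.conv1(h)
        # broadcast the projected embedding over the spatial dimensions
        h = h + self.temb_proj(tf.nn.swish(temb))[:, None, None, :]
        h = tf.nn.swish(self.norm2(h))
        h = self.conv2(self.drop_fn(h))
        return x + h  # residual; assumes x already has self.c channels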
Example #17
def fuse_layer3(x, out_filters_list=[32, 64, 128, 256], GROUPS=18):
    x0_0 = x[0]
    
    x0_1 = Conv2D(out_filters_list[1], 1, use_bias=False, kernel_initializer='he_normal')(x[1])
    x0_1 = GroupNormalization(axis=3, groups=GROUPS)(x0_1)
    x0_1 = UpSampling2D(size=(2, 2), interpolation="bilinear")(x0_1)
    
    x0_2 = Conv2D(out_filters_list[2], 1, use_bias=False, kernel_initializer='he_normal')(x[2])
    x0_2 = GroupNormalization(axis=3, groups=GROUPS)(x0_2)
    x0_2 = UpSampling2D(size=(4, 4), interpolation="bilinear")(x0_2)
    
    x0_3 = Conv2D(out_filters_list[3], 1, use_bias=False, kernel_initializer='he_normal')(x[3])
    x0_3 = GroupNormalization(axis=3, groups=GROUPS)(x0_3)
    x0_3 = UpSampling2D(size=(8, 8), interpolation="bilinear")(x0_3)
    
    x0 = concatenate([x0_0, x0_1, x0_2, x0_3], axis=-1)
    return x0
Example #18
def mydensenet(blocks_in_dense=2, dense_conv_blocks=2, dense_layers=1, num_dense_connections=256, filters=16,
               growth_rate=16, reduction=0.5, **kwargs):
    """
    :param blocks_in_dense: how many convolution blocks are in a single size layer
    :param dense_conv_blocks: how many dense blocks before a max pooling to occur
    :param dense_layers: number of dense layers
    :param num_dense_connections:
    :param filters:
    :param growth_rate:
    :param kwargs:
    :return:
    """
    blocks_in_dense = int(blocks_in_dense)
    dense_conv_blocks = int(dense_conv_blocks)
    dense_layers = int(dense_layers)
    num_dense_connections = int(num_dense_connections)
    filters = int(filters)
    growth_rate = int(growth_rate)
    reduction = float(reduction)
    input_shape = (32, 64, 64, 2)
    img_input = layers.Input(shape=input_shape)
    x = img_input

    inputs = (img_input,)

    x = layers.Conv3D(filters, (3, 7, 7), strides=2, use_bias=False, name='conv1/conv', padding='same')(x)
    x = GroupNormalization(groups=2, axis=-1, name='conv1/gn')(x)
    x = layers.Activation('selu', name='conv1/selu')(x)

    for i in range(dense_conv_blocks):
        x = dense_block3d(x=x, growth_rate=growth_rate, blocks=blocks_in_dense, name='conv{}'.format(i))
        x = transition_block(x=x, reduction=reduction, name='pool{}'.format(i))
    x = GroupNormalization(groups=2, axis=-1, name='gn')(x)
    x = layers.Activation('selu', name='selu')(x)

    x = layers.AveragePooling3D(pool_size=(2, 2, 2), name='final_average_pooling')(x)
    x = layers.Flatten()(x)
    for i in range(dense_layers):
        x = layers.Dense(num_dense_connections, activation='selu', kernel_regularizer=regularizers.l2(0.001))(x)
        x = layers.Dropout(0.5)(x)
    x = layers.Dense(2, activation='softmax', name='prediction', dtype='float32')(x)
    model = Model(inputs=inputs, outputs=(x,), name='my_3d_densenet')
    return model
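A quick smoke test (the fixed input shape (32, 64, 64, 2) comes from the function body; dense_block3d and transition_block are assumed to be defined in the same module):

import tensorflow as tf

model = mydensenet()
dummy = tf.zeros((1, 32, 64, 64, 2))
probs = model(dummy)   # shape (1, 2): softmax over the two classes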
Example #19
def fuse_layer2(x, out_filters_list=[32, 64, 128], GROUPS=18):
    
    # add( identity (x0) | upsample x 2 (x1) | upsample x 4 (x2) ) --> x0
    x0_0 = x[0]
    x0_1 = Conv2D(out_filters_list[0], 1, use_bias=False, kernel_initializer='he_normal')(x[1])
    x0_1 = GroupNormalization(axis=3, groups=GROUPS)(x0_1)
    x0_1 = UpSampling2D(size=(2, 2), interpolation="bilinear")(x0_1)
    x0_2 = Conv2D(out_filters_list[0], 1, use_bias=False, kernel_initializer='he_normal')(x[2])
    x0_2 = GroupNormalization(axis=3, groups=GROUPS)(x0_2)
    x0_2 = UpSampling2D(size=(4, 4), interpolation="bilinear")(x0_2)
    x0 = add([x0_0, x0_1, x0_2])

    # add( downsample x 2 (x0) | identity (x1) | upsample x 2 (x2) ) --> x1
    x1_0 = Conv2D(out_filters_list[1], 3, strides=(2, 2), padding='same', use_bias=False, kernel_initializer='he_normal')(x[0])
    x1_0 = GroupNormalization(axis=3, groups=GROUPS)(x1_0)
    x1_1 = x[1]
    x1_2 = Conv2D(out_filters_list[1], 1, use_bias=False, kernel_initializer='he_normal')(x[2])
    x1_2 = GroupNormalization(axis=3, groups=GROUPS)(x1_2)
    x1_2 = UpSampling2D(size=(2, 2), interpolation="bilinear")(x1_2)
    x1 = add([x1_0, x1_1, x1_2])

    # add( downsample x 4 (x0) | downsample x 2 (x1) | identity (x2) ) --> x2
    x2_0 = Conv2D(out_filters_list[0], 3, strides=(2, 2), padding='same', use_bias=False, kernel_initializer='he_normal')(x[0])
    x2_0 = GroupNormalization(axis=3, groups=GROUPS)(x2_0)
    x2_0 = Activation('relu')(x2_0)
    x2_0 = Conv2D(out_filters_list[2], 3, strides=(2, 2), padding='same', use_bias=False, kernel_initializer='he_normal')(x2_0)
    x2_0 = GroupNormalization(axis=3, groups=GROUPS)(x2_0)
    x2_1 = Conv2D(out_filters_list[2], 3, strides=(2, 2), padding='same', use_bias=False, kernel_initializer='he_normal')(x[1])
    x2_1 = GroupNormalization(axis=3, groups=GROUPS)(x2_1)
    x2_2 = x[2]
    x2 = add([x2_0, x2_1, x2_2])
    return [x0, x1, x2]
Example #20
 def __init__(self,
              filters,
              kernel_size,
              gp_num=3,
              pad_type="constant",
              **kwargs):
     super(ResBlock, self).__init__(**kwargs)
     padding = (kernel_size - 1) // 2
     padding = (padding, padding)
     self.model = tf.keras.models.Sequential()
     self.model.add(get_padding(pad_type, padding))
     self.model.add(GroupNormalization(groups=gp_num, axis=-1))
     self.model.add(Conv2D(filters, kernel_size))
     self.model.add(ReLU())
     self.model.add(get_padding(pad_type, padding))
     self.model.add(GroupNormalization(groups=gp_num, axis=-1))
     self.model.add(Conv2D(filters, kernel_size))
     self.add = Add()
Example #21
def transition_layer3(x, out_filters_list=[32, 64, 128, 256], GROUPS=18):
    x0 = Conv2D(out_filters_list[0], 3, padding='same', use_bias=False, kernel_initializer='he_normal')(x[0])
    x0 = GroupNormalization(axis=3, groups=GROUPS)(x0)
    x0 = Activation('relu')(x0)

    x1 = Conv2D(out_filters_list[1], 3, padding='same', use_bias=False, kernel_initializer='he_normal')(x[1])
    x1 = GroupNormalization(axis=3, groups=GROUPS)(x1)
    x1 = Activation('relu')(x1)

    x2 = Conv2D(out_filters_list[2], 3, padding='same', use_bias=False, kernel_initializer='he_normal')(x[2])
    x2 = GroupNormalization(axis=3, groups=GROUPS)(x2)
    x2 = Activation('relu')(x2)

    x3 = Conv2D(out_filters_list[3], 3, strides=(2, 2), padding='same', use_bias=False, kernel_initializer='he_normal')(x[2])
    x3 = GroupNormalization(axis=3, groups=GROUPS)(x3)
    x3 = Activation('relu')(x3)

    return [x0, x1, x2, x3]
Example #22
    def __init__(self,
                 filters=64,
                 lrelu_alpha=0.2,
                 pad_type="constant",
                 gp_num=3,
                 **kwargs):
        super(StridedConv, self).__init__(name="StridedConv")

        self.model = tf.keras.models.Sequential()
        self.model.add(get_padding(pad_type, (1, 1)))
        self.model.add(GroupNormalization(groups=gp_num, axis=-1))
        self.model.add(Conv2D(filters, 3, strides=(2, 2)))
        self.model.add(LeakyReLU(lrelu_alpha))
        self.model.add(get_padding(pad_type, (1, 1)))

        self.model.add(GroupNormalization(groups=gp_num, axis=-1))
        self.model.add(Conv2D(filters * 2, 3))
        self.model.add(LeakyReLU(lrelu_alpha))
Example #23
def basic_Block(x_input, out_filters, strides=(1, 1), with_conv_shortcut=False, final_activation=True, GROUPS=18):
    x = conv3x3(x_input, out_filters, strides)
    x = GroupNormalization(axis=3, groups=GROUPS)(x)
    x = Activation('relu')(x)

    x = conv3x3(x, out_filters)
    x = GroupNormalization(axis=3, groups=GROUPS)(x)

    if with_conv_shortcut:
        residual = Conv2D(out_filters, 1, strides=strides, use_bias=False, kernel_initializer='he_normal')(x_input)
        residual = GroupNormalization(axis=3, groups=GROUPS)(residual)
        x = add([x, residual])
    else:
        x = add([x, x_input])

    if final_activation:
        x = Activation('relu')(x)
        
    return x
Example #24
def stem_net(x_input, GROUPS = 32):
    x = Conv2D(64, 3, strides=(2, 2), padding='same', use_bias=False, kernel_initializer='he_normal')(x_input)
    x = GroupNormalization(axis=3, groups=GROUPS)(x)
    x = Activation('relu')(x)

    x = bottleneck_Block(x, 256, with_conv_shortcut=True, GROUPS=GROUPS)
    x = bottleneck_Block(x, 256, with_conv_shortcut=False, GROUPS=GROUPS)
    x = bottleneck_Block(x, 256, with_conv_shortcut=False, GROUPS=GROUPS)
    x = bottleneck_Block(x, 256, with_conv_shortcut=False, GROUPS=GROUPS)

    return x
Example #25
 def __init__(self,
              filters,
              regularizer,
              data_format='channels_first',
              name=None,
              **kwargs):
     super().__init__(**kwargs)
     self.hidden = [
         Conv3D(filters=filters,
                kernel_size=(1, 1, 1),
                strides=1,
                kernel_regularizer=regularizer,
                data_format=data_format,
                name=f'Res_{name}' if name else None),
         GroupNormalization(
             groups=8,
             axis=1 if data_format == 'channels_first' else -1,
             name=f'GroupNorm_1_{name}' if name else None),
         Activation('relu', name=f'Relu_1_{name}' if name else None),
         Conv3D(filters=filters,
                kernel_size=(3, 3, 3),
                strides=1,
                padding='same',
                kernel_regularizer=regularizer,
                data_format=data_format,
                name=f'Conv3D_1_{name}' if name else None),
         GroupNormalization(
             groups=8,
             axis=1 if data_format == 'channels_first' else -1,
             name=f'GroupNorm_2_{name}' if name else None),
         Activation('relu', name=f'Relu_2_{name}' if name else None),
         Conv3D(filters=filters,
                kernel_size=(3, 3, 3),
                strides=1,
                padding='same',
                kernel_regularizer=regularizer,
                data_format=data_format,
                name=f'Conv3D_2_{name}' if name else None),
         Add(name=f'Out_{name}' if name else None)
     ]
Example #26
    def _make_transition_layer(num_channels_pre_layer, num_channels_cur_layer,
                               GN_GROUPS):
        num_branches_pre = len(num_channels_pre_layer)
        num_branches_cur = len(num_channels_cur_layer)

        transition_layers = []
        for i in range(num_branches_cur):
            if i < num_branches_pre:
                if num_channels_cur_layer[i] != num_channels_pre_layer[i]:
                    transition_layers.append(
                        Sequential([
                            Conv2D(filters=num_channels_cur_layer[i],
                                   kernel_size=(3, 3),
                                   strides=(1, 1),
                                   padding="same",
                                   use_bias=False),
                            GroupNormalization(groups=GN_GROUPS),
                            ReLU()
                        ]))
                else:
                    transition_layers.append(None)
            else:
                conv3x3s = []
                for j in range(i + 1 - num_branches_pre):
                    inchannels = num_channels_pre_layer[-1]
                    outchannels = num_channels_cur_layer[
                        i] if j == i - num_branches_pre else inchannels
                    conv3x3s.append(
                        Sequential([
                            Conv2D(filters=outchannels,
                                   kernel_size=(3, 3),
                                   strides=(2, 2),
                                   padding="same",
                                   use_bias=False),
                            GroupNormalization(groups=GN_GROUPS),
                            ReLU()
                        ]))
                transition_layers.append(Sequential(conv3x3s))

        return transition_layers
Example #27
    def __init__(self, c, name=None, drop_rate=0.0, groups=32):
        super().__init__(name=name)
        self.c = c
        self.k = Dense(c)
        self.norm = GroupNormalization(groups=groups)
        self.proj_out = Dense(c)
        self.q = Dense(c)
        self.v = Dense(c)

        if drop_rate > 0.01:
            self.drop_fn = Dropout(drop_rate)
        else:
            self.drop_fn = tf.identity
Example #28
def bottleneck_Block(x_input, out_filters, strides=(1, 1), with_conv_shortcut=False, GROUPS=18):
    expansion = 4
    de_filters = int(out_filters / expansion)

    x = Conv2D(de_filters, 1, use_bias=False, kernel_initializer='he_normal')(x_input)
    x = GroupNormalization(axis=3, groups=GROUPS)(x)
    x = Activation('relu')(x)

    x = Conv2D(de_filters, 3, strides=strides, padding='same', use_bias=False, kernel_initializer='he_normal')(x)
    x = GroupNormalization(axis=3, groups=GROUPS)(x)
    x = Activation('relu')(x)

    x = Conv2D(out_filters, 1, use_bias=False, kernel_initializer='he_normal')(x)
    x = GroupNormalization(axis=3, groups=GROUPS)(x)

    if with_conv_shortcut:
        residual = Conv2D(out_filters, 1, strides=strides, use_bias=False, kernel_initializer='he_normal')(x_input)
        residual = GroupNormalization(axis=3, groups=GROUPS)(residual)
        x = add([x, residual])
    else:
        x = add([x, x_input])

    x = Activation('relu')(x)
    return x
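Note that the default GROUPS=18 only works when it divides both de_filters and out_filters; with out_filters=256 the internal branch has 64 channels, and 64 % 18 != 0, which is why stem_net above passes GROUPS=32. For example:

import tensorflow as tf

x = tf.keras.Input(shape=(56, 56, 64))
y = bottleneck_Block(x, 256, with_conv_shortcut=True, GROUPS=32)  # 64 and 256 are both divisible by 32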
Example #29
def bottleneck_conv_block(channels=32, spatial_dims=2, norm_groups=4):
    '''
    Notes
    -----
    Pre-activation keeps the residual path clear, as described in:

    He, Kaiming, et al. "Identity Mappings in Deep Residual Networks."
    European Conference on Computer Vision, Springer, Cham, 2016,
    pp. 630-645.
    '''

    Conv = get_nd_conv(spatial_dims)

    conv_in = Conv(channels, kernel_size=1, padding='same')

    seq = _stack_layers([
        Activation(),
        GroupNormalization(groups=norm_groups, axis=-1),
        Conv(channels // 2, kernel_size=1, padding='same'),
        Activation(),
        GroupNormalization(groups=norm_groups, axis=-1),
        Conv(channels // 2, kernel_size=3, padding='same'),
        Activation(),
        GroupNormalization(groups=norm_groups, axis=-1),
        Conv(channels, kernel_size=1, padding='same'),
    ])

    def block(x):
        if x.shape[-1] != channels:
            # if needed, project the input channels up to the output channel count
            # (no longer strictly a bottleneck in that case)
            x = Activation()(conv_in(x))

        return x + seq(x)

    return block
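Because the block returns x + seq(x) and every convolution uses 'same' padding, the input shape is preserved; when the input channel count differs, conv_in first projects it to channels. A minimal sketch (assuming the module's get_nd_conv, _stack_layers and Activation helpers):

import tensorflow as tf

block = bottleneck_conv_block(channels=32, spatial_dims=2, norm_groups=4)
y = block(tf.random.normal((1, 16, 16, 32)))   # (1, 16, 16, 32)
y3 = block(tf.random.normal((1, 16, 16, 3)))   # input lifted 3 -> 32 channels first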
Example #30
 def __init__(self,
              filters,
              kernel_size,
              gp_num=3,
              pad_type="constant",
              light=False,
              **kwargs):
     super(UpSampleConv, self).__init__(name="UpSampleConv")
     if light:
         self.model = tf.keras.models.Sequential([
             GroupNormalization(groups=gp_num, axis=-1),
             Conv2D(filters, 1),
             BasicShuffleUnitV2(filters, gp_num)
         ])
     else:
         self.model = ConvBlock(filters, kernel_size, 1, gp_num, pad_type)