Example #1
    def feedforward_layers(self, final_activation=None):
        '''Stack blocks of optional dropout, dense, and optional batch norm.'''
        X = Input(shape=(self.X_train.shape[1], ))

        # Identity layer so the loop below can chain uniformly onto `layer`.
        layer = Layer(name='identity')(X)

        n_layers = len(self.params['layers'])

        for i, units in enumerate(self.params['layers']):

            drop_rate = self.params.get('drop_rates', [0.0] * n_layers)[i]
            if drop_rate > 0.0:
                layer = Dropout(drop_rate,
                                noise_shape=None,
                                seed=None,
                                name='drop_' + str(i+1))(layer)

            layer = Dense(units,
                          activation=self.params.get('activation', None),
                          kernel_initializer=self.initializer,
                          bias_initializer='zeros',
                          kernel_regularizer=self.regularizer,
                          bias_regularizer=None,
                          activity_regularizer=None,
                          kernel_constraint=None,
                          bias_constraint=None,
                          name='dense_' + str(i+1))(layer)

            if self.params.get('use_batch_norm', [False] * n_layers)[i]:
                layer = BatchNormalization(axis=-1,
                                           momentum=self.params.get('batch_norm_momentum', 0.99),
                                           epsilon=0.001,
                                           center=True,
                                           scale=True,
                                           beta_initializer='zeros',
                                           gamma_initializer='ones',
                                           moving_mean_initializer='zeros',
                                           moving_variance_initializer='ones',
                                           beta_regularizer=None,
                                           gamma_regularizer=None,
                                           beta_constraint=None,
                                           gamma_constraint=None,
                                           name='bn_'+str(i+1))(layer)

        outputs = Dense(1,
                        activation=final_activation,
                        kernel_initializer=self.initializer,
                        bias_initializer='zeros',
                        kernel_regularizer=None,
                        bias_regularizer=None,
                        activity_regularizer=None,
                        kernel_constraint=None,
                        bias_constraint=None,
                        name='outputs')(layer)

        return X, outputs
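
A minimal usage sketch (hedged: net stands for an instance of the surrounding class, which is assumed to provide X_train, params, initializer and regularizer):

inputs, outputs = net.feedforward_layers(final_activation='sigmoid')
model = tf.keras.Model(inputs=inputs, outputs=outputs)  # wires up the single-unit head built above
model.compile(optimizer='adam', loss='binary_crossentropy')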
Example #2
def reduce_encoder_output(encoder_output, encoder_reduction):
    if encoder_reduction == EncoderReduction.GA_POOLING:
        reduced = GlobalAveragePooling2D()(encoder_output)
    elif encoder_reduction == EncoderReduction.FLATTEN:
        reduced = Flatten()(encoder_output)
    elif encoder_reduction == EncoderReduction.GA_ATTENTION:
        reduced = Layer()(attention_ga_pooling(encoder_output))
    else:
        raise ValueError(f'Unknown encoder reduction: {encoder_reduction}')
    return reduced
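
A minimal usage sketch, assuming the EncoderReduction enum from the surrounding module; the feature-map shape is illustrative:

features = tf.keras.Input(shape=(8, 8, 256))  # (batch, H, W, C) encoder output
pooled = reduce_encoder_output(features, EncoderReduction.GA_POOLING)  # -> (batch, 256)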
Example #3
    def build(self, input_shape=None):
        self.check_initialization()

        # State value V
        if self.v_h_size is not None:
            if self.noise_std_init == 0:
                self.v_h = Dense(self.v_h_size, name='latent_V')
            else:
                self.v_h = NoisyDense(self.v_h_size,
                                      std_init=self.noise_std_init,
                                      name='latent_V')
        else:
            self.v_h = Layer()  # identity pass-through (no hidden layer)

        if self.noise_std_init == 0:
            self.v = Dense(1, name='V')
        else:
            self.v = NoisyDense(1, self.noise_std_init, name='V')

        # Advantage A
        if self.a_h_size is not None:
            if self.noise_std_init == 0:
                self.a_h = Dense(self.a_h_size, name='latent_A')
            else:
                self.a_h = NoisyDense(self.a_h_size,
                                      std_init=self.noise_std_init,
                                      name='latent_A')
        else:
            self.a_h = Layer()  # identity pass-through (no hidden layer)

        if self.noise_std_init == 0:
            self.a = Dense(self.num_actions, name='A')
        else:
            self.a = NoisyDense(self.num_actions,
                                self.noise_std_init,
                                name='A')

        super(DoubleQNetwork, self).build(input_shape)
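
The V/A split above suggests a dueling head; a hedged sketch of a matching call(), using the standard dueling aggregation Q = V + A - mean(A) (an assumption, not shown in the source):

    def call(self, latent, training=None):
        # Hypothetical forward pass over the sublayers built above.
        v = self.v(self.v_h(latent))  # (batch, 1) state value
        a = self.a(self.a_h(latent))  # (batch, num_actions) advantages
        return v + a - tf.reduce_mean(a, axis=-1, keepdims=True)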
Example #4
def split_output_into_instance_seg(model,
                                   n_classes,
                                   spacing=1.,
                                   class_activation=True):
    '''Splits the model output into semi-conv instance embeddings and semantic class scores.

    Args:
        model: base model whose output has at least n_classes + n_spatial_dims channels
        n_classes: number of semantic classes
        spacing: pixel/voxel spacing of the semi-conv embeddings
        class_activation: if True, apply a softmax over the semantic class channels
    '''

    spatial_dims = len(model.inputs[0].shape) - 2
    spacing = tuple(
        float(val) for val in np.broadcast_to(spacing, spatial_dims))
    y_preds = model.outputs[0]

    if y_preds.shape[-1] < n_classes + spatial_dims:
        raise ValueError(
            'model output has fewer than n_classes + n_spatial_dims channels: {} < {} + {}'
            .format(y_preds.shape[-1], n_classes, spatial_dims))

    vfield = y_preds[..., 0:spatial_dims]
    coords = generate_coordinate_grid(tf.shape(vfield), spatial_dims) * spacing
    embeddings = coords + vfield

    semantic_class = y_preds[..., spatial_dims:spatial_dims + n_classes]
    if class_activation:
        semantic_class = tf.nn.softmax(semantic_class, axis=-1)

    # rename outputs
    embeddings = Layer(name='embeddings')(embeddings)
    semantic_class = Layer(name='semantic_class')(semantic_class)

    return Model(inputs=model.inputs,
                 outputs=[embeddings, semantic_class],
                 name=model.name)
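
A hedged usage sketch; base_model and images are placeholders for objects from the surrounding codebase:

# For a 2D base model with 3 semantic classes, the output needs >= 2 + 3 channels.
inst_model = split_output_into_instance_seg(base_model, n_classes=3, spacing=(1.0, 0.5))
embeddings, semantic_class = inst_model(images)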
Example #5
    def base_network(self):
        mob_net = tf.keras.applications.MobileNetV2(weights='imagenet',
                                                    include_top=False,
                                                    input_shape=self.in_shape)
        fmg = Layer(name="Feature_map_G_1")(
            mob_net.layers[-self.end_layer].output)
        fmg = Conv2D(self.out_features, (1, 1),
                     name='Feature_map_G_2',
                     activation='relu')(fmg)
        fmg = tf.keras.layers.BatchNormalization(axis=-1,
                                                 trainable=False,
                                                 name='Feature_map_G_3')(fmg)
        x = GlobalAveragePooling2D()(fmg)
        if self.l2_norm:
            x = Lambda(lambda a: tf.math.l2_normalize(a, axis=1),
                       name='l2_norm')(x)

        outmodel = Model(inputs=mob_net.input,
                         outputs=x,
                         name='base_FE_network')
        self.map_G = Model(inputs=mob_net.input,
                           outputs=fmg,
                           name='map_G_network')
        return outmodel
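
A hedged usage note, assuming it runs on an instance of the surrounding class (in_shape and map_G are its attributes):

backbone = self.base_network()  # (batch, out_features) embedding model
emb = backbone(tf.random.uniform((4, *self.in_shape)))
fmap = self.map_G(tf.random.uniform((4, *self.in_shape)))  # intermediate feature map G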
Example #6
    def build_generator(self) -> tf.keras.Model:
        inputs = Input((self.z_dims, ))

        x = Dense(4 * 4 * 4 * self.z_dims)(inputs)
        x = BatchNormalization()(x)
        x = ReLU()(x)

        x = Reshape((4, 4, 4 * self.z_dims))(x)

        for i in range(3):
            x = Conv2DTranspose(self.z_dims * 4 // (2**i),
                                kernel_size=5,
                                strides=2,
                                padding='same')(x)
            x = BatchNormalization()(x)
            x = ReLU()(x)

        x = Conv2DTranspose(self.n_channels,
                            kernel_size=5,
                            strides=1,
                            padding='same')(x)
        x = Activation('tanh')(x)  # tanh output in [-1, 1]

        return Model(inputs, x, name='generator')
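
A quick shape check (hedged: z_dims and n_channels come from the surrounding class). The three stride-2 transposed convolutions upsample the 4x4 seed to 32x32:

gen = self.build_generator()
fake = gen(tf.random.normal((8, self.z_dims)))  # -> (8, 32, 32, n_channels)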
Example #7
    def __init__(self, num_channels, name, use_1x1conv=False, strides=1):
        super().__init__()
        self.conv1 = Convolution2D(num_channels,
                                   kernel_size=3,
                                   padding='same',
                                   strides=strides,
                                   name=name + "_conv1",
                                   kernel_initializer=GlorotNormal())
        self.conv2 = Convolution2D(num_channels,
                                   kernel_size=3,
                                   padding='same',
                                   name=name + "_conv2",
                                   kernel_initializer=GlorotNormal())
        self.bn1 = BatchNormalization(name=name + "_bn1")
        self.bn2 = BatchNormalization(name=name + "_bn2")
        if use_1x1conv:
            self.skip_conv = Convolution2D(num_channels,
                                           kernel_size=1,
                                           strides=strides,
                                           name=name + "_skip_conv",
                                           kernel_initializer=GlorotNormal())
        else:
            self.skip_conv = Layer()  # identity skip connection
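
This reads as a standard residual block; a hedged sketch of a matching call() (the post-addition ReLU is the classic ResNet choice, assumed rather than taken from the source):

    def call(self, x, training=None):
        # Hypothetical forward pass: two conv-BN stages plus a (possibly identity) skip.
        y = tf.nn.relu(self.bn1(self.conv1(x), training=training))
        y = self.bn2(self.conv2(y), training=training)
        return tf.nn.relu(y + self.skip_conv(x))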
Example #8
def conrec_model(input_shape=(256, 256, 1),
                 basemap=32,
                 activation='sigmoid',
                 depth=4,
                 p_dropout=None,
                 batch_normalization=True,
                 projection_dim=128,
                 projection_head_layers=3,
                 skip_connections=None,
                 encoder_reduction=EncoderReduction.GA_POOLING,
                 decoder_type=DecoderType.UPSAMPLING,
                 sc_strength=1):
    def _pool_and_dropout(pool_size, p_dropout, inp):
        """Helper to apply max-pooling followed by optional dropout."""
        pool = MaxPooling2D(pool_size=pool_size)(inp)
        if p_dropout:
            return Dropout(p_dropout)(pool)
        return pool

    if skip_connections is None:
        skip_connections = depth - 1

    inputs = Input(input_shape)
    current_layer = inputs
    levels = list()

    for layer_depth in range(depth):
        x = current_layer
        for _ in range(2):
            x = _create_convolution_block(
                input_layer=x,
                n_filters=basemap * (2**layer_depth),
                kernel=(3, 3),
                batch_normalization=batch_normalization,
                use_bias=True)
        if layer_depth < depth - 1:
            x = Layer(name='sc-' + str(layer_depth))(x)
            skip = _create_convolution_block(
                input_layer=x,
                n_filters=x.shape[-1] * sc_strength,
                kernel=(1, 1),
                batch_normalization=batch_normalization,
                use_bias=True)
            levels.append(skip)
            current_layer = _pool_and_dropout(pool_size=(2, 2),
                                              p_dropout=p_dropout,
                                              inp=x)
        else:
            x = Dropout(p_dropout)(x) if p_dropout else x
            current_layer = x

    reduced = reduce_encoder_output(encoder_output=current_layer,
                                    encoder_reduction=encoder_reduction)
    reduced = Layer(name=ENCODER_OUTPUT_NAME)(reduced)

    con_output = add_contrastive_output(
        input=reduced,
        projection_dim=projection_dim,
        projection_head_layers=projection_head_layers)

    for layer_depth in range(depth - 2, -1, -1):
        if decoder_type == DecoderType.TRANSPOSE:
            x = Conv2DTranspose(basemap * (2**layer_depth), (2, 2),
                                strides=(2, 2),
                                padding='same')(current_layer)
        elif decoder_type == DecoderType.UPSAMPLING:
            x = UpSampling2D(size=(2, 2))(current_layer)
        else:
            raise ValueError('Unknown decoder type')
        if skip_connections > layer_depth:
            x = concatenate([x, levels[layer_depth]], axis=3)
        else:
            print('No skip connection')
        for _ in range(2):
            x = _create_convolution_block(
                input_layer=x,
                n_filters=basemap * (2**layer_depth),
                kernel=(3, 3),
                batch_normalization=batch_normalization,
                use_bias=True)
        current_layer = Dropout(p_dropout)(x) if p_dropout else x

    reconstruction_out = Conv2D(input_shape[-1], (1, 1),
                                activation=activation,
                                name=RECONSTRUCTION_OUTPUT)(current_layer)

    return Model(inputs, [reconstruction_out, con_output])
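
A hedged construction example; ENCODER_OUTPUT_NAME, RECONSTRUCTION_OUTPUT, _create_convolution_block and add_contrastive_output come from the surrounding module:

model = conrec_model(input_shape=(256, 256, 1), depth=4)
reconstruction, contrastive = model(tf.zeros((2, 256, 256, 1)))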
Example #9
        Convolution2D(256, kernel, kernel, 'valid'),
        BatchNormalization(),
        UpSampling2D(size=(pool_size, pool_size)),
        ZeroPadding2D(padding=(pad, pad)),
        Convolution2D(128, kernel, kernel, 'valid'),
        BatchNormalization(),
        UpSampling2D(size=(pool_size, pool_size)),
        ZeroPadding2D(padding=(pad, pad)),
        Convolution2D(filter_size, kernel, kernel, 'valid'),
        BatchNormalization(),
    ]


segnet_basic = models.Sequential()

segnet_basic.add(Layer(input_shape=(3, 360, 480)))

segnet_basic.encoding_layers = create_encoding_layers()

for l in segnet_basic.encoding_layers:
    segnet_basic.add(l)

# Note: if this looks weird, it is because each layer is added via the for
# loops above instead of writing model.add(layer) out by hand every time.

segnet_basic.decoding_layers = create_decoding_layers()
for l in segnet_basic.decoding_layers:
    segnet_basic.add(l)

segnet_basic.add(Convolution2D(
    12,