def decoder(self, x, a, b):
        a = GlobalAveragePooling2D()(a)
        b = Conv2D(64, (1, 1), strides=1, padding='same')(b)
        b = GlobalAveragePooling2D()(b)

        x = Conv2D(64, (3, 3), strides=1, padding='same')(x)
        x = InstanceNormalization()(x)
        x = Activation('relu')(x)
        x1 = multiply([x, b])
        x = add([x, x1])
        x = UpSampling2D(size=(2, 2))(x)

        x = Conv2D(64, (3, 3), strides=1, padding='same')(x)
        x = InstanceNormalization()(x)
        x = Activation('relu')(x)
        x2 = multiply([x, a])
        x = add([x, x2])
        x = UpSampling2D(size=(2, 2))(x)

        x = Conv2D(128, (3, 3), strides=1, padding='same')(x)
        x = InstanceNormalization()(x)
        x = Activation('relu')(x)

        x = Conv2D(1, 1, padding='same', activation='sigmoid')(x)
        return x
    def build_SynNet(img_size=256):
        inp = layers.Input(shape=(img_size, img_size, 6))

        t_layer, r_layer = tf.split(inp, num_or_size_splits=2, axis=3)  # split the 6 channels into T and R

        # 256 -> 128
        conv1 = BeyondLinearityComponent.get_conv_block(6, 64)(inp)

        # 128 -> 64
        conv2 = BeyondLinearityComponent.get_conv_block(64, 128)(conv1)

        # 64 -> 32
        conv3 = BeyondLinearityComponent.get_conv_block(128, 256)(conv2)

        x = conv3
        # 32 -> 32
        for _ in range(9):
            x = BeyondLinearityComponent.get_res_block(256, 256)(x)

        # 32 -> 64
        deconv1 = BeyondLinearityComponent.get_deconv_block(256, 128)(x)

        # 64 -> 128
        deconv2 = BeyondLinearityComponent.get_deconv_block(128, 64)(deconv1)

        # 128 -> 256 and get the alpha blending mask.
        mask = BeyondLinearityComponent.get_deconv_block(64, 3, non_linear='sigmoid')(deconv2)
        mask_d = tf.ones_like(mask) - mask

        out_t = layers.multiply([mask, t_layer])
        out_r = layers.multiply([mask_d, r_layer])

        out = out_t + out_r

        return keras.Model(inp, [mask, out])
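# Usage sketch (not from the original source): build the SynNet above and run
# a random 6-channel tensor (T and R stacked on the channel axis) through it,
# assuming build_SynNet is reachable as a plain function or static method.
import numpy as np

syn_net = build_SynNet(img_size=256)
t_and_r = np.random.rand(1, 256, 256, 6).astype('float32')
mask, mixed = syn_net.predict(t_and_r)
print(mask.shape, mixed.shape)  # expected: (1, 256, 256, 3) (1, 256, 256, 3)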
    def build_optical_synthesis_generator(img_size=256, noise_dim=4):
        """
        build the generator model that use the conventional reflection synthetic model.
        the generator with the optical synthesis prior will only accept a noise-map from the encoder and convert it to
        an (1) alpha blending mask for fusing the transmission layer T and reflection layer R. (2) convolution kernel
        that blurs the reflection layer
        :param img_size: image size for reflection image R, transmission layer T
        :param noise_dim: noise_dim to concat with the input image (T, R)
        :return: tf.keras.Model object. The generator model accepts three 4-D tensors: (1) T. (2) R. (3) noise layer.
        The generator model will output two tensors:
        (1) [alpha_blending_mask] with (256, 256, 3) for mixing two layers.
        (2) [conv-kernel] used for blurring the reflection layer.
        """
        in_layer = tf.keras.layers.Input(shape=(img_size, img_size, 3 + 3 + noise_dim))

        # noise_in = tf.keras.layers.Input(shape=(img_size, img_size, noise_dim))
        # T_in = tf.keras.layers.Input(shape=(img_size, img_size, 3))
        # R_in = tf.keras.layers.Input(shape=(img_size, img_size, 3))
        # split the input tensor
        T_in, R_in, noise_in = tf.split(in_layer, [3, 3, noise_dim], axis=3)
        ds1 = Component.get_conv_block(noise_dim, 32, norm=False)(noise_in)
        ds2 = Component.get_conv_block(32, 64)(ds1)
        ds3 = Component.get_conv_block(64, 128)(ds2)  # d3: (32, 32)
        ds4 = Component.get_conv_block(128, 256)(ds3)
        ds5 = Component.get_conv_block(256, 256)(ds4)
        ds6 = Component.get_conv_block(256, 256)(ds5)

        us1 = Component.get_deconv_block(256, 256)(ds6)
        us2 = Component.get_deconv_block(512, 256)(tf.concat([us1, ds5], axis=3))
        us3 = Component.get_deconv_block(512, 128)(tf.concat([us2, ds4], axis=3))
        us4 = Component.get_deconv_block(256, 64)(tf.concat([us3, ds3], axis=3))  # us4: (64, 64, 64)
        us5 = Component.get_deconv_block(128, 32)(tf.concat([us4, ds2], axis=3))  # us5: (128, 128, 32)

        # let us handle the conv kernel first
        # us5 ---conv--- (32, 32, 16) ---reshape---> (32, 32, 3, 3)
        # (1, 128, 128, 32) -> (1, 64, 64, 16)
        down1 = Component.get_conv_block(32, 16)(us5)

        # (1, 64, 64, 16) -> (1, 32, 32, 9)
        down2 = Component.get_conv_block(16, 9)(down1)

        # note: this reshape assumes batch size 1 (1*32*32*9 elements -> [h, w, in, out] kernel)
        kernel = tf.reshape(down2, [32, 32, 3, 3])

        # the alpha blending mask
        alpha_mask = Component.get_deconv_block(64, 3, norm=False, non_linear='leaky_relu')(
            tf.concat([us5, ds1], axis=3))
        alpha_mask_sub = layers.subtract([tf.ones_like(alpha_mask), alpha_mask])
        # alpha_mask_sub = Component.get_deconv_block(64, 3, norm=False, non_linear='leaky_relu')(
        #     tf.concat([us5, ds1], axis=3))
        # the blurring kernel
        blurred_R = tf.nn.conv2d(R_in, kernel, strides=[1, 1, 1, 1], padding='SAME')

        # transmission
        t_layer = layers.multiply([T_in, alpha_mask])
        r_layer = layers.multiply([blurred_R, alpha_mask_sub])

        out = layers.add([t_layer, r_layer])

        return tf.keras.Model(in_layer, out)
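# Usage sketch (assumed): T, R, and a noise map are concatenated on the channel
# axis before entering the generator, matching the tf.split([3, 3, noise_dim])
# above. The kernel reshape assumes batch size 1, so feed one image at a time.
import tensorflow as tf

gen = build_optical_synthesis_generator(img_size=256, noise_dim=4)
T = tf.random.uniform((1, 256, 256, 3))
R = tf.random.uniform((1, 256, 256, 3))
noise = tf.random.normal((1, 256, 256, 4))
blended = gen(tf.concat([T, R, noise], axis=3))  # synthesized image with reflection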
Example #4
    def call(self, inputs, training=False):
        x, a, i = inputs
        # global params; TODO: make this the default rather than an option
        if self.glob:
            glob_avg = tf.math.segment_mean(x, i)
            glob_var = abs(
                tf.math.subtract(tf.math.segment_mean(multiply([x, x]), i),
                                 multiply([glob_avg, glob_avg])))
            glob_max = tf.math.segment_max(x, i)
            glob_min = tf.math.segment_min(x, i)
            xglob = tf.concat([glob_avg, glob_var, glob_max, glob_min], axis=1)
        a, e = self.generate_edge_features(x, a)
        # this norm should perhaps move further down, just ahead of the edge conv
        if self.edgenorm:
            e = self.norm_edge(e)
        x = self.MP([x, a, e])
        if self.edgeconv:
            a, e = self.generate_edge_features(x, a)
            x = self.ECC1([x, a, e])
        for conv in self.GCNs:
            x = conv([x, a])
        x1 = self.Pool1([x, i])
        x2 = self.Pool2([x, i])
        x3 = self.Pool3([x, i])
        xpool = tf.concat([x1, x2, x3], axis=1)
        if self.glob:
            x = tf.concat([xpool, xglob], axis=1)
        else:
            x = xpool
        for decode_layer, dropout_layer, norm_layer in zip(
                self.decode, self.dropout_layers, self.norm_layers):
            x = dropout_layer(x, training=training)
            x = self.decode_activation(decode_layer(x))
            x = norm_layer(x, training=training)

        x_loge = self.loge[0](x)
        x_loge = self.loge[1](x_loge)
        x_loge = self.loge_out(x_loge)

        x_angles = self.angles[0](x)
        x_angles = self.angles[1](x_angles)
        x_angles = self.angles_out(x_angles)
        zeniazi = sigmoid(self.angle_scale(x_angles))

        if self.n_sigs > 0:
            x_sigs = self.sigs[0](x)
            x_sigs = self.sigs[1](x_sigs)
            x_sigs = tf.abs(self.sigs_out(x_sigs)) + eps
        #could add correlation here
        xs = tf.stack(
            [x_loge[:, 0], zeniazi[:, 0] * np.pi, zeniazi[:, 1] * 2 * np.pi],
            axis=1)
        if self.n_sigs > 0:
            return tf.concat([xs, x_sigs], axis=1)
        else:
            return xs
    def get_model(self):
        """
        用于构建完整模型,即build_feature + Dense
        :return:
            model
        """
        input_category = Input(shape=(self.category_count, ))
        input_query1 = Input(shape=(self.query_len, ))
        input_query2 = Input(shape=(self.query_len, ))

        # Layer 1: feature-extraction layer
        if self.shared:
            # Model is built once: the two towers share weights
            model = self.build_feature()

            query_1 = model(input_query1)
            query_2 = model(input_query2)

        else:
            # Model is built twice: the two towers do not share weights
            query_1 = self.build_feature()(input_query1)
            query_2 = self.build_feature()(input_query2)

        if self.add_feature:
            # |q1 - q2|: element-wise absolute difference of the two features
            sub = subtract([query_1, query_2])
            sub = tf.abs(sub)
            # q1 * q2: element-wise product of the two features
            mul = multiply([query_1, query_2])
            # max(q1, q2)^2: square of the element-wise maximum
            max_square = multiply(
                [maximum([query_1, query_2]),
                 maximum([query_1, query_2])])

            merge_layers = Concatenate()(
                [query_1, query_2, sub, mul, max_square, input_category])
        else:
            merge_layers = Concatenate()([query_1, query_2, input_category])

        # Layer 2: fully connected layers
        # (assumes len(self.dense_units) >= 2; the last position becomes a single sigmoid unit)
        fc = None
        for i in range(len(self.dense_units)):
            if i == 0:
                fc = Dense(self.dense_units[i],
                           activation="relu")(merge_layers)
            elif i == len(self.dense_units) - 1:
                fc = Dense(1, activation='sigmoid')(fc)
            else:
                fc = Dense(self.dense_units[i], activation="relu")(fc)

        model = Model(inputs=[input_category, input_query1, input_query2],
                      outputs=[fc])
        model.summary()

        return model
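# Usage sketch (assumed): `matcher` stands for an instance of the enclosing
# two-tower class, with category_count and query_len set on it.
import numpy as np

model = matcher.get_model()
cat = np.zeros((2, matcher.category_count), dtype='float32')
q1 = np.random.randint(0, 100, size=(2, matcher.query_len))
q2 = np.random.randint(0, 100, size=(2, matcher.query_len))
scores = model.predict([cat, q1, q2])  # similarity scores in [0, 1]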
Example #6
    def _create_wann(self, shape):
        # Build the task, weights_predictor and discrepancer networks.
        # The weights_predictor should end with a relu activation.
        self.weights_predictor = self.get_weighting_model(
                shape, activation='relu', C=self.C_w, name="weights")
        self.task = self.get_base_model(
                shape, activation=None, C=self.C, name="task")
        self.discrepancer = self.get_base_model(
                shape, activation=None, C=self.C, name="discrepancer")
        
        # Create input layers for Xs, Xt, ys, yt and target weights
        input_source = Input(shape=(shape,))
        input_target = Input(shape=(shape,))
        output_source = Input(shape=(1,))
        output_target = Input(shape=(1,))
        weights_target = Input(shape=(1,))
        Flip = _GradReverse()
        
        # Get networks output for both source and target
        weights_source = self.weights_predictor(input_source)      
        output_task_s = self.task(input_source)
        output_task_t = self.task(input_target)
        output_disc_s = self.discrepancer(input_source)
        output_disc_t = self.discrepancer(input_target)
        
        # Reversal layer at the end of discrepancer
        output_disc_s = Flip(output_disc_s)
        output_disc_t = Flip(output_disc_t)

        # Create model and define loss
        self.model = Model([input_source, input_target, output_source, output_target, weights_target],
                           [output_task_s, output_task_t, output_disc_s, output_disc_t, weights_source],
                           name='WANN')
            
        loss_task_s = K.mean(multiply([weights_source, K.square(output_source - output_task_s)]))
        loss_task_t = K.mean(multiply([weights_target, K.square(output_target - output_task_t)]))
            
        loss_disc_s = K.mean(multiply([weights_source, K.square(output_source - output_disc_s)]))
        loss_disc_t = K.mean(multiply([weights_target, K.square(output_target - output_disc_t)]))
            
        loss_task = loss_task_s #+ loss_task_t
        loss_disc = loss_disc_t - loss_disc_s
                         
        loss = loss_task + loss_disc
   
        self.model.add_loss(loss)
        self.model.add_metric(tf.reduce_sum(K.mean(weights_source)), name="weights", aggregation="mean")
        self.model.add_metric(tf.reduce_sum(loss_task_s), name="task_s", aggregation="mean")
        self.model.add_metric(tf.reduce_sum(loss_task_t), name="task_t", aggregation="mean")
        self.model.add_metric(tf.reduce_sum(loss_disc), name="disc", aggregation="mean")
        self.model.add_metric(tf.reduce_sum(loss_disc_s), name="disc_s", aggregation="mean")
        self.model.add_metric(tf.reduce_sum(loss_disc_t), name="disc_t", aggregation="mean")
        self.model.compile(optimizer=self.optimizer)
        return self
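# Training sketch (assumed): all supervision enters through add_loss, so
# fit() receives only the five input arrays and no separate targets.
import numpy as np

n_features = 8                                      # assumed feature count
Xs, Xt = np.random.rand(100, n_features), np.random.rand(100, n_features)
ys, yt = np.random.rand(100, 1), np.random.rand(100, 1)
wt = np.ones((100, 1))                              # initial target weights
wann._create_wann(shape=n_features)                 # `wann`: assumed instance of the enclosing class
wann.model.fit([Xs, Xt, ys, yt, wt], epochs=10, batch_size=64)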
Example #7
    def call(self, inputs, training=False):
        x, a, i = inputs
        glob_avg = tf.math.segment_mean(x, i)
        glob_var = abs(
            tf.math.subtract(tf.math.segment_mean(multiply([x, x]), i),
                             multiply([glob_avg, glob_avg])))
        glob_max = tf.math.segment_max(x, i)
        glob_min = tf.math.segment_min(x, i)
        xglob = tf.concat([glob_avg, glob_var, glob_max, glob_min], axis=1)
        a, e = self.generate_edge_features(x, a)
        for MP in self.MPs:
            x = MP([x, a, e])
        for conv in self.GCNs:
            x = conv([x, a])
        x1 = self.Pool1([x, i])
        x2 = self.Pool2([x, i])
        x3 = self.Pool3([x, i])
        x = tf.concat([x1, x2, x3], axis=1)
        x = tf.concat([x, xglob], axis=1)
        for decode_layer, dropout_layer, norm_layer in zip(
                self.decode, self.dropout_layers, self.norm_layers):
            x = dropout_layer(x, training=training)
            x = self.decode_activation(decode_layer(x))
            x = norm_layer(x, training=training)

        x_loge = self.loge[0](x)
        x_loge = self.loge[1](x_loge)
        x_loge = self.loge_out(x_loge)

        x_zeni = self.zeni[0](x)
        x_zeni = self.zeni[1](x_zeni)
        x_zeni = self.zeni_out(x_zeni)
        zeni = sigmoid(self.zeni_scale(x_zeni))

        x_azi = self.azi[0](x)
        x_azi = self.azi[1](x_azi)
        x_azi = self.azi_out(x_azi)
        azi = sigmoid(self.azi_scale(x_azi))

        x_sigz = self.sigz[0](x)
        x_sigz = self.sigz[1](x_sigz)
        x_sigz = tf.math.add(tf.math.abs(self.sigz_out(x_sigz)), eps)

        x_sigaz = self.sigaz[0](x)
        x_sigaz = self.sigaz[1](x_sigaz)
        x_sigaz = tf.math.add(tf.math.abs(self.sigaz_out(x_sigaz)), eps)

        #could add correlation here

        xs = tf.stack([x_loge, zeni * np.pi, azi * 2 * np.pi, x_sigz, x_sigaz],
                      axis=1)

        return xs[:, :, 0]
Example #8
    def call(self, inputs, training=False):
        x, a, i = inputs
        glob_avg = tf.math.segment_mean(x, i)
        glob_var = abs(
            tf.math.subtract(tf.math.segment_mean(multiply([x, x]), i),
                             multiply([glob_avg, glob_avg])))
        glob_max = tf.math.segment_max(x, i)
        glob_min = tf.math.segment_min(x, i)
        xglob = tf.concat([glob_avg, glob_var, glob_max, glob_min], axis=1)
        a, e = self.generate_edge_features(x, a, forward=False, edgetype=1)
        e = self.norm_edge(e)
        x = self.hop12max1([x, a, e])
        x = self.hop12max2([x, a, e])

        x2me = self.hop2mean([x, a, e])

        x = tf.concat([x, x2me], axis=1)
        # tf.print(tf.shape(x))

        x = self.edgeback([x, a, e])
        for conv in self.GCNs:
            x = conv([x, a])
        x1 = self.Pool1([x, i])
        x2 = self.Pool2([x, i])
        x3 = self.Pool3([x, i])
        #maybe histpool here
        x = tf.concat([x1, x2, x3], axis=1)
        x = tf.concat([x, xglob], axis=1)
        for decode_layer, dropout_layer, norm_layer in zip(
                self.decode, self.dropout_layers, self.norm_layers):
            x = dropout_layer(x, training=training)
            x = self.decode_activation(decode_layer(x))
            x = norm_layer(x, training=training)

        x_loge = self.loge[0](x)
        x_loge = self.loge[1](x_loge)
        x_loge = self.loge_out(x_loge)

        x_angles = self.angles[0](x)
        x_angles = self.angles[1](x_angles)
        x_angles = self.angles_out(x_angles)
        zeniazi = sigmoid(self.angle_scale(x_angles))

        x_sigs = self.sigs[0](x)
        x_sigs = self.sigs[1](x_sigs)
        x_sigs = tf.abs(self.sigs_out(x_sigs)) + eps
        #could add correlation here
        xs = tf.stack(
            [x_loge[:, 0], zeniazi[:, 0] * np.pi, zeniazi[:, 1] * 2 * np.pi],
            axis=1)

        return tf.concat([xs, x_sigs], axis=1)
    def build_RmNet(img_size=256):
        inp = layers.Input(shape=(img_size, img_size, 3))

        # 256 -> 128
        conv1 = BeyondLinearityComponent.get_conv_block(3, 16)(inp)

        # 128 -> 64
        conv2 = BeyondLinearityComponent.get_conv_block(16, 32)(conv1)

        # 64 -> 32
        conv3 = BeyondLinearityComponent.get_conv_block(32, 64)(conv2)

        # 32 -> 16
        conv4 = BeyondLinearityComponent.get_conv_block(64, 128)(conv3)

        # 16 -> 8
        conv5 = BeyondLinearityComponent.get_conv_block(128, 256)(conv4)

        # 8 -> 4
        conv6 = BeyondLinearityComponent.get_conv_block(256, 512)(conv5)

        def get_upsampling_unit(non_linear):
            # 4 -> 8
            deconv1 = BeyondLinearityComponent.get_deconv_block(512, 256)(conv6)

            # 8 -> 16
            deconv2 = BeyondLinearityComponent.get_deconv_block(256, 128)(tf.concat([deconv1, conv5], axis=3))

            # 16 -> 32
            deconv3 = BeyondLinearityComponent.get_deconv_block(128, 64)(tf.concat([deconv2, conv4], axis=3))

            # 32 -> 64
            deconv4 = BeyondLinearityComponent.get_deconv_block(64, 32)(tf.concat([deconv3, conv3], axis=3))

            # 64 -> 128
            deconv5 = BeyondLinearityComponent.get_deconv_block(32, 16)(tf.concat([deconv4, conv2], axis=3))

            # 128 -> 256
            out = BeyondLinearityComponent.get_deconv_block(16, 3, non_linear=non_linear)(
                tf.concat([deconv5, conv1], axis=3))

            return out

        t_layer = get_upsampling_unit('tanh')
        r_layer = get_upsampling_unit('tanh')
        mask = get_upsampling_unit('sigmoid')
        mask_d = tf.ones_like(mask) - mask

        recombined_image = layers.multiply([mask, t_layer]) + layers.multiply([mask_d, r_layer])

        return keras.Model(inp, [t_layer, r_layer, recombined_image])
Example #10
def attention_gate(inp_1, inp_2, n_intermediate_filters):
    """Attention gate. Compresses both inputs to n_intermediate_filters filters before processing.
       Implemented as proposed by Oktay et al. in their Attention U-net, see: https://arxiv.org/abs/1804.03999.
    """
    inp_1_conv = Conv2D(
        n_intermediate_filters,
        kernel_size=1,
        strides=1,
        padding="same",
        kernel_initializer="he_normal",
    )(inp_1)
    inp_2_conv = Conv2D(
        n_intermediate_filters,
        kernel_size=1,
        strides=1,
        padding="same",
        kernel_initializer="he_normal",
    )(inp_2)

    f = Activation("relu")(add([inp_1_conv, inp_2_conv]))
    g = Conv2D(
        filters=1,
        kernel_size=1,
        strides=1,
        padding="same",
        kernel_initializer="he_normal",
    )(f)
    h = Activation("sigmoid")(g)
    return multiply([inp_1, h])
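# Usage sketch (assumed): gate a U-Net skip connection with the decoder signal.
# Both tensors must share spatial size, since the gate only uses 1x1
# convolutions and an element-wise add.
from tensorflow.keras.layers import Input

skip = Input(shape=(64, 64, 128))  # encoder feature map
up = Input(shape=(64, 64, 128))    # upsampled decoder feature map
gated_skip = attention_gate(skip, up, n_intermediate_filters=64)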
Example #11
    def build_generator(self):

        model = Sequential()

        model.add(
            Dense(128 * 7 * 7, activation="relu", input_dim=self.latent_dim))
        model.add(Reshape((7, 7, 128)))
        model.add(BatchNormalization(momentum=0.8))
        model.add(UpSampling2D())
        model.add(Conv2D(128, kernel_size=3, padding="same"))
        model.add(Activation("relu"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(UpSampling2D())
        model.add(Conv2D(64, kernel_size=3, padding="same"))
        model.add(Activation("relu"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Conv2D(self.channels, kernel_size=3, padding='same'))
        model.add(Activation("tanh"))

        model.summary()

        noise = Input(shape=(self.latent_dim, ))
        label = Input(shape=(1, ), dtype='int32')
        label_embedding = Flatten()(Embedding(self.num_classes,
                                              self.latent_dim)(label))

        model_input = multiply([noise, label_embedding])
        img = model(model_input)

        return Model([noise, label], img)
Example #12
def attention_block_2d(x, g, inter_channel, data_format='channels_first'):
    # theta_x(?,g_height,g_width,inter_channel)

    theta_x = Conv2D(inter_channel, [1, 1], strides=[1, 1], data_format=data_format)(x)

    # phi_g(?,g_height,g_width,inter_channel)

    phi_g = Conv2D(inter_channel, [1, 1], strides=[1, 1], data_format=data_format)(g)

    # f(?,g_height,g_width,inter_channel)

    f = Activation('relu')(add([theta_x, phi_g]))

    # psi_f(?,g_height,g_width,1)

    psi_f = Conv2D(1, [1, 1], strides=[1, 1], data_format=data_format)(f)

    rate = Activation('sigmoid')(psi_f)

    # rate(?,x_height,x_width)

    # att_x(?,x_height,x_width,x_channel)

    att_x = multiply([x, rate])

    return att_x
Example #13
def squeeze_excite_block(input, ratio=16):
    ''' Create a channel-wise squeeze-excite block
    Args:
        input: input tensor
        ratio: channel-reduction ratio for the bottleneck Dense layer
    Returns: a keras tensor
    References
    -   [Squeeze and Excitation Networks](https://arxiv.org/abs/1709.01507)
    '''
    init = input
    channel_axis = 1 if K.image_data_format() == "channels_first" else -1
    filters = init.shape[channel_axis]
    se_shape = (1, 1, filters)

    se = GlobalAveragePooling2D()(init)
    se = Reshape(se_shape)(se)
    se = Dense(filters // ratio,
               activation='relu',
               kernel_initializer='he_normal',
               use_bias=False)(se)
    se = Dense(filters,
               activation='sigmoid',
               kernel_initializer='he_normal',
               use_bias=False)(se)

    if K.image_data_format() == 'channels_first':
        se = Permute((3, 1, 2))(se)

    x = multiply([init, se])
    return x
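# Usage sketch (assumed): recalibrate the channels of a convolutional feature
# map; the block returns a tensor with the same shape as its input.
from tensorflow.keras.layers import Conv2D, Input

x_in = Input(shape=(32, 32, 64))
feat = Conv2D(64, 3, padding='same', activation='relu')(x_in)
feat = squeeze_excite_block(feat, ratio=16)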
def build_generator(z_input: Input, label_input: Input):
    """
    Build generator CNN
    :param z_input: latent input
    :param label_input: conditional label input
    """

    model = Sequential([
        Dense(128, input_dim=latent_dim),
        LeakyReLU(alpha=0.2),
        BatchNormalization(momentum=0.8),
        Dense(256),
        LeakyReLU(alpha=0.2),
        BatchNormalization(momentum=0.8),
        Dense(512),
        LeakyReLU(alpha=0.2),
        BatchNormalization(momentum=0.8),
        Dense(np.prod((28, 28, 1)), activation='tanh'),
        # reshape to MNIST image size
        Reshape((28, 28, 1))
    ])

    model.summary()

    # the latent input vector z
    label_embedding = Embedding(input_dim=10,
                                output_dim=latent_dim)(label_input)
    flat_embedding = Flatten()(label_embedding)

    # combine the noise and label by element-wise multiplication
    model_input = multiply([z_input, flat_embedding])
    image = model(model_input)

    return Model([z_input, label_input], image)
def build_discriminator():
    """
    Build discriminator network
    """

    model = Sequential([
        Flatten(input_shape=(28, 28, 1)),
        Dense(256),
        LeakyReLU(alpha=0.2),
        Dense(128),
        LeakyReLU(alpha=0.2),
        Dense(1, activation='sigmoid'),
    ],
                       name='discriminator')

    model.summary()

    image = Input(shape=(28, 28, 1))
    flat_img = Flatten()(image)

    label_input = Input(shape=(1, ), dtype='int32')
    label_embedding = Embedding(input_dim=10, output_dim=np.prod(
        (28, 28, 1)))(label_input)
    flat_embedding = Flatten()(label_embedding)

    # combine the noise and label by element-wise multiplication
    model_input = multiply([flat_img, flat_embedding])

    validity = model(model_input)

    return Model([image, label_input], validity)
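# Wiring sketch (assumed): combine the two builders above into the usual cGAN
# setup for generator training, freezing the discriminator inside the
# combined model.
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model

z = Input(shape=(latent_dim,))
label = Input(shape=(1,), dtype='int32')
generator = build_generator(z, label)
discriminator = build_discriminator()
discriminator.compile(loss='binary_crossentropy', optimizer='adam')
discriminator.trainable = False  # freeze D while training G
validity = discriminator([generator([z, label]), label])
combined = Model([z, label], validity)
combined.compile(loss='binary_crossentropy', optimizer='adam')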
Example #16
def attention_layer(input, reduction_ratio):
    """
    :param input: input tensor
    :param reduction_ratio: reduction ratio for w1
    :return: output tensor
    """
    input_chan_num = int(input.get_shape()[-1])
    gap = GlobalAveragePooling2D()(input)
    gap = K.expand_dims(gap, 1)
    gap = K.expand_dims(gap, 1)

    down_scale = Dense(int(input_chan_num / reduction_ratio),
                       activation='relu',
                       use_bias=False)(gap)
    up_scale = Dense(input_chan_num,
                     activation='sigmoid',
                     use_bias=False)(down_scale)
    up_scale = tf.squeeze(up_scale, [1, 2])
    x = multiply([input, up_scale])

    return x  # bug fix: return the recalibrated tensor, not the unchanged input
Example #17
    def build_discriminator(self):

        model = Sequential()

        model.add(Dense(512, input_dim=np.prod(self.img_shape)))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dense(512))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.4))
        model.add(Dense(512))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.4))
        model.add(Dense(1, activation='sigmoid'))
        model.summary()

        img = Input(shape=self.img_shape)
        label = Input(shape=(1, ), dtype='int32')

        label_embedding = Flatten()(Embedding(self.num_classes,
                                              np.prod(self.img_shape))(label))
        flat_img = Flatten()(img)

        model_input = multiply([flat_img, label_embedding])

        validity = model(model_input)

        return Model([img, label], validity)
Example #18
def spatial_attention(input_feature):
    kernel_size = 7

    if K.image_data_format() == "channels_first":
        channel = input_feature._keras_shape[1]
        cbam_feature = Permute((2, 3, 1))(input_feature)
    else:
        channel = input_feature._keras_shape[-1]
        cbam_feature = input_feature

    avg_pool = Lambda(lambda x: K.mean(x, axis=3, keepdims=True))(cbam_feature)
    assert avg_pool._keras_shape[-1] == 1
    max_pool = Lambda(lambda x: K.max(x, axis=3, keepdims=True))(cbam_feature)
    assert max_pool._keras_shape[-1] == 1
    concat = Concatenate(axis=3)([avg_pool, max_pool])
    assert concat._keras_shape[-1] == 2
    cbam_feature = Conv2D(filters=1,
                          kernel_size=kernel_size,
                          strides=1,
                          padding='same',
                          activation='sigmoid',
                          kernel_initializer='he_normal',
                          use_bias=False)(concat)
    assert cbam_feature._keras_shape[-1] == 1

    if K.image_data_format() == "channels_first":
        cbam_feature = Permute((3, 1, 2))(cbam_feature)

    return multiply([input_feature, cbam_feature])
Example #19
def attention_block3D(x, gating, inter_shape):
    shape_x = k.int_shape(x)

    # Getting the x signal to the same shape as the gating signal
    theta_x = Conv3D(filters=inter_shape,
                     kernel_size=3,
                     strides=2,
                     padding='same')(x)  # 16

    # Getting the gating signal to the same number of filters as the inter_shape
    phi_g = Conv3D(filters=inter_shape,
                   kernel_size=1,
                   strides=1,
                   padding='same')(gating)

    concat_xg = add([phi_g, theta_x])
    act_xg = Activation('relu')(concat_xg)
    psi = Conv3D(filters=1, kernel_size=1, padding='same')(act_xg)
    sigmoid_xg = Activation('sigmoid')(psi)
    upsample_psi = UpSampling3D(size=2)(sigmoid_xg)

    upsample_psi = repeat_elem(upsample_psi, shape_x[4], axs=4)

    y = multiply([upsample_psi, x])

    result = Conv3D(filters=shape_x[4],
                    kernel_size=1,
                    strides=1,
                    padding='same')(y)
    result_bn = BatchNormalization()(result)
    return result_bn
Example #20
def autoencoder_SWWAE(input_shape,
                      pool_size=2,
                      nfeats=[8, 16, 32, 64, 128],
                      ksize=3,
                      nlayers=5):
    pool_sizes = np.array([1, 1, 1, 1, 1]) * pool_size
    nfeats_all = [input_shape[2]] + nfeats

    img_input = Input(shape=input_shape)
    wheres = [None] * nlayers
    y = img_input

    for i in range(nlayers):
        y_prepool = convresblock(y, nfeats=nfeats_all[i + 1], ksize=ksize)
        y = MaxPooling2D(pool_size=(pool_sizes[i], pool_sizes[i]))(y_prepool)
        wheres[i] = Lambda(getwhere,
                           output_shape=lambda x: x[0])([y_prepool, y])[0]

    for i in range(nlayers):
        ind = nlayers - 1 - i
        y = UpSampling2D(size=(pool_sizes[ind], pool_sizes[ind]))(y)
        y = multiply([y, wheres[ind]])
        y = convresblock(y, nfeats=nfeats_all[ind], ksize=ksize)

    y = Activation('hard_sigmoid')(y)
    model = Model(img_input, y)

    return model
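# Usage sketch (assumed): with nlayers=5 and pool_size=2 the spatial size must
# be divisible by 2**5, so 32x32 inputs work; convresblock and getwhere are
# helpers defined elsewhere in the original file.
swwae = autoencoder_SWWAE((32, 32, 1))
swwae.compile(optimizer='adam', loss='mse')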
Example #21
def AttnGatingBlock(x, g, inter_shape, name):
    ''' take g, which is the spatially smaller signal, and do a conv to get the same
    number of feature channels as x (bigger spatially);
    do a conv on x to also get the same feature channels (theta_x);
    then upsample g to be the same size as x,
    add x and g (concat_xg),
    relu, 1x1 conv, then sigmoid, then upsample the result - this gives the attn coefficients'''

    shape_x = x.shape  # 32
    shape_g = g.shape  # 16

    theta_x = Conv2D(inter_shape, (2, 2), strides=(2, 2), padding='same', name='xl' + name)(x)  # 16
    shape_theta_x = theta_x.shape

    phi_g = Conv2D(inter_shape, (1, 1), padding='same')(g)
    upsample_g = Conv2DTranspose(inter_shape, (3, 3),
                                 strides=(shape_theta_x[1] // shape_g[1], shape_theta_x[2] // shape_g[2]),
                                 padding='same', name='g_up' + name)(phi_g)  # 16

    concat_xg = add([upsample_g, theta_x])
    act_xg = Activation('relu')(concat_xg)
    psi = Conv2D(1, (1, 1), padding='same', name='psi' + name)(act_xg)
    sigmoid_xg = Activation('sigmoid')(psi)
    shape_sigmoid = sigmoid_xg.shape
    upsample_psi = UpSampling2D(size=(shape_x[1] // shape_sigmoid[1], shape_x[2] // shape_sigmoid[2]))(sigmoid_xg)  # 32

    upsample_psi = expend_as(upsample_psi, shape_x[3], name)
    y = multiply([upsample_psi, x], name='q_attn' + name)

    result = Conv2D(shape_x[3], (1, 1), padding='same', name='q_attn_conv' + name)(y)
    result_bn = BatchNormalization(name='q_attn_bn' + name)(result)
    return result_bn
Example #22
    def build_discriminator(self):

        model = Sequential()

        model.add(Dense(512, input_dim=np.prod(self.img_shape)))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dense(512))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.4))
        model.add(Dense(512))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.4))
        model.add(Dense(1, activation='sigmoid'))
        model.summary()

        img = Input(shape=self.img_shape)
        label = Input(shape=(1, ), dtype='int32')

        label_embedding = Flatten()(Embedding(
            self.embedded_dimension,
            self.latent_dim,
            weights=[self.word2vec.wv.vectors],
            trainable=False)(label))
        label_expansion = Dense(784)(label_embedding)
        flat_img = Flatten()(img)

        model_input = multiply([flat_img, label_expansion])

        validity = model(model_input)

        return Model([img, label], validity)
Example #23
    def buildQNetwork(self):
        from tensorflow.keras.optimizers import Adam
        from tensorflow.keras.models import Model
        from tensorflow.keras.layers import Input, Dense, Conv2D
        from tensorflow.keras.layers import Flatten, TimeDistributed, LSTM, multiply

        input_shape = (self.historylength, ) + self.state_size
        inputA = Input(shape=input_shape)
        inputB = Input(shape=(self.action_size, ))

        if len(self.state_size) == 1:
            x = TimeDistributed(
                Dense(10, input_shape=input_shape, activation='relu'))(inputA)
        else:
            x = TimeDistributed(Conv2D(16, 8, strides=4,
                                       activation='relu'))(inputA)
            x = TimeDistributed(Conv2D(32, 4, strides=2, activation='relu'))(x)
        x = TimeDistributed(Flatten())(x)
        x = LSTM(256)(x)
        x = Dense(10, activation='relu')(x)  # fully connected
        x = Dense(10, activation='relu')(x)
        x = Dense(self.action_size)(x)
        outputs = multiply([x, inputB])
        model = Model(inputs=[inputA, inputB], outputs=outputs)
        model.compile(loss='mse', optimizer=Adam(lr=0.0001, clipvalue=1))
        return model
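# Note on the multiply at the output (assumed explanation): inputB is a one-hot
# action mask, so the product zeroes every Q-value except the chosen action's,
# and the MSE loss only updates that single head. Sketch of a masked target:
import numpy as np

action_size = 4                      # assumed for illustration
chosen_action, td_target = 2, 1.5    # assumed transition values
one_hot = np.zeros((1, action_size), dtype='float32')
one_hot[0, chosen_action] = 1.0
target = td_target * one_hot         # loss only touches Q[chosen_action]
# model.fit([state_history, one_hot], target)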
Example #24
    def create_generator(self):
        # label input
        latent = layers.Input(shape=(self.latent_dim,))
        # this will be our label
        image_class = layers.Input(shape=(1,), dtype='int32')
        # 10 classes in CIFAR-10
        flt = layers.Flatten()(layers.Embedding(10, self.latent_dim,
                              embeddings_initializer='glorot_normal')(image_class))

        # hadamard product between z-space and a class conditional embedding
        G = layers.multiply([latent, flt])
        G = layers.Dense(4*4*384, kernel_initializer='glorot_normal')(G)
        G = layers.Reshape((4,4,384))(G)
        # upsample
        G = layers.Conv2DTranspose(192, (5,5), strides=(2,2), padding='same', activation='relu', kernel_initializer='glorot_normal', bias_initializer='zeros')(G)
        G = layers.BatchNormalization()(G)
        # convolution
        G = layers.Conv2DTranspose(96, (5,5), strides=(2,2), padding='same', activation='relu', kernel_initializer='glorot_normal', bias_initializer='zeros')(G)
        G = layers.BatchNormalization()(G)
        # upsample
        G = layers.Conv2DTranspose(3, (5,5), strides=(2,2), padding='same', activation='tanh', kernel_initializer='glorot_normal', bias_initializer='zeros')(G)

        # define model
        model = tf.keras.Model([latent, image_class], G, name="generator")
        model.summary()
        return model
Example #25
def attention_featurewise(inputs, single=False, attention_layer_descriptor=''):
    """
        Featurewise attention block (Keras API).
        Applies this block to inputs (inputs.shape = (batch_size, time_steps, input_dim)).

        Inputs:
            inputs (Keras layer)
            single (bool): if True, attention is shared across timesteps
            attention_layer_descriptor (string): describes where attention is applied

        Output: A Keras layer
    """
    input_dim = int(inputs.shape[-1])
    a = Dense(input_dim, activation='softmax')(inputs)
    if single:
        a = Lambda(lambda x: K.mean(x, axis=1),
                   name='dim_reduction_' + attention_layer_descriptor)(a)
        a = RepeatVector(input_dim)(a)
    # v0 is assumed to hold the Keras major-version string; merge() was removed in Keras 2
    if v0 == '2':
        output_attention_mul = multiply([inputs, a],
                                        name='attention_mul_featurewise_' +
                                        attention_layer_descriptor)
    else:
        output_attention_mul = merge([inputs, a],
                                     name='attention_mul_featurewise_' +
                                     attention_layer_descriptor,
                                     mode='mul')
    return output_attention_mul
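# Usage sketch (assumed): apply feature-wise attention to an LSTM output of
# shape (batch, time_steps, input_dim).
from tensorflow.keras.layers import Input, LSTM

seq = Input(shape=(50, 32))
h = LSTM(64, return_sequences=True)(seq)
h = attention_featurewise(h, single=False, attention_layer_descriptor='lstm1')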
Example #26
    def build_generator(self):

        model = Sequential()

        model.add(Dense(256, input_dim=self.latent_dim))
        model.add(LeakyReLU(alpha=0.2))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Dense(512))
        model.add(LeakyReLU(alpha=0.2))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Dense(1024))
        model.add(LeakyReLU(alpha=0.2))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Dense(np.prod(self.img_shape), activation='tanh'))
        model.add(Reshape(self.img_shape))

        model.summary()

        noise = Input(shape=(self.latent_dim, ))
        label = Input(shape=(1, ), dtype='int32')
        label_embedding = Flatten()(Embedding(self.num_classes,
                                              self.latent_dim)(label))

        model_input = multiply([noise, label_embedding])
        img = model(model_input)

        return Model([noise, label], img)
Example #27
def se_block(input_feature, ratio=8):
    """Contains the implementation of Squeeze-and-Excitation(SE) block.
	As described in https://arxiv.org/abs/1709.01507.
	"""

    channel_axis = 1 if K.image_data_format() == "channels_first" else -1
    channel = input_feature._keras_shape[channel_axis]

    se_feature = GlobalAveragePooling2D()(input_feature)
    se_feature = Reshape((1, 1, channel))(se_feature)
    assert se_feature._keras_shape[1:] == (1, 1, channel)
    se_feature = Dense(channel // ratio,
                       activation='relu',
                       kernel_initializer='he_normal',
                       use_bias=True,
                       bias_initializer='zeros')(se_feature)
    assert se_feature._keras_shape[1:] == (1, 1, channel // ratio)
    se_feature = Dense(channel,
                       activation='sigmoid',
                       kernel_initializer='he_normal',
                       use_bias=True,
                       bias_initializer='zeros')(se_feature)
    assert se_feature._keras_shape[1:] == (1, 1, channel)
    if K.image_data_format() == 'channels_first':
        se_feature = Permute((3, 1, 2))(se_feature)

    se_feature = multiply([input_feature, se_feature])
    return se_feature
def define_critic_gp(dataset, key):  # key added: it is used below for the embedding size

    init = RandomNormal(stddev=0.1)

    feature_data = Input(shape=(dataset.shape[1], ))
    label_data = Input(shape=(1, ))

    label_embedding = Flatten()(Embedding(
        key.shape[0], math.ceil((1 / 4) * dataset.shape[1]))(label_data))
    label_dense = Dense(dataset.shape[1])(label_embedding)

    inputs = multiply([feature_data, label_dense])

    main_disc = Dense(math.ceil((1 / 2) * dataset.shape[1]),
                      kernel_initializer=init)(inputs)
    main_disc = BatchNormalization()(main_disc)
    main_disc = Activation("tanh")(main_disc)
    main_disc = Dense(math.ceil((1 / 4) * dataset.shape[1]),
                      kernel_initializer=init)(main_disc)
    main_disc = BatchNormalization()(main_disc)
    main_disc = Activation("tanh")(main_disc)
    main_disc = Dropout(0.4)(main_disc)
    disc_out = Dense(1, activation="linear")(main_disc)

    discrim = Model([feature_data, label_data], disc_out)

    opt = RMSprop(lr=0.00005)
    discrim.compile(loss=wasserstein_loss, optimizer=opt, metrics=["accuracy"])
    return discrim
Example #29
    def value_network(self):
        # inputA = Input(shape=self.state_size)
        # inputA = Flatten()(inputA)
        '''model = tf.keras.Sequential([
            Dense(32, activation='relu', input_shape=(self.state_size)),
            Dense(16, activation='relu', input_shape=(self.state_size)),
            Dense(16, activation='relu', input_shape=(self.state_size)),
            Dense(1, activation='linear', input_shape=(self.state_size))
    ])
    model.compile(loss='mse', optimizer=Adam(lr = self.value_lr))'''
        from tensorflow.keras.optimizers import Adam
        from tensorflow.keras.models import Model
        from tensorflow.keras.layers import Dense, Input, Flatten, multiply

        inputA = Input(shape=self.state_size)
        inputB = Input(shape=(self.action_size, ))
        x = Flatten()(inputA)
        x = Dense(24, input_dim=self.state_size,
                  activation='relu')(x)  # fully connected
        x = Dense(24, activation='relu')(x)
        x = Dense(self.action_size, activation='linear')(x)
        outputs = multiply([x, inputB])
        model = Model(inputs=[inputA, inputB], outputs=outputs)
        model.compile(loss='mse', optimizer=Adam(lr=self.value_lr))
        return model
def define_generator(dataset, latent_dim, key):

    init = RandomNormal(stddev=0.7)

    noise = Input(shape=(latent_dim, ))
    label = Input(shape=(1, ))

    label_embedding = Flatten()(Embedding(
        key.shape[0], math.ceil((1 / 4) * dataset.shape[1]))(label))
    label_dense = Dense(latent_dim)(label_embedding)

    inputs = multiply([noise, label_dense])

    main_gen = Dense(math.ceil((1 / 4) * dataset.shape[1]),
                     kernel_initializer=init)(inputs)
    main_gen = BatchNormalization()(main_gen)
    main_gen = Activation("tanh")(main_gen)
    main_gen = Dense(math.ceil((1 / 2) * dataset.shape[1]),
                     kernel_initializer=init)(main_gen)
    main_gen = BatchNormalization()(main_gen)
    main_gen = Activation("tanh")(main_gen)
    main_gen = Dense((dataset.shape[1] + math.ceil(
        (1 / 4) * dataset.shape[1])),
                     kernel_initializer=init)(main_gen)
    main_gen = BatchNormalization()(main_gen)
    main_gen = Activation("tanh")(main_gen)
    gen_out = Dense(dataset.shape[1], activation="tanh")(main_gen)
    gen = Model([noise, label], gen_out)
    return gen
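# Usage sketch (assumed): sample synthetic rows from the tabular generator;
# `dataset` and `key` are stand-ins for the arrays used by the builder above.
import numpy as np

dataset = np.random.rand(500, 20)   # assumed tabular data
key = np.arange(3)                  # assumed label key (3 classes)
gen = define_generator(dataset, latent_dim=100, key=key)
z = np.random.normal(size=(16, 100)).astype('float32')
labels = np.random.randint(0, key.shape[0], size=(16, 1))
fake_rows = gen.predict([z, labels])  # shape (16, 20)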