Example #1
File: model.py  Project: zonasw/DBNet
def DBNet(cfg, k=50, model='training'):
    assert model in ['training', 'inference'], "model must be 'training' or 'inference'"

    input_image = KL.Input(shape=[None, None, 3], name='input_image')

    backbone = ResNet50(inputs=input_image, include_top=False, freeze_bn=True)
    C2, C3, C4, C5 = backbone.outputs

    # in2
    in2 = KL.Conv2D(256, (1, 1),
                    padding='same',
                    kernel_initializer='he_normal',
                    name='in2')(C2)
    in2 = KL.BatchNormalization()(in2)
    in2 = KL.ReLU()(in2)
    # in3
    in3 = KL.Conv2D(256, (1, 1),
                    padding='same',
                    kernel_initializer='he_normal',
                    name='in3')(C3)
    in3 = KL.BatchNormalization()(in3)
    in3 = KL.ReLU()(in3)
    # in4
    in4 = KL.Conv2D(256, (1, 1),
                    padding='same',
                    kernel_initializer='he_normal',
                    name='in4')(C4)
    in4 = KL.BatchNormalization()(in4)
    in4 = KL.ReLU()(in4)
    # in5
    in5 = KL.Conv2D(256, (1, 1),
                    padding='same',
                    kernel_initializer='he_normal',
                    name='in5')(C5)
    in5 = KL.BatchNormalization()(in5)
    in5 = KL.ReLU()(in5)

    # P5
    P5 = KL.Conv2D(64, (3, 3), padding='same',
                   kernel_initializer='he_normal')(in5)
    P5 = KL.BatchNormalization()(P5)
    P5 = KL.ReLU()(P5)
    P5 = KL.UpSampling2D(size=(8, 8))(P5)
    # P4
    out4 = KL.Add()([in4, KL.UpSampling2D(size=(2, 2))(in5)])
    P4 = KL.Conv2D(64, (3, 3), padding='same',
                   kernel_initializer='he_normal')(out4)
    P4 = KL.BatchNormalization()(P4)
    P4 = KL.ReLU()(P4)
    P4 = KL.UpSampling2D(size=(4, 4))(P4)
    # P3
    out3 = KL.Add()([in3, KL.UpSampling2D(size=(2, 2))(out4)])
    P3 = KL.Conv2D(64, (3, 3), padding='same',
                   kernel_initializer='he_normal')(out3)
    P3 = KL.BatchNormalization()(P3)
    P3 = KL.ReLU()(P3)
    P3 = KL.UpSampling2D(size=(2, 2))(P3)
    # P2
    out2 = KL.Add()([in2, KL.UpSampling2D(size=(2, 2))(out3)])
    P2 = KL.Conv2D(64, (3, 3), padding='same',
                   kernel_initializer='he_normal')(out2)
    P2 = KL.BatchNormalization()(P2)
    P2 = KL.ReLU()(P2)

    fuse = KL.Concatenate()([P2, P3, P4, P5])
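    # P2-P5 all sit at 1/4 of the input resolution at this point, so the
    # concatenation along the channel axis is shape-consistent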

    # binarize map
    p = KL.Conv2D(64, (3, 3),
                  padding='same',
                  kernel_initializer='he_normal',
                  use_bias=False)(fuse)
    p = KL.BatchNormalization()(p)
    p = KL.ReLU()(p)
    p = KL.Conv2DTranspose(64, (2, 2),
                           strides=(2, 2),
                           kernel_initializer='he_normal',
                           use_bias=False)(p)
    p = KL.BatchNormalization()(p)
    p = KL.ReLU()(p)
    binarize_map = KL.Conv2DTranspose(1, (2, 2),
                                      strides=(2, 2),
                                      kernel_initializer='he_normal',
                                      activation='sigmoid',
                                      name='binarize_map')(p)

    # threshold map
    t = KL.Conv2D(64, (3, 3),
                  padding='same',
                  kernel_initializer='he_normal',
                  use_bias=False)(fuse)
    t = KL.BatchNormalization()(t)
    t = KL.ReLU()(t)
    t = KL.Conv2DTranspose(64, (2, 2),
                           strides=(2, 2),
                           kernel_initializer='he_normal',
                           use_bias=False)(t)
    t = KL.BatchNormalization()(t)
    t = KL.ReLU()(t)
    threshold_map = KL.Conv2DTranspose(1, (2, 2),
                                       strides=(2, 2),
                                       kernel_initializer='he_normal',
                                       activation='sigmoid',
                                       name='threshold_map')(t)

    # thresh binary map
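    # differentiable binarization: B = 1 / (1 + exp(-k * (P - T))) -- a steep
    # sigmoid of the gap between the probability map P and the threshold map T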
    thresh_binary = KL.Lambda(lambda x: 1 / (1 + tf.exp(-k * (x[0] - x[1]))))(
        [binarize_map, threshold_map])

    if model == 'training':
        input_gt = KL.Input(shape=[cfg.IMAGE_SIZE, cfg.IMAGE_SIZE],
                            name='input_gt')
        input_mask = KL.Input(shape=[cfg.IMAGE_SIZE, cfg.IMAGE_SIZE],
                              name='input_mask')
        input_thresh = KL.Input(shape=[cfg.IMAGE_SIZE, cfg.IMAGE_SIZE],
                                name='input_thresh')
        input_thresh_mask = KL.Input(shape=[cfg.IMAGE_SIZE, cfg.IMAGE_SIZE],
                                     name='input_thresh_mask')

        loss_layer = KL.Lambda(db_loss, name='db_loss')([
            input_gt, input_mask, input_thresh, input_thresh_mask,
            binarize_map, thresh_binary, threshold_map
        ])

        db_model = K.Model(inputs=[
            input_image, input_gt, input_mask, input_thresh, input_thresh_mask
        ],
                           outputs=[loss_layer])

        loss_names = ["db_loss"]
        for layer_name in loss_names:
            layer = db_model.get_layer(layer_name)
            db_model.add_loss(layer.output)
            # db_model.add_metric(layer.output, name=layer_name, aggregation="mean")
    else:
        db_model = K.Model(inputs=input_image, outputs=binarize_map)
        """
        db_model = K.Model(inputs=input_image,
                           outputs=thresh_binary)
        """
    return db_model
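
A minimal inference-side sketch (the cfg object, weights file and image batch below are hypothetical placeholders):

db = DBNet(cfg, k=50, model='inference')
db.load_weights('dbnet_weights.h5')   # hypothetical checkpoint path
prob_maps = db.predict(images)        # binarize_map, one probability map per input image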
Example #2
def DCN_model(inp_layer,
              inp_embed,
              link_size,
              cross_size,
              slice_size,
              input_deep_col,
              input_wide_col,
              link_nf_size,
              cross_nf_size,
              encoder,
              link_seqlen=170,
              cross_seqlen=12,
              pred_len=1,
              dropout=0.25,
              sp_dropout=0.1,
              embed_dim=64,
              hidden_dim=128,
              n_layers=3,
              lr=0.001,
              kernel_size1=3,
              kernel_size2=2,
              conv_size=128,
              conv=False,
              have_knowledge=True):
    inp = L.concatenate(inp_embed, axis=-1)
    link_inputs = L.Input(shape=(link_seqlen, link_nf_size),
                          name='link_inputs')
    cross_inputs = L.Input(shape=(cross_seqlen, cross_nf_size),
                           name='cross_inputs')
    deep_inputs = L.Input(shape=(input_deep_col, ), name='deep_input')
    slice_input = L.Input(shape=(1, ), name='slice_input')
    wide_inputs = keras.layers.Input(shape=(input_wide_col, ),
                                     name='wide_inputs')

    # link----------------------------
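    # embed the integer link id, then fold the embedding dim into the feature
    # axis: (batch, T, 1, embed_dim) -> (batch, T, embed_dim)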
    categorical_link = link_inputs[:, :, :1]
    embed_link = L.Embedding(input_dim=link_size,
                             output_dim=embed_dim,
                             mask_zero=True)(categorical_link)
    reshaped_link = tf.reshape(
        embed_link,
        shape=(-1, embed_link.shape[1],
               embed_link.shape[2] * embed_link.shape[3]))
    reshaped_link = L.SpatialDropout1D(sp_dropout)(reshaped_link)
    """
    categorical_slice = link_inputs[:, :, 5:6]
    embed_slice = L.Embedding(input_dim=289, output_dim=16, mask_zero=True)(categorical_slice)
    reshaped_slice = tf.reshape(embed_slice, shape=(-1, embed_slice.shape[1], embed_slice.shape[2] * embed_slice.shape[3]))
    reshaped_slice = L.SpatialDropout1D(sp_dropout)(reshaped_slice)

    categorical_hightemp = link_inputs[:, :, 6:7]
    embed_hightemp = L.Embedding(input_dim=33, output_dim=8, mask_zero=True)(categorical_hightemp)
    reshaped_hightemp = tf.reshape(embed_hightemp, shape=(-1, embed_hightemp.shape[1], embed_hightemp.shape[2] * embed_hightemp.shape[3]))
    reshaped_hightemp = L.SpatialDropout1D(sp_dropout)(reshaped_hightemp)

    categorical_weather = link_inputs[:, :, 7:8]
    embed_weather = L.Embedding(input_dim=7, output_dim=8, mask_zero=True)(categorical_weather)
    reshaped_weather = tf.reshape(embed_weather, shape=(-1, embed_weather.shape[1], embed_weather.shape[2] * embed_weather.shape[3]))
    reshaped_weather = L.SpatialDropout1D(sp_dropout)(reshaped_weather)
    
    numerical_fea1 = link_inputs[:, :, 1:5]
    numerical_fea1 = L.Masking(mask_value=0, name='numerical_fea1')(numerical_fea1)
    hidden = L.concatenate([reshaped_link, numerical_fea1, reshaped_slice, reshaped_hightemp, reshaped_weather], axis=2)
    
    """
    if have_knowledge:
        numerical_fea1 = link_inputs[:, :, 1:5]
        numerical_fea1 = L.Masking(mask_value=0,
                                   name='numerical_fea1')(numerical_fea1)

        categorical_ar_st = link_inputs[:, :, 5:6]
        categorical_ar_st = L.Masking(
            mask_value=-1, name='categorical_ar_st')(categorical_ar_st)
        embed_ar_st = L.Embedding(input_dim=289,
                                  output_dim=8)(categorical_ar_st)
        reshaped_ar_st = tf.reshape(
            embed_ar_st,
            shape=(-1, embed_ar_st.shape[1],
                   embed_ar_st.shape[2] * embed_ar_st.shape[3]))
        reshaped_ar_st = L.SpatialDropout1D(sp_dropout)(reshaped_ar_st)

        categorical_ar_sl = link_inputs[:, :, 6:7]
        categorical_ar_sl = L.Masking(
            mask_value=-1, name='categorical_ar_sl')(categorical_ar_sl)
        embed_ar_sl = L.Embedding(input_dim=289,
                                  output_dim=8)(categorical_ar_sl)
        reshaped_ar_sl = tf.reshape(
            embed_ar_sl,
            shape=(-1, embed_ar_sl.shape[1],
                   embed_ar_sl.shape[2] * embed_ar_sl.shape[3]))
        reshaped_ar_sl = L.SpatialDropout1D(sp_dropout)(reshaped_ar_sl)
        hidden = L.concatenate(
            [reshaped_link, reshaped_ar_st, reshaped_ar_sl, numerical_fea1],
            axis=2)

        #hidden = L.concatenate([reshaped_link, numerical_fea1],axis=2)
    else:
        numerical_fea1 = link_inputs[:, :, 1:5]
        numerical_fea1 = L.Masking(mask_value=0,
                                   name='numerical_fea1')(numerical_fea1)

        categorical_arrival = link_inputs[:, :, 5:6]
        categorical_arrival = L.Masking(
            mask_value=-1, name='categorical_arrival')(categorical_arrival)
        embed_ar = L.Embedding(input_dim=5, output_dim=16)(categorical_arrival)
        reshaped_ar = tf.reshape(embed_ar,
                                 shape=(-1, embed_ar.shape[1],
                                        embed_ar.shape[2] * embed_ar.shape[3]))
        reshaped_ar = L.SpatialDropout1D(sp_dropout)(reshaped_ar)

        categorical_ar_st = link_inputs[:, :, 6:7]
        categorical_ar_st = L.Masking(
            mask_value=-1, name='categorical_ar_st')(categorical_ar_st)
        embed_ar_st = L.Embedding(input_dim=289,
                                  output_dim=8)(categorical_ar_st)
        reshaped_ar_st = tf.reshape(
            embed_ar_st,
            shape=(-1, embed_ar_st.shape[1],
                   embed_ar_st.shape[2] * embed_ar_st.shape[3]))
        reshaped_ar_st = L.SpatialDropout1D(sp_dropout)(reshaped_ar_st)

        categorical_ar_sl = link_inputs[:, :, 7:8]
        categorical_ar_sl = L.Masking(
            mask_value=-1, name='categorical_ar_sl')(categorical_ar_sl)
        embed_ar_sl = L.Embedding(input_dim=289,
                                  output_dim=8)(categorical_ar_sl)
        reshaped_ar_sl = tf.reshape(
            embed_ar_sl,
            shape=(-1, embed_ar_sl.shape[1],
                   embed_ar_sl.shape[2] * embed_ar_sl.shape[3]))
        reshaped_ar_sl = L.SpatialDropout1D(sp_dropout)(reshaped_ar_sl)
        hidden = L.concatenate([
            reshaped_link, reshaped_ar, reshaped_ar_st, reshaped_ar_sl,
            numerical_fea1
        ],
                               axis=2)

        #hidden = L.concatenate([reshaped_link, reshaped_ar, numerical_fea1],axis=2)
    #hidden = L.Masking(mask_value=0)(hidden)
    for _ in range(n_layers):
        hidden = gru_layer(hidden_dim, dropout)(hidden)

    if conv:
        x_conv1 = Conv1D(conv_size,
                         kernel_size=kernel_size1,
                         padding='valid',
                         kernel_initializer='he_uniform')(hidden)
        avg_pool1_gru = GlobalAveragePooling1D()(x_conv1)
        max_pool1_gru = GlobalMaxPooling1D()(x_conv1)
        #x_conv2 = Conv1D(conv_size, kernel_size=kernel_size2, padding='valid', kernel_initializer='he_uniform')(hidden)
        #avg_pool2_gru = GlobalAveragePooling1D()(x_conv2)
        #max_pool2_gru = GlobalMaxPooling1D()(x_conv2)
        truncated_link = concatenate([avg_pool1_gru, max_pool1_gru])
    else:
        truncated_link = hidden[:, :pred_len]
        truncated_link = L.Flatten()(truncated_link)

    # truncated_link = Attention(256)(hidden)
    # CROSS----------------------------
    categorical_fea2 = cross_inputs[:, :, :1]
    embed2 = L.Embedding(input_dim=cross_size, output_dim=16,
                         mask_zero=True)(categorical_fea2)
    reshaped2 = tf.reshape(embed2,
                           shape=(-1, embed2.shape[1],
                                  embed2.shape[2] * embed2.shape[3]))
    reshaped2 = L.SpatialDropout1D(sp_dropout)(reshaped2)

    numerical_fea2 = cross_inputs[:, :, 1:]
    numerical_fea2 = L.Masking(mask_value=0,
                               name='numerical_fea2')(numerical_fea2)
    hidden2 = L.concatenate([reshaped2, numerical_fea2], axis=2)
    # hidden2 = L.Masking(mask_value=0)(hidden2)
    for _ in range(n_layers):
        hidden2 = gru_layer(hidden_dim, dropout)(hidden2)

    if conv:
        x_conv3 = Conv1D(conv_size,
                         kernel_size=kernel_size1,
                         padding='valid',
                         kernel_initializer='he_uniform')(hidden2)
        avg_pool3_gru = GlobalAveragePooling1D()(x_conv3)
        max_pool3_gru = GlobalMaxPooling1D()(x_conv3)
        #x_conv4 = Conv1D(conv_size, kernel_size=kernel_size2, padding='valid', kernel_initializer='he_uniform')(hidden2)
        #avg_pool4_gru = GlobalAveragePooling1D()(x_conv4)
        #max_pool4_gru = GlobalMaxPooling1D()(x_conv4)
        truncated_cross = concatenate([avg_pool3_gru, max_pool3_gru])
    else:
        truncated_cross = hidden2[:, :pred_len]
        truncated_cross = L.Flatten()(truncated_cross)

    # truncated_cross = Attention(256)(hidden2)
    # SLICE----------------------------
    embed_slice = L.Embedding(input_dim=slice_size, output_dim=1)(slice_input)
    embed_slice = L.Flatten()(embed_slice)

    # DEEP_INPUS
    x = encoder(deep_inputs)
    x = L.Concatenate()([x, deep_inputs])  # use both raw and encoded features
    x = L.BatchNormalization()(x)
    x = L.Dropout(0.25)(x)

    for i in range(3):
        x = L.Dense(256)(x)
        x = L.BatchNormalization()(x)
        x = L.Lambda(tf.keras.activations.swish)(x)
        x = L.Dropout(0.25)(x)
    dense_hidden3 = L.Dense(64, activation='linear')(x)

    # DCN
    cross = CrossLayer(output_dim=inp.shape[2],
                       num_layer=8,
                       name="cross_layer")(inp)

    # MAIN-------------------------------
    truncated = L.concatenate([
        truncated_link, truncated_cross, cross, dense_hidden3, wide_inputs,
        embed_slice
    ])
    truncated = L.BatchNormalization()(truncated)
    truncated = L.Dropout(dropout)(L.Dense(512, activation='relu')(truncated))
    truncated = L.BatchNormalization()(truncated)
    truncated = L.Dropout(dropout)(L.Dense(256, activation='relu')(truncated))

    if have_knowledge:
        out = L.Dense(2, activation='linear', name='out')(truncated)
        model = tf.keras.Model(inputs=[
            inp_layer, link_inputs, cross_inputs, deep_inputs, wide_inputs,
            slice_input
        ],
                               outputs=out)
        model.summary()
        model.compile(
            loss=knowledge_distillation_loss_withBE,
            optimizer=RAdamOptimizer(
                learning_rate=1e-3
            ),  # 'adam'  RAdam(warmup_proportion=0.1, min_lr=1e-7)
            #metrics={'out':'mape'} # AdamWOptimizer(weight_decay=1e-4)
            metrics=[mape_2, mape_3])
    else:
        out = L.Dense(1, activation='linear', name='out')(truncated)
        model = tf.keras.Model(inputs=[
            inp_layer, link_inputs, cross_inputs, deep_inputs, wide_inputs,
            slice_input
        ],
                               outputs=out)
        model.summary()
        model.compile(
            loss=['mape'],
            optimizer=RAdamOptimizer(
                learning_rate=1e-3
            ),  # 'adam'  RAdam(warmup_proportion=0.1, min_lr=1e-7)
            #metrics={'out':'mape'}
            metrics=['mape'])

    return model
Example #3
# Define model architecture
#------------------------------------------------------------------------------
# left channel
in1                 = layers.Input(shape=(time_sound,nfreqs,1)) # define input (rows, columns, channels; only one channel in my case)
model_l_conv1       = layers.Conv2D(16,(1,3),activation='relu', padding = 'same')(in1) # define first layer and input to the layer
model_l_conv1_mp    = layers.MaxPooling2D(pool_size = (1,2))(model_l_conv1)
model_l_conv1_mp_do = layers.Dropout(0.2)(model_l_conv1_mp)

# right channel
in2                 = layers.Input(shape=(time_sound,nfreqs,1)) # define input
model_r_conv1       = layers.Conv2D(16,(1,3),activation='relu', padding = 'same')(in2) # define first layer and input to the layer
model_r_conv1_mp    = layers.MaxPooling2D(pool_size = (1,2))(model_r_conv1)
model_r_conv1_mp_do = layers.Dropout(0.2)(model_r_conv1_mp)

# merged
model_final_merge       = layers.Concatenate(axis = -1)([model_l_conv1_mp_do, model_r_conv1_mp_do]) 
model_final_conv1       = layers.Conv2D(32,(3,3),activation='relu', padding = 'same')(model_final_merge)
model_final_conv1_mp    = layers.MaxPooling2D(pool_size = (2,2))(model_final_conv1)
model_final_conv1_mp_do = layers.Dropout(0.2)(model_final_conv1_mp)

model_final_flatten = layers.Flatten()(model_final_conv1_mp_do)
model_final_dropout = layers.Dropout(0.2)(model_final_flatten) # dropout for regularization
predicted_coords    = layers.Dense(2, activation = 'tanh')(model_final_dropout) # I have used the tanh activation because our outputs should be between -1 and 1

#------------------------------------------------------------------------------
# Create model
#------------------------------------------------------------------------------
# create
model = models.Model(inputs = [in1,in2], outputs = predicted_coords) # create
# compile
model.compile(loss = cos_dist_2D_angular, optimizer = optimizers.Adam(), metrics=['cosine_proximity','mse',cos_distmet_2D_angular])
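
A training sketch for this two-input model (the spectrogram arrays and target coordinates are hypothetical placeholders; cos_dist_2D_angular and cos_distmet_2D_angular are assumed to be defined elsewhere in the script):

model.fit([specs_left, specs_right], coords_train,  # one array per input branch
          batch_size=32, epochs=20, validation_split=0.1)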
Example #4
    def upsamp_concat_block(self, x, xskip):
        u = layers.UpSampling2D((2, 2))(x)
        return layers.Concatenate()([u, xskip])
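
A sketch of how such a block is typically wired in a U-Net style decoder (tensor names are hypothetical: x5 is the coarse bottleneck map, e4 the encoder features at the target resolution):

d4 = self.upsamp_concat_block(x5, e4)  # 2x upsample, then stack encoder features on the channel axis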
Example #5
def load_vae_dna_model_deepsignal(latent_dim, rc_loss_scale, vae_lr, kmer_loss_scale):
    # Build encoder
    encoder_inputs = keras.Input(shape=(479,))
    base = Lambda(lambda y: y[:, 0:68])(encoder_inputs)
    base = layers.Reshape((17, 4))(base)
    features = Lambda(lambda y: y[:, 68:119])(encoder_inputs)
    features = layers.Reshape((3, 17))(features)
    features = layers.Permute((2,1))(features)
    top_module = layers.Concatenate(axis=-1)([base, features])
    x = layers.Bidirectional(layers.LSTM(50, return_sequences=True))(top_module)
    top_out = layers.Bidirectional(layers.LSTM(50))(x)
    bottom_module = Lambda(lambda y: y[:, 119:])(encoder_inputs)
    x = layers.Reshape((1, 360, 1))(bottom_module)
    x = layers.Conv2D(filters=64, kernel_size=(1, 7), activation='relu', strides=2)(x)
    # Add in inception layers
    x = layers.MaxPooling2D(pool_size=(1, 3), strides=2)(x)
    x = layers.Conv2D(filters=128, kernel_size=(1, 1), activation='relu', strides=1)(x)
    x = inception_module(x)
    x = layers.Conv2D(filters=32, kernel_size=(1, 7), activation='tanh', strides=5)(x)
    bottom_out = layers.Reshape((544,))(x)
    # Classification module which combines top and bottom outputs using FFNN
    x = layers.Concatenate(axis=-1)([top_out, bottom_out])
    x = layers.Dense(256, activation="relu")(x)
    z_mean = layers.Dense(latent_dim, name="z_mean")(x)
    z_log_var = layers.Dense(latent_dim, name="z_log_var")(x)
    z_mean_var = layers.Concatenate()([z_mean, z_log_var])
    encoder = keras.Model(encoder_inputs, z_mean_var, name="encoder")

    # Build decoder
    latent_inputs = keras.Input(shape=(latent_dim*2,))
    x_mean = Lambda(lambda y: y[:, 0:latent_dim])(latent_inputs)
    x_log_var = Lambda(lambda y: y[:, latent_dim:])(latent_inputs)
    latent_sampled = Lambda(sampling, output_shape=(latent_dim,), name='z')([x_mean, x_log_var])
    fc_out = layers.Dense(256, activation='relu')(latent_sampled)
    top_module = Lambda(lambda y: y[:, 0:119])(fc_out)
    x = layers.Reshape((17, 7))(top_module)
    x = layers.Bidirectional(layers.LSTM(50, return_sequences=True))(x)
    x = layers.Bidirectional(layers.LSTM(50, return_sequences=True))(x)
    x = layers.LSTM(7, return_sequences=True)(x)
    x = layers.Reshape((119,))(x)
    top_out = layers.Dense(119, activation="relu")(x)
    bottom_module = Lambda(lambda x: x[:, 119:])(fc_out)
    x = layers.Reshape((1, 137, 1))(bottom_module)
    x = layers.Conv2D(filters=64, kernel_size=(1, 7), activation='relu', strides=2)(x)
    x = layers.MaxPooling2D(pool_size=(1, 3), strides=2)(x)
    x = layers.Conv2D(filters=128, kernel_size=(1, 1), activation='relu', strides=1)(x)
    x = inception_module(x)
    x = layers.Conv2D(filters=32, kernel_size=(1, 7), activation='tanh', strides=5)(x)
    x = layers.Reshape((192,))(x)
    bottom_out = layers.Dense(360, activation="relu")(x)
    decoder_outputs = layers.Concatenate(axis=1)([top_out, bottom_out])
    decoder_outputs = layers.Reshape((479,))(decoder_outputs)
    decoder_outputs = layers.Dense(479)(decoder_outputs)
    decoder = keras.Model(latent_inputs, decoder_outputs, name="decoder")

    outputs = decoder(encoder(encoder_inputs))
    vae = keras.Model(encoder_inputs, outputs, name='vae_mlp')
    reconstruction_loss = mse(encoder_inputs, outputs)
    reconstruction_loss *= rc_loss_scale
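    # closed-form KL divergence between N(z_mean, exp(z_log_var)) and the
    # standard-normal prior, summed over the latent dimensions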
    kl_loss = 1 + z_log_var - K.square(z_mean) - K.exp(z_log_var)
    kl_loss = K.sum(kl_loss, axis=-1)
    kl_loss *= -0.5
    ''' 
    kmer_out_norm = tf.math.l2_normalize(kmer_out, axis=1, epsilon=1e-12)
    pairwise_distances_squared = tf.math.add(
        tf.math.reduce_sum(tf.math.square(kmer_out_norm), axis=[1], keepdims=True),
        tf.math.reduce_sum(
            tf.math.square(tf.transpose(kmer_out_norm)), axis=[0], keepdims=True
        ),
    ) - 2.0 * tf.matmul(kmer_out_norm, tf.transpose(kmer_out_norm))
    pairwise_distances_squared = tf.reshape(pairwise_distances_squared, [-1])
    labels = encoder_inputs[:, 24:44]
    label_mask = tf.reduce_all(tf.math.equal(tf.expand_dims(labels, axis=0), tf.expand_dims(labels, axis=1)), 2)
    label_mask = tf.math.logical_not(tf.reshape(label_mask, [-1]))
    kmer_loss = tf.boolean_mask(pairwise_distances_squared, label_mask)
    kmer_loss = tf.reduce_mean(kmer_loss) * kmer_loss_scale
    '''
    vae_loss = K.mean(reconstruction_loss + kl_loss)
    vae.add_loss(vae_loss)
    vae.compile(optimizer=Adam(learning_rate=0, clipnorm=1.0, epsilon=1e-06))
    return encoder, decoder, vae
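
The sampling helper used above is not part of this snippet; a minimal sketch of the reparameterization trick it is assumed to implement (K is keras.backend, as elsewhere in the file):

def sampling(args):
    # z = mean + sigma * eps with eps ~ N(0, I), so gradients flow through mean and log-var
    z_mean, z_log_var = args
    batch = K.shape(z_mean)[0]
    dim = K.int_shape(z_mean)[1]
    epsilon = K.random_normal(shape=(batch, dim))
    return z_mean + K.exp(0.5 * z_log_var) * epsilon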
Example #6
    def build_generator(self):
        def conv2d(x,
                   filters,
                   kernel_size=(4, 4),
                   dropout_rate=0.0,
                   batch_normalization=True):
            x = tkl.Conv2D(filters,
                           kernel_size=kernel_size,
                           strides=(2, 2),
                           padding='same')(x)
            x = tkl.LeakyReLU(alpha=0.2)(x)
            if dropout_rate:
                x = tkl.Dropout(dropout_rate)(x, training=True)
            if batch_normalization:
                x = tkl.BatchNormalization(momentum=0.8)(x)
            return x

        def deconv2d(x,
                     x_skip,
                     filters,
                     kernel_size=(4, 4),
                     dropout_rate=0.0,
                     batch_normalization=True):
            x = tkl.UpSampling2D(size=(2, 2))(x)
            x = tkl.Conv2D(filters,
                           kernel_size=kernel_size,
                           strides=(1, 1),
                           padding='same',
                           activation='relu')(x)
            if dropout_rate:
                x = tkl.Dropout(dropout_rate)(x, training=True)
            if batch_normalization:
                x = tkl.BatchNormalization(momentum=0.8)(x)
            x = tkl.Concatenate()([x, x_skip])
            return x

        # Image input
        image = tkl.Input(shape=self.image_shape)
        reward = tkl.Input(shape=(1, ))
        action_type = tkl.Input(shape=(1, ))

        r = tkl.Reshape((1, 1, 1))(reward)
        r = tkl.UpSampling2D(size=self.image_shape[:-1])(r)

        a = tkl.Lambda(self.one_hot)(action_type)
        a = tkl.Reshape((1, 1, 4))(a)
        a = tkl.UpSampling2D(size=self.image_shape[:-1])(a)
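        # r and a are now image-sized planes carrying the reward and the
        # one-hot action, ready to be concatenated with the image as channels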

        h = tkl.Concatenate()([image, r, a])

        # Downsampling
        d1 = conv2d(h, self.gf, batch_normalization=False)
        d2 = conv2d(d1, self.gf * 2)
        d3 = conv2d(d2, self.gf * 4)
        d4 = conv2d(d3, self.gf * 8)
        d5 = conv2d(d4, self.gf * 8, dropout_rate=0.2)
        d6 = conv2d(d5, self.gf * 8, dropout_rate=0.4)

        if self.image_shape[0] == 128:
            d7 = conv2d(d6, self.gf * 8, dropout_rate=0.4)

            # Upsampling
            u1 = deconv2d(d7, d6, self.gf * 8, dropout_rate=0.4)
            u2 = deconv2d(u1, d5, self.gf * 8, dropout_rate=0.4)
        else:
            u2 = deconv2d(d6, d5, self.gf * 8, dropout_rate=0.4)

        u3 = deconv2d(u2, d4, self.gf * 8, dropout_rate=0.2)
        u4 = deconv2d(u3, d3, self.gf * 4, dropout_rate=0.2)
        u5 = deconv2d(u4, d2, self.gf * 2)
        u6 = deconv2d(u5, d1, self.gf)

        u7 = tkl.UpSampling2D(size=2)(u6)
        result = tkl.Conv2D(self.image_shape[2],
                            kernel_size=4,
                            strides=1,
                            padding='same',
                            activation='tanh')(u7)
        assert result.shape[1:] == self.image_shape

        return tk.models.Model([image, reward, action_type],
                               result,
                               name='gen')
Example #7
    def __init__(self, num_classes, activation: str = "leaky"):
        super(PANet, self).__init__(name="PANet")
        self.conv78 = YOLOConv2D(
            filters=256, kernel_size=1, activation=activation
        )
        self.upSampling78 = layers.UpSampling2D(interpolation="bilinear")
        self.conv79 = YOLOConv2D(
            filters=256, kernel_size=1, activation=activation
        )
        self.concat78_79 = layers.Concatenate(axis=-1)

        self.conv80 = YOLOConv2D(
            filters=256, kernel_size=1, activation=activation
        )
        self.conv81 = YOLOConv2D(
            filters=512, kernel_size=3, activation=activation
        )
        self.conv82 = YOLOConv2D(
            filters=256, kernel_size=1, activation=activation
        )
        self.conv83 = YOLOConv2D(
            filters=512, kernel_size=3, activation=activation
        )
        self.conv84 = YOLOConv2D(
            filters=256, kernel_size=1, activation=activation
        )

        self.conv85 = YOLOConv2D(
            filters=128, kernel_size=1, activation=activation
        )
        self.upSampling85 = layers.UpSampling2D(interpolation="bilinear")
        self.conv86 = YOLOConv2D(
            filters=128, kernel_size=1, activation=activation
        )
        self.concat85_86 = layers.Concatenate(axis=-1)

        self.conv87 = YOLOConv2D(
            filters=128, kernel_size=1, activation=activation
        )
        self.conv88 = YOLOConv2D(
            filters=256, kernel_size=3, activation=activation
        )
        self.conv89 = YOLOConv2D(
            filters=128, kernel_size=1, activation=activation
        )
        self.conv90 = YOLOConv2D(
            filters=256, kernel_size=3, activation=activation
        )
        self.conv91 = YOLOConv2D(
            filters=128, kernel_size=1, activation=activation
        )

        self.conv92 = YOLOConv2D(
            filters=256, kernel_size=3, activation=activation
        )
        self.conv93 = YOLOConv2D(
            filters=3 * (num_classes + 5), kernel_size=1, activation=None,
        )

        self.conv94 = YOLOConv2D(
            filters=256, kernel_size=3, strides=2, activation=activation
        )
        self.concat84_94 = layers.Concatenate(axis=-1)

        self.conv95 = YOLOConv2D(
            filters=256, kernel_size=1, activation=activation
        )
        self.conv96 = YOLOConv2D(
            filters=512, kernel_size=3, activation=activation
        )
        self.conv97 = YOLOConv2D(
            filters=256, kernel_size=1, activation=activation
        )
        self.conv98 = YOLOConv2D(
            filters=512, kernel_size=3, activation=activation
        )
        self.conv99 = YOLOConv2D(
            filters=256, kernel_size=1, activation=activation
        )

        self.conv100 = YOLOConv2D(
            filters=512, kernel_size=3, activation=activation
        )
        self.conv101 = YOLOConv2D(
            filters=3 * (num_classes + 5), kernel_size=1, activation=None,
        )

        self.conv102 = YOLOConv2D(
            filters=512, kernel_size=3, strides=2, activation=activation
        )
        self.concat77_102 = layers.Concatenate(axis=-1)

        self.conv103 = YOLOConv2D(
            filters=512, kernel_size=1, activation=activation
        )
        self.conv104 = YOLOConv2D(
            filters=1024, kernel_size=3, activation=activation
        )
        self.conv105 = YOLOConv2D(
            filters=512, kernel_size=1, activation=activation
        )
        self.conv106 = YOLOConv2D(
            filters=1024, kernel_size=3, activation=activation
        )
        self.conv107 = YOLOConv2D(
            filters=512, kernel_size=1, activation=activation
        )

        self.conv108 = YOLOConv2D(
            filters=1024, kernel_size=3, activation=activation
        )
        self.conv109 = YOLOConv2D(
            filters=3 * (num_classes + 5), kernel_size=1, activation=None,
        )
Example #8
def SVBRDF_partial_branched():
    print("PARTIALLY BRANCHED")
    inputs = keras.Input(shape=(256, 256) + (3, ))

    ### The complete network is written out by hand: long code, but it works.
    ### Keras subclassing was avoided: a subclassed model trains but cannot be reloaded from disk.
    # a: albedo, s: specular, n: normal, r: roughness

    #================
    #==== albedo ====
    #================
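    # Global-feature (gf) track: a 1x1 global average pool runs alongside the
    # U-Net; at each scale it is broadcast back into the feature maps
    # (Dense -> Add) and refreshed from the newly pooled features
    # (Concatenate -> Dense -> selu).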

    a_gf = layers.AveragePooling2D(inputs.shape[1], inputs.shape[1])(inputs)
    a_gf = layers.Dense(filter_3[0])(a_gf)
    a_gf = layers.Activation('selu')(a_gf)
    a_encoder1 = layers.Conv2D(filters=filter_3[0],
                               kernel_size=4,
                               padding="same")(inputs)
    a_encoder1 = layers.BatchNormalization()(a_encoder1)
    a_encoder1 = layers.Activation("selu")(a_encoder1)

    a_encoder2 = layers.MaxPooling2D(pool_size=(2, 2),
                                     padding="same")(a_encoder1)
    a_gfdown = layers.AveragePooling2D(a_encoder2.shape[1],
                                       a_encoder2.shape[1])(a_encoder2)
    a_gfup = layers.Dense(filter_3[0])(a_gf)
    a_gf = layers.Concatenate()([a_gf, a_gfdown])
    a_gf = layers.Dense(filter_3[1])(a_gf)
    a_gf = layers.Activation('selu')(a_gf)
    a_encoder2 = layers.Add()([a_encoder2, a_gfup])
    a_encoder2 = LeakyReLU()(a_encoder2)
    a_encoder2 = layers.Conv2D(filters=filter_3[1],
                               kernel_size=4,
                               padding="same")(a_encoder2)
    a_encoder2 = layers.BatchNormalization()(a_encoder2)
    a_encoder2 = layers.Activation("selu")(a_encoder2)

    a_encoder3 = layers.MaxPooling2D(pool_size=(2, 2),
                                     padding="same")(a_encoder2)
    a_gfdown = layers.AveragePooling2D(a_encoder3.shape[1],
                                       a_encoder3.shape[1])(a_encoder3)
    a_gfup = layers.Dense(filter_3[1])(a_gf)
    a_gf = layers.Concatenate()([a_gf, a_gfdown])
    a_gf = layers.Dense(filter_3[2])(a_gf)
    a_gf = layers.Activation('selu')(a_gf)
    a_encoder3 = layers.Add()([a_encoder3, a_gfup])
    a_encoder3 = LeakyReLU()(a_encoder3)
    a_encoder3 = layers.Conv2D(filters=filter_3[2],
                               kernel_size=4,
                               padding="same")(a_encoder3)
    a_encoder3 = layers.BatchNormalization()(a_encoder3)
    a_encoder3 = layers.Activation("selu")(a_encoder3)

    a_encoder4 = layers.MaxPooling2D(pool_size=(2, 2),
                                     padding="same")(a_encoder3)
    a_gfdown = layers.AveragePooling2D(a_encoder4.shape[1],
                                       a_encoder4.shape[1])(a_encoder4)
    a_gfup = layers.Dense(filter_3[2])(a_gf)
    a_gf = layers.Concatenate()([a_gf, a_gfdown])
    a_gf = layers.Dense(filter_3[3])(a_gf)
    a_gf = layers.Activation('selu')(a_gf)
    a_encoder4 = layers.Add()([a_encoder4, a_gfup])
    a_encoder4 = LeakyReLU()(a_encoder4)
    a_encoder4 = layers.Conv2D(filters=filter_3[3],
                               kernel_size=4,
                               padding="same")(a_encoder4)
    a_encoder4 = layers.BatchNormalization()(a_encoder4)
    a_encoder4 = layers.Activation("selu")(a_encoder4)

    a_encoder5 = layers.MaxPooling2D(pool_size=(2, 2),
                                     padding="same")(a_encoder4)
    a_gfdown = layers.AveragePooling2D(a_encoder5.shape[1],
                                       a_encoder5.shape[1])(a_encoder5)
    a_gfup = layers.Dense(filter_3[3])(a_gf)
    a_gf = layers.Concatenate()([a_gf, a_gfdown])
    a_gf = layers.Dense(filter_3[4])(a_gf)
    a_gf = layers.Activation('selu')(a_gf)
    a_encoder5 = layers.Add()([a_encoder5, a_gfup])
    a_encoder5 = LeakyReLU()(a_encoder5)
    a_encoder5 = layers.Conv2D(filters=filter_3[4],
                               kernel_size=4,
                               padding="same")(a_encoder5)
    a_encoder5 = layers.BatchNormalization()(a_encoder5)
    a_encoder5 = layers.Activation("selu")(a_encoder5)

    a_encoder6 = layers.MaxPooling2D(pool_size=(2, 2),
                                     padding="same")(a_encoder5)
    a_gfdown = layers.AveragePooling2D(a_encoder6.shape[1],
                                       a_encoder6.shape[1])(a_encoder6)
    a_gfup = layers.Dense(filter_3[4])(a_gf)
    a_gf = layers.Concatenate()([a_gf, a_gfdown])
    a_gf = layers.Dense(filter_3[5])(a_gf)
    a_gf = layers.Activation('selu')(a_gf)
    a_encoder6 = layers.Add()([a_encoder6, a_gfup])
    a_encoder6 = LeakyReLU()(a_encoder6)
    a_encoder6 = layers.Conv2D(filters=filter_3[5],
                               kernel_size=4,
                               padding="same")(a_encoder6)
    a_encoder6 = layers.BatchNormalization()(a_encoder6)
    a_encoder6 = layers.Activation("selu")(a_encoder6)

    a_encoder7 = layers.MaxPooling2D(pool_size=(2, 2),
                                     padding="same")(a_encoder6)
    a_gfdown = layers.AveragePooling2D(a_encoder7.shape[1],
                                       a_encoder7.shape[1])(a_encoder7)
    a_gfup = layers.Dense(filter_3[5])(a_gf)
    a_gf = layers.Concatenate()([a_gf, a_gfdown])
    a_gf = layers.Dense(filter_3[6])(a_gf)
    a_gf = layers.Activation('selu')(a_gf)
    a_encoder7 = layers.Add()([a_encoder7, a_gfup])
    a_encoder7 = LeakyReLU()(a_encoder7)
    a_encoder7 = layers.Conv2D(filters=filter_3[6],
                               kernel_size=4,
                               padding="same")(a_encoder7)
    a_encoder7 = layers.BatchNormalization()(a_encoder7)
    a_encoder7 = layers.Activation("selu")(a_encoder7)

    a_bottom = layers.MaxPooling2D(pool_size=(2, 2),
                                   padding="same")(a_encoder7)
    a_gfdown = layers.AveragePooling2D(a_bottom.shape[1],
                                       a_bottom.shape[1])(a_bottom)
    a_gfup = layers.Dense(filter_3[6])(a_gf)
    a_gf = layers.Concatenate()([a_gf, a_gfdown])
    a_gf = layers.Dense(filter_3[7])(a_gf)
    a_gf = layers.Activation('selu')(a_gf)
    a_bottom = layers.Add()([a_bottom, a_gfup])
    a_bottom = LeakyReLU()(a_bottom)
    a_bottom = layers.Conv2D(filters=filter_3[7],
                             kernel_size=4,
                             padding="same")(a_bottom)
    a_bottom = layers.BatchNormalization()(a_bottom)
    a_bottom = layers.Activation("selu")(a_bottom)
    a_bottom = layers.Conv2DTranspose(filters=filter_3[8],
                                      kernel_size=2,
                                      strides=2,
                                      padding="same")(a_bottom)

    a_decoder7 = layers.Concatenate()([a_encoder7, a_bottom])
    a_gfdown = layers.AveragePooling2D(a_decoder7.shape[1],
                                       a_decoder7.shape[1])(a_decoder7)
    a_gfup = layers.Dense(filter_3[8] * 2)(a_gf)
    a_gf = layers.Concatenate()([a_gf, a_gfdown])
    a_gf = layers.Dense(filter_3[8])(a_gf)
    a_gf = layers.Activation('selu')(a_gf)
    a_decoder7 = layers.Add()([a_decoder7, a_gfup])
    a_decoder7 = LeakyReLU()(a_decoder7)
    a_decoder7 = layers.Conv2D(filters=filter_3[8],
                               kernel_size=4,
                               padding="same")(a_decoder7)
    a_decoder7 = layers.BatchNormalization()(a_decoder7)
    a_decoder7 = layers.Activation("selu")(a_decoder7)
    a_decoder7 = layers.Conv2DTranspose(filters=filter_3[9],
                                        kernel_size=2,
                                        strides=2,
                                        padding="same")(a_decoder7)

    a_decoder6 = layers.Concatenate()([a_encoder6, a_decoder7])
    a_gfdown = layers.AveragePooling2D(a_decoder6.shape[1],
                                       a_decoder6.shape[1])(a_decoder6)
    a_gfup = layers.Dense(filter_3[9] * 2)(a_gf)
    a_gf = layers.Concatenate()([a_gf, a_gfdown])
    a_gf = layers.Dense(filter_3[9])(a_gf)
    a_gf = layers.Activation('selu')(a_gf)
    a_decoder6 = layers.Add()([a_decoder6, a_gfup])
    a_decoder6 = LeakyReLU()(a_decoder6)
    a_decoder6 = layers.Conv2D(filters=filter_3[9],
                               kernel_size=4,
                               padding="same")(a_decoder6)
    a_decoder6 = layers.BatchNormalization()(a_decoder6)
    a_decoder6 = layers.Activation("selu")(a_decoder6)
    a_decoder6 = layers.Conv2DTranspose(filters=filter_3[10],
                                        kernel_size=2,
                                        strides=2,
                                        padding="same")(a_decoder6)

    a_decoder5 = layers.Concatenate()([a_encoder5, a_decoder6])
    a_gfdown = layers.AveragePooling2D(a_decoder5.shape[1],
                                       a_decoder5.shape[1])(a_decoder5)
    a_gfup = layers.Dense(filter_3[10] * 2)(a_gf)
    a_gf = layers.Concatenate()([a_gf, a_gfdown])
    a_gf = layers.Dense(filter_3[10])(a_gf)
    a_gf = layers.Activation('selu')(a_gf)
    a_decoder5 = layers.Add()([a_decoder5, a_gfup])
    a_decoder5 = LeakyReLU()(a_decoder5)
    a_decoder5 = layers.Conv2D(filters=filter_3[10],
                               kernel_size=4,
                               padding="same")(a_decoder5)
    a_decoder5 = layers.BatchNormalization()(a_decoder5)
    a_decoder5 = layers.Activation("selu")(a_decoder5)
    a_decoder5 = layers.Conv2DTranspose(filters=filter_3[11],
                                        kernel_size=2,
                                        strides=2,
                                        padding="same")(a_decoder5)

    a_decoder4 = layers.Concatenate()([a_encoder4, a_decoder5])
    a_gfdown = layers.AveragePooling2D(a_decoder4.shape[1],
                                       a_decoder4.shape[1])(a_decoder4)
    a_gfup = layers.Dense(filter_3[11] * 2)(a_gf)
    a_gf = layers.Concatenate()([a_gf, a_gfdown])
    a_gf = layers.Dense(filter_3[11])(a_gf)
    a_gf = layers.Activation('selu')(a_gf)
    a_decoder4 = layers.Add()([a_decoder4, a_gfup])
    a_decoder4 = LeakyReLU()(a_decoder4)
    a_decoder4 = layers.Conv2D(filters=filter_3[11],
                               kernel_size=4,
                               padding="same")(a_decoder4)
    a_decoder4 = layers.BatchNormalization()(a_decoder4)
    a_decoder4 = layers.Activation("selu")(a_decoder4)
    a_decoder4 = layers.Conv2DTranspose(filters=filter_3[12],
                                        kernel_size=2,
                                        strides=2,
                                        padding="same")(a_decoder4)

    a_decoder3 = layers.Concatenate()([a_encoder3, a_decoder4])
    a_gfdown = layers.AveragePooling2D(a_decoder3.shape[1],
                                       a_decoder3.shape[1])(a_decoder3)
    a_gfup = layers.Dense(filter_3[12] * 2)(a_gf)
    a_gf = layers.Concatenate()([a_gf, a_gfdown])
    a_gf = layers.Dense(filter_3[12])(a_gf)
    a_gf = layers.Activation('selu')(a_gf)
    a_decoder3 = layers.Add()([a_decoder3, a_gfup])
    a_decoder3 = LeakyReLU()(a_decoder3)
    a_decoder3 = layers.Conv2D(filters=filter_3[12],
                               kernel_size=4,
                               padding="same")(a_decoder3)
    a_decoder3 = layers.BatchNormalization()(a_decoder3)
    a_decoder3 = layers.Activation("selu")(a_decoder3)
    a_decoder3 = layers.Conv2DTranspose(filters=filter_3[13],
                                        kernel_size=2,
                                        strides=2,
                                        padding="same")(a_decoder3)

    a_decoder2 = layers.Concatenate()([a_encoder2, a_decoder3])
    a_gfdown = layers.AveragePooling2D(a_decoder2.shape[1],
                                       a_decoder2.shape[1])(a_decoder2)
    a_gfup = layers.Dense(filter_3[13] * 2)(a_gf)
    a_gf = layers.Concatenate()([a_gf, a_gfdown])
    a_gf = layers.Dense(filter_3[13])(a_gf)
    a_gf = layers.Activation('selu')(a_gf)
    a_decoder2 = layers.Add()([a_decoder2, a_gfup])
    a_decoder2 = LeakyReLU()(a_decoder2)
    a_decoder2 = layers.Conv2D(filters=filter_3[13],
                               kernel_size=4,
                               padding="same")(a_decoder2)
    a_decoder2 = layers.BatchNormalization()(a_decoder2)
    a_decoder2 = layers.Activation("selu")(a_decoder2)
    a_decoder2 = layers.Conv2DTranspose(filters=filter_3[14],
                                        kernel_size=2,
                                        strides=2,
                                        padding="same")(a_decoder2)

    a_decoder1 = layers.Concatenate()([a_encoder1, a_decoder2])
    a_gfup = layers.Dense(filter_3[14] * 2)(a_gf)
    a_decoder1 = layers.Add()([a_decoder1, a_gfup])
    a_decoder1 = LeakyReLU()(a_decoder1)
    a_decoder1 = layers.Conv2D(filters=filter_3[14],
                               kernel_size=4,
                               padding="same")(a_decoder1)
    a_decoder1 = layers.BatchNormalization()(a_decoder1)
    a_decoder1 = layers.Activation("selu")(a_decoder1)

    a_outputs = layers.Conv2D(3,
                              kernel_size=1,
                              activation="tanh",
                              padding="same")(a_decoder1)

    #================
    #=== specular ===
    #================

    s_gf = layers.AveragePooling2D(inputs.shape[1], inputs.shape[1])(inputs)
    s_gf = layers.Dense(filter_6[0])(s_gf)
    s_gf = layers.Activation('selu')(s_gf)
    s_encoder1 = layers.Conv2D(filters=filter_6[0],
                               kernel_size=4,
                               padding="same")(inputs)
    s_encoder1 = layers.BatchNormalization()(s_encoder1)
    s_encoder1 = layers.Activation("selu")(s_encoder1)

    s_encoder2 = layers.MaxPooling2D(pool_size=(2, 2),
                                     padding="same")(s_encoder1)
    s_gfdown = layers.AveragePooling2D(s_encoder2.shape[1],
                                       s_encoder2.shape[1])(s_encoder2)
    s_gfup = layers.Dense(filter_6[0])(s_gf)
    s_gf = layers.Concatenate()([s_gf, s_gfdown])
    s_gf = layers.Dense(filter_6[1])(s_gf)
    s_gf = layers.Activation('selu')(s_gf)
    s_encoder2 = layers.Add()([s_encoder2, s_gfup])
    s_encoder2 = LeakyReLU()(s_encoder2)
    s_encoder2 = layers.Conv2D(filters=filter_6[1],
                               kernel_size=4,
                               padding="same")(s_encoder2)
    s_encoder2 = layers.BatchNormalization()(s_encoder2)
    s_encoder2 = layers.Activation("selu")(s_encoder2)

    s_encoder3 = layers.MaxPooling2D(pool_size=(2, 2),
                                     padding="same")(s_encoder2)
    s_gfdown = layers.AveragePooling2D(s_encoder3.shape[1],
                                       s_encoder3.shape[1])(s_encoder3)
    s_gfup = layers.Dense(filter_6[1])(s_gf)
    s_gf = layers.Concatenate()([s_gf, s_gfdown])
    s_gf = layers.Dense(filter_6[2])(s_gf)
    s_gf = layers.Activation('selu')(s_gf)
    s_encoder3 = layers.Add()([s_encoder3, s_gfup])
    s_encoder3 = LeakyReLU()(s_encoder3)
    s_encoder3 = layers.Conv2D(filters=filter_6[2],
                               kernel_size=4,
                               padding="same")(s_encoder3)
    s_encoder3 = layers.BatchNormalization()(s_encoder3)
    s_encoder3 = layers.Activation("selu")(s_encoder3)

    s_encoder4 = layers.MaxPooling2D(pool_size=(2, 2),
                                     padding="same")(s_encoder3)
    s_gfdown = layers.AveragePooling2D(s_encoder4.shape[1],
                                       s_encoder4.shape[1])(s_encoder4)
    s_gfup = layers.Dense(filter_6[2])(s_gf)
    s_gf = layers.Concatenate()([s_gf, s_gfdown])
    s_gf = layers.Dense(filter_6[3])(s_gf)
    s_gf = layers.Activation('selu')(s_gf)
    s_encoder4 = layers.Add()([s_encoder4, s_gfup])
    s_encoder4 = LeakyReLU()(s_encoder4)
    s_encoder4 = layers.Conv2D(filters=filter_6[3],
                               kernel_size=4,
                               padding="same")(s_encoder4)
    s_encoder4 = layers.BatchNormalization()(s_encoder4)
    s_encoder4 = layers.Activation("selu")(s_encoder4)

    s_encoder5 = layers.MaxPooling2D(pool_size=(2, 2),
                                     padding="same")(s_encoder4)
    s_gfdown = layers.AveragePooling2D(s_encoder5.shape[1],
                                       s_encoder5.shape[1])(s_encoder5)
    s_gfup = layers.Dense(filter_6[3])(s_gf)
    s_gf = layers.Concatenate()([s_gf, s_gfdown])
    s_gf = layers.Dense(filter_6[4])(s_gf)
    s_gf = layers.Activation('selu')(s_gf)
    s_encoder5 = layers.Add()([s_encoder5, s_gfup])
    s_encoder5 = LeakyReLU()(s_encoder5)
    s_encoder5 = layers.Conv2D(filters=filter_6[4],
                               kernel_size=4,
                               padding="same")(s_encoder5)
    s_encoder5 = layers.BatchNormalization()(s_encoder5)
    s_encoder5 = layers.Activation("selu")(s_encoder5)

    s_encoder6 = layers.MaxPooling2D(pool_size=(2, 2),
                                     padding="same")(s_encoder5)
    s_gfdown = layers.AveragePooling2D(s_encoder6.shape[1],
                                       s_encoder6.shape[1])(s_encoder6)
    s_gfup = layers.Dense(filter_6[4])(s_gf)
    s_gf = layers.Concatenate()([s_gf, s_gfdown])
    s_gf = layers.Dense(filter_6[5])(s_gf)
    s_gf = layers.Activation('selu')(s_gf)
    s_encoder6 = layers.Add()([s_encoder6, s_gfup])
    s_encoder6 = LeakyReLU()(s_encoder6)
    s_encoder6 = layers.Conv2D(filters=filter_6[5],
                               kernel_size=4,
                               padding="same")(s_encoder6)
    s_encoder6 = layers.BatchNormalization()(s_encoder6)
    s_encoder6 = layers.Activation("selu")(s_encoder6)

    s_encoder7 = layers.MaxPooling2D(pool_size=(2, 2),
                                     padding="same")(s_encoder6)
    s_gfdown = layers.AveragePooling2D(s_encoder7.shape[1],
                                       s_encoder7.shape[1])(s_encoder7)
    s_gfup = layers.Dense(filter_6[5])(s_gf)
    s_gf = layers.Concatenate()([s_gf, s_gfdown])
    s_gf = layers.Dense(filter_6[6])(s_gf)
    s_gf = layers.Activation('selu')(s_gf)
    s_encoder7 = layers.Add()([s_encoder7, s_gfup])
    s_encoder7 = LeakyReLU()(s_encoder7)
    s_encoder7 = layers.Conv2D(filters=filter_6[6],
                               kernel_size=4,
                               padding="same")(s_encoder7)
    s_encoder7 = layers.BatchNormalization()(s_encoder7)
    s_encoder7 = layers.Activation("selu")(s_encoder7)

    s_bottom = layers.MaxPooling2D(pool_size=(2, 2),
                                   padding="same")(s_encoder7)
    s_gfdown = layers.AveragePooling2D(s_bottom.shape[1],
                                       s_bottom.shape[1])(s_bottom)
    s_gfup = layers.Dense(filter_6[6])(s_gf)
    s_gf = layers.Concatenate()([s_gf, s_gfdown])
    s_gf = layers.Dense(filter_6[7])(s_gf)
    s_gf = layers.Activation('selu')(s_gf)
    s_bottom = layers.Add()([s_bottom, s_gfup])
    s_bottom = LeakyReLU()(s_bottom)
    s_bottom = layers.Conv2D(filters=filter_6[7],
                             kernel_size=4,
                             padding="same")(s_bottom)
    s_bottom = layers.BatchNormalization()(s_bottom)
    s_bottom = layers.Activation("selu")(s_bottom)
    s_bottom = layers.Conv2DTranspose(filters=filter_6[8],
                                      kernel_size=2,
                                      strides=2,
                                      padding="same")(s_bottom)

    s_decoder7 = layers.Concatenate()([s_encoder7, s_bottom])
    s_gfdown = layers.AveragePooling2D(s_decoder7.shape[1],
                                       s_decoder7.shape[1])(s_decoder7)
    s_gfup = layers.Dense(filter_6[8] * 2)(s_gf)
    s_gf = layers.Concatenate()([s_gf, s_gfdown])
    s_gf = layers.Dense(filter_6[8])(s_gf)
    s_gf = layers.Activation('selu')(s_gf)
    s_decoder7 = layers.Add()([s_decoder7, s_gfup])
    s_decoder7 = LeakyReLU()(s_decoder7)
    s_decoder7 = layers.Conv2D(filters=filter_6[8],
                               kernel_size=4,
                               padding="same")(s_decoder7)
    s_decoder7 = layers.BatchNormalization()(s_decoder7)
    s_decoder7 = layers.Activation("selu")(s_decoder7)
    s_decoder7 = layers.Conv2DTranspose(filters=filter_6[9],
                                        kernel_size=2,
                                        strides=2,
                                        padding="same")(s_decoder7)

    s_decoder6 = layers.Concatenate()([s_encoder6, s_decoder7])
    s_gfdown = layers.AveragePooling2D(s_decoder6.shape[1],
                                       s_decoder6.shape[1])(s_decoder6)
    s_gfup = layers.Dense(filter_6[9] * 2)(s_gf)
    s_gf = layers.Concatenate()([s_gf, s_gfdown])
    s_gf = layers.Dense(filter_6[9])(s_gf)
    s_gf = layers.Activation('selu')(s_gf)
    s_decoder6 = layers.Add()([s_decoder6, s_gfup])
    s_decoder6 = LeakyReLU()(s_decoder6)
    s_decoder6 = layers.Conv2D(filters=filter_6[9],
                               kernel_size=4,
                               padding="same")(s_decoder6)
    s_decoder6 = layers.BatchNormalization()(s_decoder6)
    s_decoder6 = layers.Activation("selu")(s_decoder6)
    s_decoder6 = layers.Conv2DTranspose(filters=filter_6[10],
                                        kernel_size=2,
                                        strides=2,
                                        padding="same")(s_decoder6)

    s_decoder5 = layers.Concatenate()([s_encoder5, s_decoder6])
    s_gfdown = layers.AveragePooling2D(s_decoder5.shape[1],
                                       s_decoder5.shape[1])(s_decoder5)
    s_gfup = layers.Dense(filter_6[10] * 2)(s_gf)
    s_gf = layers.Concatenate()([s_gf, s_gfdown])
    s_gf = layers.Dense(filter_6[10])(s_gf)
    s_gf = layers.Activation('selu')(s_gf)
    s_decoder5 = layers.Add()([s_decoder5, s_gfup])
    s_decoder5 = LeakyReLU()(s_decoder5)
    s_decoder5 = layers.Conv2D(filters=filter_6[10],
                               kernel_size=4,
                               padding="same")(s_decoder5)
    s_decoder5 = layers.BatchNormalization()(s_decoder5)
    s_decoder5 = layers.Activation("selu")(s_decoder5)
    s_decoder5 = layers.Conv2DTranspose(filters=filter_6[11],
                                        kernel_size=2,
                                        strides=2,
                                        padding="same")(s_decoder5)

    s_decoder4 = layers.Concatenate()([s_encoder4, s_decoder5])
    s_gfdown = layers.AveragePooling2D(s_decoder4.shape[1],
                                       s_decoder4.shape[1])(s_decoder4)
    s_gfup = layers.Dense(filter_6[11] * 2)(s_gf)
    s_gf = layers.Concatenate()([s_gf, s_gfdown])
    s_gf = layers.Dense(filter_6[11])(s_gf)
    s_gf = layers.Activation('selu')(s_gf)
    s_decoder4 = layers.Add()([s_decoder4, s_gfup])
    s_decoder4 = LeakyReLU()(s_decoder4)
    s_decoder4 = layers.Conv2D(filters=filter_6[11],
                               kernel_size=4,
                               padding="same")(s_decoder4)
    s_decoder4 = layers.BatchNormalization()(s_decoder4)
    s_decoder4 = layers.Activation("selu")(s_decoder4)
    s_decoder4 = layers.Conv2DTranspose(filters=filter_6[12],
                                        kernel_size=2,
                                        strides=2,
                                        padding="same")(s_decoder4)

    s_decoder3 = layers.Concatenate()([s_encoder3, s_decoder4])
    s_gfdown = layers.AveragePooling2D(s_decoder3.shape[1],
                                       s_decoder3.shape[1])(s_decoder3)
    s_gfup = layers.Dense(filter_6[12] * 2)(s_gf)
    s_gf = layers.Concatenate()([s_gf, s_gfdown])
    s_gf = layers.Dense(filter_6[12])(s_gf)
    s_gf = layers.Activation('selu')(s_gf)
    s_decoder3 = layers.Add()([s_decoder3, s_gfup])
    s_decoder3 = LeakyReLU()(s_decoder3)
    s_decoder3 = layers.Conv2D(filters=filter_6[12],
                               kernel_size=4,
                               padding="same")(s_decoder3)
    s_decoder3 = layers.BatchNormalization()(s_decoder3)
    s_decoder3 = layers.Activation("selu")(s_decoder3)
    s_decoder3 = layers.Conv2DTranspose(filters=filter_6[13],
                                        kernel_size=2,
                                        strides=2,
                                        padding="same")(s_decoder3)

    s_decoder2 = layers.Concatenate()([s_encoder2, s_decoder3])
    s_gfdown = layers.AveragePooling2D(s_decoder2.shape[1],
                                       s_decoder2.shape[1])(s_decoder2)
    s_gfup = layers.Dense(filter_6[13] * 2)(s_gf)
    s_gf = layers.Concatenate()([s_gf, s_gfdown])
    s_gf = layers.Dense(filter_6[13])(s_gf)
    s_gf = layers.Activation('selu')(s_gf)
    s_decoder2 = layers.Add()([s_decoder2, s_gfup])
    s_decoder2 = LeakyReLU()(s_decoder2)
    s_decoder2 = layers.Conv2D(filters=filter_6[13],
                               kernel_size=4,
                               padding="same")(s_decoder2)
    s_decoder2 = layers.BatchNormalization()(s_decoder2)
    s_decoder2 = layers.Activation("selu")(s_decoder2)
    s_decoder2 = layers.Conv2DTranspose(filters=filter_6[14],
                                        kernel_size=2,
                                        strides=2,
                                        padding="same")(s_decoder2)

    s_decoder1 = layers.Concatenate()([s_encoder1, s_decoder2])
    s_gfup = layers.Dense(filter_6[14] * 2)(s_gf)
    s_decoder1 = layers.Add()([s_decoder1, s_gfup])
    s_decoder1 = LeakyReLU()(s_decoder1)
    s_decoder1 = layers.Conv2D(filters=filter_6[14],
                               kernel_size=4,
                               padding="same")(s_decoder1)
    s_decoder1 = layers.BatchNormalization()(s_decoder1)
    s_decoder1 = layers.Activation("selu")(s_decoder1)

    s_outputs = layers.Conv2D(6,
                              kernel_size=1,
                              activation="tanh",
                              padding="same")(s_decoder1)

    #================
    #==== output ====
    #================

    outputs = layers.Concatenate()([a_outputs, s_outputs])
    model = keras.Model(inputs, outputs)
    return model


#model = SVBRDF_partial_branched()
#model.summary()
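
filter_3 and filter_6 are module-level channel-width lists (15 entries each) that this snippet does not define. The Add() calls on the decoder path force a symmetric progression (filter[i] must equal filter[14 - i]); a purely hypothetical example satisfying that constraint:

filter_3 = [32, 64, 128, 256, 512, 512, 512, 512, 512, 512, 512, 256, 128, 64, 32]  # hypothetical widths
filter_6 = list(filter_3)  # hypothetical: same progression for the specular branch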
Example #9
    embed_anc = inner3_model(in_layer_anc)
    embed_pos = inner3_model(in_layer_pos)
    embed_neg = inner3_model(in_layer_neg)

    out_anc = outer_model(in_layer_anc)
    out_pos = outer_model(in_layer_pos)
    out_neg = outer_model(in_layer_neg)
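
    # concatenate every branch output into a single tensor so one custom loss
    # can see anchor, positive and negative activations at once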

    concat = layers.Concatenate()([
        conv1_anc,
        conv1_pos,
        conv1_neg,  # 26*26*32
        conv2_anc,
        conv2_pos,
        conv2_neg,  # 12*12*32
        embed_anc,
        embed_pos,
        embed_neg,  # 128
        out_anc,
        out_pos,
        out_neg
    ])  # 10

    model = Model(inputs=[in_layer_anc, in_layer_pos, in_layer_neg],
                  outputs=concat)

    # %% Trainable Model


    def dual_loss(n_cls):
        def _loss(y_true, y_pred):
Example #10
    def __init__(self, filters, name=None):
        super(YoloBlock, self).__init__(name=name)
        self.darkconv_0 = DarknetBlock(filters, 1)
        self.upsample_0 = layers.UpSampling2D(2)
        self.concat_0 = layers.Concatenate()
Example #11
def concat(batchList, axis=0):
    """
    batchList : list of tensors that need to be concatenated
    axis : axis along which to concatenate tensors
    """
    return lyrs.Concatenate(axis=axis)(batchList)
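A minimal usage sketch (tensor names hypothetical; assumes lyrs is the tensorflow.keras.layers module):

merged = concat([branch_a, branch_b], axis=-1)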
Example #12
    def _build_naml(self):
        """The main function to create NAML's logic. The core of NAML
        is a user encoder and a news encoder.
        
        Returns:
            obj: a model used to train.
            obj: a model used to evaluate and predict.
        """
        hparams = self.hparams

        his_input_title = keras.Input(
            shape=(hparams.his_size, hparams.title_size), dtype="int32"
        )
        his_input_body = keras.Input(
            shape=(hparams.his_size, hparams.body_size), dtype="int32"
        )
        his_input_vert = keras.Input(shape=(hparams.his_size, 1), dtype="int32")
        his_input_subvert = keras.Input(shape=(hparams.his_size, 1), dtype="int32")

        pred_input_title = keras.Input(
            shape=(hparams.npratio + 1, hparams.title_size), dtype="int32"
        )
        pred_input_body = keras.Input(
            shape=(hparams.npratio + 1, hparams.body_size), dtype="int32"
        )
        pred_input_vert = keras.Input(shape=(hparams.npratio + 1, 1), dtype="int32")
        pred_input_subvert = keras.Input(shape=(hparams.npratio + 1, 1), dtype="int32")

        pred_input_title_one = keras.Input(
            shape=(1, hparams.title_size,), dtype="int32"
        )
        pred_input_body_one = keras.Input(shape=(1, hparams.body_size,), dtype="int32")
        pred_input_vert_one = keras.Input(shape=(1, 1), dtype="int32")
        pred_input_subvert_one = keras.Input(shape=(1, 1), dtype="int32")

        his_title_body_verts = layers.Concatenate(axis=-1)(
            [his_input_title, his_input_body, his_input_vert, his_input_subvert]
        )

        pred_title_body_verts = layers.Concatenate(axis=-1)(
            [pred_input_title, pred_input_body, pred_input_vert, pred_input_subvert]
        )

        pred_title_body_verts_one = layers.Concatenate(axis=-1)(
            [
                pred_input_title_one,
                pred_input_body_one,
                pred_input_vert_one,
                pred_input_subvert_one,
            ]
        )
        pred_title_body_verts_one = layers.Reshape((-1,))(pred_title_body_verts_one)

        imp_indexes = keras.Input(shape=(1,), dtype="int32")
        user_indexes = keras.Input(shape=(1,), dtype="int32")

        embedding_layer = layers.Embedding(
            hparams.word_size,
            hparams.word_emb_dim,
            weights=[self.word2vec_embedding],
            trainable=True,
        )

        newsencoder = self._build_newsencoder(embedding_layer)
        userencoder = self._build_userencoder(newsencoder)

        user_present = userencoder(his_title_body_verts)
        news_present = layers.TimeDistributed(newsencoder)(pred_title_body_verts)
        news_present_one = newsencoder(pred_title_body_verts_one)

        preds = layers.Dot(axes=-1)([news_present, user_present])
        preds = layers.Activation(activation="softmax")(preds)

        pred_one = layers.Dot(axes=-1)([news_present_one, user_present])
        pred_one = layers.Activation(activation="sigmoid")(pred_one)

        model = keras.Model(
            [
                imp_indexes,
                user_indexes,
                his_input_title,
                his_input_body,
                his_input_vert,
                his_input_subvert,
                pred_input_title,
                pred_input_body,
                pred_input_vert,
                pred_input_subvert,
            ],
            preds,
        )

        scorer = keras.Model(
            [
                imp_indexes,
                user_indexes,
                his_input_title,
                his_input_body,
                his_input_vert,
                his_input_subvert,
                pred_input_title_one,
                pred_input_body_one,
                pred_input_vert_one,
                pred_input_subvert_one,
            ],
            pred_one,
        )

        return model, scorer
Example #13
def Convolutional(
    input_shape=(51, 51, 1),
    conv_layers_dimensions=(16, 32, 64, 128),
    dense_layers_dimensions=(32, 32),
    steps_per_pooling=1,
    dropout=(),
    dense_top=True,
    number_of_outputs=3,
    output_activation=None,
    output_kernel_size=3,
    loss=nd_mean_absolute_error,
    input_layer=None,
    convolution_block="convolutional",
    pooling_block="pooling",
    dense_block="dense",
    **kwargs
):
    """Creates and compiles a convolutional neural network.
    A convolutional network with a dense top.
    Parameters
    ----------
    input_shape : tuple of ints
        Size of the images to be analyzed.
    conv_layers_dimensions : tuple of ints
        Number of convolutions in each convolutional layer.
    dense_layers_dimensions : tuple of ints
        Number of units in each dense layer.
    dropout : tuple of float
        Dropout rates applied after the convolutions of each stage, consumed
        one rate per stage.
    number_of_outputs : int
        Number of units in the output layer.
    output_activation : str or keras activation
        The activation function of the output.
    loss : str or keras loss function
        The loss function of the network.
    convolution_block, pooling_block, dense_block : str or Callable[int] -> keras layer
        Block constructors (or names resolved by as_block) that return a layer
        whose width is set by the input argument. Can be used to further
        customize the network.
    Returns
    -------
    keras.models.Model
        Deep learning network
    """

    # Update layer functions
    dense_block = as_block(dense_block)
    convolution_block = as_block(convolution_block)
    pooling_block = as_block(pooling_block)

    ### INITIALIZE DEEP LEARNING NETWORK

    if isinstance(input_shape, list):
        network_input = [layers.Input(shape) for shape in input_shape]
        inputs = layers.Concatenate(axis=-1)(network_input)
    else:
        network_input = layers.Input(input_shape)
        inputs = network_input

    layer = inputs

    if input_layer:
        layer = input_layer(layer)

    ### CONVOLUTIONAL BASIS
    for conv_layer_dimension in conv_layers_dimensions:

        for _ in range(steps_per_pooling):
            layer = convolution_block(conv_layer_dimension)(layer)

        if dropout:
            layer = layers.SpatialDropout2D(dropout[0])(layer)
            dropout = dropout[1:]

        # add pooling layer
        layer = pooling_block(conv_layer_dimension)(layer)

    # DENSE TOP

    if dense_top:
        layer = layers.Flatten()(layer)
        for dense_layer_dimension in dense_layers_dimensions:
            layer = dense_block(dense_layer_dimension)(layer)
        output_layer = layers.Dense(number_of_outputs, activation=output_activation)(
            layer
        )
    else:

        output_layer = layers.Conv2D(
            number_of_outputs,
            kernel_size=output_kernel_size,
            activation=output_activation,
            padding="same",
            name="output",
        )(layer)

    model = models.Model(network_input, output_layer)

    return KerasModel(model, loss=loss, **kwargs)
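A hedged usage sketch for the builder above; nd_mean_absolute_error, as_block, and KerasModel come from the surrounding (deeptrack-style) module:

model = Convolutional(
    input_shape=(51, 51, 1),
    conv_layers_dimensions=(16, 32, 64),
    dense_layers_dimensions=(32,),
    number_of_outputs=3,
)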
Example #14
def build_model(nhr, nreg, conv_layers, l1, l2, lgdp1, lgdp2, gdp_out_name,
                dec_out_name):
    """
    Build the convolutional neural network.

    Our model is constructed using the Keras functional API
    (https://keras.io/getting-started/functional-api-guide/) which allows us to
    have multiple inputs. Note that all inputs must be specified with an Input
    layer.

    The main input to this model is a 4-dimensional array:
        [cases, weeks, hours, regions]
    where
        cases = number of quarters (open)
        weeks = number of weeks in a quarter (open)
        hours = 168, the number of hours in a week (fixed)
        regions = number of spatial regions (fixed)

    We also need an additional input:
        timestep = value representing time since the start of the data

    """
    # Weekly timeseries input
    input_numeric = layers.Input(shape=(nhr, nreg), name='HourlyElectricity')

    # Time since start input (for dealing with energy efficiency changes)
    input_time = layers.Input(shape=(1, ), name='TimeSinceStart')

    # Previous quarter's GDP
    input_gdp_prev = layers.Input(shape=(1, ), name='GDPPrev')

    # Natural gas data at a weekly level
    input_gas = layers.Input(shape=(1, ), name='NaturalGas')

    # Petroleum data at a weekly level
    input_petrol = layers.Input(shape=(1, ), name='Petroleum')

    ## Add a way to specify specific encodings to investigate what the
    ## encoder is responding to.  The following scalar is either 1 or
    ## zero.  0 means normal operation; 1 means we use a specified
    ## input for the encoding.
    input_switch = layers.Input(shape=(1, ), name='EncoderSwitch')
    input_switch_complement = layers.Input(shape=(1, ),
                                           name='EncoderSwitchComplement')
    encoder_input = layers.Input(
        shape=(l2, ), name='EncoderInput')  # ignored if input_switch == 0.

    # The convolutional layers need input tensors with the shape (batch, steps, channels).
    # A convolutional layer is 1D in that it slides through the data length-wise,
    # moving along just one dimension.
    # Parameters for the convolutional layers are given as a comma-separated argument:
    #
    #     [kernel_size]-[filters]-[pool_size (optional)],[filters]-...
    #
    # These are fed directly into the Conv1D layer, which has parameters:
    #     Conv1D(filters, kernel_size, ...)
    conv_params = parse_conv_layers(conv_layers)
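    # parse_conv_layers is defined elsewhere; a plausible sketch consistent
    # with the comment format above (defaulting the optional pool_size to 2
    # is an assumption):
    #
    #     def parse_conv_layers(spec):
    #         """Parse '3-32-2,5-64' into [(3, 32, 2), (5, 64, 2)]."""
    #         out = []
    #         for part in spec.split(','):
    #             nums = [int(p) for p in part.split('-')]
    #             if len(nums) == 2:
    #                 nums.append(2)  # assumed default pool size
    #             out.append(tuple(nums))
    #         return out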

    i = 0
    for param_set in conv_params:
        if i == 0:
            convolutions = layers.Conv1D(
                param_set[1],
                param_set[0],
                padding='same',
                activation='relu',
                bias_initializer='glorot_uniform')(input_numeric)
        else:
            convolutions = layers.Conv1D(
                param_set[1],
                param_set[0],
                padding='same',
                activation='relu',
                bias_initializer='glorot_uniform')(convolutions)
        convolutions = layers.MaxPool1D(param_set[2])(convolutions)
        i += 1

    feature_layer = layers.Flatten()(convolutions)

    # Merge the inputs together and end our encoding with fully connected layers
    encoded = layers.Dense(l1,
                           bias_initializer='glorot_uniform')(feature_layer)
    encoded = layers.LeakyReLU()(encoded)
    encoded = layers.Dense(l2,
                           bias_initializer='glorot_uniform',
                           name='FinalEncoding')(encoded)
    encoded = layers.LeakyReLU()(encoded)

    ## Implement the input switch (see above).
    # def oneminus(tensor):
    #     one = keras.backend.ones(shape=(1,))
    #     return layers.subtract([one, tensor])
    # input_switch_complement = layers.Lambda(oneminus)(input_switch)
    ##input_switch_complement = layers.Subtract()([one, input_switch])

    encoded = layers.Multiply()([encoded, input_switch_complement])
    enc_in = layers.Multiply()([encoder_input, input_switch])
    encoded = layers.Add(name='SwitchedEncoding')([encoded, enc_in])

    # At this point, the representation is the most encoded and small; now let's build the decoder
    decoded = layers.Dense(l1, bias_initializer='glorot_uniform')(encoded)
    decoded = layers.LeakyReLU()(decoded)
    decoded = layers.Dense(convolutions.shape[1] * convolutions.shape[2],
                           bias_initializer='glorot_uniform')(decoded)
    decoded = layers.LeakyReLU()(decoded)

    decoded = layers.Reshape((convolutions.shape[1], convolutions.shape[2]),
                             name='UnFlatten')(decoded)

    for param_set in reversed(conv_params):
        i -= 1
        decoded = layers.UpSampling1D(param_set[2])(decoded)
        if i == 0:
            decoded = layers.Conv1D(nreg,
                                    param_set[0],
                                    padding='same',
                                    activation='linear',
                                    bias_initializer='glorot_uniform',
                                    name=dec_out_name)(decoded)
        else:
            decoded = layers.Conv1D(conv_params[i - 1][1],
                                    param_set[0],
                                    padding='same',
                                    activation='relu',
                                    bias_initializer='glorot_uniform')(decoded)

    # This is our actual output, the GDP prediction
    merged_layer = layers.Concatenate()(
        [encoded, input_time, input_gdp_prev, input_gas, input_petrol])

    gdp_hidden_layer = layers.Dense(lgdp1, name='GDP_Hidden')(merged_layer)
    gdp_hidden_layer = layers.LeakyReLU()(gdp_hidden_layer)
    if lgdp2 > 0:
        gdp_hidden_layer = layers.Dense(
            lgdp2,
            name='GDP_Hidden2',
            kernel_regularizer=keras.regularizers.l1(0))(gdp_hidden_layer)
        gdp_hidden_layer = layers.LeakyReLU()(gdp_hidden_layer)

    output = layers.Dense(1, activation='linear',
                          name=gdp_out_name)(gdp_hidden_layer)

    autoencoder = keras.models.Model(inputs=[
        input_numeric, input_time, input_gdp_prev, input_gas, input_petrol,
        input_switch, input_switch_complement, encoder_input
    ],
                                     outputs=[output, decoded])

    return autoencoder
Example #15
    def buildMyModelV2(self, shape, n_cls):
        model = Sequential()
        model.add(layers.Conv2D(32, (5, 5), input_shape=shape,
                                kernel_regularizer=l2(0.01)))
        model.add(layers.BatchNormalization())
        model.add(layers.Activation('relu'))
        model.add(layers.Conv2D(32, (5, 5), activation='relu',
                                kernel_regularizer=l2(0.01)))
        model.add(layers.MaxPooling2D())
        model.add(layers.Dropout(0.25))
        layer05 = layers.Dense(128, activation='sigmoid',
                               kernel_regularizer=l2(0.01))(
            layers.Flatten()(model.output))
        model.add(layers.Conv2D(64, (5, 5),
                                kernel_regularizer=l2(0.01)))
        model.add(layers.BatchNormalization())
        model.add(layers.Activation('relu'))
        model.add(layers.Conv2D(64, (3, 3), activation='relu',
                                kernel_regularizer=l2(0.01)))
        model.add(layers.MaxPooling2D())
        model.add(layers.Dropout(0.25))
        layer04 = layers.Dense(128, activation='sigmoid',
                               kernel_regularizer=l2(0.01))(
            layers.Flatten()(model.output))
        model.add(layers.Conv2D(32, (3, 3),
                                kernel_regularizer=l2(0.01)))
        model.add(layers.BatchNormalization())
        model.add(layers.Activation('relu'))
        model.add(layers.Conv2D(32, (3, 3), activation='relu',
                                kernel_regularizer=l2(0.01)))
        model.add(layers.MaxPooling2D())
        model.add(layers.Dropout(0.25))
        model.add(layers.Flatten())
        layer03 = layers.Dense(128, activation='sigmoid',
                               kernel_regularizer=l2(0.01))(model.output)
        model.add(layers.Dense(512, activation='sigmoid',
                               kernel_regularizer=l2(0.01)))
        model.add(layers.Dropout(0.5))
        layer02 = model.output
        model.add(layers.Dense(128, activation='sigmoid',
                               kernel_regularizer=l2(0.01)))
        model.add(layers.Dropout(0.5))
        layer01 = model.output
        model.add(layers.Dense(n_cls, activation='softmax'))

        self.e_len = [128, 128, 128, 512, 128]
        self.input = model.input
        self.output = [layer01, model.output]

        input_a = layers.Input(shape=shape)
        input_p = layers.Input(shape=shape)
        input_n = layers.Input(shape=shape)

        layer05_model = Model(inputs=model.input, outputs=layer05)
        layer05_a = layer05_model(input_a)
        layer05_p = layer05_model(input_p)
        layer05_n = layer05_model(input_n)

        layer04_model = Model(inputs=model.input, outputs=layer04)
        layer04_a = layer04_model(input_a)
        layer04_p = layer04_model(input_p)
        layer04_n = layer04_model(input_n)

        layer03_model = Model(inputs=model.input, outputs=layer03)
        layer03_a = layer03_model(input_a)
        layer03_p = layer03_model(input_p)
        layer03_n = layer03_model(input_n)

        layer02_model = Model(inputs=model.input, outputs=layer02)
        layer02_a = layer02_model(input_a)
        layer02_p = layer02_model(input_p)
        layer02_n = layer02_model(input_n)

        embed_model = Model(inputs=model.input, outputs=layer01)
        embed_a = embed_model(input_a)
        embed_p = embed_model(input_p)
        embed_n = embed_model(input_n)

        output_a = model(input_a)
        output_p = model(input_p)
        output_n = model(input_n)

        concat = layers.Concatenate()(
            [layer05_a, layer05_p, layer05_n,
             layer04_a, layer04_p, layer04_n,
             layer03_a, layer03_p, layer03_n,
             layer02_a, layer02_p, layer02_n,
             embed_a, embed_p, embed_n,
             output_a, output_p, output_n])

        self.model = Model(
            inputs=[input_a, input_p, input_n], outputs=concat)
        return self
Example #16
    def buildMyModelV2(self, shape, n_cls):
        model = Sequential()
        model.add(
            layers.Conv2D(32, (3, 3), activation='sigmoid', input_shape=shape))
        model.add(layers.Conv2D(32, (3, 3), activation='sigmoid'))
        flattened_conv02 = layers.Flatten()(model.output)
        model.add(layers.MaxPooling2D())
        model.add(layers.Flatten())

        model.add(layers.Dense(128, activation='sigmoid'))

        self.extract_layer = 'dense_128_sigmoid'
        self.input = model.input
        self.output = model.output

        model.add(layers.Dense(n_cls, activation='softmax'))

        self.myModel = model

        input_a = layers.Input(shape=shape)
        input_p = layers.Input(shape=shape)
        input_n = layers.Input(shape=shape)

        output_p = model(input_p)

        embed_model = self.getModel()
        embedded_a = embed_model(input_a)
        embedded_p = embed_model(input_p)
        embedded_n = embed_model(input_n)

        embed_model = Model(inputs=model.input, outputs=flattened_conv02)
        embed_conv02_a = embed_model(input_a)
        embed_conv02_p = embed_model(input_p)
        embed_conv02_n = embed_model(input_n)

        def output_shape(input_shape):
            return input_shape[0], 1

        def cosine_distance(tensor_a, tensor_b):
            l2_norm_a = K.l2_normalize(tensor_a, axis=-1)
            l2_norm_b = K.l2_normalize(tensor_b, axis=-1)
            return 1 - K.sum(l2_norm_a * l2_norm_b, axis=-1, keepdims=True)
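        # NOTE: cosine_distance is defined but never used below; both the
        # "distance" and "divergence" Lambda layers apply the KL divergence.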

        def kullback_leibler_divergence(tensor_a, tensor_b):
            return K.sum(tensor_a * K.log(tensor_a / tensor_b),
                         axis=-1,
                         keepdims=True)

        divergence_layer = layers.Lambda(
            lambda tensors: kullback_leibler_divergence(
                tensors[0], tensors[1]),
            output_shape=output_shape)
        pos_divergence_conv02 = divergence_layer(
            [embed_conv02_a, embed_conv02_p])
        neg_divergence_conv02 = divergence_layer(
            [embed_conv02_a, embed_conv02_n])

        diver_concat = layers.Concatenate(axis=-1)(
            [pos_divergence_conv02, neg_divergence_conv02])

        distance_layer = layers.Lambda(
            lambda tensors: kullback_leibler_divergence(
                tensors[0], tensors[1]),
            output_shape=output_shape)
        pos_distance = distance_layer([embedded_a, embedded_p])
        neg_distance = distance_layer([embedded_a, embedded_n])

        dist_concat = layers.Concatenate(axis=-1)([pos_distance, neg_distance])

        self.model = Model(inputs=[input_a, input_p, input_n],
                           outputs=[output_p, dist_concat, diver_concat])
        return self
Example #17
def get_model_head_concat(DATA):
    embed_dim = 128  # Embedding size for each token
    num_heads = args.head_attention  # Number of attention heads
    ff_dim = 256  # Hidden layer size in feed forward network inside transformer
    # shape: (sequence length, )
    # first input
    input_creative_id = Input(shape=(None, ), name='creative_id')
    x1 = TokenAndPositionEmbedding(maxlen, NUM_creative_id + 1, embed_dim,
                                   DATA['creative_id_emb'])(input_creative_id)

    input_ad_id = Input(shape=(None, ), name='ad_id')
    x2 = TokenAndPositionEmbedding(maxlen, NUM_ad_id + 1, embed_dim,
                                   DATA['ad_id_emb'])(input_ad_id)

    input_product_id = Input(shape=(None, ), name='product_id')
    x3 = TokenAndPositionEmbedding(maxlen, NUM_product_id + 1, embed_dim,
                                   DATA['product_id_emb'])(input_product_id)

    input_advertiser_id = Input(shape=(None, ), name='advertiser_id')
    x4 = TokenAndPositionEmbedding(
        maxlen, NUM_advertiser_id + 1, embed_dim,
        DATA['advertiser_id_emb'])(input_advertiser_id)

    input_industry = Input(shape=(None, ), name='industry')
    x5 = TokenAndPositionEmbedding(maxlen, NUM_industry + 1, embed_dim,
                                   DATA['industry_emb'])(input_industry)

    input_product_category = Input(shape=(None, ), name='product_category')
    x6 = TokenAndPositionEmbedding(
        maxlen, NUM_product_category + 1, embed_dim,
        DATA['product_category_emb'])(input_product_category)

    # concat
    # x = x1 + x2 + x3
    x = layers.Concatenate(axis=1)([x1, x2, x3, x4, x5, x6])

    for _ in range(args.num_transformer):
        x = TransformerBlock(embed_dim, num_heads, ff_dim)(x)

    for _ in range(args.num_lstm):
        x = Bidirectional(LSTM(256, return_sequences=True))(x)
    x = layers.GlobalMaxPooling1D()(x)

    output_gender = Dense(2, activation='softmax', name='gender')(x)
    output_age = Dense(10, activation='softmax', name='age')(x)

    model = Model([
        input_creative_id, input_ad_id, input_product_id, input_advertiser_id,
        input_industry, input_product_category
    ], [output_gender, output_age])
    model.compile(optimizer=optimizers.Adam(1e-4),
                  loss={
                      'gender':
                      losses.CategoricalCrossentropy(from_logits=False),
                      'age': losses.CategoricalCrossentropy(from_logits=False)
                  },
                  loss_weights=[0.4, 0.6],
                  metrics=['accuracy'])
    model.summary()

    return model
Example #18
def get_nvae(x, z_dim, n_group_per_scale, n_ch=32, k_size=3):
    # Initialize the variables we will need
    bottom_up = []
    z_vec = []
    z_means = []
    z_log_vars = []
    delta_means = []
    delta_log_vars = []

    # Set the number of channels to be equal to initial_ch
    h = layers.Conv2D(n_ch,
                      k_size,
                      1,
                      padding='same',
                      kernel_initializer=initializer,
                      name='Initial_Conv2D')(x)

    # Bottom-up phase Encoder
    for idx, n_group in enumerate(n_group_per_scale):
        n_ch *= 2

        h = layers.Conv2D(n_ch,
                          k_size,
                          2,
                          padding='same',
                          kernel_initializer=initializer,
                          name='Conv2D_' + str(idx))(h)
        for group in range(n_group):
            h = EncoderBlock(K.int_shape(h)[-1],
                             name='EncoderBlock_' + str(idx) + '_' +
                             str(group))(h)
            bottom_up.append(h)

    # Top-down phase Decoder
    delta_mean, delta_log_var = GetStatistics(z_dim,
                                              name='initial_delta_stats')(
                                                  bottom_up.pop(-1))
    z = layers.Lambda(utils.sampling)([delta_mean, delta_log_var])

    # Append everything
    z_vec.append(z)
    z_means.append([])
    z_log_vars.append([])
    delta_means.append(delta_mean)
    delta_log_vars.append(delta_log_var)

    for idx, n_group in enumerate(n_group_per_scale[::-1]):
        if idx == 0:
            start = 1
            n_channels = K.int_shape(h)[-1]
            h = z
        else:
            start = 0
            n_channels = K.int_shape(h)[-1]

        for group in range(start, n_group):
            h = DecoderBlock(n_channels,
                             name='DecoderBlock_' + str(idx) + '_' +
                             str(group))(h)
            h_z = layers.Concatenate()([h, bottom_up.pop(-1)])

            n_channels = K.int_shape(h)[-1]

            z_mean, z_log_var = GetStatistics(z_dim,
                                              name='stats_' + str(idx) + '_' +
                                              str(group))(h)
            delta_mean, delta_log_var = GetStatistics(
                z_dim, name='delta_stats_' + str(idx) + '_' + str(group))(h_z)
            z = layers.Lambda(utils.sampling)(
                [z_mean + delta_mean, z_log_var + delta_log_var])

            # Append everything
            z_vec.append(z)
            z_means.append(z_mean)
            z_log_vars.append(z_log_var)
            delta_means.append(delta_mean)
            delta_log_vars.append(delta_log_var)

            h = layers.Concatenate()([h, z])

        n_ch //= 2  # integer division keeps the filter count an int
        h = layers.Conv2DTranspose(n_ch,
                                   k_size,
                                   2,
                                   padding='same',
                                   activation=activation,
                                   kernel_initializer=initializer,
                                   name='Conv2DT_' + str(idx))(h)

    h = DecoderBlock(n_ch, name='DecoderBlock_' + str(idx))(h)
    x_recon = layers.Conv2DTranspose(3,
                                     k_size,
                                     strides=1,
                                     padding='same',
                                     activation='sigmoid',
                                     kernel_initializer=initializer,
                                     name='Output')(h)

    # Define the two models (Encoder and NVAE)
    encoder = models.Model(x, [z_vec, delta_means, delta_log_vars])
    nvae = models.Model(x, x_recon)

    # Add the loss to the model
    nvae.add_loss(
        nvae_loss(x, x_recon, delta_means, delta_log_vars, z_means,
                  z_log_vars))

    return nvae, encoder
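A hedged usage sketch; initializer, activation, EncoderBlock, DecoderBlock, GetStatistics, and nvae_loss are module-level definitions not shown here:

x = layers.Input((64, 64, 3))
nvae, encoder = get_nvae(x, z_dim=20, n_group_per_scale=[2, 2])
nvae.compile(optimizer='adam')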
Example #19
def Trans_layer(input1, input2):
    x = Conv_layer(L.UpSampling2D()(input1), input2.shape[-1])
    x = BN_ReLU(x)
    x = L.Concatenate()([x, input2])
    return x
Example #20
def buildNetwork(nettype):

    unfreeze = False
    n_block, n_self = 3, 10

    l_in = layers.Input(shape=(None, ))
    l_mask = layers.Input(shape=(None, ))

    #transformer part
    #positional encodings for product and reagents, respectively
    l_pos = PositionLayer(EMBEDDING_SIZE)(l_mask)
    l_left_mask = MaskLayerLeft()(l_mask)

    #encoder
    l_voc = layers.Embedding(input_dim=vocab_size,
                             output_dim=EMBEDDING_SIZE,
                             input_length=None,
                             trainable=unfreeze)
    l_embed = layers.Add()([l_voc(l_in), l_pos])

    for layer in range(n_block):

        #self attention
        l_o = [
            SelfLayer(EMBEDDING_SIZE, KEY_SIZE, trainable=unfreeze)(
                [l_embed, l_embed, l_embed, l_left_mask])
            for i in range(n_self)
        ]

        l_con = layers.Concatenate()(l_o)
        l_dense = layers.TimeDistributed(layers.Dense(EMBEDDING_SIZE,
                                                      trainable=unfreeze),
                                         trainable=unfreeze)(l_con)
        if unfreeze: l_dense = layers.Dropout(rate=0.1)(l_dense)
        l_add = layers.Add()([l_dense, l_embed])
        l_att = LayerNormalization(trainable=unfreeze)(l_add)

        #position-wise
        l_c1 = layers.Conv1D(N_HIDDEN,
                             1,
                             activation='relu',
                             trainable=unfreeze)(l_att)
        l_c2 = layers.Conv1D(EMBEDDING_SIZE, 1, trainable=unfreeze)(l_c1)
        if unfreeze: l_c2 = layers.Dropout(rate=0.1)(l_c2)
        l_ff = layers.Add()([l_att, l_c2])
        l_embed = LayerNormalization(trainable=unfreeze)(l_ff)

    #end of Transformer's part
    l_encoder = l_embed

    #text-cnn part
    #https://github.com/deepchem/deepchem/blob/b7a6d3d759145d238eb8abaf76183e9dbd7b683c/deepchem/models/tensorgraph/models/text_cnn.py

    l_in2 = layers.Input(shape=(None, EMBEDDING_SIZE))

    kernel_sizes = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 15, 20]
    num_filters = [100, 200, 200, 200, 200, 100, 100, 100, 100, 100, 160, 160]

    l_pool = []
    for i in range(len(kernel_sizes)):
        l_conv = layers.Conv1D(num_filters[i],
                               kernel_size=kernel_sizes[i],
                               padding='valid',
                               kernel_initializer='normal',
                               activation='relu')(l_in2)
        l_maxpool = layers.Lambda(lambda x: tf.reduce_max(x, axis=1))(l_conv)
        l_pool.append(l_maxpool)

    l_cnn = layers.Concatenate(axis=1)(l_pool)
    l_cnn_drop = layers.Dropout(rate=0.25)(l_cnn)

    #dense part
    l_dense = layers.Dense(N_HIDDEN_CNN, activation='relu')(l_cnn_drop)

    #https://github.com/ParikhKadam/Highway-Layer-Keras
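    # Highway layer: y = T(x) * H(x) + (1 - T(x)) * x, where T is the sigmoid
    # transform gate and H the nonlinear transform; the bias init of -1
    # starts the gate mostly closed, so the identity path dominates early on.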
    transform_gate = layers.Dense(
        units=N_HIDDEN_CNN,
        activation="sigmoid",
        bias_initializer=tf.keras.initializers.Constant(-1))(l_dense)

    carry_gate = layers.Lambda(lambda x: 1.0 - x,
                               output_shape=(N_HIDDEN_CNN, ))(transform_gate)
    transformed_data = layers.Dense(units=N_HIDDEN_CNN,
                                    activation="relu")(l_dense)
    transformed_gated = layers.Multiply()([transform_gate, transformed_data])
    identity_gated = layers.Multiply()([carry_gate, l_dense])

    l_highway = layers.Add()([transformed_gated, identity_gated])

    if nettype == "regression":
        l_out = layers.Dense(1, activation='linear',
                             name="Regression")(l_highway)
        mdl = tf.keras.Model([l_in2], l_out)
        mdl.compile(optimizer='adam', loss='mse', metrics=['mse'])
    else:
        l_out = layers.Dense(2, activation='softmax',
                             name="Classification")(l_highway)
        mdl = tf.keras.Model([l_in2], l_out)
        mdl.compile(optimizer='adam',
                    loss='binary_crossentropy',
                    metrics=['acc'])

    K.set_value(mdl.optimizer.lr, 1.0e-4)

    encoder = tf.keras.Model([l_in, l_mask], l_encoder)
    encoder.compile(optimizer='adam', loss='mse')
    encoder.set_weights(np.load("embeddings.npy", allow_pickle=True))

    return mdl, encoder
Example #21
def unet(input_shape: Tuple[int, int, int]) -> tf.keras.Model:

    input_layer = layers.Input(input_shape)
    x = input_layer

    # Encoder
    x = layers.Conv2D(32, 3, padding='same', kernel_initializer='he_normal')(x)
    x = layers.ReLU()(x)
    x = layers.Conv2D(32, 3, padding='same', kernel_initializer='he_normal')(x)
    x = layers.ReLU()(x)
    skip1 = x
    x = layers.MaxPool2D(2)(x)

    x = layers.Conv2D(64, 3, padding='same', kernel_initializer='he_normal')(x)
    x = layers.ReLU()(x)
    x = layers.Conv2D(64, 3, padding='same', kernel_initializer='he_normal')(x)
    x = layers.ReLU()(x)
    skip2 = x
    x = layers.MaxPool2D(2)(x)

    x = layers.Conv2D(128, 3, padding='same',
                      kernel_initializer='he_normal')(x)
    x = layers.ReLU()(x)
    x = layers.Conv2D(128, 3, padding='same',
                      kernel_initializer='he_normal')(x)
    x = layers.ReLU()(x)
    skip3 = x
    x = layers.MaxPool2D(2)(x)

    # Bottleneck
    x = layers.Conv2D(256, 3, padding='same',
                      kernel_initializer='he_normal')(x)
    x = layers.ReLU()(x)
    x = layers.Conv2D(256, 3, padding='same',
                      kernel_initializer='he_normal')(x)
    x = layers.ReLU()(x)

    # Decoder
    x = layers.UpSampling2D(2)(x)
    x = layers.Concatenate(axis=-1)([x, skip3])
    x = layers.Conv2D(128, 3, padding='same',
                      kernel_initializer='he_normal')(x)
    x = layers.ReLU()(x)
    x = layers.Conv2D(128, 3, padding='same',
                      kernel_initializer='he_normal')(x)
    x = layers.ReLU()(x)

    x = layers.UpSampling2D(2)(x)
    x = layers.Concatenate(axis=-1)([x, skip2])
    x = layers.Conv2D(64, 3, padding='same', kernel_initializer='he_normal')(x)
    x = layers.ReLU()(x)
    x = layers.Conv2D(64, 3, padding='same', kernel_initializer='he_normal')(x)
    x = layers.ReLU()(x)

    x = layers.UpSampling2D(2)(x)
    x = layers.Concatenate(axis=-1)([x, skip1])
    x = layers.Conv2D(32, 3, padding='same', kernel_initializer='he_normal')(x)
    x = layers.ReLU()(x)
    x = layers.Conv2D(32, 3, padding='same', kernel_initializer='he_normal')(x)
    x = layers.ReLU()(x)

    x = layers.Conv2D(1,
                      1,
                      activation='sigmoid',
                      padding='same',
                      kernel_initializer='he_normal')(x)

    model = tf.keras.Model(input_layer, x)
    return model
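A hedged usage sketch (input size assumed; any height/width divisible by 8 works with the three pooling stages):

model = unet((256, 256, 1))
model.compile(optimizer='adam', loss='binary_crossentropy')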
Example #22
def create_model(data, catcols):
    """
    This function returns a compiled tf.keras model for entity embedding
    """
    # init list of inputs for embedding
    inputs = []

    # init list of outputs for embedding
    outputs = []

    # loop over all categorical columns
    for c in catcols:
        # find the number of unique values in column
        num_unique_values = int(data[c].nunique())
        # simple embedding-dimension heuristic:
        # half the number of unique values, capped at 50.
        # 50 is usually sufficient, but if you have millions of
        # unique values you may need a larger dimension
        embed_dim = int(min(np.ceil((num_unique_values) / 2), 50))

        # simple keras input layer with size 1
        inp = layers.Input(shape=(1, ))

        # add embedding layer to raw input
        # the input dimension (vocabulary size) is one more than
        # the number of unique values in the column
        out = layers.Embedding(num_unique_values + 1, embed_dim, name=c)(inp)

        # 1-d spatial dropout is the standard for embedding layers
        # you can use it in NLP tasks too
        out = layers.SpatialDropout1D(0.3)(out)

        # reshape to a flat vector of size embed_dim
        # this becomes our output layer for the current feature
        out = layers.Reshape(target_shape=(embed_dim, ))(out)

        # add input to input list
        inputs.append(inp)

        # add output to output list
        outputs.append(out)

    # concatenate all output layers
    x = layers.Concatenate()(outputs)

    # add a batchnorm layer
    x = layers.BatchNormalization()(x)

    # a bunch of dense layers with dropout
    # start with 1 or two layers only
    x = layers.Dense(300, activation='relu')(x)
    x = layers.Dropout(0.3)(x)
    x = layers.BatchNormalization()(x)

    # a single linear output unit (regression target)
    y = layers.Dense(1, activation="linear")(x)

    # create final model
    model = Model(inputs=inputs, outputs=y)

    opt = SGD(lr=0.01, momentum=0.9)

    # compile the model
    # we use SGD with momentum and mean squared logarithmic error
    model.compile(loss='mean_squared_logarithmic_error', optimizer=opt)

    return model
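A hedged usage sketch (dataframe and column names hypothetical; categorical columns are assumed to be label-encoded integers already):

import pandas as pd

df = pd.DataFrame({'cat_a': [0, 1, 2, 1], 'cat_b': [0, 0, 1, 1],
                   'target': [1.0, 3.0, 2.0, 5.0]})
model = create_model(df, ['cat_a', 'cat_b'])
model.fit([df['cat_a'].values, df['cat_b'].values],
          df['target'].values, epochs=2)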
Example #23
def efficientdet(phi,
                 num_classes=20,
                 num_anchors=9,
                 weighted_bifpn=False,
                 freeze_bn=False,
                 score_threshold=0.01,
                 detect_quadrangle=False,
                 anchor_parameters=None):
    assert phi in range(7)
    input_size = image_sizes[phi]
    input_shape = (input_size, input_size, 3)
    # input_shape = (None, None, 3)
    image_input = layers.Input(input_shape)
    w_bifpn = w_bifpns[phi]
    d_bifpn = 2 + phi
    w_head = w_bifpn
    d_head = 3 + int(phi / 3)
    backbone_cls = backbones[phi]
    # features = backbone_cls(include_top=False, input_shape=input_shape, weights=weights)(image_input)
    features = backbone_cls(input_tensor=image_input, freeze_bn=freeze_bn)
    if weighted_bifpn:
        for i in range(d_bifpn):
            features = build_wBiFPN(features, w_bifpn, i, freeze_bn=freeze_bn)
    else:
        for i in range(d_bifpn):
            features = build_BiFPN(features, w_bifpn, i, freeze_bn=freeze_bn)
    regress_head = build_regress_head(w_head,
                                      d_head,
                                      num_anchors=num_anchors,
                                      detect_quadrangle=detect_quadrangle)
    class_head = build_class_head(w_head,
                                  d_head,
                                  num_classes=num_classes,
                                  num_anchors=num_anchors)
    regression = [regress_head(feature) for feature in features]
    regression = layers.Concatenate(axis=1, name='regression')(regression)
    classification = [class_head(feature) for feature in features]
    classification = layers.Concatenate(axis=1,
                                        name='classification')(classification)

    model = models.Model(inputs=[image_input],
                         outputs=[regression, classification],
                         name='efficientdet')

    # apply predicted regression to anchors

    # anchors_input = layers.Input((None, 4))
    anchors = anchors_for_shape((input_size, input_size),
                                anchor_params=anchor_parameters)
    anchors_input = np.expand_dims(anchors, axis=0)
    boxes = RegressBoxes(name='boxes')([anchors_input, regression[..., :4]])
    boxes = ClipBoxes(name='clipped_boxes')([image_input, boxes])

    # filter detections (apply NMS / score threshold / select top-k)
    if detect_quadrangle:
        detections = FilterDetections(name='filtered_detections',
                                      score_threshold=score_threshold,
                                      detect_quadrangle=True)([
                                          boxes, classification,
                                          regression[..., 4:8],
                                          regression[..., 8]
                                      ])
    else:
        detections = FilterDetections(name='filtered_detections',
                                      score_threshold=score_threshold)(
                                          [boxes, classification])

    prediction_model = models.Model(inputs=[image_input],
                                    outputs=detections,
                                    name='efficientdet_p')
    return model, prediction_model
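A hedged usage sketch; image_sizes, w_bifpns, backbones, and the BiFPN/head builders are module-level definitions not shown here:

model, prediction_model = efficientdet(phi=0, num_classes=20,
                                       weighted_bifpn=True)
model.summary()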
Example #24
def get_fully_conv_unet(n_ch, patch_height, patch_width):
    chan = 'channels_first'
    inputs = layers.Input(shape=(n_ch, patch_height, patch_width))

    conv1 = layers.Conv2D(32, (3, 3),
                          activation='relu',
                          padding='same',
                          data_format=chan)(inputs)
    conv1 = layers.Dropout(0.2)(conv1)
    conv1_strided = layers.Conv2D(32, (3, 3),
                                  activation='relu',
                                  padding='same',
                                  strides=2,
                                  data_format=chan)(conv1)

    conv2 = layers.Conv2D(64, (3, 3),
                          activation='relu',
                          padding='same',
                          data_format=chan)(conv1_strided)
    conv2 = layers.Dropout(0.2)(conv2)
    conv2_strided = layers.Conv2D(64, (3, 3),
                                  activation='relu',
                                  padding='same',
                                  strides=2,
                                  data_format=chan)(conv2)

    conv3 = layers.Conv2D(128, (3, 3),
                          activation='relu',
                          padding='same',
                          data_format=chan)(conv2_strided)
    conv3 = layers.Dropout(0.2)(conv3)
    conv3 = layers.Conv2D(128, (3, 3),
                          activation='relu',
                          padding='same',
                          data_format=chan)(conv3)

    up1 = layers.UpSampling2D(size=(2, 2), data_format=chan)(conv3)
    up1 = layers.Concatenate(axis=1)([conv2, up1])

    conv4 = layers.Conv2D(64, (3, 3),
                          activation='relu',
                          padding='same',
                          data_format=chan)(up1)
    conv4 = layers.Dropout(0.2)(conv4)
    conv4 = layers.Conv2D(64, (3, 3),
                          activation='relu',
                          padding='same',
                          data_format=chan)(conv4)

    up2 = layers.UpSampling2D(size=(2, 2), data_format=chan)(conv4)
    up2 = layers.Concatenate(axis=1)([conv1, up2])

    conv5 = layers.Conv2D(32, (3, 3),
                          activation='relu',
                          padding='same',
                          data_format=chan)(up2)
    conv5 = layers.Dropout(0.2)(conv5)
    conv5 = layers.Conv2D(32, (3, 3),
                          activation='relu',
                          padding='same',
                          data_format=chan)(conv5)

    conv6 = layers.Conv2D(2, (1, 1),
                          activation='relu',
                          padding='same',
                          data_format=chan)(conv5)
    conv6 = layers.Reshape((2, patch_height * patch_width))(conv6)
    conv6 = layers.Permute((2, 1))(conv6)

    conv7 = layers.Activation('softmax')(conv6)

    model = keras.Model(inputs=inputs, outputs=conv7)
    model.summary()

    adam = tf.keras.optimizers.Adam(
        lr=0.001,
        decay=.01)  #, beta_1=0.9, beta_2=0.999, epsilon=0.1, decay=0.0)
    model.compile(loss='categorical_crossentropy',
                  optimizer=adam,
                  metrics=['accuracy'])

    return model
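A hedged usage sketch (patch size assumed; it must be divisible by 4 for the two stride-2 stages):

model = get_fully_conv_unet(n_ch=1, patch_height=48, patch_width=48)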
Example #25
                           activation='relu',
                           name='block5_conv1')(pool4)
    block5 = layers.Conv2D(512,
                           3,
                           padding='same',
                           activation='relu',
                           name='block5_conv2')(block5)
    block5 = layers.Conv2D(512,
                           3,
                           padding='same',
                           activation='relu',
                           name='block5_conv3')(block5)

    unpool1 = layers.Conv2DTranspose(512, 4, strides=(2, 2),
                                     padding='same')(block5)
    concat1 = layers.Concatenate(axis=3)([unpool1, block4])
    block6 = layers.Conv2D(512,
                           3,
                           padding='same',
                           activation='relu',
                           name='block6_conv1')(concat1)
    block6 = layers.Conv2D(512,
                           3,
                           padding='same',
                           activation='relu',
                           name='block6_conv2')(block6)
    block6 = layers.Conv2D(512,
                           3,
                           padding='same',
                           activation='relu',
                           name='block6_conv3')(block6)
Example #26
def attend(n_ch, patch_height, patch_width):
    chan = 'channels_first'
    inputs = layers.Input(shape=(n_ch, patch_height, patch_width))

    conv1 = layers.Conv2D(32, (3, 3),
                          activation='relu',
                          padding='same',
                          data_format=chan)(inputs)
    conv1 = layers.Dropout(0.2)(conv1)
    conv1_strided = layers.Conv2D(32, (3, 3),
                                  activation='relu',
                                  padding='same',
                                  strides=2,
                                  data_format=chan)(conv1)

    conv2 = layers.Conv2D(64, (3, 3),
                          activation='relu',
                          padding='same',
                          data_format=chan)(conv1_strided)
    conv2 = layers.Dropout(0.2)(conv2)
    conv2_strided = layers.Conv2D(64, (3, 3),
                                  activation='relu',
                                  padding='same',
                                  strides=2,
                                  data_format=chan)(conv2)

    #     conv3 = layers.Conv2D(128, (3, 3), activation='relu', padding='same',data_format=chan)(conv2_strided)
    #     conv3 = layers.Dropout(0.2)(conv3)
    #     conv3 = layers.Conv2D(128, (3, 3), activation='relu', padding='same',data_format=chan)(conv3)
    print(conv2_strided)
    roll = tf.transpose(conv2_strided, [0, 2, 3, 1])
    att = keras.layers.Lambda(
        lambda x: multihead_attention_2d(inputs=x,
                                         total_key_filters=64,
                                         total_value_filters=64,
                                         output_filters=128,
                                         num_heads=8,
                                         training=True,
                                         layer_type='SAME'))(roll)
    roll = tf.transpose(att, [0, 3, 1, 2])
    #up2 = UpSampling2D(size=(2, 2), data_format='channels_last')(conv4)
    #     up2 = keras.layers.Concatenate(axis=3)([conv1, roll])

    up1 = layers.UpSampling2D(size=(2, 2), data_format=chan)(roll)
    up1 = layers.Concatenate(axis=1)([conv2, up1])
    print('shape up1', up1.shape)

    conv4 = layers.Conv2D(64, (3, 3),
                          activation='relu',
                          padding='same',
                          data_format=chan)(up1)
    conv4 = layers.Dropout(0.2)(conv4)
    conv4 = layers.Conv2D(64, (3, 3),
                          activation='relu',
                          padding='same',
                          data_format=chan)(conv4)

    up2 = layers.UpSampling2D(size=(2, 2), data_format=chan)(conv4)
    print('shape up2', up2.shape)
    up2 = layers.Concatenate(axis=1)([conv1, up2])

    conv5 = layers.Conv2D(32, (3, 3),
                          activation='relu',
                          padding='same',
                          data_format=chan)(up2)
    conv5 = layers.Dropout(0.2)(conv5)
    conv5 = layers.Conv2D(32, (3, 3),
                          activation='relu',
                          padding='same',
                          data_format=chan)(conv5)

    conv6 = layers.Conv2D(2, (1, 1),
                          activation='relu',
                          padding='same',
                          data_format=chan)(conv5)
    conv6 = layers.Reshape((2, patch_height * patch_width))(conv6)
    conv6 = layers.Permute((2, 1))(conv6)

    conv7 = layers.Activation('softmax')(conv6)
    print(conv7)
    model = keras.Model(inputs=inputs, outputs=conv7)
    model.summary()

    adam = tf.keras.optimizers.Adam(
        lr=0.001,
        decay=.01)  #, beta_1=0.9, beta_2=0.999, epsilon=0.1, decay=0.0)
    model.compile(loss='categorical_crossentropy',
                  optimizer=adam,
                  metrics=['accuracy'])

    return model
Example #27
def HFVAE(x, z_dim, n_group_per_scale, initial_ch=32, k_size=3):

    # Bottom-up phase encoder
    h = layers.Conv2D(initial_ch,
                      kernel_size=1,
                      strides=1,
                      padding='same',
                      kernel_regularizer=reg,
                      kernel_initializer=initializer,
                      name='Conv2D_0')(x)
    n_ch = initial_ch
    levels = []

    for g, n_group in enumerate(n_group_per_scale):
        n_ch *= 2

        h = layers.Conv2D(n_ch,
                          kernel_size=k_size,
                          strides=2,
                          padding='same',
                          kernel_regularizer=reg,
                          kernel_initializer=initializer,
                          name='Conv2D_s2_' + str(g))(h)
        for group in range(n_group):
            locname = str(g) + "_" + str(group)
            h = EncoderBlock(1, 1, n_ch, name='EncoderBlock_' + locname)(h)
            levels.append(h)

    levels.reverse()
    n_group_per_scale.reverse()

    # Top-down phase encoder

    latent_variables = list()
    latent_stats = list()
    delta_stats = list()

    stats = layers.GlobalAveragePooling2D()(levels[0])
    delta_mean = layers.Dense(64, name='dense_delta_mean_0')(stats)
    delta_log_var = layers.Dense(64, name='dense_delta_log_var_0')(stats)

    z = layers.Lambda(sampling)([delta_mean, delta_log_var])
    latent_variables.append(z)
    latent_stats.append(())
    delta_stats.append((delta_mean, delta_log_var))
    dim = levels[0].shape[1]
    z = layers.Dense((dim * dim * n_ch), name='dense_z_orig')(z)
    h = layers.Reshape((dim, dim, n_ch))(z)

    level = 1
    first = True

    for g, n_group in enumerate(n_group_per_scale):
        if first:
            start = 1
            first = False
        else:
            start = 0
        for group in range(start, n_group):
            #print(n_group,group,h.shape)
            locname = str(g) + "_" + str(group)

            h = DecoderBlock(1, 1, n_ch, name='DecoderBlock_' + locname)(h)

            h_z = layers.Concatenate()([h, levels[level]])
            #print("shapes = ", h.shape, levels[level].shape)
            level = level + 1

            z_stats = layers.GlobalAveragePooling2D()(h)
            d_stats = layers.GlobalAveragePooling2D()(h_z)

            z_mean = layers.Dense(z_dim,
                                  name='dense_z_mean' + locname)(z_stats)
            z_log_var = layers.Dense(z_dim,
                                     name='dense_z_log_var' + locname)(z_stats)
            delta_mean = layers.Dense(z_dim, name='dense_delta_mean' +
                                      locname)(d_stats)
            delta_log_var = layers.Dense(z_dim,
                                         name='dense_delta_log_var' +
                                         locname)(d_stats)

            z = layers.Lambda(sampling)(
                [z_mean + delta_mean, z_log_var + delta_log_var])
            latent_variables.append(z)
            latent_stats.append((z_mean, z_log_var))
            delta_stats.append((delta_mean, delta_log_var))

            h = Film((K.int_shape(h)[3]), name='Film' + locname)([h, z])
            h = layers.Conv2D(n_ch,
                              kernel_size=k_size,
                              strides=1,
                              padding='same',
                              kernel_regularizer=reg,
                              kernel_initializer=initializer,
                              name='Conv2D_1s_' + locname)(h)

        dim = dim * 2
        n_ch = n_ch // 2
        h = layers.Conv2DTranspose(n_ch,
                                   k_size,
                                   strides=2,
                                   padding='same',
                                   activation=act,
                                   kernel_regularizer=reg,
                                   kernel_initializer=initializer,
                                   name='Conv2DT_2s_' + str(g))(h)

    x_recon = layers.Conv2DTranspose(3,
                                     k_size,
                                     strides=1,
                                     padding='same',
                                     activation='sigmoid',
                                     kernel_initializer=initializer,
                                     name='Output')(h)

    encoder = models.Model(x, [latent_variables, delta_stats, latent_stats])
    hfvae = models.Model(x, x_recon)

    # adding loss

    x_true = K.reshape(x, (-1, np.prod(input_shape)))
    x_pred = K.reshape(x_recon, (-1, np.prod(input_shape)))
    L_recon = K.sum(K.square(x_true - x_pred), axis=-1)

    hfvae.add_loss(L_recon)

    for i in range(len(latent_stats)):
        delta_mean, delta_log_var = delta_stats[i]
        if i == 0:
            hfvae.add_loss(0.5 * gamma *
                           K.sum(K.square(delta_mean) + K.exp(delta_log_var) -
                                 1 - delta_log_var,
                                 axis=-1))
        else:
            z_mean, z_log_var = latent_stats[i]
            hfvae.add_loss(0.5 * gamma *
                           K.sum(K.square(delta_mean) / K.exp(z_log_var) +
                                 K.exp(delta_log_var) - 1 - delta_log_var,
                                 axis=-1))

    return hfvae, encoder, latent_variables, latent_stats, delta_stats
Example #28
def ASPP(img_input):
    # atrous spatial pyramid pooling
    dims = keras.backend.int_shape(img_input)

    # pool_1x1conv2d
    img_pool = layers.AveragePooling2D(pool_size=(dims[1], dims[2]),
                                       name='average_pooling')(img_input)
    img_pool = layers.Conv2D(filters=256,
                             kernel_size=1,
                             padding='same',
                             kernel_initializer='he_normal',
                             name='pool_1x1conv2d',
                             use_bias=False)(img_pool)
    img_pool = layers.BatchNormalization(name='bn_1')(img_pool)
    img_pool = layers.Activation('relu', name='relu_1')(img_pool)

    img_pool = Upsample(tensor=img_pool, size=[dims[1], dims[2]])

    # atrous 1
    y_1 = layers.Conv2D(filters=256,
                        kernel_size=1,
                        dilation_rate=1,
                        padding='same',
                        kernel_initializer='he_normal',
                        name='ASPP_conv2d_d1',
                        use_bias=False)(img_input)
    y_1 = layers.BatchNormalization(name='bn_2')(y_1)
    y_1 = layers.Activation('relu', name='relu_2')(y_1)

    # atrous 6
    y_6 = layers.Conv2D(filters=256,
                        kernel_size=3,
                        dilation_rate=6,
                        padding='same',
                        kernel_initializer='he_normal',
                        name='ASPP_conv2d_d6',
                        use_bias=False)(img_input)
    y_6 = layers.BatchNormalization(name='bn_3')(y_6)
    y_6 = layers.Activation('relu', name='relu_3')(y_6)

    # atrous 12
    y_12 = layers.Conv2D(filters=256,
                         kernel_size=3,
                         dilation_rate=12,
                         padding='same',
                         kernel_initializer='he_normal',
                         name='ASPP_conv2d_d12',
                         use_bias=False)(img_input)
    y_12 = layers.BatchNormalization(name='bn_4')(y_12)
    y_12 = layers.Activation('relu', name='relu_4')(y_12)

    # atrous 18
    y_18 = layers.Conv2D(filters=256,
                         kernel_size=3,
                         dilation_rate=18,
                         padding='same',
                         kernel_initializer='he_normal',
                         name='ASPP_conv2d_d18',
                         use_bias=False)(img_input)
    y_18 = layers.BatchNormalization(name='bn_5')(y_18)
    y_18 = layers.Activation('relu', name='relu_5')(y_18)

    # concatenate sampled layers
    y = layers.Concatenate(name='ASPP_concat')(
        [img_pool, y_1, y_6, y_12, y_18])

    y = layers.Conv2D(filters=256,
                      kernel_size=1,
                      dilation_rate=1,
                      padding='same',
                      kernel_initializer='he_normal',
                      name='ASPP_conv2d_final',
                      use_bias=False)(y)
    y = layers.BatchNormalization(name='bn_final')(y)
    y = layers.Activation('relu', name='relu_final')(y)

    return y
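
The function above relies on an Upsample helper defined elsewhere in the project. A minimal sketch of what such a helper could look like, assuming a bilinear tf.image resize wrapped in a Lambda layer (the name and signature here are assumptions, not the project's actual code):

import tensorflow as tf
from tensorflow.keras import layers

def Upsample(tensor, size):
    # Bilinear resize back to `size` so the pooled branch matches the
    # spatial dimensions of the atrous branches before concatenation.
    return layers.Lambda(lambda x, s=tuple(size): tf.image.resize(x, s),
                         name='pool_upsample')(tensor)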
Example #29
File: ans.py Project: wyj-fps/TAAC2020
def get_age_model(DATA):

    feed_forward_size = 2048
    max_seq_len = 150
    model_dim = 256 + 256 + 64 + 32 + 8 + 16

    input_creative_id = Input(shape=(max_seq_len, ), name='creative_id')
    x1 = Embedding(
        input_dim=NUM_creative_id + 1,
        output_dim=256,
        weights=[DATA['creative_id_emb']],
        trainable=args.not_train_embedding,
        #    trainable=False,
        input_length=150,
        mask_zero=True)(input_creative_id)
    # encodings = PositionEncoding(model_dim)(x1)
    # encodings = Add()([embeddings, encodings])

    input_ad_id = Input(shape=(max_seq_len, ), name='ad_id')
    x2 = Embedding(
        input_dim=NUM_ad_id + 1,
        output_dim=256,
        weights=[DATA['ad_id_emb']],
        trainable=args.not_train_embedding,
        #    trainable=False,
        input_length=150,
        mask_zero=True)(input_ad_id)

    input_product_id = Input(shape=(max_seq_len, ), name='product_id')
    x3 = Embedding(
        input_dim=NUM_product_id + 1,
        output_dim=32,
        weights=[DATA['product_id_emb']],
        trainable=args.not_train_embedding,
        #    trainable=False,
        input_length=150,
        mask_zero=True)(input_product_id)

    input_advertiser_id = Input(shape=(max_seq_len, ), name='advertiser_id')
    x4 = Embedding(
        input_dim=NUM_advertiser_id + 1,
        output_dim=64,
        weights=[DATA['advertiser_id_emb']],
        trainable=args.not_train_embedding,
        #    trainable=False,
        input_length=150,
        mask_zero=True)(input_advertiser_id)

    input_industry = Input(shape=(max_seq_len, ), name='industry')
    x5 = Embedding(
        input_dim=NUM_industry + 1,
        output_dim=16,
        weights=[DATA['industry_emb']],
        trainable=True,
        #    trainable=False,
        input_length=150,
        mask_zero=True)(input_industry)

    input_product_category = Input(shape=(max_seq_len, ),
                                   name='product_category')
    x6 = Embedding(
        input_dim=NUM_product_category + 1,
        output_dim=8,
        weights=[DATA['product_category_emb']],
        trainable=True,
        #    trainable=False,
        input_length=150,
        mask_zero=True)(input_product_category)

    # (bs, 150, 632): all six embeddings concatenated along the feature axis
    encodings = layers.Concatenate(axis=2)([x1, x2, x3, x4, x5, x6])
    # (bs, 150): True where the creative_id sequence is padding
    masks = tf.equal(input_creative_id, 0)

    # (bs, 150, 632): 8 attention heads of size 79 (8 * 79 = 632 = model_dim)
    attention_out = MultiHeadAttention(
        8, 79)([encodings, encodings, encodings, masks])

    # Add & Norm
    attention_out += encodings
    attention_out = LayerNormalization()(attention_out)
    # Feed-Forward
    ff = PositionWiseFeedForward(model_dim, feed_forward_size)
    ff_out = ff(attention_out)
    # Add & Norm
    # residual add: ff_out and attention_out are both (bs, 150, 632)
    ff_out += attention_out
    encodings = LayerNormalization()(ff_out)
    encodings = GlobalMaxPooling1D()(encodings)
    encodings = Dropout(0.2)(encodings)

    # output_gender = Dense(2, activation='softmax', name='gender')(encodings)
    output_age = Dense(10, activation='softmax', name='age')(encodings)

    model = Model(inputs=[
        input_creative_id, input_ad_id, input_product_id, input_advertiser_id,
        input_industry, input_product_category
    ],
                  outputs=[output_age])

    model.compile(
        optimizer=optimizers.Adam(2.5e-4),
        loss={
            # 'gender': losses.CategoricalCrossentropy(from_logits=False),
            'age': losses.CategoricalCrossentropy(from_logits=False)
        },
        # loss_weights=[0.4, 0.6],
        metrics=['accuracy'])
    return model
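
PositionWiseFeedForward, MultiHeadAttention, and LayerNormalization are custom layers defined elsewhere in this project. As a reference point, here is a minimal sketch of the standard Transformer position-wise feed-forward layer matching the (model_dim, feed_forward_size) call signature used above; this is the textbook formulation, not necessarily the project's exact code:

from tensorflow.keras import layers

class PositionWiseFeedForward(layers.Layer):
    def __init__(self, model_dim, hidden_dim, **kwargs):
        super().__init__(**kwargs)
        # Two dense projections applied independently at every position.
        self.dense_1 = layers.Dense(hidden_dim, activation='relu')
        self.dense_2 = layers.Dense(model_dim)

    def call(self, inputs):
        # (bs, seq, model_dim) -> (bs, seq, hidden_dim) -> (bs, seq, model_dim)
        return self.dense_2(self.dense_1(inputs))

Because the layer projects back to model_dim, the residual addition with attention_out is shape-safe.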
Example #30
File: model.py Project: qwoprocks/SAPD
def sapd(
    phi,
    soft_select=False,
    num_classes=20,
    freeze_bn=False,
    max_gt_boxes=100,
    batch_size=32,
    score_threshold=0.01,
):
    assert phi in range(7)
    image_size = image_sizes[phi]
    input_shape = (image_size, image_size, 3)
    # input_shape = (None, None, 3)
    image_input = layers.Input(input_shape)
    gt_boxes_input = layers.Input((max_gt_boxes, 5))
    num_gt_boxes_input = layers.Input((1, ), dtype='int32')
    fm_shapes_input = layers.Input((5, 2), dtype='int32')

    backbone_cls = backbones[phi]
    # (C1, C2, C3, C4, C5)
    features = backbone_cls(input_tensor=image_input, freeze_bn=freeze_bn)
    w_bifpn = w_bifpns[phi]
    d_bifpn = 2 + phi
    w_head = w_bifpn
    d_head = 3 + int(phi / 3)
    for i in range(d_bifpn):
        features = build_BiFPN(features, w_bifpn, i, freeze_bn=freeze_bn)
    regr_head = build_regress_head(w_head, d_head)
    cls_head = build_class_head(w_head, d_head, num_classes=num_classes)
    pyramid_features = features
    fpn_width = w_head
    cls_pred = [cls_head(feature) for feature in pyramid_features]
    cls_pred = layers.Concatenate(axis=1, name='classification')(cls_pred)
    regr_pred = [regr_head(feature) for feature in pyramid_features]
    regr_pred = layers.Concatenate(axis=1, name='regression')(regr_pred)

    # meta select net
    meta_select_net = build_meta_select_net(width=fpn_width)
    meta_select_input, gt_boxes_batch_ids = MetaSelectInput()(
        [gt_boxes_input, *pyramid_features])
    meta_select_pred = meta_select_net(meta_select_input)
    meta_select_target = MetaSelectTarget()(
        [cls_pred, regr_pred, fm_shapes_input, gt_boxes_input])
    # lambda == 0.1 in the paper
    meta_select_loss = layers.Lambda(
        lambda x: 0.1 * losses.sparse_categorical_crossentropy(x[0], x[1]),
        output_shape=(1, ),
        name="meta_select_loss")([meta_select_target, meta_select_pred])

    if soft_select:
        meta_select_weight = MetaSelectWeight(
            max_gt_boxes=max_gt_boxes,
            soft_select=soft_select,
            batch_size=batch_size,
        )([meta_select_pred, gt_boxes_batch_ids, num_gt_boxes_input])
    else:
        meta_select_weight = MetaSelectWeight(
            max_gt_boxes=max_gt_boxes,
            soft_select=soft_select,
            batch_size=batch_size,
        )([meta_select_target, gt_boxes_batch_ids, num_gt_boxes_input])

    cls_target, regr_target = SAPDTarget(num_classes=num_classes)(
        [fm_shapes_input, gt_boxes_input, meta_select_weight])

    focal_loss = focal_with_weight_and_mask()
    iou_loss = iou_with_weight_and_mask()
    cls_loss = layers.Lambda(focal_loss, output_shape=(1, ),
                             name="cls_loss")([cls_target, cls_pred])
    regr_loss = layers.Lambda(iou_loss, output_shape=(1, ),
                              name="regr_loss")([regr_target, regr_pred])

    model = models.Model(inputs=[
        image_input, gt_boxes_input, num_gt_boxes_input, fm_shapes_input
    ],
                         outputs=[
                             cls_loss, regr_loss, meta_select_loss, cls_pred,
                             regr_pred, cls_target, regr_target
                         ],
                         name='sapd')

    locations, strides = Locations()(pyramid_features)

    # decode the predicted regression offsets at each pyramid location
    boxes = RegressBoxes(name='boxes')([locations, strides, regr_pred])
    boxes = ClipBoxes(name='clipped_boxes')([image_input, boxes])

    # filter detections (apply NMS / score threshold / select top-k)
    detections = FilterDetections(name='filtered_detections',
                                  score_threshold=score_threshold)(
                                      [boxes, cls_pred])

    prediction_model = models.Model(inputs=[image_input],
                                    outputs=detections,
                                    name='sapd_p')

    return model, prediction_model
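
The builder returns two models over shared weights: `model` wires the targets and loss layers for training, while `prediction_model` consumes only an image and emits filtered detections. A hedged usage sketch (the optimizer, phi value, and identity-loss pattern are assumptions for illustration):

# Usage sketch only; the extra training inputs exist to feed the loss layers.
train_model, infer_model = sapd(phi=0, num_classes=20, batch_size=8)
train_model.compile(optimizer='adam',
                    loss={'cls_loss': lambda y_true, y_pred: y_pred,
                          'regr_loss': lambda y_true, y_pred: y_pred,
                          'meta_select_loss': lambda y_true, y_pred: y_pred})
# After training, run inference with the image-only model:
# boxes, scores, labels = infer_model.predict(images)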