Example #1
def create_model(input_size, num_classes, learning_rate, batch_size, min_value,
                 max_value, combining_weight, weight_flag, regularization_weight):
    ## Build a Sequential model based on the Basset architecture.
    ## learning_rate added to the signature: the optimizer below uses it.
    ## `losses` is a project-local module supplying the custom loss/metrics.
    import tensorflow
    from tensorflow.keras.layers import Conv1D, Dense, MaxPooling1D, Flatten, Dropout, Activation, BatchNormalization
    from tensorflow.keras.models import Sequential

    model = Sequential()
    model.add(
        Conv1D(
            filters=300,
            kernel_size=19,
            padding='same',
            input_shape=(input_size, 4)))
    model.add(Activation('relu'))
    model.add(MaxPooling1D(pool_size=3, strides=3, padding='same'))
    model.add(BatchNormalization())

    model.add(Conv1D(filters=200, kernel_size=11, padding='same'))
    model.add(Activation('relu'))
    model.add(MaxPooling1D(pool_size=4, strides=4, padding='same'))
    model.add(BatchNormalization())

    model.add(Conv1D(filters=200, kernel_size=7, padding='same'))
    model.add(Activation('relu'))
    model.add(MaxPooling1D(pool_size=4, strides=4, padding='same'))
    model.add(BatchNormalization())

    model.add(Flatten())
    model.add(Dense(1000))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Dropout(0.3))

    model.add(Dense(1000))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Dropout(0.3))

    model.add(Dense(num_classes))

    opt = tensorflow.keras.optimizers.Adam(learning_rate=learning_rate,
                                           beta_1=0.9,
                                           beta_2=0.999,
                                           epsilon=1e-8,
                                           amsgrad=False)  # `lr`/`decay` args are deprecated in TF2
    model.compile(loss=losses.combine_loss_by_sample(combining_weight),
                  optimizer=opt,
                  metrics=['mse', losses.pearson_loss, losses.r2_score])

    return model
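All of these examples compile against a project-local `losses` module that is not shown on this page. As a rough, hypothetical sketch of the interface they assume (the names match the calls above, but the implementations here are illustrative guesses, not the original code):

# Hypothetical sketch of the assumed `losses` module interface.
import tensorflow as tf

def pearson_loss(y_true, y_pred):
    # 1 - Pearson correlation over the batch (illustrative).
    yt = y_true - tf.reduce_mean(y_true)
    yp = y_pred - tf.reduce_mean(y_pred)
    denom = tf.norm(yt) * tf.norm(yp) + 1e-8
    return 1.0 - tf.reduce_sum(yt * yp) / denom

def r2_score(y_true, y_pred):
    # Coefficient of determination (illustrative).
    ss_res = tf.reduce_sum(tf.square(y_true - y_pred))
    ss_tot = tf.reduce_sum(tf.square(y_true - tf.reduce_mean(y_true))) + 1e-8
    return 1.0 - ss_res / ss_tot

def combine_loss_by_sample(weight):
    # Factory: returns a loss mixing MSE with the correlation term above.
    def loss(y_true, y_pred):
        mse = tf.reduce_mean(tf.square(y_true - y_pred))
        return mse + weight * pearson_loss(y_true, y_pred)
    return loss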
Example #2
def create_model(input_size, num_classes, learning_rate, combined_loss_weight):
    ## Build the same Basset-style model with the Keras functional API.
    import tensorflow as tf
    from tensorflow.keras.layers import Input, Conv1D, Dense, MaxPooling1D, Flatten, Dropout, Activation, BatchNormalization

    feature_input = Input(shape=(input_size, 4), name='input')
    nn = Conv1D(filters=300, kernel_size=19, padding='same',
                name='conv_1')(feature_input)
    nn = Activation('relu', name='relu_1')(nn)
    nn = MaxPooling1D(pool_size=3, strides=3, padding='same',
                      name='maxpool_1')(nn)
    nn = BatchNormalization(name='BN_1')(nn)

    nn = Conv1D(filters=200, kernel_size=11, padding='same', name='conv_2')(nn)
    nn = Activation('relu', name='relu_2')(nn)
    nn = MaxPooling1D(pool_size=4, strides=4, padding='same',
                      name='maxpool_2')(nn)
    nn = BatchNormalization(name='BN_2')(nn)

    nn = Conv1D(filters=200, kernel_size=7, padding='same', name='conv_3')(nn)
    nn = Activation('relu', name='relu_3')(nn)
    nn = MaxPooling1D(pool_size=4, strides=4, padding='same',
                      name='maxpool_3')(nn)
    nn = BatchNormalization(name='BN_3')(nn)

    nn = Flatten(name='flatten')(nn)

    nn = Dense(1000, name='dense_1')(nn)
    nn = Activation('relu', name='relu_4')(nn)
    nn = Dropout(0.3, name='dropout_1')(nn)

    nn = Dense(1000, name='dense_2')(nn)
    nn = Activation('relu', name='relu_5')(nn)
    nn = Dropout(0.3, name='dropout_2')(nn)

    result = Dense(num_classes, name='dense_out')(nn)

    model = tf.keras.models.Model(inputs=feature_input, outputs=result)

    opt = tf.keras.optimizers.Adam(learning_rate=learning_rate,
                                   beta_1=0.9,
                                   beta_2=0.999,
                                   epsilon=1e-8,
                                   amsgrad=False)

    model.compile(loss=losses.combine_loss_by_sample(combined_loss_weight),
                  optimizer=opt,
                  metrics=['mse', losses.pearson_loss])

    return model
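A quick smoke test of this functional variant might look like the following; the shapes are invented and it assumes the `losses` module sketched above is importable:

# Hypothetical usage: build the model on random one-hot-like sequences.
import numpy as np

model = create_model(input_size=600, num_classes=10,
                     learning_rate=1e-3, combined_loss_weight=0.01)
x = np.random.rand(8, 600, 4).astype('float32')  # 8 dummy sequences
y = np.random.rand(8, 10).astype('float32')      # 8 dummy target vectors
model.fit(x, y, batch_size=4, epochs=1)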
Example #3
def create_model(input_size,
                 num_classes_1,
                 num_classes_2,
                 batch_size,
                 learning_rate,
                 combining_weight,
                 weight_flag,
                 ratio=1.0,
                 percent_share=None):  # learning_rate added: used by the optimizer below
    ## Cross-stitch variant: two Basset-style branches that exchange features.
    import tensorflow as tf
    from tensorflow.keras.layers import Input, Conv1D, Dense, MaxPooling1D, Flatten, Dropout, Activation, BatchNormalization
    from tensorflow.keras.layers import Masking, Add, Multiply, RepeatVector, Reshape, Permute, Lambda

    conv1_dim, conv2_dim, conv3_dim, fc1_dim, fc2_dim = 300, 200, 200, 1000, 1000

    def cross_stitch(inputs, share_rate):
        # Cross-stitch blend: keep `share_rate` of the own-task activations
        # and mix in (1 - share_rate) of the other task's activations.
        # Plain tensor ops (no nested layers) keep this safe inside Lambda;
        # the float share_rate is supplied via Lambda's `arguments` kwarg.
        in_1, in_2 = inputs
        return in_1 * share_rate + in_2 * (1.0 - share_rate)

    inputs = Input(shape=(input_size, 4), name='inputs')
    inputs1, inputs2 = inputs, inputs

    nn1_in = Conv1D(filters=conv1_dim,
                    kernel_size=19,
                    padding='same',
                    name='conv_1_task1')(inputs1)
    nn2_in = Conv1D(filters=conv1_dim,
                    kernel_size=19,
                    padding='same',
                    name='conv_1_task2')(inputs2)

    # Add in cross-stitch operation
    nn1 = Lambda(cross_stitch, arguments={'share_rate': percent_share[0]},
                 name='cross_stitch_1_task1')([nn1_in, nn2_in])
    nn2 = Lambda(cross_stitch, arguments={'share_rate': percent_share[0]},
                 name='cross_stitch_1_task2')([nn2_in, nn1_in])

    nn1 = Activation('relu', name='relu_1_task1')(nn1)
    nn1 = MaxPooling1D(pool_size=3,
                       strides=3,
                       padding='same',
                       name='maxpool_1_task1')(nn1)
    nn1 = BatchNormalization(name='BN_1_task1')(nn1)

    nn2 = Activation('relu', name='relu_1_task2')(nn2)
    nn2 = MaxPooling1D(pool_size=3,
                       strides=3,
                       padding='same',
                       name='maxpool_1_task2')(nn2)
    nn2 = BatchNormalization(name='BN_1_task2')(nn2)

    nn1_in = Conv1D(filters=conv2_dim,
                    kernel_size=11,
                    padding='same',
                    name='conv_2_task1')(nn1)
    nn2_in = Conv1D(filters=conv2_dim,
                    kernel_size=11,
                    padding='same',
                    name='conv_2_task2')(nn2)

    nn1 = Lambda(cross_stitch, arguments={'share_rate': percent_share[1]},
                 name='cross_stitch_2_task1')([nn1_in, nn2_in])
    nn2 = Lambda(cross_stitch, arguments={'share_rate': percent_share[1]},
                 name='cross_stitch_2_task2')([nn2_in, nn1_in])

    nn1 = Activation('relu', name='relu_2_task1')(nn1)
    nn1 = MaxPooling1D(pool_size=4,
                       strides=4,
                       padding='same',
                       name='maxpool_2_task1')(nn1)
    nn1 = BatchNormalization(name='BN_2_task1')(nn1)

    nn2 = Activation('relu', name='relu_2_task2')(nn2)
    nn2 = MaxPooling1D(pool_size=4,
                       strides=4,
                       padding='same',
                       name='maxpool_2_task2')(nn2)
    nn2 = BatchNormalization(name='BN_2_task2')(nn2)

    nn1_in = Conv1D(filters=conv3_dim,
                    kernel_size=7,
                    padding='same',
                    name='conv_3_task1')(nn1)
    nn2_in = Conv1D(filters=conv3_dim,
                    kernel_size=7,
                    padding='same',
                    name='conv_3_task2')(nn2)

    nn1 = Lambda(cross_stitch, arguments={'share_rate': percent_share[2]},
                 name='cross_stitch_3_task1')([nn1_in, nn2_in])
    nn2 = Lambda(cross_stitch, arguments={'share_rate': percent_share[2]},
                 name='cross_stitch_3_task2')([nn2_in, nn1_in])

    nn1 = Activation('relu', name='relu_3_task1')(nn1)
    nn1 = MaxPooling1D(pool_size=4,
                       strides=4,
                       padding='same',
                       name='maxpool_3_task1')(nn1)
    nn1 = BatchNormalization(name='BN_3_task1')(nn1)

    nn2 = Activation('relu', name='relu_3_task2')(nn2)
    nn2 = MaxPooling1D(pool_size=4,
                       strides=4,
                       padding='same',
                       name='maxpool_3_task2')(nn2)
    nn2 = BatchNormalization(name='BN_3_task2')(nn2)

    nn1 = Flatten(name='flatten_task1')(nn1)
    nn2 = Flatten(name='flatten_task2')(nn2)

    nn1_in = Dense(fc1_dim, name='dense_1_task1')(nn1)
    nn2_in = Dense(fc1_dim, name='dense_1_task2')(nn2)

    nn1 = Lambda(cross_stitch, arguments={'share_rate': percent_share[3]},
                 name='cross_stitch_4_task1')([nn1_in, nn2_in])
    nn2 = Lambda(cross_stitch, arguments={'share_rate': percent_share[3]},
                 name='cross_stitch_4_task2')([nn2_in, nn1_in])

    nn1 = BatchNormalization(name='BN_4_task1')(nn1)
    nn1 = Activation('relu', name='relu_4_task1')(nn1)
    nn1 = Dropout(0.3, name='dropout_1_task1')(nn1)

    nn2 = BatchNormalization(name='BN_4_task2')(nn2)
    nn2 = Activation('relu', name='relu_4_task2')(nn2)
    nn2 = Dropout(0.3, name='dropout_1_task2')(nn2)

    nn1_in = Dense(fc2_dim, name='dense_2_task1')(nn1)
    nn2_in = Dense(fc2_dim, name='dense_2_task2')(nn2)

    nn1 = Lambda(cross_stitch, arguments={'share_rate': percent_share[4]},
                 name='cross_stitch_5_task1')([nn1_in, nn2_in])
    nn2 = Lambda(cross_stitch, arguments={'share_rate': percent_share[4]},
                 name='cross_stitch_5_task2')([nn2_in, nn1_in])

    nn1 = BatchNormalization(name='BN_5_task1')(nn1)
    nn1 = Activation('relu', name='relu_5_task1')(nn1)
    nn1 = Dropout(0.3, name='dropout_2_task1')(nn1)

    nn2 = BatchNormalization(name='BN_5_task2')(nn2)
    nn2 = Activation('relu', name='relu_5_task2')(nn2)
    nn2 = Dropout(0.3, name='dropout_2_task2')(nn2)

    ## two head version:
    labels_mask_1 = Input(shape=([num_classes_1]),
                          dtype='float32',
                          name='labels_mask_1')
    mask_1 = Masking(mask_value=0.0, name='mask_1')(labels_mask_1)
    labels_mask_2 = Input(shape=([num_classes_2]),
                          dtype='float32',
                          name='labels_mask_2')
    mask_2 = Masking(mask_value=0.0, name='mask_2')(labels_mask_2)

    result_1_ori = Dense(num_classes_1, name='dense_out_1_ori')(nn1)
    result_2_ori = Dense(num_classes_2, name='dense_out_2_ori')(nn2)

    result_1 = Multiply(name='dense_out_1')([result_1_ori, mask_1])
    result_2 = Multiply(name='dense_out_2')([result_2_ori, mask_2])

    model = tf.keras.models.Model(
        inputs=[inputs, labels_mask_1, labels_mask_2],
        outputs=[result_1, result_2])

    # Per-head mixing weights are hard-coded here; the `combining_weight`
    # argument is not applied to the two heads.
    loss_combined = {
        'dense_out_1': losses.combine_loss_by_sample(0.01),
        'dense_out_2': losses.combine_loss_by_sample(0.0052)
    }
    lossWeights = {'dense_out_1': ratio, 'dense_out_2': 1.0}

    metric = {
        'dense_out_1': ['mse', losses.pearson_loss, losses.r2_score],
        'dense_out_2': ['mse', losses.pearson_loss, losses.r2_score]
    }

    opt = tf.keras.optimizers.Adam(learning_rate=learning_rate,
                                   beta_1=0.9,
                                   beta_2=0.999,
                                   epsilon=1e-8,
                                   amsgrad=False)

    model.compile(optimizer=opt,
                  loss=loss_combined,
                  loss_weights=lossWeights,
                  metrics=metric)
    return model
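The cross-stitch blend is just a per-unit convex combination of the two branches; a standalone toy check of the operation (illustrative only, outside any model):

# Toy check of the cross-stitch blend used above.
import tensorflow as tf

a = tf.constant([[1.0, 2.0]])     # "own task" activations
b = tf.constant([[10.0, 20.0]])   # "other task" activations
share = 0.8
blended = a * share + b * (1.0 - share)
print(blended.numpy())  # [[2.8 5.6]] -> 0.8*a + 0.2*b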
Example #4
def create_model(input_size,
                 num_classes_1,
                 num_classes_2,
                 batch_size,
                 learning_rate,
                 combining_weight,
                 weight_flag,
                 ratio=1.0,
                 percent_share=None):  # learning_rate added: used by the optimizer below
    ## Shared-trunk variant: channel blocks are routed per task through
    ## multiplicative masks driven by a per-sample task id.
    import tensorflow as tf
    from tensorflow.keras import backend as K
    from tensorflow.keras.layers import Input, Conv1D, Dense, MaxPooling1D, Flatten, Dropout, Activation, BatchNormalization
    from tensorflow.keras.layers import Masking, Multiply, RepeatVector, Reshape, Permute, Lambda

    conv1_dim, conv2_dim, conv3_dim, fc1_dim, fc2_dim = 300, 200, 200, 1000, 1000
    inputs = Input(shape=(input_size, 4), name='inputs')
    nn = Conv1D(filters=conv1_dim, kernel_size=19, padding='same', name='conv_1')(inputs)

    custom_mask_in = Input(shape=(1,), name='custom_mask_in')  # per-sample task id: 1 or 2
    # Split channels into task1 + task2 + shared blocks (fractions sum to 1).
    # Masking layers do not work on conv outputs, so multiplicative Lambda
    # masks are used instead.
    conv1_mask_base = Reshape((input_size,), name='conv1_mask_base')(RepeatVector(input_size)(custom_mask_in))
    conv1_mask_share = RepeatVector(int(conv1_dim * percent_share[0]), name='conv1_share')(conv1_mask_base) 
    conv1_mask_share = Permute((2, 1), name='conv1_mask_share_reordered')(conv1_mask_share)
    conv1_mask_task1 = RepeatVector(int(conv1_dim * round(1.0 - percent_share[0], 10)/2.0), name='conv1_task1')(conv1_mask_base)
    conv1_mask_task1_masking = Lambda(lambda x: K.cast(K.equal(x, 1), dtype='float32'), name='conv1_task1_converted')(conv1_mask_task1)
    conv1_mask_task1 = Multiply(name='conv1_mask_task1')([conv1_mask_task1, conv1_mask_task1_masking])
    conv1_mask_task1 = Permute((2, 1), name='conv1_mask_task1_reordered')(conv1_mask_task1)

    conv1_mask_task2 = RepeatVector(int(conv1_dim * round(1.0 - percent_share[0], 10)/2.0), name='conv1_task2')(conv1_mask_base)
    conv1_mask_task2_masking = Lambda(lambda x: K.cast(K.equal(x, 2), dtype='float32'), name='conv1_task2_converted')(conv1_mask_task2)
    conv1_mask_task2 = Multiply(name='conv1_mask_task2')([conv1_mask_task2, conv1_mask_task2_masking])
    conv1_mask_task2 = Permute((2, 1), name='conv1_mask_task2_reordered')(conv1_mask_task2)

    conv1_mask = tf.keras.layers.concatenate([conv1_mask_task1, conv1_mask_task2, conv1_mask_share], name='conv1_mask_concatenate')
    conv1_mask = Lambda(lambda x: K.cast(K.greater(x, 0), dtype='float32'), name='conv1_mask_normalized')(conv1_mask)
    nn = Multiply(name='conv1_branched_1')([nn, conv1_mask])

    nn = Activation('relu', name='relu_1')(nn)
    nn = MaxPooling1D(pool_size=3, strides=3, padding='same', name='maxpool_1')(nn)
    nn = BatchNormalization(name='BN_1')(nn)
    
    nn = Conv1D(filters=conv2_dim, kernel_size=11, padding='same', name='conv_2')(nn)

    conv2_mask_base = Reshape((int(nn.shape[1]),), name='conv2_mask_base')(RepeatVector(int(nn.shape[1]))(custom_mask_in))       
    conv2_mask_share = RepeatVector(int(conv2_dim * percent_share[1]), name='conv2_share')(conv2_mask_base) 
    conv2_mask_share = Permute((2, 1), name='conv2_mask_share_reordered')(conv2_mask_share)
    conv2_mask_task1 = RepeatVector(int(conv2_dim * round(1.0 - percent_share[1], 10)/2.0), name='conv2_task1')(conv2_mask_base)
    conv2_mask_task1_masking = Lambda(lambda x: K.cast(K.equal(x, 1), dtype='float32'), name='conv2_task1_converted')(conv2_mask_task1)
    conv2_mask_task1 = Multiply(name='conv2_mask_task1')([conv2_mask_task1, conv2_mask_task1_masking])
    conv2_mask_task1 = Permute((2, 1), name='conv2_mask_task1_reordered')(conv2_mask_task1)
    conv2_mask_task2 = RepeatVector(int(conv2_dim * round(1.0 - percent_share[1], 10)/2.0), name='conv2_task2')(conv2_mask_base)
    conv2_mask_task2_masking = Lambda(lambda x: K.cast(K.equal(x, 2), dtype='float32'), name='conv2_task2_converted')(conv2_mask_task2)
    conv2_mask_task2 = Multiply(name='conv2_mask_task2')([conv2_mask_task2, conv2_mask_task2_masking])
    conv2_mask_task2 = Permute((2, 1), name='conv2_mask_task2_reordered')(conv2_mask_task2)
    conv2_mask = tf.keras.layers.concatenate([conv2_mask_task1, conv2_mask_task2, conv2_mask_share], name='conv2_mask_concatenate')
    conv2_mask = Lambda(lambda x: K.cast(K.greater(x, 0), dtype='float32'), name='conv2_mask_normalized')(conv2_mask)    
    nn = Multiply(name='conv2_branched_1')([nn, conv2_mask])

    nn = Activation('relu', name='relu_2')(nn)
    nn = MaxPooling1D(pool_size=4, strides=4, padding='same', name='maxpool_2')(nn)
    nn = BatchNormalization(name='BN_2')(nn)
    
    nn = Conv1D(filters=conv3_dim, kernel_size=7, padding='same', name='conv_3')(nn)

    conv3_mask_base = Reshape((int(nn.shape[1]),), name='conv3_mask_base')(RepeatVector(int(nn.shape[1]))(custom_mask_in))        
    conv3_mask_share = RepeatVector(int(conv3_dim * percent_share[2]), name='conv3_share')(conv3_mask_base) 
    conv3_mask_share = Permute((2, 1), name='conv3_mask_share_reordered')(conv3_mask_share)
    conv3_mask_task1 = RepeatVector(int(conv3_dim * round(1.0 - percent_share[2], 10)/2.0), name='conv3_task1')(conv3_mask_base)
    conv3_mask_task1_masking = Lambda(lambda x: K.cast(K.equal(x, 1), dtype='float32'), name='conv3_task1_converted')(conv3_mask_task1)
    conv3_mask_task1 = Multiply(name='conv3_mask_task1')([conv3_mask_task1, conv3_mask_task1_masking])
    conv3_mask_task1 = Permute((2, 1), name='conv3_mask_task1_reordered')(conv3_mask_task1)
    conv3_mask_task2 = RepeatVector(int(conv3_dim * round(1.0 - percent_share[2], 10)/2.0), name='conv3_task2')(conv3_mask_base)
    conv3_mask_task2_masking = Lambda(lambda x: K.cast(K.equal(x, 2), dtype='float32'), name='conv3_task2_converted')(conv3_mask_task2)
    conv3_mask_task2 = Multiply(name='conv3_mask_task2')([conv3_mask_task2, conv3_mask_task2_masking])
    conv3_mask_task2 = Permute((2, 1), name='conv3_mask_task2_reordered')(conv3_mask_task2)
    conv3_mask = tf.keras.layers.concatenate([conv3_mask_task1, conv3_mask_task2, conv3_mask_share], name='conv3_mask_concatenate')
    conv3_mask = Lambda(lambda x: K.cast(K.greater(x, 0), dtype='float32'), name='conv3_mask_normalized')(conv3_mask)    
    nn = Multiply(name='conv3_branched_1')([nn, conv3_mask])

    nn = Activation('relu', name='relu_3')(nn)
    nn = MaxPooling1D(pool_size=4, strides=4, padding='same', name='maxpool_3')(nn)
    nn = BatchNormalization(name='BN_3')(nn)
    
    nn = Flatten(name='flatten')(nn)
    
    # Split into task1 + task2 + shared blocks. The literal fractions below
    # (0.65 and 0.175) must match percent_share[3] and (1 - percent_share[3]) / 2,
    # or the Reshape shapes will not agree.
    fc1_mask_share = Reshape((int(fc1_dim * percent_share[3]),), name='fc1_share')(RepeatVector(int(fc1_dim * 0.65))(custom_mask_in))
    fc1_mask_task1 = Reshape((int(fc1_dim * round(1.0 - percent_share[3], 10)/2.0),), name='fc1_task1')(RepeatVector(int(fc1_dim * 0.175))(custom_mask_in))
    # Zero out entries belonging to the other task. (A Masking layer only
    # attaches a mask without changing values, so an explicit Lambda is used,
    # matching the conv blocks above.)
    fc1_mask_task1 = Lambda(lambda x: K.cast(K.equal(x, 1), dtype='float32'), name='fc1_mask_task1')(fc1_mask_task1)
    fc1_mask_task2 = Reshape((int(fc1_dim * round(1.0 - percent_share[3], 10)/2.0),), name='fc1_task2')(RepeatVector(int(fc1_dim * 0.175))(custom_mask_in))
    fc1_mask_task2 = Lambda(lambda x: K.cast(K.equal(x, 2), dtype='float32'), name='fc1_mask_task2')(fc1_mask_task2)
    fc1_mask = tf.keras.layers.concatenate([fc1_mask_task1, fc1_mask_task2, fc1_mask_share], name='fc1_mask_concatenate')
    fc1_mask = Lambda(lambda x: K.cast(K.greater(x, 0), dtype='float32'), name='fc1_mask_normalized')(fc1_mask)

    nn = Dense(fc1_dim, name='dense_1')(nn)
    nn = Multiply(name='fc1_branched_1')([nn, fc1_mask]) 
    nn = BatchNormalization(name='BN_4')(nn)
    nn = Activation('relu', name='relu_4')(nn)
    nn = Dropout(0.3, name='dropout_1')(nn)
    
    # Split into task1 + task2 + shared blocks (the literal 0.6 / 0.2 fractions
    # must likewise match percent_share[4]).
    fc2_mask_share = Reshape((int(fc2_dim * percent_share[4]),), name='fc2_share')(RepeatVector(int(fc2_dim * 0.6))(custom_mask_in))
    fc2_mask_task1 = Reshape((int(fc2_dim * round(1.0 - percent_share[4], 10)/2.0),), name='fc2_task1')(RepeatVector(int(fc2_dim * 0.2))(custom_mask_in))
    fc2_mask_task1 = Lambda(lambda x: K.cast(K.equal(x, 1), dtype='float32'), name='fc2_mask_task1')(fc2_mask_task1)
    fc2_mask_task2 = Reshape((int(fc2_dim * round(1.0 - percent_share[4], 10)/2.0),), name='fc2_task2')(RepeatVector(int(fc2_dim * 0.2))(custom_mask_in))
    fc2_mask_task2 = Lambda(lambda x: K.cast(K.equal(x, 2), dtype='float32'), name='fc2_mask_task2')(fc2_mask_task2)
    fc2_mask = tf.keras.layers.concatenate([fc2_mask_task1, fc2_mask_task2, fc2_mask_share], name='fc2_mask_concatenate')
    fc2_mask = Lambda(lambda x: K.cast(K.greater(x, 0), dtype='float32'), name='fc2_mask_normalized')(fc2_mask)    

    nn = Dense(fc2_dim, name='dense_2')(nn)
    nn = Multiply(name='fc2_branched_1')([nn, fc2_mask]) 
    nn = BatchNormalization(name='BN_5')(nn)
    nn = Activation('relu', name='relu_5')(nn)
    nn = Dropout(0.3, name='dropout_2')(nn)      
    
    ## tower head by task:
    labels_mask_1 = Input(shape=([num_classes_1]), dtype='float32', name='labels_mask_1')
    mask_1 = Masking(mask_value=0.0, name='mask_1')(labels_mask_1) 
    labels_mask_2 = Input(shape=([num_classes_2]), dtype='float32', name='labels_mask_2')
    mask_2 = Masking(mask_value=0.0, name='mask_2')(labels_mask_2)     

    result_1_ori = Dense(num_classes_1, name='dense_out_1_ori')(nn)
    result_2_ori = Dense(num_classes_2, name='dense_out_2_ori')(nn)

    result_1 = Multiply(name='dense_out_1')([result_1_ori, mask_1])
    result_2 = Multiply(name='dense_out_2')([result_2_ori, mask_2])

    model = tf.keras.models.Model(inputs=[inputs, labels_mask_1, labels_mask_2, custom_mask_in],
                                  outputs=[result_1, result_2]) 

    loss_combined = {
        'dense_out_1': losses.combine_loss_by_sample(0.01),
        'dense_out_2': losses.combine_loss_by_sample(0.0052)
    }
    lossWeights = {'dense_out_1': ratio, 'dense_out_2': 1.0} 

    metric = {
        'dense_out_1': ['mse', losses.pearson_loss, losses.r2_score],
        'dense_out_2': ['mse', losses.pearson_loss, losses.r2_score]
    }

    opt = tf.keras.optimizers.Adam(learning_rate=learning_rate, beta_1=0.9, beta_2=0.999, epsilon=1e-8, amsgrad=False)
   
    model.compile(optimizer=opt,
                  loss=loss_combined,
                  loss_weights=lossWeights,
                  metrics=metric)
    return model
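Here `custom_mask_in` carries a per-sample task id (1 or 2), which the Lambda masks compare against. A hypothetical sketch of how the input dictionary for this model could be assembled (keys follow the Input names above; the data is made up):

# Hypothetical input assembly for the mask-branched model above.
import numpy as np

n, input_size, num_classes_1, num_classes_2 = 8, 600, 10, 12
x = np.random.rand(n, input_size, 4).astype('float32')
task_id = np.array([1, 1, 2, 2, 1, 2, 1, 2], dtype='float32').reshape(n, 1)
# Label masks: 1 where a sample has labels for that head, else 0.
m1 = (task_id == 1).astype('float32') * np.ones((n, num_classes_1), dtype='float32')
m2 = (task_id == 2).astype('float32') * np.ones((n, num_classes_2), dtype='float32')
inputs = {'inputs': x, 'labels_mask_1': m1, 'labels_mask_2': m2,
          'custom_mask_in': task_id}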
Example #5
def create_model(input_size,
                 num_classes_1,
                 num_classes_2,
                 batch_size,
                 learning_rate,
                 combining_weight,
                 weight_flag,
                 ratio=1.0,
                 percent_share=None):  # learning_rate added: used by the optimizer below
    ## Two-head baseline: a single shared trunk with one output head per task.
    import tensorflow as tf
    from tensorflow.keras.layers import Input, Conv1D, Dense, MaxPooling1D, Flatten, Dropout, Activation, BatchNormalization
    from tensorflow.keras.layers import Concatenate, Masking, Multiply, RepeatVector, Reshape, Permute, Lambda

    inputs = Input(shape=(input_size, 4), name='inputs')
    nn = Conv1D(filters=300, kernel_size=19, padding='same',
                name='conv_1')(inputs)
    nn = Activation('relu', name='relu_1')(nn)
    nn = MaxPooling1D(pool_size=3, strides=3, padding='same',
                      name='maxpool_1')(nn)
    nn = BatchNormalization(name='BN_1')(nn)

    nn = Conv1D(filters=200, kernel_size=11, padding='same', name='conv_2')(nn)
    nn = Activation('relu', name='relu_2')(nn)
    nn = MaxPooling1D(pool_size=4, strides=4, padding='same',
                      name='maxpool_2')(nn)
    nn = BatchNormalization(name='BN_2')(nn)

    nn = Conv1D(filters=200, kernel_size=7, padding='same', name='conv_3')(nn)
    nn = Activation('relu', name='relu_3')(nn)
    nn = MaxPooling1D(pool_size=4, strides=4, padding='same',
                      name='maxpool_3')(nn)
    nn = BatchNormalization(name='BN_3')(nn)

    nn = Flatten(name='flatten')(nn)

    nn = Dense(1000, name='dense_1')(nn)
    nn = BatchNormalization(name='BN_4')(nn)
    nn = Activation('relu', name='relu_4')(nn)
    nn = Dropout(0.3, name='dropout_1')(nn)

    nn = Dense(1000, name='dense_2')(nn)
    nn = BatchNormalization(name='BN_5')(nn)
    nn = Activation('relu', name='relu_5')(nn)
    nn = Dropout(0.3, name='dropout_2')(nn)

    ## two head version:
    labels_mask_1 = Input(shape=([num_classes_1]),
                          dtype='float32',
                          name='labels_mask_1')
    mask_1 = Masking(mask_value=0.0, name='mask_1')(labels_mask_1)
    labels_mask_2 = Input(shape=([num_classes_2]),
                          dtype='float32',
                          name='labels_mask_2')
    mask_2 = Masking(mask_value=0.0, name='mask_2')(labels_mask_2)

    result_1_ori = Dense(num_classes_1, name='dense_out_1_ori')(nn)
    result_2_ori = Dense(num_classes_2, name='dense_out_2_ori')(nn)

    result_1 = Multiply(name='dense_out_1')([result_1_ori, mask_1])
    result_2 = Multiply(name='dense_out_2')([result_2_ori, mask_2])

    model = tf.keras.models.Model(
        inputs=[inputs, labels_mask_1, labels_mask_2],
        outputs=[result_1, result_2])

    loss_combined = {
        'dense_out_1': losses.combine_loss_by_sample(0.01),
        'dense_out_2': losses.combine_loss_by_sample(0.0052)
    }
    lossWeights = {'dense_out_1': ratio, 'dense_out_2': 1.0}

    metric = {
        'dense_out_1': ['mse', losses.pearson_loss, losses.r2_score],
        'dense_out_2': ['mse', losses.pearson_loss, losses.r2_score]
    }

    opt = tf.keras.optimizers.Adam(learning_rate=learning_rate,
                                   beta_1=0.9,
                                   beta_2=0.999,
                                   epsilon=1e-8,
                                   amsgrad=False)

    model.compile(optimizer=opt,
                  loss=loss_combined,
                  loss_weights=lossWeights,
                  metrics=metric)
    return model
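With two heads, a sample contributes loss to a head only where its label mask is nonzero. A hedged training sketch (shapes and values invented; assumes the `losses` module is available):

# Hypothetical training call for the two-head model above.
import numpy as np

n, input_size, c1, c2 = 8, 600, 10, 12
model = create_model(input_size, c1, c2, batch_size=4, learning_rate=1e-3,
                     combining_weight=0.01, weight_flag=True, ratio=1.0)
x = np.random.rand(n, input_size, 4).astype('float32')
m1 = np.ones((n, c1), dtype='float32')    # every sample labeled for head 1
m2 = np.zeros((n, c2), dtype='float32')   # head 2 labeled only for...
m2[:4] = 1.0                              # ...the first four samples
y1 = np.random.rand(n, c1).astype('float32') * m1
y2 = np.random.rand(n, c2).astype('float32') * m2
model.fit({'inputs': x, 'labels_mask_1': m1, 'labels_mask_2': m2},
          {'dense_out_1': y1, 'dense_out_2': y2}, batch_size=4, epochs=1)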
Example #6
## Transfer-learning snippet: `model` is a previously trained (loaded) network;
## `num_classes`, `TL_manner`, `learning_rate`, `combining_weight`, and
## `output_directory` are defined by the surrounding script.
import os
import tensorflow
from time import time
from tensorflow.keras.layers import Dense

model.outputs = [model.layers[-1].output]  # re-point outputs at the last kept layer
model.add(Dense(num_classes))              # new task-specific output head
if TL_manner == 'frozen':
    for layer in model.layers[:-1]:
        print(layer.name)
        layer.trainable = False
model.summary()

opt = tensorflow.keras.optimizers.Adam(learning_rate=learning_rate,
                                       beta_1=0.9,
                                       beta_2=0.999,
                                       epsilon=1e-8,
                                       amsgrad=False)
model.compile(loss=losses.combine_loss_by_sample(combining_weight),
              optimizer=opt,
              metrics=['mse', losses.pearson_loss, losses.r2_score])

checkpoint_path_weights = os.path.join(output_directory, 'cp.ckpt')

## Create checkpoint callback
cp_callback = tensorflow.keras.callbacks.ModelCheckpoint(
    checkpoint_path_weights,
    save_weights_only=True,
    save_best_only=True,
    verbose=1)

tensorboard = tensorflow.keras.callbacks.TensorBoard(
    log_dir=os.path.join(output_directory, 'logs', '{}'.format(time())),
    histogram_freq=1)
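The callbacks would then be passed to training. A minimal, hypothetical continuation (`x_train`, `y_train`, `x_val`, `y_val` stand in for whatever the surrounding script loads):

# Hypothetical continuation: wire the callbacks into training.
history = model.fit(x_train, y_train,
                    validation_data=(x_val, y_val),
                    epochs=100,
                    callbacks=[cp_callback, tensorboard])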