Code example #1
File: build_model.py    Project: sss1/DeepInteractions
# Keras imports assumed by this snippet; merge_layer, biLSTM_layer, dense_layer,
# LR_classifier_layer, enhancer_conv_layer, promoter_conv_layer and util are
# module-level objects defined elsewhere in the project's build_model.py.
from keras.models import Sequential
from keras.layers import Activation, BatchNormalization, Dropout, Flatten

def build_model(use_JASPAR=True):

  # A single downstream model merges the enhancer and promoter branches
  # Build main (merged) branch
  # Using batch normalization seems to inhibit retraining, probably because the
  # point of retraining is to learn (external) covariate shift
  model = Sequential()
  model.add(merge_layer)
  model.add(BatchNormalization())
  model.add(Dropout(0.25))
  model.add(biLSTM_layer)
  model.add(BatchNormalization())
  model.add(Dropout(0.5))
  model.add(Flatten())
  model.add(dense_layer)
  model.add(BatchNormalization())
  model.add(Activation("relu"))
  model.add(Dropout(0.5))
  model.add(LR_classifier_layer)
  model.add(BatchNormalization())
  model.add(Activation("sigmoid"))
  
  # Read in and initialize convolutional layers with motifs from JASPAR
  if use_JASPAR:
    util.initialize_with_JASPAR(enhancer_conv_layer, promoter_conv_layer)

  return model
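
For orientation, here is a minimal sketch (not part of the project) of how a model returned by build_model might be compiled and trained. The optimizer settings, batch size, epoch count and the dummy input shapes (borrowed from the Input layers in code example #3) are all assumptions, as is the premise that the merge layer takes the enhancer and promoter one-hot sequences as two separate inputs; older Keras releases spell the epochs argument as nb_epoch.

import numpy as np
from keras.optimizers import Adam

model = build_model(use_JASPAR=True)
model.compile(optimizer=Adam(lr=1e-5),
              loss='binary_crossentropy',
              metrics=['accuracy'])

# Dummy one-hot data; real training would use encoded enhancer/promoter pairs.
X_enhancer = np.random.randint(0, 2, size=(128, 1600, 4)).astype('float32')
X_promoter = np.random.randint(0, 2, size=(128, 3000, 4)).astype('float32')
y = np.random.randint(0, 2, size=(128, 1))

model.fit([X_enhancer, X_promoter], y, batch_size=32, epochs=1)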
Code example #2
# Same Keras imports and module-level layer objects as in code example #1;
# this version simply comments out the biLSTM block.
def build_model(use_JASPAR=True):
    # A single downstream model merges the enhancer and promoter branches
    # Build main (merged) branch
    # Using batch normalization seems to inhibit retraining, probably because the
    # point of retraining is to learn (external) covariate shift
    model = Sequential()
    model.add(merge_layer)
    model.add(BatchNormalization())
    model.add(Dropout(0.25))
    #model.add(biLSTM_layer)
    #model.add(BatchNormalization())
    #model.add(Dropout(0.5))
    model.add(Flatten())
    model.add(dense_layer)
    model.add(BatchNormalization())
    model.add(Activation("relu"))
    model.add(Dropout(0.5))
    model.add(LR_classifier_layer)
    model.add(BatchNormalization())
    model.add(Activation("sigmoid"))

    # Read in and initialize convolutional layers with motifs from JASPAR
    if use_JASPAR:
        util.initialize_with_JASPAR(enhancer_conv_layer, promoter_conv_layer)

    return model
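
util.initialize_with_JASPAR is project code that is not shown on this page. As a rough, hypothetical illustration of what seeding convolution filters with JASPAR motifs can look like, here is a sketch; the helper name, the Keras 2-style Conv1D kernel layout (kernel_size, 4, n_filters) and the copy-in strategy are assumptions, not the project's actual implementation.

import numpy as np

def initialize_conv_from_pwms(conv_layer, pwms):
    """Copy position weight matrices into the first filters of a built conv layer.

    conv_layer: a built Keras 2-style Conv1D layer with kernel shape
        (kernel_size, 4, n_filters).
    pwms: a list of (motif_length, 4) arrays, e.g. parsed from JASPAR matrices.
    """
    kernel, bias = conv_layer.get_weights()
    kernel_size, _, n_filters = kernel.shape
    for i, pwm in enumerate(pwms[:n_filters]):
        length = min(kernel_size, pwm.shape[0])
        kernel[:length, :, i] = pwm[:length, :]
    conv_layer.set_weights([kernel, bias])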
Code example #3
# Keras imports assumed by this snippet; enhancer_branch, promoter_branch,
# mlp_pooling, dense_layer, LR_classifier_layer, enhancer_mlp_layer,
# promoter_mlp_layer, enhancer_conv_layer, promoter_conv_layer and util are
# module-level objects defined elsewhere in the project. K.tf requires an
# older Keras release that exposes the TensorFlow module as keras.backend.tf.
from keras import backend as K
from keras.models import Model
from keras.layers import (Input, Activation, BatchNormalization, Dropout,
                          Flatten, Lambda, concatenate)


def build_model(use_JASPAR=True):

    enhancer_input = Input((1600, 4))
    promoter_input = Input((3000, 4))

    enhancer_branch_encoder = enhancer_branch(enhancer_input)
    promoter_branch_encoder = promoter_branch(promoter_input)
    encoder = concatenate([enhancer_branch_encoder, promoter_branch_encoder],
                          axis=1)
    encoder = BatchNormalization()(encoder)
    encoder = Dropout(0.25)(encoder)
    encoder = Flatten()(encoder)
    encoder = dense_layer(encoder)
    encoder = BatchNormalization()(encoder)
    encoder = Activation("relu")(encoder)
    encoder = Dropout(0.2)(encoder)

    # The merged Sequential model used in earlier versions of this file (see
    # code examples #1 and #2 above) has been replaced by the functional-API
    # graph built above.

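    # Second branch over the same inputs, built from the module-level
    # mlp_pooling, enhancer_mlp_layer and promoter_mlp_layer objects
    # (defined elsewhere in the project).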
    enhancer_mlp = mlp_pooling(enhancer_input)
    promoter_mlp = mlp_pooling(promoter_input)

    enhancer_mlp = Flatten()(enhancer_mlp)
    promoter_mlp = Flatten()(promoter_mlp)

    enhancer_mlp = enhancer_mlp_layer(enhancer_mlp)
    enhancer_mlp = BatchNormalization()(enhancer_mlp)
    enhancer_mlp = Activation("relu")(enhancer_mlp)
    enhancer_mlp = Dropout(0.2)(enhancer_mlp)

    promoter_mlp = promoter_mlp_layer(promoter_mlp)
    promoter_mlp = BatchNormalization()(promoter_mlp)
    promoter_mlp = Activation("relu")(promoter_mlp)
    promoter_mlp = Dropout(0.2)(promoter_mlp)

    mlp = concatenate([enhancer_mlp, promoter_mlp], axis=1)
    mlp = BatchNormalization()(mlp)

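    # Three inputs for the shared LR_classifier_layer:
    #   yconv_contact_loss - encoder features plus MLP features,
    #   yconv_contact_pred - MLP features replaced by zeros (encoder-only),
    #   yconv_contact_H    - encoder features replaced by zeros (MLP-only).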
    yconv_contact_loss = concatenate([encoder, mlp], axis=1)
    pad = K.zeros_like(mlp, K.tf.float32)
    yconv_contact_pred = concatenate([encoder, pad], 1)
    pad2 = K.zeros_like(encoder, K.tf.float32)
    yconv_contact_H = concatenate([pad2, mlp], 1)

    y_conv_loss = LR_classifier_layer(yconv_contact_loss)
    y_conv_pred = LR_classifier_layer(yconv_contact_pred)
    y_conv_H = LR_classifier_layer(yconv_contact_H)

    # Project y_conv_loss off the column space of y_conv_H:
    #   decoder = y_conv_loss - H (H^T H)^{-1} H^T y_conv_loss,  with H = y_conv_H,
    # i.e. keep the part of the full prediction that the MLP-only branch
    # cannot explain.

    decoder = Lambda(lambda x: K.tf.matmul(K.tf.matmul(
        x, K.tf.linalg.inv(K.tf.matmul(x, x, transpose_a=True))),
                                           x,
                                           transpose_b=True))(y_conv_H)

    decoder = Lambda(lambda y: y - K.tf.matmul(decoder, y))(y_conv_loss)

    # Read in and initialize convolutional layers with motifs from JASPAR
    if use_JASPAR:
        util.initialize_with_JASPAR(enhancer_conv_layer, promoter_conv_layer)

    decoder = Activation('sigmoid')(decoder)

    model = Model([enhancer_input, promoter_input], decoder)

    return model
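
The two Lambda layers above compute, across the batch, the residual of an orthogonal projection: with H = y_conv_H, the output is y_conv_loss - H (H^T H)^(-1) H^T y_conv_loss, i.e. the part of the full prediction that the MLP-only logits cannot explain. A small NumPy check of that algebra (the batch size and the one-logit-per-example shape are assumptions):

import numpy as np

H = np.random.randn(32, 1)   # stands in for y_conv_H: one logit per example
y = np.random.randn(32, 1)   # stands in for y_conv_loss

P = H @ np.linalg.inv(H.T @ H) @ H.T   # projection onto the column space of H
residual = y - P @ y                   # what the Lambda pair computes

# The residual has no component along H (up to round-off).
assert abs((H.T @ residual).item()) < 1e-8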