Example #1
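All three examples assume the same preamble, which the source omits; Examples #1 and #2 are methods (note `self`) excerpted from a model class whose definition is likewise omitted. Below is a minimal sketch of the likely imports, where every module path is an assumption rather than something the source confirms:

# Assumed imports; none of these lines appear in the original snippets.
import tensorflow as tf                # TF1-style graph construction
from tensorflow import keras as K      # assumption: K refers to Keras
import kgcn.layers as layers           # assumption: GraphConv/GraphGather provider
import multitask_utils as mu           # hypothetical module exposing multitask_logits,
                                       # add_training_loss and add_softmax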
    def build_model(self, info, batch_size=4):
        profeat_dim = info.vector_modal_dim[info.vector_modal_name["profeat"]]
        dragon_dim = info.vector_modal_dim[info.vector_modal_name["dragon"]]
        labels = self.placeholders["labels"]
        mask = self.placeholders["mask"]
        dropout_rate = self.placeholders["dropout_rate"]
        profeat = self.placeholders["profeat"]
        dragon = self.placeholders["dragon"]
        mask_label = self.placeholders["mask_label"]
        is_train = self.placeholders["is_train"]
        enabled_node_nums = self.placeholders["enabled_node_nums"]
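        # Note: mask, dropout_rate, is_train, enabled_node_nums, profeat_dim
        # and dragon_dim are fetched above but unused in this variant.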

        ###
        ### Mol part
        ###
        layer = dragon
        layer = K.layers.Dense(1000)(layer)
        layer = K.layers.BatchNormalization()(layer)
        layer = tf.nn.relu(layer)

        layer = K.layers.Dense(500)(layer)
        layer = K.layers.BatchNormalization()(layer)
        layer = tf.nn.relu(layer)

        layer = K.layers.Dense(100)(layer)
        layer = K.layers.BatchNormalization()(layer)
        layer = tf.nn.relu(layer)

        graph_output_layer = layer
        graph_output_layer_dim = 100

        ###
        ### Sequence part
        ###
        layer = profeat
        layer = K.layers.Dense(2000)(layer)
        layer = K.layers.BatchNormalization()(layer)
        layer = tf.nn.relu(layer)

        layer = K.layers.Dense(1000)(layer)
        layer = K.layers.BatchNormalization()(layer)
        layer = tf.nn.relu(layer)

        layer = K.layers.Dense(100)(layer)
        layer = K.layers.BatchNormalization()(layer)
        layer = tf.nn.relu(layer)

        seq_output_layer = layer
        seq_output_layer_dim = 100

        ###
        ### Shared part
        ###
        # 100 dim (Mol part) + 100 dim (Sequence part)

        layer = tf.concat([seq_output_layer, graph_output_layer], axis=1)
        input_dim = seq_output_layer_dim + graph_output_layer_dim

        layer = K.layers.Dense(100)(layer)
        layer = K.layers.BatchNormalization()(layer)
        layer = tf.nn.relu(layer)

        layer = K.layers.Dense(info.label_dim)(layer)
        # Build the final outputs: one logit head per task
        # shape: [n_tasks × batch × 2], e.g. [12 × 50 × 2]
        logits = mu.multitask_logits(layer, labels.shape[1])
        model = logits
        # Compute the cost: a batch-averaged loss per task (e.g. 12 values)
        task_losses = mu.add_training_loss(logits=logits, label=labels,
                                           pos_weight=info.pos_weight,
                                           batch_size=batch_size,
                                           n_tasks=labels.shape[1],
                                           mask=mask_label)
        total_loss = tf.reduce_sum(task_losses)  # sum of the per-task losses

        ### multi-task loss
        cost_opt = task_losses
        each_cost = task_losses

        # Binary class probabilities: [12 × 50 × 2]
        prediction = mu.add_softmax(logits)
        prediction = tf.transpose(prediction, [1, 0, 2])  # -> [batch, tasks, 2]
        metrics = {}
        cost_sum = total_loss
        # cost_sum = cost_opt
        metrics["each_cost"] = task_losses

        metrics["each_correct_count"] = [None] * labels.shape[1]
        for i in range(labels.shape[1]):
            equal_cnt = mask_label[:, i] * tf.cast(
                tf.equal(tf.cast(tf.argmax(prediction[:, i, :], 1), tf.int16),
                         tf.cast(labels[:, i], tf.int16)), tf.float32)

            each_correct_count = tf.cast(tf.reduce_sum(equal_cnt, axis=0),
                                         tf.float32)
            metrics["each_correct_count"][i] = each_correct_count

        metrics["correct_count"] = sum(
            [metrics["each_correct_count"][i] for i in range(labels.shape[1])])
        return model, prediction, cost_opt, cost_sum, metrics
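For context, here is a minimal sketch of how the five returned tensors might be wired into a TF1 training step. `model_obj` (an instance of the class this method belongs to) and `feed_dict` (a batch mapped onto the placeholders) are hypothetical names, not from the source:

# Hypothetical wiring of build_model's outputs into a training step.
model, prediction, cost_opt, cost_sum, metrics = model_obj.build_model(info, batch_size=50)
train_step = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(cost_sum)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    _, loss_value, counts = sess.run(
        [train_step, cost_sum, metrics["correct_count"]], feed_dict=feed_dict)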
Example #2
    def build_model(self, info, batch_size=4):
        adj_channel_num = info.adj_channel_num
        profeat_dim = info.vector_modal_dim[info.vector_modal_name["profeat"]]
        in_adjs = self.placeholders["adjs"]
        features = self.placeholders["features"]
        in_nodes = self.placeholders["nodes"]
        labels = self.placeholders["labels"]
        mask = self.placeholders["mask"]
        dropout_rate = self.placeholders["dropout_rate"]
        profeat = self.placeholders["profeat"]
        mask_label = self.placeholders["mask_label"]
        is_train = self.placeholders["is_train"]
        enabled_node_nums = self.placeholders["enabled_node_nums"]
        wd_b = None
        wd_w = 0.1
     
        ###
        ### Graph part
        ###
        layer = features
        input_dim = info.feature_dim
        layer = layers.GraphConv(100, adj_channel_num)(layer, adj=in_adjs)
        layer = layers.GraphBatchNormalization()(
            layer, max_node_num=info.graph_node_num,
            enabled_node_nums=enabled_node_nums)
        layer = tf.nn.relu(layer)

        layer = layers.GraphDense(100)(layer)
        layer = tf.nn.relu(layer)
        layer = layers.GraphGather()(layer)
        graph_output_layer = layer
        graph_output_layer_dim = 100
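        # GraphGather aggregates the per-node features over the node axis,
        # so the graph tower emits one 100-dim vector per molecule.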
    
        ###
        ### Sequence part
        ###
        with tf.variable_scope("seq_nn") as scope_part:
            layer = profeat
            layer = K.layers.Dense(100)(layer)
            layer = K.layers.BatchNormalization()(layer)
            layer = tf.nn.relu(layer)

            seq_output_layer = layer
            seq_output_layer_dim = 100
  
        ###
        ### Shared part
        ###
        # 100 dim (Graph part) + 100 dim (Sequence part)
        layer = tf.concat([seq_output_layer, graph_output_layer], axis=1)
        input_dim = seq_output_layer_dim + graph_output_layer_dim

        with tf.variable_scope("shared_nn") as scope_part:
            layer = K.layers.Dense(52)(layer)
            layer = K.layers.BatchNormalization()(layer)
            layer = tf.nn.relu(layer)

            layer = K.layers.Dense(info.label_dim)(layer)
        # Build the final outputs: one logit head per task
        # shape: [n_tasks × batch × 2], e.g. [12 × 50 × 2]
        logits = mu.multitask_logits(layer, labels.shape[1])
        model = logits
        # Compute the cost: a batch-averaged loss per task (e.g. 12 values)
        task_losses = mu.add_training_loss(logits=logits, label=labels,
                                           pos_weight=info.pos_weight,
                                           batch_size=batch_size,
                                           n_tasks=labels.shape[1],
                                           mask=mask_label)
        total_loss = tf.reduce_sum(task_losses)  # sum of the per-task losses

        ### multi-task loss
        cost_opt = task_losses
        each_cost = task_losses

        # Binary class probabilities: [12 × 50 × 2]
        prediction = mu.add_softmax(logits)
        prediction = tf.transpose(prediction, [1, 0, 2])  # -> [batch, tasks, 2]

        metrics = {}
        cost_sum = total_loss
        # cost_sum = cost_opt
        metrics["each_cost"] = task_losses

        metrics["each_correct_count"] = [None] * labels.shape[1]
        for i in range(labels.shape[1]):
            equal_cnt = mask_label[:, i] * tf.cast(
                tf.equal(tf.cast(tf.argmax(prediction[:, i, :], 1), tf.int16),
                         tf.cast(labels[:, i], tf.int16)), tf.float32)

            each_correct_count = tf.cast(tf.reduce_sum(equal_cnt, axis=0),
                                         tf.float32)
            metrics["each_correct_count"][i] = each_correct_count

        metrics["correct_count"] = sum(
            [metrics["each_correct_count"][i] for i in range(labels.shape[1])])
        return model, prediction, cost_opt, cost_sum, metrics
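The metrics dict exposes raw masked correct counts rather than accuracies. If per-task accuracy were wanted as well, the loop inside build_model could be extended as below; this is a sketch reusing the in-scope names, it assumes mask_label is a float {0, 1} tensor, and n_labeled is a new name that does not appear in the original:

# Sketch: per-task accuracy from the masked counts (belongs inside build_model).
metrics["each_accuracy"] = [None] * labels.shape[1]
for i in range(labels.shape[1]):
    n_labeled = tf.maximum(tf.reduce_sum(mask_label[:, i]), 1.0)  # avoid /0
    metrics["each_accuracy"][i] = metrics["each_correct_count"][i] / n_labeled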
Example #3
def build_model(placeholders, info, config, batch_size=4):
    dragon_dim = info.vector_modal_dim[info.vector_modal_name["dragon"]]
    sequences = placeholders["sequences"]
    sequences_len = placeholders["sequences_len"]
    labels = placeholders["labels"]
    mask = placeholders["mask"]
    dropout_rate = placeholders["dropout_rate"]
    dragon = placeholders["dragon"]
    mask_label = placeholders["mask_label"]
    is_train = placeholders["is_train"]
    enabled_node_nums = placeholders["enabled_node_nums"]
    ###
    ### Mol part
    ###
    layer = dragon
    layer = K.layers.Dense(1000)(layer)
    layer = K.layers.BatchNormalization()(layer)
    layer = tf.nn.relu(layer)

    graph_output_layer = layer
    graph_output_layer_dim = 1000

    ###
    ### Sequence part
    ###
    with tf.variable_scope("seq_nn") as scope_part:
        # Embedding
        embedding_dim = 25

        layer = K.layers.Embedding(info.sequence_symbol_num,
                                   embedding_dim)(sequences)
        # CNN + Pooling. Note: Convolution1D's second positional argument is
        # the kernel size, not the stride, so this is a width-1 convolution
        # followed by a pool of size 1, which leaves the length unchanged.
        window = 1
        layer = K.layers.Convolution1D(500,
                                       window,
                                       padding="same",
                                       activation='relu')(layer)
        layer = K.layers.MaxPooling1D(window)(layer)

        layer = K.layers.Flatten()(layer)
        layer = K.layers.Dense(500)(layer)
        layer = K.layers.BatchNormalization()(layer)
        layer = tf.nn.relu(layer)

        seq_output_layer = layer
        seq_output_layer_dim = 500
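    # Shape walk-through for the sequence tower above (L = padded sequence
    # length): sequences [batch, L] -> Embedding -> [batch, L, 25]
    # -> Conv1D(500, width 1, same padding) -> [batch, L, 500]
    # -> MaxPooling1D(1) (a no-op) -> [batch, L, 500]
    # -> Flatten -> [batch, L*500] -> Dense(500)+BN+ReLU -> [batch, 500]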
    ###
    ### Shared part
    ###
    # 500 dim (Sequence part) + 1000 dim (Mol part)
    layer = tf.concat([seq_output_layer, graph_output_layer], axis=1)
    input_dim = seq_output_layer_dim + graph_output_layer_dim

    layer = K.layers.Dense(100)(layer)
    layer = K.layers.BatchNormalization()(layer)
    layer = tf.nn.relu(layer)

    layer = K.layers.Dense(info.label_dim)(layer)

    # Build the final outputs: one logit head per task
    # shape: [n_tasks × batch × 2], e.g. [12 × 50 × 2]
    logits = mu.multitask_logits(layer, labels.shape[1])
    model = logits
    # Compute the cost: a batch-averaged loss per task (e.g. 12 values)
    task_losses = mu.add_training_loss(logits=logits, label=labels,
                                       pos_weight=info.pos_weight,
                                       batch_size=batch_size,
                                       n_tasks=labels.shape[1],
                                       mask=mask_label)
    total_loss = tf.reduce_sum(task_losses)  # sum of the per-task losses

    ### multi-task loss
    cost_opt = task_losses
    each_cost = task_losses

    # Binary class probabilities: [12 × 50 × 2]
    prediction = mu.add_softmax(logits)
    prediction = tf.transpose(prediction, [1, 0, 2])  # -> [batch, tasks, 2]

    metrics = {}
    cost_sum = total_loss
    # cost_sum = cost_opt
    metrics["each_cost"] = task_losses

    metrics["each_correct_count"] = [None] * labels.shape[1]
    for i in range(labels.shape[1]):
        equal_cnt = mask_label[:, i] * tf.cast(
            tf.equal(tf.cast(tf.argmax(prediction[:, i, :], 1), tf.int16),
                     tf.cast(labels[:, i], tf.int16)), tf.float32)

        each_correct_count = tf.cast(tf.reduce_sum(equal_cnt, axis=0),
                                     tf.float32)
        metrics["each_correct_count"][i] = each_correct_count

    metrics["correct_count"] = sum(
        [metrics["each_correct_count"][i] for i in range(labels.shape[1])])
    return model, prediction, cost_opt, cost_sum, metrics
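The mu helpers are called but never defined in these snippets. Below is a plain-TensorFlow sketch of the behavior that the shape comments imply; these stand-ins are inferred from the [12 × 50 × 2] shapes and are not the library's actual code:

# Hypothetical stand-ins for the mu helpers, inferred from the shape comments.
def multitask_logits_sketch(features, n_tasks):
    # one independent binary head per task -> a list of [batch, 2] logit tensors
    return [K.layers.Dense(2)(features) for _ in range(n_tasks)]

def add_softmax_sketch(logits_list):
    # stack the heads to [n_tasks, batch, 2] and turn logits into probabilities
    return tf.nn.softmax(tf.stack(logits_list, axis=0))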