Example #1
def origin_model():
    sensor_matrix = Input(shape=(num_sensors, num_sensors))
    
    s_input1, extract_cnn1 = resnet_sensor_network(input_shape, repetitions=[2,2,2,2])
    #s_input2, extract_cnn2 = resnet_sensor_network(input_shape, repetitions=[2,2,2,2])
    #s_input3, extract_cnn3 = resnet_sensor_network(input_shape, repetitions=[2,2,2,2])
    #s_input4, extract_cnn4 = resnet_sensor_network(input_shape, repetitions=[2,2,2,2])
    #extract_cnn = Concatenate(axis=1)([extract_cnn1, extract_cnn2, extract_cnn3, extract_cnn4])
        
    #extract_cnn = np.reshape(extract_cnn, (-1,))
    G_1 = GraphConv(256, 'relu')([extract_cnn1, sensor_matrix])
    G_2 = GraphConv(256, 'relu')([G_1, sensor_matrix])
    #gnn_output = tf.split(G_2, num_sensors, 1)
    
    output1 = Dense(32, activation='relu')(Flatten()(G_2))
    output1 = Dense(1, activation='linear', name='sensor_1')(output1)
    
    #output2 = Dense(32, activation='relu')(Flatten()(gnn_output[1]))
    #output2 = Dense(1, activation='linear', name='sensor_2')(output2)
    
    #output3 = Dense(32, activation='relu')(Flatten()(gnn_output[2]))
    #output3 = Dense(1, activation='linear', name='sensor_3')(output3)                                                         
                                                             
    #output4 = Dense(32, activation='relu')(Flatten()(gnn_output[3]))
    #output4 = Dense(1, activation='linear', name='sensor_4')(output4)
    
    model = Model(inputs=[s_input1, sensor_matrix], 
                  outputs= [output1])
    return model
Example #2
 def __init__(self, **kwargs):
     super().__init__(**kwargs)
     self.conv1 = GraphConv(32, activation='elu', kernel_regularizer=l2(l2_reg))
     self.conv2 = GraphConv(32, activation='elu', kernel_regularizer=l2(l2_reg))
     self.flatten = Flatten()
     self.fc1 = Dense(512, activation='relu')
     self.fc2 = Dense(n_out, activation='softmax')
Example #3
def make_embedding(CV, MODEL, DATA, EMBED):
    DATA_FOLD = DATA + f"/FOLD-{CV}"
    if not os.path.exists(EMBED):
        os.mkdir(EMBED)

    graph, features, labels = load_dataset(DATA, DATA_FOLD)
    fltr = GraphConv.preprocess(graph).astype('f4')
    fltr = ops.sp_matrix_to_sp_tensor(fltr)

    X_in = Input((features.shape[1], ))
    fltr_in = Input((features.shape[0], ), sparse=True)
    X_1 = GraphConv(512, 'relu', True,
                    kernel_regularizer=l2(5e-4))([X_in, fltr_in])
    X_1 = Dropout(0.5)(X_1)
    X_2 = GraphConv(256, 'relu', True,
                    kernel_regularizer=l2(5e-4))([X_1, fltr_in])
    X_2 = Dropout(0.5)(X_2)
    X_3 = GraphConv(128, 'relu', True,
                    kernel_regularizer=l2(5e-4))([X_2, fltr_in])
    X_3 = Dropout(0.5)(X_3)
    X_4 = GraphConv(64, 'linear', True,
                    kernel_regularizer=l2(5e-4))([X_3, fltr_in])
    X_5 = Dense(labels.shape[1], use_bias=True)(X_4)

    loaded_model = load_model(f"{MODEL}")
    model_without_task = Model(inputs=[X_in, fltr_in], outputs=X_4)
    model_without_task.set_weights(loaded_model.get_weights()[:8])
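    # Note (not in the original): with use_bias=True, each of the four GraphConv layers
    # above holds a kernel and a bias, so get_weights()[:8] copies exactly their weights.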

    final_node_representations = model_without_task([features, fltr],
                                                    training=False)
    save_embedding(final_node_representations, EMBED, DATA_FOLD, CV)
Example #4
    def build_model(self, N, F, num_outputs):
        X_in = Input(shape=(N, F), name='X_in')
        A_in = Input(shape=(N, N), name='A_in')
        RL_indice = Input(shape=(N,), name='rl_indice_in')

        ### Encoder
        x = Dense(32, activation='relu', name='encoder_1')(X_in)
        x = Dense(32, activation='relu', name='encoder_2')(x)

        ### Graphic convolution

        x = GraphConv(32, activation='relu', name='gcn1')([x, A_in])
        # x = GraphConv(32, activation='relu',name='gcn2')([x, A_in])

        ### Policy network
        x1 = Dense(32, activation='relu', name='policy_1')(x)
        x1 = GraphConv(32, activation='relu', name='gcn2')([x1, A_in])
        x1 = Dense(32, activation='relu', name='policy_add')(x1)
        x2 = Dense(16, activation='relu', name='policy_2')(x1)

        ###  Action and filter
        x3 = Dense(num_outputs, activation='linear', name='policy_3')(x2)
        filt = Reshape((N, 1), name='expend_dim')(RL_indice)
        qout = Multiply(name='filter')([x3, filt])

        model = Model(inputs=[X_in, A_in, RL_indice], outputs=[qout])
        # print(model.summary())
        return model
Example #5
def robot_khop_model():  # input/output = num of sensors
    num_sensors = 9
    input_shape = (84, 84 * 4, 3)
    sensor_matrix1 = Input(shape=(num_sensors + 1, num_sensors + 1))
    sensor_matrix2 = Input(shape=(num_sensors + 1, num_sensors + 1))
    #sensor_matrix3 = Input(shape=(num_sensors, num_sensors))
    r_input = Input(shape=(input_shape[0], input_shape[1], input_shape[2]))
    s_input1 = Input(shape=(input_shape[0], input_shape[1], input_shape[2]))
    s_input2 = Input(shape=(input_shape[0], input_shape[1], input_shape[2]))
    s_input3 = Input(shape=(input_shape[0], input_shape[1], input_shape[2]))
    s_input4 = Input(shape=(input_shape[0], input_shape[1], input_shape[2]))
    s_input5 = Input(shape=(input_shape[0], input_shape[1], input_shape[2]))
    s_input6 = Input(shape=(input_shape[0], input_shape[1], input_shape[2]))
    s_input7 = Input(shape=(input_shape[0], input_shape[1], input_shape[2]))
    s_input8 = Input(shape=(input_shape[0], input_shape[1], input_shape[2]))
    s_input9 = Input(shape=(input_shape[0], input_shape[1], input_shape[2]))

    s_cnn = sensor_cnn(input_shape, repetitions=[2, 2, 2, 2])
    robot_cnn = s_cnn(r_input)
    extract_cnn1 = s_cnn(s_input1)
    extract_cnn2 = s_cnn(s_input2)
    extract_cnn3 = s_cnn(s_input3)
    extract_cnn4 = s_cnn(s_input4)
    extract_cnn5 = s_cnn(s_input5)
    extract_cnn6 = s_cnn(s_input6)
    extract_cnn7 = s_cnn(s_input7)
    extract_cnn8 = s_cnn(s_input8)
    extract_cnn9 = s_cnn(s_input9)

    extract_cnn = Concatenate(axis=1)([
        extract_cnn1, extract_cnn2, extract_cnn3, extract_cnn4, extract_cnn5,
        extract_cnn6, extract_cnn7, extract_cnn8, extract_cnn9, robot_cnn
    ])

    #extract_cnn = np.reshape(extract_cnn, (-1,))
    G_h1 = GraphConv(256, 'relu')([extract_cnn, sensor_matrix1])
    G_h2 = GraphConv(256, 'relu')([extract_cnn, sensor_matrix2])
    G_1 = Concatenate(axis=-1)([G_h1, G_h2])

    G_2h1 = GraphConv(256, 'relu')([G_1, sensor_matrix1])
    G_2h2 = GraphConv(256, 'relu')([G_1, sensor_matrix2])
    G_2 = Concatenate(axis=-1)([G_2h1, G_2h2])

    gnn_output = tf.split(G_2, num_sensors + 1, 1)

    r_output = Dense(64, activation='relu',
                     name='policy_mlp')(Flatten()(gnn_output[-1]))
    output1 = Dense(2, activation='linear', name='robot_loss')(r_output)

    model = Model(inputs=[
        s_input1, s_input2, s_input3, s_input4, s_input5, s_input6, s_input7,
        s_input8, s_input9, r_input, sensor_matrix1, sensor_matrix2
    ],
                  outputs=[output1])
    return model
Example #6
 def __init__(self):
     super(MyModel, self).__init__()
     self.dp1 = Dropout(dropout)
     self.gcn1 = GraphConv(channels,
                      activation='relu',
                      kernel_regularizer=l2(l2_reg),
                      use_bias=False)
     self.dp2 = Dropout(dropout)
     self.gcn2 = GraphConv(n_classes,
                      activation='softmax',
                      use_bias=False)
Example #7
 def create_model(self):
     X_in = Input((self.features.shape[1],))
     fltr_in = Input((self.features.shape[0],), sparse=True)
     X_1 = GraphConv(512, 'relu', True, kernel_regularizer=l2(5e-4))([X_in, fltr_in])
     X_1 = Dropout(0.5)(X_1)
     X_2 = GraphConv(256, 'relu', True, kernel_regularizer=l2(5e-4))([X_1, fltr_in])
     X_2 = Dropout(0.5)(X_2)
     X_3 = GraphConv(128, 'relu', True, kernel_regularizer=l2(5e-4))([X_2, fltr_in])
     X_3 = Dropout(0.5)(X_3)
     X_4 = GraphConv(64, 'linear', True, kernel_regularizer=l2(5e-4))([X_3, fltr_in])
     X_5 = Dense(GCN.labels.shape[1], use_bias=True)(X_4)
     return Model(inputs=[X_in, fltr_in], outputs=X_5)
Example #8
def Model_treeGCN_softmax_1(node_count,
                            wordvocabsize,
                            w2v_k,
                            word_W,
                            l2_reg=5e-4):
    X_word_in = Input(shape=(node_count, ), dtype='int32')
    # fltr_in = Input(shape=(node_count, node_count), sparse=True)
    fltr_in = Input(tensor=sp_matrix_to_sp_tensor(fltr))

    word_embedding_layer = Embedding(input_dim=wordvocabsize + 1,
                                     output_dim=w2v_k,
                                     input_length=node_count,
                                     mask_zero=True,
                                     trainable=True,
                                     weights=[word_W])
    word_embedding_x = word_embedding_layer(X_word_in)
    word_embedding_x = Dropout(0.25)(word_embedding_x)

    graph_conv_1 = GraphConv(200,
                             activation='relu',
                             kernel_regularizer=l2(l2_reg),
                             use_bias=True)([word_embedding_x, fltr_in])
    dropout_1 = Dropout(0.5)(graph_conv_1)
    graph_conv_2 = GraphConv(200,
                             activation='relu',
                             kernel_regularizer=l2(l2_reg),
                             use_bias=True)([dropout_1, fltr_in])
    dropout_2 = Dropout(0.5)(graph_conv_2)

    feature_node0 = Lambda(lambda x: x[:, 0])(dropout_2)

    # pool = GlobalAttentionPool(200)(dropout_2)

    flatten = Flatten()(dropout_2)
    fc = Dense(512, activation='relu')(flatten)
    fc = Dropout(0.5)(fc)

    # LSTM_backward = LSTM(200, activation='tanh', return_sequences=False,
    #                      go_backwards=True, dropout=0.5)(dropout_2)

    # present_node0 = concatenate([feature_node0, LSTM_backward], axis=-1)
    class_output = Dense(120)(fc)
    class_output = Activation('softmax', name='CLASS')(class_output)

    # Build model
    model = Model(inputs=[X_word_in, fltr_in], outputs=class_output)
    optimizer = Adam(lr=0.001)
    model.compile(optimizer=optimizer,
                  loss='categorical_crossentropy',
                  weighted_metrics=['acc'])
    return model
Example #9
    def conv_layer(row, col, x):
        conv = tf.keras.layers.Conv1D(hidden_dim * 2,
                                      5,
                                      padding='same',
                                      activation='tanh',
                                      input_shape=(row, col))(x)

        gcn_1 = GraphConv(
            graph_channels,
            activation='tanh',
        )([conv, As_in[:, :, :, 1]])

        gcn_2 = ChebConv(
            graph_channels,
            activation='tanh',
        )([conv, As_in[:, :, :, 1]])

        gcn_3 = ARMAConv(
            graph_channels,
            activation='tanh',
        )([conv, As_in[:, :, :, 1]])

        gcn_1 = tf.keras.layers.Concatenate()([gcn_1, gcn_2, gcn_3])
        gcn_1 = tf.keras.layers.Conv1D(3 * graph_channels,
                                       5,
                                       padding='same',
                                       activation='tanh',
                                       input_shape=(row, col))(gcn_1)

        conv = tf.keras.layers.Concatenate()([x, conv, gcn_1, gcn_2, gcn_3])
        conv = tf.keras.layers.Activation("relu")(conv)
        conv = tf.keras.layers.SpatialDropout1D(0.1)(conv)

        return conv
Example #10
    def build_model(self, N, F, num_outputs):

        X_in = Input(shape=(N, F), name='X_in')
        A_in = Input(shape=(N, N), name='A_in')
        RL_indice = Input(shape=(N,), name='rl_indice_in')

        ### Encoder
        x = Dense(32, activation='relu', name='encoder_1')(X_in)
        x = Dense(32, activation='relu', name='encoder_2')(x)

        ### Graphic convolution

        x1 = GraphConv(32, activation='relu', name='gcn1')([x, A_in])
        x1 = Dense(32, activation='relu', name='post_gcn_1')(x1)

        # x2 = GraphConv(32, activation='relu',name='gcn2')([x1, A_in])
        # x2 = Dense(32,activation='relu',name='post_gcn_2')(x2)

        ###  Action and filter
        x3 = Concatenate()([x, x1])
        x3 = Dense(64, activation='relu', name='policy_1')(x3)
        x3 = Dense(32, activation='relu', name='policy_2')(x3)

        x3 = Dense(num_outputs, activation='linear', name='policy_output')(x3)
        mask = Reshape((N, 1), name='expend_dim')(RL_indice)
        qout = Multiply(name='filter')([x3, mask])

        model = Model(inputs=[X_in, A_in, RL_indice], outputs=[qout])
        print(model.summary())
        return model
Example #11
def pose_model():
    sensor_matrix = Input(shape=(num_sensors, num_sensors))
    
    s_input1, extract_cnn1 = resnet_sensor_network(input_shape, repetitions=[2,2,2,2])
    s_input2, extract_cnn2 = resnet_sensor_network(input_shape, repetitions=[2,2,2,2])
    s_input3, extract_cnn3 = resnet_sensor_network(input_shape, repetitions=[2,2,2,2])
    s_input4, extract_cnn4 = resnet_sensor_network(input_shape, repetitions=[2,2,2,2])
    
    pose_input1 = Input(shape=(2,))
    s_pose1 = Dense(64, activation='relu')(pose_input1)
    pose_input2 = Input(shape=(2,))
    s_pose2 = Dense(64, activation='relu')(pose_input2)
    pose_input3 = Input(shape=(2,))
    s_pose3 = Dense(64, activation='relu')(pose_input3)
    pose_input4 = Input(shape=(2,))
    s_pose4 = Dense(64, activation='relu')(pose_input4)
    
    extract_cnn1 = Concatenate(axis=-1)([extract_cnn1, Reshape((1,s_pose1.shape[-1]))(s_pose1)])
    extract_cnn2 = Concatenate(axis=-1)([extract_cnn2, Reshape((1,s_pose2.shape[-1]))(s_pose2)])
    extract_cnn3 = Concatenate(axis=-1)([extract_cnn3, Reshape((1,s_pose3.shape[-1]))(s_pose3)])
    extract_cnn4 = Concatenate(axis=-1)([extract_cnn4, Reshape((1,s_pose4.shape[-1]))(s_pose4)])
    extract_cnn = Concatenate(axis=1)([extract_cnn1, extract_cnn2, extract_cnn3, extract_cnn4])
      
    #extract_cnn = np.reshape(extract_cnn, (-1,))
    G_1 = GraphConv(256, 'relu')([extract_cnn, sensor_matrix])
    G_2 = GraphConv(256, 'relu')([G_1, sensor_matrix])
    gnn_output = tf.split(G_2, num_sensors, 1)
    
    output1 = Dense(32, activation='relu')(Flatten()(gnn_output[0]))
    output1 = Dense(1, activation='linear', name='sensor_1')(output1)
    
    output2 = Dense(32, activation='relu')(Flatten()(gnn_output[1]))
    output2 = Dense(1, activation='linear', name='sensor_2')(output2)
    
    output3 = Dense(32, activation='relu')(Flatten()(gnn_output[2]))
    output3 = Dense(1, activation='linear', name='sensor_3')(output3)                                                         
                                                             
    output4 = Dense(32, activation='relu')(Flatten()(gnn_output[3]))
    output4 = Dense(1, activation='linear', name='sensor_4')(output4)
    
    model = Model(inputs=[s_input1, s_input2, s_input3, s_input4, 
                          s_pose1, s_pose2, s_pose3, s_pose4,
                          sensor_matrix],  
                  outputs= [output1,output2,output3,output4])
    return model
Example #12
def make_discriminator(name, s, adj, node_f, use_gcn=True, use_gru=True):
    n = node_f.shape[0]  # number of nodes
    input_s = Input(shape=(s, n))
    input_f = Input(shape=(n, node_f.shape[1]))
    input_g = Input(shape=(n, n))
    if use_gcn:
        gcov1 = GraphConv(2 * base)([input_f, input_g])
        # gcov2 = GraphConv(base)([gcov1, input_g])
        input_s1 = Dot(axes=(2, 1))(
            [input_s, gcov1])  # dot product: element by element multiply
    else:
        input_s1 = input_s
    fc1 = Dense(4 * base, activation='relu', input_shape=(n, ))(input_s1)
    fc2 = Dense(8 * base, activation='relu', input_shape=(n, ))(fc1)
    # S*D2

    if use_gru:
        rnn1 = Dropout(dropout)(CuDNNGRU(2 * base, return_sequences=True)(fc2))
    else:
        rnn1 = fc2
    fc3 = Dense(16 * base, activation='relu', input_shape=(n, ))(rnn1)
    out = Dense(1)(Flatten()(fc3))
    return Model(name=name, inputs=[input_s, input_f, input_g], outputs=out)
Example #13
def khop_model_distribute(num_sensors=10):  # input/output = num of sensors
    gnn_unit = 128
    sensor_matrix1 = Input(shape=(num_sensors, num_sensors))
    sensor_matrix2 = Input(shape=(num_sensors, num_sensors))
    sensor_matrix3 = Input(shape=(num_sensors, num_sensors))
    sensor_matrix4 = Input(shape=(num_sensors, num_sensors))
    #sensor_matrix3 = Input(shape=(num_sensors, num_sensors))
    s_input1 = Input(shape=(input_shape[0], input_shape[1], input_shape[2]))
    s_input2 = Input(shape=(input_shape[0], input_shape[1], input_shape[2]))
    s_input3 = Input(shape=(input_shape[0], input_shape[1], input_shape[2]))
    s_input4 = Input(shape=(input_shape[0], input_shape[1], input_shape[2]))
    s_input5 = Input(shape=(input_shape[0], input_shape[1], input_shape[2]))
    s_input6 = Input(shape=(input_shape[0], input_shape[1], input_shape[2]))
    s_input7 = Input(shape=(input_shape[0], input_shape[1], input_shape[2]))
    s_input8 = Input(shape=(input_shape[0], input_shape[1], input_shape[2]))
    s_input9 = Input(shape=(input_shape[0], input_shape[1], input_shape[2]))
    s_input10 = Input(shape=(input_shape[0], input_shape[1], input_shape[2]))

    s_cnn = sensor_cnn(input_shape, repetitions=[2, 2, 2, 2])
    extract_cnn1 = s_cnn(s_input1)
    extract_cnn2 = s_cnn(s_input2)
    extract_cnn3 = s_cnn(s_input3)
    extract_cnn4 = s_cnn(s_input4)
    extract_cnn5 = s_cnn(s_input5)
    extract_cnn6 = s_cnn(s_input6)
    extract_cnn7 = s_cnn(s_input7)
    extract_cnn8 = s_cnn(s_input8)
    extract_cnn9 = s_cnn(s_input9)
    extract_cnn10 = s_cnn(s_input10)

    extract_cnn = Concatenate(axis=1)([
        extract_cnn1, extract_cnn2, extract_cnn3, extract_cnn4, extract_cnn5,
        extract_cnn6, extract_cnn7, extract_cnn8, extract_cnn9, extract_cnn10
    ])

    #extract_cnn = np.reshape(extract_cnn, (-1,))
    G_h1 = GraphConv(gnn_unit, 'selu')([extract_cnn, sensor_matrix1])
    G_h2 = GraphConv(gnn_unit, 'selu')([extract_cnn, sensor_matrix2])
    G_h3 = GraphConv(gnn_unit, 'selu')([extract_cnn, sensor_matrix3])
    G_h4 = GraphConv(gnn_unit, 'selu')([extract_cnn, sensor_matrix4])
    G_1 = Concatenate(axis=-1)([G_h1, G_h2, G_h3, G_h4])

    G_2h1 = GraphConv(gnn_unit, 'selu')([G_1, sensor_matrix1])
    G_2h2 = GraphConv(gnn_unit, 'selu')([G_1, sensor_matrix2])
    G_2h3 = GraphConv(gnn_unit, 'selu')([G_1, sensor_matrix3])
    G_2h4 = GraphConv(gnn_unit, 'selu')([G_1, sensor_matrix4])
    G_2 = Concatenate(axis=-1)([G_2h1, G_2h2, G_2h3, G_2h4])

    #G_3h1 = GraphConv(gnn_unit, 'selu')([G_2, sensor_matrix1])
    #G_3h2 = GraphConv(gnn_unit, 'selu')([G_2, sensor_matrix2])
    #G_3h3 = GraphConv(gnn_unit, 'selu')([G_2, sensor_matrix3])
    #G_3h4 = GraphConv(gnn_unit, 'selu')([G_2, sensor_matrix4])
    #G_3 = Concatenate(axis=-1)([G_3h1, G_3h2, G_3h3, G_3h4])

    #G_4h1 = GraphConv(gnn_unit, 'selu')([G_3, sensor_matrix1])
    #G_4h2 = GraphConv(gnn_unit, 'selu')([G_3, sensor_matrix2])
    #G_4h3 = GraphConv(gnn_unit, 'selu')([G_3, sensor_matrix3])
    #G_4h4 = GraphConv(gnn_unit, 'selu')([G_3, sensor_matrix4])
    #G_4 = Concatenate(axis=-1)([G_4h1, G_4h2, G_4h3, G_4h4])

    gnn_output = tf.split(G_2, num_sensors, 1)

    mlp_layer = mlp_model()

    output1 = mlp_layer(Flatten()(gnn_output[0]))
    output2 = mlp_layer(Flatten()(gnn_output[1]))
    output3 = mlp_layer(Flatten()(gnn_output[2]))
    output4 = mlp_layer(Flatten()(gnn_output[3]))
    output5 = mlp_layer(Flatten()(gnn_output[4]))
    output6 = mlp_layer(Flatten()(gnn_output[5]))
    output7 = mlp_layer(Flatten()(gnn_output[6]))
    output8 = mlp_layer(Flatten()(gnn_output[7]))
    output9 = mlp_layer(Flatten()(gnn_output[8]))
    output10 = mlp_layer(Flatten()(gnn_output[9]))

    model = Model(inputs=[
        s_input1, s_input2, s_input3, s_input4, s_input5, s_input6, s_input7,
        s_input8, s_input9, s_input10, sensor_matrix1, sensor_matrix2,
        sensor_matrix3, sensor_matrix4
    ],
                  outputs=[
                      output1, output2, output3, output4, output5, output6,
                      output7, output8, output9, output10
                  ])
    return model
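# mlp_model() is not defined in this snippet; note that the single mlp_layer instance is
# reused for all ten sensors, so the per-sensor heads share weights. A plausible stand-in,
# mirroring the Dense(32) -> Dense(2) heads of the related examples in this listing, could
# look like this (purely illustrative, not the author's code; assumes the same Keras imports):
def mlp_model():
    mlp_in = Input(shape=(4 * 128,))  # Flatten of one node: 4 GraphConv heads x gnn_unit=128
    h = Dense(32, activation='relu')(mlp_in)
    mlp_out = Dense(2, activation='linear')(h)
    return Model(inputs=mlp_in, outputs=mlp_out)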
Example #14
# Parameters
channels = 256
learning_rate = 1e-2
epochs = 200
es_patience = 200
F = X.shape[1]
n_classes = y.shape[1]

# Preprocessing operations
fltr = GraphConv.preprocess(A).astype('f4')

# Model definition
X_in = Input(shape=(F, ))
fltr_in = Input((N, ), sparse=True)
graph_conv_1 = GraphConv(channels, activation='relu')([X_in, fltr_in])
graph_conv_2 = GraphConv(channels, activation='relu')([graph_conv_1, fltr_in])
graph_conv_3 = GraphConv(n_classes)([graph_conv_2, fltr_in])

# Build model
model = Model(inputs=[X_in, fltr_in], outputs=graph_conv_3)
optimizer = Adam(lr=learning_rate)
model.compile(optimizer=optimizer, loss=BinaryCrossentropy(from_logits=True))
model.summary()

# Train model
for i in tqdm(range(1, 1 + epochs)):
    tr_loss = model.train_on_batch([X, fltr], y, sample_weight=tr_mask)
    tr_auc, va_auc, te_auc = evaluate(X, fltr, y, model, masks, evaluator)
    tqdm.write(
        'Ep. {} - Loss: {:.3f} - AUC: {:.3f} - Val AUC: {:.3f} - Test AUC: {:.3f}'
        .format(i, tr_loss, tr_auc, va_auc, te_auc))
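
# For reference (not part of the original script): GraphConv.preprocess(A) returns the
# renormalized GCN propagation matrix D^-1/2 (A + I) D^-1/2. A rough scipy sketch of the
# same idea, not Spektral's actual implementation:
import numpy as np
import scipy.sparse as sp

def gcn_filter(A):
    A_hat = A + sp.eye(A.shape[0])               # add self-loops
    d = np.asarray(A_hat.sum(axis=1)).flatten()  # node degrees of A + I
    with np.errstate(divide='ignore'):
        d_inv_sqrt = np.power(d, -0.5)
    d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.0       # isolated nodes get weight 0
    D_inv_sqrt = sp.diags(d_inv_sqrt)
    return D_inv_sqrt @ A_hat @ D_inv_sqrt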
Example #15
def main(time_train,
         epochs,
         C_i,
         C_o,
         learning_rate,
         kernel,
         regenerate=False):
    os.chdir(os.path.split(os.path.dirname(os.path.realpath(__file__)))[0])

    if (not os.path.exists('/media/data6TB/spandan/data.p')) or regenerate:
        from data_utils import load_data
        X, A, E = load_data(
            DATASET='data-all.json', R=300, SIGMA=1
        )  # Shapes: (171, 640, 3) (171, 640, 640) (171, 640, 640, 1)
        with open('/media/data6TB/spandan/data.p', 'wb') as pkl:
            pickle.dump((X, A, E), pkl)

    else:
        with open('/media/data6TB/spandan/data.p', 'rb') as pkl:
            X, A, E = pickle.load(pkl)

    districts = A.shape[1]

    # Inputs
    X_in = Input(shape=(districts, C_i), batch_size=time_train)
    E_in = Input(shape=(districts, districts, 1), batch_size=time_train)
    A_in = Input(shape=(districts, districts), batch_size=time_train)

    # Block
    X_i0 = tf.transpose(tf.expand_dims(X_in, axis=0), perm=[0, 2, 1, 3])
    l1 = GLU(filters=2 * C_o, kernelsize=kernel)(X_i0)
    X_i1 = tf.squeeze(tf.transpose(l1, perm=[0, 2, 1, 3]))
    E_i1 = E_in[:X_i1.shape[0], :, :, :]
    A_i1 = A_in[:X_i1.shape[0], :, :]
    l1 = GraphConv(channels=C_i, activation='relu')([X_i1, A_i1, E_i1])
    l1 = tf.expand_dims(tf.transpose(l1, perm=[1, 0, 2]), axis=0)
    l1 = GLU(filters=2 * C_o, kernelsize=kernel)(l1)

    # Block
    l2 = GLU(filters=2 * C_o, kernelsize=kernel)(l1)
    X_i2 = tf.squeeze(tf.transpose(l2, perm=[0, 2, 1, 3]))
    E_i2 = E_in[:X_i2.shape[0], :, :, :]
    A_i2 = A_in[:X_i2.shape[0], :, :]
    l2 = GraphConv(channels=C_i, activation='relu')([X_i2, A_i2, E_i2])
    l2 = tf.expand_dims(tf.transpose(l2, perm=[1, 0, 2]), axis=0)
    l2 = GLU(filters=2 * C_o, kernelsize=kernel)(l2)

    # Output layer
    l3 = GLU(filters=2 * C_i, kernelsize=(time_train - 4 * (kernel - 1)))(l2)
    X_i3 = tf.squeeze(tf.transpose(l3, perm=[0, 2, 1, 3]))
    final_output = nstack(Dense(C_i)(X_i3), time_train)

    model = Model(inputs=[X_in, E_in, A_in], outputs=final_output)
    optimizer = RMSprop(learning_rate=learning_rate)
    model.compile(optimizer=optimizer,
                  loss='mean_squared_error',
                  weighted_metrics=['acc'])
    model.summary()

    X_input = X[:time_train, :, :]
    E_input = E[:time_train, :, :, :]
    A_input = localpooling_filter((A[:time_train, :, :]).numpy(),
                                  symmetric=True)
    output = nstack(tf.squeeze(X[time_train, :, :]), time_train)

    model.fit([X_input, E_input, A_input],
              output,
              shuffle=False,
              epochs=epochs)
Example #16
from spektral.layers import GraphConv
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Dropout
from tensorflow.keras.regularizers import l2

dropout = 0.5
channels = 16
l2_reg = 5e-4 / 2

# Preprocessing operations
fltr = GraphConv.preprocess(A).astype('f4')
X = X.toarray()

X_in = Input(shape=(F, ))
fltr_in = Input((N, ), sparse=True)

dropout_1 = Dropout(dropout)(X_in)
graph_conv_1 = GraphConv(channels,
                         activation='relu',
                         kernel_regularizer=l2(l2_reg),
                         use_bias=False)
# dropout_2 = Dropout(dropout)(graph_conv_1)
# graph_conv_2 = GraphConv(n_classes,
#                          activation='softmax',
#                          use_bias=False)([dropout_2, fltr_in])
# Build model
# model = Model(inputs=[X_in, fltr_in], outputs=graph_conv_2)

print(graph_conv_1([X, fltr]))
# print(output)
Example #17
    def _model_builder_gcn(self):
        gc1_channels = 32
        gc2_channels = 64
        full_latent_space = len(self._radius) * self.latent_space

        # Input
        fltr_in = Input(shape=(self.N, self.N), name='fltr_in')
        nf_in = Input(shape=(self.N, self.F), name='nf_in')
        z_in = Input(shape=(full_latent_space, ), name='z_in')

        # Encoder
        gc1 = GraphConv(gc1_channels,
                        kernel_regularizer=l2(self.l2_reg),
                        name='gc1')([nf_in, fltr_in])
        bn1 = BatchNormalization()(gc1)
        relu1 = Activation('relu')(bn1)
        do1 = Dropout(self.dropout_rate)(relu1)

        gc2 = GraphConv(gc2_channels,
                        kernel_regularizer=l2(self.l2_reg),
                        name='gc2')([do1, fltr_in])
        bn2 = BatchNormalization()(gc2)
        relu2 = Activation('relu')(bn2)
        do2 = Dropout(self.dropout_rate)(relu2)

        pool = GlobalAttentionPool(128, name='attn_pool')(do2)

        z_enc_list = []
        z_clip_list = []
        for _r in self._radius:
            z_1 = Dense(128, activation='relu')(pool)
            z_2 = Dense(self.latent_space, activation='linear')(z_1)
            z_3 = CCMProjection(_r)(z_2)
            z_enc_list.append(z_2)
            z_clip_list.append(z_3)

        if len(self._radius) > 1:
            z_enc = Concatenate(name='z_enc')(z_enc_list)
            z_clip = Concatenate(name='z_clip')(z_clip_list)
        else:
            z_enc = z_enc_list[0]
            z_clip = z_clip_list[0]

        # Decoder
        dense3 = Dense(128)(z_in)
        bn3 = BatchNormalization()(dense3)
        relu3 = Activation('relu')(bn3)

        dense4 = Dense(256)(relu3)
        bn4 = BatchNormalization()(dense4)
        relu4 = Activation('relu')(bn4)

        dense5 = Dense(512)(relu4)
        bn5 = BatchNormalization()(dense5)
        relu5 = Activation('relu')(bn5)

        # Output
        adj_out_pre = Dense(self.N * self.N, activation='sigmoid')(relu5)
        adj_out = Reshape((self.N, self.N), name='adj_out')(adj_out_pre)

        nf_out_pre = Dense(self.N * self.F, activation='linear')(relu5)
        nf_out = Reshape((self.N, self.F), name='nf_out')(nf_out_pre)

        # Build models
        encoder = Model(inputs=[fltr_in, nf_in], outputs=z_enc)
        clipper = Model(inputs=[fltr_in, nf_in], outputs=z_clip)
        decoder = Model(inputs=z_in, outputs=[adj_out, nf_out])
        model = Model(inputs=[fltr_in, nf_in], outputs=decoder(clipper.output))
        model.output_names = ['adj', 'nf']

        return model, encoder, decoder, clipper
Example #18
A_in = Input(batch_shape=(None, None), sparse=True)
I_in = Input(batch_shape=(None, 1), dtype='int64')

# The inputs will have an arbitrary dimension, while the targets consist of
# batch_size values.
# However, Keras expects the inputs to have the same dimension as the output.
# This is a hack in Tensorflow to bypass the requirements of Keras.
# We use a dynamically initialized tf.Dataset to feed the target values to the
# model at training time.
target_ph = tf.placeholder(tf.float32, shape=(None, 1))
target_data = tf.data.Dataset.from_tensor_slices(target_ph)
target_data = target_data.batch(batch_size)
target_iter = target_data.make_initializable_iterator()
target = target_iter.get_next()

gc1 = GraphConv(64, activation='relu')([X_in, A_in])
gc2 = GraphConv(64, activation='relu')([gc1, A_in])
pool = GlobalAvgPool()([gc2, I_in])
dense1 = Dense(64, activation='relu')(pool)
output = Dense(n_out)(dense1)

# Build model
model = Model(inputs=[X_in, A_in, I_in], outputs=output)
optimizer = Adam(lr=learning_rate)
model.compile(optimizer=optimizer, loss='mse', target_tensors=target)
model.summary()

# Training setup
sess = K.get_session()
batches_train = batch_iterator([A_train, X_train, y_train], batch_size=batch_size, epochs=epochs)
loss = 0
Example #19
X_train, y_train, X_val, y_val, X_test, y_test, adj = mnist.load_data()
X_train, X_val, X_test = X_train[..., None], X_val[..., None], X_test[..., None]
N = X_train.shape[-2]      # Number of nodes in the graphs
F = X_train.shape[-1]      # Node features dimensionality
n_out = y_train.shape[-1]  # Dimension of the target

fltr = normalized_laplacian(adj)

# Model definition
X_in = Input(shape=(N, F))
# Pass A as a fixed tensor, otherwise Keras will complain about inputs of
# different rank.
A_in = Input(tensor=sp_matrix_to_sp_tensor(fltr))

graph_conv = GraphConv(32,
                       activation='elu',
                       kernel_regularizer=l2(l2_reg),
                       use_bias=True)([X_in, A_in])
graph_conv = GraphConv(32,
                       activation='elu',
                       kernel_regularizer=l2(l2_reg),
                       use_bias=True)([graph_conv, A_in])
flatten = Flatten()(graph_conv)
fc = Dense(512, activation='relu')(flatten)
output = Dense(n_out, activation='softmax')(fc)

# Build model
model = Model(inputs=[X_in, A_in], outputs=output)
optimizer = Adam(lr=learning_rate)
model.compile(optimizer=optimizer,
              loss='sparse_categorical_crossentropy',
              metrics=['acc'])
Example #20
X, A, _, y = ogb.dataset_to_numpy(dataset, dtype='f8')
A = [a.toarray() for a in A]
F = X[0].shape[-1]
X = pad_jagged_array(X, (N, F))
A = pad_jagged_array(A, (N, N))
X_tr, A_tr, y_tr = X[tr_idx], A[tr_idx], y[tr_idx]
X_va, A_va, y_va = X[va_idx], A[va_idx], y[va_idx]
X_te, A_te, y_te = X[te_idx], A[te_idx], y[te_idx]

################################################################################
# BUILD MODEL
################################################################################
X_in = Input(shape=(N, F))
A_in = Input(shape=(N, N))

X_1 = GraphConv(32, activation='relu')([X_in, A_in])
X_1, A_1 = MinCutPool(N // 2)([X_1, A_in])
X_2 = GraphConv(32, activation='relu')([X_1, A_1])
X_3 = GlobalSumPool()(X_2)
output = Dense(n_out)(X_3)

# Build model
model = Model(inputs=[X_in, A_in], outputs=output)
opt = Adam(lr=learning_rate)
model.compile(optimizer=opt, loss='mse')
model.summary()

################################################################################
# FIT MODEL
################################################################################
model.fit([X_tr, A_tr],
Example #21
# Preprocessing operations
fltr = localpooling_filter(A).astype('f4')
X = X.toarray()
Ai = A.toarray()
fltr1 = fltr.toarray()

# Pre-compute propagation
for i in range(K - 1):
    fltr = fltr.dot(fltr)
fltr.sort_indices()

# Model definition
X_in = Input(shape=(F, ))
fltr_in = Input((N, ), sparse=True)
output = GraphConv(n_classes,
                   activation='softmax',
                   kernel_regularizer=l2(l2_reg),
                   use_bias=False)([X_in, fltr_in])

# Build model
model = Model(inputs=[X_in, fltr_in], outputs=output)
optimizer = Adam(lr=learning_rate)
model.compile(optimizer=optimizer,
              loss='categorical_crossentropy',
              weighted_metrics=['acc'])
model.summary()

# Train model
validation_data = ([X, fltr], y, val_mask)
model.fit(
    [X, fltr],
    y,
Example #22
from tensorflow.keras.regularizers import l2

from spektral.datasets import citation
from spektral.layers import GraphConv, ops
from spektral.utils import tic, toc

# Load data
A, X, y, train_mask, val_mask, test_mask = citation.load_data('cora')
fltr = GraphConv.preprocess(A).astype('f4')
fltr = ops.sp_matrix_to_sp_tensor(fltr)
X = X.toarray()

# Define model
X_in = Input(shape=(X.shape[1],))
fltr_in = Input((X.shape[0],), sparse=True)
X_1 = GraphConv(16, 'relu', True, kernel_regularizer=l2(5e-4))([X_in, fltr_in])
X_1 = Dropout(0.5)(X_1)
X_2 = GraphConv(y.shape[1], 'softmax', True)([X_1, fltr_in])

# Build model
model = Model(inputs=[X_in, fltr_in], outputs=X_2)
optimizer = Adam(lr=1e-2)
model.compile(optimizer=optimizer, loss='categorical_crossentropy')
loss_fn = model.loss_functions[0]


# Training step
@tf.function
def train():
    with tf.GradientTape() as tape:
        predictions = model([X, fltr], training=True)
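        # Hypothetical completion of the truncated training step above (not the original
        # code): mask the loss to the training nodes and add the l2() regularization terms.
        loss = loss_fn(tf.boolean_mask(y, train_mask),
                       tf.boolean_mask(predictions, train_mask))
        loss += sum(model.losses)
    gradients = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))
    return loss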
Example #23
def khop_model_share(): # input/output = num of sensors 
    sensor_matrix1 = Input(shape=(num_sensors, num_sensors))
    sensor_matrix2 = Input(shape=(num_sensors, num_sensors))
    #sensor_matrix3 = Input(shape=(num_sensors, num_sensors))
    s_input1 = Input(shape=(input_shape[0], input_shape[1], input_shape[2]))
    s_input2 = Input(shape=(input_shape[0], input_shape[1], input_shape[2]))
    s_input3 = Input(shape=(input_shape[0], input_shape[1], input_shape[2]))
    s_input4 = Input(shape=(input_shape[0], input_shape[1], input_shape[2]))
    s_input5 = Input(shape=(input_shape[0], input_shape[1], input_shape[2]))
    s_input6 = Input(shape=(input_shape[0], input_shape[1], input_shape[2]))
    s_input7 = Input(shape=(input_shape[0], input_shape[1], input_shape[2]))
    s_input8 = Input(shape=(input_shape[0], input_shape[1], input_shape[2]))
    s_input9 = Input(shape=(input_shape[0], input_shape[1], input_shape[2]))
    
    s_cnn = sensor_cnn(input_shape, repetitions = [2,2,2,2])
    extract_cnn1 = s_cnn(s_input1)
    extract_cnn2 = s_cnn(s_input2)
    extract_cnn3 = s_cnn(s_input3)
    extract_cnn4 = s_cnn(s_input4)
    extract_cnn5 = s_cnn(s_input5)
    extract_cnn6 = s_cnn(s_input6)
    extract_cnn7 = s_cnn(s_input7)
    extract_cnn8 = s_cnn(s_input8)
    extract_cnn9 = s_cnn(s_input9)
    
    extract_cnn = Concatenate(axis=1)([extract_cnn1, extract_cnn2, extract_cnn3, 
                                       extract_cnn4, extract_cnn5, extract_cnn6,
                                       extract_cnn7, extract_cnn8, extract_cnn9])
        
    #extract_cnn = np.reshape(extract_cnn, (-1,))
    G_h1 = GraphConv(256, 'relu')([extract_cnn, sensor_matrix1])
    G_h2 = GraphConv(256, 'relu')([extract_cnn, sensor_matrix2])
    G_1 = Concatenate(axis=-1)([G_h1, G_h2])
  
    G_2h1 = GraphConv(256, 'relu')([G_1, sensor_matrix1])
    G_2h2 = GraphConv(256, 'relu')([G_1, sensor_matrix2])
    G_2 = Concatenate(axis=-1)([G_2h1, G_2h2])
    
    gnn_output = tf.split(G_2, num_sensors, 1)
    
    output1 = Dense(32, activation='relu')(Flatten()(gnn_output[0]))
    output1 = Dense(2, activation='linear', name='sensor_1')(output1)
    
    output2 = Dense(32, activation='relu')(Flatten()(gnn_output[1]))
    output2 = Dense(2, activation='linear', name='sensor_2')(output2)
    
    output3 = Dense(32, activation='relu')(Flatten()(gnn_output[2]))
    output3 = Dense(2, activation='linear', name='sensor_3')(output3)                                                         
                                                             
    output4 = Dense(32, activation='relu')(Flatten()(gnn_output[3]))
    output4 = Dense(2, activation='linear', name='sensor_4')(output4)
    
    output5 = Dense(32, activation='relu')(Flatten()(gnn_output[4]))
    output5 = Dense(2, activation='linear', name='sensor_5')(output5)

    output6 = Dense(32, activation='relu')(Flatten()(gnn_output[5]))
    output6 = Dense(2, activation='linear', name='sensor_6')(output6)
    
    output7 = Dense(32, activation='relu')(Flatten()(gnn_output[6]))
    output7 = Dense(2, activation='linear', name='sensor_7')(output7)

    output8 = Dense(32, activation='relu')(Flatten()(gnn_output[7]))
    output8 = Dense(2, activation='linear', name='sensor_8')(output8)
    
    output9 = Dense(32, activation='relu')(Flatten()(gnn_output[8]))
    output9 = Dense(2, activation='linear', name='sensor_9')(output9)
    
    model = Model(inputs=[s_input1, s_input2, s_input3, s_input4,
                          s_input5, s_input6, s_input7, s_input8, s_input9,
                          sensor_matrix1, sensor_matrix2], 
                  outputs= [output1,output2,output3,output4,
                            output5,output6,output7,output8,output9])
    return model
Example #24
File: estimator.py  Project: cgarciae/nfl
def get_model(params):

    # Model definition
    X_in = Input(shape=params["X_shape"])
    A_in = Input(shape=params["A_shape"])
    # E_in = Input(shape=params["E_shape"])
    # aux_in = Input(shape=params["aux_shape"])

    net = X_in
    A_exp = layers.Lambda(lambda x: K.expand_dims(x, axis=-1))(A_in)

    ################################
    # block
    ################################

    net = RelationalDense(32)([net, A_exp])
    # net = layers.BatchNormalization()(net)
    net = layers.Activation("relu")(net)
    net = MaxEdges()(net)
    # net = EdgeConditionedConv(32)([X_in, A_in, E_in])
    net = GraphConv(32)([net, A_in])
    net = layers.BatchNormalization()(net)
    net = layers.Activation("relu")(net)

    ################################
    # block
    ################################

    # net = RelationalDense(64)([net, A_exp])
    # # net = layers.BatchNormalization()(net)
    # net = layers.Activation("relu")(net)
    # net = MaxEdges()(net)
    # # net = EdgeConditionedConv(64)([net, A_in, E_in])
    # net = GraphConv(128)([net, A_in])
    # net = layers.BatchNormalization()(net)
    # net = layers.Activation("relu")(net)

    ################################
    # pooling
    ################################

    net = GlobalAttentionPool(128)(net)
    # net = GlobalMaxPool()(net)
    net = layers.Dropout(0.5)(net)

    ################################
    # block
    ################################

    # concat = Concatenate()([dense1, aux_in])
    net = Dense(64)(net)
    net = layers.BatchNormalization()(net)
    net = layers.Activation("relu")(net)

    ################################
    # block
    ################################

    output = Dense(1)(net)

    ################################
    # model
    ################################

    # Build model
    # model = Model(inputs=[X_in, A_in, E_in], outputs=output)
    model = Model(inputs=[X_in, A_in], outputs=output)
    optimizer = Adam(lr=params["learning_rate"])
    model.compile(
        optimizer=optimizer,
        loss="mse",
        metrics=["mse", "mae", "mape"],
    )

    return model
Example #25
N = A.shape[0]
F = X.shape[-1]
n_classes = y.shape[-1]

# %%
X[1, 2]
# %%
from spektral.layers import GraphConv
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Dropout

# %%
X_in = Input(shape=(F, ))
A_in = Input((N, ), sparse=True)

X_1 = GraphConv(16, "relu")([X_in, A_in])
X_1 = Dropout(0.5)(X_1)
X_2 = GraphConv(n_classes, "softmax")([X_1, A_in])

model = Model(inputs=[X_in, A_in], outputs=X_2)

# %%
A = GraphConv.preprocess(A).astype("f4")

# %%
model.compile(optimizer="adam",
              loss="categorical_crossentropy",
              weighted_metrics=["acc"])
model.summary()

# %%
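# Hypothetical next step (not in the original notebook): a single-graph GCN like this is
# trained full-batch, with the train/validation boolean masks passed as sample weights.
# train_mask and val_mask are assumed to come from the same dataset loader as A, X, y.
model.fit([X, A], y,
          sample_weight=train_mask,
          validation_data=([X, A], y, val_mask),
          batch_size=N,
          epochs=200,
          shuffle=False)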
Example #26
    center=True,
    scale=True,
    beta_initializer='zeros',
    gamma_initializer='ones',
    moving_mean_initializer='zeros',
    moving_variance_initializer='ones',
    renorm_momentum=0.99,
)(area_in)  #batch norm on the area data

if not include_batch_norm_layers:
    bn0 = area_in

if not include_batch_norm_layers:
    g1 = GraphConv(
        embedding_vecor_length1,  #first graph conv layer
        activation='relu',
        kernel_regularizer=l2(l2_reg),
        use_bias=True)([bn0, fltr_in])
    ac1 = g1
else:
    g1 = GraphConv(
        embedding_vecor_length1,  #first graph conv layer
        kernel_regularizer=l2(l2_reg),
        use_bias=True)([bn0, fltr_in])
    bn1 = BN(
        axis=-1,
        momentum=0.99,
        epsilon=0.001,
        center=True,
        scale=True,
        beta_initializer='zeros',
Example #27
es_patience = 10  # Patience for early stopping
log_dir = init_logging()  # Create log directory and file

# Preprocessing
fltr = localpooling_filter(adj.copy())

# Train/test split
fltr_train, fltr_test, \
x_train, x_test,       \
y_train, y_test = train_test_split(fltr, x, y, test_size=0.1)

# Model definition
X_in = Input(shape=(N, F))
filter_in = Input((N, N))

gc1 = GraphConv(32, activation='relu',
                kernel_regularizer=l2(l2_reg))([X_in, filter_in])
gc2 = GraphConv(32, activation='relu',
                kernel_regularizer=l2(l2_reg))([gc1, filter_in])
pool = GlobalAttentionPool(128)(gc2)

output = Dense(n_classes, activation='softmax')(pool)

# Build model
model = Model(inputs=[X_in, filter_in], outputs=output)
optimizer = Adam(lr=learning_rate)
model.compile(optimizer=optimizer,
              loss='categorical_crossentropy',
              metrics=['acc'])
model.summary()

# Callbacks
Example #28
y_train = labels_one_hot * train_mask[..., np.newaxis]
y_val = labels_one_hot * val_mask[..., np.newaxis]

# Get important parameters of adjacency matrix
N = adj.shape[0]
F = 4  # number of features
learning_rate = 0.01
epochs = 300

# Preprocessing operations
fltr = GraphConv.preprocess(adj).astype('f4')

# Model definition
X_in = Input(shape=(N, ))
fltr_in = Input(shape=(N, ))
x = GraphConv(F, activation='tanh', use_bias=False)([X_in, fltr_in])
x = GraphConv(F, activation='tanh', use_bias=False)([x, fltr_in])
x = GraphConv(2, activation='tanh', use_bias=False,
              name="embedding")([x, fltr_in])
output = GraphConv(4, activation='softmax', use_bias=False)([x, fltr_in])

# Build model
model = Model(inputs=[X_in, fltr_in], outputs=output)
model.compile(optimizer=Adam(lr=learning_rate),
              loss='categorical_crossentropy',
              weighted_metrics=['acc'])
model.summary()

embeddings = {}
history = []
Example #29
l2_reg = 5e-4 / 2  # L2 regularization rate
learning_rate = 1e-2  # Learning rate
epochs = 10  # Number of training epochs 200
es_patience = 10  # Patience for early stopping

# Preprocessing operations
fltr = GraphConv.preprocess(A).astype('f4')
X = X.toarray()

# Model definition
X_in = Input(shape=(F, ))
fltr_in = Input((N, ), sparse=True)

dropout_1 = Dropout(dropout)(X_in)
graph_conv_1 = GraphConv(channels,
                         activation='relu',
                         kernel_regularizer=l2(l2_reg),
                         use_bias=False)([dropout_1, fltr_in])
dropout_2 = Dropout(dropout)(graph_conv_1)
graph_conv_2 = GraphConv(n_classes, activation='softmax',
                         use_bias=False)([dropout_2, fltr_in])

# Build model
model = Model(inputs=[X_in, fltr_in], outputs=graph_conv_2)
optimizer = Adam(lr=learning_rate)
model.compile(optimizer=optimizer,
              loss='categorical_crossentropy',
              weighted_metrics=['acc'])
model.summary()

# Train model
validation_data = ([X, fltr], y, val_mask)
Example #30
def build_gcn_model(trainset, testset, nb_node_features, nb_classes=1, batch_size=32, nb_epochs=100, lr=0.001,
                     save_path=None):
    
    # Create model architecture
    X_in = Input(batch_shape=(None, nb_node_features))
    A_in = Input(batch_shape=(None, None), sparse=True)
    I_in = Input(batch_shape=(None, ), dtype='int64')
    target = Input(tensor=tf.placeholder(tf.float32, shape=(None, nb_classes), name='target'))
    
    gc1 = GraphConv(64, activation='relu')([X_in, A_in])
    gc2 = GraphConv(128, activation='relu')([gc1, A_in])
    pool = GlobalAvgPool()([gc2, I_in])
    dense1 = Dense(128, activation='relu')(pool)
    output = Dense(nb_classes, activation='sigmoid')(dense1)
    
    model = Model(inputs=[X_in, A_in, I_in], outputs=output)
    
    # Compile model
    #optimizer = Adam(lr=lr)    
    opt = tf.train.AdamOptimizer(learning_rate=lr)
    model.compile(optimizer=opt, loss='binary_crossentropy', target_tensors=target, metrics=['accuracy'])
    model.summary()
    loss = model.total_loss
    train_step = opt.minimize(loss)
    
    # Initialize all variables
    sess = K.get_session()
    init_op = tf.global_variables_initializer()
    sess.run(init_op)
    
    # Get train and test data
    [A_train, X_train, y_train] = trainset
    [A_test, X_test, y_test] = testset
    
    SW_KEY = 'dense_2_sample_weights:0' # Keras automatically creates a placeholder for sample weights, which must be fed
    best_accuracy = 0
    for i in range(nb_epochs):
        # Train
        # TODO: compute class weight and use it in loss function
        batches_train = batch_iterator([A_train, X_train, y_train], batch_size=batch_size)
        model_loss = 0
        prediction = []
        for b in batches_train:
            batch = Batch(b[0], b[1])
            X_, A_, I_ = batch.get('XAI')
            y_ = b[2]
            tr_feed_dict = {X_in: X_,
                            A_in: sp_matrix_to_sp_tensor_value(A_),
                            I_in: I_,
                            target: y_,
                            SW_KEY: np.ones((1,))}
            outs = sess.run([train_step, loss, output], feed_dict=tr_feed_dict)
            model_loss += outs[1]
            prediction.append(list(outs[2].flatten()))    
        y_train_predict = (np.concatenate(prediction)[:len(y_train)] > 0.5).astype('uint8')
        train_accuracy = accuracy_score(y_train, y_train_predict)
        train_loss = model_loss / (np.ceil(len(y_train) / batch_size))
        
        # Validation
        batches_val = batch_iterator([A_test, X_test, y_test], batch_size=batch_size)
        model_loss = 0
        prediction = []
        
        for b in batches_val:
            batch = Batch(b[0], b[1])
            X_, A_, I_ = batch.get('XAI')
            y_ = b[2]
            tr_feed_dict = {X_in: X_,
                            A_in: sp_matrix_to_sp_tensor_value(A_),
                            I_in: I_,
                            target: y_,
                            SW_KEY: np.ones((1,))}
            loss_, output_ = sess.run([loss, output], feed_dict=tr_feed_dict)
            model_loss += loss_
            prediction.append(list(output_.flatten()))
        
        y_val_predict = (np.concatenate(prediction)[:len(y_test)] > 0.5).astype('uint8')
        val_accuracy = accuracy_score(y_test, y_val_predict)
        val_loss = model_loss / (np.ceil(len(y_test) / batch_size))
        print('---------------------------------------------')
        print('Epoch {}: train_loss: {}, train_acc: {}, val_loss: {}, val_acc: {}'.format(i+1, train_loss, train_accuracy,
              val_loss, val_accuracy))
        
        if val_accuracy > best_accuracy:
            best_accuracy = val_accuracy
            model.save(save_path)
        
    # Evaluate the model
    model = load_model(save_path)
    batches_val = batch_iterator([A_test, X_test, y_test], batch_size=batch_size)
    prediction = []
    for b in batches_val:
        batch = Batch(b[0], b[1])
        X_, A_, I_ = batch.get('XAI')
        y_ = b[2]
        tr_feed_dict = {X_in: X_,
                        A_in: sp_matrix_to_sp_tensor_value(A_),
                        I_in: I_,
                        target: y_,
                        SW_KEY: np.ones((1,))}
        output_ = sess.run(output, feed_dict=tr_feed_dict)
        prediction.append(list(output_.flatten()))
    
    y_val_predict = (np.concatenate(prediction)[:len(y_test)] > 0.5).astype('uint8')
    with warnings.catch_warnings():
        warnings.simplefilter('ignore')  # disable the warning on f1-score with not all labels
        scores = get_prediction_score(y_test, y_val_predict)
        
    return model, scores