Example #1
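These snippets are shown without their imports. A minimal header that should make them run, assuming Keras 2.x (a version whose optimizers still accept lr=), the MultiGraphCNN layer from the keras-deep-graph-learning project (keras_dgl), and its example utilities, would look roughly like:

import keras
import numpy as np
import pandas as pd
from keras import backend as K
from keras import optimizers
from keras.layers import (Activation, Average, BatchNormalization, Conv2D,
                          Dense, Dropout, Flatten, Input, Lambda, Maximum,
                          MaxPooling2D)
from keras.models import Model
from keras.optimizers import Adam
from keras.regularizers import l2
from sklearn.utils import shuffle

from keras_dgl.layers import MultiGraphCNN
# preprocess_adj_tensor_with_identity ships with the project's example
# utilities; adjust this import to where utils.py lives in your checkout
from utils import preprocess_adj_tensor_with_identity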
def make_ggnn2_model(input_shapes, output_shape, model_config):
   '''
   Input order:

   X_input, filters_input, nums_input, identity_input, adjacency_input
   '''
   #training_data = ([X, graph_conv_filters, lens], Y)
   features_shape, filters_shape, lens_shape, identity_shape, adjacency_shape = input_shapes
   filters_shape1, max_atoms = filters_shape[1:]
   
   X_input = Input(shape=features_shape[1:])
   filters_input = Input(shape=filters_shape[1:])
   identity_input = Input(shape=identity_shape[1:])
   adjacency_input = Input(shape=adjacency_shape[1:])
   nums_input = Input(shape=(None,))

   num_filters = filters_shape1 // max_atoms
   model_config['max_atoms'] = max_atoms
   model_config['num_filters'] = num_filters

   #control parameters
   N_H = model_config.get('hidden_units', 128)
   dropout_prob = model_config.get('dropout', 0.031849402173891934)
   lr = model_config.get('lr', 1e-3)
   l2_val = model_config.get('l2', 1e-3)
   N_it = model_config.get('num_layers', 8)
   activation = model_config.get('activation', 'softplus')
   drp_flag = model_config.get('dropout_flag', False)
   
   #initial convolution
   H = MultiGraphCNN(N_H, 1, activation=activation, kernel_regularizer=l2(l2_val), name='gcnn1')([X_input, identity_input])
   H = BatchNormalization()(H)
   H = Dropout(dropout_prob)(H, training=drp_flag)
   for it in range(N_it):
      H = MultiGraphCNN(N_H, num_filters, activation=activation, kernel_regularizer=l2(l2_val))([H, filters_input])
      H = Dropout(dropout_prob)(H, training=drp_flag)

   #Pooling
   output = Lambda(lambda X: K.sum(X[0], axis=1) / X[1])([H, nums_input])  # node-order-invariant pooling: sum over nodes divided by the atom count, so the output does not depend on node ordering
   if len(output_shape) == 2:
      N_output = 2
      output_activation = 'softmax'
      loss_f = 'categorical_crossentropy'
      metric = 'categorical_accuracy'
   else:
      N_output = 1
      output_activation = 'sigmoid'
      loss_f = 'binary_crossentropy'
      metric = 'accuracy'
   
   output = Dropout(dropout_prob)(output, training=drp_flag)
   output = Dense(N_output, activation=output_activation)(output)
   
   model = Model(inputs=[X_input, filters_input, nums_input, identity_input, adjacency_input], outputs=output)
   model.compile(loss=loss_f, optimizer=Adam(lr=lr), metrics=[metric])

   return model, metric
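A usage sketch for make_ggnn2_model; every shape and config value below is illustrative, not taken from the original:

# hypothetical shapes: 32 graphs, up to 50 atoms, 16 node features,
# 2 stacked graph filters (so the filter tensor has 2 * 50 = 100 rows)
input_shapes = ((32, 50, 16),    # X: node features
                (32, 100, 50),   # graph_conv_filters
                (32, 1),         # lens: atoms per graph
                (32, 50, 50),    # identity filter
                (32, 50, 50))    # adjacency filter
config = {'hidden_units': 64, 'num_layers': 4, 'dropout': 0.1}
model, metric = make_ggnn2_model(input_shapes, (32, 2), config)  # (32, 2) -> softmax head
model.summary()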
def build_model(X_shape, graph_shape, Y_shape, num_filters):
    X_input = Input(shape=(X_shape[1], X_shape[2]))
    graph_conv_filters_input = Input(shape=(graph_shape[1], graph_shape[2]))

    output = MultiGraphCNN(100, num_filters, activation='elu')([X_input, graph_conv_filters_input])
    output = Dropout(0.2)(output)
    output = MultiGraphCNN(100, num_filters, activation='elu')([output, graph_conv_filters_input])
    output = Dropout(0.2)(output)
    output = Lambda(lambda x: K.mean(x, axis=1))(output)  # node-order-invariant pooling: mean over nodes, so the output does not depend on node ordering
    output = Dense(Y_shape[1])(output)
    output = Activation('softmax')(output)

    model = Model(inputs=[X_input, graph_conv_filters_input], outputs=output)
    model.compile(loss='categorical_crossentropy', 
        optimizer=optimizers.SGD(lr=1e-4, momentum=0.9), 
        metrics=['acc'])

    model.load_weights('models/model_weights.h5')
    return model
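A usage sketch for build_model, with A, X, Y prepared as in Example #6; shapes are illustrative, and it assumes the weights file models/model_weights.h5 already exists, since the function loads it unconditionally:

SYM_NORM = True
num_filters = 2
graph_conv_filters = preprocess_adj_tensor_with_identity(A, SYM_NORM)
model = build_model(X.shape, graph_conv_filters.shape, Y.shape, num_filters)
preds = model.predict([X, graph_conv_filters], batch_size=32)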
Example #3
def make_gated_unit_layer(N_H):
    # l2_val is expected to be defined in the enclosing scope (see Example #8)
    GCN_Z = MultiGraphCNN(N_H, 2, activation='sigmoid',
                          kernel_regularizer=l2(l2_val))
    GCN_R = MultiGraphCNN(N_H, 2, activation='sigmoid',
                          kernel_regularizer=l2(l2_val))
    GCN_U = MultiGraphCNN(N_H, 1, activation='linear', use_bias=False,
                          kernel_regularizer=l2(l2_val))
    GCN_W = MultiGraphCNN(N_H, 1, activation='linear',
                          kernel_regularizer=l2(l2_val))
    return [GCN_Z, GCN_R, GCN_U, GCN_W]
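These four layers form a GRU-style gated graph unit: GCN_Z and GCN_R compute the update and reset gates, GCN_U and GCN_W the candidate state. A sketch of one update step, wired as in Example #8 below (H is the current node state; filters_input, identity_input, adjacency_input as defined there):

GCN_Z, GCN_R, GCN_U, GCN_W = make_gated_unit_layer(N_H)
z = GCN_Z([H, filters_input])                    # update gate
r = GCN_R([H, filters_input])                    # reset gate
u = GCN_U([Lambda(lambda x: x[0] * x[1])([r, H]), identity_input])
w = GCN_W([H, adjacency_input])
HT = Activation('tanh')(Lambda(lambda x: x[0] + x[1])([u, w]))  # candidate state
# H' = (1 - z) * H + z * HT
H = Lambda(lambda x: (K.ones_like(x[0]) - x[0]) * x[1] + x[0] * x[2])([z, H, HT])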
def egrmodel2(A, X, graph_conv_filters, num_filters):  # A is unused here; the filters are precomputed

    X_input = Input(shape=(X.shape[1], X.shape[2]))
    graph_conv_filters_input = Input(shape=(graph_conv_filters.shape[1], graph_conv_filters.shape[2]))

    layer_gcnn1 = MultiGraphCNN(16, num_filters, activation='elu')([X_input, graph_conv_filters_input])
    layer_gcnn1 = Dropout(0.2)(layer_gcnn1)
    layer_gcnn2 = MultiGraphCNN(16, num_filters, activation='elu')([layer_gcnn1, graph_conv_filters_input])
    layer_gcnn2 = Dropout(0.2)(layer_gcnn2)
    layer_gcnn3 = MultiGraphCNN(16, num_filters, activation='elu')([layer_gcnn2, graph_conv_filters_input])
    layer_gcnn3 = Dropout(0.2)(layer_gcnn3)
    layer_gcnn4 = Maximum()([layer_gcnn1, layer_gcnn2, layer_gcnn3])
    # layer_gcnn5 = Reshape((layer_gcnn4.shape[1]*layer_gcnn4.shape[2],))(layer_gcnn4)
    layer_gcnn5 = Flatten()(layer_gcnn4)

    layer_dense1 = Dense(50, activation='sigmoid')(layer_gcnn5)

    model = Model(inputs=[X_input, graph_conv_filters_input], outputs=layer_dense1)
    model.summary()

    return model
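egrmodel2 returns an uncompiled model; a hedged sketch of compiling and training it (loss, optimizer, and fit arguments are illustrative, since the original does not specify them):

model = egrmodel2(A, X, graph_conv_filters, num_filters=2)
model.compile(loss='mse', optimizer=Adam(lr=1e-3))
model.fit([X, graph_conv_filters], Y, batch_size=32, epochs=50)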
def GCNN_skeleton_t16(num_nodes, num_features, graph_conv_filters_shape1,
                      graph_conv_filters_shape2, num_filters, num_classes,
                      n_neuron, n_dropout, timesteps):
    print('Build GCNN')
    # one node-feature input per timestep; the original unrolled these 16
    # branches by hand (hence the _t16 suffix), so timesteps is expected
    # to equal 16
    X_inputs = [Input(shape=(num_nodes, num_features)) for _ in range(timesteps)]

    X_input = Input(shape=(timesteps, num_nodes, 3))

    graph_conv_filters_input = Input(shape=(graph_conv_filters_shape1,
                                            graph_conv_filters_shape2))

    # per-timestep graph convolution; each iteration builds a fresh
    # MultiGraphCNN with its own weights, exactly as the hand-unrolled
    # original did
    outputs = []
    for X_t in X_inputs:
        out_t = MultiGraphCNN(n_neuron, num_filters,
                              activation='elu')([X_t, graph_conv_filters_input])
        out_t = Dropout(n_dropout)(out_t)
        # add a time axis so the per-timestep outputs can be stacked below
        outputs.append(Lambda(lambda x: K.expand_dims(x, axis=1))(out_t))

    output = keras.layers.Concatenate(axis=1)(outputs)
    output = keras.layers.Concatenate()([output, X_input])  # append the raw per-timestep node coordinates along the feature axis
    out = BatchNormalization()(output)
    out = Conv2D(64, (3, 3), activation='relu', padding='same')(out)
    #out = MaxPooling2D(pool_size=(2,2))(out)
    out = BatchNormalization()(out)
    out = Conv2D(64, (3, 3), activation='relu', padding='same')(out)
    out = MaxPooling2D(pool_size=(2, 2))(out)
    out = BatchNormalization()(out)
    out = Conv2D(128, (3, 3), activation='relu', padding='same')(out)
    out = MaxPooling2D(pool_size=(2, 2))(out)
    out = BatchNormalization()(out)
    out = Dropout(0.5)(out)
    out = Flatten()(out)
    out_new = Dense(256, activation='relu')(out)
    out_new = Dense(128, activation='relu')(out_new)
    out_new = Dropout(n_dropout, name='gcnn_out')(out_new)
    #output_final = Dense(num_classes, activation='softmax')(out_new)
    model = Model(inputs=X_inputs + [X_input, graph_conv_filters_input],
                  outputs=out_new)
    return model
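GCNN_skeleton_t16 deliberately stops at the gcnn_out dropout (the softmax head is commented out) and returns an uncompiled model. A hedged sketch of attaching a head and compiling; all values are illustrative:

skeleton = GCNN_skeleton_t16(num_nodes=25, num_features=3,
                             graph_conv_filters_shape1=50,
                             graph_conv_filters_shape2=25,
                             num_filters=2, num_classes=10,
                             n_neuron=32, n_dropout=0.3, timesteps=16)
head = Dense(10, activation='softmax')(skeleton.get_layer('gcnn_out').output)
model = Model(inputs=skeleton.inputs, outputs=head)
model.compile(loss='categorical_crossentropy', optimizer=Adam(lr=1e-3),
              metrics=['acc'])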
Example #6
# A (adjacency tensor) and X (node features) are loaded earlier in the original script
Y = pd.read_csv('data/Y_mutag.csv', header=None)
Y = np.array(Y)

A, X, Y = shuffle(A, X, Y)

# build graph_conv_filters
SYM_NORM = True
num_filters = 2
graph_conv_filters = preprocess_adj_tensor_with_identity(A, SYM_NORM)

# build model
X_input = Input(shape=(X.shape[1], X.shape[2]))
graph_conv_filters_input = Input(shape=(graph_conv_filters.shape[1],
                                        graph_conv_filters.shape[2]))

output = MultiGraphCNN(100, num_filters,
                       activation='elu')([X_input, graph_conv_filters_input])
output = Dropout(0.2)(output)
output = MultiGraphCNN(100, num_filters,
                       activation='elu')([output, graph_conv_filters_input])
output = Dropout(0.2)(output)
output = Lambda(lambda x: K.mean(x, axis=1))(
    output
)  # node-order-invariant pooling: mean over nodes, so the output does not depend on node ordering
output = Dense(Y.shape[1])(output)
output = Activation('softmax')(output)

nb_epochs = 200
batch_size = 169

model = Model(inputs=[X_input, graph_conv_filters_input], outputs=output)
model.compile(loss='categorical_crossentropy',
              optimizer=Adam(lr=1e-3),  # assumption: the source snippet is truncated here
              metrics=['acc'])

# (separate fragment: building filters for train/val/test splits in a related script)
Atest, Xtest, ytest = shuffle(xtest, ftest, ytest)
Aval, Xval, yval = shuffle(xval, fval, yval)

## filters
SYM_NORM = True  # adj = D~^(-1/2) * A~ * D~^(-1/2), with A~ = A + I
num_filters = 2  # preprocess_adj_tensor_with_identity stacks identity + normalized adjacency: 2 filters per graph
graph_conv_filters = preprocess_adj_tensor_with_identity(A, SYM_NORM)
graph_conv_filters_test = preprocess_adj_tensor_with_identity(Atest, SYM_NORM)
graph_conv_filters_val = preprocess_adj_tensor_with_identity(Aval, SYM_NORM)

# model
X_input = Input(shape=(X.shape[1], X.shape[2]))
graph_conv_filters_input = Input(shape=(graph_conv_filters.shape[1],
                                        graph_conv_filters.shape[2]))

layer_gcnn1 = MultiGraphCNN(
    8, num_filters, activation='elu')([X_input, graph_conv_filters_input])
# layer_gcnn1 = Dropout(0.2)(layer_gcnn1)
layer_gcnn2 = MultiGraphCNN(
    8, num_filters, activation='elu')([layer_gcnn1, graph_conv_filters_input])
# layer_gcnn2 = Dropout(0.2)(layer_gcnn2)
layer_gcnn3 = MultiGraphCNN(
    8, num_filters, activation='elu')([layer_gcnn2, graph_conv_filters_input])
# layer_gcnn3 = Dropout(0.2)(layer_gcnn3)
layer_gcnn4 = Average()([layer_gcnn1, layer_gcnn2, layer_gcnn3])
# add new Graph layer with cnn
layer_gcnn4 = MultiGraphCNN(
    1, num_filters, activation='elu')([layer_gcnn4, graph_conv_filters_input])
# layer_gcnn3 = Dropout(0.2)(layer_gcnn3)
# layer_gcnn5 = Reshape((layer_gcnn4.shape[1]*layer_gcnn4.shape[2],))(layer_gcnn4)
layer_gcnn5 = Flatten()(layer_gcnn4)
# layer_gcnn5 = Dropout(0.2)(layer_gcnn5)
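The fragment ends before a readout layer; a hedged sketch of how it could be finished (the Dense head, loss, and the ytrain variable are assumptions, not from the original):

output = Dense(ytrain.shape[1], activation='softmax')(layer_gcnn5)
model = Model(inputs=[X_input, graph_conv_filters_input], outputs=output)
model.compile(loss='categorical_crossentropy', optimizer=Adam(lr=1e-3),
              metrics=['acc'])
model.fit([X, graph_conv_filters], ytrain,
          validation_data=([Xval, graph_conv_filters_val], yval),
          batch_size=32, epochs=100)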
Example #8
def make_ggnn_model(input_shapes, output_shape, model_config):
   '''
   Input order:

   X_input, filters_input, nums_input, identity_input, adjacency_input
   '''
   #training_data = ([X, graph_conv_filters, lens], Y)
   features_shape, filters_shape, lens_shape, identity_shape, adjacency_shape = input_shapes
   filters_shape1, max_atoms = filters_shape[1:]
   
   X_input = Input(shape=features_shape[1:])
   filters_input = Input(shape=filters_shape[1:])
   identity_input = Input(shape=identity_shape[1:])
   adjacency_input = Input(shape=adjacency_shape[1:])
   nums_input = Input(shape=(None,))

   num_filters = filters_shape1 // max_atoms
   model_config['max_atoms'] = max_atoms
   model_config['num_filters'] = num_filters

   #control parameters
   recurrent = model_config.get('recurrent', False)
   N_H = model_config.get('hidden_units', 128)
   dropout_prob = model_config.get('dropout', 0.031849402173891934)
   lr = model_config.get('lr', 1e-3)
   l2_val = model_config.get('l2', 1e-3)
   N_it = model_config.get('num_layers', 8)
   activation = model_config.get('activation', 'softplus')
   drp_flag = model_config.get('dropout_flag', False)
   
   #GGNN unit components
   hadamard = Lambda(lambda x: x[0] * x[1])
   sum_ = Lambda(lambda x: x[0] + x[1])
   tanh = Activation('tanh')
   combiner = Lambda(lambda x: (K.ones_like(x[0]) - x[0]) * x[1] + x[0] * x[2])  # GRU-style mix: (1 - z)*H + z*H~
   def make_gated_unit_layer(N_H):
      GCN_Z = MultiGraphCNN(N_H, 2, activation='sigmoid', kernel_regularizer=l2(l2_val))
      GCN_R = MultiGraphCNN(N_H, 2, activation='sigmoid', kernel_regularizer=l2(l2_val))
      GCN_U = MultiGraphCNN(N_H, 1, activation='linear', use_bias=False, kernel_regularizer=l2(l2_val))
      GCN_W = MultiGraphCNN(N_H, 1, activation='linear', kernel_regularizer=l2(l2_val))
      return [GCN_Z, GCN_R, GCN_U, GCN_W]

   #initial convolution
   H = MultiGraphCNN(N_H, 1, activation=activation, kernel_regularizer=l2(l2_val), name='gcnn1')([X_input, identity_input])
   H = BatchNormalization()(H)
   H = Dropout(dropout_prob)(H, training=drp_flag)
   #GCNN iterations
   GCN_layers = [make_gated_unit_layer(N_H)]
   for it in range(N_it):
      GCN_Z, GCN_R, GCN_U, GCN_W = GCN_layers[-1]
      z = GCN_Z([H, filters_input])   # update gate
      r = GCN_R([H, filters_input])   # reset gate
      r = Dropout(dropout_prob)(r, training=drp_flag)
      z = Dropout(dropout_prob)(z, training=drp_flag)
      u = hadamard([r, H])
      u = GCN_U([u, identity_input])
      u = Dropout(dropout_prob)(u, training=drp_flag)
      w = GCN_W([H, adjacency_input])
      w = Dropout(dropout_prob)(w, training=drp_flag)
      HT = tanh(sum_([u, w]))         # candidate state
      H = combiner([z, H, HT])
      if it < (N_it - 1) and not recurrent:
         GCN_layers.append(make_gated_unit_layer(N_H))  # fresh, unshared gate weights per iteration

   #Pooling
   output = Lambda(lambda X: K.sum(X[0], axis=1) / X[1])([H, nums_input])  # node-order-invariant pooling: sum over nodes divided by the atom count, so the output does not depend on node ordering
   if len(output_shape) == 2:
      N_output = 2
      output_activation = 'softmax'
      loss_f = 'categorical_crossentropy'
      metric = 'categorical_accuracy'
   else:
      N_output = 1
      output_activation = 'sigmoid'
      loss_f = 'binary_crossentropy'
      metric = 'accuracy'
   
   output = Dropout(dropout_prob)(output, training=drp_flag)
   output = Dense(N_output, activation=output_activation)(output)
   
   model = Model(inputs=[X_input, filters_input, nums_input, identity_input, adjacency_input], outputs=output)
   model.compile(loss=loss_f, optimizer=Adam(lr=lr), metrics=[metric])

   return model, metric
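Unlike make_ggnn2_model, this variant can share the gated-unit weights across iterations: with recurrent=True a single gated unit is reused every step (a true GRU-like recurrence), while the default recurrent=False creates fresh gate layers per iteration. A hedged config sketch, values illustrative:

shared = {'recurrent': True, 'num_layers': 8, 'hidden_units': 64}     # one unit, applied 8 times
unshared = {'recurrent': False, 'num_layers': 8, 'hidden_units': 64}  # 8 independent units
model, metric = make_ggnn_model(input_shapes, output_shape, shared)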