Example #1
def generate_GNN(max_n_nodes,
                 n_attributes,
                 n_classes,
                 batch_size=32,
                 validation_split=0.1,
                 epochs=100,
                 verbose=0,
                 plot=False):

    learning_rate = 0.001
    l2_reg = 5e-4

    ##### DEFINE THE ORIGINAL MODEL
    X_in_1_1 = Input(shape=(max_n_nodes, n_attributes))
    filter_in_1_1 = Input((max_n_nodes, max_n_nodes))
    gc1_1_1 = GraphAttention(32,
                             activation='relu',
                             kernel_regularizer=l2(l2_reg))(
                                 [X_in_1_1, filter_in_1_1])
    gc2_1_1 = GraphAttention(32,
                             activation='relu',
                             kernel_regularizer=l2(l2_reg))(
                                 [gc1_1_1, filter_in_1_1])
    pool_1_1 = GlobalAttentionPool(128)(gc2_1_1)
    output_1_1 = Dense(n_classes, activation='softmax')(pool_1_1)
    model_1_1 = Model(inputs=[X_in_1_1, filter_in_1_1], outputs=output_1_1)
    optimizer = Adam(lr=learning_rate)
    model_1_1.compile(optimizer=optimizer,
                      loss='categorical_crossentropy',
                      metrics=['acc'])

    ##### CREATE THE SECOND MODEL
    X_in_1_2 = Input(shape=(max_n_nodes, n_attributes))
    filter_in_1_2 = Input((max_n_nodes, max_n_nodes))
    gc1_1_2 = GraphAttention(32,
                             activation='relu',
                             kernel_regularizer=l2(l2_reg))(
                                 [X_in_1_2, filter_in_1_2])
    gc2_1_2 = GraphAttention(32,
                             activation='relu',
                             kernel_regularizer=l2(l2_reg))(
                                 [gc1_1_2, filter_in_1_2])
    pool_1_2 = GlobalAttentionPool(128)(gc2_1_2)
    model_1_2 = Model(inputs=[X_in_1_2, filter_in_1_2], outputs=pool_1_2)
    model_1_2.compile(optimizer=Adam(lr=learning_rate),
                      loss='categorical_crossentropy',
                      metrics=['acc'])

    my_GNN_1 = Transformer_GNN(original_model=model_1_1,
                               new_model=model_1_2,
                               batch_size=batch_size,
                               validation_split=validation_split,
                               epochs=epochs,
                               verbose=verbose,
                               plot=plot)

    return my_GNN_1
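# Note on the pattern above: `Transformer_GNN` is not shown in this
# snippet, but since model_1_1 and model_1_2 are structurally identical
# up to the pooling layer, a plausible (assumed, not confirmed by the
# source) weight transfer between them is a layer-by-layer copy; zip()
# stops at the shorter layer list, so model_1_1's softmax head is
# simply skipped:
for src, dst in zip(model_1_1.layers, model_1_2.layers):
    dst.set_weights(src.get_weights())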
Example #2
def gen_SpektralGNN_emb(n_classes, n_components, max_n_nodes, n_attributes):
    learning_rate = 0.001
    l2_reg = 5e-4

    X_in = Input(shape=(max_n_nodes, n_attributes))
    filter_in = Input((max_n_nodes, max_n_nodes))
    emb_GNN = GraphAttention(32,
                             activation='relu',
                             kernel_regularizer=l2(l2_reg))([X_in, filter_in])
    emb_GNN = GraphAttention(n_components,
                             activation='relu',
                             kernel_regularizer=l2(l2_reg))(
                                 [emb_GNN, filter_in])
    emb_GNN = GlobalAttentionPool(n_components)(emb_GNN)

    cla_GNN = Dense(n_classes, activation='softmax')(emb_GNN)

    optimizer = Adam(lr=learning_rate)
    classificator = Model(inputs=[X_in, filter_in], outputs=cla_GNN)
    embedder = Model(inputs=[X_in, filter_in], outputs=emb_GNN)
    classificator.compile(optimizer=optimizer,
                          loss='categorical_crossentropy',
                          metrics=['acc'])

    return (classificator, embedder)
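# Usage sketch: `classificator` and `embedder` are built on the same
# layer instances, so fitting the classifier also trains the embedding
# branch. X, fltr, y are assumed pre-padded arrays of node features,
# filters, and one-hot labels (not defined in this snippet).
classificator, embedder = gen_SpektralGNN_emb(n_classes=y.shape[-1],
                                              n_components=16,
                                              max_n_nodes=X.shape[1],
                                              n_attributes=X.shape[2])
classificator.fit([X, fltr], y, epochs=10, batch_size=32)
embeddings = embedder.predict([X, fltr])  # shape: (n_graphs, 16)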
Example #3
def gen_small(max_n_nodes, n_attributes, n_components):
    n_classes = 2

    learning_rate = 0.001
    l2_reg = 5e-4

    ##### DEFINE THE ORIGINAL MODEL
    X_in_1_1 = Input(shape=(max_n_nodes, n_attributes))
    filter_in_1_1 = Input((max_n_nodes, max_n_nodes))
    gc1_1_1 = GraphAttention(32,
                             activation='relu',
                             kernel_regularizer=l2(l2_reg))(
                                 [X_in_1_1, filter_in_1_1])
    gc2_1_1 = GraphAttention(32,
                             activation='relu',
                             kernel_regularizer=l2(l2_reg))(
                                 [gc1_1_1, filter_in_1_1])
    pool_1_1 = GlobalAttentionPool(128)(gc2_1_1)
    dense_x = Dense(n_components)(pool_1_1)
    output_1_1 = Dense(n_classes, activation='softmax')(dense_x)
    model_1_1 = Model(inputs=[X_in_1_1, filter_in_1_1], outputs=output_1_1)
    optimizer = Adam(lr=learning_rate)
    model_1_1.compile(optimizer=optimizer,
                      loss='categorical_crossentropy',
                      metrics=['acc'])

    ##### CREATE THE SECOND MODEL
    X_in_1_2 = Input(shape=(max_n_nodes, n_attributes))
    filter_in_1_2 = Input((max_n_nodes, max_n_nodes))
    gc1_1_2 = GraphAttention(32,
                             activation='relu',
                             kernel_regularizer=l2(l2_reg))(
                                 [X_in_1_2, filter_in_1_2])
    gc2_1_2 = GraphAttention(32,
                             activation='relu',
                             kernel_regularizer=l2(l2_reg))(
                                 [gc1_1_2, filter_in_1_2])
    pool_1_2 = GlobalAttentionPool(128)(gc2_1_2)
    dense2_x = Dense(n_components)(pool_1_2)
    model_1_2 = Model(inputs=[X_in_1_2, filter_in_1_2], outputs=dense2_x)
    model_1_2.compile(optimizer=Adam(lr=learning_rate),
                      loss='categorical_crossentropy',
                      metrics=['acc'])

    return (model_1_1, model_1_2)
Example #4
def test_gap():
    adj, nf, ef, labels = qm9.load_data('numpy', amount=1000)
    N = nf.shape[-2]
    F = nf.shape[-1]
    channels_out = 32

    model = Sequential()
    model.add(GlobalAttentionPool(channels_out, input_shape=(N, F)))

    assert model.output_shape == (None, channels_out)
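    # Shape check with random data (a sketch; N, F and channels_out as
    # in the test above): GlobalAttentionPool collapses the node axis,
    # mapping (batch, N, F) node features to one (batch, channels_out)
    # vector per graph.
    import numpy as np
    x = np.random.rand(8, N, F).astype('float32')
    assert model.predict(x).shape == (8, channels_out)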
Example #5
def Model_treeGAT_softmax_1(node_count,
                            wordvocabsize,
                            w2v_k,
                            word_W,
                            l2_reg=5e-4):
    X_word_in = Input(shape=(node_count, ), dtype='int32')
    fltr_in = Input(shape=(node_count, node_count), dtype='float32')
    # fltr_in1 = Input(tensor=sparse_tensor_to_dense(sp_matrix_to_sp_tensor(fltr)))

    word_embedding_layer = Embedding(input_dim=wordvocabsize + 1,
                                     output_dim=w2v_k,
                                     input_length=node_count,
                                     mask_zero=True,
                                     trainable=True,
                                     weights=[word_W])
    word_embedding_x = word_embedding_layer(X_word_in)
    word_embedding_x = Dropout(0.25)(word_embedding_x)

    graph_conv_1 = GraphAttention(200,
                                  attn_heads=3,
                                  activation='relu',
                                  kernel_regularizer=l2(l2_reg),
                                  dropout_rate=0.5,
                                  use_bias=True)([word_embedding_x, fltr_in])
    graph_conv_1 = Dropout(0.5)(graph_conv_1)
    graph_conv_2 = GraphAttention(200,
                                  attn_heads=3,
                                  activation='relu',
                                  kernel_regularizer=l2(l2_reg),
                                  dropout_rate=0.5,
                                  use_bias=True)([graph_conv_1, fltr_in])
    graph_conv_2 = Dropout(0.5)(graph_conv_2)
    feature_node0 = Lambda(lambda x: x[:, 0])(graph_conv_2)

    pool = GlobalAttentionPool(200)(graph_conv_2)

    flatten = Flatten()(graph_conv_2)
    flatten = Dense(512, activation='relu')(flatten)
    fc = Dropout(0.5)(flatten)

    # LSTM_backward = LSTM(200, activation='tanh', return_sequences=False,
    #                      go_backwards=True, dropout=0.5)(dropout_2)

    # present_node0 = concatenate([feature_node0, LSTM_backward], axis=-1)
    class_output = Dense(120)(fc)
    class_output = Activation('softmax', name='CLASS')(class_output)

    # Build model
    model = Model(inputs=[X_word_in, fltr_in], outputs=class_output)
    optimizer = Adam(lr=0.001)
    model.compile(optimizer=optimizer,
                  loss='categorical_crossentropy',
                  weighted_metrics=['acc'])
    return model
Example #6
fltr = localpooling_filter(adj.copy())

# Train/test split
fltr_train, fltr_test, x_train, x_test, y_train, y_test = train_test_split(
    fltr, x, y, test_size=0.1)

# Model definition
X_in = Input(shape=(N, F))
filter_in = Input((N, N))

gc1 = GraphConv(32, activation='relu',
                kernel_regularizer=l2(l2_reg))([X_in, filter_in])
gc2 = GraphConv(32, activation='relu',
                kernel_regularizer=l2(l2_reg))([gc1, filter_in])
pool = GlobalAttentionPool(128)(gc2)

output = Dense(n_classes, activation='softmax')(pool)

# Build model
model = Model(inputs=[X_in, filter_in], outputs=output)
optimizer = Adam(lr=learning_rate)
model.compile(optimizer=optimizer,
              loss='categorical_crossentropy',
              metrics=['acc'])
model.summary()

# Callbacks
es_callback = EarlyStopping(monitor='val_acc', patience=es_patience)

# Train model
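# Hypothetical completion of the truncated training step, reusing the
# names defined above; batch_size, epochs, and validation_split are
# assumptions, since the original snippet breaks off here.
model.fit([x_train, fltr_train],
          y_train,
          batch_size=32,
          epochs=100,
          validation_split=0.1,
          callbacks=[es_callback])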
Example #7
def gen_dense(n_components, max_n_nodes, n_attributes):

    layers = [64, 32, 16, 8, 5, 3, 2]

    learning_rate = 0.001
    l2_reg = 5e-4
    n_classes = 2

    # original
    X_in_1_1 = Input(shape=(max_n_nodes, n_attributes))
    filter_in_1_1 = Input((max_n_nodes, max_n_nodes))
    gc1_1_1 = GraphAttention(32,
                             activation='relu',
                             kernel_regularizer=l2(l2_reg))(
                                 [X_in_1_1, filter_in_1_1])
    gc2_1_1 = GraphAttention(32,
                             activation='relu',
                             kernel_regularizer=l2(l2_reg))(
                                 [gc1_1_1, filter_in_1_1])
    pool_1_1 = GlobalAttentionPool(128)(gc2_1_1)

    index = layers.index(n_components) + 1
    input_layer = pool_1_1

    for i in range(index):
        input_layer = add_layer(input_layer, layers[i])

    output_1_1 = Dense(n_classes, activation='softmax')(input_layer)
    model_1_1 = Model(inputs=[X_in_1_1, filter_in_1_1], outputs=output_1_1)
    optimizer = Adam(lr=learning_rate)
    model_1_1.compile(optimizer=optimizer,
                      loss='categorical_crossentropy',
                      metrics=['acc'])

    # embedder

    X_in_2 = Input(shape=(max_n_nodes, n_attributes))
    filter_in_2 = Input((max_n_nodes, max_n_nodes))
    gc1_2 = GraphAttention(32,
                           activation='relu',
                           kernel_regularizer=l2(l2_reg))(
                               [X_in_2, filter_in_2])
    gc2_2 = GraphAttention(32,
                           activation='relu',
                           kernel_regularizer=l2(l2_reg))([gc1_2, filter_in_2])
    pool_2 = GlobalAttentionPool(128)(gc2_2)

    index = layers.index(n_components) + 1
    input_layer = pool_2

    for i in range(index):
        input_layer = add_layer(input_layer, layers[i])

    model_2 = Model(inputs=[X_in_2, filter_in_2], outputs=input_layer)
    optimizer = Adam(lr=learning_rate)
    model_2.compile(optimizer=optimizer,
                    loss='categorical_crossentropy',
                    metrics=['acc'])

    return (model_1_1, model_2)
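# `add_layer` is not defined in this snippet; a plausible minimal
# implementation (an assumption, not the original helper) that stacks
# one Dense layer of the requested width:
def add_layer(input_layer, n_units):
    return Dense(n_units, activation='relu')(input_layer)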
Example #8
    def __init__(self, units, **kwargs):
        super().__init__(**kwargs)
        self.units = units
        self.conv_graph_layer = GraphAttention(units)
        self.pool_graph_layer = GlobalAttentionPool(units)
Example #9
    def __init__(self, units, name):
        super().__init__(name=name)
        self.conv_graph_layer = GraphAttention(units)
        self.pool_graph_layer = GlobalAttentionPool(units)
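    # A hedged sketch of the matching call method for the wrapper
    # classes above (an assumption; the original class bodies are not
    # shown): apply the graph attention convolution, then pool node
    # features into one graph-level vector.
    def call(self, inputs):
        x, a = inputs                        # node features, adjacency
        x = self.conv_graph_layer([x, a])    # (batch, N, units)
        return self.pool_graph_layer(x)      # (batch, units)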
Example #10
File: estimator.py  Project: cgarciae/nfl
def get_model(params):

    # Model definition
    X_in = Input(shape=params["X_shape"])
    A_in = Input(shape=params["A_shape"])
    # E_in = Input(shape=params["E_shape"])
    # aux_in = Input(shape=params["aux_shape"])

    net = X_in
    A_exp = layers.Lambda(lambda x: K.expand_dims(x, axis=-1))(A_in)

    ################################
    # block
    ################################

    net = RelationalDense(32)([net, A_exp])
    # net = layers.BatchNormalization()(net)
    net = layers.Activation("relu")(net)
    net = MaxEdges()(net)
    # net = EdgeConditionedConv(32)([X_in, A_in, E_in])
    net = GraphConv(32)([net, A_in])
    net = layers.BatchNormalization()(net)
    net = layers.Activation("relu")(net)

    ################################
    # block
    ################################

    # net = RelationalDense(64)([net, A_exp])
    # # net = layers.BatchNormalization()(net)
    # net = layers.Activation("relu")(net)
    # net = MaxEdges()(net)
    # # net = EdgeConditionedConv(64)([net, A_in, E_in])
    # net = GraphConv(128)([net, A_in])
    # net = layers.BatchNormalization()(net)
    # net = layers.Activation("relu")(net)

    ################################
    # pooling
    ################################

    net = GlobalAttentionPool(128)(net)
    # net = GlobalMaxPool()(net)
    net = layers.Dropout(0.5)(net)

    ################################
    # block
    ################################

    # concat = Concatenate()([dense1, aux_in])
    net = Dense(64)(net)
    net = layers.BatchNormalization()(net)
    net = layers.Activation("relu")(net)

    ################################
    # block
    ################################

    output = Dense(1)(net)

    ################################
    # model
    ################################

    # Build model
    # model = Model(inputs=[X_in, A_in, E_in], outputs=output)
    model = Model(inputs=[X_in, A_in], outputs=output)
    optimizer = Adam(lr=params["learning_rate"])
    model.compile(
        optimizer=optimizer,
        loss="mse",
        metrics=["mse", "mae", "mape"],
    )

    return model
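# Hypothetical params dict for get_model, matching the keys it reads
# (the shapes are placeholders):
params = {
    "X_shape": (100, 8),      # (n_nodes, n_node_features)
    "A_shape": (100, 100),    # (n_nodes, n_nodes) adjacency
    "learning_rate": 1e-3,
}
model = get_model(params)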
Example #11
    def _model_builder_ecc(self):
        gc1_channels = 32
        gc2_channels = 64
        full_latent_space = len(self._radius) * self.latent_space

        # Inputs
        adj_in = Input(shape=(self.N, self.N), name='adj_in')
        nf_in = Input(shape=(self.N, self.F), name='nf_in')
        ef_in = Input(shape=(self.N, self.N, self.S), name='ef_in')
        z_in = Input(shape=(full_latent_space, ), name='z_in')

        # Encoder
        gc1 = EdgeConditionedConv(gc1_channels,
                                  kernel_regularizer=l2(self.l2_reg),
                                  name='ecc1')([nf_in, adj_in, ef_in])
        bn1 = BatchNormalization()(gc1)
        relu1 = Activation('relu')(bn1)
        do1 = Dropout(self.dropout_rate)(relu1)

        gc2 = EdgeConditionedConv(gc2_channels,
                                  kernel_regularizer=l2(self.l2_reg),
                                  name='ecc2')([do1, adj_in, ef_in])
        bn2 = BatchNormalization()(gc2)
        relu2 = Activation('relu')(bn2)
        do2 = Dropout(self.dropout_rate)(relu2)

        pool = GlobalAttentionPool(128, name='attn_pool')(do2)

        z_enc_list = []
        z_clip_list = []
        for _r in self._radius:
            z_1 = Dense(128, activation='relu')(pool)
            z_2 = Dense(self.latent_space, activation='linear')(z_1)
            z_3 = CCMProjection(_r)(z_2)
            z_enc_list.append(z_2)
            z_clip_list.append(z_3)

        if len(self._radius) > 1:
            z_enc = Concatenate(name='z_enc')(z_enc_list)
            z_clip = Concatenate(name='z_clip')(z_clip_list)
        else:
            z_enc = z_enc_list[0]
            z_clip = z_clip_list[0]

        # Decoder
        dense3 = Dense(128)(z_in)
        bn3 = BatchNormalization()(dense3)
        relu3 = Activation('relu')(bn3)

        dense4 = Dense(256)(relu3)
        bn4 = BatchNormalization()(dense4)
        relu4 = Activation('relu')(bn4)

        dense5 = Dense(512)(relu4)
        bn5 = BatchNormalization()(dense5)
        relu5 = Activation('relu')(bn5)

        # Output
        adj_out_pre = Dense(self.N * self.N, activation='sigmoid')(relu5)
        adj_out = Reshape((self.N, self.N), name='adj_out')(adj_out_pre)

        nf_out_pre = Dense(self.N * self.F, activation='linear')(relu5)
        nf_out = Reshape((self.N, self.F), name='nf_out')(nf_out_pre)

        ef_out_pre = Dense(self.N * self.N * self.S,
                           activation='linear')(relu5)
        ef_out = Reshape((self.N, self.N, self.S), name='ef_out')(ef_out_pre)

        # Build models
        encoder = Model(inputs=[adj_in, nf_in, ef_in], outputs=z_enc)
        clipper = Model(inputs=[adj_in, nf_in, ef_in], outputs=z_clip)
        decoder = Model(inputs=z_in, outputs=[adj_out, nf_out, ef_out])
        model = Model(inputs=[adj_in, nf_in, ef_in],
                      outputs=decoder(clipper.output))
        model.output_names = ['adj', 'nf', 'ef']

        return model, encoder, decoder, clipper
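# Hypothetical training sketch for the autoencoder above: it
# reconstructs its own inputs, so the input arrays double as targets.
# The optimizer and per-output losses are assumptions (sigmoid
# adjacency -> binary cross-entropy, linear features -> MSE).
model.compile(optimizer='adam',
              loss=['binary_crossentropy', 'mse', 'mse'])
model.fit([adj, nf, ef], [adj, nf, ef], epochs=100, batch_size=32)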
Example #12
def build_ecn_model(trainset, testset, nb_nodes, nb_node_features, nb_edge_features, nb_classes=1,
                    batch_size=32, nb_epochs=100, lr=0.001, save_path=None):
    
    
    # Create model architecture
    X_in = Input(shape=(nb_nodes, nb_node_features))
    A_in = Input(shape=(nb_nodes, nb_nodes))
    E_in = Input(shape=(nb_nodes, nb_nodes, nb_edge_features))
    gc1 = EdgeConditionedConv(32, activation='relu')([X_in, A_in, E_in])
    gc2 = EdgeConditionedConv(64, activation='relu')([gc1, A_in, E_in])
    pool = GlobalAttentionPool(128)(gc2)
    dense1 = Dense(128, activation='relu')(pool)
    output = Dense(nb_classes, activation='sigmoid')(dense1)
    
    # Build model
    model = Model(inputs=[X_in, A_in, E_in], outputs=output)
    optimizer = Adam(lr=lr)
    model.compile(optimizer=optimizer, loss='binary_crossentropy', metrics=['accuracy'])
    model.summary()
    
    # Callback list
    callback_list = []
    # monitor val_loss and terminate training if no improvement
    early_stop = callbacks.EarlyStopping(monitor='val_loss', min_delta=0.00001,
                                         patience=20, verbose=2, mode='auto',
                                         restore_best_weights=True)
    callback_list.append(early_stop)
    
    if save_path is not None:
        # save best model based on val_acc during training
        checkpoint = callbacks.ModelCheckpoint(save_path, monitor='val_acc',
                                               verbose=0, save_best_only=True,
                                               save_weights_only=False,
                                               mode='auto')
        callback_list.append(checkpoint)
        
    # Get train and test data
    [X_train, A_train, E_train, y_train] = trainset
    [X_test, A_test, E_test, y_test] = testset
    
    # Compute class weights
    classes = np.unique(y_train)
    weight_list = class_weight.compute_class_weight('balanced',
                                                    classes=classes,
                                                    y=y_train)
    weight_dict = dict(zip(classes, weight_list))
    
    # Train model
    model.fit([X_train, A_train, E_train],
              y_train,
              batch_size=batch_size,
              validation_data=([X_test, A_test, E_test], y_test),
              epochs=nb_epochs,
              verbose=2,
              class_weight=weight_dict,
              callbacks=callback_list)
    
    # Evaluate model
    prediction = model.predict([X_test, A_test, E_test])
    y_test_predict = (prediction > 0.5).astype('uint8')
    with warnings.catch_warnings():
        warnings.simplefilter('ignore')  # suppress the f1-score warning when not all labels occur
        scores = get_prediction_score(y_test, y_test_predict)
        
    return model, scores
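# `get_prediction_score` is not defined in this snippet; a plausible
# stand-in (an assumption) returning standard binary-classification
# metrics:
from sklearn.metrics import accuracy_score, f1_score

def get_prediction_score(y_true, y_pred):
    return {'acc': accuracy_score(y_true, y_pred),
            'f1': f1_score(y_true, y_pred)}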
Example #13
        gc1 = TimeDistributed(
            Lambda(lambda x_: GraphConv(128,
                                        activation='relu',
                                        kernel_regularizer=l2(l2_reg),
                                        use_bias=True)
                   ([x_[..., :-N], x_[..., -N:]])))(conc1)
        gc1 = Dropout(dropout_rate)(gc1)
        conc2 = Concatenate()([gc1, filter_in])
        gc2 = TimeDistributed(
            Lambda(lambda x_: GraphConv(128,
                                        activation='relu',
                                        kernel_regularizer=l2(l2_reg),
                                        use_bias=True)
                   ([x_[..., :-N], x_[..., -N:]])))(conc2)

        # pool = TimeDistributed(NodeAttentionPool())(gc2)
        pool = TimeDistributed(GlobalAttentionPool(128))(gc2)
        # pool = Lambda(lambda x_: K.reshape(x_, (-1, ts, N * 128)))(gc2)

        # Recurrent block
        lstm = LSTM(256, return_sequences=True)(pool)
        lstm = LSTM(256)(lstm)

        # Dense block
        # dense1 = BatchNormalization()(lstm)
        # dense1 = Dropout(dropout_rate)(dense1)
        dense1 = Dense(256, activation='relu',
                       kernel_regularizer=l2(l2_reg))(lstm)
        # dense2 = BatchNormalization()(dense1)
        # dense2 = Dropout(dropout_rate)(dense2)
        dense2 = Dense(512, activation='relu',
                       kernel_regularizer=l2(l2_reg))(dense1)