Example #1
# Assumed imports: GraphConv comes from the keras-gcn package; Attention and
# NeuralTensorLayer are SimGNN's own custom layers (not part of Keras).
import keras
from keras.layers import Input
from keras.models import Model
from keras_gcn import GraphConv

def simgnn(parser):
    inputA = Input(shape=(None,16))
    GinputA = Input(shape=(None,None))
    inputB = Input(shape=(None,16))
    GinputB = Input(shape=(None,None))
    
    shared_gcn1 = GraphConv(units=parser.filters_1, step_num=3, activation="relu")
    shared_gcn2 = GraphConv(units=parser.filters_2, step_num=3, activation="relu")
    shared_gcn3 = GraphConv(units=parser.filters_3, step_num=3, activation="relu")
    shared_attention = Attention(parser)

    x = shared_gcn1([inputA, GinputA])
    x = shared_gcn2([x, GinputA])
    x = shared_gcn3([x, GinputA])
    x = shared_attention(x[0])

    y = shared_gcn1([inputB, GinputB])
    y = shared_gcn2([y, GinputB])
    y = shared_gcn3([y, GinputB])
    y = shared_attention(y[0])

    z = NeuralTensorLayer(output_dim=16, input_dim=16)([x, y])
    z = keras.layers.Dense(16, activation="relu")(z)
    z = keras.layers.Dense(8, activation="relu")(z)
    z = keras.layers.Dense(4, activation="relu")(z)
    z = keras.layers.Dense(1, activation="sigmoid")(z)

    return Model(inputs=[inputA, GinputA, inputB, GinputB], outputs=z)
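For reference, a minimal invocation sketch (an assumption, not from the original source: `parser` is an argparse-style namespace carrying `filters_1`..`filters_3` plus whatever the custom `Attention` layer reads):

    from types import SimpleNamespace
    args = SimpleNamespace(filters_1=64, filters_2=32, filters_3=16)
    model = simgnn(args)
    model.compile(optimizer="adam", loss="mse")  # similarity-score regression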
Example #2
def test_average_step_inf(self):
    data_layer = keras.layers.Input(shape=(None, 3), name='Input-Data')
    edge_layer = keras.layers.Input(shape=(None, None),
                                    dtype='int32',
                                    name='Input-Edge')
    conv_layer = GraphConv(
        units=2,
        step_num=60000000,  # effectively "infinite" propagation steps
        kernel_initializer='ones',
        use_bias=False,
        bias_initializer='ones',  # inert, since use_bias=False
        name='GraphConv',
    )([data_layer, edge_layer])
    model = keras.models.Model(inputs=[data_layer, edge_layer],
                               outputs=conv_layer)
    model.compile(
        optimizer='adam',
        loss='mae',
        metrics=['mae'],
    )
    # Round-trip through save/load to check the custom layer deserializes.
    model_path = os.path.join(tempfile.gettempdir(),
                              'test_save_load_%f.h5' % np.random.random())
    model.save(model_path)
    model = keras.models.load_model(
        model_path, custom_objects={'GraphConv': GraphConv})
    predicts = model.predict([self.input_data,
                              self.input_edge])[0].tolist()
    expects = np.asarray([
        [9., 9.],
        [9., 9.],
        [9., 9.],
        [22., 22.],
    ])
    self.assertTrue(np.allclose(expects, predicts), predicts)
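Why [9, 9] and [22, 22] (assuming the fixture keras-gcn's tests use, where input_data is [[0,1,2],[2,3,4],[4,5,6],[7,7,8]] and the adjacency links node 0 with nodes 1 and 2, leaves node 3 isolated, and includes self-loops): with such a large step_num every node averages over its whole connected component, so nodes 0-2 all become [2, 3, 4]; the all-ones kernel with no bias then outputs 2+3+4 = 9 in both units, while the isolated node 3 keeps 7+7+8 = 22.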
Example #3
def test_fit(self):
    data_layer = keras.layers.Input(shape=(None, 3), name='Input-Data')
    edge_layer = keras.layers.Input(shape=(None, None),
                                    dtype='int32',
                                    name='Input-Edge')
    conv_layer = GraphConv(
        units=2,
        name='GraphConv',
    )([data_layer, edge_layer])
    model = keras.models.Model(inputs=[data_layer, edge_layer],
                               outputs=conv_layer)
    model.compile(
        optimizer='adam',
        loss='mean_squared_error',
        metrics=['mean_squared_error'],
    )
    expects = np.asarray([[
        [9.5, 0.7],
        [6.5, 0.7],
        [9.5, 0.7],
        [22.8, 1.0],
    ]])
    model.fit(
        x=[self.input_data, self.input_edge],
        y=expects,
        epochs=10000,
        callbacks=[
            keras.callbacks.EarlyStopping(monitor='loss', patience=5),
        ],
        verbose=False,
    )
    predicts = model.predict([self.input_data, self.input_edge])
    self.assertTrue(np.allclose(expects, predicts, rtol=0.1, atol=0.1),
                    predicts)
Example #4
def test_average_step_1(self):
    data_layer = keras.layers.Input(shape=(None, 3), name='Input-Data')
    edge_layer = keras.layers.Input(shape=(None, None),
                                    dtype='int32',
                                    name='Input-Edge')
    conv_layer = GraphConv(
        units=2,
        step_num=1,  # a single neighborhood-averaging step
        kernel_initializer='ones',
        bias_initializer='ones',
        name='GraphConv',
    )([data_layer, edge_layer])
    model = keras.models.Model(inputs=[data_layer, edge_layer],
                               outputs=conv_layer)
    model.compile(
        optimizer='adam',
        loss='mae',
        metrics=['mae'],
    )
    model.summary()
    predicts = model.predict([self.input_data, self.input_edge])[0]
    expects = np.asarray([
        [10., 10.],
        [7., 7.],
        [10., 10.],
        [23., 23.],
    ])
    self.assertTrue(np.allclose(expects, predicts), predicts)
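Under the same assumed fixture, step_num=1 averages each node over its one-hop neighborhood (itself included): node 0 averages {0, 1, 2} to [2, 3, 4], so each unit is 2+3+4 = 9 plus an all-ones bias of 1, giving 10; node 1 averages {0, 1} to [1, 2, 3] → 6 + 1 = 7; node 2 averages {0, 2} to [2, 3, 4] → 10; the isolated node 3 keeps [7, 7, 8] → 22 + 1 = 23.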
Example #5
def configurate_model():
    node_label_input = keras.layers.Input(shape=(MAX_ATOMS, ), dtype='int32')
    # node_feat_input = OnehotEmbedding(128)(node_one_hot_label_input)
    node_feat_input = keras.layers.Embedding(
        output_dim=128, input_dim=DATA_DIM)(node_label_input)
    adj_input = keras.layers.Input(shape=(MAX_ATOMS, MAX_ATOMS))

    gc1 = GraphConv(
        units=32,
        step_num=1,
    )([node_feat_input, adj_input])

    gc2 = GraphConv(
        units=32,
        step_num=1,
    )([gc1, adj_input])

    logits = Dense(1)(gc2)
    logits = K.squeeze(logits, -1)

    tau = 0.1
    samples = Sample_Concrete(tau, gumble_k, name='sample')(logits)

    selected_node_feat_input = Multiply()([gc2, K.expand_dims(samples, -1)])
    graph_feat_input = K.sum(selected_node_feat_input, axis=1) / gumble_k

    net = Dense(200,
                activation='relu',
                name='dense1',
                kernel_regularizer=regularizers.l2(1e-3))(graph_feat_input)
    net = BatchNormalization()(net)  # Add batchnorm for stability.
    net = Dense(200,
                activation='relu',
                name='dense2',
                kernel_regularizer=regularizers.l2(1e-3))(net)
    net = BatchNormalization()(net)

    preds = Dense(2,
                  activation='softmax',
                  name='dense4',
                  kernel_regularizer=regularizers.l2(1e-3))(net)

    model = Model(inputs=[node_label_input, adj_input], outputs=preds)
    return model, (node_label_input, adj_input), samples, logits
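Note: `MAX_ATOMS`, `DATA_DIM`, `gumble_k`, and `Sample_Concrete` come from the surrounding project. Judging from the temperature `tau` and the k-node mask, `Sample_Concrete` is presumably an L2X-style Gumbel-softmax (concrete) sampler: it turns the per-node logits into a soft top-k selection, and the masked sum over node features then yields a fixed-size graph representation for the dense classifier head.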
Example #6
    def build_model(self):
        f_data = keras.layers.Input(shape=(None, self.data_loader.max_len),
                                    name='facebook_data')
        f_edge = keras.layers.Input(shape=(None, None), name='facebook_edge')

        t_data = keras.layers.Input(shape=(None, self.data_loader.max_len),
                                    name='twitter_data')
        t_edge = keras.layers.Input(shape=(None, None), name='twitter_edge')

        f_conv_layer = GraphConv(units=32, step_num=1,
                                 name='GCN_for_facebook')([f_data, f_edge])
        t_conv_layer = GraphConv(units=32, step_num=1,
                                 name='GCN_for_twitter')([t_data, t_edge])
        y1 = keras.layers.Dense(1, activation='relu')(f_conv_layer)
        y2 = keras.layers.Dense(1, activation='relu')(t_conv_layer)
        y3 = keras.layers.Dot(axes=[-1, -1], name='transfor_matrix')([y1, y2])
        # Mixing keras.* and tf.keras.* layers only works when `keras` is
        # itself tf.keras; otherwise stick to a single namespace.
        y3 = tf.keras.layers.Attention()([y3, y3])
        y3 = keras.layers.Activation(activation='sigmoid',
                                     name='prob_matrix')(y3)
        model = keras.models.Model([f_data, f_edge, t_data, t_edge], y3)
        model.summary()
        return model
Example #7
def gcn_model(size):
    unit_num = 4
    node_input = Input(shape=(get_config("node_len"), size), name='Input-Node')
    edge_input = Input(shape=(get_config("node_len"), get_config("node_len")),
                       dtype='int32',
                       name='Input-Edge')
    conv_layer = GraphConv(units=unit_num,
                           name='GraphConv')([node_input, edge_input])
    x = Flatten()(conv_layer)
    x = Dense(unit_num * get_config("node_len"), activation='relu')(x)
    x = Dense(64, activation='relu')(x)
    output_layer = Dense(2, activation='softmax')(x)
    model = Model(inputs=[node_input, edge_input], outputs=output_layer)
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    return model
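A quick smoke test of this builder (assumptions: get_config("node_len") returns 10 here, and the inputs are random placeholders):

    import numpy as np
    model = gcn_model(size=8)
    node = np.random.rand(1, 10, 8)           # (batch, node_len, size)
    edge = np.eye(10, dtype='int32')[None]    # self-loops-only adjacency
    print(model.predict([node, edge]).shape)  # -> (1, 2)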
Example #8
def create_model():
    K.clear_session()
    midi_dir = 'input'
    out_dir = 'output'
    # 13 entries: the trailing "C" lets the `i % 12 + 1` lookup below wrap.
    pitch = [
        "C", "C#", "D", "D#", "E", "F", "F#", "G", "G#", "A", "A#", "B", "C"
    ]

    def pitch_to_note(pitchname):
        for i in range(128):
            if (pitchname == pitch[i % 12] + str(int(i / 12) - 1)):
                return i
            elif (pitchname == pitch[i % 12 + 1] + str(-1 *
                                                       (int(i / 12) - 1))):
                return i

    # def note_to_pitch(notee):
    #     notee = np.array(notee)
    #     print(len(notee))
    #     for k in range(64):
    #         pm = pretty_midi.PrettyMIDI(resolution=220,initial_tempo=120.0)
    #         instrument = pretty_midi.Instrument(0)
    #         for i in range(32):
    #             for j in range(128):
    #                 element = notee[k,j,i]
    #                 if(element > 1e-3):
    #                     note = pretty_midi.Note(velocity=100,pitch=j,start=i*0.5,end=(i+1)*0.5)
    #                     instrument.notes.append(note)
    #                     pm.instruments.append(instrument)

    #     count = 0
    #     for note in instrument.notes:
    #         count += 1
    #     return pm
    def note_to_pitch(notee):
        notee = np.array(notee)
        k = random.randint(0, len(notee) - 1)  # randint's upper bound is inclusive
        pm = pretty_midi.PrettyMIDI(resolution=220, initial_tempo=120.0)
        instrument = pretty_midi.Instrument(0)
        for i in range(64):
            for j in range(128):
                element = notee[k, j, i]
                if element > 1e-3:
                    note = pretty_midi.Note(velocity=100,
                                            pitch=j,
                                            start=i * 0.25,
                                            end=(i + 1) * 0.25)
                    instrument.notes.append(note)
        # Attach the instrument once, after all notes have been collected.
        pm.instruments.append(instrument)
        count = len(instrument.notes)
        return pm

    X_train = np.load('output/Chinese_X_train.npy')  # portable path separators
    A_train = np.load('output/Chinese_A_train.npy')
    graph = []
    #
    X_rows = 128
    X_cols = 64
    X_channels = 1
    X_shape = (X_rows, X_cols)

    A_rows = 128
    A_cols = 128
    A_channels = 1
    A_shape = (A_rows, A_cols)
    z_dim = 32
    steps = 150
    optimizer = Adam(0.0002, 0.5)
    G = [Input(shape=(128, 64), batch_shape=None, sparse=False)]
    # discriminator

    # # Generator
    # self.Xgenerator = self.create_Xgenerator()
    # self.Agenerator = self.create_Agenerator()
    # #
    # self.Xgenerator.compile(loss='binary_crossentropy', optimizer=optimizer)
    # self.Agenerator.compile(loss = 'binary_crossentropy', optimizer = optimizer)

    # self.combined = self.build_combined2()
    # self.combined.compile(loss='binary_crossentropy', optimizer=optimizer)

    Z_in = Input(shape=(z_dim, ))
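    # The {{choice(...)}} / {{uniform(...)}} expressions below are hyperas
    # search-space templates, not plain Python; hyperas substitutes sampled
    # hyperparameter values before this function is run.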
    H = Dense({{choice([16, 32, 64, 128, 256, 512])}})(Z_in)
    H = LeakyReLU(alpha={{uniform(0, 1)}})(H)
    H = BatchNormalization(momentum={{uniform(0, 1)}})(H)
    H = Dense({{choice([16, 32, 64, 128, 256, 512])}})(H)
    H = LeakyReLU({{uniform(0, 1)}})(H)
    H = Dropout(0.5)(H)
    H = BatchNormalization(momentum={{uniform(0, 1)}})(H)
    H = Dense({{choice([16, 32, 64, 128, 256, 512])}})(H)
    H = LeakyReLU({{uniform(0, 1)}})(H)
    H = Dropout(0.5)(H)
    H = BatchNormalization(momentum={{uniform(0, 1)}})(H)
    H = Dense({{choice([16, 32, 64, 128, 256, 512])}})(H)
    H = LeakyReLU({{uniform(0, 1)}})(H)
    H = Dense(np.prod(X_shape), activation='tanh')(H)
    H = Reshape(X_shape)(H)
    Xgenerator = Model(Z_in, H, name='Xgenerator')
    Xgenerator.compile(loss='binary_crossentropy',
                       optimizer=Adam(lr={{uniform(0, 1)}},
                                      beta_1={{uniform(0, 1)}},
                                      beta_2={{uniform(0, 1)}},
                                      decay={{uniform(0, 1)}}))
    Xgenerator.summary()

    Z_in = Input(shape=(z_dim, ))
    H = Dense({{choice([16, 32, 64, 128, 256, 512])}})(Z_in)
    H = LeakyReLU({{uniform(0, 1)}})(H)
    H = BatchNormalization(momentum={{uniform(0, 1)}})(H)
    H = Dense({{choice([16, 32, 64, 128, 256, 512])}})(H)
    H = LeakyReLU({{uniform(0, 1)}})(H)
    H = Dropout(0.5)(H)
    H = BatchNormalization(momentum={{uniform(0, 1)}})(H)
    H = Dense({{choice([16, 32, 64, 128, 256, 512])}})(H)
    H = LeakyReLU({{uniform(0, 1)}})(H)
    H = Dropout(0.5)(H)
    H = BatchNormalization(momentum={{uniform(0, 1)}})(H)
    H = Dense({{choice([16, 32, 64, 128, 256, 512])}})(H)
    H = LeakyReLU({{uniform(0, 1)}})(H)
    H = Dense(np.prod(A_shape), activation='tanh')(H)
    H = Reshape(A_shape)(H)
    Agenerator = Model(Z_in, H, name='Agenerator')
    Agenerator.compile(loss='binary_crossentropy',
                       optimizer=Adam(lr={{uniform(0, 1)}},
                                      beta_1={{uniform(0, 1)}},
                                      beta_2={{uniform(0, 1)}},
                                      decay={{uniform(0, 1)}}))
    Agenerator.summary()

    #
    X_in = Input(shape=(128, 64))
    A = Input(shape=(128, 128))
    H = GraphConv(64)([X_in, A])
    H = GraphConv(64)([H, A])
    H = GraphConv(64)([H, A])
    H = Flatten()(H)
    H = Dropout({{uniform(0, 1)}})(H)
    Y = Dense(units={{choice([16, 32, 64, 128, 256, 512])}},
              activation='tanh')(H)
    Y = LeakyReLU({{uniform(0, 1)}})(Y)
    Y = Dense(1, activation='sigmoid')(Y)
    #
    discriminator = Model(inputs=[X_in, A], outputs=Y, name='Discriminator')
    discriminator.compile(loss='binary_crossentropy',
                          optimizer=SGD(lr={{uniform(0, 1)}},
                                        momentum={{uniform(0, 1)}},
                                        decay={{uniform(0, 1)}},
                                        nesterov={{choice([True, False])}}))
    discriminator.summary()

    z = Input(shape=(z_dim, ))
    X = Xgenerator(z)
    A = Agenerator(z)
    support = 1
    gan_V = discriminator([X, A])
    model = Model(inputs=z, outputs=gan_V, name='Model')
    model.compile(loss='binary_crossentropy',
                  optimizer=Adam(lr={{uniform(0, 1)}},
                                 beta_1={{uniform(0, 1)}},
                                 beta_2={{uniform(0, 1)}},
                                 decay={{uniform(0, 1)}}))
    model.summary()

    # discriminator

    batch_size = 64
    half_batch = int(batch_size / 2)
    noise_half = np.random.normal(0, 1, (half_batch, z_dim))
    noise = np.random.normal(0, 1, (batch_size, z_dim))
    #starttrain = time.time()
    for step in range(steps):
        graphs = []
        # ---------------------
        #  Discriminator
        # ---------------------

        # noise = np.random.normal(0, 1, (half_batch, z_dim))
        # noise = np.random.uniform(0, 1, (half_batch, z_dim))
        gen_A = Agenerator.predict(noise_half)
        gen_X = Xgenerator.predict(noise_half)

        # print(gen_X)
        for t in range(0, len(X_train) // batch_size):
            #
            idx = np.random.randint(0, len(X_train), half_batch)
            #print(idx)

            graphsX = []
            graphsA = []
            for i in idx:
                graphsX.append(X_train[i])
                graphsA.append(A_train[i])
            graphs = [np.array(graphsX), np.array(graphsA)]
            # discriminator
            #
            valid_y = np.array([1] * batch_size)
            # noise = np.random.normal(0, 1, (batch_size, z_dim))
            # noise = np.random.uniform(0,1,(batch_size,self.z_dim))
            g_loss = model.train_on_batch(noise, valid_y)
            # noise = np.random.normal(0, 1, (batch_size, z_dim))
            g_loss = model.train_on_batch(
                noise,
                valid_y,
            )
            d_loss_real = discriminator.train_on_batch(
                graphs, np.ones((half_batch, 1)))
            d_loss_fake = discriminator.train_on_batch([gen_X, gen_A],
                                                       np.zeros(
                                                           (half_batch, 1)))
            #
            d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)

            # ---------------------
            #  Generator
            # ---------------------
            valid_y = np.array([1] * batch_size)
            # noise = np.random.normal(0, 1, (batch_size, z_dim))
            g_loss = model.train_on_batch(
                noise,
                valid_y,
            )
            # noise = np.random.normal(0, 1, (batch_size, z_dim))
            # noise = np.random.uniform(0,1,(batch_size,self.z_dim))
            g_loss = model.train_on_batch(noise, valid_y)

        # Train the generator
        # g_loss = self.combined.train_on_batch(noise, valid_y)
        # g_loss = model.train_on_batch(noise, np.ones((half_batch,1)))

    #elapsed_time = time.time() - starttrain
    #print ("elapsed_time:{0}".format(elapsed_time) + "[sec]")
    return {'loss': g_loss, 'status': STATUS_OK}
Example #9
"""
The edge layer holds the network's understanding of the graph.
"""
data_layer = keras.layers.Input(shape=(numDrugsInSubgraph, numFeats),
                                name='Input-Data')
edge_layer = keras.layers.Input(shape=(numDrugsInSubgraph, numDrugsInSubgraph),
                                dtype='int32',
                                name='Input-Edge')
drug_1_layer = keras.layers.Input(shape=(numFeats, ),
                                  dtype='float32',
                                  name='Pairs-Data-1')
drug_2_layer = keras.layers.Input(shape=(numFeats, ),
                                  dtype='float32',
                                  name='Pairs-Data-2')

conv_layer_1 = GraphConv(
    units=32,
    step_num=2,
)([data_layer, edge_layer])

conv_layer_2 = GraphConv(
    units=16,
    step_num=2,
)([conv_layer_1, edge_layer])

conv_layer_3 = GraphConv(
    units=8,
    step_num=2,
)([conv_layer_2, edge_layer])

flat_output = Flatten()(conv_layer_3)
concat_output = concatenate([flat_output, drug_1_layer, drug_2_layer])
wide_output = Dense(128, activation='relu')(concat_output)
Example #10
A_train, A_test, X_train, X_test, y_train, y_test = train_test_split(
    A, X, y, train_size=TRAIN_FRACTION, random_state=0, shuffle=True)

num_nodes = A[0].shape[0]
support = 1

# training_data_generator = DataGenerator(A_train, X_train, y_train, X_y_train, batch_size=128)
# validation_data_generator = DataGenerator(A_test, X_test, y_test, X_y_test, batch_size=128)

# Define model architecture

X_in = Input(shape=(None, X_train[0].shape[1]), name='Input-Features')
A_in = Input(shape=(None, A_train[0].shape[1]),
             sparse=False,
             name='Input-Adjacency')
H = GraphConv(units=1024, step_num=1, name='GraphConv-1',
              activation='relu')([X_in, A_in])
#H = Dropout(DO)(H)
H = GraphConv(units=1024, step_num=1, name='GraphConv-2',
              activation='relu')([H, A_in])
#H = Dropout(DO)(H)
#H = Concatenate(axis=2)([X_in, A_in])
H = Dense(units=512, activation='tanh', name='Dense-1')(H)
H = Dense(units=256, activation='tanh', name='Dense-2')(H)
H = Dense(units=1, activation='linear', name='Dense-3')(H)
# Y = Dense(units=146, activation='linear')(H)
# H = GraphConv(units=1, step_num=1, name='GraphConv-3',activation='relu')([H, A_in])
Y = GlobalAveragePooling1D(data_format='channels_last',
                           name='Global-Pooling')(H)
#Y = GraphConv(units=146, step_num=1, name='GraphConv-3',activation='linear')([H, A_in])

# Compile model
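The snippet ends at the compile comment; a plausible completion (assumed, based on the single linear output pooled per graph):

    model = Model(inputs=[X_in, A_in], outputs=Y)
    model.compile(optimizer='adam', loss='mse', metrics=['mae'])
    model.summary()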