Example #1
def origin_model():
    sensor_matrix = Input(shape=(num_sensors, num_sensors))
    
    s_input1, extract_cnn1 = resnet_sensor_network(input_shape, repetitions=[2,2,2,2])
    #s_input2, extract_cnn2 = resnet_sensor_network(input_shape, repetitions=[2,2,2,2])
    #s_input3, extract_cnn3 = resnet_sensor_network(input_shape, repetitions=[2,2,2,2])
    #s_input4, extract_cnn4 = resnet_sensor_network(input_shape, repetitions=[2,2,2,2])
    #extract_cnn = Concatenate(axis=1)([extract_cnn1, extract_cnn2, extract_cnn3, extract_cnn4])
        
    #extract_cnn = np.reshape(extract_cnn, (-1,))
    G_1 = GraphConv(256, 'relu')([extract_cnn1, sensor_matrix])
    G_2 = GraphConv(256, 'relu')([G_1, sensor_matrix])
    #gnn_output = tf.split(G_2, num_sensors, 1)
    
    output1 = Dense(32, activation='relu')(Flatten()(G_2))
    output1 = Dense(1, activation='linear', name='sensor_1')(output1)
    
    #output2 = Dense(32, activation='relu')(Flatten()(gnn_output[1]))
    #output2 = Dense(1, activation='linear', name='sensor_2')(output2)
    
    #output3 = Dense(32, activation='relu')(Flatten()(gnn_output[2]))
    #output3 = Dense(1, activation='linear', name='sensor_3')(output3)                                                         
                                                             
    #output4 = Dense(32, activation='relu')(Flatten()(gnn_output[3]))
    #output4 = Dense(1, activation='linear', name='sensor_4')(output4)
    
    model = Model(inputs=[s_input1, sensor_matrix], 
                  outputs= [output1])
    return model
Example #2
 def __init__(self, **kwargs):
     super().__init__(**kwargs)
     self.conv1 = GraphConv(32, activation='elu', kernel_regularizer=l2(l2_reg))
     self.conv2 = GraphConv(32, activation='elu', kernel_regularizer=l2(l2_reg))
     self.flatten = Flatten()
     self.fc1 = Dense(512, activation='relu')
     self.fc2 = Dense(n_out, activation='softmax')
Example #3
    def build_model(self, N, F, num_outputs):
        X_in = Input(shape=(N, F), name='X_in')
        A_in = Input(shape=(N, N), name='A_in')
        RL_indice = Input(shape=(N,), name='rl_indice_in')

        ### Encoder
        x = Dense(32, activation='relu', name='encoder_1')(X_in)
        x = Dense(32, activation='relu', name='encoder_2')(x)

        ### Graph convolution

        x = GraphConv(32, activation='relu', name='gcn1')([x, A_in])
        # x = GraphConv(32, activation='relu',name='gcn2')([x, A_in])

        ### Policy network
        x1 = Dense(32, activation='relu', name='policy_1')(x)
        x1 = GraphConv(32, activation='relu', name='gcn2')([x1, A_in])
        x1 = Dense(32, activation='relu', name='policy_add')(x1)
        x2 = Dense(16, activation='relu', name='policy_2')(x1)

        ###  Action and filter
        x3 = Dense(num_outputs, activation='linear', name='policy_3')(x2)
        filt = Reshape((N, 1), name='expend_dim')(RL_indice)
        qout = Multiply(name='filter')([x3, filt])

        model = Model(inputs=[X_in, A_in, RL_indice], outputs=[qout])
        # print(model.summary())
        return model
Example #4
def make_embedding(CV, MODEL, DATA, EMBED):
    DATA_FOLD = DATA + f"/FOLD-{CV}"
    if not os.path.exists(EMBED):
        os.mkdir(EMBED)

    graph, features, labels = load_dataset(DATA, DATA_FOLD)
    fltr = GraphConv.preprocess(graph).astype('f4')
    fltr = ops.sp_matrix_to_sp_tensor(fltr)

    X_in = Input((features.shape[1], ))
    fltr_in = Input((features.shape[0], ), sparse=True)
    X_1 = GraphConv(512, 'relu', True,
                    kernel_regularizer=l2(5e-4))([X_in, fltr_in])
    X_1 = Dropout(0.5)(X_1)
    X_2 = GraphConv(256, 'relu', True,
                    kernel_regularizer=l2(5e-4))([X_1, fltr_in])
    X_2 = Dropout(0.5)(X_2)
    X_3 = GraphConv(128, 'relu', True,
                    kernel_regularizer=l2(5e-4))([X_2, fltr_in])
    X_3 = Dropout(0.5)(X_3)
    X_4 = GraphConv(64, 'linear', True,
                    kernel_regularizer=l2(5e-4))([X_3, fltr_in])
    X_5 = Dense(labels.shape[1], use_bias=True)(X_4)

    loaded_model = load_model(f"{MODEL}")
    model_without_task = Model(inputs=[X_in, fltr_in], outputs=X_4)
    model_without_task.set_weights(loaded_model.get_weights()[:8])

    final_node_representations = model_without_task([features, fltr],
                                                    training=False)
    save_embedding(final_node_representations, EMBED, DATA_FOLD, CV)
Example #5
def robot_khop_model():  # input/output = num of sensors
    num_sensors = 9
    input_shape = (84, 84 * 4, 3)
    sensor_matrix1 = Input(shape=(num_sensors + 1, num_sensors + 1))
    sensor_matrix2 = Input(shape=(num_sensors + 1, num_sensors + 1))
    #sensor_matrix3 = Input(shape=(num_sensors, num_sensors))
    r_input = Input(shape=(input_shape[0], input_shape[1], input_shape[2]))
    s_input1 = Input(shape=(input_shape[0], input_shape[1], input_shape[2]))
    s_input2 = Input(shape=(input_shape[0], input_shape[1], input_shape[2]))
    s_input3 = Input(shape=(input_shape[0], input_shape[1], input_shape[2]))
    s_input4 = Input(shape=(input_shape[0], input_shape[1], input_shape[2]))
    s_input5 = Input(shape=(input_shape[0], input_shape[1], input_shape[2]))
    s_input6 = Input(shape=(input_shape[0], input_shape[1], input_shape[2]))
    s_input7 = Input(shape=(input_shape[0], input_shape[1], input_shape[2]))
    s_input8 = Input(shape=(input_shape[0], input_shape[1], input_shape[2]))
    s_input9 = Input(shape=(input_shape[0], input_shape[1], input_shape[2]))

    s_cnn = sensor_cnn(input_shape, repetitions=[2, 2, 2, 2])
    robot_cnn = s_cnn(r_input)
    extract_cnn1 = s_cnn(s_input1)
    extract_cnn2 = s_cnn(s_input2)
    extract_cnn3 = s_cnn(s_input3)
    extract_cnn4 = s_cnn(s_input4)
    extract_cnn5 = s_cnn(s_input5)
    extract_cnn6 = s_cnn(s_input6)
    extract_cnn7 = s_cnn(s_input7)
    extract_cnn8 = s_cnn(s_input8)
    extract_cnn9 = s_cnn(s_input9)

    extract_cnn = Concatenate(axis=1)([
        extract_cnn1, extract_cnn2, extract_cnn3, extract_cnn4, extract_cnn5,
        extract_cnn6, extract_cnn7, extract_cnn8, extract_cnn9, robot_cnn
    ])

    #extract_cnn = np.reshape(extract_cnn, (-1,))
    G_h1 = GraphConv(256, 'relu')([extract_cnn, sensor_matrix1])
    G_h2 = GraphConv(256, 'relu')([extract_cnn, sensor_matrix2])
    G_1 = Concatenate(axis=-1)([G_h1, G_h2])

    G_2h1 = GraphConv(256, 'relu')([G_1, sensor_matrix1])
    G_2h2 = GraphConv(256, 'relu')([G_1, sensor_matrix2])
    G_2 = Concatenate(axis=-1)([G_2h1, G_2h2])

    gnn_output = tf.split(G_2, num_sensors + 1, 1)

    r_output = Dense(64, activation='relu',
                     name='policy_mlp')(Flatten()(gnn_output[-1]))
    output1 = Dense(2, activation='linear', name='robot_loss')(r_output)

    model = Model(inputs=[
        s_input1, s_input2, s_input3, s_input4, s_input5, s_input6, s_input7,
        s_input8, s_input9, r_input, sensor_matrix1, sensor_matrix2
    ],
                  outputs=[output1])
    return model
Example #6
 def __init__(self):
     super(MyModel, self).__init__()
     self.dp1 = Dropout(dropout)
     self.gcn1 = GraphConv(channels,
                      activation='relu',
                      kernel_regularizer=l2(l2_reg),
                      use_bias=False)
     self.dp2 = Dropout(dropout)
     self.gcn2 = GraphConv(n_classes,
                      activation='softmax',
                      use_bias=False)
Example #7
 def create_model(self):
     X_in = Input((self.features.shape[1],))
     fltr_in = Input((self.features.shape[0],), sparse=True)
     X_1 = GraphConv(512, 'relu', True, kernel_regularizer=l2(5e-4))([X_in, fltr_in])
     X_1 = Dropout(0.5)(X_1)
     X_2 = GraphConv(256, 'relu', True, kernel_regularizer=l2(5e-4))([X_1, fltr_in])
     X_2 = Dropout(0.5)(X_2)
     X_3 = GraphConv(128, 'relu', True, kernel_regularizer=l2(5e-4))([X_2, fltr_in])
     X_3 = Dropout(0.5)(X_3)
     X_4 = GraphConv(64, 'linear', True, kernel_regularizer=l2(5e-4))([X_3, fltr_in])
     X_5 = Dense(GCN.labels.shape[1], use_bias=True)(X_4)
     return Model(inputs=[X_in, fltr_in], outputs=X_5)
Example #8
def Model_treeGCN_softmax_1(node_count,
                            wordvocabsize,
                            w2v_k,
                            word_W,
                            l2_reg=5e-4):
    X_word_in = Input(shape=(node_count, ), dtype='int32')
    # fltr_in = Input(shape=(node_count, node_count), sparse=True)
    fltr_in = Input(tensor=sp_matrix_to_sp_tensor(fltr))

    word_embedding_layer = Embedding(input_dim=wordvocabsize + 1,
                                     output_dim=w2v_k,
                                     input_length=node_count,
                                     mask_zero=True,
                                     trainable=True,
                                     weights=[word_W])
    word_embedding_x = word_embedding_layer(X_word_in)
    word_embedding_x = Dropout(0.25)(word_embedding_x)

    graph_conv_1 = GraphConv(200,
                             activation='relu',
                             kernel_regularizer=l2(l2_reg),
                             use_bias=True)([word_embedding_x, fltr_in])
    dropout_1 = Dropout(0.5)(graph_conv_1)
    graph_conv_2 = GraphConv(200,
                             activation='relu',
                             kernel_regularizer=l2(l2_reg),
                             use_bias=True)([dropout_1, fltr_in])
    dropout_2 = Dropout(0.5)(graph_conv_2)

    feature_node0 = Lambda(lambda x: x[:, 0])(dropout_2)

    # pool = GlobalAttentionPool(200)(dropout_2)

    flatten = Flatten()(dropout_2)
    fc = Dense(512, activation='relu')(flatten)
    fc = Dropout(0.5)(fc)

    # LSTM_backward = LSTM(200, activation='tanh', return_sequences=False,
    #                      go_backwards=True, dropout=0.5)(dropout_2)

    # present_node0 = concatenate([feature_node0, LSTM_backward], axis=-1)
    class_output = Dense(120)(fc)
    class_output = Activation('softmax', name='CLASS')(class_output)

    # Build model
    model = Model(inputs=[X_word_in, fltr_in], outputs=class_output)
    optimizer = Adam(lr=0.001)
    model.compile(optimizer=optimizer,
                  loss='categorical_crossentropy',
                  weighted_metrics=['acc'])
    return model
Example #9
    def build_model(self, N, F, num_outputs):

        X_in = Input(shape=(N, F), name='X_in')
        A_in = Input(shape=(N, N), name='A_in')
        RL_indice = Input(shape=(N,), name='rl_indice_in')

        ### Encoder
        x = Dense(32, activation='relu', name='encoder_1')(X_in)
        x = Dense(32, activation='relu', name='encoder_2')(x)

        ### Graph convolution

        x1 = GraphConv(32, activation='relu', name='gcn1')([x, A_in])
        x1 = Dense(32, activation='relu', name='post_gcn_1')(x1)

        # x2 = GraphConv(32, activation='relu',name='gcn2')([x1, A_in])
        # x2 = Dense(32,activation='relu',name='post_gcn_2')(x2)

        ###  Action and filter
        x3 = Concatenate()([x, x1])
        x3 = Dense(64, activation='relu', name='policy_1')(x3)
        x3 = Dense(32, activation='relu', name='policy_2')(x3)

        x3 = Dense(num_outputs, activation='linear', name='policy_output')(x3)
        mask = Reshape((N, 1), name='expend_dim')(RL_indice)
        qout = Multiply(name='filter')([x3, mask])

        model = Model(inputs=[X_in, A_in, RL_indice], outputs=[qout])
        print(model.summary())
        return model
Example #10
    def conv_layer(row, col, x):
        conv = tf.keras.layers.Conv1D(hidden_dim * 2,
                                      5,
                                      padding='same',
                                      activation='tanh',
                                      input_shape=(row, col))(x)

        gcn_1 = GraphConv(
            graph_channels,
            activation='tanh',
        )([conv, As_in[:, :, :, 1]])

        gcn_2 = ChebConv(
            graph_channels,
            activation='tanh',
        )([conv, As_in[:, :, :, 1]])

        gcn_3 = ARMAConv(
            graph_channels,
            activation='tanh',
        )([conv, As_in[:, :, :, 1]])

        gcn_1 = tf.keras.layers.Concatenate()([gcn_1, gcn_2, gcn_3])
        gcn_1 = tf.keras.layers.Conv1D(3 * graph_channels,
                                       5,
                                       padding='same',
                                       activation='tanh',
                                       input_shape=(row, col))(gcn_1)

        conv = tf.keras.layers.Concatenate()([x, conv, gcn_1, gcn_2, gcn_3])
        conv = tf.keras.layers.Activation("relu")(conv)
        conv = tf.keras.layers.SpatialDropout1D(0.1)(conv)

        return conv
Example #11
def pose_model():
    sensor_matrix = Input(shape=(num_sensors, num_sensors))
    
    s_input1, extract_cnn1 = resnet_sensor_network(input_shape, repetitions=[2,2,2,2])
    s_input2, extract_cnn2 = resnet_sensor_network(input_shape, repetitions=[2,2,2,2])
    s_input3, extract_cnn3 = resnet_sensor_network(input_shape, repetitions=[2,2,2,2])
    s_input4, extract_cnn4 = resnet_sensor_network(input_shape, repetitions=[2,2,2,2])
    
    pose_input1 = Input(shape=(2,))
    s_pose1 = Dense(64, activation='relu')(pose_input1)
    pose_input2 = Input(shape=(2,))
    s_pose2 = Dense(64, activation='relu')(pose_input2)
    pose_input3 = Input(shape=(2,))
    s_pose3 = Dense(64, activation='relu')(pose_input3)
    pose_input4 = Input(shape=(2,))
    s_pose4 = Dense(64, activation='relu')(pose_input4)
    
    extract_cnn1 = Concatenate(axis=-1)([extract_cnn1, Reshape((1,s_pose1.shape[-1]))(s_pose1)])
    extract_cnn2 = Concatenate(axis=-1)([extract_cnn2, Reshape((1,s_pose2.shape[-1]))(s_pose2)])
    extract_cnn3 = Concatenate(axis=-1)([extract_cnn3, Reshape((1,s_pose3.shape[-1]))(s_pose3)])
    extract_cnn4 = Concatenate(axis=-1)([extract_cnn4, Reshape((1,s_pose4.shape[-1]))(s_pose4)])
    extract_cnn = Concatenate(axis=1)([extract_cnn1, extract_cnn2, extract_cnn3, extract_cnn4])
      
    #extract_cnn = np.reshape(extract_cnn, (-1,))
    G_1 = GraphConv(256, 'relu')([extract_cnn, sensor_matrix])
    G_2 = GraphConv(256, 'relu')([G_1, sensor_matrix])
    gnn_output = tf.split(G_2, num_sensors, 1)
    
    output1 = Dense(32, activation='relu')(Flatten()(gnn_output[0]))
    output1 = Dense(1, activation='linear', name='sensor_1')(output1)
    
    output2 = Dense(32, activation='relu')(Flatten()(gnn_output[1]))
    output2 = Dense(1, activation='linear', name='sensor_2')(output2)
    
    output3 = Dense(32, activation='relu')(Flatten()(gnn_output[2]))
    output3 = Dense(1, activation='linear', name='sensor_3')(output3)                                                         
                                                             
    output4 = Dense(32, activation='relu')(Flatten()(gnn_output[3]))
    output4 = Dense(1, activation='linear', name='sensor_4')(output4)
    
    model = Model(inputs=[s_input1, s_input2, s_input3, s_input4,
                          pose_input1, pose_input2, pose_input3, pose_input4,
                          sensor_matrix],
                  outputs=[output1, output2, output3, output4])
    return model
Example #12
    def load_folded_dataset(self, path):
        with open(path + "/graph.json", 'r') as f:
            graph_json = json.load(f)
        graph = nx.json_graph.node_link_graph(graph_json)
        adjacency_mat = nx.adjacency_matrix(graph)
        fltr = GraphConv.preprocess(adjacency_mat).astype('f4')

        self.fltr = ops.sp_matrix_to_sp_tensor(fltr)
        self.features = np.load(path + "/feats.npy")
        self.train_mask = np.load(path + "/train_mask.npy")
        self.valid_mask = np.load(path + "/valid_mask.npy")
        self.train_labels = GCN.labels[self.train_mask]
        self.valid_labels = GCN.labels[self.valid_mask]
Example #13
def make_discriminator(name, s, adj, node_f, use_gcn=True, use_gru=True):
    n = node_f.shape[0]  # number of nodes
    input_s = Input(shape=(s, n))
    input_f = Input(shape=(n, node_f.shape[1]))
    input_g = Input(shape=(n, n))
    if use_gcn:
        gcov1 = GraphConv(2 * base)([input_f, input_g])
        # gcov2 = GraphConv(base)([gcov1, input_g])
        input_s1 = Dot(axes=(2, 1))(
            [input_s, gcov1])  # batched dot product over the node dimension
    else:
        input_s1 = input_s
    fc1 = Dense(4 * base, activation='relu', input_shape=(n, ))(input_s1)
    fc2 = Dense(8 * base, activation='relu', input_shape=(n, ))(fc1)
    # S*D2

    if use_gru:
        rnn1 = Dropout(dropout)(CuDNNGRU(2 * base, return_sequences=True)(fc2))
    else:
        rnn1 = fc2
    fc3 = Dense(16 * base, activation='relu', input_shape=(n, ))(rnn1)
    out = Dense(1)(Flatten()(fc3))
    return Model(name=name, inputs=[input_s, input_f, input_g], outputs=out)
Example #14
        os.mkdir(models)
    if not os.path.exists(MODEL):
        os.mkdir(MODEL)
    DATA = f"./Data/results/v3/FOLD-{1}"

    logging.basicConfig(filename=f"{MODEL}/train.log", level=logging.DEBUG)
    logger = logging.getLogger()
    logger.info(f"Path for Dataset = {DATA}")
    logger.info(f"Path for Models = {MODEL}")

    labels = np.load("./Data/results/v3/labels.npy")
    with open(DATA + "/graph.json", 'r') as f:
        graph_json = json.load(f)
    graph = nx.json_graph.node_link_graph(graph_json)
    adjacency_mat = nx.adjacency_matrix(graph)
    fltr = GraphConv.preprocess(adjacency_mat).astype('f4')

    fltr = ops.sp_matrix_to_sp_tensor(fltr)
    features = np.load(DATA + "/feats.npy")
    train_mask = np.load(DATA + "/train_mask.npy")
    valid_mask = np.load(DATA + "/valid_mask.npy")
    train_labels = labels[train_mask]
    valid_labels = labels[valid_mask]

    strategy = tf.distribute.experimental.ParameterServerStrategy(
        cluster_resolver)
    with strategy.scope():

        def create_model(features_shape, labels_shape):
            X_in = Input((features_shape[1], ))
            fltr_in = Input((features_shape[0], ), sparse=True)
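            # --- Hedged completion, not in the original snippet ---
            # The truncated create_model presumably mirrors the GraphConv stack
            # used elsewhere in this project (see the make_embedding example
            # above); imports are assumed to match that example.
            X_1 = GraphConv(512, 'relu', True,
                            kernel_regularizer=l2(5e-4))([X_in, fltr_in])
            X_1 = Dropout(0.5)(X_1)
            X_2 = GraphConv(256, 'relu', True,
                            kernel_regularizer=l2(5e-4))([X_1, fltr_in])
            X_2 = Dropout(0.5)(X_2)
            X_3 = GraphConv(128, 'relu', True,
                            kernel_regularizer=l2(5e-4))([X_2, fltr_in])
            X_3 = Dropout(0.5)(X_3)
            X_4 = GraphConv(64, 'linear', True,
                            kernel_regularizer=l2(5e-4))([X_3, fltr_in])
            X_5 = Dense(labels_shape[1], use_bias=True)(X_4)
            return Model(inputs=[X_in, fltr_in], outputs=X_5)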
Example #15
def khop_model_distribute(num_sensors=10):  # input/output = num of sensors
    gnn_unit = 128
    sensor_matrix1 = Input(shape=(num_sensors, num_sensors))
    sensor_matrix2 = Input(shape=(num_sensors, num_sensors))
    sensor_matrix3 = Input(shape=(num_sensors, num_sensors))
    sensor_matrix4 = Input(shape=(num_sensors, num_sensors))
    #sensor_matrix3 = Input(shape=(num_sensors, num_sensors))
    s_input1 = Input(shape=(input_shape[0], input_shape[1], input_shape[2]))
    s_input2 = Input(shape=(input_shape[0], input_shape[1], input_shape[2]))
    s_input3 = Input(shape=(input_shape[0], input_shape[1], input_shape[2]))
    s_input4 = Input(shape=(input_shape[0], input_shape[1], input_shape[2]))
    s_input5 = Input(shape=(input_shape[0], input_shape[1], input_shape[2]))
    s_input6 = Input(shape=(input_shape[0], input_shape[1], input_shape[2]))
    s_input7 = Input(shape=(input_shape[0], input_shape[1], input_shape[2]))
    s_input8 = Input(shape=(input_shape[0], input_shape[1], input_shape[2]))
    s_input9 = Input(shape=(input_shape[0], input_shape[1], input_shape[2]))
    s_input10 = Input(shape=(input_shape[0], input_shape[1], input_shape[2]))

    s_cnn = sensor_cnn(input_shape, repetitions=[2, 2, 2, 2])
    extract_cnn1 = s_cnn(s_input1)
    extract_cnn2 = s_cnn(s_input2)
    extract_cnn3 = s_cnn(s_input3)
    extract_cnn4 = s_cnn(s_input4)
    extract_cnn5 = s_cnn(s_input5)
    extract_cnn6 = s_cnn(s_input6)
    extract_cnn7 = s_cnn(s_input7)
    extract_cnn8 = s_cnn(s_input8)
    extract_cnn9 = s_cnn(s_input9)
    extract_cnn10 = s_cnn(s_input10)

    extract_cnn = Concatenate(axis=1)([
        extract_cnn1, extract_cnn2, extract_cnn3, extract_cnn4, extract_cnn5,
        extract_cnn6, extract_cnn7, extract_cnn8, extract_cnn9, extract_cnn10
    ])

    #extract_cnn = np.reshape(extract_cnn, (-1,))
    G_h1 = GraphConv(gnn_unit, 'selu')([extract_cnn, sensor_matrix1])
    G_h2 = GraphConv(gnn_unit, 'selu')([extract_cnn, sensor_matrix2])
    G_h3 = GraphConv(gnn_unit, 'selu')([extract_cnn, sensor_matrix3])
    G_h4 = GraphConv(gnn_unit, 'selu')([extract_cnn, sensor_matrix4])
    G_1 = Concatenate(axis=-1)([G_h1, G_h2, G_h3, G_h4])

    G_2h1 = GraphConv(gnn_unit, 'selu')([G_1, sensor_matrix1])
    G_2h2 = GraphConv(gnn_unit, 'selu')([G_1, sensor_matrix2])
    G_2h3 = GraphConv(gnn_unit, 'selu')([G_1, sensor_matrix3])
    G_2h4 = GraphConv(gnn_unit, 'selu')([G_1, sensor_matrix4])
    G_2 = Concatenate(axis=-1)([G_2h1, G_2h2, G_2h3, G_2h4])

    #G_3h1 = GraphConv(gnn_unit, 'selu')([G_2, sensor_matrix1])
    #G_3h2 = GraphConv(gnn_unit, 'selu')([G_2, sensor_matrix2])
    #G_3h3 = GraphConv(gnn_unit, 'selu')([G_2, sensor_matrix3])
    #G_3h4 = GraphConv(gnn_unit, 'selu')([G_2, sensor_matrix4])
    #G_3 = Concatenate(axis=-1)([G_3h1, G_3h2, G_3h3, G_3h4])

    #G_4h1 = GraphConv(gnn_unit, 'selu')([G_3, sensor_matrix1])
    #G_4h2 = GraphConv(gnn_unit, 'selu')([G_3, sensor_matrix2])
    #G_4h3 = GraphConv(gnn_unit, 'selu')([G_3, sensor_matrix3])
    #G_4h4 = GraphConv(gnn_unit, 'selu')([G_3, sensor_matrix4])
    #G_4 = Concatenate(axis=-1)([G_4h1, G_4h2, G_4h3, G_4h4])

    gnn_output = tf.split(G_2, num_sensors, 1)

    mlp_layer = mlp_model()

    output1 = mlp_layer(Flatten()(gnn_output[0]))
    output2 = mlp_layer(Flatten()(gnn_output[1]))
    output3 = mlp_layer(Flatten()(gnn_output[2]))
    output4 = mlp_layer(Flatten()(gnn_output[3]))
    output5 = mlp_layer(Flatten()(gnn_output[4]))
    output6 = mlp_layer(Flatten()(gnn_output[5]))
    output7 = mlp_layer(Flatten()(gnn_output[6]))
    output8 = mlp_layer(Flatten()(gnn_output[7]))
    output9 = mlp_layer(Flatten()(gnn_output[8]))
    output10 = mlp_layer(Flatten()(gnn_output[9]))

    model = Model(inputs=[
        s_input1, s_input2, s_input3, s_input4, s_input5, s_input6, s_input7,
        s_input8, s_input9, s_input10, sensor_matrix1, sensor_matrix2,
        sensor_matrix3, sensor_matrix4
    ],
                  outputs=[
                      output1, output2, output3, output4, output5, output6,
                      output7, output8, output9, output10
                  ])
    return model
Example #16
va_mask = np.zeros(N, dtype=bool)
va_mask[va_idx] = True
te_mask = np.zeros(N, dtype=bool)
te_mask[te_idx] = True
masks = [tr_mask, va_mask, te_mask]

# Parameters
channels = 256
learning_rate = 1e-2
epochs = 200
es_patience = 200
F = X.shape[1]
n_classes = y.shape[1]

# Preprocessing operations
fltr = GraphConv.preprocess(A).astype('f4')

# Model definition
X_in = Input(shape=(F, ))
fltr_in = Input((N, ), sparse=True)
graph_conv_1 = GraphConv(channels, activation='relu')([X_in, fltr_in])
graph_conv_2 = GraphConv(channels, activation='relu')([graph_conv_1, fltr_in])
graph_conv_3 = GraphConv(n_classes)([graph_conv_2, fltr_in])

# Build model
model = Model(inputs=[X_in, fltr_in], outputs=graph_conv_3)
optimizer = Adam(lr=learning_rate)
model.compile(optimizer=optimizer, loss=BinaryCrossentropy(from_logits=True))
model.summary()

# Train model
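# --- Hedged completion, not in the original snippet ---
# A typical single-mode training call for this setup, assuming X, y and the
# boolean masks built above are the usual node-level arrays; the EarlyStopping
# import is an assumption.
from tensorflow.keras.callbacks import EarlyStopping

validation_data = ([X, fltr], y, va_mask)
model.fit([X, fltr],
          y,
          sample_weight=tr_mask,
          epochs=epochs,
          batch_size=N,
          validation_data=validation_data,
          shuffle=False,  # single fixed graph: node order must not be shuffled
          callbacks=[EarlyStopping(patience=es_patience,
                                   restore_best_weights=True)])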
Example #17
def main(time_train,
         epochs,
         C_i,
         C_o,
         learning_rate,
         kernel,
         regenerate=False):
    os.chdir(os.path.split(os.path.dirname(os.path.realpath(__file__)))[0])

    if (not os.path.exists('/media/data6TB/spandan/data.p')) or regenerate:
        from data_utils import load_data
        X, A, E = load_data(
            DATASET='data-all.json', R=300, SIGMA=1
        )  # Shapes: (171, 640, 3) (171, 640, 640) (171, 640, 640, 1)
        with open('/media/data6TB/spandan/data.p', 'wb') as pkl:
            pickle.dump((X, A, E), pkl)

    else:
        with open('/media/data6TB/spandan/data.p', 'rb') as pkl:
            X, A, E = pickle.load(pkl)

    districts = A.shape[1]

    # Inputs
    X_in = Input(shape=(districts, C_i), batch_size=time_train)
    E_in = Input(shape=(districts, districts, 1), batch_size=time_train)
    A_in = Input(shape=(districts, districts), batch_size=time_train)

    # Block
    X_i0 = tf.transpose(tf.expand_dims(X_in, axis=0), perm=[0, 2, 1, 3])
    l1 = GLU(filters=2 * C_o, kernelsize=kernel)(X_i0)
    X_i1 = tf.squeeze(tf.transpose(l1, perm=[0, 2, 1, 3]))
    E_i1 = E_in[:X_i1.shape[0], :, :, :]
    A_i1 = A_in[:X_i1.shape[0], :, :]
    l1 = GraphConv(channels=C_i, activation='relu')([X_i1, A_i1, E_i1])
    l1 = tf.expand_dims(tf.transpose(l1, perm=[1, 0, 2]), axis=0)
    l1 = GLU(filters=2 * C_o, kernelsize=kernel)(l1)

    # Block
    l2 = GLU(filters=2 * C_o, kernelsize=kernel)(l1)
    X_i2 = tf.squeeze(tf.transpose(l2, perm=[0, 2, 1, 3]))
    E_i2 = E_in[:X_i2.shape[0], :, :, :]
    A_i2 = A_in[:X_i2.shape[0], :, :]
    l2 = GraphConv(channels=C_i, activation='relu')([X_i2, A_i2, E_i2])
    l2 = tf.expand_dims(tf.transpose(l2, perm=[1, 0, 2]), axis=0)
    l2 = GLU(filters=2 * C_o, kernelsize=kernel)(l2)

    # Output layer
    l3 = GLU(filters=2 * C_i, kernelsize=(time_train - 4 * (kernel - 1)))(l2)
    X_i3 = tf.squeeze(tf.transpose(l3, perm=[0, 2, 1, 3]))
    final_output = nstack(Dense(C_i)(X_i3), time_train)

    model = Model(inputs=[X_in, E_in, A_in], outputs=final_output)
    optimizer = RMSprop(learning_rate=learning_rate)
    model.compile(optimizer=optimizer,
                  loss='mean_squared_error',
                  weighted_metrics=['acc'])
    model.summary()

    X_input = X[:time_train, :, :]
    E_input = E[:time_train, :, :, :]
    A_input = localpooling_filter((A[:time_train, :, :]).numpy(),
                                  symmetric=True)
    output = nstack(tf.squeeze(X[time_train, :, :]), time_train)

    model.fit([X_input, E_input, A_input],
              output,
              shuffle=False,
              epochs=epochs)
Example #18
print('A', A.shape)
print('X', X[0])
# print('y', y)
# print('N', N)

from spektral.layers import GraphConv
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Dropout
from tensorflow.keras.regularizers import l2

dropout = 0.5
channels = 16
l2_reg = 5e-4 / 2

# Preprocessing operations
fltr = GraphConv.preprocess(A).astype('f4')
X = X.toarray()

X_in = Input(shape=(F, ))
fltr_in = Input((N, ), sparse=True)

dropout_1 = Dropout(dropout)(X_in)
graph_conv_1 = GraphConv(channels,
                         activation='relu',
                         kernel_regularizer=l2(l2_reg),
                         use_bias=False)([dropout_1, fltr_in])
# dropout_2 = Dropout(dropout)(graph_conv_1)
# graph_conv_2 = GraphConv(n_classes,
#                          activation='softmax',
#                          use_bias=False)([dropout_2, fltr_in])
# Build model
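# --- Hedged completion, not in the original snippet ---
# The example stops before the model is assembled; a minimal completion in the
# style of the commented-out lines above. n_classes is assumed to be derived
# from the labels, as in the other citation snippets.
dropout_2 = Dropout(dropout)(graph_conv_1)
graph_conv_2 = GraphConv(n_classes,
                         activation='softmax',
                         use_bias=False)([dropout_2, fltr_in])

model = Model(inputs=[X_in, fltr_in], outputs=graph_conv_2)
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              weighted_metrics=['acc'])
model.summary()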
Example #19
    [np.random.choice(np.nonzero(labels_one_hot[:, c])[0]) for c in range(4)])
train_mask = np.zeros(shape=labels_one_hot.shape[0], dtype=bool)
train_mask[labels_to_keep] = ~train_mask[labels_to_keep]
val_mask = ~train_mask

y_train = labels_one_hot * train_mask[..., np.newaxis]
y_val = labels_one_hot * val_mask[..., np.newaxis]

# Get important parameters of adjacency matrix
N = adj.shape[0]
F = 4  # number of features
learning_rate = 0.01
epochs = 300

# Preprocessing operations
fltr = GraphConv.preprocess(adj).astype('f4')

# Model definition
X_in = Input(shape=(N, ))
fltr_in = Input(shape=(N, ))
x = GraphConv(F, activation='tanh', use_bias=False)([X_in, fltr_in])
x = GraphConv(F, activation='tanh', use_bias=False)([x, fltr_in])
x = GraphConv(2, activation='tanh', use_bias=False,
              name="embedding")([x, fltr_in])
output = GraphConv(4, activation='softmax', use_bias=False)([x, fltr_in])

# Build model
model = Model(inputs=[X_in, fltr_in], outputs=output)
model.compile(optimizer=Adam(lr=learning_rate),
              loss='categorical_crossentropy',
              weighted_metrics=['acc'])
Example #20
    center=True,
    scale=True,
    beta_initializer='zeros',
    gamma_initializer='ones',
    moving_mean_initializer='zeros',
    moving_variance_initializer='ones',
    renorm_momentum=0.99,
)(area_in)  #batch norm on the area data

if not include_batch_norm_layers:
    bn0 = area_in

if not include_batch_norm_layers:
    g1 = GraphConv(
        embedding_vecor_length1,  #first graph conv layer
        activation='relu',
        kernel_regularizer=l2(l2_reg),
        use_bias=True)([bn0, fltr_in])
    ac1 = g1
else:
    g1 = GraphConv(
        embedding_vecor_length1,  #first graph conv layer
        kernel_regularizer=l2(l2_reg),
        use_bias=True)([bn0, fltr_in])
    bn1 = BN(
        axis=-1,
        momentum=0.99,
        epsilon=0.001,
        center=True,
        scale=True,
        beta_initializer='zeros',
Example #21
print(X.shape, y.shape, train_mask.shape)
# Parameters
channels = 16  # Number of channels in the first layer
N = X.shape[0]  # Number of nodes in the graph
print('Number of nodes in the graph N = ', N)
F = X.shape[1]  # Original size of node features
print('Original size of node features F = ', F)
n_classes = y.shape[1]  # Number of classes
dropout = 0.5  # Dropout rate for the features
l2_reg = 5e-4 / 2  # L2 regularization rate
learning_rate = 1e-2  # Learning rate
epochs = 10  # Number of training epochs 200
es_patience = 10  # Patience for early stopping

# Preprocessing operations
fltr = GraphConv.preprocess(A).astype('f4')
X = X.toarray()

# Model definition
X_in = Input(shape=(F, ))
fltr_in = Input((N, ), sparse=True)

dropout_1 = Dropout(dropout)(X_in)
graph_conv_1 = GraphConv(channels,
                         activation='relu',
                         kernel_regularizer=l2(l2_reg),
                         use_bias=False)([dropout_1, fltr_in])
dropout_2 = Dropout(dropout)(graph_conv_1)
graph_conv_2 = GraphConv(n_classes, activation='softmax',
                         use_bias=False)([dropout_2, fltr_in])
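# --- Hedged completion, not in the original snippet ---
# Model assembly and compilation as in the other citation-GCN examples; the
# Model and Adam imports are assumptions, since the top of this script is cut off.
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam

model = Model(inputs=[X_in, fltr_in], outputs=graph_conv_2)
model.compile(optimizer=Adam(lr=learning_rate),
              loss='categorical_crossentropy',
              weighted_metrics=['acc'])
model.summary()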
Example #22
es_patience = 10  # Patience for early stopping
log_dir = init_logging()  # Create log directory and file

# Preprocessing
fltr = localpooling_filter(adj.copy())

# Train/test split
fltr_train, fltr_test, \
x_train, x_test,       \
y_train, y_test = train_test_split(fltr, x, y, test_size=0.1)

# Model definition
X_in = Input(shape=(N, F))
filter_in = Input((N, N))

gc1 = GraphConv(32, activation='relu',
                kernel_regularizer=l2(l2_reg))([X_in, filter_in])
gc2 = GraphConv(32, activation='relu',
                kernel_regularizer=l2(l2_reg))([gc1, filter_in])
pool = GlobalAttentionPool(128)(gc2)

output = Dense(n_classes, activation='softmax')(pool)

# Build model
model = Model(inputs=[X_in, filter_in], outputs=output)
optimizer = Adam(lr=learning_rate)
model.compile(optimizer=optimizer,
              loss='categorical_crossentropy',
              metrics=['acc'])
model.summary()

# Callbacks
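# --- Hedged sketch, not in the original snippet ---
# Callbacks plus the batch-mode fit this script presumably continues with;
# batch_size and epochs are assumed to be defined in the truncated part, and
# the callback imports are assumptions.
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint

callbacks = [
    EarlyStopping(patience=es_patience, restore_best_weights=True),
    ModelCheckpoint(log_dir + '/best_model.h5', save_best_only=True)
]
model.fit([x_train, fltr_train],
          y_train,
          batch_size=batch_size,
          epochs=epochs,
          validation_split=0.1,
          callbacks=callbacks)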
Example #23
# Preprocessing operations
fltr = localpooling_filter(A).astype('f4')
X = X.toarray()
Ai = A.toarray()
fltr1 = fltr.toarray()

# Pre-compute propagation
for i in range(K - 1):
    fltr = fltr.dot(fltr)
fltr.sort_indices()

# Model definition
X_in = Input(shape=(F, ))
fltr_in = Input((N, ), sparse=True)
output = GraphConv(n_classes,
                   activation='softmax',
                   kernel_regularizer=l2(l2_reg),
                   use_bias=False)([X_in, fltr_in])

# Build model
model = Model(inputs=[X_in, fltr_in], outputs=output)
optimizer = Adam(lr=learning_rate)
model.compile(optimizer=optimizer,
              loss='categorical_crossentropy',
              weighted_metrics=['acc'])
model.summary()

# Train model
validation_data = ([X, fltr], y, val_mask)
model.fit(
    [X, fltr],
    y,
Example #24
can train GCN for 200 epochs in a few tenths of a second (0.32s on a GTX 1050).
In total, this script has 34 SLOC.
"""
import tensorflow as tf
from tensorflow.keras.layers import Input, Dropout
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.regularizers import l2

from spektral.datasets import citation
from spektral.layers import GraphConv, ops
from spektral.utils import tic, toc

# Load data
A, X, y, train_mask, val_mask, test_mask = citation.load_data('cora')
fltr = GraphConv.preprocess(A).astype('f4')
fltr = ops.sp_matrix_to_sp_tensor(fltr)
X = X.toarray()

# Define model
X_in = Input(shape=(X.shape[1],))
fltr_in = Input((X.shape[0],), sparse=True)
X_1 = GraphConv(16, 'relu', True, kernel_regularizer=l2(5e-4))([X_in, fltr_in])
X_1 = Dropout(0.5)(X_1)
X_2 = GraphConv(y.shape[1], 'softmax', True)([X_1, fltr_in])

# Build model
model = Model(inputs=[X_in, fltr_in], outputs=X_2)
optimizer = Adam(lr=1e-2)
model.compile(optimizer=optimizer, loss='categorical_crossentropy')
loss_fn = model.loss_functions[0]
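# --- Hedged sketch, not in the original snippet ---
# The custom training loop this script is building towards: a single
# full-batch step compiled with tf.function and timed with tic/toc.
# The 200-epoch count is an assumption taken from the docstring above.
@tf.function
def train_step():
    with tf.GradientTape() as tape:
        predictions = model([X, fltr], training=True)
        loss = loss_fn(y[train_mask], tf.boolean_mask(predictions, train_mask))
        loss += sum(model.losses)  # add the l2 regularization terms
    gradients = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))
    return loss

tic()
for epoch in range(200):
    train_step()
toc('GCN (200 epochs)')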
Example #25
X_train, y_train, X_val, y_val, X_test, y_test, adj = mnist.load_data()
X_train, X_val, X_test = X_train[..., None], X_val[..., None], X_test[..., None]
N = X_train.shape[-2]      # Number of nodes in the graphs
F = X_train.shape[-1]      # Node features dimensionality
n_out = y_train.shape[-1]  # Dimension of the target

fltr = normalized_laplacian(adj)

# Model definition
X_in = Input(shape=(N, F))
# Pass A as a fixed tensor, otherwise Keras will complain about inputs of
# different rank.
A_in = Input(tensor=sp_matrix_to_sp_tensor(fltr))

graph_conv = GraphConv(32,
                       activation='elu',
                       kernel_regularizer=l2(l2_reg),
                       use_bias=True)([X_in, A_in])
graph_conv = GraphConv(32,
                       activation='elu',
                       kernel_regularizer=l2(l2_reg),
                       use_bias=True)([graph_conv, A_in])
flatten = Flatten()(graph_conv)
fc = Dense(512, activation='relu')(flatten)
output = Dense(n_out, activation='softmax')(fc)

# Build model
model = Model(inputs=[X_in, A_in], outputs=output)
optimizer = Adam(lr=learning_rate)
model.compile(optimizer=optimizer,
              loss='sparse_categorical_crossentropy',
              metrics=['acc'])
Example #26
# Parameters
l2_reg = 5e-4         # Regularization rate for l2
learning_rate = 1e-3  # Learning rate for SGD
batch_size = 32       # Batch size
epochs = 1000         # Number of training epochs
es_patience = 10      # Patience for early stopping

# Load data
X_train, y_train, X_val, y_val, X_test, y_test, A = mnist.load_data()
X_train, X_val, X_test = X_train[..., None], X_val[..., None], X_test[..., None]
N = X_train.shape[-2]      # Number of nodes in the graphs
F = X_train.shape[-1]      # Node features dimensionality
n_out = 10                 # Dimension of the target

fltr = GraphConv.preprocess(A)

# Model definition
X_in = Input(shape=(N, F))
# Pass A as a fixed tensor, otherwise Keras will complain about inputs of
# different rank.
A_in = Input(tensor=sp_matrix_to_sp_tensor(fltr))

graph_conv = GraphConv(32,
                       activation='elu',
                       kernel_regularizer=l2(l2_reg))([X_in, A_in])
graph_conv = GraphConv(32,
                       activation='elu',
                       kernel_regularizer=l2(l2_reg))([graph_conv, A_in])
flatten = Flatten()(graph_conv)
fc = Dense(512, activation='relu')(flatten)
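# --- Hedged completion, not in the original snippet ---
# Output layer, model assembly and compilation mirroring the previous MNIST
# example; the Model and Adam imports are assumed to match it.
output = Dense(n_out, activation='softmax')(fc)

model = Model(inputs=[X_in, A_in], outputs=output)
model.compile(optimizer=Adam(lr=learning_rate),
              loss='sparse_categorical_crossentropy',
              metrics=['acc'])
model.summary()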
Example #27
A_in = Input(batch_shape=(None, None), sparse=True)
I_in = Input(batch_shape=(None, 1), dtype='int64')

# The inputs will have an arbitrary dimension, while the targets consist of
# batch_size values.
# However, Keras expects the inputs to have the same dimension as the output.
# This is a hack in Tensorflow to bypass the requirements of Keras.
# We use a dynamically initialized tf.Dataset to feed the target values to the
# model at training time.
target_ph = tf.placeholder(tf.float32, shape=(None, 1))
target_data = tf.data.Dataset.from_tensor_slices(target_ph)
target_data = target_data.batch(batch_size)
target_iter = target_data.make_initializable_iterator()
target = target_iter.get_next()

gc1 = GraphConv(64, activation='relu')([X_in, A_in])
gc2 = GraphConv(64, activation='relu')([gc1, A_in])
pool = GlobalAvgPool()([gc2, I_in])
dense1 = Dense(64, activation='relu')(pool)
output = Dense(n_out)(dense1)

# Build model
model = Model(inputs=[X_in, A_in, I_in], outputs=output)
optimizer = Adam(lr=learning_rate)
model.compile(optimizer=optimizer, loss='mse', target_tensors=target)
model.summary()

# Training setup
sess = K.get_session()
batches_train = batch_iterator([A_train, X_train, y_train], batch_size=batch_size, epochs=epochs)
loss = 0
Example #28
X, A, _, y = ogb.dataset_to_numpy(dataset, dtype='f8')
A = [a.toarray() for a in A]
F = X[0].shape[-1]
X = pad_jagged_array(X, (N, F))
A = pad_jagged_array(A, (N, N))
X_tr, A_tr, y_tr = X[tr_idx], A[tr_idx], y[tr_idx]
X_va, A_va, y_va = X[va_idx], A[va_idx], y[va_idx]
X_te, A_te, y_te = X[te_idx], A[te_idx], y[te_idx]

################################################################################
# BUILD MODEL
################################################################################
X_in = Input(shape=(N, F))
A_in = Input(shape=(N, N))

X_1 = GraphConv(32, activation='relu')([X_in, A_in])
X_1, A_1 = MinCutPool(N // 2)([X_1, A_in])
X_2 = GraphConv(32, activation='relu')([X_1, A_1])
X_3 = GlobalSumPool()(X_2)
output = Dense(n_out)(X_3)

# Build model
model = Model(inputs=[X_in, A_in], outputs=output)
opt = Adam(lr=learning_rate)
model.compile(optimizer=opt, loss='mse')
model.summary()

################################################################################
# FIT MODEL
################################################################################
model.fit([X_tr, A_tr],
Example #29
                                        c2v_k=50,
                                        t2v_k=100,
                                        maxlen=maxlen)

    for inum in range(2, 3):

        graph_dict, \
        tagDict_train, tagDict_dev, tagDict_test, \
        word_vob, word_id2word, word_W, w2v_k, \
        char_vob, char_id2char, char_W, c2v_k, \
        target_vob, target_id2word, \
        posi_W, posi_k, type_W, type_k, \
        max_s, max_posi, max_c = pickle.load(open(datafile, 'rb'))

        A = nx.adjacency_matrix(nx.from_dict_of_lists(graph_dict))
        fltr = GraphConv.preprocess(A).astype('f4')
        print('fltr.toarray.shape ... ', fltr.toarray().shape)

        nn_model = SelectModel(modelname,
                               node_count=len(graph_dict),
                               wordvocabsize=len(word_vob),
                               tagvocabsize=len(target_vob),
                               posivocabsize=max_posi + 1,
                               charvocabsize=len(char_vob),
                               word_W=word_W,
                               posi_W=posi_W,
                               tag_W=type_W,
                               char_W=char_W,
                               input_sent_lenth=max_s,
                               w2v_k=w2v_k,
                               posi2v_k=max_posi + 1,
Example #30
N = A.shape[0]
F = X.shape[-1]
n_classes = y.shape[-1]

# %%
X[1, 2]
# %%
from spektral.layers import GraphConv
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Dropout

# %%
X_in = Input(shape=(F, ))
A_in = Input((N, ), sparse=True)

X_1 = GraphConv(16, "relu")([X_in, A_in])
X_1 = Dropout(0.5)(X_1)
X_2 = GraphConv(n_classes, "softmax")([X_1, A_in])

model = Model(inputs=[X_in, A_in], outputs=X_2)

# %%
A = GraphConv.preprocess(A).astype("f4")

# %%
model.compile(optimizer="adam",
              loss="categorical_crossentropy",
              weighted_metrics=["acc"])
model.summary()

# %%
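# Hedged continuation for this cell, not in the original snippet: a typical
# single-mode training call, assuming y plus the usual boolean train/validation
# masks exist (they are not shown in this truncated snippet).
model.fit([X, A],
          y,
          sample_weight=train_mask,
          batch_size=N,
          epochs=200,
          shuffle=False,  # single fixed graph: node order must not be shuffled
          validation_data=([X, A], y, val_mask))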