Exemplo n.º 1
0
 def __init__(self):
     """Build a GCSConv -> MinCutPool -> GCSConv -> sum-pool -> Dense model.

     NOTE(review): relies on module-level `N` (padded node count) and
     `n_out` (output dimension) defined elsewhere in the file — confirm
     they are in scope where this class is instantiated.
     """
     super().__init__()
     # presumably masks zero-padded nodes so downstream layers skip them — TODO confirm
     self.mask = GraphMasking()
     self.conv1 = GCSConv(32, activation="relu")
     self.pool = MinCutPool(N // 2)  # pool to half the (padded) node count
     self.conv2 = GCSConv(32, activation="relu")
     self.global_pool = GlobalSumPool()
     self.dense1 = Dense(n_out)
Exemplo n.º 2
0
def GNN(A, X):
    """Cluster graph nodes with a GCSConv encoder followed by MinCutPool.

    Builds a TF1-style model whose only training signal is MinCutPool's
    two unsupervised losses, fits it on the single input graph, and reads
    off the learned cluster-assignment matrix.

    Parameters
    ----------
    A : array-like or scipy sparse matrix
        Adjacency matrix; symmetrized here via ``max(A, A.T)``.
    X : scipy sparse matrix
        Node feature matrix (densified internally).

    Returns
    -------
    numpy.ndarray
        1-D array with one cluster index per node.

    Notes
    -----
    Depends on module-level names defined elsewhere in this file:
    ``n_classes``, ``Input``, ``GCSConv``, ``MinCutPool``, ``Model``,
    ``K``, ``tf``, ``tqdm``, ``normalized_adjacency`` and
    ``sp_matrix_to_sp_tensor_value``.
    """
    np.random.seed(0)  # for reproducibility
    ITER = 10000
    # Hyper-parameters.
    # FIX: values were previously stored as single-element lists
    # (grid-search leftovers) yet consumed as scalars below, so e.g.
    # GCSConv received [16] and AdamOptimizer received [5e-4].
    # They are now plain scalars.
    P = OrderedDict([
        ('es_patience', ITER),
        ('dataset', 'cora'),  # 'cora', 'citeseer', 'pubmed', 'cloud', or 'synth'
        ('H_', None),
        ('n_channels', 16),
        ('learning_rate', 5e-4),
    ])
    ############################################################################
    # LOAD DATASET
    ############################################################################

    A = np.maximum(A, A.T)  # symmetrize the adjacency matrix
    A = sp.csr_matrix(A, dtype=np.float32)

    X = X.todense()
    n_feat = X.shape[-1]

    ############################################################################
    # GNN MODEL
    ############################################################################
    # TF1-style symbolic inputs: dense node features and a sparse adjacency.
    X_in = Input(
        tensor=tf.placeholder(tf.float32, shape=(None, n_feat), name='X_in'))
    A_in = Input(tensor=tf.sparse_placeholder(tf.float32, shape=(None, None)),
                 name='A_in',
                 sparse=True)

    A_norm = normalized_adjacency(A)  # fed as the value of A_in below
    X_1 = GCSConv(P['n_channels'],
                  kernel_initializer='he_normal',
                  activation='elu')([X_in, A_in])

    # return_mask=True additionally yields C, the soft assignment matrix.
    pool1, adj1, C = MinCutPool(k=n_classes,
                                h=P['H_'],
                                activation='elu',
                                return_mask=True)([X_1, A_in])

    model = Model([X_in, A_in], [pool1, adj1, C])
    model.compile('adam', None)  # no supervised loss; only layer losses

    ############################################################################
    # TRAINING
    ############################################################################
    # Setup
    sess = K.get_session()
    loss = model.total_loss
    opt = tf.train.AdamOptimizer(learning_rate=P['learning_rate'])
    train_step = opt.minimize(loss)

    # Initialize all variables
    init_op = tf.global_variables_initializer()
    sess.run(init_op)

    # Feed the dense features and the normalized sparse adjacency.
    tr_feed_dict = {X_in: X, A_in: sp_matrix_to_sp_tensor_value(A_norm)}

    # Early stopping on the sum of the two unsupervised MinCut losses.
    best_loss = np.inf
    patience = P['es_patience']
    tol = 1e-5
    for _ in tqdm(range(ITER)):
        outs = sess.run([train_step, model.losses[0], model.losses[1], C],
                        feed_dict=tr_feed_dict)
        if outs[1] + outs[2] + tol < best_loss:
            best_loss = outs[1] + outs[2]
            patience = P['es_patience']
        else:
            patience -= 1
            if patience == 0:
                break

    ############################################################################
    # RESULTS
    ############################################################################
    C_ = sess.run([C], feed_dict=tr_feed_dict)[0]
    c = np.argmax(C_, axis=-1)  # hard cluster assignment per node
    K.clear_session()
    return c
Exemplo n.º 3
0
        # NOTE(review): this fragment begins mid-loop — the enclosing `for`
        # (over segment index n with per-segment data dict d) is not visible
        # in this chunk.
        X_m[n] = d['mean color']
        X_t[n] = d['total color']
        y[n] = d['labels'][0]
    # Scale each feature block by its maximum and concatenate column-wise.
    X_m = (X_m / np.max(X_m)).astype(np.float32)
    X_t = (X_t / np.max(X_t)).astype(np.float32)
    X = np.concatenate((X_m, X_t), axis=-1)

    n_feat = X.shape[1]
    # TF1-style symbolic inputs: dense features, dense adjacency, and a
    # segment-id vector (presumably mapping nodes to graphs — TODO confirm).
    X_in = Input(
        tensor=tf.placeholder(tf.float32, shape=(None, n_feat), name='X_in'))
    A_in = Input(tensor=tf.placeholder(tf.float32, shape=(None, None)),
                 name='A_in')
    S_in = Input(
        tensor=tf.placeholder(tf.int32, shape=(None, ), name='segment_ids_in'))

    # Unpacks pooled features, adjacency, segment ids and the assignment
    # matrix C; only pool1/seg1 become model outputs below.
    pool1, adj1, seg1, C = MinCutPool(n_clust, activation=ACTIV,
                                      h=H_)([X_in, A_in, S_in])

    model = Model([X_in, A_in, S_in], [pool1, seg1])
    model.compile('adam', None)  # trained only via the layer's internal losses

    # Setup
    sess = K.get_session()
    loss = model.total_loss
    opt = tf.train.AdamOptimizer(learning_rate=1e-3)
    train_step = opt.minimize(loss)

    # Initialize all variables
    init_op = tf.global_variables_initializer()
    sess.run(init_op)

    # Fit layer
                        # NOTE(review): scrape artifact — this fragment begins
                        # mid-call (the `GNN(...)` opening line is missing) and
                        # the enclosing function header is not visible.
                        kernel_regularizer=l2(P['GNN_l2']))(X_in)
            A_1 = A_in
            I_1 = I_in
        else:
            gc1 = GNN(P['n_channels'],
                      activation=P['activ'],
                      kernel_regularizer=l2(P['GNN_l2']))([X_in, A_in])

            # Dispatch on the configured pooling method; each branch yields
            # pooled features X_1, adjacency A_1 and segment ids I_1.
            if P['method'] == 'top_k_pool':
                X_1, A_1, I_1, M_1 = TopKPool(0.5)([gc1, A_in, I_in])
            elif P['method'] == 'sag_pool':
                X_1, A_1, I_1 = SAGPool(0.5)([gc1, A_in, I_in])
            elif P['method'] == 'mincut_pool':
                X_1, A_1, I_1, M_1 = MinCutPool(
                    k=int(average_N // 2),
                    h=P['mincut_H'],
                    activation=P['activ'],
                    kernel_regularizer=l2(P['pool_l2']))([gc1, A_in, I_in])

            elif P['method'] == 'flat':
                # 'flat' performs no pooling: pass the graph through as-is.
                X_1 = gc1
                A_1 = A_in
                I_1 = I_in
            else:
                raise ValueError

        # Block 2
        # NOTE(review): truncated by the scrape — the DiffPool call below is
        # missing its closing arguments in this chunk.
        if P['method'] == 'diff_pool':
            X_2, A_2, I_2, M_2 = DiffPool(k=int(average_N // 4),
                                          channels=P['n_channels'],
                                          activation=P['activ'],
Exemplo n.º 5
0
# NOTE(review): flat script fragment; relies on names defined before this
# chunk (X, y, N, tr_idx/va_idx/te_idx, n_out, learning_rate, the graph
# layers and pad_jagged_array) — confirm against the full file.
A = [a.toarray() for a in A]  # densify each per-graph sparse adjacency
F = X[0].shape[-1]
# Zero-pad jagged per-graph features/adjacencies to a common (N, .) size.
X = pad_jagged_array(X, (N, F))
A = pad_jagged_array(A, (N, N))
X_tr, A_tr, y_tr = X[tr_idx], A[tr_idx], y[tr_idx]
X_va, A_va, y_va = X[va_idx], A[va_idx], y[va_idx]
X_te, A_te, y_te = X[te_idx], A[te_idx], y[te_idx]

################################################################################
# BUILD MODEL
################################################################################
X_in = Input(shape=(N, F))
A_in = Input(shape=(N, N))

X_1 = GraphConv(32, activation='relu')([X_in, A_in])
X_1, A_1 = MinCutPool(N // 2)([X_1, A_in])  # pool to half the padded size
X_2 = GraphConv(32, activation='relu')([X_1, A_1])
X_3 = GlobalSumPool()(X_2)
output = Dense(n_out)(X_3)  # linear head — MSE regression target below

# Build model
model = Model(inputs=[X_in, A_in], outputs=output)
opt = Adam(lr=learning_rate)
model.compile(optimizer=opt, loss='mse')
model.summary()

################################################################################
# FIT MODEL
################################################################################
# NOTE(review): the call below is truncated by the scrape — its remaining
# arguments are missing from this chunk.
model.fit([X_tr, A_tr],
          y_tr,
                     name='A_in',
                     sparse=True)
        # NOTE(review): fragment begins mid-`Input(...)` call (scrape
        # artifact); the enclosing function and the X_in/A_in definitions
        # are not visible in this chunk.
        S_in = Input(tensor=tf.placeholder(
            tf.int32, shape=(None, ), name='segment_ids_in'))

        # Optional skip-connection graph conv before pooling; skipped for
        # diff_pool (presumably because DiffPool embeds its own transform
        # — TODO confirm).
        if P['apply_GNN'] and P['method'] != 'diff_pool':
            A_norm = normalized_adjacency(A)
            X_1 = GraphConvSkip(P['n_channels'],
                                kernel_initializer='he_normal',
                                activation=P['ACTIV'])([X_in, A_in])
        else:
            A_norm = A
            X_1 = X_in

        # Both pooling layers unpack pooled features, adjacency, segment
        # ids and the assignment matrix C.
        if P['method'] == 'mincut_pool':
            pool1, adj1, seg1, C = MinCutPool(
                k=n_clust, h=P['H_'], activation=P['ACTIV'])([X_1, A_in, S_in])

        elif P['method'] == 'diff_pool':
            pool1, adj1, seg1, C = DiffPool(k=n_clust,
                                            channels=P['n_channels'],
                                            activation=P['ACTIV'])(
                                                [X_1, A_in, S_in])
        else:
            raise ValueError

        model = Model([X_in, A_in, S_in], [pool1, seg1, C])
        model.compile('adam', None)  # only the layers' internal losses train it

        ############################################################################
        # TRAINING
        ############################################################################
Exemplo n.º 7
0
# NOTE(review): flat script fragment; relies on names defined before this
# chunk (X_in, X, A, n_feat, n_nodes, gnn_channels, ACTIV,
# upsampling_from_matrix_op, the graph layers) — confirm in the full file.
A_in = Input(tensor=tf.sparse_placeholder(tf.float32, shape=(None, None)),
             name='A_in')
I_in = Input(
    tensor=tf.placeholder(tf.int32, shape=(None, ), name='segment_ids_in'))
# Extra placeholder used as the regression target for the reconstruction.
X_target = Input(
    tensor=tf.placeholder(tf.float32, shape=(None, n_feat), name='X_target'))
A_target = Input(tensor=tf.sparse_placeholder(tf.float32, shape=(None, None)),
                 name='A_target')
A = normalized_adjacency(A)
n_out = X.shape[-1]  # decode back to the original feature dimension

# encoder
X1 = GraphConvSkip(gnn_channels, activation=ACTIV)([X_in, A_in])
X1 = GraphConvSkip(gnn_channels, activation=ACTIV)([X1, A_in])
# pooling
X2, A2, I2, M2 = MinCutPool(k=n_nodes // 4, h=gnn_channels)([X1, A_in, I_in])
# unpooling (presumably lifts pooled nodes back via the assignment M2 — TODO confirm)
X3, A3, I3 = upsampling_from_matrix_op([X2, A2, I2, M2])
# decoder
X3 = GraphConvSkip(gnn_channels, activation=ACTIV)([X3, A_in])
X3 = GraphConvSkip(gnn_channels, activation=ACTIV)([X3, A_in])
X3 = GraphConvSkip(n_out)([X3, A_in])  # linear output layer

# MSE reconstruction objective: X3 against the X_target placeholder.
model = Model([X_in, A_in, I_in], [X3])
model.compile('adam', 'mse', target_tensors=[X_target])

# TRAINING
sess = K.get_session()
loss = model.total_loss
opt = tf.train.AdamOptimizer(learning_rate=5e-3)
train_step = opt.minimize(loss)