Example #1
    def call(self, node, adj, batch, edge_attr, training=True):
        # look up the embedding for each node id, then run the graph model
        feature = tf.nn.embedding_lookup(self.embedding, node)
        predict = self.model(feature, adj, batch, edge_attr, training=training)
        return predict

    def predict(self, nodes, adj, batch, edge_attr, training=False):
        return self(nodes, adj, batch, edge_attr, training)


accs_all = []
for i in range(10):  # repeat training to average accuracy over 10 runs
    model = TextSAGEDynamicWeight(dim, num_class, drop_rate)
    optimize = tf.optimizers.Adam(lr)

    cross_entropy = Losess.MaskSparseCategoricalCrossentropy()
    acc_score = Metric.SparseAccuracy()

    stop_monitor = EarlyStopping(monitor="loss",
                                 patience=10,
                                 restore_best_weights=False)
    for ep in range(epoch):
        t = time.time()

        for node, label, adj, edge_attr, batch in data.load(nodes[:-500],
                                                            adjs[:-500],
                                                            labels[:-500],
                                                            edge_attrs[:-500],
                                                            batchs[:-500],
                                                            batch_size=32):
            node, label, adj, edge_attr, batch = merge_batch_graph(
                node, label, adj, edge_attr, batch)  # merge samples into one batched graph
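            # --- sketch: the rest of this step is not in the source. The update
            # --- below follows the gradient-tape pattern of Example #3 and is an
            # --- assumption, not the original code.
            with tf.GradientTape() as tape:
                predict = model(node, adj, batch, edge_attr, training=True)
                loss = cross_entropy(label, predict)
            grads = tape.gradient(loss, model.trainable_variables)
            optimize.apply_gradients(zip(grads, model.trainable_variables))
        # per-epoch report; an EarlyStopping check on the loss would follow here
        print("epoch {:03d} loss {:.4f} ({:.2f}s)".format(ep, float(loss), time.time() - t))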
Example #2
    embedding_dims,
    class_num,
    # init_weights,
    weights_trainable=True)

# model = TextCNN.TextCNN(maxlen, vocab_size, embedding_dims, class_num)

# Build the optimizer
# lr = tf.keras.optimizers.schedules.PolynomialDecay(0.01, decay_steps=18000,
#                                                    end_learning_rate=0.0001,
#                                                    cycle=False)

optimizer = optim.AdamWarmup(learning_rate=0.01, decay_steps=15000)

# Build the loss function
mask_sparse_categorical_loss = Losess.MaskSparseCategoricalCrossentropy(
    from_logits=False)

f1score = Metric.SparseF1Score(average="macro")
precisionscore = Metric.SparsePrecisionScore(average="macro")
recallscore = Metric.SparseRecallScore(average="macro")
accuracyscore = Metric.SparseAccuracy()

# Save the model
checkpoint = tf.train.Checkpoint(model=model)
manager = tf.train.CheckpointManager(checkpoint,
                                     directory="./save",
                                     checkpoint_name="model.ckpt",
                                     max_to_keep=3)
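# Resuming is standard tf.train usage (not in the source): restore the newest
# kept checkpoint; restore(None) is a no-op while ./save is still empty.
checkpoint.restore(manager.latest_checkpoint)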

Batch = 0
for X, token_type_id, input_mask, Y in load.load_train():
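    # --- sketch: the loop body is missing from the source. The input layout of
    # --- the model call and the (y_true, y_pred) metric convention are
    # --- assumptions, not the original code.
    with tf.GradientTape() as tape:
        predict = model([X, token_type_id, input_mask], training=True)
        loss = mask_sparse_categorical_loss(Y, predict)
    grads = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(grads, model.trainable_variables))
    if Batch % 100 == 0:
        print("batch {} loss {:.4f} acc {:.4f}".format(
            Batch, float(loss), accuracyscore(Y, predict)))
        manager.save(checkpoint_number=Batch)  # keeps only the 3 newest (max_to_keep)
    Batch += 1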
Example #3
hidden_dim = 16  # assumed value; hidden_dim is used below but not defined in this snippet
num_class = 6
drop_rate = 0.5
epoch = 200
early_stopping = 10
penalty = 5e-4

# cora, pubmed, citeseer
data = Planetoid(name="citeseer", loop=True, norm=True)

features, adj, y_train, y_val, y_test, train_mask, val_mask, test_mask = data.load()

model = GCNLayer(hidden_dim, num_class, drop_rate)

optimizer = tf.keras.optimizers.Adam(0.01)
crossentropy = Losess.MaskCategoricalCrossentropy()
accscore = Metric.MaskAccuracy()
stop_monitor = EarlyStopping(monitor="loss", patience=early_stopping)
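# For reference: MaskCategoricalCrossentropy restricts the loss to the nodes
# selected by the mask. In plain TensorFlow the same quantity is roughly the
# hypothetical helper below (a sketch, not part of the library):
def masked_ce(y_true, y_pred, mask):
    ce = tf.keras.losses.categorical_crossentropy(y_true, y_pred)  # per-node CE
    mask = tf.cast(mask, ce.dtype)
    return tf.reduce_sum(ce * mask) / tf.reduce_sum(mask)  # mean over masked nodes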

# ---------------------------------------------------------
# Training
for p in range(epoch):
    t = time.time()
    with tf.GradientTape() as tape:
        predict = model(features, adj, training=True)
        loss = crossentropy(y_train, predict, train_mask)
        loss += penalty * tf.nn.l2_loss(model.variables[0])  # L2 penalty on first-layer weights

    grads = tape.gradient(loss, model.variables)
    optimizer.apply_gradients(grads_and_vars=zip(grads, model.variables))
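    # --- sketch: what usually follows inside the epoch loop is not in the
    # --- source; the EarlyStopping call convention below is an assumption.
    val_predict = model(features, adj, training=False)  # dropout disabled
    val_loss = crossentropy(y_val, val_predict, val_mask)
    val_acc = accscore(y_val, val_predict, val_mask)
    print("epoch {:03d} loss {:.4f} val_loss {:.4f} val_acc {:.4f} ({:.2f}s)".format(
        p, float(loss), float(val_loss), val_acc, time.time() - t))
    if stop_monitor(float(val_loss)):
        break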