Example #1
# Imports assumed from the other fennlp examples in this listing
# (exact module paths are assumptions, not shown in the excerpt).
import tensorflow as tf
from fennlp.models import TextCNN
from fennlp.optimizers import optim
from fennlp.metrics import Losess, Metric

# The excerpt begins mid-call; the constructor head is reconstructed from
# the commented single-line form below (maxlen, vocab_size, embedding_dims
# and class_num are defined earlier in the original script).
model = TextCNN.TextCNN(maxlen,
                        vocab_size,
                        embedding_dims,
                        class_num,
                        # init_weights,
                        weights_trainable=True)

# model = TextCNN.TextCNN(maxlen, vocab_size, embedding_dims, class_num)

# Build the optimizer
# lr = tf.keras.optimizers.schedules.PolynomialDecay(0.01, decay_steps=18000,
#                                                    end_learning_rate=0.0001,
#                                                    cycle=False)

optimizer = optim.AdamWarmup(learning_rate=0.01, decay_steps=15000)

# Build the loss function
mask_sparse_categorical_loss = Losess.MaskSparseCategoricalCrossentropy(
    from_logits=False)

f1score = Metric.SparseF1Score(average="macro")
precisionscore = Metric.SparsePrecisionScore(average="macro")
recallscore = Metric.SparseRecallScore(average="macro")
accuracyscore = Metric.SparseAccuracy()

# Save the model (checkpointing)
checkpoint = tf.train.Checkpoint(model=model)
manager = tf.train.CheckpointManager(checkpoint,
                                     directory="./save",
                                     checkpoint_name="model.ckpt",
                                     max_to_keep=3)

Batch = 0
for X, token_type_id, input_mask, Y in load.load_train():
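    # The excerpt ends at the loop header; a minimal sketch of one training
    # step, assuming the objects defined above (the model call signature is
    # an assumption, not the original author's code):
    with tf.GradientTape() as tape:
        predict = model(X, training=True)
        loss = mask_sparse_categorical_loss(Y, predict)
    grads = tape.gradient(loss, model.variables)
    optimizer.apply_gradients(zip(grads, model.variables))
    if Batch % 100 == 0:
        print("Batch {} | Loss {:.4f} | Acc {:.4f}".format(
            Batch, loss.numpy(), accuracyscore(Y, predict)))
        manager.save(checkpoint_number=Batch)  # keeps the 3 newest checkpoints
    Batch += 1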
Example #2
# Imports assumed from the other fennlp examples in this listing; the
# TUDataset, GINLayer and merge_batch_graph imports are omitted from
# the excerpt.
import time
import tensorflow as tf
from fennlp.metrics import Losess, Metric

dim = 64        # hidden dimension (assumed; not shown in the excerpt)
num_class = 2   # PROTEINS is a binary graph-classification dataset
drop_rate = 0.5
epoch = 300
lr = 0.001
split = 10  # 10-fold cross-validation

tf.random.set_seed(1124)
accs_all = []
dataloader = TUDataset(name="PROTEINS", split=split)

for block_index in range(split):

    model = GINLayer(dim, num_class, drop_rate)

    optimize = tf.optimizers.Adam(lr)

    cross_entropy = Losess.MaskSparseCategoricalCrossentropy()
    acc_score = Metric.SparseAccuracy()

    train_data, test_data = dataloader.load(batch_size=128,
                                            block_index=block_index)
    for i in range(epoch):
        t = time.time()
        loss_all = []
        acc_all = []
        for x, y, edge_index, edge_attr, batch in train_data:
            x, y, edge_index, edge_attr, batch = merge_batch_graph(
                x, y, edge_index, edge_attr, batch)

            with tf.GradientTape() as tape:
                predict = model(x, edge_index, batch, training=True)
                loss = cross_entropy(y, predict)
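            # The excerpt ends inside the tape block; a minimal sketch of the
            # remaining update step, following the pattern of Examples #3 and
            # #4 (assumed, not the original author's code):
            grads = tape.gradient(loss, model.variables)
            optimize.apply_gradients(zip(grads, model.variables))
            loss_all.append(loss.numpy())
            acc_all.append(acc_score(y, predict))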
Example #3
# Imports assumed from the other fennlp examples in this listing; the
# GAT and EarlyStopping imports are omitted from the excerpt.
import time
import tensorflow as tf
from fennlp.datas import graphloader  # path assumed
from fennlp.metrics import Losess, Metric

hidden_dim = 8   # assumed; not shown in the excerpt
num_class = 3    # pubmed has three document classes
drop_rate = 0.5  # assumed; not shown in the excerpt
epoch = 1000
patience = 100
# penalty = 0.0005 # for cora and citeseer
penalty = 0.001  # for pubmed

loader = graphloader.GCNLoader(dataset="pubmed", loop=True, features_norm=True)

features, adj, y_train, y_val, y_test, train_mask, val_mask, test_mask = loader.load()

model = GAT.GATLayer(hidden_dim=hidden_dim,
                     num_class=num_class,
                     dropout_rate=drop_rate)

optimizer = tf.keras.optimizers.Adam(0.01)
crossentropy = Losess.MaskCategoricalCrossentropy()
accscore = Metric.MaskAccuracy()
stop_monitor = EarlyStopping(monitor="loss", patience=patience)

# ---------------------------------------------------------
# Training loop
for p in range(epoch):
    t = time.time()
    with tf.GradientTape() as tape:
        predict = model(features, adj, training=True)
        loss = crossentropy(y_train, predict, train_mask)
        loss += tf.add_n([
            tf.nn.l2_loss(v) for v in model.variables if "bias" not in v.name
        ]) * penalty
    grads = tape.gradient(loss, model.variables)
    optimizer.apply_gradients(grads_and_vars=zip(grads, model.variables))
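    # A sketch of the validation and early-stopping step that typically
    # follows (hypothetical; the MaskAccuracy and EarlyStopping call
    # signatures are assumptions, not taken from the excerpt):
    val_predict = model(features, adj, training=False)
    val_loss = crossentropy(y_val, val_predict, val_mask)
    val_acc = accscore(y_val, val_predict, val_mask)
    print("Epoch {} | Val loss {:.4f} | Val acc {:.4f} | Time {:.4f}".format(
        p, val_loss.numpy(), val_acc, time.time() - t))
    if stop_monitor(val_loss.numpy()):
        break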
Example #4
import tensorflow as tf               # assumed; needed for the tape below
from fennlp.datas import graphloader  # path assumed
from fennlp.models import GCN         # path assumed
from fennlp.optimizers import optim
from fennlp.metrics import Losess, Metric

_HIDDEN_DIM = 64
_NUM_CLASS = 7
_DROP_OUT_RATE = 0.5
_EPOCH = 100

loader = graphloader.GCNLoader()
features, adj, labels, idx_train, idx_val, idx_test = loader.load()

model = GCN.GCN2Layer(_HIDDEN_DIM, _NUM_CLASS, _DROP_OUT_RATE)

optimizer = tf.keras.optimizers.Adam(0.01)

crossentropy = Losess.MaskSparseCategoricalCrossentropy(from_logits=False)
accscore = Metric.SparseAccuracy()
f1score = Metric.SparseF1Score(average="macro")
# ---------------------------------------------------------
# Training loop
for epoch in range(_EPOCH):
    with tf.GradientTape() as tape:
        output = model(features, adj, training=True)
        predict = tf.gather(output, list(idx_train))
        label = tf.gather(labels, list(idx_train))
        loss = crossentropy(label, predict, use_mask=False)
        acc = accscore(label, predict)
        f1 = f1score(label, predict)
        print("Epoch {} | Loss {:.4f} | Acc {:.4f} | F1 {:.4f}".format(
            epoch, loss.numpy(), acc, f1))
    grads = tape.gradient(loss, model.variables)
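    # The excerpt ends before the parameter update; completing it to match
    # the pattern in Example #3:
    optimizer.apply_gradients(grads_and_vars=zip(grads, model.variables))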