Example #1
def main(args):
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True

    valset = Feeder(args.val_feat_path,
                    args.val_knn_graph_path,
                    args.val_label_path,
                    args.seed,
                    args.k_at_hop,
                    args.active_connection,
                    train=False)
    valloader = DataLoader(valset,
                           batch_size=args.batch_size,
                           num_workers=args.workers,
                           shuffle=False,
                           pin_memory=True)

    ckpt = load_checkpoint(args.checkpoint)
    net = model.gcn()
    net.load_state_dict(ckpt['state_dict'])
    net = net.cuda()

    # Build a per-node adjacency dict from the kNN graph; the first entry of
    # each neighbor list is skipped (typically the node itself in a kNN list).
    knn_graph = valset.knn_graph
    knn_graph_dict = list()
    for neighbors in knn_graph:
        knn_graph_dict.append(dict())
        for n in neighbors[1:]:
            knn_graph_dict[-1][n] = []

    criterion = nn.CrossEntropyLoss().cuda()
    edges, scores = validate(valloader, net, criterion)

    # Cache the predicted edges and scores so clustering can be re-run
    # without repeating inference:
    np.save('edges', edges)
    np.save('scores', scores)
    #edges = np.load('edges.npy')
    #scores = np.load('scores.npy')

    clusters = graph_propagation(edges,
                                 scores,
                                 max_sz=900,
                                 step=0.6,
                                 pool='avg')
    final_pred = clusters2labels(clusters, len(valset))
    labels = valset.labels

    print('------------------------------------')
    print('Number of nodes: ', len(labels))
    print('Precision   Recall   F-Score   NMI')
    p, r, f = bcubed(final_pred, labels)
    nmi = normalized_mutual_info_score(final_pred, labels)
    print(('{:.4f}    ' * 4).format(p, r, f, nmi))

    labels, final_pred = single_remove(labels, final_pred)
    print('------------------------------------')
    print('After removing singleton clusters, number of nodes: ', len(labels))
    print('Precision   Recall   F-Score   NMI')
    p, r, f = bcubed(final_pred, labels)
    nmi = normalized_mutual_info_score(final_pred, labels)
    print(('{:.4f}    ' * 4).format(p, r, f, nmi))
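
For context, `clusters2labels` and `single_remove` are helpers defined elsewhere in the repository. A minimal sketch of what they typically do in this kind of pipeline (illustrative assumptions, not the original implementations):

import numpy as np

def clusters2labels(clusters, n_nodes):
    # Assign every node the id of the cluster it belongs to (-1 if unassigned).
    labels = -1 * np.ones(n_nodes, dtype=np.int64)
    for cluster_id, members in enumerate(clusters):
        for node in members:
            labels[node] = cluster_id
    return labels

def single_remove(labels, pred):
    # Drop nodes whose predicted cluster is a singleton before re-scoring.
    labels, pred = np.asarray(labels), np.asarray(pred)
    ids, counts = np.unique(pred, return_counts=True)
    keep = np.isin(pred, ids[counts > 1])
    return labels[keep], pred[keep]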
Example #2
def main(args):
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True
    sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))

    trainset = Feeder(args.feat_path, args.knn_graph_path, args.label_path,
                      args.seed, args.k_at_hop, args.active_connection)
    trainloader = DataLoader(trainset,
                             batch_size=args.batch_size,
                             num_workers=args.workers,
                             shuffle=True,
                             pin_memory=True)

    net = model.gcn().cuda()
    opt = torch.optim.SGD(net.parameters(),
                          args.lr,
                          momentum=args.momentum,
                          weight_decay=args.weight_decay)

    criterion = nn.CrossEntropyLoss().cuda()

    save_checkpoint(
        {'state_dict': net.state_dict(), 'epoch': 0},
        False,
        fpath=osp.join(args.logs_dir, 'epoch_{}.ckpt'.format(0)))
    for epoch in range(args.epochs):
        adjust_lr(opt, epoch)

        train(trainloader, net, criterion, opt, epoch)
        save_checkpoint(
            {'state_dict': net.state_dict(), 'epoch': epoch + 1},
            False,
            fpath=osp.join(args.logs_dir, 'epoch_{}.ckpt'.format(epoch + 1)))
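
`adjust_lr` is defined elsewhere in the script. A hedged sketch of a typical step-decay schedule it might implement (the base rate, decay interval, and factor below are assumptions, not taken from the original):

def adjust_lr(opt, epoch, base_lr=0.01, decay_every=4, gamma=0.1):
    # Hedged sketch: step-decay the learning rate; the actual schedule used
    # by the original script is not shown in this snippet.
    lr = base_lr * (gamma ** (epoch // decay_every))
    for param_group in opt.param_groups:
        param_group['lr'] = lr

The keyword defaults keep the call site `adjust_lr(opt, epoch)` unchanged.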
Example #3
if __name__ == "__main__":
    # Initialize the run configuration
    args = args_init()

    df_data_path = "./data/df_data.pkl"
    graph_path = "./data/text_graph.pkl"
    # The file-existence guard is disabled, so the text graph is regenerated on every run:
    #if not os.path.isfile(df_data_path) or not os.path.isfile(graph_path):
    generate_text_graph()

    df_data = load_pickle("df_data.pkl")
    G = load_pickle("text_graph.pkl")

    f, X, A_hat, selected, labels_selected, labels_not_selected, test_idxs = load_data(
        args, df_data, G)
    net = gcn(X.shape[1], A_hat, args)
    print(net)

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(net.parameters(), lr=args.lr)
    scheduler = optim.lr_scheduler.MultiStepLR(
        optimizer, milestones=[1000, 2000, 3000, 4000], gamma=0.77)

    best_pred = 0, 0
    losses_per_epoch, evaluation_test = [], []
    where_stop = 0

    print("----开始训练----")
    net.train()
    evaluation_trained = []
    for e in range(0, args.num_epochs):
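        # ---- The loop body is truncated in the original listing. ----
        # Hedged sketch of a typical semi-supervised text-GCN training step,
        # using the names defined above (net, X, selected, labels_selected,
        # criterion, optimizer, scheduler); this is an assumption, not the
        # original code, and it omits the evaluation bookkeeping.
        optimizer.zero_grad()
        output = net(X)  # forward pass over every node in the graph
        # Compute the loss only on the labelled ("selected") nodes.
        loss = criterion(output[selected],
                         torch.tensor(labels_selected).long())
        losses_per_epoch.append(loss.item())
        loss.backward()
        optimizer.step()
        scheduler.step()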
Example #4
with tf.Graph().as_default():
    with tf.device('/gpu:' + str(0)):
        # `adj` (adjacency matrix) and `labels_gt` (ground-truth class ids) are
        # assumed to be defined earlier in the script.
        num_points = np.shape(adj)[0]  # N, number of nodes in the graph (points in the point cloud)

        inputs = tf.placeholder(
            tf.float32,
            shape=(num_points, num_points))  # NxD, D = size of the input feature vector
        degM = tf.placeholder(
            tf.float32,
            shape=(num_points, num_points))  # NxN, degree matrix
        # labels = tf.placeholder(tf.int32, shape=(num_points))  # 1xN Ground truth labels

        embdng, pred = model.gcn(
            inputs, degM)  # NxF, F = dimensionality of the output feature vector

        # Define a training mask for semi-supervised learning: one labelled node per class
        train_mask = np.zeros(num_points)
        train_mask[9] = 1  # class 0
        train_mask[6] = 1  # class 1
        train_mask[33] = 1  # class 2
        train_mask[24] = 1  # class 3

        loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=pred,
                                                              labels=labels_gt)
        # Zero out the loss on unlabelled nodes; dividing the mask by its mean
        # rescales it so the final reduce_mean averages only over labelled nodes.
        mask = tf.cast(train_mask, dtype=tf.float32)
        mask /= tf.reduce_mean(mask)
        loss *= mask
        loss = tf.reduce_mean(loss)
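
The example stops after building the masked loss. Under TF1, training would typically continue with an optimizer op and a session loop; a minimal hedged sketch follows (optimizer choice, learning rate, and step count are assumptions, and `feat_np`/`deg_np` are hypothetical NumPy arrays prepared elsewhere):

        # (continuing inside the tf.Graph/tf.device context above)
        train_op = tf.train.AdamOptimizer(learning_rate=1e-2).minimize(loss)

    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
        sess.run(tf.global_variables_initializer())
        for step in range(200):
            _, loss_val = sess.run([train_op, loss],
                                   feed_dict={inputs: feat_np, degM: deg_np})
            if step % 20 == 0:
                print('step {:3d}  loss {:.4f}'.format(step, loss_val))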