Example #1
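A top-level driver from a graph-structure-learning script: Mis_dag estimates a weighted adjacency matrix from the training data, and count_accuracy compares the binarized estimate (w_est != 0) against the ground-truth graph B_true.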
def main():

    # --------------data generation---------------------
    B_true, W_true, X, train_loader = load_data(args)

    # --------------graph estimation--------------------
    w_est = Mis_dag(args, train_loader)

    # --------------accuracy calculation----------------
    acc = count_accuracy(B_true, w_est != 0)
    print(acc)
Example #2
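A meta-training loop for few-shot classification: a classification head scores query embeddings against the support set, the loss is cross-entropy against label-smoothed targets, and count_accuracy reports per-batch accuracy on the query logits.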
            emb_query = emb_query.reshape(opt.episodes_per_batch,
                                          train_n_query, -1)

            logit_query = cls_head(emb_query, emb_support, labels_support,
                                   opt.train_way, opt.train_shot)

            # label smoothing: (1 - eps) on the true class, eps / (way - 1) elsewhere
            smoothed_one_hot = one_hot(labels_query.reshape(-1), opt.train_way)
            smoothed_one_hot = smoothed_one_hot * (1 - opt.eps) + (
                1 - smoothed_one_hot) * opt.eps / (opt.train_way - 1)

            log_prb = F.log_softmax(logit_query.reshape(-1, opt.train_way),
                                    dim=1)
            loss = -(smoothed_one_hot * log_prb).sum(dim=1)
            loss = loss.mean()

            acc = count_accuracy(logit_query.reshape(-1, opt.train_way),
                                 labels_query.reshape(-1))

            train_accuracies.append(acc.item())
            train_losses.append(loss.item())

            if i % 100 == 0:
                train_acc_avg = np.mean(np.array(train_accuracies))
                log(
                    log_file_path,
                    'Train Epoch: {}\tBatch: [{}/{}]\tLoss: {:.4f}\tAccuracy: {:.2f} % ({:.2f} %)'
                    .format(epoch, i, len(dloader_train), loss.item(),
                            train_acc_avg, acc))

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
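The count_accuracy helper used in this and the other classification examples (#3 and #7 below) is not shown. From the call sites (logits reshaped to (N, way), integer labels, a result whose .item() is logged as a percentage), a minimal sketch consistent with that usage might look as follows; this implementation is an assumption, not the repository's code:

import torch


def count_accuracy(logits, labels):
    # Top-1 accuracy as a percentage (assumed signature; the actual
    # helper is defined elsewhere in the repository).
    preds = torch.argmax(logits, dim=1)  # (N,) predicted class indices
    return (preds == labels).float().mean() * 100.0  # 0-dim tensor; .item() gives a float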
Example #3
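A test-time episode loop that iteratively augments the support set with pseudo-labeled query prototypes (PSM) before the final prediction; count_accuracy scores the final query logits, and a 95% confidence interval is tracked across episodes.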
        emb_support = IAM(emb_support, emb_query, emb_query, labels_support)
        # PSM
        for _ in range(opt.psm_iters):
            logit_query = cls_head(emb_query, emb_support, labels_support,
                                   opt.way, opt.shot)
            labels_pseudo = torch.argmax(logit_query, dim=2)  # (1, n_query)
            labels_pseudo_one_hot = one_hot(labels_pseudo,
                                            opt.way)  # (1, n_query, way)
            proto_query = torch.bmm(labels_pseudo_one_hot.transpose(1, 2),
                                    emb_query)  # (1, way, d)
            proto_query = proto_query.div(
                labels_pseudo_one_hot.transpose(1, 2).sum(
                    dim=2, keepdim=True).expand_as(proto_query) + 1e-5)
            # append the query prototypes to the support set as one extra
            # "shot" per class, labeled 0..way-1
            labels_pseudo = torch.arange(opt.way).unsqueeze(0).cuda()
            emb_support = torch.cat([emb_support, proto_query], dim=1)
            labels_support = torch.cat([labels_support, labels_pseudo], dim=1)

        logits = cls_head(emb_query, emb_support, labels_support, opt.way,
                          opt.shot)
        acc = count_accuracy(logits.reshape(-1, opt.way),
                             labels_query.reshape(-1))
        test_accuracies.append(acc.item())

        avg = np.mean(np.array(test_accuracies))
        std = np.std(np.array(test_accuracies))
        ci95 = 1.96 * std / np.sqrt(i + 1)

        if i % 50 == 0:
            print(
                'Episode [{}/{}]:\t\t\tAccuracy: {:.2f} ± {:.2f} % ({:.2f} %)'.
                format(i, opt.episode, avg, ci95, acc))
Example #4
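The tail of a NOTEARS-style augmented-Lagrangian solver followed by a demo: rho grows whenever the acyclicity violation h does not shrink enough, small weights are thresholded away, and the __main__ block simulates a linear SEM and scores the recovered structure with count_accuracy. (The excerpt assumes numpy is imported as np at module level.)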
            if h_new > 0.25 * h:  # insufficient progress on acyclicity
                rho *= 10  # raise the penalty weight and retry
            else:
                break
        w_est, h = w_new, h_new
        alpha += rho * h  # dual ascent step on the Lagrange multiplier
        if h <= h_tol or rho >= rho_max:
            break
    W_est = _adj(w_est)
    W_est[np.abs(W_est) < w_threshold] = 0  # prune weights below the threshold
    return W_est


if __name__ == '__main__':
    import utils as ut
    ut.set_random_seed(1)

    n, d, s0, graph_type, sem_type = 100, 20, 20, 'ER', 'gauss'
    B_true = ut.simulate_dag(d, s0, graph_type)
    W_true = ut.simulate_parameter(B_true)
    np.savetxt('W_true.csv', W_true, delimiter=',')

    X = ut.simulate_linear_sem(W_true, n, sem_type)
    np.savetxt('X.csv', X, delimiter=',')

    W_est = notears_linear_l1(X, lambda1=0.1, loss_type='l2')
    assert ut.is_dag(W_est)
    np.savetxt('W_est.csv', W_est, delimiter=',')
    acc = ut.count_accuracy(B_true, W_est != 0)
    print(acc)
Example #5
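An earlier NOTEARS demo in which count_accuracy takes the true and estimated graphs and returns five structural metrics: false discovery rate (fdr), true positive rate (tpr), false positive rate (fpr), structural Hamming distance (shd), and the number of predicted edges (nnz).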
    import glog as log
    import networkx as nx
    import utils

    # configurations
    n, d = 1000, 10
    graph_type, degree, sem_type = 'erdos-renyi', 4, 'linear-gauss'
    log.info('Graph: %d nodes, avg degree %d, %s graph', d, degree, graph_type)
    log.info('Data: %d samples, %s SEM', n, sem_type)

    # graph
    log.info('Simulating graph ...')
    G = utils.simulate_random_dag(d, degree, graph_type)
    log.info('Simulating graph ... Done')

    # data
    log.info('Simulating data ...')
    X = utils.simulate_sem(G, n, sem_type)
    log.info('Simulating data ... Done')

    # solve optimization problem
    log.info('Solving equality constrained problem ...')
    W_est = notears_simple(X)
    G_est = nx.DiGraph(W_est)
    log.info('Solving equality constrained problem ... Done')

    # evaluate
    fdr, tpr, fpr, shd, nnz = utils.count_accuracy(G, G_est)
    log.info('Accuracy: fdr %f, tpr %f, fpr %f, shd %d, nnz %d', fdr, tpr, fpr,
             shd, nnz)
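In the graph-learning examples, count_accuracy instead compares graph structure and, as the unpacking above shows, returns (fdr, tpr, fpr, shd, nnz). A minimal sketch over binary d x d numpy adjacency matrices, under the assumption that reversed edges are not treated specially (the original utilities also accept graph objects):

import numpy as np


def count_accuracy(B_true, B_est):
    # Structural accuracy metrics for a binary adjacency estimate (sketch).
    true = B_true != 0
    est = B_est != 0
    tp = np.sum(est & true)  # correctly recovered edges
    fp = np.sum(est & ~true)  # extra edges
    fn = np.sum(~est & true)  # missing edges
    nnz = int(np.sum(est))  # number of predicted edges
    d = B_true.shape[0]
    neg = d * (d - 1) - np.sum(true)  # non-edges, diagonal excluded
    fdr = fp / max(nnz, 1)  # false discovery rate
    tpr = tp / max(np.sum(true), 1)  # true positive rate
    fpr = fp / max(neg, 1)  # false positive rate
    shd = int(fp + fn)  # simplified structural Hamming distance
    return fdr, tpr, fpr, shd, nnz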
Example #6
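A variant of Example #4 with the same solver tail; it checks acyclicity via igraph and again scores the binarized estimate with count_accuracy. (As in Example #4, numpy is assumed to be imported as np at module level.)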
                rho *= 10
            else:
                break
        w_est, h = w_new, h_new
        alpha += rho * h
        if h <= h_tol or rho >= rho_max:
            break
    W_est = _adj(w_est)
    W_est[np.abs(W_est) < w_threshold] = 0
    return W_est


if __name__ == '__main__':
    import utils as ut
    ut.set_random_seed(1)

    n, d, s0, graph_type, sem_type = 100, 20, 20, 'ER', 'gauss'
    B_true = ut.simulate_dag(d, s0, graph_type)
    W_true = ut.simulate_parameter(B_true)
    np.savetxt('W_true.csv', W_true, delimiter=',')

    X = ut.simulate_linear_sem(W_true, n, sem_type)
    np.savetxt('X.csv', X, delimiter=',')

    W_est = notears_linear_l1(X, lambda1=0.1, loss_type='l2')
    import igraph as ig
    assert ig.Graph.Weighted_Adjacency(W_est.tolist()).is_dag()
    np.savetxt('W_est.csv', W_est, delimiter=',')
    acc = ut.count_accuracy(B_true, W_est != 0)  # count_accuracy expects binary adjacency matrices
    print(acc)
Example #7
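A standard supervised pre-training loop: raw batch labels are remapped through label_dict, optional feature-clustering and hyperplane-variation regularizers are added to the cross-entropy loss, and count_accuracy tracks running training accuracy.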
            labels = torch.cat([lab1, lab2, lab3, lab4], 0)
            # remap raw dataset labels to contiguous class indices
            for j in range(labels.shape[0]):
                labels[j] = label_dict[labels[j].item()]
            labels = labels.cuda()

            outputs = embedding_net(images)
            logit_query = cls_head(outputs)
            loss = criterion(logit_query, labels)

            if opt.feature_clustering_coeff > 0.0:
                loss = loss + opt.feature_clustering_coeff * feature_clustering(outputs)

            if opt.hyperplane_variation_coeff > 0.0:
                loss = loss + opt.hyperplane_variation_coeff * hyperplane_variation(outputs)

            acc = count_accuracy(logit_query, labels)
            
            train_accuracies.append(acc.item())
            train_losses.append(loss.item())

            if i % 100 == 0:
                train_acc_avg = np.mean(np.array(train_accuracies))
                log(log_file_path, 'Train Epoch: {}\tBatch: [{}/{}]\tLoss: {:.4f}\tAccuracy: {:.2f} % ({:.2f} %)'.format(
                    epoch, i, len(dloader_train), loss.item(), train_acc_avg, acc))
            
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        lr_scheduler.step()