Example #1
def main(graph, fil, norm, permute, ss, epd, n_cv, flip, feat, feat_kwargs,
         ntda):
    """
    All hyperprameter goes here.

    :param graph: graph dataset
    :param fil: filtration function
    :param norm: normalize or not
    :param permute: whether permute dgm
    :param ss: both sublevel and superlevel or not
    :param epd: include extended persistence or not
    :param n_cv: number of cross validation
    :return:
    """

    global gs
    print('feat kwargs', feat_kwargs)
    db = get_tda_db()
    params = {
        'graph': graph,
        'fil': fil,
        'norm': norm,
        'permute': permute,
        'ss': ss,
        'epd': epd,
        'n_cv': n_cv,
        'flip': flip,
        'feat': feat,
        'ntda': ntda,
        'feat_kwargs': feat_kwargs
    }
    if check_duplicate(db, params): return

    label_flag = dgms_dir_test(fil=fil, fil_d='sub', norm=norm, graph=graph)[1]
    # gs, labels = load_graphs(dataset=graph, labels_only=label_flag)  # step 1
    gs, labels = load_tugraphs(graph, labels_only=False)
    # labels_only=True would return gs as None (faster); the graphs are needed here

    # compute sublevel, superlevel, and extended persistence diagrams in parallel
    # subdgms = gs2dgms(gs, n_jobs=-1, fil=fil, fil_d='sub', norm=norm, graph=graph, ntda=ntda, debug_flag=True)
    subdgms = gs2dgms_parallel(n_jobs=-1,
                               fil=fil,
                               fil_d='sub',
                               norm=norm,
                               graph=graph,
                               ntda=ntda)
    supdgms = gs2dgms_parallel(n_jobs=-1,
                               fil=fil,
                               fil_d='sup',
                               norm=norm,
                               graph=graph,
                               ntda=ntda)
    epddgms = gs2dgms_parallel(n_jobs=-1,
                               fil=fil,
                               one_hom=True,
                               norm=norm,
                               graph=graph,
                               ntda=ntda)

    dgms = combine_dgms(subdgms, supdgms, epddgms, ss=ss, epd=epd, flip=flip)
    dgms = permute_dgms(dgms, permute_flag=permute)  # old interface (no permute_ratio argument)
    dgms_summary(dgms)

    swdgms = dgms2swdgms(dgms)
    if feat == 'sw':
        print(feat_kwargs)
        k, _ = sw_parallel(swdgms,
                           swdgms,
                           parallel_flag=True,
                           kernel_type='sw',
                           **feat_kwargs)
        print(k.shape)
        cmargs = {'print_flag': 'off'}  # confusion matrix
        clf = classifier(labels,
                         labels,
                         method='svm',
                         n_cv=n_cv,
                         kernel=k,
                         **cmargs)
        clf.svm_kernel_(n_splits=10)

    elif feat == 'pi':  # vector
        params = {
            'bandwidth': 1.0,
            'weight': (1, 1),
            'im_range': [0, 1, 0, 1],
            'resolution': [5, 5]
        }
        images = merge_dgms(subdgms,
                            supdgms,
                            epddgms,
                            vectype='pi',
                            ss=ss,
                            epd=epd,
                            **params)
        clf = classifier(images, labels, method='svm', n_cv=n_cv)
        clf.svm(n_splits=10)

    elif feat == 'pss':
        k, _ = sw_parallel(swdgms,
                           swdgms,
                           parallel_flag=True,
                           kernel_type='pss',
                           **feat_kwargs)
        # print(k.shape, k, np.max(k))
        clf = classifier(labels, labels, method='svm', n_cv=n_cv, kernel=k)
        clf.svm_kernel_(n_splits=10)

    elif feat == 'wg':
        k, _ = sw_parallel(swdgms,
                           swdgms,
                           parallel_flag=True,
                           kernel_type='wg',
                           **feat_kwargs)
        print(k.shape)
        clf = classifier(labels, labels, method='svm', n_cv=n_cv, kernel=k)
        clf.svm_kernel_(n_splits=10)

    elif feat == 'pervec':
        cmargs = {'print_flag': 'on'}  # confusion matrix
        pd_vector = dgms2vec(dgms, vectype='pervec', **feat_kwargs)
        clf = classifier(pd_vector, labels, method='svm', n_cv=n_cv, **cmargs)
        clf.svm(n_splits=10)

    elif feat == 'pf':
        k, _ = sw_parallel(swdgms,
                           swdgms,
                           parallel_flag=False,
                           kernel_type='pf',
                           **feat_kwargs)
        clf = classifier(labels, labels, method='svm', n_cv=n_cv, kernel=k)
        clf.svm_kernel_(n_splits=10)
    else:
        raise ValueError('No such feat %s' % feat)

    print(clf.stat)
    print_line()
    return clf.stat
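
For reference, a hypothetical call to this `main` could look like the sketch below. The dataset name and most argument values are placeholders, not values taken from the original run script; the 'deg' filtration and the {'bw': 1, 'n_directions': 10} kwargs mirror the other examples on this page.

# hypothetical usage sketch -- argument values are assumptions, not the original configuration
stat = main(graph='mutag',        # TU graph dataset name (assumed)
            fil='deg',            # degree filtration, as in Example #6
            norm=True,
            permute=False,
            ss=True,              # use both sublevel and superlevel diagrams
            epd=False,            # skip extended persistence
            n_cv=1,
            flip=False,
            feat='sw',            # sliced Wasserstein kernel branch
            feat_kwargs={'bw': 1, 'n_directions': 10},
            ntda=False)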
Example #2
    subdgms = gs2dgms_parallel(n_jobs=-1, fil=fil, fil_d='sub', norm=norm)
    supdgms = gs2dgms_parallel(n_jobs=-1, fil=fil, fil_d='sup', norm=norm)
    epddgms = gs2dgms_parallel(n_jobs=-1, fil=fil, one_hom=True, norm=norm)

    # serial
    # subdgms = gs2dgms(gs, fil=fil, fil_d='sub', norm=norm, one_hom=False) # step2 # TODO: need to add interface
    # supdgms = gs2dgms(gs, fil=fil, fil_d='sup', norm=norm, one_hom=False)  # step2 #
    # epddgms = gs2dgms(gs, fil=fil, norm=norm, one_hom=True)  # step2 # TODO

    dgms = combine_dgms(subdgms, supdgms, epddgms, args)
    dgms = permute_dgms(dgms, permute_flag=args.permute, permute_ratio=0.5)
    dgms_summary(dgms)

    # sw kernel
    swdgms = dgms2swdgms(dgms)
    kwargs = {'bw': args.bw, 'n_directions': 10, 'K': 1, 'p': 1}
    sw_kernel, _ = sw_parallel(swdgms,
                               swdgms,
                               parallel_flag=True,
                               kernel_type='sw',
                               **kwargs)
    print(sw_kernel.shape)

    clf = classifier(labels,
                     labels,
                     method='svm',
                     n_cv=args.n_cv,
                     kernel=sw_kernel)
    clf.svm_kernel_(n_splits=10)
    print(clf.stat)
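
Throughout these examples, `sw_parallel(..., kernel_type='sw', bw=..., n_directions=...)` builds a sliced Wasserstein kernel Gram matrix over persistence diagrams. As a rough illustration of the underlying computation (a minimal NumPy sketch, not the library's actual implementation), the distance and kernel between two diagrams can be approximated as follows:

import numpy as np

def sliced_wasserstein(dgm1, dgm2, n_directions=10):
    """Approximate sliced Wasserstein distance between two persistence
    diagrams, each given as an (n, 2) array of (birth, death) pairs."""
    # project a diagram's points orthogonally onto the diagonal y = x
    diag = lambda d: np.column_stack([(d[:, 0] + d[:, 1]) / 2.0] * 2)
    # augment each diagram with the diagonal projections of the other,
    # so both point sets have the same cardinality
    a = np.vstack([dgm1, diag(dgm2)])
    b = np.vstack([dgm2, diag(dgm1)])
    thetas = np.linspace(-np.pi / 2, np.pi / 2, n_directions, endpoint=False)
    total = 0.0
    for theta in thetas:
        direction = np.array([np.cos(theta), np.sin(theta)])
        pa, pb = np.sort(a @ direction), np.sort(b @ direction)
        total += np.sum(np.abs(pa - pb))  # 1-d Wasserstein distance along this direction
    return total / n_directions

def sw_kernel(dgm1, dgm2, bw=1.0, n_directions=10):
    # Gaussian-type kernel built on top of the sliced Wasserstein distance
    return np.exp(-sliced_wasserstein(dgm1, dgm2, n_directions) / (2 * bw ** 2))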
Example #3
    # print(f'sanity dgm is {print_dgm(sanity_dgms[10])} \n')
    print(f'another fake dgm is {print_dgm(another_fake_dgms[10])} \n')

    all_dgms = true_dgms + fake_dgms
    indicator_labels = [1] * len(true_dgms) + [-1] * len(fake_dgms)

    if args.doublefake:
        all_dgms = fake_dgms + another_fake_dgms
        indicator_labels = [1] * len(fake_dgms) + [-1] * len(another_fake_dgms)
    all_dgms = dgms2swdgms(all_dgms)

    # classify true diagrams from fake ones
    feat_kwargs = {'n_directions': 10, 'bw': 1}
    k, _ = sw_parallel(all_dgms,
                       all_dgms,
                       parallel_flag=True,
                       kernel_type='sw',
                       **feat_kwargs)

    print(k.shape)
    cmargs = {'print_flag': 'off'}  # confusion matrix
    clf = classifier(indicator_labels,
                     indicator_labels,
                     method='svm',
                     n_cv=1,
                     kernel=k,
                     **cmargs)
    clf.svm_kernel_(n_splits=10)
    if not args.viz: sys.exit('-' * 50)

    feat_kwargs = {'n_directions': 10, 'bw': 1}
Example #4
def main(graph, fil, norm, permute, ss, epd, n_cv, flip, feat, feat_kwargs):
    """
    All hyperprameter goes here.

    :param graph: graph dataset
    :param fil: filtration function
    :param norm: normalize or not
    :param permute: whether permute dgm
    :param ss: both sublevel and superlevel or not
    :param epd: include extended persistence or not
    :param n_cv: number of cross validation
    :return:
    """

    global gs
    print('kwargs', feat_kwargs)
    label_flag = dgms_dir_test(fil=fil, fil_d='sub', norm=norm, graph=graph)[1]
    # gs, labels = load_graphs(dataset=graph, labels_only=label_flag)  # step 1
    gs, labels = load_tugraphs(graph, labels_only=True)

    # parallel
    subdgms = gs2dgms_parallel(n_jobs=-1,
                               fil=fil,
                               fil_d='sub',
                               norm=norm,
                               graph=graph)
    supdgms = gs2dgms_parallel(n_jobs=-1,
                               fil=fil,
                               fil_d='sup',
                               norm=norm,
                               graph=graph)
    epddgms = gs2dgms_parallel(n_jobs=-1,
                               fil=fil,
                               one_hom=True,
                               norm=norm,
                               graph=graph)

    dgms = combine_dgms(subdgms, supdgms, epddgms, ss=ss, epd=epd, flip=flip)
    dgms = permute_dgms(dgms, permute_flag=permute, permute_ratio=0.5)
    dgms_summary(dgms)

    swdgms = dgms2swdgms(dgms)
    if feat == 'sw':
        print(feat_kwargs)
        k, _ = sw_parallel(swdgms,
                           swdgms,
                           parallel_flag=True,
                           kernel_type='sw',
                           **feat_kwargs)
        clf = classifier(labels, labels, method='svm', n_cv=n_cv, kernel=k)
        clf.svm_kernel_(n_splits=10)
        print(clf.stat)
        return clf.stat

    elif feat == 'pi':
        params = {
            'bandwidth': 1.0,
            'weight': (1, 1),
            'im_range': [0, 1, 0, 1],
            'resolution': [5, 5]
        }
        images = merge_dgms(subdgms,
                            supdgms,
                            epddgms,
                            vectype='pi',
                            ss=ss,
                            epd=epd,
                            **params)
        clf = classifier(images, labels, method='svm', n_cv=n_cv)
        clf.svm(n_splits=10)
        return clf.stat

    elif feat == 'pss':
        k, _ = sw_parallel(swdgms,
                           swdgms,
                           parallel_flag=False,
                           kernel_type='pss',
                           **feat_kwargs)
        print(k.shape, k, np.max(k))
        clf = classifier(labels, labels, method='svm', n_cv=n_cv, kernel=k)
        clf.svm_kernel_(n_splits=10)
        print(clf.stat)
        return clf.stat

    elif feat == 'wg':
        k, _ = sw_parallel(swdgms,
                           swdgms,
                           parallel_flag=True,
                           kernel_type='wg',
                           **feat_kwargs)
        print(k.shape)
        clf = classifier(labels, labels, method='svm', n_cv=n_cv, kernel=k)
        clf.svm_kernel_(n_splits=10)
        print(clf.stat)
        return clf.stat

    elif feat == 'pdvector':
        pass  # 'pdvector' branch not implemented in this snippet
Example #5
    # viz fv value
    # val = dict(nx.get_node_attributes(gs[i], 'fv')).values()
    # plt.plot(val)
    # plt.title('q: %s, i: %s'%(q, i))
    # plt.show()
    # sys.exit()

    print('Finish computing lapfeat')
    dgms = alldgms(gs,
                   radius=float('inf'),
                   dataset='',
                   recompute_flag=True,
                   method='serial',
                   n=2 * n,
                   zigzag=zigzag)  # compute dgms (method='serial' in this example)
    print('Finish computing dgms')
    swdgms = dgms2swdgms(dgms)

    feat_kwargs = {'n_directions': 10, 'bw': 1}
    sw_kernel, _ = sw_parallel(swdgms,
                               swdgms,
                               kernel_type='sw',
                               parallel_flag=True,
                               **feat_kwargs)
    clf = classifier(np.zeros((len(labels), 10)),
                     labels,
                     method=None,
                     kernel=sw_kernel)
    print(clf.svm_kernel_())
    print(p, q, edge_kwargs)
Example #6
    gs2 = sbms(n=n, n1=75, n2=75, p=p, q=q)
    gs = gs2 + gs1
    labels = [1] * n + [2] * n

    # node filtration is the Fiedler vector.
    # edge_kwargs = {'h': 0.3, 'edgefunc': 'edge_prob'}
    # for i in range(len(gs)):
    #     g = gs[i]
    #     lp = LaplacianEigenmaps(d=1)
    #     lp.learn_embedding(g, weight='weight')
    #     lapfeat = lp.get_embedding()
    #     gs[i] = fil_strategy(g, lapfeat, method=fil_method, viz_flag=False, **edge_kwargs)
    # print('Finish computing lapfeat')

    # compute diagrams
    dgms = gs2dgms(gs, fil='deg', fil_d='sub', norm=True)

    # compute kernel and evaluation
    swdgms = dgms2swdgms(dgms)
    kwargs = {'bw': 1, 'n_directions': 10}
    sw_kernel, _ = sw_parallel(swdgms,
                               swdgms,
                               kernel_type='sw',
                               parallel_flag=False,
                               **kwargs)
    clf = classifier(np.zeros((len(labels), 10)),
                     labels,
                     method=None,
                     kernel=sw_kernel)
    print(clf.svm_kernel_())
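
Every example ends by feeding the Gram matrix into `classifier(..., kernel=...)` followed by `svm_kernel_(n_splits=10)`, i.e. a cross-validated SVM on a precomputed kernel. Below is a minimal scikit-learn sketch of that evaluation pattern; it is an assumption about what the helper wraps, not its actual code, and `evaluate_precomputed_kernel` is a hypothetical name.

import numpy as np
from sklearn.model_selection import StratifiedKFold
from sklearn.svm import SVC

def evaluate_precomputed_kernel(k, labels, n_splits=10):
    """Cross-validate an SVM on a precomputed kernel (Gram) matrix k."""
    labels = np.asarray(labels)
    accs = []
    skf = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=0)
    for train_idx, test_idx in skf.split(k, labels):
        clf = SVC(kernel='precomputed', C=1.0)
        # training block of the Gram matrix: rows and columns are training samples
        clf.fit(k[np.ix_(train_idx, train_idx)], labels[train_idx])
        # test block: rows are test samples, columns are training samples
        accs.append(clf.score(k[np.ix_(test_idx, train_idx)], labels[test_idx]))
    return float(np.mean(accs)), float(np.std(accs))

With the `sw_kernel` and `labels` from this example, something like `evaluate_precomputed_kernel(sw_kernel, labels, n_splits=10)` would reproduce the 10-fold setup used throughout.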