def main(graph, fil, norm, permute, ss, epd, n_cv, flip, feat, feat_kwargs, ntda):
    """
    All hyperparameters go here.
    :param graph: graph dataset
    :param fil: filtration function
    :param norm: normalize or not
    :param permute: whether to permute dgms
    :param ss: use both sublevel and superlevel filtrations or not
    :param epd: include extended persistence or not
    :param n_cv: number of cross-validation runs
    :param flip: whether to flip diagrams when combining (passed to combine_dgms)
    :param feat: feature/kernel type: 'sw', 'pi', 'pss', 'wg', 'pervec', or 'pf'
    :param feat_kwargs: extra kwargs for the feature/kernel computation
    :param ntda: flag forwarded to gs2dgms_parallel
    :return: clf.stat
    """
    global gs
    print('feat kwargs', feat_kwargs)

    # skip runs whose hyperparameters are already recorded in the db
    db = get_tda_db()
    params = {'graph': graph, 'fil': fil, 'norm': norm, 'permute': permute,
              'ss': ss, 'epd': epd, 'n_cv': n_cv, 'flip': flip,
              'feat': feat, 'ntda': ntda, 'feat_kwargs': feat_kwargs}
    if check_duplicate(db, params):
        return

    label_flag = dgms_dir_test(fil=fil, fil_d='sub', norm=norm, graph=graph)[1]
    # gs, labels = load_graphs(dataset=graph, labels_only=label_flag)  # step 1
    gs, labels = load_tugraphs(graph, labels_only=False)  # labels_only=True means gs is None; turn on for speed

    # parallel
    # subdgms = gs2dgms(gs, n_jobs=-1, fil=fil, fil_d='sub', norm=norm, graph=graph, ntda=ntda, debug_flag=True)
    subdgms = gs2dgms_parallel(n_jobs=-1, fil=fil, fil_d='sub', norm=norm, graph=graph, ntda=ntda)
    supdgms = gs2dgms_parallel(n_jobs=-1, fil=fil, fil_d='sup', norm=norm, graph=graph, ntda=ntda)
    epddgms = gs2dgms_parallel(n_jobs=-1, fil=fil, one_hom=True, norm=norm, graph=graph, ntda=ntda)

    dgms = combine_dgms(subdgms, supdgms, epddgms, ss=ss, epd=epd, flip=flip)
    dgms = permute_dgms(dgms, permute_flag=permute)  # old way
    dgms_summary(dgms)
    swdgms = dgms2swdgms(dgms)

    if feat == 'sw':
        print(feat_kwargs)
        k, _ = sw_parallel(swdgms, swdgms, parallel_flag=True, kernel_type='sw', **feat_kwargs)
        print(k.shape)
        cmargs = {'print_flag': 'off'}  # confusion matrix
        clf = classifier(labels, labels, method='svm', n_cv=n_cv, kernel=k, **cmargs)
        clf.svm_kernel_(n_splits=10)

    elif feat == 'pi':  # vector
        params = {'bandwidth': 1.0, 'weight': (1, 1), 'im_range': [0, 1, 0, 1], 'resolution': [5, 5]}
        images = merge_dgms(subdgms, supdgms, epddgms, vectype='pi', ss=ss, epd=epd, **params)
        clf = classifier(images, labels, method='svm', n_cv=n_cv)
        clf.svm(n_splits=10)

    elif feat == 'pss':
        k, _ = sw_parallel(swdgms, swdgms, parallel_flag=True, kernel_type='pss', **feat_kwargs)
        # print(k.shape, k, np.max(k))
        clf = classifier(labels, labels, method='svm', n_cv=n_cv, kernel=k)
        clf.svm_kernel_(n_splits=10)

    elif feat == 'wg':
        k, _ = sw_parallel(swdgms, swdgms, parallel_flag=True, kernel_type='wg', **feat_kwargs)
        print(k.shape)
        clf = classifier(labels, labels, method='svm', n_cv=n_cv, kernel=k)
        clf.svm_kernel_(n_splits=10)

    elif feat == 'pervec':
        cmargs = {'print_flag': 'on'}  # confusion matrix
        pd_vector = dgms2vec(dgms, vectype='pervec', **feat_kwargs)
        clf = classifier(pd_vector, labels, method='svm', n_cv=n_cv, **cmargs)
        clf.svm(n_splits=10)

    elif feat == 'pf':
        k, _ = sw_parallel(swdgms, swdgms, parallel_flag=False, kernel_type='pf', **feat_kwargs)
        clf = classifier(labels, labels, method='svm', n_cv=n_cv, kernel=k)
        clf.svm_kernel_(n_splits=10)

    else:
        raise Exception('No such feat %s' % feat)

    print(clf.stat)
    print_line()
    return clf.stat
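# A minimal usage sketch of main(), not part of the original file: the dataset name 'imdb_binary',
# the 'jaccard' filtration, and the sliced-Wasserstein kwarg names ('bw', 'n_directions', 'K', 'p')
# are taken from other scripts in this repo; the remaining flag values and the bandwidth 1.0 are
# illustrative assumptions.
if __name__ == '__main__':
    stat = main(graph='imdb_binary', fil='jaccard', norm=True, permute=False, ss=True, epd=False,
                n_cv=1, flip=False, feat='sw',
                feat_kwargs={'bw': 1.0, 'n_directions': 10, 'K': 1, 'p': 1}, ntda=False)
    print(stat)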
# load graphs
gs, labels = load_graphs(dataset=args.graph)

# parallel
subdgms = gs2dgms_parallel(n_jobs=-1, fil=fil, fil_d='sub', norm=norm)
supdgms = gs2dgms_parallel(n_jobs=-1, fil=fil, fil_d='sup', norm=norm)
epddgms = gs2dgms_parallel(n_jobs=-1, fil=fil, one_hom=True, norm=norm)

# serial
# subdgms = gs2dgms(gs, fil=fil, fil_d='sub', norm=norm, one_hom=False)  # step 2  # TODO: need to add interface
# supdgms = gs2dgms(gs, fil=fil, fil_d='sup', norm=norm, one_hom=False)  # step 2
# epddgms = gs2dgms(gs, fil=fil, norm=norm, one_hom=True)  # step 2  # TODO

dgms = combine_dgms(subdgms, supdgms, epddgms, args)
dgms = permute_dgms(dgms, permute_flag=args.permute, permute_ratio=0.5)
dgms_summary(dgms)

# sliced-Wasserstein kernel
swdgms = dgms2swdgms(dgms)
kwargs = {'bw': args.bw, 'n_directions': 10, 'K': 1, 'p': 1}
sw_kernel, _ = sw_parallel(swdgms, swdgms, parallel_flag=True, kernel_type='sw', **kwargs)
print(sw_kernel.shape)

clf = classifier(labels, labels, method='svm', n_cv=args.n_cv, kernel=sw_kernel)
def main(graph, fil, norm, permute, ss, epd, n_cv, flip, feat, feat_kwargs):
    """
    All hyperparameters go here.
    :param graph: graph dataset
    :param fil: filtration function
    :param norm: normalize or not
    :param permute: whether to permute dgms
    :param ss: use both sublevel and superlevel filtrations or not
    :param epd: include extended persistence or not
    :param n_cv: number of cross-validation runs
    :return:
    """
    global gs
    print('kwargs', feat_kwargs)

    label_flag = dgms_dir_test(fil=fil, fil_d='sub', norm=norm, graph=graph)[1]
    # gs, labels = load_graphs(dataset=graph, labels_only=label_flag)  # step 1
    gs, labels = load_tugraphs(graph, labels_only=True)

    # parallel
    subdgms = gs2dgms_parallel(n_jobs=-1, fil=fil, fil_d='sub', norm=norm, graph=graph)
    supdgms = gs2dgms_parallel(n_jobs=-1, fil=fil, fil_d='sup', norm=norm, graph=graph)
    epddgms = gs2dgms_parallel(n_jobs=-1, fil=fil, one_hom=True, norm=norm, graph=graph)

    dgms = combine_dgms(subdgms, supdgms, epddgms, ss=ss, epd=epd, flip=flip)
    dgms = permute_dgms(dgms, permute_flag=permute, permute_ratio=0.5)
    dgms_summary(dgms)
    swdgms = dgms2swdgms(dgms)

    if feat == 'sw':
        print(feat_kwargs)
        k, _ = sw_parallel(swdgms, swdgms, parallel_flag=True, kernel_type='sw', **feat_kwargs)
        clf = classifier(labels, labels, method='svm', n_cv=n_cv, kernel=k)
        clf.svm_kernel_(n_splits=10)
        print(clf.stat)
        return clf.stat

    elif feat == 'pi':
        params = {'bandwidth': 1.0, 'weight': (1, 1), 'im_range': [0, 1, 0, 1], 'resolution': [5, 5]}
        images = merge_dgms(subdgms, supdgms, epddgms, vectype='pi', ss=ss, epd=epd, **params)
        clf = classifier(images, labels, method='svm', n_cv=n_cv)
        clf.svm(n_splits=10)
        return clf.stat

    elif feat == 'pss':
        k, _ = sw_parallel(swdgms, swdgms, parallel_flag=False, kernel_type='pss', **feat_kwargs)
        print(k.shape, k, np.max(k))
        clf = classifier(labels, labels, method='svm', n_cv=n_cv, kernel=k)
        clf.svm_kernel_(n_splits=10)
        print(clf.stat)
        return clf.stat

    elif feat == 'wg':
        k, _ = sw_parallel(swdgms, swdgms, parallel_flag=True, kernel_type='wg', **feat_kwargs)
        print(k.shape)
        clf = classifier(labels, labels, method='svm', n_cv=n_cv, kernel=k)
        clf.svm_kernel_(n_splits=10)
        print(clf.stat)
        return clf.stat

    elif feat == 'pdvector':
        pass
# node feat example
nodefeat_ = np.array(list(dict(nx.degree(g)).values())).reshape(len(g), 1)  # np.random.random((n_node, 1))
nodefeat_ = nodefeat_ / float(max(nodefeat_))
fil = fil_stradegy(g, fil='node', node_fil='sub', nodefeat=nodefeat_)
g = fil.build_fv()
x = graph2dgm(g)
diagram = x.get_diagram(g, key='fv', subflag='True', one_homology_flag=False,
                        parallel_flag=False, zigzag=False)
print(diagram)

# imdb
gs, labels = load_graphs(dataset='imdb_binary')  # step 1
subdgms = gs2dgms_parallel(n_jobs=1, fil='jaccard', fil_d='sub', one_hom=False,
                           debug_flag=False)  # step 2  # TODO: need to add interface
dgms_summary(subdgms)
debug(subdgms, 'subdgms')

g = nx.random_geometric_graph(100, 0.4)
print(edgefeat(g, fil='jaccard'))

np.random.seed(42)
n_node = 20
g = nx.random_geometric_graph(n_node, 0.5, seed=42)
diagram = node_fil_(g, fil='hop', norm=True, base=0)
print(diagram)
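# A small helper sketched from the degree example above (not in the original file): fil_stradegy,
# graph2dgm and get_diagram are called exactly as in that example; normalizing the degree vector
# to [0, 1] is assumed to be the intended default.
def degree_sublevel_diagram(g):
    """Sublevel persistence diagram of g using normalized node degree as the filtration value."""
    nodefeat = np.array(list(dict(nx.degree(g)).values())).reshape(len(g), 1)
    nodefeat = nodefeat / float(max(nodefeat))
    fil = fil_stradegy(g, fil='node', node_fil='sub', nodefeat=nodefeat)
    g = fil.build_fv()
    x = graph2dgm(g)
    return x.get_diagram(g, key='fv', subflag='True', one_homology_flag=False,
                         parallel_flag=False, zigzag=False)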
        b'upperarm': 6,
        b'upperleg': 7
    }
    return parts_dict


if __name__ == '__main__':
    from Esme.dgms.stats import stat, dgms_summary
    from itertools import chain
    import matplotlib.pyplot as plt

    num_pdpoints = []
    for i in range(1, 5):  # chain(range(1, 261), range(281, 401)):
        dgms = loaddgm(str(i), form='dionysus', print_flag=True)
        print(get_cat(i))
        _, stat = dgms_summary(dgms)  # stat is (mean, std, min, max)
        num_pdpoints.append(stat[0])
        plt.plot(stat)
    plt.show()
    sys.exit()

    res = face_idx(file='1')
    print(res)
    sys.exit()

    cat_dict = prince_cat()
    cat = 'Human'
    for idx in chain(range(1, 261), range(281, 401)):
        for k, v in cat_dict.items():
            if k[0] <= idx and k[1] > idx:
                cat = v
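# A hedged helper distilled from the loop above (not in the original file): prince_cat() is assumed
# to return a dict mapping (start, end) index ranges to category names, with start inclusive and
# end exclusive, matching the membership test used in the loop.
def idx2cat(idx, cat_dict):
    """Return the category whose (start, end) range contains idx, or None if no range matches."""
    for (start, end), name in cat_dict.items():
        if start <= idx < end:
            return name
    return None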