Code example #1 (votes: 0)
def maketsk(args):
    """Generate a batch of static graphs, optionally refined by rule-random rewriting.

    NOTE(review): pops 'rrg_iter' from *args*, so the caller's dict is mutated.
    """
    iterations = args.pop("rrg_iter")
    graphs = rg.make_graphs_static(**args)
    if iterations <= 0:
        return graphs
    # rule_rand_graphs returns a tuple; element 0 holds the graph list
    return rrg.rule_rand_graphs(graphs,
                                numgr=500 + EXPERIMENT_REPEATS,
                                iter=iterations)[0]
Code example #2 (votes: 0)
File: rule_rand_graphs.py — Project: smautner/graken
def test_rulerand():
    # Smoke test: expand 30 statically generated graphs with two rounds of
    # rule-random rewriting.
    import util.random_graphs as rg
    import structout as so
    import graphlearn3.util.setoperations as setop
    grs = rg.make_graphs_static()[:30]  #[:10] # default returns 100 graphs..
    res1, grammar1 = rule_rand_graphs(grs, numgr=500, iter=2)
    #res, grammar2=rule_rand_graphs(res1, numgr = 50, iter=1)
    #so.gprint(res) #!!!!!!!!
    # NOTE(review): the triple-quote below opens a string that is never closed
    # in this snippet — the original function appears truncated here.
    '''
Code example #3 (votes: 0)
File: debug.py — Project: smautner/graken
def test_neighexpansion():
    """Build a grammar from small random graphs and print one graph with its neighbors."""
    # args: 7 graphs, 4 nodes each, 3 node labels, 2 edge labels
    graphs = rg.make_graphs_static(7, 4, 3, 2, labeldistribution='uniform')
    optimizer = pareto.LocalLandmarksDistanceOptimizer()
    optimizer.enhance_grammar(graphs)

    first = graphs[0]
    neighbors = list(optimizer.grammar.neighbors(first))
    so.gprint(first)
    so.gprint(neighbors)
Code example #4 (votes: 0)
File: debug.py — Project: smautner/graken
def test_randgraphs():
    """Generate random graphs and print them three at a time."""
    # signature: make_graphs_static(n, ncnt, nlab, elab, maxdeg=3, labeldistribution='real')
    # args: 10 graphs, 5 nodes each, 5 node labels, 2 edge labels
    remaining = rg.make_graphs_static(10, 5, 5, 2, labeldistribution='uniform')
    so.graph.ginfo(remaining[0])
    while remaining:
        batch, remaining = remaining[:3], remaining[3:]
        so.gprint(batch, edgelabel='label')
    return remaining  # always [] once the loop drains the list
Code example #5 (votes: 0)
File: debug.py — Project: smautner/graken
def test_instancemaker():
    """Fit an InstanceMaker on random graphs and print each piece it yields."""
    # args: 7 graphs, 5 nodes each, 5 node labels, 2 edge labels
    graphs = rg.make_graphs_static(7, 5, 5, 2, labeldistribution='uniform')
    maker = InstanceMaker(n_landmarks=3, n_neighbors=6).fit(graphs, ntargets=2)
    landgraphs, des_dist, rest, target = maker.get()

    print("landmarks")
    so.gprint(landgraphs, edgelabel='label')
    print("des dist")
    print(des_dist)
    print("target")
    so.gprint(target)
    print("rest")
    so.gprint(rest, edgelabel='label')
Code example #6 (votes: 0)
File: debug.py — Project: smautner/graken
def test_grammar():
    """Build a grammar and visualize a random sample of its productions."""
    # args: 7 graphs, 5 nodes each, 5 node labels, 2 edge labels
    graphs = rg.make_graphs_static(7, 5, 5, 2, labeldistribution='uniform')
    optimizer = pareto.LocalLandmarksDistanceOptimizer()
    optimizer.enhance_grammar(graphs)
    print(optimizer.grammar)

    production_keys = list(optimizer.grammar.productions.keys())
    random.shuffle(production_keys)
    print("start w grammar")
    for key in production_keys[:10]:
        cip_list = list(optimizer.grammar.productions[key].values())
        # highlight core vs. interface nodes of every CIP
        colors = [[c.core_nodes, c.interface_nodes] for c in cip_list]
        so.gprint([c.graph for c in cip_list], color=colors)
        print(cip_list[0].__dict__)
Code example #7 (votes: 0)
File: debug.py — Project: smautner/graken
def test_pareto():
    """End-to-end run of the pareto optimizer on a batch of random graphs."""
    configure_logging(logging.getLogger(), verbosity=2)
    # args: 100 graphs, 5 nodes each, 4 node labels, 2 edge labels
    graphs = rg.make_graphs_static(100, 5, 4, 2,
                                   labeldistribution='uniform',
                                   allow_cycles=False)

    maker = InstanceMaker(n_landmarks=5, n_neighbors=50).fit(graphs, ntargets=2)
    optimizer = pareto.LocalLandmarksDistanceOptimizer(n_iter=7,
                                                       context_size=1,
                                                       multiproc=True)
    landmark_graphs, desired_distances, ranked_graphs, target_graph = maker.get()

    # Result is discarded; the test only checks that the run completes.
    optimizer.optimize(landmark_graphs,
                       desired_distances,
                       ranked_graphs,
                       start_graph_list=landmark_graphs)
    return None