Code example #1
    def threadGraph(self, config):
        """
            Thread construisant le graph :
            lit les valeurs, les formate comme il faut, configure puis
            crée le graph
        """

        while not self.stop:

            # blocks until an item is put on the queue
            self.queue_graph.get(True)
            self.transmit_is_ready = False

            # Graph creation
            graph.create_graph(config, self.all_values, self.all_add_values, self.timelist)

            # Task finished, now ready
            self.transmit_is_ready = True
        print('threadGraph return 0')
        
        return 0
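A minimal producer-side sketch for the thread above: any item put on queue_graph wakes the blocking get(). The helper name request_graph and the worker argument are hypothetical, not part of the original project.

def request_graph(worker):
    # Wake the graph thread; the payload is never read, only the put() matters
    # (assumes queue_graph is a queue.Queue shared with the thread above).
    if worker.transmit_is_ready:
        worker.queue_graph.put(None)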
Code example #2
File: app.py Project: jradaelli/TDI0_miniproject
def index():
    if request.method == 'GET':
        return render_template('index.html')
    else:
        app.vars["stock_ticker"] = request.form['ticker']
        app.logger.debug(app.vars["stock_ticker"])
        app.vars["columns_name"] = request.form.getlist('features')
        app.logger.debug(app.vars["columns_name"])
        final_data = get_data(
            app.vars["stock_ticker"], app.vars["columns_name"])
        plot = create_graph(final_data, app.vars["columns_name"])
        script, div = components(plot)
        return render_template('graph.html', script=script, div=div)
Code example #3
File: main.py Project: jasmeeto/ka-project-infection
def main():
    '''
    main function 
    '''
    parser = argparse.ArgumentParser()
    parser.add_argument('type', help="total or limited")
    parser.add_argument('-l', '--limit', type=int, help="number to limit by for limited")
    parser.add_argument('-t', '--threshold', type=float, help="threshold value for limited (defaults to 0)")
    parser.add_argument('-r', '--random', type=float, help="use random test, takes in num nodes and edge probability as argument (creates new file in data folder)", nargs=2)
    parser.add_argument('-s', '--source', help="source node name")
    parser.add_argument('-i', '--input_file', help="input file location")
    parser.add_argument('-w', '--weighted', action='store_true', help="is the input weighted")
    parser.add_argument('-v', '--visualize', action='store_true', help="whether or not to visualize graph")
    parser.add_argument('-o', '--output', help="store visualization to output (requires ffmpeg) - paired with -v option")
    parser.add_argument('-g', '--graphviz', action='store_true', help="(optional) use graphviz to visualize (requires pygraphviz)")

    args = parser.parse_args()

    if args.random:
        args.input_file = random_test(args.random[0], args.random[1])
        args.output = args.input_file.replace('.dat', '.mp4').replace('data', 'output')

    # check the file only when no random graph was generated, so a missing
    # -i argument fails cleanly instead of raising on os.path.isfile(None)
    if not args.random and (args.input_file is None or not os.path.isfile(args.input_file)):
        print("need to give valid input file")
        exit(1)


    g = graph.create_graph(args.input_file, args.weighted)
    iterations = []
    if args.type == "limited":
        if not args.limit:
            print("need to give valid limit for 'limited' option")
            exit(1)
        if args.threshold:
            threshold = args.threshold
        else:
            threshold = 0
        if args.source: 
            iterations = infection.limited_infection(g, args.source, args.limit, threshold)
        else: 
            iterations = infection.limited_infection(g, limit=args.limit, threshold=threshold)
    else:  # args.type == "total" (or anything else)
        if args.source: 
            iterations = infection.total_infection(g, args.source)
        else: 
            iterations = infection.total_infection(g)

    if args.visualize:
        graph.animate(g, iterations, args.output, args.graphviz, args.weighted)
Code example #4
def set_graph():
    """
    sekvencijalno kreira graf, sprema ga na disk, učitava i ispisuje radi provjere
   
    Parameters 
    ----------
    None
    
    Returns:
    -------
    None
    
    """

    nodes = int(input("Enter number of nodes: "))
    generatedGraph = create_graph(nodes)

    saveGraph(generatedGraph, 'testExample.txt')
    loadedGraph = loadGraph('testExample.txt')

    print('Initial graph:\n', loadedGraph, '\nNote: initial graph visualization is stored in pdf file')
Code example #5
def main(argv=None):
    if argv is None:
        argv = sys.argv[1:]
    args = parse_cmd_options(argv)

    logger = logging.getLogger('')
    if args.verbose:
        logger.setLevel(logging.DEBUG)
    handler = logging.StreamHandler()
    logger.addHandler(handler)

    filename = args.filename
    logger.debug("Filename: %s" % filename)
    eventlist = get_events_from_file(filename)
    logger.info("%s events found" % len(eventlist))
    max_day = max(e.day for e in eventlist)
    for day in range(1, max_day + 1):
        events = graph.create_graph([e for e in eventlist if e.day == day])
        graph.find_conflicts(events)
        for e in events:
            e.save()
            print(e.get_id(), e.conflicts_with)
Code example #6
    def __init__(self, node_count, edge_count, edges):
        self.node_count = node_count
        self.edge_count = edge_count
        self.edges_as_given = edges
        self.graph = create_graph(node_count, edge_count, edges)

        #degrees = np.array([n.degree for n in self.graph])
        degrees = np.array(
            [sum([j.degree for j in n.edges]) + n.degree for n in self.graph])

        self.perm = np.argsort(degrees)[::-1]

        for i in range(len(self.perm)):
            self.graph[self.perm[i]].id = i

        self.edges_A = []
        self.edges_B = []
        for n in self.graph:
            for nb in n.edges:
                self.edges_A.append(n.id)
                self.edges_B.append(nb.id)

        self.edges_A = np.array(self.edges_A)
        self.edges_B = np.array(self.edges_B)
Code example #7
def run():
    #-------------------------------------------------------------------------------
    # Default input values

    #Default csv file
    csv_file = 'default_input.csv'

    #dict of target number of students in each slot
    slotdict = {
        "Mo_1900": 8,
        "Mo_2100": 6,
        "Tu_1900": 5,
        "Tu_2100": 4,
        "We_1900": 4,
        "We_2100": 4,
        "Th_1900": 4,
        "Th_2100": 4,
        "Fr_1900": 4,
        "Fr_2100": 4,
        "Sa_1500": 5,
        "Sa_1600": 6,
        "Sa_1700": 5,
        "Su_1700": 4,
        "Su_1800": 3,
        "Su_1900": 6,
        "Su_2000": 4,
        "Su_2100": 6
    }

    duration = 120  #length of slots (in minutes)

    #default column values
    gap = 180
    cap = 2
    exp = 3
    skill = 4

    #list of slots that need more skilled TAs
    stress_slots = []

    #numeric value indicating how many TAs the scheduler can hire above the targeted value for any given slot
    target_delta = 1

    #number of shifts the scheduler can assign in addition to the slotdict shift numbers
    flex_shifts = 4

    #sets minimum number of experienced TAs per slot
    min_exp = 0

    #sets minimum number of skilled TAs per stress slot
    min_skill = 0

    #gets number of slots
    num_slots = 0
    for slot in slotdict:
        num_slots += slotdict[slot]

    #Default weights
    weight_dict = {}
    weight_dict['slot_type'] = 4
    weight_dict['no_1'] = 3
    weight_dict['guarantee_shift'] = 5
    weight_dict['avail'] = 7
    weight_dict['shift_cap'] = 5
    weight_dict['equality'] = 3
    #-------------------------------------------------------------------------------

    df = input_creator.get_df(csv_file)
    students = list(df['name'])
    input_creator.check_col(df, gap, cap, exp, skill)

    #dict of slots to check as keys, and overlapping slots as values (student won't be placed in overlap)
    slots = input_creator.get_slots(df)

    #dict of slots and their prev slots
    prev_slot = input_creator.get_prev_slots(df, duration)

    #create graph nodes and weight edges
    graph_data = graph.create_graph(df, weight_dict, slotdict, prev_slot,
                                    num_slots, duration)
    student_nodes = graph_data[0]
    slot_nodes = graph_data[1]
    wt = graph_data[2]

    #solve the problem, get the ordered schedule, updated df
    results = solver.run_solver(student_nodes, slot_nodes, wt, df, slotdict,
                                min_exp, min_skill, stress_slots, target_delta,
                                flex_shifts, duration)
    schedule = results[0]
    df = results[1]

    #get stats
    happiness_stats = stats.hap_stats(df, schedule)
    corr_stats = stats.corr_stats(df, schedule)
    student_stats = stats.stud_stats(df, schedule, prev_slot)
    slot_stats = stats.slotsize_stats(schedule, slotdict)
    #format output
    format_weights = {'weights used': weight_dict}
    sched_stats = {
        'avg hap': happiness_stats[0],
        'std dev of hap': happiness_stats[1],
        'min hap stud outliers': happiness_stats[2],
        'avail to hap corr': corr_stats[0],
        'skill to hap corr': corr_stats[1],
        'experience to hap corr': corr_stats[2],
        'studs who got 1s': student_stats[0],
        'studs without shift': student_stats[2],
        'wrong shift type studs': student_stats[1]
    }
    output_data = [format_weights, schedule, sched_stats, df]

    return output_data
Code example #8
File: img_to_graph.py Project: SummerBigData/Nuclei
def disp_img_connectivity(k=5):
    img = get_img()

    y, x = np.random.choice(range(img.shape[0] - k)), np.random.choice(
        range(img.shape[1] - k))
    patch = img[y:y + k, x:x + k]
    maxval = patch.max()
    '''
    dg = nx.grid_2d_graph(k, k).to_directed()
    ebunch = [e for e in dg.edges]
    dg.remove_edges_from(ebunch)
    pos = dict((n, (n[0], k-n[1])) for n in dg.nodes())

    d = 2.5
    for i, row in enumerate(patch):
        for j, val in enumerate(row):
            if val == 0:
                continue

            lower_pts = np.argwhere(neighborhood(patch, i, j) < val)
            for pt in lower_pts:
                lowval = patch[pt[0], pt[1]]
                weight = d * (1 - (maxval-lowval)/float(maxval))
                dg.add_weighted_edges_from([((j, i), (pt[1], pt[0]), weight)])

            eq_pts = np.argwhere(neighborhood(patch, i, j) == val)
            for pt in eq_pts:
                dg.add_weighted_edges_from([
                    ((j, i), (pt[1], pt[0]), d),
                    ((pt[1], pt[0]), (j, i), d)])
    '''
    from graph import create_graph
    dg = create_graph(patch)
    pos = dict((n, (n[0], k - n[1])) for n in dg.nodes())

    _, axs = plt.subplots(1, 2)
    axs[1].axis('off')
    nx.draw_networkx_nodes(dg, pos, node_size=350)

    all_weights = []
    for (n1, n2, data) in dg.edges(data=True):
        all_weights.append(data['weight'])

    for w in list(set(all_weights)):
        w_edges = [(n1, n2) for (n1, n2, ea) in dg.edges(data=True)
                   if ea['weight'] == w]
        nx.draw_networkx_edges(dg, pos, edgelist=w_edges, width=w)

    nx.draw_networkx_labels(dg, pos, font_size=10)

    for i in range(k):
        for j in range(k):
            color = 'k'
            if patch[j, i] < patch.mean():
                color = 'w'
            axs[0].text(i,
                        j,
                        patch[j, i],
                        ha='center',
                        va='center',
                        color=color)

    gray_imshow(axs[0], patch)
    plt.show()
Code example #9
    ################# Original Tree Making Of #################

    tree = id3.id3(tr_mat, tr_res, ATTRIBUTES)

    kaccuracy = kfold.get_accuracy(tr_mat, tr_res, RESULTS, ATTRIBUTES)

    predicted = []
    pbar = tqdm(total=len(te_mat), desc='', leave=False)
    for i in range(te_mat.shape[0]):
        res = id3.get_class(tree, te_mat[i, :])
        predicted.append(res)
        # print("Inferred class: {} expected class: {}".format(res, te_res[i]))
        pbar.update(1)
    pbar.close()

    gv.create_graph(tree, 'ID3 Wine quality Decision Tree')

    conf_mat_tr = perf.calculate_confusion_matrix(tree, tr_mat, tr_res,
                                                  RESULTS)
    conf_mat_te = perf.calculate_confusion_matrix(tree, te_mat, te_res,
                                                  RESULTS)

    print("------------Original tree------------")
    print("KFold Cross Validation Accuracy on training set: {}".format(
        kaccuracy))
    print("Accuracy on training set: {}".format(
        perf.get_accuracy(conf_mat_tr)))
    print("Accuracy on testing set: {}".format(perf.get_accuracy(conf_mat_te)))
    print("Classification report:\n")
    print(classification_report(te_res, predicted))
Code example #10
    return g1_inter


def oppo_inter(a_inter):
    b_inter = {}
    for k, v in a_inter.items():
        for node in v:
            if node not in b_inter:
                b_inter[node] = [k]
            elif k not in b_inter[node]:
                # the original's setdefault(node, tmp.append(k)) relied on the
                # in-place append; a plain append says the same thing directly
                b_inter[node].append(k)

    return b_inter


if __name__ == '__main__':
    num_nodes = 1000
    g1 = create_graph('ba', num_nodes, 4)
    g2 = create_graph('ba', num_nodes, 4)

    g1c = create_couplings(g1, g2, 5, 0.5)
    # g2c = generate_inter_oppo(g1c)

    # print g1c
    # print "============================="
    # print g2c

    create_couplings_121(g1, g2)
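A small hypothetical check of oppo_inter: it inverts a coupling map so that each node of the second network lists the first-network nodes coupled to it (the sample dict below is made up):

a = {'a1': ['b1', 'b2'], 'a2': ['b2']}
print(oppo_inter(a))  # {'b1': ['a1'], 'b2': ['a1', 'a2']}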
Code example #11
import numpy as np
import pandas as pd
import copy
import graph as gutils
import matplotlib.pyplot as plt
import seaborn as sns
from collections import defaultdict

kb = pd.read_csv('../data/FB15k-237/train.txt',
                 sep='\t',
                 names=['e1', 'r', 'e2'])
valid = pd.read_csv('data/WN18RR/valid.txt', sep='\t', names=['e1', 'r', 'e2'])

G = gutils.create_graph(kb, add_inverse=True)
G2 = gutils.create_graph(kb, add_inverse=False)

print(kb.shape)


def plot_frequency(col, percentile=0.99):
    # how often values are being reused
    counts = col.value_counts().value_counts().sort_index()

    cum_percent = np.cumsum(counts) / sum(counts)
    counts = counts.loc[cum_percent <= percentile]

    sns.lineplot(x=counts.index, y=counts)


plot_frequency(kb['e1'])
plot_frequency(kb['e2'])
Code example #12
    te_mat = np.array(id3.strings_to_numbers(te_mat))

    ################# Original Tree Making Of #################

    tree = id3.id3(tr_mat, tr_res, ATTRIBUTES)

    kaccuracy = kfold.get_accuracy(tr_mat, tr_res, RESULTS, ATTRIBUTES)

    predicted = []
    for i in range(te_mat.shape[0]):
        res = id3.get_class(tree, te_mat[i, :])
        predicted.append(res)
        # print("Inferred class: {} expected class: {}".format(res, te_res[i]))

    gv.create_graph(tree, 'ID3 Iris Decision Tree')

    conf_mat_tr = perf.calculate_confusion_matrix(tree, tr_mat, tr_res,
                                                  RESULTS)
    conf_mat_te = perf.calculate_confusion_matrix(tree, te_mat, te_res,
                                                  RESULTS)

    print("------------Original tree------------")
    print("KFold Cross Validation Accuracy on training set: {}".format(
        kaccuracy))
    print("Accuracy on training set: {}".format(
        perf.get_accuracy(conf_mat_tr)))
    print("Accuracy on testing set: {}".format(perf.get_accuracy(conf_mat_te)))
    print("Classification report:\n")
    print(classification_report(te_res, predicted))
Code example #13
import os
import matplotlib.pyplot as plt
import graph


def parse_data(directory):
    network = {}
    people = os.listdir(directory)
    for filename in people:
        name = filename
        if filename.endswith('.txt'):
            name = filename[:-4].strip()
        name = name.replace('.', ' ').lower()
        acquaintances = []
        # use a context manager so the file handle is closed promptly
        with open(os.path.join(directory, filename)) as f:
            for name_connected in f:
                acquaintances.append(name_connected.strip().lower())
        network[name] = acquaintances
    return network


if __name__ == '__main__':

    network = parse_data(directory='people')
    plt.figure()
    plt.title("Social Network", fontsize='xx-large')
    graph.create_graph(network)

    plt.show()
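The script above expects a people directory in which every file is named after a person (with '.' separating name parts) and lists one acquaintance per line; a minimal sketch that builds such a fixture, with made-up names:

import os

os.makedirs('people', exist_ok=True)
with open(os.path.join('people', 'ada.lovelace.txt'), 'w') as f:
    f.write('charles babbage\n')
with open(os.path.join('people', 'charles.babbage.txt'), 'w') as f:
    f.write('ada lovelace\n')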
Code example #14
#!/usr/bin/env python
# coding: utf-8
# created by [email protected] 
# Date: 2016/1/16 
# Time: 15:14
#
#from src.cascade.findpc import findpc
from findpc import findpc
#from src.cascade.graph import create_graph
from graph import create_graph
#from src.couple.inter_couple import create_couplings_121
from inter_couple import create_couplings_121
import networkx as nx

num_nodes = 1000
degree = 4

g1 = create_graph('er', num_nodes, degree)
g2 = create_graph('er', num_nodes, degree)

print(findpc(g1, g2))
Code example #15
def create_er():
    Ghviet = create_graph('er', 1000, 4)
    Ggarr = create_graph('er', 1000, 4)
    h_inter = {}
    g_inter = {}
    i = 0
    for net in Ghviet:  #Ggarr:
        mindist = 1000000.0
        #print net
        inter = ''
        for node in Ggarr:  #Ghviet:
            #x1 = hc_dict.get(node)[1]
            #x1 = hc_dict.get(node)[0]
            x1 = gc_dict.get(node)[0]
            #y1 = hc_dict.get(node)[2]
            #y1 = hc_dict.get(node)[1]
            y1 = gc_dict.get(node)[1]
            #x2 = gc_dict.get(net)[0]
            x2 = hc_dict.get(net)[0]
            #y2 = gc_dict.get(net)[1]
            y2 = hc_dict.get(net)[1]
            dist = (x1 - x2) * (x1 - x2) + (y1 - y2) * (y1 - y2)
            if mindist > dist:
                mindist = dist
                inter = node

        if inter not in g_inter:
            #h_inter[inter] = [net]
            g_inter[inter] = [net]
        else:
            # the original's setdefault(inter, tmp.append(net)) relied on the
            # in-place append; a plain append says the same thing directly
            g_inter[inter].append(net)
    #########################################################
    #attach one power node to each communication node in garr:
    for net in Ggarr:  #Ggarr:
        mindist = 1000000.0
        #print net
        inter = ''
        for node in Ghviet:  #Ghviet:
            #x1 = hc_dict.get(node)[1]
            #x1 = hc_dict.get(node)[0]
            x1 = hc_dict.get(node)[0]
            #y1 = hc_dict.get(node)[2]
            #y1 = hc_dict.get(node)[1]
            y1 = hc_dict.get(node)[1]
            #x2 = gc_dict.get(net)[0]
            x2 = gc_dict.get(net)[0]
            #y2 = gc_dict.get(net)[1]
            y2 = gc_dict.get(net)[1]
            dist = (x1 - x2) * (x1 - x2) + (y1 - y2) * (y1 - y2)
            if mindist > dist:
                mindist = dist
                inter = node
        if inter not in h_inter:
            #print 'Here, inter, net:'
            #print inter, net
            h_inter[inter] = [net]
        else:
            h_inter[inter].append(net)
    #print('hviet edges: ' + str(Ghviet.size()))
    print('garr nodes: ' + str(len(Ggarr.nodes())))
    print(Ggarr.nodes())
    print('garr edges: ' + str(len(Ggarr.edges())))
    print(Ggarr.edges())

    ###############################
    for i in Ghviet.nodes():
        if 'Longitude' not in Ghviet.node[i]:
            Ghviet.node[i]['Longitude'] = hc_dict.get(i)[1]  #random.randint(0,300)
        if 'Latitude' not in Ghviet.node[i]:
            Ghviet.node[i]['Latitude'] = hc_dict.get(i)[0]  #random.randint(0,300)
    ### Ggarr:
    for i in Ggarr.nodes():
        if 'Longitude' not in Ggarr.node[i]:
            Ggarr.node[i]['Longitude'] = gc_dict.get(i)[1]  #random.randint(0,300)
        if 'Latitude' not in Ggarr.node[i]:
            Ggarr.node[i]['Latitude'] = gc_dict.get(i)[0]  #random.randint(0,300)
    ################################
    nx.write_gml(Ghviet, "hviet.gml")
    nx.write_gml(Ggarr, "garr.gml")
    f_hviet.close()
    f_hvietc.close()
    f_garr.close()
    f_garrc.close()
    return [Ghviet, Ggarr, h_inter, g_inter]
Code example #16
File: main.py Project: mattzzz/grarphthebuilding
        print("Trying to rectify the image...")
        t_start = time.time()
        image_rectifier = ImageRectifier(frame)
        #image_rectifier = ImageRectifier(join(DATA_PATH, 'examples_png/ex1_pic.png'))
        #cv2.imshow('rectified', image_rectifier.img)

        # TODO: Detect symbols/edges and build graph
        #detected_graph = GraphGenerator(cv2.imread(join(DATA_PATH, 'examples_png/ex0.png'))).graph
        print("Rectified in %f seconds... now detecting the graph" %
              (time.time() - t_start))
        detected_graph = GraphGenerator(frame).graph

        # Overlay the detected graph over original image
        print("Graph detected in %f seconds! Now overlaying the symbols..." %
              (time.time() - t_start))
        graph = create_graph(**detected_graph)
        overlay_image = OverlayDrawer(graph).draw(*frame.shape[:2])

        # FIXME: More complicated adding needed so the overlay is not transparent
        # Where blank_image values are not 0, we want to override the values in gray
        gray = cv2.addWeighted(overlay_image, 1.0, frame, 0.5, 0.5)
        #gray = gray + overlay_image

        # Display the resulting composed image
        cv2.imshow('window', gray)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # When everything done, release the capture
    capture.release()
    cv2.destroyAllWindows()
Code example #17
        open('../models/words/inverted_vocab.json'))

    meter = json.load(
        open('../models/words/meter.json'))
    inverted_meter = json.load(
        open('../models/words/inverted_meter.json'))

    rhyme = json.load(
        open('../models/words/rhyme.json'))
    inverted_rhyme = json.load(
        open('../models/words/inverted_rhyme.json'))

    pos = json.load(
        open('../models/words/pos.json'))
    inverted_pos = json.load(
        open('../models/words/inverted_pos.json'))

    for i in range(len(O)):
        top_n = O[i].argsort()[-10:][::-1]
        print("\nHidden state " + str(i))
        for j in top_n:
            print(vocab[j] + ",", end=" ")
        print("")
        for j in top_n:
            print(inverted_pos[vocab[j]][0] + ",", end=" ")
        print(" ")
        for j in top_n:
            print(inverted_meter[vocab[j]][0] + ",", end=" ")
        print()
    create_graph(A_conc.T).render("36_state_backward")
Code example #18
    today = date.today().isoformat()
    params = {
        'user_agent': MAIL,
        'workspace_id': W_ID,
        'since': since,
        'until': until
    }
    auth = requests.auth.HTTPBasicAuth(API_TOKEN, 'api_token')
    return requests.get('https://toggl.com/reports/api/v2/details',
                        auth=auth,
                        headers=headers,
                        params=params)


if __name__ == '__main__':
    details = get_toggl().json()['data']

    for data in details:
        project = data['project']
        if project == proj:
            time = (round(float(data['dur']) / 3600000, 2) * 60)
            date = data['start'].split('T')[0]
            time_list.append(time)
            date_list.append(date)
            # toggl_data.update(zip(date_list, time_list))

df = pd.DataFrame({'date': date_list, 'time': time_list})
df.to_csv('toggl_data.csv', index=False)

graph.create_graph()  # create the graph
Code example #19
data_table = data_table + '</table>'

disk_table = '<h2>Opslag</h2><table id="diskinfo"><tr><th>Disk</th><th>Total</th><th>Free</th><th>Used</th></tr>'
try:
    for disk in current_agent.data['diskinfo']:
        # the "Used" column should show 100 - free%; the original printed the free percentage
        used_pct = 100 - int(int(disk['free']) / int(disk['total']) * 100)
        disk_table = disk_table + '<tr><td>%s</td><td>%s</td><td>%s</td><td>%s%%</td></tr>' % (disk['id'], disk['total'], disk['free'], used_pct)
except Exception:
    diskerr = traceback.format_exc()
    logger.error('Fout bij het laden van de schijfinformatie: %s' % diskerr)

disk_table = disk_table + '</table>'

graphs = '<h2>Geschiedenis</h2>'
# TODO: add RAM here.
graph_items = ['cpu_load', 'ram_free', 'temperature', 'no_processes', 'no_services', 'no_users']
for graph_item in graph_items:
    try:
        current_graph = graph.create_graph(current_agent.ip, graph_item)
        graphs = graphs + current_graph
    except Exception:
        logger.error('Kon geen grafiek maken: %s' % traceback.format_exc())

actions_div = '<div id="actions"><h2>Acties</h2><ul><a href="web_agent.py?id=%s&action=reboot"><li>Reboot</li></a>\n' % current_agent.ip
for custom_action in database.get_actions(current_agent.ip):
    actions_div = actions_div + '<a href="web_agent.py?id=%s&action=%s"><li>%s</li></a>' % (current_agent.ip, custom_action[0], custom_action[1])
actions_div = actions_div + '</ul></div>'

page_title = '<h1>%s</h1>' % current_agent.info['hostname']
all_content = page_title + executed_action_div + actions_div + cpu_ram_table + data_table + disk_table + graphs
print(helper_web.create_html(all_content))
Code example #20
File: run.py Project: x0rzkov/twitter-patterns
#!/bin/python
#%%
import os
import pandas as pd

from dataset import merge_tweets_of_all_groups
from graph import create_graph, graph_measures, merge_graph_feats_with_tweet_feats, draw_graph

# merge_tweets_of_all_groups()

g = create_graph(only_classified_users=True, override=True)
# meas = graph_measures(g)
# merge_graph_feats_with_tweet_feats(meas)

draw_graph(False)
Code example #21
File: server.py Project: jdhenke/uap
    def __init__(self, matrix_path, dim_list, node_type):
        self.graph = graph.create_graph(matrix_path, dim_list, node_type)
Code example #22
        print(f"hit @{i}: {hit}, MRR: {mrr}")


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Process some integers.')
    parser.add_argument('--dataset', help='name of the dataset', default='WN18RR')
    args = parser.parse_args()
    dataset = args.dataset

    train_dir = os.path.join("data", dataset, "train.txt")
    valid_dir = os.path.join("data", dataset, "valid.txt")

    print(f'load dataset {dataset}')
    train = pd.read_csv(train_dir, sep='\t', names=['e1', 'r', 'e2'])
    valid = pd.read_csv(valid_dir, sep='\t', names=['e1', 'r', 'e2'])

    relations_kb = memory.make_relations_kb(train, valid)
    G = create_graph(train, add_inverse_name=False)
    valid = valid.loc[valid.apply(lambda x: x.e1 in G and x.e2 in G, axis=1)]  # filter out nodes that are not present in train

    print('start cases')
    cases = memory.create_memory_cases(G, cutoff=cutoff, max_relations=max_relations, cores=cores)
    print('start similarity')
    sim_mat, node_ids = memory.create_similarity(G, sparse=True)

    print('start inference')
    ranks_tail = pipeline(valid, G, sim_mat, node_ids, cases, relations_kb=relations_kb, top_k=top_k, type='tail')
    ranks_head = pipeline(valid, G, sim_mat, node_ids, cases, relations_kb=relations_kb, top_k=top_k, type='head')

    print_scores(ranks_tail, ranks_head)
Code example #23
File: server.py Project: jdhenke/live-kb-viz
    def create_kb(self, hashValue, assertions, axesList, nodeType):
        graph_instance = graph.create_graph(json.loads(assertions), json.loads(axesList), nodeType)
        self.graphs[hashValue] = graph_instance
        return {"hashValue": hashValue}
Code example #24
File: main.py Project: Gajan-L/CSCI6511
import random
from graph import create_graph, dijkstra, a_star
from count_time import timeit

if __name__ == '__main__':
    # read graph file and create the graph
    data_dir = "./input/"
    graph = create_graph(data_dir)

    # get the start and end points
    start, end = random.sample(graph.nodes, 2)

    path_1 = dijkstra(start, end, graph)
    print(f"Shortest path from {start} to {end}:", path_1)
    print(" ")
    path_2 = a_star(start, end, graph)
    print(f"Shortest path from {start} to {end}:", path_2)

Code example #25
File: create_graph.py Project: wsgan001/motifwalk
# coding: utf-8

import util
from graph import create_graph
import pandas as pd

edge_file = "data/com-youtube.ungraph.txt"
com_file = "data/com-youtube.top5000.cmty.txt"

adj_list = util.txt_edgelist_to_adjlist(edge_file)
com_file = util.txt_community_to_dict_transposed(com_file)

# save to youtube_small_2.graph
g = create_graph("youtube_small_2", adj_list, com_file, remove_unlabeled=True)

num_edges = sum([len(a) for a in g.values()]) // 2

print("Create graph successfully")
print("Number of nodes: {}".format(len(g)))
print("Number of edges: {}".format(num_edges))
Code example #26
                 left_on=['e2', 'r'],
                 right_on=['e2', 'r'])[['e2', 'r']].drop_duplicates()

sh1, sh2, sh3, sh4 = e1_e1.shape[0], e1_e2.shape[0], e2_e1.shape[0], e2_e2.shape[0]
print(
    f"train e1 valid e1: {sh1}, train e1 valid e2: {sh2}, train e2 valid e1: {sh3}, train e2 valid e2: {sh4}"
)

e1, r = e1_e1.iloc[0][['e1', 'r']]
print(kb.loc[(kb.e1 == e1) & (kb.r == r)])
print(valid.loc[(valid.e1 == e1) & (valid.r == r)])  # 1-> 114, 0 ->34

#%% can we access valid e2 through other paths in train data

G = graph.create_graph(kb)
non_exist = []
for i, row in enumerate(valid.itertuples()):
    try:
        nx.all_simple_paths(G, row.e1, row.e2, cutoff=5).__next__()
    except StopIteration:
        non_exist.append(row)
    except AttributeError:
        non_exist.append(row)
    except nx.exception.NodeNotFound:
        continue
print(
    f"in validation with 5 steps the relation doesn't exist in {len(non_exist)} cases"
)

train_counts = kb['e1'].value_counts()
Code example #27
def main_graphs():
    graph = g.create_graph(16, 30)
    g.render_graph(graph, cmcm.calculate_mcm(graph),
                   "Visualizations/heuristic")
    g.render_graph(graph, nmcm.calculate_mcm(graph), "Visualizations/networkx")
Code example #28
    min_conn_time = 0
    min_conn_ratio_time = 0

    max_degree_abs_err = 0
    min_conn_abs_err = 0
    min_conn_ratio_abs_err = 0

    max_degree_rel_err = 0
    min_conn_rel_err = 0
    min_conn_ratio_rel_err = 0

    for i in range(n_iter):
        print("Iterazione {}".format(i + 1))
        if threshold:
            graph = create_graph(dim,
                                 threshold=threshold,
                                 connected=cconnected)
        else:
            graph = create_graph_with_n_edges(dim, edges=n_edges)
        n_connected = calc_objective(graph, [])

        opt, _ = global_optimum(graph, k, "greedy_eval.pl", "greedy-out")

        # Evaluate the Max Degree Best heuristic
        start_time = time.time()
        max_degree_removed = algo_greedy(graph, k, max_degree_best)
        max_degree_sol = calc_objective(graph, max_degree_removed)
        calc_time = time.time() - start_time

        rel_error, abs_error = calc_errors(opt, max_degree_sol)
        max_degree_abs_err += abs_error
Code example #29
import graph
from depth_first_search import depth_first_search
from breadth_first_search import breadth_first_search

edges = [(0, 1), (0, 2), (0, 3), (1, 4), (1, 5), (2, 6),
         (2, 7), (3, 8), (3, 9), (4, 10), (4, 11)]
G, _ = graph.create_graph(edges)

start_vertex = G.get_vertex(0)
breadth = breadth_first_search(G)
breadth(G, start_vertex)

depth = depth_first_search(G)
depth(G, start_vertex)

print('Undirected Case.')
print(edges)
print('                             ')
print('==============================')
print('Breadth First traversal of G')
for edge in breadth.breadth_traversal:
    print((edge.endPoints()[0].element(), edge.endPoints()[1].element()))
print('==============================')
print('Depth First traversal of G')
for edge in depth.depth_traversal:
    print((edge.endPoints()[0].element(), edge.endPoints()[1].element()))

print('                             ')
print('==============================')
print('==============================')
print('                             ')
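A directed companion run would exercise the same traversal on a digraph; this sketch assumes create_graph accepts the is_directed flag seen in code example #37, which appears to come from the same codebase:

DG, _ = graph.create_graph(edges, is_directed=True)
start_vertex = DG.get_vertex(0)
breadth = breadth_first_search(DG)
breadth(DG, start_vertex)
print('Directed Case.')
for edge in breadth.breadth_traversal:
    print((edge.endPoints()[0].element(), edge.endPoints()[1].element()))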
Code example #30
    ################# Original Tree Making Of #################

    tree = id3.id3(tr_mat, tr_res, ATTRIBUTES)

    #kaccuracy = kfold.get_accuracy(tr_mat, tr_res, RESULTS, ATTRIBUTES)

    predicted = []
    pbar = tqdm(total=len(te_mat), desc='', leave=False)
    for i in range(te_mat.shape[0]):
        res = id3.get_class(tree, te_mat[i, :])
        predicted.append(res)
        # print("Inferred class: {} expected class: {}".format(res, te_res[i]))
        pbar.update(1)
    pbar.close()

    gv.create_graph(tree, 'ID3 Digits Decision Tree')

    conf_mat_tr = perf.calculate_confusion_matrix(tree, tr_mat, tr_res,
                                                  RESULTS)
    conf_mat_te = perf.calculate_confusion_matrix(tree, te_mat, te_res,
                                                  RESULTS)

    print("------------Original tree------------")
    #print("KFold Cross Validation Accuracy on training set: {}".format(kaccuracy))
    print("Accuracy on training set: {}".format(
        perf.get_accuracy(conf_mat_tr)))
    print("Accuracy on testing set: {}".format(perf.get_accuracy(conf_mat_te)))
    print("Classification report:\n")
    print(classification_report(te_res, predicted))

    ################# Pruned Tree Making Of #################
Code example #31
import random
import numpy as np

from abc import ABC, abstractmethod

from grid.get_grid import get_grid
from graph import create_graph
from algorithms.help_functions import *

grid = np.array(get_grid(), copy=True)  # movement grid
graph = create_graph()  # movement graph


class AlgorithmInterface(ABC):
    def __init__(self):
        self.goalNameKey = ""
        self.player = None

    def run(self):
        pass

    @abstractmethod
    def getNextStep(self):
        pass

    def getGoal(self, index):
        # Red ghost's goal is player (index = 1)
        if index == 1:
            return pixelToGrid((self.player.x, self.player.y))

        # Lightblue ghost's goal is 4 fields ahead of player's current position (index = 2)
Code example #32
    def test_create_graph(self):
        G = g.create_graph()
        self.assertEqual(len(G.nodes()), 0)
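A hypothetical companion test, assuming create_graph can also be seeded with an edge list and returns a networkx-style graph (the edges keyword is illustrative, not a confirmed signature):

    def test_create_graph_with_edges(self):
        # assumption: create_graph(edges=...) builds a graph from the given pairs
        G = g.create_graph(edges=[(0, 1), (1, 2)])
        self.assertEqual(len(G.nodes()), 3)
        self.assertEqual(len(G.edges()), 2)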
Code example #33
path_object = pathlib.Path(SOURCE / 'outputs')
if path_object.exists():
    shutil.rmtree(SOURCE / 'outputs')
os.makedirs(SOURCE / 'outputs')

# pre process data #
print("pre processing data...")
posts_data = preprocessing.preprocess_text(posts_data)
users_data = preprocessing.preprocess_text(users_data)

# create network with topics #
print("create network")
topics = graph.get_topics(users_data, 0.1, 5)
network_file_name = SOURCE / 'outputs/bullies_network.csv'
graph.create_csv_network_from_topics(network_file_name, topics)
network_graph = graph.create_graph(network_file_name)

# pre process network #
print("pre processing network...")
network_graph = preprocessing.preprocess_graph(network_graph,
                                               0.1)  #todo change back to 0.1
graph.graph_attributes(network_graph)

# extract nlp features #
print("extract nlp features...")
feature_list = [
    'post_length', 'tfidf', 'topics', 'screamer', 'words', 'off_dis',
    'not_off_dis'
]
X_nlp = nlp_feature_extractions.extract_features(users_data, feature_list)
y_nlp = (users_data['cb_level'] == 3).astype(int)
Code example #34
import os
import matplotlib.pyplot as plt
import graph

def parse_data(directory):
    network = {}
    people = os.listdir(directory)
    for filename in people:
        name = filename
        if filename.endswith('.txt'):
            name = filename[:-4].strip()
        name = name.replace('.',' ').lower()
        acquaintances = []
        # use a context manager so the file handle is closed promptly
        with open(os.path.join(directory, filename)) as f:
            for name_connected in f:
                acquaintances.append(name_connected.strip().lower())
        network[name] = acquaintances
    return network

if __name__ == '__main__':

    network = parse_data(directory='people')
    plt.figure()
    plt.title("Python Social Network 2015", fontsize='xx-large')
    graph.create_graph(network)

    plt.show()
Code example #35
    def validation(self):
        """ Perform validation on the dev set with saved model checkpoints. """
        ckpt_batch_idx = self.validation_setup()
        self.validation_loop(ckpt_batch_idx)
        create_graph()
Code example #36
    def plan_path(self):
        self.flight_state = States.PLANNING
        print("Searching for a path ...")
        TARGET_ALTITUDE = 5
        SAFETY_DISTANCE = 5

        self.target_position[2] = TARGET_ALTITUDE

        # TODO: read lat0, lon0 from colliders into floating point values 
        # read the first line and extract only the latitude and longitude vals
        with open('colliders.csv', 'r') as f:
            temp = f.readline()
            lat, lon = temp.replace('lat0 ', '').replace('lon0', '').split(', ')
            lat, lon = float(lat), float(lon)
        
        # TODO: set home position to (lon0, lat0, 0)
        self.set_home_position(lon, lat, 0)

        # TODO: retrieve current global position
        curr_global_pos = self.global_position
 
        # TODO: convert to current local position using global_to_local()
        curr_local_pos = global_to_local(curr_global_pos, self.global_home)
        print("Current Local position {}".format(curr_local_pos))
        
        # start_local = int(curr_local_pos[0]), int(curr_local_pos[1]) #int(start_local[0]), int(start_local[1])
        # print(start_local, end_local)

        print('global home {0}, position {1}, local position {2}'.format(self.global_home, self.global_position,
                                                                         self.local_position))
        # Read in obstacle map
        data = np.loadtxt('colliders.csv', delimiter=',', dtype='Float64', skiprows=2)
        
        # Define a grid for a particular altitude and safety margin around obstacles
        grid, north_offset, east_offset = create_grid(data, TARGET_ALTITUDE, SAFETY_DISTANCE)
        print("North offset = {0}, east offset = {1}".format(north_offset, east_offset))

        # # Define starting point on the grid (this is just grid center)
        # grid_start = start_local
        # # TODO: convert start position to current position rather than map center
        grid_start = (int(curr_local_pos[0]-north_offset), int(curr_local_pos[1]-east_offset))
        
        # # Set goal as some arbitrary position on the grid
        # grid_goal = end_local
        # TODO: adapt to set goal as latitude / longitude position and convert
        end_geo = self.goal
        end_local = global_to_local(end_geo, self.global_home)
        grid_goal = end_local[0]-north_offset, end_local[1]-east_offset
        grid_goal = int(grid_goal[0]), int(grid_goal[1])

        # Run A* to find a path from start to goal
        # TODO: add diagonal motions with a cost of sqrt(2) to your A* implementation
        # or move to a different search space such as a graph (not done here)
        print('Grid Start and Goal: ', grid_start, grid_goal)

        # TODO (if you're feeling ambitious): Try a different approach altogether!
        if self.planner == 1:
            path, _ = a_star(grid, heuristic, grid_start, grid_goal)

            # TODO: prune path to minimize number of waypoints
            path = prune_path(path)
        else:
            if self.planner == 2:
                sampler = Sampler(data)
                polygons = sampler._polygons
                # Example: sampling 100 points and removing
                # ones conflicting with obstacles.
                nodes = sampler.sample(300)
                print(nodes[0])

            elif self.planner == 3:
                pass

            elif self.planner == 4:    
                pass

            #create the graph and calculate the a_star
            t0 = time.time()
            print('building graph ... ', )
            g = create_graph(nodes, 10, polygons)
            print('graph took {0} seconds to build'.format(time.time()-t0))
            start = closest_point(g, (grid_start[0], grid_start[1], TARGET_ALTITUDE))
            goal = closest_point(g, (grid_goal[0], grid_goal[1], TARGET_ALTITUDE))
            print(start, goal)
            

            # print(start, start_ne)
            print('finding path ... ', )
            path, cost = a_star_graph(g, heuristic, start, goal)
            print('done. path size and cost: {}'.format((len(path), cost)))
            # print(len(path), path)
            # path_pairs = zip(path[:-1], path[1:])

        # Convert path to waypoints
        waypoints = [[p[0] + north_offset, p[1] + east_offset, TARGET_ALTITUDE, 0] for p in path]
        print(waypoints)
        # Set self.waypoints
        self.waypoints = waypoints
        # TODO: send waypoints to sim (this is just for visualization of waypoints)
        self.send_waypoints()
Code example #37
                distance_est[destination] = distance_est[source] + w[edge]
                spt_predecessor[destination] = source

    return distance_est, spt_predecessor


if __name__ == '__main__':
    import graph

    E = [('a', 'b', 4), ('b', 'd', 10),
         ('d', 'f', 11), ('b', 'c', 5),
         ('a', 'c', 2), ('c', 'e', 3),
         ('e', 'd', 4)]
    print('Shortest paths from a for graph')
    print(E)
    G, weight_mapping = graph.create_graph(E, is_directed=True)
    start_vertex = G.get_vertex('a')
    d, p = dag_shortest_paths(G, weight_mapping, start_vertex)
    for key in p:
        if p[key] is None:
            print(key.element() + ' is the start vertex')
        else:
            path = [key]
            vertex = p[key]
            while vertex is not None:
                path.insert(0, vertex)
                vertex = p[vertex]
            elements = [vertex.element() for vertex in path]
            print('Shortest path to ' + key.element() + ' with value: ' + str(d[key]))
            x = '->'.join(elements)
            print(x)