Example no. 1
def run_experiments(data, ground_truth, network_num):
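    """Run every experiment for one network and append the results to results_file.

    Covers the three Timerank connection schemes ('one', 'all', 'next'), each with
    default_q=True and default_q=False, plus NNTF and GED, all evaluated against
    ground_truth with the "dynamic", "sets" and "per_tf" metrics.
    """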
    all_res = []
    # Timerank with one connection - default q
    mutu4 = Muturank_new(data.graphs,
                         threshold=1e-6,
                         alpha=0.85,
                         beta=0.85,
                         connection='one',
                         clusters=len(ground_truth),
                         default_q=True)
    all_res.append(
        evaluate.get_results(ground_truth,
                             mutu4.dynamic_coms,
                             "Timerank with one connection - default q",
                             mutu4.tfs,
                             eval="dynamic",
                             duration=mutu4.duration))
    all_res.append(
        evaluate.get_results(ground_truth,
                             mutu4.dynamic_coms,
                             "Timerank with one connection - default q",
                             mutu4.tfs,
                             eval="sets",
                             duration=mutu4.duration))
    all_res.append(
        evaluate.get_results(ground_truth,
                             mutu4.dynamic_coms,
                             "Timerank with one connection - default q",
                             mutu4.tfs,
                             eval="per_tf",
                             duration=mutu4.duration))
    f = open(results_file, 'a')
    f.write(
        tabulate(all_res, headers="keys", tablefmt="fancy_grid").encode('utf8')
        + "\n")
    f.close()
    # Timerank with all connections - default q
    mutu5 = Muturank_new(data.graphs,
                         threshold=1e-6,
                         alpha=0.85,
                         beta=0.85,
                         connection='all',
                         clusters=len(ground_truth),
                         default_q=True)
    all_res.append(
        evaluate.get_results(ground_truth,
                             mutu5.dynamic_coms,
                             "Timerank with all connections - default q",
                             mutu5.tfs,
                             eval="dynamic",
                             duration=mutu5.duration))
    all_res.append(
        evaluate.get_results(ground_truth,
                             mutu5.dynamic_coms,
                             "Timerank with all connections - default "
                             "q",
                             mutu5.tfs,
                             eval="sets",
                             duration=mutu5.duration))
    all_res.append(
        evaluate.get_results(ground_truth,
                             mutu5.dynamic_coms,
                             "Timerank with all connections - default "
                             "q",
                             mutu5.tfs,
                             eval="per_tf",
                             duration=mutu5.duration))
    f = open(results_file, 'a')
    f.write(
        tabulate(all_res, headers="keys", tablefmt="fancy_grid").encode('utf8')
        + "\n")
    f.close()
    # Timerank with next connection - default q
    mutu6 = Muturank_new(data.graphs,
                         threshold=1e-6,
                         alpha=0.85,
                         beta=0.85,
                         connection='next',
                         clusters=len(ground_truth),
                         default_q=True)
    all_res.append(
        evaluate.get_results(ground_truth,
                             mutu6.dynamic_coms,
                             "Timerank with next connection - default q",
                             mutu6.tfs,
                             eval="dynamic",
                             duration=mutu6.duration))
    all_res.append(
        evaluate.get_results(ground_truth,
                             mutu6.dynamic_coms,
                             "Timerank with next connection - default q",
                             mutu6.tfs,
                             eval="sets",
                             duration=mutu6.duration))
    all_res.append(
        evaluate.get_results(ground_truth,
                             mutu6.dynamic_coms,
                             "Timerank with next connection - default q",
                             mutu6.tfs,
                             eval="per_tf",
                             duration=mutu6.duration))
    f = open(results_file, 'a')
    f.write(
        tabulate(all_res, headers="keys", tablefmt="fancy_grid").encode('utf8')
        + "\n")
    f.close()
    # NNTF
    fact = TensorFact(data.graphs,
                      num_of_coms=len(ground_truth),
                      threshold=1e-4,
                      seeds=10,
                      overlap=False)
    all_res.append(
        evaluate.get_results(ground_truth,
                             fact.dynamic_coms,
                             "NNTF",
                             mutu6.tfs,
                             eval="dynamic",
                             duration=fact.duration))
    all_res.append(
        evaluate.get_results(ground_truth,
                             fact.dynamic_coms,
                             "NNTF",
                             mutu6.tfs,
                             eval="sets",
                             duration=fact.duration))
    all_res.append(
        evaluate.get_results(ground_truth,
                             fact.dynamic_coms,
                             "NNTF",
                             mutu6.tfs,
                             eval="per_tf",
                             duration=fact.duration))
    f = open(results_file, 'a')
    f.write(
        tabulate(all_res, headers="keys", tablefmt="fancy_grid").encode('utf8')
        + "\n")
    f.close()
    with open(results_file, 'a') as f:
        f.write("NNTF\n")
        f.write("Error: " + str(fact.error) + "Seed: " + str(fact.best_seed) +
                "\n")
        f.write("A\n")
        pprint.pprint(fact.A, stream=f, width=150)
        f.write("B\n")
        pprint.pprint(fact.B, stream=f, width=150)
        f.write("C\n")
        pprint.pprint(fact.C, stream=f, width=150)
        pprint.pprint(fact.dynamic_coms, stream=f, width=150)
    # GED
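    # Write the communities in the GED input format, run the external tracker from
    # ../GED/ to detect events between timeframes, then read the dynamic communities back.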
    import sys
    sys.path.insert(0, '../GED/')
    import preprocessing, Tracker
    start_time = time.time()
    from ged import GedWrite, ReadGEDResults
    ged_data = GedWrite(data)
    graphs = preprocessing.getGraphs(ged_data.fileName)
    tracker = Tracker.Tracker(graphs)
    tracker.compare_communities()
    #outfile = 'tmpfiles/ged_results.csv'
    outfile = './results/GED-events-handdrawn-' + str(network_num) + '.csv'
    with open(outfile, 'w') as f:
        for hypergraph in tracker.hypergraphs:
            hypergraph.calculateEvents(f)
    ged_time = str(datetime.timedelta(seconds=int(time.time() - start_time)))
    print "--- %s seconds ---" % (ged_time)
    ged = ReadGEDResults.ReadGEDResults(file_coms=ged_data.fileName,
                                        file_output=outfile)
    with open(results_file, 'a') as f:
        f.write("GED\n")
        pprint.pprint(ged.dynamic_coms, stream=f, width=150)
    all_res.append(
        evaluate.get_results(ground_truth,
                             ged.dynamic_coms,
                             "GED",
                             mutu6.tfs,
                             eval="dynamic",
                             duration=ged_time))
    all_res.append(
        evaluate.get_results(ground_truth,
                             ged.dynamic_coms,
                             "GED",
                             mutu6.tfs,
                             eval="sets",
                             duration=ged_time))
    f = open(results_file, 'a')
    f.write(
        tabulate(all_res, headers="keys", tablefmt="fancy_grid").encode('utf8')
        + "\n")
    f.close()
    #all_res.append(evaluate.get_results(ground_truth, ged.dynamic_coms, "GED", mutu6.tfs, eval="per_tf"))
    # Run Timerank - One connection
    mutu1 = Muturank_new(data.graphs,
                         threshold=1e-6,
                         alpha=0.85,
                         beta=0.85,
                         connection='one',
                         clusters=len(ground_truth),
                         default_q=False)
    all_res.append(
        evaluate.get_results(ground_truth,
                             mutu1.dynamic_coms,
                             "Timerank with one connection",
                             mutu1.tfs,
                             eval="dynamic",
                             duration=mutu1.duration))
    all_res.append(
        evaluate.get_results(ground_truth,
                             mutu1.dynamic_coms,
                             "Timerank with one connection",
                             mutu1.tfs,
                             eval="sets",
                             duration=mutu1.duration))
    all_res.append(
        evaluate.get_results(ground_truth,
                             mutu1.dynamic_coms,
                             "Timerank with one connection",
                             mutu1.tfs,
                             eval="per_tf",
                             duration=mutu1.duration))
    f = open(results_file, 'a')
    f.write(
        tabulate(all_res, headers="keys", tablefmt="fancy_grid").encode('utf8')
        + "\n")
    f.close()
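    # Tabulate the Timerank node relevance vector p_new (one row per timeframe,
    # one column per node) together with the timeframe relevance vector q_new.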
    muturank_res = OrderedDict()
    muturank_res["tf/node"] = ['t' + str(tf) for tf in mutu1.tfs_list]
    for i, node in enumerate(mutu1.node_ids):
        muturank_res[node] = [
            mutu1.p_new[tf * len(mutu1.node_ids) + i]
            for tf in range(mutu1.tfs)
        ]
    f = open(results_file, 'a')
    f.write("ONE CONNECTION\n")
    f.write(
        tabulate(muturank_res, headers="keys", tablefmt="fancy_grid").encode(
            'utf8') + "\n")
    f.write(
        tabulate(zip(['t' + str(tf) for tf in mutu1.tfs_list], mutu1.q_new),
                 headers="keys",
                 tablefmt="fancy_grid").encode('utf8') + "\n")
    f.close()

    # Timerank with all connections
    mutu2 = Muturank_new(data.graphs,
                         threshold=1e-6,
                         alpha=0.85,
                         beta=0.85,
                         connection='all',
                         clusters=len(ground_truth),
                         default_q=False)
    all_res.append(
        evaluate.get_results(ground_truth,
                             mutu2.dynamic_coms,
                             "Timerank with all connections",
                             mutu2.tfs,
                             eval="dynamic",
                             duration=mutu2.duration))
    all_res.append(
        evaluate.get_results(ground_truth,
                             mutu2.dynamic_coms,
                             "Timerank with all connections",
                             mutu2.tfs,
                             eval="sets",
                             duration=mutu2.duration))
    all_res.append(
        evaluate.get_results(ground_truth,
                             mutu2.dynamic_coms,
                             "Timerank with all connections",
                             mutu2.tfs,
                             eval="per_tf",
                             duration=mutu2.duration))
    f = open(results_file, 'a')
    f.write(
        tabulate(all_res, headers="keys", tablefmt="fancy_grid").encode('utf8')
        + "\n")
    f.close()
    muturank_res = OrderedDict()
    muturank_res["tf/node"] = ['t' + str(tf) for tf in mutu2.tfs_list]
    for i, node in enumerate(mutu2.node_ids):
        muturank_res[node] = [
            mutu2.p_new[tf * len(mutu2.node_ids) + i]
            for tf in range(mutu2.tfs)
        ]
    f = open(results_file, 'a')
    f.write("ALL CONNECTIONS\n")
    f.write(
        tabulate(muturank_res, headers="keys", tablefmt="fancy_grid").encode(
            'utf8') + "\n")
    f.write(
        tabulate(zip(['t' + str(tf) for tf in mutu2.tfs_list], mutu2.q_new),
                 headers="keys",
                 tablefmt="fancy_grid").encode('utf8') + "\n")
    f.close()
    # Timerank with next connection
    mutu3 = Muturank_new(data.graphs,
                         threshold=1e-6,
                         alpha=0.85,
                         beta=0.85,
                         connection='next',
                         clusters=len(ground_truth),
                         default_q=False)
    all_res.append(
        evaluate.get_results(ground_truth,
                             mutu3.dynamic_coms,
                             "Timerank with next connection",
                             mutu3.tfs,
                             eval="dynamic",
                             duration=mutu3.duration))
    all_res.append(
        evaluate.get_results(ground_truth,
                             mutu3.dynamic_coms,
                             "Timerank with next connection",
                             mutu3.tfs,
                             eval="sets",
                             duration=mutu3.duration))
    all_res.append(
        evaluate.get_results(ground_truth,
                             mutu3.dynamic_coms,
                             "Timerank with next connection",
                             mutu3.tfs,
                             eval="per_tf",
                             duration=mutu3.duration))
    f = open(results_file, 'a')
    f.write(
        tabulate(all_res, headers="keys", tablefmt="fancy_grid").encode('utf8')
        + "\n")
    f.close()
    muturank_res = OrderedDict()
    muturank_res["tf/node"] = ['t' + str(tf) for tf in mutu3.tfs_list]
    for i, node in enumerate(mutu3.node_ids):
        muturank_res[node] = [
            mutu3.p_new[tf * len(mutu3.node_ids) + i]
            for tf in range(mutu3.tfs)
        ]
    f = open(results_file, 'a')
    f.write("NEXT CONNECTION\n")
    f.write(
        tabulate(muturank_res, headers="keys", tablefmt="fancy_grid").encode(
            'utf8') + "\n")
    f.write(
        tabulate(zip(['t' + str(tf) for tf in mutu3.tfs_list], mutu3.q_new),
                 headers="keys",
                 tablefmt="fancy_grid").encode('utf8') + "\n")
    f.write("GROUND TRUTH\n")
    pprint.pprint(ground_truth, stream=f, width=150)
    f.write("ONE CONNECTION\n")
    pprint.pprint(mutu1.dynamic_coms, stream=f, width=150)
    f.write("ALL CONNECTIONS\n")
    pprint.pprint(mutu2.dynamic_coms, stream=f, width=150)
    f.write("NEXT CONNECTION\n")
    pprint.pprint(mutu3.dynamic_coms, stream=f, width=150)
    f.close()
    return all_res

def run_experiments(data, ground_truth, network_num):
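    """Run every experiment for one hand-drawn network and append the results to results_hand.txt.

    Same Timerank variants as above, plus NNTF (standard, on the Timerank-weighted
    tensor, and with overlap) and two GED runs (on the input data and on the
    Timerank communities); the wall-clock time of every method is also printed.
    """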
    times = []
    all_res = []
    # Timerank with one connection - default q
    start_time = time.time()
    mutu4 = Muturank_new(data.graphs,
                         threshold=1e-6,
                         alpha=0.85,
                         beta=0.85,
                         connection='one',
                         clusters=len(ground_truth),
                         default_q=True)
    all_res.append(
        evaluate.get_results(ground_truth,
                             mutu4.dynamic_coms,
                             "Timerank-STC-Uni",
                             mutu4.tfs,
                             eval="dynamic",
                             duration=mutu4.duration))
    all_res.append(
        evaluate.get_results(ground_truth,
                             mutu4.dynamic_coms,
                             "Timerank-STC-Uni",
                             mutu4.tfs,
                             eval="sets",
                             duration=mutu4.duration))
    all_res.append(
        evaluate.get_results(ground_truth,
                             mutu4.dynamic_coms,
                             "Timerank-STC-Uni",
                             mutu4.tfs,
                             eval="per_tf",
                             duration=mutu4.duration))
    duration = time.time() - start_time
    print("Timerank with one connection - default q: TIME: %d min, %d sec" %
          (duration // 60, duration % 60))
    times.append(duration)

    # Timerank with all connections - default q
    start_time = time.time()
    mutu5 = Muturank_new(data.graphs,
                         threshold=1e-6,
                         alpha=0.85,
                         beta=0.85,
                         connection='all',
                         clusters=len(ground_truth),
                         default_q=True)
    all_res.append(
        evaluate.get_results(ground_truth,
                             mutu5.dynamic_coms,
                             "Timerank-AOC-Uni",
                             mutu5.tfs,
                             eval="dynamic",
                             duration=mutu5.duration))
    all_res.append(
        evaluate.get_results(ground_truth,
                             mutu5.dynamic_coms,
                             "Timerank-AOC-Uni",
                             mutu5.tfs,
                             eval="sets",
                             duration=mutu5.duration))
    all_res.append(
        evaluate.get_results(ground_truth,
                             mutu5.dynamic_coms,
                             "Timerank-AOC-Uni",
                             mutu5.tfs,
                             eval="per_tf",
                             duration=mutu5.duration))
    duration = time.time() - start_time
    print("Timerank with all connection - default q: TIME: %d min, %d sec" %
          (duration // 60, duration % 60))
    times.append(duration)

    # Timerank with next connection - default q
    start_time = time.time()
    mutu6 = Muturank_new(data.graphs,
                         threshold=1e-6,
                         alpha=0.85,
                         beta=0.85,
                         connection='next',
                         clusters=len(ground_truth),
                         default_q=True)
    all_res.append(
        evaluate.get_results(ground_truth,
                             mutu6.dynamic_coms,
                             "Timerank-NOC-Uni",
                             mutu6.tfs,
                             eval="dynamic",
                             duration=mutu6.duration))
    all_res.append(
        evaluate.get_results(ground_truth,
                             mutu6.dynamic_coms,
                             "Timerank-NOC-Uni",
                             mutu6.tfs,
                             eval="sets",
                             duration=mutu6.duration))
    all_res.append(
        evaluate.get_results(ground_truth,
                             mutu6.dynamic_coms,
                             "Timerank-NOC-Uni",
                             mutu6.tfs,
                             eval="per_tf",
                             duration=mutu6.duration))
    duration = time.time() - start_time
    print("Timerank with next connection - default q: TIME: %d min, %d sec" %
          (duration // 60, duration % 60))
    times.append(duration)

    # Run Timerank - One connection
    start_time = time.time()
    mutu1 = Muturank_new(data.graphs,
                         threshold=1e-6,
                         alpha=0.85,
                         beta=0.85,
                         connection='one',
                         clusters=len(ground_truth),
                         default_q=False)
    all_res.append(
        evaluate.get_results(ground_truth,
                             mutu1.dynamic_coms,
                             "Timerank-STC",
                             mutu1.tfs,
                             eval="dynamic",
                             duration=mutu1.duration))
    all_res.append(
        evaluate.get_results(ground_truth,
                             mutu1.dynamic_coms,
                             "Timerank-STC",
                             mutu1.tfs,
                             eval="sets",
                             duration=mutu1.duration))
    all_res.append(
        evaluate.get_results(ground_truth,
                             mutu1.dynamic_coms,
                             "Timerank-STC",
                             mutu1.tfs,
                             eval="per_tf",
                             duration=mutu1.duration))
    duration = time.time() - start_time
    print("Timerank with one connection: TIME: %d min, %d sec" %
          (duration // 60, duration % 60))
    times.append(duration)

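    # Dump the node relevance scores p_new (one row per timeframe, one column per
    # node) and the timeframe relevance vector q_new for each connection scheme.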
    muturank_res = OrderedDict()
    muturank_res["tf/node"] = ['t' + str(tf) for tf in mutu1.tfs_list]
    for i, node in enumerate(mutu1.node_ids):
        muturank_res[node] = [
            mutu1.p_new[tf * len(mutu1.node_ids) + i]
            for tf in range(mutu1.tfs)
        ]
    f = open('results_hand.txt', 'a')
    f.write("ONE CONNECTION\n")
    f.write(tabulate(muturank_res, headers="keys", tablefmt="grid") + "\n")
    f.write(
        tabulate(zip(['t' + str(tf) for tf in mutu1.tfs_list], mutu1.q_new),
                 headers="keys",
                 tablefmt="grid") + "\n")
    f.close()

    # Timerank with all connections
    start_time = time.time()
    mutu2 = Muturank_new(data.graphs,
                         threshold=1e-6,
                         alpha=0.85,
                         beta=0.85,
                         connection='all',
                         clusters=len(ground_truth),
                         default_q=False)
    all_res.append(
        evaluate.get_results(ground_truth,
                             mutu2.dynamic_coms,
                             "Timerank-AOC",
                             mutu2.tfs,
                             eval="dynamic",
                             duration=mutu2.duration))
    all_res.append(
        evaluate.get_results(ground_truth,
                             mutu2.dynamic_coms,
                             "Timerank-AOC",
                             mutu2.tfs,
                             eval="sets",
                             duration=mutu2.duration))
    all_res.append(
        evaluate.get_results(ground_truth,
                             mutu2.dynamic_coms,
                             "Timerank-AOC",
                             mutu2.tfs,
                             eval="per_tf",
                             duration=mutu2.duration))
    duration = time.time() - start_time
    print("Timerank with all connection: TIME: %d min, %d sec" %
          (duration // 60, duration % 60))
    times.append(duration)

    muturank_res = OrderedDict()
    muturank_res["tf/node"] = ['t' + str(tf) for tf in mutu2.tfs_list]
    for i, node in enumerate(mutu2.node_ids):
        muturank_res[node] = [
            mutu2.p_new[tf * len(mutu2.node_ids) + i]
            for tf in range(mutu2.tfs)
        ]
    f = open('results_hand.txt', 'a')
    f.write("ALL CONNECTIONS\n")
    f.write(tabulate(muturank_res, headers="keys", tablefmt="grid") + "\n")
    f.write(
        tabulate(zip(['t' + str(tf) for tf in mutu2.tfs_list], mutu2.q_new),
                 headers="keys",
                 tablefmt="grid") + "\n")
    f.close()

    # Timerank with next connection
    start_time = time.time()
    mutu3 = Muturank_new(data.graphs,
                         threshold=1e-6,
                         alpha=0.85,
                         beta=0.85,
                         connection='next',
                         clusters=len(ground_truth),
                         default_q=False)
    all_res.append(
        evaluate.get_results(ground_truth,
                             mutu3.dynamic_coms,
                             "Timerank-NOC",
                             mutu3.tfs,
                             eval="dynamic",
                             duration=mutu3.duration))
    all_res.append(
        evaluate.get_results(ground_truth,
                             mutu3.dynamic_coms,
                             "Timerank-NOC",
                             mutu3.tfs,
                             eval="sets",
                             duration=mutu3.duration))
    all_res.append(
        evaluate.get_results(ground_truth,
                             mutu3.dynamic_coms,
                             "Timerank-NOC",
                             mutu3.tfs,
                             eval="per_tf",
                             duration=mutu3.duration))
    duration = time.time() - start_time
    print("Timerank with next connection: TIME: %d min, %d sec" %
          (duration // 60, duration % 60))
    times.append(duration)

    muturank_res = OrderedDict()
    muturank_res["tf/node"] = ['t' + str(tf) for tf in mutu3.tfs_list]
    for i, node in enumerate(mutu3.node_ids):
        muturank_res[node] = [
            mutu3.p_new[tf * len(mutu3.node_ids) + i]
            for tf in range(mutu3.tfs)
        ]
    f = open('results_hand.txt', 'a')
    f.write("NEXT CONNECTION\n")
    f.write(tabulate(muturank_res, headers="keys", tablefmt="grid") + "\n")
    f.write(
        tabulate(zip(['t' + str(tf) for tf in mutu3.tfs_list], mutu3.q_new),
                 headers="keys",
                 tablefmt="grid") + "\n")
    f.write("GROUND TRUTH\n")
    pprint.pprint(ground_truth, stream=f, width=150)
    f.write("ONE CONNECTION\n")
    pprint.pprint(mutu1.dynamic_coms, stream=f, width=150)
    f.write("ALL CONNECTIONS\n")
    pprint.pprint(mutu2.dynamic_coms, stream=f, width=150)
    f.write("NEXT CONNECTION\n")
    pprint.pprint(mutu3.dynamic_coms, stream=f, width=150)
    f.close()

    # NNTF
    start_time = time.time()
    fact = TensorFact(data.graphs,
                      num_of_coms=len(ground_truth),
                      threshold=1e-4,
                      seeds=1,
                      overlap=False)
    fact_dur = time.time() - start_time
    fact_dur = "%d:%d" % (fact_dur // 60, fact_dur % 60)
    all_res.append(
        evaluate.get_results(ground_truth,
                             fact.dynamic_coms,
                             "NNTF",
                             mutu6.tfs,
                             eval="dynamic",
                             duration=fact_dur))
    all_res.append(
        evaluate.get_results(ground_truth,
                             fact.dynamic_coms,
                             "NNTF",
                             mutu6.tfs,
                             eval="sets",
                             duration=fact_dur))
    all_res.append(
        evaluate.get_results(ground_truth,
                             fact.dynamic_coms,
                             "NNTF",
                             mutu6.tfs,
                             eval="per_tf",
                             duration=fact_dur))
    duration = time.time() - start_time
    print("NNTF: TIME: %d min, %d sec" % (duration // 60, duration % 60))
    times.append(duration)

    with open('results_hand.txt', 'a') as f:
        f.write("NNTF\n")
        f.write("Error: " + str(fact.error) + "Seed: " + str(fact.best_seed) +
                "\n")
        f.write("A\n")
        pprint.pprint(fact.A, stream=f, width=150)
        f.write("B\n")
        pprint.pprint(fact.B, stream=f, width=150)
        f.write("C\n")
        pprint.pprint(fact.C, stream=f, width=150)
        pprint.pprint(fact.dynamic_coms, stream=f, width=150)

    # NNTF-Timerank tensor
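    # Rebuild NetworkX graphs from the Muturank-weighted adjacency matrices in
    # mutu1.a so that the factorisation runs on the Timerank-weighted tensor
    # rather than on the raw input graphs.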
    new_graphs = {}
    for i, A in mutu1.a.items():
        new_graphs[i] = nx.from_scipy_sparse_matrix(A)
    start_time = time.time()
    fact2 = TensorFact(new_graphs,
                       num_of_coms=len(ground_truth),
                       threshold=1e-4,
                       seeds=1,
                       overlap=False,
                       original_graphs=data.graphs)
    fact_dur = time.time() - start_time
    fact_dur = "%d:%d" % (fact_dur // 60, fact_dur % 60)
    all_res.append(
        evaluate.get_results(ground_truth,
                             fact2.dynamic_coms,
                             "NNTF-Timerank tensor",
                             mutu6.tfs,
                             eval="dynamic",
                             duration=fact_dur))
    all_res.append(
        evaluate.get_results(ground_truth,
                             fact2.dynamic_coms,
                             "NNTF-Timerank tensor",
                             mutu6.tfs,
                             eval="sets",
                             duration=fact_dur))
    all_res.append(
        evaluate.get_results(ground_truth,
                             fact2.dynamic_coms,
                             "NNTF-Timerank tensor",
                             mutu6.tfs,
                             eval="per_tf",
                             duration=fact_dur))
    duration = time.time() - start_time
    print("NNTF-Timerank tensor: TIME: %d min, %d sec" %
          (duration // 60, duration % 60))
    times.append(duration)

    with open('results_hand.txt', 'a') as f:
        f.write("NNTF\n")
        f.write("Error: " + str(fact2.error) + "Seed: " +
                str(fact2.best_seed) + "\n")
        f.write("A\n")
        pprint.pprint(fact2.A, stream=f, width=150)
        f.write("B\n")
        pprint.pprint(fact2.B, stream=f, width=150)
        f.write("C\n")
        pprint.pprint(fact2.C, stream=f, width=150)
        pprint.pprint(fact2.dynamic_coms, stream=f, width=150)

    # NNTF-Overlap
    start_time = time.time()
    fact = TensorFact(data.graphs,
                      num_of_coms=len(ground_truth),
                      threshold=1e-4,
                      seeds=1,
                      overlap=True)
    fact_dur = time.time() - start_time
    fact_dur = "%d:%d" % (fact_dur // 60, fact_dur % 60)
    all_res.append(
        evaluate.get_results(ground_truth,
                             fact.dynamic_coms,
                             "NNTF-Overlap",
                             mutu6.tfs,
                             eval="dynamic",
                             duration=fact_dur))
    all_res.append(
        evaluate.get_results(ground_truth,
                             fact.dynamic_coms,
                             "NNTF-Overlap",
                             mutu6.tfs,
                             eval="sets",
                             duration=fact_dur))
    all_res.append(
        evaluate.get_results(ground_truth,
                             fact.dynamic_coms,
                             "NNTF-Overlap",
                             mutu6.tfs,
                             eval="per_tf",
                             duration=fact_dur))
    duration = time.time() - start_time
    print("NNTF-Overlap: TIME: %d min, %d sec" %
          (duration // 60, duration % 60))
    times.append(duration)

    with open('results_hand.txt', 'a') as f:
        f.write("NNTF-Overlap\n")
        f.write("Error: " + str(fact.error) + "Seed: " + str(fact.best_seed) +
                "\n")
        f.write("A\n")
        pprint.pprint(fact.A, stream=f, width=150)
        f.write("B\n")
        pprint.pprint(fact.B, stream=f, width=150)
        f.write("C\n")
        pprint.pprint(fact.C, stream=f, width=150)
        pprint.pprint(fact.dynamic_coms, stream=f, width=150)

    # GED
    import os
    import sys
    sys.path.insert(0, '../GED/')
    import preprocessing, Tracker
    start_time = time.time()
    from ged import GedWrite, ReadGEDResults
    ged_data = GedWrite(data)
    graphs = preprocessing.getGraphs(ged_data.fileName)
    tracker = Tracker.Tracker(graphs)
    tracker.compare_communities()
    if not os.path.exists('results'):
        os.makedirs('results')
    outfile = os.path.join('results',
                           'GED-events-handdrawn' + str(network_num) + '.csv')

    with open(outfile, 'w+') as f:
        for hypergraph in tracker.hypergraphs:
            hypergraph.calculateEvents(f)

    ged = ReadGEDResults.ReadGEDResults(file_coms=ged_data.fileName,
                                        file_output=outfile)
    ged_dur = time.time() - start_time
    ged_dur = "%d:%d" % (ged_dur // 60, ged_dur % 60)
    with open('results_hand.txt', 'a') as f:
        f.write("GED\n")
        pprint.pprint(ged.dynamic_coms, stream=f, width=150)
    all_res.append(
        evaluate.get_results(ground_truth,
                             ged.dynamic_coms,
                             "GED-T",
                             mutu6.tfs,
                             eval="dynamic",
                             duration=ged_dur))
    all_res.append(
        evaluate.get_results(ground_truth,
                             ged.dynamic_coms,
                             "GED-T",
                             mutu6.tfs,
                             eval="sets",
                             duration=ged_dur))
    all_res.append(
        evaluate.get_results(ground_truth,
                             ged.dynamic_coms,
                             "GED-T",
                             mutu6.tfs,
                             eval="per_tf",
                             duration=ged_dur))
    duration = time.time() - start_time
    print("GED-T: TIME: %d min, %d sec" % (duration // 60, duration % 60))
    times.append(duration)

    # GED with Timerank communities: rerun the GED tracker on the communities
    # detected by Timerank (mutu1) instead of the ground-truth communities.
    start_time = time.time()
    ged_data = GedWrite(
        Data(mutu1.comms, data.graphs, len(graphs), len(mutu1.dynamic_coms),
             mutu1.dynamic_coms))
    graphs = preprocessing.getGraphs(ged_data.fileName)
    tracker = Tracker.Tracker(graphs)
    tracker.compare_communities()
    outfile = './results/GED-events-handdrawn-' + str(network_num) + '.csv'

    with open(outfile, 'w') as f:
        for hypergraph in tracker.hypergraphs:
            hypergraph.calculateEvents(f)

    ged = ReadGEDResults.ReadGEDResults(file_coms=ged_data.fileName,
                                        file_output=outfile)
    ged_dur = time.time() - start_time
    ged_dur = "%d:%d" % (ged_dur // 60, ged_dur % 60)
    with open('results_hand.txt', 'a') as f:
        f.write("GED\n")
        pprint.pprint(ged.dynamic_coms, stream=f, width=150)
    all_res.append(
        evaluate.get_results(ground_truth,
                             ged.dynamic_coms,
                             "GED - with Timerank comms",
                             mutu6.tfs,
                             eval="dynamic",
                             duration=ged_dur))
    all_res.append(
        evaluate.get_results(ground_truth,
                             ged.dynamic_coms,
                             "GED - with Timerank comms",
                             mutu6.tfs,
                             eval="sets",
                             duration=ged_dur))
    all_res.append(
        evaluate.get_results(ground_truth,
                             ged.dynamic_coms,
                             "GED - with Timerank comms",
                             mutu6.tfs,
                             eval="per_tf",
                             duration=ged_dur))
    duration = time.time() - start_time
    print("GED - with Timerank comms: TIME: %d min, %d sec" %
          (duration // 60, duration % 60))
    times.append(duration)

    print("TOTAL TIME: %d min, %d sec" % (sum(times) // 60, sum(times) % 60))
    return all_res
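

# --- Hypothetical usage sketch (assumption; not part of the original script) ---
# Both variants expect a dataset object that exposes the per-timeframe NetworkX
# graphs as `data.graphs` and a dict of ground-truth communities, e.g.:
#
#     data = load_network(network_num=1)     # hypothetical loader
#     ground_truth = data.comms              # hypothetical attribute
#     all_res = run_experiments(data, ground_truth, network_num=1)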