Example #1
def demand_tracking(algo, problems, time_per_prob, residual_factor,
                    results_file_placeholder):
    # Track satisfied demand across a sequence of TM snapshots under a
    # per-snapshot time budget: while the solver is still running, later
    # snapshots fall back to the stale solution, and their unmet demand
    # (scaled by residual_factor) rolls into the next traffic matrix.
    results_fname = results_file_placeholder.format(problems[0].name,
                                                    residual_factor, algo.name)
    print(results_fname)
    with open(results_fname, 'w') as w:
        print_(RESULTS_HEADER, file=w)

        i = 0
        curr_sol_dict = None

        while i < len(problems):
            problem = problems[i]
            print_('\nProblem {}'.format(i))
            algo.solve(problem)
            runtime = algo.runtime
            while runtime > time_per_prob and i < len(problems):
                satisfied_demand, residual_tm = compute_satisfied_demand(
                    problems[i], curr_sol_dict, residual_factor)
                print_('{},{},{},False,NaN'.format(i, problems[i].total_demand,
                                                   satisfied_demand),
                       file=w)
                runtime -= time_per_prob
                i += 1
                if i < len(problems):
                    problems[i].traffic_matrix.tm += residual_tm

            if i >= len(problems):
                break
            curr_sol_dict = algo.sol_dict
            satisfied_demand, residual_tm = compute_satisfied_demand(
                problems[i], curr_sol_dict, residual_factor)
            print_('{},{},{},True,{}'.format(i, problems[i].total_demand,
                                             satisfied_demand, algo.runtime),
                   file=w)
            i += 1
            if i >= len(problems):
                break
            problems[i].traffic_matrix.tm += residual_tm
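
Example #1 amortizes a slow solver over a stream of traffic-matrix snapshots: while the solver's runtime still exceeds the per-snapshot budget, later snapshots are served by the stale solution, and their unmet demand (scaled by residual_factor) is rolled into the next traffic matrix. Below is a minimal, self-contained sketch of that carry-over loop; toy_satisfied_demand and the 50% service fraction are made-up stand-ins for the real compute_satisfied_demand.

import numpy as np

def toy_satisfied_demand(tm, frac, residual_factor):
    # Pretend a fraction `frac` of the demand gets routed; the rest is
    # carried forward, scaled by `residual_factor`.
    satisfied = frac * tm
    residual = residual_factor * (tm - satisfied)
    return satisfied.sum(), residual

tms = [np.full((3, 3), 10.0) for _ in range(4)]  # four toy TM snapshots
time_per_prob, residual_factor = 1.0, 1.0
runtime = 2.5  # pretend the solve took 2.5 time units against a 1.0 budget

i = 0
while runtime > time_per_prob and i < len(tms):
    # Solver still busy: this window reuses the stale solution and the
    # unmet demand spills into the next snapshot.
    satisfied, residual = toy_satisfied_demand(tms[i], 0.5, residual_factor)
    print('window {}: satisfied {:.1f} (stale solution)'.format(i, satisfied))
    runtime -= time_per_prob
    i += 1
    if i < len(tms):
        tms[i] += residual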
Example #2
def benchmark(problems):

    with open(OUTPUT_CSV, 'a') as results:
        print_(','.join(HEADERS), file=results)
        for problem_name, topo_fname, tm_fname in problems:
            problem = Problem.from_file(topo_fname, tm_fname)
            print_(problem.name, tm_fname)
            traffic_seed = problem.traffic_matrix.seed
            total_demand = problem.total_demand
            print_('traffic seed: {}'.format(traffic_seed))
            print_('traffic matrix model: {}'.format(
                problem.traffic_matrix.model))
            print_('traffic matrix scale factor: {}'.format(
                problem.traffic_matrix.scale_factor))
            print_('total demand: {}'.format(total_demand))

            num_paths, edge_disjoint, dist_metric, partition_cls, num_parts_scale_factor = NCFLOW_HYPERPARAMS[
                problem_name]
            num_partitions_to_set = num_parts_scale_factor * int(
                np.sqrt(len(problem.G.nodes)))
            partitioner = partition_cls(num_partitions_to_set)
            partition_algo = partitioner.name

            run_dir = os.path.join(
                TOP_DIR, problem.name,
                '{}-{}'.format(traffic_seed, problem.traffic_matrix.model))
            if not os.path.exists(run_dir):
                os.makedirs(run_dir)

            try:
                print_(
                    '\nNCFlow, {} partitioner, {} partitions, {} paths, edge disjoint {}, dist metric {}'
                    .format(partition_algo, num_partitions_to_set, num_paths,
                            edge_disjoint, dist_metric))
                run_nc_dir = os.path.join(
                    run_dir, 'ncflow', partition_algo,
                    '{}-partitions'.format(num_partitions_to_set),
                    '{}-paths'.format(num_paths),
                    'edge_disjoint-{}'.format(edge_disjoint),
                    'dist_metric-{}'.format(dist_metric))
                if not os.path.exists(run_nc_dir):
                    os.makedirs(run_nc_dir)
                with open(
                        os.path.join(
                            run_nc_dir,
                            '{}-ncflow-partitioner_{}-{}_partitions-{}_paths-edge_disjoint_{}-dist_metric_{}.txt'
                            .format(problem.name, partition_algo,
                                    num_partitions_to_set, num_paths,
                                    edge_disjoint, dist_metric)), 'w') as log:
                    ncflow = NcfEpi.new_max_flow(num_paths,
                                                 edge_disjoint=edge_disjoint,
                                                 dist_metric=dist_metric,
                                                 out=log)
                    ncflow.solve(problem, partitioner)

                    for i, nc in enumerate(ncflow._ncflows):
                        with open(
                                log.name.replace(
                                    '.txt',
                                    '-runtime-dict-iter-{}.pkl'.format(i)),
                                'wb') as w:
                            pickle.dump(nc.runtime_dict, w)
                        with open(
                                log.name.replace(
                                    '.txt', '-sol-dict-iter-{}.pkl'.format(i)),
                                'wb') as w:
                            pickle.dump(nc.sol_dict, w)
                    num_partitions = len(np.unique(ncflow._partition_vector))

                    for iter_idx in range(ncflow.num_iters):
                        nc = ncflow._ncflows[iter_idx]

                        (r1_runtime, r2_runtime, recon_runtime, r3_runtime,
                         kirchoffs_runtime) = nc.runtime_est(14, breakdown=True)
                        runtime = (r1_runtime + r2_runtime + recon_runtime +
                                   r3_runtime + kirchoffs_runtime)
                        total_flow = nc.obj_val

                        result_line = PLACEHOLDER.format(
                            problem.name, len(problem.G.nodes),
                            len(problem.G.edges), traffic_seed,
                            problem.traffic_matrix.model,
                            problem.traffic_matrix.scale_factor,
                            len(problem.commodity_list), total_demand,
                            'ncflow_edge_per_iter', partition_algo,
                            num_partitions,
                            partitioner.size_of_largest_partition,
                            partitioner.runtime, num_paths, edge_disjoint,
                            dist_metric, iter_idx, total_flow, runtime, r1_runtime,
                            r2_runtime, recon_runtime, r3_runtime,
                            kirchoffs_runtime)
                        print_(result_line, file=results)
            except Exception:
                print_(
                    'NCFlow partitioner {}, {} paths, Problem {}, traffic seed {}, traffic model {} failed'
                    .format(partition_algo, num_paths, problem.name,
                            traffic_seed, problem.traffic_matrix.model))
                traceback.print_exc(file=sys.stdout)
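
Example #2 keeps one log file per NCFlow configuration and derives its per-iteration pickle filenames by string-replacing the .txt suffix of the open log's name. A minimal sketch of that naming pattern (the paths and runtime dicts here are hypothetical stand-ins):

import os
import pickle

run_dir = os.path.join('logs', 'toy-run')
os.makedirs(run_dir, exist_ok=True)  # race-free form of the exists-check above

log_fname = os.path.join(run_dir, 'toy-ncflow.txt')
runtime_dicts = [{'r1': 0.1}, {'r1': 0.2}]  # stand-ins for nc.runtime_dict

with open(log_fname, 'w') as log:
    for i, rt in enumerate(runtime_dicts):
        pkl_fname = log.name.replace('.txt',
                                     '-runtime-dict-iter-{}.pkl'.format(i))
        with open(pkl_fname, 'wb') as w:
            pickle.dump(rt, w)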
Example #3
def benchmark(problem_names,
              benchmark_nc=True,
              benchmark_path=True,
              benchmark_smore=True,
              benchmark_fleischer_edge=True):

    with open('fib-entries.csv', 'a') as results:
        print_(','.join(HEADERS), file=results)

        for problem_name in problem_names:
            if benchmark_path:
                num_paths, edge_disjoint, dist_metric = PATH_FORM_HYPERPARAMS
                problem = get_fib_entry_problem(problem_name)
                total_num_fib_entries, max_num_fib_entries = PathFormulation.fib_entries(
                    problem, num_paths, edge_disjoint, dist_metric)
                print_(
                    'Path Formulation, Problem {}, {} paths, edge disjoint {}, dist_metric {}, total num fib entries: {}'
                    .format(problem_name, num_paths, edge_disjoint,
                            dist_metric, total_num_fib_entries))
                print_(
                    'Path Formulation, Problem {}, {} paths, edge disjoint {}, dist_metric {}, max num fib entries: {}'
                    .format(problem_name, num_paths, edge_disjoint,
                            dist_metric, max_num_fib_entries))

                result_line = PLACEHOLDER.format(
                    problem_name, len(problem.G.nodes), len(problem.G.edges),
                    problem.traffic_matrix.seed, problem.traffic_matrix.model,
                    len(problem.commodity_list), problem.total_demand,
                    'path_formulation', 'N/A', 'N/A', 'N/A', num_paths,
                    edge_disjoint, dist_metric, total_num_fib_entries,
                    max_num_fib_entries)
                print_(result_line, file=results)

            if benchmark_nc:
                num_paths, edge_disjoint, dist_metric, partition_cls, num_parts_scale_factor = NCFLOW_HYPERPARAMS[
                    problem_name]
                problem = get_fib_entry_problem(problem_name)

                num_parts_to_set = num_parts_scale_factor * int(
                    np.sqrt(len(problem.G.nodes)))
                partitioner = partition_cls(num_parts_to_set)
                partition_algo = partitioner.name

                partition_vector = partitioner.partition(problem)
                num_parts = len(np.unique(partition_vector))
                if not all_partitions_contiguous(problem, partition_vector):
                    print_(
                        'Problem {}, partitioner {}, num_partitions_to_set {} did not find a valid partition'
                        .format(problem_name, partition_algo, num_parts))
                    continue
                total_num_fib_entries, max_num_fib_entries = NcfEpi.fib_entries(
                    problem, partitioner, num_paths, edge_disjoint,
                    dist_metric)
                print_(
                    'NCFlowEdgePerIter, Problem {}, partitioner {}, {} partitions, {} paths, edge disjoint {}, dist_metric {}, total num fib entries: {}'
                    .format(problem_name, partition_algo, num_parts, num_paths,
                            edge_disjoint, dist_metric, total_num_fib_entries))
                print_(
                    'NCFlowEdgePerIter, Problem {}, partitioner {}, {} partitions, {} paths, edge disjoint {}, dist_metric {}, max num fib entries: {}'
                    .format(problem_name, partition_algo, num_parts, num_paths,
                            edge_disjoint, dist_metric, max_num_fib_entries))

                result_line = PLACEHOLDER.format(
                    problem_name, len(problem.G.nodes), len(problem.G.edges),
                    problem.traffic_matrix.seed, problem.traffic_matrix.model,
                    len(problem.commodity_list), problem.total_demand,
                    'ncflow_edge_per_iter', partition_algo, num_parts,
                    partitioner.runtime, num_paths, edge_disjoint, dist_metric,
                    total_num_fib_entries, max_num_fib_entries)
                print_(result_line, file=results)

            if benchmark_smore:
                num_paths = 4
                problem = get_fib_entry_problem(problem_name)
                total_num_fib_entries, max_num_fib_entries = SMORE.fib_entries(
                    problem, num_paths)
                print_(
                    'SMORE, Problem {}, {} paths, total num fib entries: {}'.
                    format(problem_name, num_paths, total_num_fib_entries))
                print_('SMORE, Problem {}, {} paths, max num fib entries: {}'.
                       format(problem_name, num_paths, max_num_fib_entries))

                result_line = PLACEHOLDER.format(
                    problem_name, len(problem.G.nodes), len(problem.G.edges),
                    problem.traffic_matrix.seed, problem.traffic_matrix.model,
                    len(problem.commodity_list), problem.total_demand, 'smore',
                    'N/A', 'N/A', 'N/A', num_paths, 'N/A', 'N/A',
                    total_num_fib_entries, max_num_fib_entries)
                print_(result_line, file=results)

            if benchmark_fleischer_edge:
                problem = get_fib_entry_problem(problem_name)
                fib_dict = defaultdict(dict)
                for k, (_, t_k, _) in problem.commodity_list:
                    commod_id_str = 'k-{}'.format(k)
                    # For each commodity in a given TM, we would store a weight
                    # in each switch for each of the switch's neighbors. For
                    # demo purposes, we just store the neighbors. (NOTE: this
                    # is a generous assumption; if the number of neighbors is
                    # too large, the entry cannot fit in a single switch rule.)
                    for n in problem.G.nodes():
                        if n != t_k:
                            fib_dict[n][commod_id_str] = list(
                                problem.G.successors(n))

                fib_dict = dict(fib_dict)
                fib_dict_counts = [len(fib_dict[k]) for k in fib_dict.keys()]
                total_num_fib_entries, max_num_fib_entries = sum(
                    fib_dict_counts), max(fib_dict_counts)

                print_('Fleischer Edge, Problem {}, total num fib entries: {}'.
                       format(problem_name, total_num_fib_entries))
                print_('Fleischer Edge, Problem {}, max num fib entries: {}'.
                       format(problem_name, max_num_fib_entries))

                result_line = PLACEHOLDER.format(
                    problem_name, len(problem.G.nodes), len(problem.G.edges),
                    problem.traffic_matrix.seed, problem.traffic_matrix.model,
                    len(problem.commodity_list), problem.total_demand,
                    'fleischer_edge', 'N/A', 'N/A', 'N/A', 'N/A', 'N/A', 'N/A',
                    total_num_fib_entries, max_num_fib_entries)
                print_(result_line, file=results)
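
The Fleischer Edge branch above sizes forwarding state by giving every non-destination node one FIB rule per commodity, where each rule lists that node's successors. A self-contained sketch of the same counting on a toy digraph (the graph and commodities are made up):

from collections import defaultdict
import networkx as nx

G = nx.DiGraph([(0, 1), (1, 2), (0, 2)])
commodity_list = [(0, (0, 2, 10.0)), (1, (1, 2, 5.0))]  # (k, (s_k, t_k, d_k))

fib_dict = defaultdict(dict)
for k, (_, t_k, _) in commodity_list:
    commod_id_str = 'k-{}'.format(k)
    for n in G.nodes():
        if n != t_k:  # the destination itself needs no forwarding rule
            fib_dict[n][commod_id_str] = list(G.successors(n))

counts = [len(rules) for rules in fib_dict.values()]
print('total fib entries: {}, max per switch: {}'.format(sum(counts),
                                                         max(counts)))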
Example #4
def benchmark(problems):
    num_paths, edge_disjoint, dist_metric = PATH_FORM_HYPERPARAMS
    with open('path-form.csv', 'a') as results:
        print_(','.join(HEADERS), file=results)
        for problem_name, topo_fname, tm_fname in problems:
            problem = Problem.from_file(topo_fname, tm_fname)
            print_(problem.name, tm_fname)
            traffic_seed = problem.traffic_matrix.seed
            total_demand = problem.total_demand
            print_('traffic seed: {}'.format(traffic_seed))
            print_('traffic scale factor: {}'.format(
                problem.traffic_matrix.scale_factor))
            print_('traffic matrix model: {}'.format(
                problem.traffic_matrix.model))
            print_('total demand: {}'.format(total_demand))

            run_dir = os.path.join(
                TOP_DIR, problem.name,
                '{}-{}'.format(traffic_seed, problem.traffic_matrix.model))
            if not os.path.exists(run_dir):
                os.makedirs(run_dir)

            try:
                print_(
                    '\nPath formulation, {} paths, edge disjoint {}, dist metric {}'
                    .format(num_paths, edge_disjoint, dist_metric))
                with open(
                        os.path.join(
                            run_dir,
                            '{}-path-formulation_{}-paths_edge-disjoint-{}_dist-metric-{}.txt'
                            .format(problem.name, num_paths, edge_disjoint,
                                    dist_metric)), 'w') as log:
                    pf = PathFormulation.new_max_flow(
                        num_paths,
                        edge_disjoint=edge_disjoint,
                        dist_metric=dist_metric,
                        out=log)
                    pf.solve(problem)
                    pf_sol_dict = pf.extract_sol_as_dict()
                    with open(
                            os.path.join(
                                run_dir,
                                '{}-path-formulation_{}-paths_edge-disjoint-{}_dist-metric-{}_sol-dict.pkl'
                                .format(problem.name, num_paths, edge_disjoint,
                                        dist_metric)), 'wb') as w:
                        pickle.dump(pf_sol_dict, w)

                result_line = PLACEHOLDER.format(
                    problem.name,
                    len(problem.G.nodes),
                    len(problem.G.edges),
                    traffic_seed,
                    problem.traffic_matrix.scale_factor,
                    problem.traffic_matrix.model,
                    len(problem.commodity_list),
                    total_demand,
                    'path_formulation',
                    num_paths,
                    edge_disjoint,
                    dist_metric,
                    pf.obj_val,
                    pf.runtime,
                )
                print_(result_line, file=results)

            except Exception:
                print_(
                    'Path formulation {} paths, edge disjoint {}, dist metric {}, Problem {}, traffic seed {}, traffic model {} failed'
                    .format(num_paths, edge_disjoint, dist_metric,
                            problem.name, traffic_seed,
                            problem.traffic_matrix.model))
                traceback.print_exc(file=sys.stdout)
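
Examples #2, #4, #6, and #7 all wrap each run in a try/except so that one failing instance logs its traceback and the sweep continues with the next problem. A minimal self-contained sketch of that pattern (run_one is a hypothetical stand-in for a solver call):

import sys
import traceback

def run_one(name):
    if name == 'bad-topology':
        raise ValueError('solver blew up')  # simulate one failing run
    return len(name)

for name in ['toy-1', 'bad-topology', 'toy-2']:
    try:
        print('{}: ok, result {}'.format(name, run_one(name)))
    except Exception:
        # Log and move on; one failure should not kill the whole sweep.
        print('{} failed'.format(name))
        traceback.print_exc(file=sys.stdout)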
Example #5
sys.path.append('..')
from lib.problem import Problem

OUTPUT_CSV = 'demand-stats.csv'
HEADERS = [
    'problem', 'num_nodes', 'num_edges', 'traffic_seed', 'tm_model',
    'scale_factor', 'num_commodities', 'total_demand', 'clustering_algo',
    'num_partitions', 'size_of_largest_partition', 'partition_runtime',
    'intra_demand', 'inter_demand'
]
PLACEHOLDER = ','.join('{}' for _ in HEADERS)
PARTITIONER_DICT = {}

if __name__ == '__main__':
    with open(OUTPUT_CSV, 'a') as w:
        print_(','.join(HEADERS), file=w)
        for problem_name in PROBLEM_NAMES:

            if problem_name.endswith('.graphml'):
                topo_fname = os.path.join('..', 'topologies', 'topology-zoo',
                                          problem_name)
            else:
                topo_fname = os.path.join('..', 'topologies', problem_name)
            _, _, _, partition_cls, num_parts_scale_factor = NCFLOW_HYPERPARAMS[
                problem_name]

            for model in TM_MODELS:
                for tm_fname in iglob(
                        '../traffic-matrices/{}/{}*_traffic-matrix.pkl'.format(
                            model, problem_name)):
                    print(tm_fname)
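
Example #5 builds PLACEHOLDER as one '{}' per header, joined by commas, so PLACEHOLDER.format(...) emits a CSV row whose columns line up with HEADERS purely by position. A toy demonstration (headers and values are made up):

TOY_HEADERS = ['problem', 'num_nodes', 'total_demand']
TOY_PLACEHOLDER = ','.join('{}' for _ in TOY_HEADERS)

print(','.join(TOY_HEADERS))                          # problem,num_nodes,total_demand
print(TOY_PLACEHOLDER.format('toy-topo', 12, 340.5))  # toy-topo,12,340.5

Because the match is positional, the format arguments must follow HEADERS exactly; reordering them silently shifts every later column.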
Example #6
def benchmark(problems,
              scale_factors_to_sweep=[0.25, 0.5, 1, 2, 3, 4, 5, 6, 7]):

    with open('num-partitions-sweep.csv', 'a') as results:
        print_(','.join(HEADERS), file=results)
        for problem_name, topo_fname, tm_fname in problems:
            problem = Problem.from_file(topo_fname, tm_fname)
            print_(problem.name, tm_fname)
            traffic_seed = problem.traffic_matrix.seed
            total_demand = problem.total_demand
            print_('traffic seed: {}'.format(traffic_seed))
            print_('traffic matrix model: {}'.format(
                problem.traffic_matrix.model))
            print_('traffic matrix scale factor: {}'.format(
                problem.traffic_matrix.scale_factor))
            print_('total demand: {}'.format(total_demand))

            num_paths, edge_disjoint, dist_metric, partition_cls, _ = NCFLOW_HYPERPARAMS[
                problem_name]
            for scale_factor in scale_factors_to_sweep:
                num_partitions_to_set = int(scale_factor *
                                            int(np.sqrt(len(problem.G.nodes))))
                print('Scale factor: {} -> num partitions: {}'.format(
                    scale_factor, num_partitions_to_set))
                partitioner = partition_cls(num_partitions_to_set)
                partition_algo = partitioner.name

                partition_vector = partitioner.partition(problem)
                num_parts = len(np.unique(partition_vector))
                if not all_partitions_contiguous(problem, partition_vector):
                    print_(
                        'Problem {}, partitioner {}, num_partitions_to_set {} did not find a valid partition'
                        .format(problem_name, partition_algo, num_parts))
                    continue

                run_dir = os.path.join(
                    TOP_DIR, problem.name,
                    '{}-{}'.format(traffic_seed, problem.traffic_matrix.model))
                if not os.path.exists(run_dir):
                    os.makedirs(run_dir)

                try:
                    print_(
                        '\nNCFlow, {} partitioner, {} partitions, {} paths, edge disjoint {}, dist metric {}'
                        .format(partition_algo, num_partitions_to_set,
                                num_paths, edge_disjoint, dist_metric))
                    run_nc_dir = os.path.join(
                        run_dir, 'ncflow', partition_algo,
                        '{}-partitions'.format(num_partitions_to_set),
                        '{}-paths'.format(num_paths),
                        'edge_disjoint-{}'.format(edge_disjoint),
                        'dist_metric-{}'.format(dist_metric))
                    if not os.path.exists(run_nc_dir):
                        os.makedirs(run_nc_dir)

                    fib_entry_problem = get_fib_entry_problem(problem.name)
                    print_(
                        'NCFlowEdgePerIter, Problem {}, partitioner {}, {} partitions, {} paths, edge disjoint {}, dist_metric {} computing fib entries'
                        .format(fib_entry_problem.name, partition_algo,
                                num_parts, num_paths, edge_disjoint,
                                dist_metric))

                    total_num_fib_entries, max_num_fib_entries = NcfEpi.fib_entries(
                        fib_entry_problem, partitioner, num_paths,
                        edge_disjoint, dist_metric)
                    print_(
                        'NCFlowEdgePerIter, Problem {}, partitioner {}, {} partitions, {} paths, edge disjoint {}, dist_metric {}, total num fib entries: {}'
                        .format(problem_name, partition_algo, num_parts,
                                num_paths, edge_disjoint, dist_metric,
                                total_num_fib_entries))
                    print_(
                        'NCFlowEdgePerIter, Problem {}, partitioner {}, {} partitions, {} paths, edge disjoint {}, dist_metric {}, max num fib entries: {}'
                        .format(problem_name, partition_algo, num_parts,
                                num_paths, edge_disjoint, dist_metric,
                                max_num_fib_entries))

                    with open(
                            os.path.join(
                                run_nc_dir,
                                '{}-ncflow-partitioner_{}-{}_partitions-{}_paths-edge_disjoint_{}-dist_metric_{}.txt'
                                .format(problem.name, partition_algo,
                                        num_partitions_to_set, num_paths,
                                        edge_disjoint, dist_metric)),
                            'w') as log:
                        ncflow = NcfEpi.new_max_flow(
                            num_paths,
                            edge_disjoint=edge_disjoint,
                            dist_metric=dist_metric,
                            out=log)
                        ncflow.solve(problem, partitioner)

                        num_partitions = len(
                            np.unique(ncflow._partition_vector))
                        runtime = ncflow.runtime_est(14)
                        total_flow = ncflow.obj_val

                        result_line = PLACEHOLDER.format(
                            problem.name, len(problem.G.nodes),
                            len(problem.G.edges), traffic_seed,
                            problem.traffic_matrix.model,
                            problem.traffic_matrix.scale_factor,
                            len(problem.commodity_list), total_demand,
                            'ncflow_edge_per_iter', partition_algo,
                            num_partitions,
                            partitioner.size_of_largest_partition,
                            partitioner.runtime, num_paths, edge_disjoint,
                            dist_metric, total_flow, runtime, ncflow.num_iters,
                            total_num_fib_entries, max_num_fib_entries)
                        print_(result_line, file=results)
                except Exception:
                    print_(
                        'NCFlowEdgePerIter partitioner {}, {} paths, Problem {}, traffic seed {}, traffic model {} failed'
                        .format(partition_algo, num_paths, problem.name,
                                traffic_seed, problem.traffic_matrix.model))
                    traceback.print_exc(file=sys.stdout)
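
Example #6 requests scale_factor * floor(sqrt(|V|)) partitions at each sweep point, so a 100-node topology is swept from 2 partitions (scale factor 0.25) up to 70 (scale factor 7). A quick sketch of that mapping:

import numpy as np

num_nodes = 100
for scale_factor in [0.25, 0.5, 1, 2, 3, 4, 5, 6, 7]:
    num_partitions_to_set = int(scale_factor * int(np.sqrt(num_nodes)))
    print('scale factor {} -> {} partitions'.format(scale_factor,
                                                    num_partitions_to_set))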
Example #7
def benchmark(problems):
    with open('smore.csv', 'a') as w:
        print_(','.join(HEADERS), file=w)
        for problem_name, topo_fname, tm_fname in problems:

            problem = Problem.from_file(topo_fname, tm_fname)
            print_(problem.name, tm_fname)

            traffic_seed = problem.traffic_matrix.seed
            total_demand = np.sum(problem.traffic_matrix.tm)
            print_('traffic seed: {}'.format(traffic_seed))
            print_('traffic matrix model: {}'.format(
                problem.traffic_matrix.model))
            print_('total demand: {}'.format(total_demand))

            run_dir = os.path.join(
                TOP_DIR, problem.name,
                '{}-{}'.format(traffic_seed, problem.traffic_matrix.model))
            if not os.path.exists(run_dir):
                os.makedirs(run_dir)

            try:
                with open(
                        os.path.join(run_dir,
                                     '{}-smore.txt'.format(problem.name)),
                        'w') as log:
                    smore = SMORE.new_max_flow(num_paths=4, out=log)
                    smore.solve(problem)
                    smore_sol_dict = smore.extract_sol_as_dict()
                    sol_fname = os.path.join(
                        run_dir, '{}-smore-sol-dict.pkl'.format(problem.name))
                    # Use a context manager so the pickle file is closed
                    # even if dumping raises.
                    with open(sol_fname, 'wb') as sol_file:
                        pickle.dump(smore_sol_dict, sol_file)

                    result_line = PLACEHOLDER.format(
                        problem.name, len(problem.G.nodes),
                        len(problem.G.edges), traffic_seed,
                        problem.traffic_matrix.model,
                        problem.traffic_matrix.scale_factor,
                        len(problem.commodity_list), total_demand, 'smore', 4,
                        smore.total_flow, smore.runtime)
                    print_(result_line, file=w)

            except Exception:
                print_(
                    'SMORE, Problem {}, traffic seed {}, traffic model {} failed'
                    .format(problem.name, traffic_seed,
                            problem.traffic_matrix.model))
                traceback.print_exc(file=sys.stdout)
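
Example #7 writes the SMORE solution dict inside a with block so the pickle file is closed even if dumping raises. A tiny round-trip helper in the same spirit (the names here are hypothetical):

import pickle

def dump_pkl(obj, fname):
    # The context manager guarantees the handle is flushed and closed.
    with open(fname, 'wb') as f:
        pickle.dump(obj, f)

def load_pkl(fname):
    with open(fname, 'rb') as f:
        return pickle.load(f)

dump_pkl({'demo': 1}, 'toy-sol-dict.pkl')
print(load_pkl('toy-sol-dict.pkl'))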