示例#1
0
def generate_index(opts):
    """Run the tile indexer over every vertex-engine meta directory."""

    def run(cmd):
        utils.run(opts.print_only, "time", *cmd)

    # Make sure the proper config is loaded before launching anything.
    run(["sudo", "./startup-config"])

    # Pick the debug or release indexer binary.
    binary = conf.DBIN_TILE_INDEXER if opts.debug else conf.RBIN_TILE_INDEXER

    global_dir = conf.getGlobalsDir(opts.dataset, False)
    meta_dirs = [
        conf.getMicSubdir(conf.SG_DATAPATH_VERTEX_ENGINE[i], opts.dataset,
                          "meta", i)
        for i in range(int(opts.nmic))
    ]

    # Assemble the indexer invocation and run it (optionally under gdb).
    indexer_args = [
        binary, "--nthreads", 1, "--paths-meta", ":".join(meta_dirs),
        "--path-global", global_dir
    ]
    if opts.gdb:
        indexer_args = ["gdb", "--args"] + indexer_args
    run(indexer_args)
示例#2
0
def rebalance(opts):
    """Rebalance loaded graph data across the vertex- and edge-engine dirs.

    Runs the post-graph-load step (pdump or regular variant), then mirrors
    stat.dat and tile_stats.dat from each per-MIC vertex-engine meta
    directory into the corresponding edge-engine meta directory whenever
    the two directories differ.
    """
    # Local import so this function works even if the module header is
    # ever trimmed; shutil is also used elsewhere in this file.
    import shutil

    globals_dir = conf.getGlobalsDir(opts.dataset, opts.weighted)
    partition = conf.getGrcPartitionDir(opts.dataset, 0, opts.weighted)

    original_meta = []
    original_tile = []
    for i in range(0, len(conf.SG_GRC_OUTPUT_DIRS)):
        original_meta.append(conf.getGrcMetaDir(opts.dataset, i,
                                                opts.weighted))
        original_tile.append(conf.getGrcTileDir(opts.dataset, i,
                                                opts.weighted))

    output_meta = []
    output_tile = []

    for i in range(0, int(opts.nmic)):
        output_meta.append(
            conf.getMicSubdir(conf.SG_DATAPATH_VERTEX_ENGINE[i], opts.dataset,
                              "meta", i, opts.weighted))
        output_tile.append(
            conf.getMicSubdir(conf.SG_DATAPATH_EDGE_ENGINE[i], opts.dataset,
                              "tile", i, opts.weighted))

    # Truthiness instead of `is True`: opts.pdump may be any truthy value.
    if opts.pdump:
        post_pgrc.post_graph_load(original_meta, output_meta, original_tile,
                                  output_tile, globals_dir, opts.shuffle)
    else:
        post_grc.post_graph_load(original_meta, output_meta, original_tile,
                                 output_tile, globals_dir, opts.shuffle)
    # Copy over meta to the edge_engine output directory:
    # 1. Create meta directory for specific MIC on edge_engine output
    # 2. Copy tile_stats.dat and stat.dat
    for i in range(0, int(opts.nmic)):
        edge_engine_meta_dir = conf.getMicSubdir(
            conf.SG_DATAPATH_EDGE_ENGINE[i], opts.dataset, "meta", i,
            opts.weighted)
        vertex_engine_meta_dir = conf.getMicSubdir(
            conf.SG_DATAPATH_VERTEX_ENGINE[i], opts.dataset, "meta", i,
            opts.weighted)
        # Nothing to do when both engines share the same meta directory.
        if edge_engine_meta_dir == vertex_engine_meta_dir:
            continue

        if not os.path.exists(edge_engine_meta_dir):
            utils.mkdirp(edge_engine_meta_dir, conf.FILE_GROUP)

        # Copy the files over.  shutil.copy handles paths containing
        # spaces and raises on failure, unlike the previous
        # os.system("cp ...") which silently ignored errors.
        stat_filename = os.path.join(vertex_engine_meta_dir, "stat.dat")
        tile_stat_filename = os.path.join(vertex_engine_meta_dir,
                                          "tile_stats.dat")
        shutil.copy(stat_filename, edge_engine_meta_dir)
        shutil.copy(tile_stat_filename, edge_engine_meta_dir)
        print("cp %s %s" % (tile_stat_filename, edge_engine_meta_dir))
    print("Done copying all stat files for edge engines")
示例#3
0
def startMosaic(opts):
    """Build and launch the Mosaic host binary.

    Kills any running instance, rebuilds, prepares the fault-tolerance and
    perf-event directories, assembles the full command line from conf and
    opts, and runs it under sudo (optionally wrapped in gdb, perf stat or
    likwid-perfctr).  Output goes to the vertex-engine log file unless gdb
    is used.
    """
    killMosaic(opts.print_only)
    build.build(False, False)

    meta_dirs = []
    tile_dirs = []
    global_dir = conf.getGlobalsDir(opts.dataset)

    # set up fault-tolerance dir if required
    fault_tolerance_dir = conf.getFaultToleranceDir(opts.dataset)
    if opts.fault_tolerant_mode:
        shutil.rmtree(fault_tolerance_dir, True)
        utils.mkdirp(fault_tolerance_dir, conf.FILE_GROUP)

    perf_events_dir = conf.getPerfEventsDir(opts.dataset)
    if opts.enable_perf_event_collection:
        utils.mkdirp(perf_events_dir, conf.FILE_GROUP)

    for i in range(0, int(opts.nmic)):
        meta_dirs.append(
            conf.getMicSubdir(conf.SG_DATAPATH_VERTEX_ENGINE[i], opts.dataset,
                              "meta", i))
        tile_dirs.append(
            conf.getMicSubdir(conf.SG_DATAPATH_VERTEX_ENGINE[i], opts.dataset,
                              "tile", i))

    in_memory_mode_int = 1 if opts.in_memory_mode else 0

    meta_dirs_string = ":".join(meta_dirs)
    tile_dirs_string = ":".join(tile_dirs)

    enable_tile_partitioning_int = 1 if opts.enable_tile_partitioning else 0
    enable_fault_tolerance_int = 1 if opts.fault_tolerant_mode else 0
    enable_perf_event_collection_int = 1 if opts.enable_perf_event_collection else 0

    # for selective scheduling
    use_selective_scheduling_int = 1 if \
        conf.SG_ALGORITHM_ENABLE_SELECTIVE_SCHEDULING[opts.algorithm] else 0
    if opts.dataset in conf.SG_DATASET_DISABLE_SELECTIVE_SCHEDULING:
        use_selective_scheduling_int = 0

    # For pinning, count threads and determine if we need to use smt or not.
    count_tile_readers = conf.SG_NREADER
    count_tile_processors = conf.SG_NPROCESSOR

    # Bug fix: opts.nmic is converted via int(opts.nmic) everywhere else in
    # this file, so it may arrive as a string; convert before dividing.
    edge_engine_per_socket = int(opts.nmic) / topo.NUM_SOCKET

    count_threads_per_edge_engine = opts.count_indexreader + opts.count_vertex_fetcher + opts.count_vertex_reducer + count_tile_readers + count_tile_processors
    count_threads_per_socket = count_threads_per_edge_engine * edge_engine_per_socket + opts.count_globalreducer / topo.NUM_SOCKET

    use_smt_int = 1 if count_threads_per_socket >= topo.NUM_PHYSICAL_CPU_PER_SOCKET else 0

    # Set the size of the read tiles rb to the in memory value iff not running on
    # the mic and the in memory mode is activated.
    read_tiles_rb_size = conf.SG_RB_SIZE_READ_TILES
    if opts.in_memory_mode:
        read_tiles_rb_size = conf.SG_RB_SIZE_READ_TILES_IN_MEMORY

    args = [
        "--algorithm",
        opts.algorithm,
        "--max-iterations",
        opts.max_iterations,
        "--nmic",
        opts.nmic,
        "--count-applier",
        opts.count_applier,
        "--count-globalreducer",
        opts.count_globalreducer,
        "--count-globalfetcher",
        opts.count_globalfetcher,
        "--count-indexreader",
        opts.count_indexreader,
        "--count-vertex-reducer",
        opts.count_vertex_reducer,
        "--count-vertex-fetcher",
        opts.count_vertex_fetcher,
        "--in-memory-mode",
        in_memory_mode_int,
        "--paths-meta",
        meta_dirs_string,
        "--paths-tile",
        tile_dirs_string,
        "--path-globals",
        global_dir,
        "--use-selective-scheduling",
        use_selective_scheduling_int,
        "--path-fault-tolerance-output",
        fault_tolerance_dir,
        "--enable-fault-tolerance",
        enable_fault_tolerance_int,
        "--enable-tile-partitioning",
        enable_tile_partitioning_int,
        "--count-tile-reader",
        count_tile_readers,
        "--local-fetcher-mode",
        opts.local_fetcher_mode,
        "--global-fetcher-mode",
        opts.global_fetcher_mode,
        "--enable-perf-event-collection",
        enable_perf_event_collection_int,
        "--path-perf-events",
        perf_events_dir,
        "--count-tile-processors",
        count_tile_processors,
        "--use-smt",
        use_smt_int,
        "--host-tiles-rb-size",
        conf.SG_RB_SIZE_HOST_TILES,
        "--local-reducer-mode",
        opts.local_reducer_mode,
        "--processed-rb-size",
        conf.SG_RB_SIZE_PROCESSED,
        "--read-tiles-rb-size",
        read_tiles_rb_size,
        "--tile-processor-mode",
        opts.tile_processor_mode,
        "--tile-processor-input-mode",
        opts.tile_processor_input_mode,
        "--tile-processor-output-mode",
        opts.tile_processor_output_mode,
        "--count-followers",
        opts.count_followers,
    ]

    if opts.enable_log:
        log_dir = os.path.join(conf.LOG_ROOT, (conf.getWeightedName(
            opts.dataset, conf.SG_ALGORITHM_WEIGHTED[opts.algorithm])))
        utils.mkdirp(log_dir, conf.FILE_GROUP)
        args = args + ["--log", log_dir]

    if opts.debug:
        b = conf.DBIN_MOSAIC
    else:
        b = conf.RBIN_MOSAIC

    args = [b] + args

    if opts.gdb:
        args = ["gdb", "--args"] + args

    # We need sudo for scif
    # args = ["sudo", "LD_LIBRARY_PATH=/usr/lib64/:$LD_LIBRARY_PATH"] + args
    # args = ["sudo", "valgrind"] + args
    if opts.run == "perfstat":
        args = [
            "perf", "stat", "-B", "-e",
            "cache-references,cache-misses,cycles,instructions,branches,faults,migrations"
        ] + args
    if opts.run == "likwid":
        max_cpu_id = multiprocessing.cpu_count() - 1
        args = [
            "likwid-perfctr", "-f", "-g", "NUMA", "-g", "L2", "-g", "L2CACHE",
            "-g", "BRANCH", "-g", "CYCLE_ACTIVITY", "-g", "L3", "-g",
            "L3CACHE", "-c",
            "0-%d" % max_cpu_id
        ] + args

    args = ["sudo"] + args

    # NOTE(review): with --print-only nothing is executed *or* printed here,
    # since the guard skips utils.run entirely — confirm that is intended.
    if not opts.print_only:
        if opts.gdb:
            utils.run(opts.print_only, *args)
        else:
            out_file = utils.getVertexEngineLogName(opts)
            utils.run_output(opts.print_only, out_file, *args)
示例#4
0
def generate_graph_in_memory(opts):
    """Partition a graph into tiles entirely in memory via the GRC tool.

    Prepares the globals/meta/tile directory layout (copying the
    unweighted globals files when a weighted output is requested), derives
    the generator settings for the chosen input kind (rmat / binary /
    delimited edges), and runs the in-memory GRC binary, optionally under
    gdb.
    """
    def run(args):
        utils.run(opts.print_only, "time", *args)

    # make sure proper config is loaded
    run(["sudo", "./startup-config"])

    # Pick the debug or release in-memory GRC binary.
    if opts.debug:
        grc_in_memory = conf.DBIN_GRC_IN_MEMORY
    else:
        grc_in_memory = conf.RBIN_GRC_IN_MEMORY

    # 1 iff the dataset's input file already carries edge weights.
    input_weighted = 0
    if conf.SG_INPUT_WEIGHTED.get(opts.dataset, False):
        input_weighted = 1

    # populate hashed dirs
    num_dir = conf.SG_NUM_HASH_DIRS

    meta_dirs = []
    tile_dirs = []
    global_dir = conf.getGlobalsDir(opts.dataset, opts.weighted_output)

    utils.mkdirp(global_dir, conf.FILE_GROUP)

    # A weighted output reuses the unweighted run's global files: copy
    # stat.dat, vertex_deg.dat and vertex_global_to_orig.dat over.
    if (opts.weighted_output):
        unweighted_stat = os.path.join(conf.getGlobalsDir(opts.dataset, False),
                                       "stat.dat")
        weighted_stat = os.path.join(conf.getGlobalsDir(opts.dataset, True),
                                     "stat.dat")
        shutil.copyfile(unweighted_stat, weighted_stat)

        unweighted_deg = os.path.join(conf.getGlobalsDir(opts.dataset, False),
                                      "vertex_deg.dat")
        weighted_deg = os.path.join(conf.getGlobalsDir(opts.dataset, True),
                                    "vertex_deg.dat")
        shutil.copyfile(unweighted_deg, weighted_deg)

        unweighted_global_to_orig = os.path.join(
            conf.getGlobalsDir(opts.dataset, False),
            "vertex_global_to_orig.dat")
        weighted_global_to_orig = os.path.join(
            conf.getGlobalsDir(opts.dataset, True),
            "vertex_global_to_orig.dat")
        shutil.copyfile(unweighted_global_to_orig, weighted_global_to_orig)

    # Recreate every GRC output dir from scratch and pre-populate the
    # hashed subdirectory structure.
    for i in range(0, len(conf.SG_GRC_OUTPUT_DIRS)):
        meta_dir = conf.getGrcMetaDir(opts.dataset, i, opts.weighted_output)
        tile_dir = conf.getGrcTileDir(opts.dataset, i, opts.weighted_output)

        shutil.rmtree(meta_dir, True)
        shutil.rmtree(tile_dir, True)

        utils.mkdirp(meta_dir, conf.FILE_GROUP)
        utils.mkdirp(tile_dir, conf.FILE_GROUP)

        utils.populate_hash_dirs(num_dir, meta_dir)
        utils.populate_hash_dirs(num_dir, tile_dir)

        meta_dirs.append(meta_dir)
        tile_dirs.append(tile_dir)

    output_weighted = 0
    if opts.weighted_output:
        output_weighted = 1

    use_rle_int = 0
    if opts.use_rle:
        use_rle_int = 1

    # Generator settings; count_edges is only meaningful for rmat and
    # delimiter only for delim_edges — the others stay at their defaults.
    generator = ""
    delimiter = ""
    count_vertices = 0
    count_edges = 0
    use_original_ids = 0

    input_file = ""
    if opts.rmat:
        generator = "rmat"
        count_vertices = conf.SG_GRAPH_SETTINGS_RMAT[
            opts.dataset]["count_vertices"]
        count_edges = conf.SG_GRAPH_SETTINGS_RMAT[opts.dataset]["count_edges"]
        use_original_ids = 1 if conf.SG_GRAPH_SETTINGS_RMAT[
            opts.dataset]["use_original_ids"] else 0
    elif opts.binary:
        generator = "binary"
        input_file = conf.SG_INPUT_FILE[opts.dataset]["binary"]
        count_vertices = conf.SG_GRAPH_SETTINGS_DELIM[
            opts.dataset]["count_vertices"]
        use_original_ids = 1 if conf.SG_GRAPH_SETTINGS_DELIM[
            opts.dataset]["use_original_ids"] else 0
    else:
        generator = "delim_edges"
        input_file = conf.SG_INPUT_FILE[opts.dataset]["delim"]
        count_vertices = conf.SG_GRAPH_SETTINGS_DELIM[
            opts.dataset]["count_vertices"]
        delimiter = conf.SG_GRAPH_SETTINGS_DELIM[opts.dataset]["delimiter"]
        use_original_ids = 1 if conf.SG_GRAPH_SETTINGS_DELIM[
            opts.dataset]["use_original_ids"] else 0

    # rmat generates edges itself; every other mode needs a readable input.
    if not opts.rmat:
        if not os.path.exists(input_file):
            print("Failed to find %s" % input_file)
            exit(1)
    args = [
        grc_in_memory,
        "--source",
        input_file,
        "--count-vertices",
        count_vertices,
        "--generator",
        generator,
        "--graphname",
        opts.dataset,
        "--path-globals",
        global_dir,
        "--paths-meta",
        ":".join(meta_dirs),
        "--paths-tile",
        ":".join(tile_dirs),
        "--nthreads",
        conf.SG_GRC_NTHREADS_PARTITIONER,
        "--npartition-managers",
        conf.SG_GRC_NPARTITION_MANAGERS,
        "--input-weighted",
        input_weighted,
        "--output-weighted",
        output_weighted,
        "--rmat-count-edges",
        count_edges,
        "--use-run-length-encoding",
        use_rle_int,
        "--use-original-ids",
        use_original_ids,
        "--traversal",
        opts.traversal,
        "--delimiter",
        delimiter,
    ]
    if opts.gdb:
        args = ["gdb", "--args"] + args
    run(args)
def startVertexEngine(opts):
    """Assemble and launch the vertex-engine binary.

    Mirrors startMosaic but additionally supports running on the MIC, the
    edge-engine-to-MIC mapping, and a benchmark mode that scrapes
    per-iteration timings from the child's stderr and appends summary
    statistics to benchmark.dat.
    """
    meta_dirs = []
    tile_dirs = []
    global_dir = conf.getGlobalsDir(opts.dataset)

    # set up fault-tolerance dir if required
    fault_tolerance_dir = conf.getFaultToleranceDir(opts.dataset)
    if opts.fault_tolerant_mode:
        shutil.rmtree(fault_tolerance_dir, True)
        utils.mkdirp(fault_tolerance_dir, conf.FILE_GROUP)

    perf_events_dir = conf.getPerfEventsDir(opts.dataset)
    if opts.enable_perf_event_collection:
        utils.mkdirp(perf_events_dir, conf.FILE_GROUP)

    for i in range(0, int(opts.nmic)):
        meta_dirs.append(conf.getMicSubdir(conf.SG_DATAPATH_VERTEX_ENGINE[i], opts.dataset, "meta", i))
        tile_dirs.append(conf.getMicSubdir(conf.SG_DATAPATH_VERTEX_ENGINE[i], opts.dataset, "tile", i))

    run_on_mic_arg = "1" if opts.run_on_mic else "0"

    in_memory_mode_int = 1 if opts.in_memory_mode else 0

    meta_dirs_string = ":".join(meta_dirs)
    tile_dirs_string = ":".join(tile_dirs)

    edge_engine_to_mic_string = ['%s' % str(a) for a in conf.SG_EDGE_ENGINE_TO_MIC]

    edge_engine_to_mic_arg = ":".join(edge_engine_to_mic_string)

    enable_tile_partitioning_int = 1 if opts.enable_tile_partitioning else 0
    enable_fault_tolerance_int = 1 if opts.fault_tolerant_mode else 0
    enable_perf_event_collection_int = 1 if opts.enable_perf_event_collection else 0
    perfmon = 1 if opts.perfmon == "True" else 0

    # for selective scheduling
    use_selective_scheduling_int = 1 if conf.SG_ALGORITHM_ENABLE_SELECTIVE_SCHEDULING[opts.algorithm] else 0
    if opts.dataset in conf.SG_DATASET_DISABLE_SELECTIVE_SCHEDULING:
        use_selective_scheduling_int = 0

    # For pinning, count threads and determine if we need to use smt or not.
    count_tile_readers = conf.SG_NREADER_MIC if opts.run_on_mic else conf.SG_NREADER
    count_tile_processors = conf.SG_NPROCESSOR_MIC if opts.run_on_mic else conf.SG_NPROCESSOR

    # Bug fix: opts.nmic is int()-converted everywhere else in this file,
    # so convert before dividing.
    edge_engine_per_socket = int(opts.nmic) / topo.NUM_SOCKET

    count_threads_per_edge_engine = opts.count_indexreader + opts.count_vertex_fetcher + opts.count_vertex_reducer + count_tile_readers + count_tile_processors
    count_threads_per_socket = count_threads_per_edge_engine * edge_engine_per_socket + opts.count_globalreducer / topo.NUM_SOCKET

    if opts.local_fetcher_mode == "GlobalFetcher":
        # Bug fix: this previously incremented an undefined name
        # `count_threads`, raising NameError whenever GlobalFetcher mode
        # was selected.  The global fetchers contribute to the per-socket
        # thread count used for the SMT decision below.
        count_threads_per_socket += opts.count_globalfetcher
    use_smt_int = 1 if count_threads_per_socket >= topo.NUM_PHYSICAL_CPU_PER_SOCKET else 0

    args = [
            "--algorithm", opts.algorithm,
            "--max-iterations", opts.max_iterations,
            "--nmic", opts.nmic,
            "--count-applier", opts.count_applier,
            "--count-globalreducer", opts.count_globalreducer,
            "--count-globalfetcher", opts.count_globalfetcher,
            "--count-indexreader", opts.count_indexreader,
            "--count-vertex-reducer", opts.count_vertex_reducer,
            "--count-vertex-fetcher", opts.count_vertex_fetcher,
            "--count-tile-reader", count_tile_readers,
            "--count-tile-processors", count_tile_processors,
            "--in-memory-mode", in_memory_mode_int,
            "--port", opts.port,
            "--run-on-mic", run_on_mic_arg,
            "--paths-meta", meta_dirs_string,
            "--paths-tile", tile_dirs_string,
            "--path-global", global_dir,
            "--edge-engine-to-mic", edge_engine_to_mic_arg,
            "--use-selective-scheduling", use_selective_scheduling_int,
            "--path-fault-tolerance-output", fault_tolerance_dir,
            "--enable-fault-tolerance", enable_fault_tolerance_int,
            "--enable-tile-partitioning", enable_tile_partitioning_int,
            "--do-perfmon", perfmon,
            "--local-fetcher-mode", opts.local_fetcher_mode,
            "--local-reducer-mode", opts.local_reducer_mode,
            "--global-fetcher-mode", opts.global_fetcher_mode,
            "--enable-perf-event-collection", enable_perf_event_collection_int,
            "--path-perf-events", perf_events_dir,
            "--use-smt", use_smt_int,
            "--host-tiles-rb-size", conf.SG_RB_SIZE_HOST_TILES,
            ]

    if opts.enable_log:
        log_dir = os.path.join(conf.LOG_ROOT, (conf.getWeightedName(opts.dataset, conf.SG_ALGORITHM_WEIGHTED[opts.algorithm])))
        utils.mkdirp(log_dir, conf.FILE_GROUP)
        args = args + ["--log", log_dir]

    if opts.debug:
        b = conf.DBIN_VERTEX_ENGINE
    else:
        b = conf.RBIN_VERTEX_ENGINE

    args = [b] + args

    if opts.gdb:
        args = ["gdb", "--args"] + args

    # We need sudo for scif
    # args = ["sudo", "LD_LIBRARY_PATH=/usr/lib64/:$LD_LIBRARY_PATH"] + args
    # args = ["sudo", "valgrind"] + args
    if opts.run == "perfstat":
        args = ["perf", "stat", "-B", "-e", "cache-references,cache-misses,cycles,instructions,branches,faults,migrations"] + args
    if opts.run == "likwid":
        max_cpu_id = multiprocessing.cpu_count() - 1
        args = ["likwid-perfctr", "-f", "-g", "NUMA", "-g", "L2", "-g", "L2CACHE", "-g", "BRANCH", "-g", "CYCLE_ACTIVITY", "-g", "L3", "-g", "L3CACHE", "-c", "0-%d" % max_cpu_id] + args

    args = ["sudo"] + args

    if not opts.print_only:
        if opts.benchmark_mode:
            with open(os.path.join(conf.LOG_ROOT, 'benchmark.dat'), 'a+') as f:
                values = []
                sargs = ['"%s"' % str(a) for a in args]
                cmd = " ".join(sargs)
                p = _exec_cmd(cmd, subprocess.PIPE)
                target_str = "Time for iteration "
                finish_str = "Finished with execution!"
                flag_continue = True
                # Scrape per-iteration timings from the child's stderr
                # until it announces completion.
                while flag_continue:
                    for l in p.stderr.readlines():
                        l_str = str(l)
                        # Bug fix: use != instead of `is not` — identity
                        # comparison against an int literal is unreliable
                        # (and a SyntaxWarning on modern Pythons).
                        id_end = l_str.find(finish_str)
                        if id_end != -1:
                            flag_continue = False
                            break

                        id_target = l_str.find(target_str)
                        if id_target != -1:
                            performance_str = l_str[id_target + len(target_str):]
                            string_parts = performance_str.strip().split(' ')
                            iteration = int(string_parts[0].strip())
                            iteration_time = float(string_parts[1].rstrip('\\n\''))
                            values.append(iteration_time)

                print(values)
                # Append summary statistics for this run to benchmark.dat.
                sum_values = numpy.sum(numpy.array(values))
                median = numpy.median(numpy.array(values))
                mean = numpy.mean(numpy.array(values))
                stderr = numpy.std(numpy.array(values))
                f.write("\t\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n" % (opts.algorithm,
                        opts.dataset, len(values),
                        sum_values, mean, median, stderr))
        else:
            out_file = utils.getVertexEngineLogName(opts)

            if opts.gdb:
                utils.run(opts.print_only, *args)
            else:
                utils.run_output(opts.print_only, out_file, *args)
def generate_graph(opts):
    """Generate an rmat graph with distributed edge generators and a tiler.

    Kills stale generator/tiler processes, optionally deploys the
    generator binary to the MICs, (re)creates the partition/meta/tile
    directory layout, starts one edge generator per MIC (locally or via
    sshpass on the MICs) and finally runs the host-side tiler.
    """
    def run(args):
        utils.run(opts.print_only, "time", *args)

    # make sure proper config is loaded
    run(["sudo", "./startup-config"])
    # kill everything first:
    cmd = "sudo killall grc-rmat-generator grc-rmat-tiler"
    os.system(cmd)

    # opts.nmic may arrive as a string (it is int()-converted everywhere
    # else in this file); normalize once up front.
    nmic = int(opts.nmic)

    if opts.run_on_mic:
        for mic_index in range(0, nmic):
            cmd = "sshpass -p phi ssh root@mic%s killall grc-rmat-generator" % (mic_index)
            print(cmd)
            os.system(cmd)

    if opts.debug:
        grc_tiler = conf.DBIN_RMAT_TILER
        grc_edge_generator = conf.DBIN_RMAT_GENERATOR
    else:
        grc_tiler = conf.RBIN_RMAT_TILER
        grc_edge_generator = conf.RBIN_RMAT_GENERATOR

    # copy binary if running on mic:
    if opts.run_on_mic:
        if opts.debug:
            grc_edge_generator = os.path.join(conf.BUILD_ROOT, "Debug-k1om/tools/grc/grc-rmat-generator")
        else:
            grc_edge_generator = os.path.join(conf.BUILD_ROOT, "Release-k1om/tools/grc/grc-rmat-generator")
        for mic_index in range(0, nmic):
            cmd = "sshpass -p phi scp %s root@mic%s:" % (grc_edge_generator, mic_index)
            os.system(cmd)

    # populate hashed dirs
    num_dir = conf.SG_NUM_HASH_DIRS

    partition_dirs = []
    meta_dirs = []
    tile_dirs = []
    global_dir = conf.getGlobalsDir(opts.dataset, False)

    # remove in degree-generation-phase:
    if opts.phase == "generate_vertex_degrees":
        shutil.rmtree(global_dir, True)
        utils.mkdirp(global_dir, conf.FILE_GROUP)

    for i in range(0, len(conf.SG_GRC_PARTITION_DIRS)):
        partition_dir = conf.getGrcPartitionDir(opts.dataset, i, False)
        meta_dir = conf.getGrcMetaDir(opts.dataset, i, False)
        tile_dir = conf.getGrcTileDir(opts.dataset, i, False)

        # remove in degree-generation-phase:
        if opts.phase == "generate_vertex_degrees":
            shutil.rmtree(partition_dir, True)
            utils.mkdirp(partition_dir, conf.FILE_GROUP)
            utils.populate_hash_dirs(num_dir, partition_dir)

        shutil.rmtree(meta_dir, True)
        shutil.rmtree(tile_dir, True)

        utils.mkdirp(meta_dir, conf.FILE_GROUP)
        utils.mkdirp(tile_dir, conf.FILE_GROUP)

        utils.populate_hash_dirs(num_dir, meta_dir)
        utils.populate_hash_dirs(num_dir, tile_dir)

        partition_dirs.append(partition_dir)
        meta_dirs.append(meta_dir)
        tile_dirs.append(tile_dir)

    use_rle_int = 0
    if opts.use_rle:
        use_rle_int = 1
    run_on_mic_int = 0
    if opts.run_on_mic:
        run_on_mic_int = 1

    count_vertices = conf.SG_GRAPH_SETTINGS_RMAT[opts.dataset]["count_vertices"]
    count_edges = conf.SG_GRAPH_SETTINGS_RMAT[opts.dataset]["count_edges"]

    # the degree-phase doesn't need too many partition-managers:
    if opts.phase == "generate_vertex_degrees":
        opts.count_partition_managers = conf.SG_GRC_RMAT_TILER_DEGREES_NPARTITION_MANAGERS
    else:
        opts.count_partition_managers = conf.SG_GRC_RMAT_TILER_TILING_NPARTITION_MANAGERS

    # start MIC-components: split the edge range evenly across the
    # generators (integer division keeps the offsets integral on Py3).
    edges_per_mic = count_edges // nmic
    for mic_index in range(0, nmic):
        edges_start = edges_per_mic * mic_index
        edges_end = edges_per_mic * (mic_index + 1)
        port = opts.base_port + mic_index * 100
        args_rmat_generator = [
                "--port", port,
                "--edges-start", edges_start,
                "--edges-end", edges_end,
                "--count-threads", opts.count_generator_threads,
                "--count-vertices", count_vertices,
                "--count-partition-managers", opts.count_partition_managers,
                "--generator-phase", opts.phase
            ]
        if opts.run_on_mic:
            args_rmat_generator = ["nohup", "./grc-rmat-generator"] + args_rmat_generator + [
                    ">", "rmat-generator.out",
                    "2>", "rmat-generator.err",
                    "<", "/dev/null", "&"]
            utils.run_sshpass(opts.print_only, "phi", "root", "mic%s" % (mic_index), *args_rmat_generator)
        else:
            utils.run_background(opts.print_only, "sudo", grc_edge_generator, *args_rmat_generator)

    # Give the generators a moment to come up before starting the tiler.
    time.sleep(3)

    # start host-components:
    args_rmat_tiler = [
            grc_tiler,
            "--graphname", opts.dataset,
            "--base-port", opts.base_port,
            "--count-partition-managers", opts.count_partition_managers,
            "--count-threads", opts.count_threads,
            "--count-vertices", count_vertices,
            "--count-edges", count_edges,
            "--generator-phase", opts.phase,
            "--path-global", global_dir,
            "--paths-meta", ":".join(meta_dirs),
            "--paths-tile", ":".join(tile_dirs),
            "--paths-partition", ":".join(partition_dirs),
            "--count-edge-generators", opts.nmic,
            "--use-run-length-encoding", use_rle_int,
            "--run-on-mic", run_on_mic_int
        ]
    if opts.gdb_tiler:
        args_rmat_tiler = ["gdb", "--args"] + args_rmat_tiler

    utils.run(opts.print_only, "time", "sudo", *args_rmat_tiler)