Example #1
def run_simics(
    checkpoint,
    workload_name,
    transactions,
    protocol={"name": "test"},
    processors=16,
    smt_threads=1,
    phy_signature="Perfect_",
    virtual_signature="Perfect_",
    summary_signature="Perfect_",
    xact_max_depth="1",
    microbenchmark=0,
    mbench_arg_prefix="",
    mbench_arg_string="",
    bench_arg=0,
    condor_process=os.getpid(),
    condor_cluster=os.getppid(),
    expected_ruby_cycles=0,
    tolerance=.05,
    check_opal=0,
    check_ruby=0,
):
    # use SPARC V8 directory
    name = protocol["name"]
    output = tools.run_command(
        "scripts/prepare_simics_home.sh simics/home/%s" % name)
    if "specific_processor_count" in protocol:
        processors = protocol["specific_processor_count"]
    procs_per_chip = protocol.get("procs_per_chip", 1)
    l2_cache_banks = protocol.get("specific_cache_count",
                                  (processors / smt_threads))
    eager_cd = protocol.get("eager_cd", 1)
    eager_vm = protocol.get("eager_vm", 1)
    magic_waiting = protocol.get("magic_waiting", 0)
    no_backoff = protocol.get("no_backoff", 0)

    bandwidth = protocol.get("bandwidth", 6400)
    if procs_per_chip > processors:
        procs_per_chip = 1

    log_message(
        "Running simics: checkpoint=%s, processors=%s, smt_threads=%s, procs_per_chip=%d, transactions=%d, protocol: %s"
        % (checkpoint, processors, smt_threads, procs_per_chip, transactions,
           name))

    # prepare environment variables for running simics
    env_dict = workloads.prepare_env_dictionary(simics=0)
    workloads.set_var(env_dict, "RESULTS_DIR", "../../../results")
    workloads.set_var(env_dict, "WORKLOAD", workload_name)
    workloads.set_var(env_dict, "CHECKPOINT", checkpoint)
    workloads.set_var(env_dict, "CHECKPOINT_DIR", " ")
    workloads.set_var(env_dict, "PROTOCOL", name)
    workloads.set_var(env_dict, "PROCESSORS", processors)
    workloads.set_var(env_dict, "CHIPS", 1)
    workloads.set_var(env_dict, "SMT_THREADS", smt_threads)
    workloads.set_var(env_dict, "PROCS_PER_CHIP", procs_per_chip)
    workloads.set_var(env_dict, "NUM_L2_BANKS", l2_cache_banks)
    workloads.set_var(env_dict, "TRANSACTIONS", transactions)
    workloads.set_var(env_dict, "BANDWIDTH", bandwidth)
    if (g_random_seed != "random"):
        workloads.set_var(env_dict, "RANDOM_SEED", g_random_seed)
    workloads.set_var(env_dict, "CONDORCLUSTER", condor_cluster)
    workloads.set_var(env_dict, "CONDORPROCESS", condor_process)

    # Transactional Memory variables
    workloads.set_var(env_dict, "MICROBENCH_DIR",
                      "microbenchmarks/transactional")
    workloads.set_var(env_dict, "BENCHMARK", workload_name)
    workloads.set_var(env_dict, "MAX_DEPTH", xact_max_depth)
    workloads.set_var(env_dict, 'LOCK_TYPE', "TM")
    workloads.set_var(env_dict, 'READ_WRITE_FILTER', phy_signature)
    workloads.set_var(env_dict, 'VIRTUAL_READ_WRITE_FILTER', virtual_signature)
    workloads.set_var(env_dict, 'SUMMARY_READ_WRITE_FILTER', summary_signature)
    workloads.set_var(env_dict, "XACT_EAGER_CD", eager_cd)
    if (eager_vm == 0):
        workloads.set_var(env_dict, "XACT_LAZY_VM", 1)
    else:
        workloads.set_var(env_dict, "XACT_LAZY_VM", 0)
    workloads.set_var(env_dict, "ENABLE_MAGIC_WAITING", magic_waiting)
    workloads.set_var(env_dict, "XACT_NO_BACKOFF", no_backoff)

    # set per-microbenchmark specific variables
    #if ((workload_name == "compensation") or (workload_name == "commit-action")
    #      or (workload_name == "isolation-test") or (workload_name == "logging-test")
    #      or (workload_name == "partial-rollback")):
    #    workloads.set_var(env_dict, "MICROBENCH_DIR", "microbenchmarks/transactional/test")
    workloads.set_var(env_dict, 'MBENCH_ARG_PREFIX', mbench_arg_prefix)
    workloads.set_var(env_dict, 'MBENCH_ARG_STRING', mbench_arg_string)

    workloads.update_system_env(env_dict)

    # create results directory
    output = tools.run_command("/bin/rm -rf results")

    mbench_arg_prefix = workloads.get_var(env_dict, 'MBENCH_ARG_PREFIX')
    os.mkdir("results")
    if (microbenchmark == 1):
        print "CREATING DIRECTORY results/%s-%s" % (workload_name,
                                                    mbench_arg_prefix)
        os.mkdir("results/%s-%s" % (workload_name, mbench_arg_prefix))
    else:
        os.mkdir("results/" + workload_name)
    print "WORKLOAD NAME %s" % workload_name

    os.chdir("simics/home/%s/" % name)

    # run the microbenchmark script if needed
    if (microbenchmark == 1):
        output = tools.run_command(
            "./simics -echo -verbose -no-log -no-win -x ../../../gen-scripts/microbench.simics",
            "quit 666\n",
            verbose=1,
            max_lines=0)
    else:
        output = tools.run_command(
            "./simics -echo -verbose -no-log -no-win -x ../../../gen-scripts/go.simics",
            "quit 666\n",
            verbose=1)
    #tools.run_command("./simics -echo -verbose -no-log -x ../../../gen-scripts/go.simics", "quit 666\n")
    os.chdir("../../..")

    # dump simics output
    if (microbenchmark == 0):
        simics_output_filename = "results/%s.output" % workloads.get_output_file_name_prefix(
            env_dict)
    else:
        simics_output_filename = "results/%s.output" % workloads.get_microbench_output_file_name_prefix(
            env_dict, 0)
    simics_output = open(simics_output_filename, "w")
    simics_output.write(output)
    simics_output.close()

    if check_ruby == 1 and name != "template":
        if (microbenchmark == 0):
            ruby_stats_filename = "results/%s.stats" % workloads.get_output_file_name_prefix(
                env_dict)
            error_output_filename = "condor/results/%s.error" % (
                workloads.get_output_file_name_prefix(env_dict, 0))
        else:
            ruby_stats_filename = "results/%s.stats" % workloads.get_microbench_output_file_name_prefix(
                env_dict, 0)
            error_output_filename = "condor/results/%s.error" % (
                workloads.get_microbench_output_file_name_prefix(env_dict, 0))
        if (not os.path.exists(ruby_stats_filename)):
            raise RegressionError(
                "Ruby stats output file not present: %s" % ruby_stats_filename,
                output)

        # Check for error file, indicating a SIMICS_ASSERT() failure
        if (os.path.exists(error_output_filename)):
            print "SIMICS ASSERT error!"
            raise RegressionError(
                "SIMICS_ASSERT error file found: %s" % error_output_filename,
                output)

        # get random seed
        simics_output = open(simics_output_filename, "r")
        for line in simics_output.readlines():
            if re.search("g_RANDOM_SEED", line):
                tokens = line.split()
                log_message("  Random seed: %d" % int(tokens[4][:-1]))
        # get ruby cycle
        ruby_stats = open(ruby_stats_filename, "r")
        ruby_cycles = 0
        for line in ruby_stats.readlines():
            line_elements = line.split()
            if len(line_elements) > 1 and line_elements[0] == "Ruby_cycles:":
                ruby_cycles = int(line_elements[1])
        if (ruby_cycles == 0):
            raise RegressionError(
                "Ruby_cycles not found from the output file: %s" %
                ruby_stats_filename, output)
        else:
            log_message("  Ruby_cycles: %d" % ruby_cycles)
        if (expected_ruby_cycles != 0):
            cycle_ratio = 1.0 * ruby_cycles / expected_ruby_cycles
            if cycle_ratio < (1.0 - tolerance) or cycle_ratio > (1.0 + tolerance):
                log_message(
                    "  Checking ruby_cycles - ratio is %f: OUT OF RANGE" %
                    cycle_ratio)
                log_error(
                    "ERROR: Ruby_cycles not within tolerances.  expected %d, actual %d"
                    % (expected_ruby_cycles, ruby_cycles))
            else:
                log_message("  Checking ruby_cycles - ratio is %f: OK" %
                            cycle_ratio)

    if check_opal == 1:
        opal_log_filename = "results/%s.opal" % workloads.get_output_file_name_prefix(
            env_dict)
        if (not os.path.exists(opal_log_filename)):
            raise RegressionError(
                ("Opal log file not present: %s" % opal_log_filename), output)
        # check opal correct rate!
        else:
            opal_log = open(opal_log_filename)
            processor_total_instructions = 1001  # > 1000
            processor_correct_rate = 98  # < 99
            for line in opal_log.readlines():
                tokens = line.split()
                # remember the correct rate
                if (len(tokens) == 5 and tokens[1] == "Percent"
                        and tokens[2] == "correct"):
                    processor_correct_rate = float(tokens[4])
                # remember the processor's commit instruction number
                if (len(tokens) == 6 and tokens[1] == "Total"
                        and tokens[2] == "number" and tokens[3] == "of"
                        and tokens[4] == "instructions"):
                    processor_total_instructions = int(tokens[5])
                    # check the correct rate here since the total instruction
                    # number comes last during the scan of the output file
                    if (processor_correct_rate < 99
                            and processor_total_instructions > 1000):
                        raise RegressionError((
                            "Opal correct rate too low (%f%% of %d instructions)!"
                            % (processor_correct_rate,
                               processor_total_instructions)), output)
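
When check_ruby is enabled, the core of Example #1 is the scan of the Ruby stats file followed by a relative-tolerance comparison against expected_ruby_cycles. The sketch below restates that logic as a self-contained helper; the function names and the asserts are illustrative only and are not part of the harness above.

def read_ruby_cycles(stats_path):
    # Scan the stats file for a line of the form "Ruby_cycles: <count>".
    with open(stats_path) as stats:
        for line in stats:
            fields = line.split()
            if len(fields) > 1 and fields[0] == "Ruby_cycles:":
                return int(fields[1])
    return 0  # same "not found" sentinel as above

def cycles_within_tolerance(actual_cycles, expected_cycles, tolerance=0.05):
    # Accept the run if actual/expected lies inside [1 - tolerance, 1 + tolerance].
    ratio = float(actual_cycles) / expected_cycles
    return (1.0 - tolerance) <= ratio <= (1.0 + tolerance)

# A 4% deviation passes the default 5% tolerance; a 20% deviation does not.
assert cycles_within_tolerance(10400, 10000)
assert not cycles_within_tolerance(12000, 10000)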
Example #2
for opal_config in config.opal_config_file_list:
    print "  Creating %s, %s" % (module, protocol_option)
    for bandwidth in config.bandwidth_list:
        condor_file.write("## run: %s module, %s opal, %d chips, %d procs per chip, %d smt threads, %s protocol option, %d bandwidth\n" %
                          (protocol, opal_config, chips, procs_per_chip, smt_threads, protocol_option, bandwidth))
        condor_file.write("\n")

        req = " && ".join(requirement_list)

        condor_file.write("requirements = %s\n" % req)
        condor_file.write("\n")

        ## Environment - this is how we pass parameters to simics
        env_dict = workloads.prepare_env_dictionary(simics=0)
        if config.random_seed:
            workloads.set_var(env_dict, 'RANDOM_SEED', config.random_seed)

        workloads.set_var(env_dict, 'OPAL_CONFIG_NAME', opal_config[0])
        workloads.set_var(env_dict, 'OPAL_CONFIG_FILE', opal_config[1])

        # for the checkpoint, we have to extract the checkpoint storage
        # directory from the checkpoints configuration file
        lines = grep("../checkpoints/workload.conf", "WORKLOAD_CHECKPOINT_DIR=")
        if len(lines) != 1:
            print "The workload checkpoint directory could not be determined"
            sys.exit(1)
        workload_check_dir = lines[0].split("=")[1]

        if protocol_option:
            workloads.set_var(env_dict, 'PROTOCOL_OPTION', protocol_option)
        if warmup_file:
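
Example #2 calls a project-local grep() helper to pull the WORKLOAD_CHECKPOINT_DIR= line out of the checkpoints configuration file; that helper is not shown on this page. A minimal sketch, assuming grep(filename, pattern) simply returns the matching lines, might look like this:

import re

def grep(filename, pattern):
    # Assumed behaviour: return every line of the file that matches the pattern,
    # so the caller above can insist on exactly one WORKLOAD_CHECKPOINT_DIR= line.
    matches = []
    with open(filename) as f:
        for line in f:
            if re.search(pattern, line):
                matches.append(line.rstrip("\n"))
    return matches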