Example #1
def _main():
    isas = list(
        itertools.product(TOPLEVELS, OPCODE_TYPES, INSTR_TYPES,
                          TERMINATE_TYPES))
    print(LINE_SEP)
    print(green("Launching %d experiments ..." % (len(isas) * len(RUNS))))
    print(LINE_SEP)

    # create a temp dir to store config files
    try:
        tmp_dir = os.path.join(os.getcwd(), "tmp%d")
        i = 0
        while os.path.isdir(tmp_dir % i):
            i += 1
        tmp_dir = tmp_dir % i
        os.mkdir(tmp_dir)

        # create config files on the fly and launch experiments
        for toplevel, opcode_type, instr_type, terminate_type in isas:
            for run in RUNS:
                # craft config dictionary
                cdict = copy.deepcopy(CONFIG_DICT)

                # Set experiment name
                experiment_name = EXPERIMENT_BASE_NAME % (
                    toplevel, opcode_type, instr_type, terminate_type, run)
                experiment_name = experiment_name.replace("_", "-")
                # print(experiment_name)
                # continue
                cdict["experiment_name"] = experiment_name
                cdict["toplevel"] = toplevel

                # Set configurations
                cdict["fuzzer_params"]["duration_mins"] = DURATION_MINS
                cdict["model_params"]["opcode_type"] = opcode_type
                cdict["model_params"]["instr_type"] = instr_type
                if terminate_type == "invalidop":
                    cdict["model_params"]["terminate_on_invalid_opcode"] = 1
                else:
                    cdict["model_params"]["terminate_on_invalid_opcode"] = 0

                # write to HJSON file
                hjson_filename = experiment_name + ".hjson"
                hjson_file_path = os.path.join(tmp_dir, hjson_filename)
                with open(hjson_file_path, "w") as fp:
                    hjson.dump(cdict, fp)

                # launch fuzzing of the DUT
                fuzz(["--fail-silently", hjson_file_path])

                # cleanup config file
                os.remove(hjson_file_path)

    finally:
        for directory in glob.glob("tmp*"):
            shutil.rmtree(directory)

    print(LINE_SEP)
    print(green("DONE!"))
    print(LINE_SEP)
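
These examples rely on a few shared helpers (green, yellow, red, LINE_SEP) whose definitions are not shown. A minimal sketch, assuming they are plain ANSI color wrappers; the real project may define them differently:

LINE_SEP = "=" * 80  # assumed separator; the actual width/character is unknown

def _color(text, code):
    # Wrap text in an ANSI escape sequence.
    return "\033[%dm%s\033[0m" % (code, text)

def red(text):
    return _color(text, 31)

def green(text):
    return _color(text, 32)

def yellow(text):
    return _color(text, 33)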
Example #2
def build_docker_image(config):
    """Creates docker image containing DUT to fuzz."""
    if not config.args.silent:
        print(LINE_SEP)
        print("Building Docker image to fuzz %s ..." % config.toplevel)
        print(LINE_SEP)
    # Set Dockerfile path
    if config.soc == "other":
        dockerfile_path = "%s/hw/other/%s" % (config.root_path,
                                              config.toplevel)
    else:
        dockerfile_path = "%s/hw/%s" % (config.root_path, config.soc)
    # Build command
    cmd = [
        "docker", "build", "--build-arg",
        "TOPLEVEL=%s" % config.toplevel, "--build-arg",
        "TB_TYPE=%s" % config.tb_type, "--build-arg",
        "FUZZER=%s" % config.fuzzer, "--build-arg",
        "VERSION=%s" % config.version, "-t", config.docker_image,
        dockerfile_path
    ]
    error_str = "ERROR: image build FAILED. Terminating experiment!"
    run_cmd(cmd, error_str, silent=config.args.silent)
    if not config.args.silent:
        print(green("IMAGE BUILD SUCCESSFUL -- Done!"))
Example #3
def load_bb_data(data_root):
    print(yellow("Loading data ..."))
    exp2data = {}

    # TODO: change this to automatically extract names from a single exp. number
    # extract each data file into a Pandas dataframe
    exp_combos = list(itertools.product(STATES, WIDTHS, EXP_BASE_NAMES))
    for num_states, width, exp_base_name in exp_combos:
        for trial in TRIALS:
            # Build complete path to data files
            exp_name_wo_trialnum = exp_base_name % (num_states, width)
            exp_name = "%s-%d" % (exp_name_wo_trialnum, trial)
            data_path = os.path.join(data_root, exp_name)

            # Extract experiment info.
            exp_name_list = exp_name.split("-")
            instr_type = exp_name_list[6]
            if len(exp_name_list) > 9:
                fs_opt = True
            else:
                fs_opt = False

            # Load fuzzing data into an object
            exp2data[exp_name_wo_trialnum] = FuzzingData(
                num_states, width, instr_type, fs_opt, trial, data_path)
    print(green("Done."))
    print(LINE_SEP)
    return exp2data
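
The indexing above (exp_name_list[6], len > 9) assumes a fixed hyphen-delimited experiment-name layout defined by EXP_BASE_NAMES. The layouts below are purely hypothetical, shown only to illustrate the parsing:

# Hypothetical name layouts -- the real ones come from EXP_BASE_NAMES.
name_no_opt = "hwf-lock-16s-4w-cpp-afl-variable-0"
name_fs_opt = "hwf-lock-16s-4w-cpp-afl-variable-fs-opt-0"
print(name_no_opt.split("-")[6])        # "variable" -> the instr_type field
print(len(name_no_opt.split("-")) > 9)  # False -> fork server opt. disabled
print(len(name_fs_opt.split("-")) > 9)  # True  -> fork server opt. enabled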
Example #4
def check_num_active_vm_instances(config):
  """Checks number of active VM instances on GCE as a $$$ safety measure."""
  if not config.args.silent:
    print(LINE_SEP)
    print("Checking number of active VMs on GCE ...")
    print(LINE_SEP)
  cmd = [
      "gcloud", "compute", "instances", "list",
      "--zones=%s" % config.gcp_params["zone"]
  ]
  proc = subprocess.Popen(cmd,
                          stdin=subprocess.PIPE,
                          stdout=subprocess.PIPE,
                          stderr=subprocess.STDOUT,
                          close_fds=True)
  num_active_vm_instances = -1  # first line is header
  while True:
    line = proc.stdout.readline()
    if not line:
      break
    num_active_vm_instances += 1
  if num_active_vm_instances < config.args.max_vm_instances:
    if not config.args.silent:
      print(green("%d active VM(s)" % num_active_vm_instances))
  else:
    if not config.args.silent:
      print(red("%d active VM(s)" % num_active_vm_instances))
      print(
          yellow("waiting %d seconds and trying again ..." %
                 config.args.vm_launch_wait_time_s))
  return num_active_vm_instances
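
The readline loop above could equivalently capture the command's output in one call; a sketch using subprocess.run (Python 3.7+) with the same header-discounting logic (the helper name is an assumption):

import subprocess

def count_active_vm_instances(zone):
    # Capture `gcloud compute instances list` output and count data rows.
    cmd = ["gcloud", "compute", "instances", "list", "--zones=%s" % zone]
    proc = subprocess.run(cmd, capture_output=True, text=True, check=True)
    lines = proc.stdout.splitlines()
    return max(len(lines) - 1, 0)  # first line is the column header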
Example #5
def compute_instr_type_mann_whitney(instr_rts):
  print(
      yellow(
          "Computing Mann-Whitney U-test on instrumentation complexity data ..."
      ))
  for num_states in STATES:
    sub_rt_df = instr_rts[instr_rts[NUM_STATES_LABEL] == num_states]
    full_instr_data = sub_rt_df[sub_rt_df[INSTR_TYPE_LABEL] ==
                                INSTR_TYPE_MAPPINGS["full"]][RUN_TIME_LABEL]
    duttb_instr_data = sub_rt_df[sub_rt_df[INSTR_TYPE_LABEL] ==
                                 INSTR_TYPE_MAPPINGS["duttb"]][RUN_TIME_LABEL]
    dut_instr_data = sub_rt_df[sub_rt_df[INSTR_TYPE_LABEL] ==
                               INSTR_TYPE_MAPPINGS["dut"]][RUN_TIME_LABEL]
    # mw_full_duttb = stats.mannwhitneyu(full_instr_data, duttb_instr_data)
    mw_full_dut = stats.mannwhitneyu(full_instr_data, dut_instr_data)
    # mw_duttb_dut = stats.mannwhitneyu(duttb_instr_data, dut_instr_data)
    print("%d States - Mann-Whitney:" % num_states)
    # print(
    # "\t%s vs. %s:" %
    # (INSTR_TYPE_MAPPINGS["full"], INSTR_TYPE_MAPPINGS["duttb"]),
    # mw_full_duttb)
    print(
        "\t%s vs. %s:" %
        (INSTR_TYPE_MAPPINGS["full"], INSTR_TYPE_MAPPINGS["dut"]), mw_full_dut)
    # print(
    # "\t%s vs. %s:" %
    # (INSTR_TYPE_MAPPINGS["duttb"], INSTR_TYPE_MAPPINGS["dut"]),
    # mw_duttb_dut)
  print(green("Done."))
  print(LINE_SEP)
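
For reference, scipy.stats.mannwhitneyu returns a statistic and a p-value; a minimal self-contained example (depending on the SciPy version, you may want to pass alternative="two-sided" explicitly, as older releases defaulted to a one-sided test):

from scipy import stats

a = [10.1, 9.8, 10.4, 10.0, 9.9]
b = [12.3, 11.9, 12.5, 12.1, 12.0]
result = stats.mannwhitneyu(a, b, alternative="two-sided")
print(result.statistic, result.pvalue)  # small p-value: distributions differ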
Example #6
def build_bbs_df(exp2data):
    print(yellow("Building basic block stats dataframe ..."))
    # Create an empty dictionary that will be used to create a Pandas
    # DataFrame that looks like the following:
    # +---------------------------------------------------+
    # | # states | instrumentation level | # basic blocks |
    # +---------------------------------------------------+
    # |   ...    |          ...          |       ...      |
    bbs_dict = {
        NUM_STATES_LABEL: [],
        INSTR_TYPE_LABEL: [],
        NUM_BB_LABEL: [],
    }
    for exp_name, fd in exp2data.items():
        bbs_dict[NUM_STATES_LABEL].extend([fd.num_states] * 2)
        # bbs_dict[INSTR_TYPE_LABEL].extend(["Simulation Engine", "TB", "DUT"])
        bbs_dict[INSTR_TYPE_LABEL].extend(["Simulation Engine + TB", "DUT"])
        # bbs_dict[NUM_BB_LABEL].append(fd.vltrt_bbs)
        # bbs_dict[NUM_BB_LABEL].append(fd.tb_bbs)
        bbs_dict[NUM_BB_LABEL].append(fd.vltrt_bbs + fd.tb_bbs)
        bbs_dict[NUM_BB_LABEL].append(fd.dut_bbs)
        # bbs_dict[NUM_STATES_LABEL].append(fd.num_states)
        # bbs_dict[INSTR_TYPE_LABEL].append(INSTR_TYPE_MAPPINGS[fd.instr_type])
        # if fd.instr_type == "full":
        # bbs_dict[NUM_BB_LABEL].append(fd.full_bbs)
        # elif fd.instr_type == "duttb":
        # bbs_dict[NUM_BB_LABEL].append(fd.duttb_bbs)
        # elif fd.instr_type == "dut":
        # bbs_dict[NUM_BB_LABEL].append(fd.dut_bbs)
        # else:
        # print(red("ERROR: unknown instrumentation type."))
        # sys.exit(1)
    print(green("Done."))
    print(LINE_SEP)
    return pd.DataFrame.from_dict(bbs_dict)
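
The dictionary-of-lists pattern used throughout these examples maps directly onto pd.DataFrame.from_dict, where each key becomes a column; a tiny illustration with made-up values:

import pandas as pd

bbs = {
    "# states": [16, 16],
    "instrumentation level": ["Simulation Engine + TB", "DUT"],
    "# basic blocks": [1200, 300],  # made-up counts
}
print(pd.DataFrame.from_dict(bbs))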
Example #7
def _check_simulation_results(encrypt_results,
                              decrypt_results,
                              test_pair,
                              verbose=False):
    error = False
    for i in range(len(test_pair.encrypt.data_in_line_starts)):
        # Extract plaintexts/ciphertexts
        with open(encrypt_results, "r") as fp:
            log_lines = fp.readlines()
            encrypt_in_data = _get_crypt_io_data_blocks(
                log_lines, test_pair.encrypt.data_in_line_starts[i],
                test_pair.encrypt.data_block_size)
            encrypt_out_data = _get_crypt_io_data_blocks(
                log_lines, test_pair.encrypt.data_out_line_starts[i],
                test_pair.encrypt.data_block_size)
        with open(decrypt_results, "r") as fp:
            log_lines = fp.readlines()
            decrypt_in_data = _get_crypt_io_data_blocks(
                log_lines, test_pair.decrypt.data_in_line_starts[i],
                test_pair.decrypt.data_block_size)
            decrypt_out_data = _get_crypt_io_data_blocks(
                log_lines, test_pair.decrypt.data_out_line_starts[i],
                test_pair.decrypt.data_block_size)
        # Check all data blocks are the same length
        if (len(encrypt_in_data) != len(encrypt_out_data)
                or len(encrypt_in_data) != len(decrypt_in_data)
                or len(encrypt_in_data) != len(decrypt_out_data)):
            error = True
            error_msg = "data LENGTH MISMATCH for encrypt/decrypt data blocks."
        if not error:
            for j in range(len(encrypt_out_data)):
                if verbose:
                    print(
                        "\n   Encrypt Out =  0x{:0>8X}; Decrypt In = 0x{:0>8X}"
                        .format(encrypt_out_data[j], decrypt_in_data[j]))
                if encrypt_out_data[j] != decrypt_in_data[j]:
                    error_msg = "encryption OUTPUT does not match decryption INPUT."
                    error = True
                    break
        if verbose:
            print("   --------------------------------------------")
        if not error:
            for j in range(len(encrypt_in_data)):
                if verbose:
                    print(
                        "   Encrypt In =  0x{:0>8X}; Decrypt Out = 0x{:0>8X}".
                        format(encrypt_in_data[j], decrypt_out_data[j]))
                if encrypt_in_data[j] != decrypt_out_data[j]:
                    error_msg = "encryption INPUT does not match decryption OUTPUT."
                    error = True
                    break
        if verbose:
            print("   --------------------------------------------")
        # If an error occurred during any test, terminate
        if error:
            break
    # Print a color-coded message with results
    if not error:
        print(green("PASS"))
    else:
        print("".join([red("ERROR"), ": ", error_msg]))
Example #8
def build_fs_opt_rts_df(exp2data):
  print(yellow("Building fork server optimization dataframe ..."))
  FS_OPT_BASELINE = True
  # Create an empty dictionary that will be used to create a Pandas
  # DataFrame that looks like the following:
  # +----------------------------------------------------+
  # | # states | fork server optimization? | runtime (s) |
  # +----------------------------------------------------+
  # |   ...    |            ...            |     ...     |
  runtimes_dict = {
      NUM_STATES_LABEL: [],
      OPT_TYPE_LABEL: [],
      RUN_TIME_LABEL: [],
  }
  # Aggregate data into a dictionary
  exp2rts = _aggregrate_fs_opt_rts(exp2data)
  # Compute scale factors for each set of num_states experiments
  states2scales = {}
  for (num_states, instr_type, fs_opt), runtimes in exp2rts.items():
    if instr_type == "full" and fs_opt is FS_OPT_BASELINE:
      scale_factor = np.median(runtimes)
      states2scales[num_states] = scale_factor
  # Build the dataframe for plotting
  for (num_states, instr_type, fs_opt), runtimes in exp2rts.items():
    runtimes = list(map(lambda x: x / states2scales[num_states], runtimes))
    runtimes_dict[NUM_STATES_LABEL].extend([num_states] * len(runtimes))
    runtimes_dict[OPT_TYPE_LABEL].extend([OPT_TYPE_MAPPINGS[fs_opt]] *
                                         len(runtimes))
    runtimes_dict[RUN_TIME_LABEL].extend(runtimes)
  print(green("Done."))
  print(LINE_SEP)
  return pd.DataFrame.from_dict(runtimes_dict)
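
The normalization above divides every runtime by the median of the baseline configuration, so the baseline lands at 1.0; the same step in isolation:

import numpy as np

baseline_runtimes = [12.0, 10.0, 11.0, 13.0]
scale = np.median(baseline_runtimes)              # 11.5
scaled = [rt / scale for rt in baseline_runtimes]
print(scaled)  # values cluster around 1.0 by construction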
Example #9
def build_instr_complex_rts_df(exp2data):
  print(yellow("Building instruction complexity dataframe ..."))
  INSTR_TYPE_BASELINE = "dut"
  # Create an empty dictionary that will be used to create a Pandas
  # DataFrame that looks like the following:
  # +------------------------------------------------+
  # | # states | instrumentation level | runtime (s) |
  # +------------------------------------------------+
  # |   ...    |          ...          |     ...     |
  runtimes_dict = {
      NUM_STATES_LABEL: [],
      INSTR_TYPE_LABEL: [],
      RUN_TIME_LABEL: [],
  }
  # Aggregate data into a dictionary
  exp2rts = _aggregrate_instr_complex_rts(exp2data)
  # Compute scale factors for each set of num_states experiments
  states2scales = {}
  for (num_states, instr_type, fs_opt), runtimes in exp2rts.items():
    if instr_type == INSTR_TYPE_BASELINE and fs_opt is False:
      scale_factor = np.median(runtimes)
      states2scales[num_states] = scale_factor
  # Build the dataframe for plotting
  for (num_states, instr_type, fs_opt), runtimes in exp2rts.items():
    runtimes = list(map(lambda x: x / states2scales[num_states], runtimes))
    runtimes_dict[NUM_STATES_LABEL].extend([num_states] * len(runtimes))
    runtimes_dict[INSTR_TYPE_LABEL].extend([INSTR_TYPE_MAPPINGS[instr_type]] *
                                           len(runtimes))
    runtimes_dict[RUN_TIME_LABEL].extend(runtimes)
  print(green("Done."))
  print(LINE_SEP)
  return pd.DataFrame.from_dict(runtimes_dict)
Example #10
def plot_avg_coverage_vs_time_broken(hwf_cov_df, rfuzz_cov_df, time_units="m"):
    print(yellow("Generating plot ..."))

    # Set plot style and extract only HDL line coverage
    # sns.set_theme(context="notebook", style="darkgrid")
    hdl_cov_df = pd.concat([hwf_cov_df, rfuzz_cov_df])

    # create subplots
    fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True, figsize=(6, 4))

    # create figure and plot the data
    # fig, ax = plt.subplots(1, 1, figsize=(6, 4))
    sns.lineplot(data=hdl_cov_df,
                 x=TIME_LABEL,
                 y=COVERAGE_LABEL,
                 hue=TOPLEVEL_LABEL,
                 style=FUZZER_LABEL,
                 ax=ax1)
    sns.lineplot(data=hdl_cov_df,
                 x=TIME_LABEL,
                 y=COVERAGE_LABEL,
                 hue=TOPLEVEL_LABEL,
                 style=FUZZER_LABEL,
                 ax=ax2)

    # set axis ranges
    ax1.set_ylim(0.3, 1.0)
    ax2.set_ylim(0, 0.05)

    # hide the spines between the two axes
    ax1.spines['bottom'].set_visible(False)
    ax2.spines['top'].set_visible(False)
    ax1.xaxis.tick_top()
    ax1.tick_params(labeltop=False)
    ax2.xaxis.tick_bottom()

    # format the plot
    if time_units == "m":
        time_units_label = "min."
    elif time_units == "h":
        time_units_label = "hours"
    else:
        time_units_label = "s"
    # ax1.set_xlabel(TIME_LABEL + " (%s)" % time_units_label,
    # fontsize=LABEL_FONT_SIZE)
    # ax1.set_ylabel("HDL Line " + COVERAGE_LABEL, fontsize=LABEL_FONT_SIZE)
    # ax1.tick_params("x", labelsize=TICK_FONT_SIZE)
    # ax1.tick_params("y", labelsize=TICK_FONT_SIZE)
    # plt.legend(fontsize=LEGEND_FONT_SIZE,
    # title_fontsize=LEGEND_TITLE_FONT_SIZE,
    # bbox_to_anchor=(1.01, 0.75),
    # loc='upper left')
    plt.tight_layout()

    # save the plot
    plt.savefig(PLOT_FILE_NAME, format=PLOT_FORMAT)
    print(green("Done."))
    print(LINE_SEP)
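
The broken-axis plot above omits the customary diagonal "cut" markers between the two panels; a self-contained sketch of the standard matplotlib recipe for adding them:

import matplotlib.pyplot as plt

fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True)
d = 0.015  # size of the cut markers, in axes coordinates
kwargs = dict(transform=ax1.transAxes, color="k", clip_on=False)
ax1.plot((-d, +d), (-d, +d), **kwargs)          # bottom-left cut, top panel
ax1.plot((1 - d, 1 + d), (-d, +d), **kwargs)    # bottom-right cut, top panel
kwargs.update(transform=ax2.transAxes)
ax2.plot((-d, +d), (1 - d, 1 + d), **kwargs)    # top-left cut, bottom panel
ax2.plot((1 - d, 1 + d), (1 - d, 1 + d), **kwargs)
plt.show()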
Example #11
def _main(argv):
    # Parse command line arguments
    args = _parse_args(argv)

    # Print initial message with # of total tests
    isas = list(itertools.product(OPCODE_TYPES, INSTR_TYPES, TERMINATE_TYPES))
    num_tests = len(isas)
    print(LINE_SEP)
    print(green("Running %d test suites ..." % num_tests))

    # Get list of encryption/decryption test pairs
    yaml_descripts_dir = os.path.join(os.getenv("HW_FUZZING"), "hw",
                                      "opentitan", "aes", "seed_descriptions")
    test_pairs = _extract_test_pairs(yaml_descripts_dir, args.testcase)

    test_suite_num = 1
    for opcode_type, instr_type, terminate_type in isas:
        print(LINE_SEP)
        print("Test Suite #:    ", test_suite_num)
        print("Opcode Type:     ", opcode_type)
        print("Instruction Type:", instr_type)
        print("Termination Type:", terminate_type)
        print(LINE_SEP)

        # Set ISA parameters
        config_dict = _set_isa_params(HJSON_CONFIG_TEMPLATE, opcode_type,
                                      instr_type, terminate_type)

        # Perform E2E encryption/decryption simulation tests on OpenTitan AES block
        for tp in test_pairs:
            try:
                print("(%s)\t- " % tp.test_id, end="")
                sys.stdout.flush()

                # Run simulations in separate thread to show animated waiting spinner
                # TEST_IS_EXECUTING = True
                # t = threading.Thread(target=_waiting_animation)
                # t.daemon = True
                # t.start()
                _run_simulation(config_dict, ENCRYPT_LOG, tp.encrypt)
                _run_simulation(config_dict, DECRYPT_LOG, tp.decrypt)
                # TEST_IS_EXECUTING = False

                # Check simulation results
                _check_simulation_results(ENCRYPT_LOG, DECRYPT_LOG, tp,
                                          args.verbose)

            finally:
                try:
                    os.remove(TMP_HJSON_CONFIG)
                    os.remove(ENCRYPT_LOG)
                    os.remove(DECRYPT_LOG)
                except OSError as e:
                    if e.errno != errno.ENOENT:
                        raise
        test_suite_num += 1
Example #12
def _main():
  print(LINE_SEP)
  print(green("Launching %d experiments ..." % (len(TOPLEVELS) * len(RUNS))))
  print(LINE_SEP)

  # create a temp dir to store config files
  with tempfile.TemporaryDirectory() as tmp_dir:
    # create config files on the fly and launch experiments
    for toplevel in TOPLEVELS:
      for run in RUNS:
        # craft config dictionary
        cdict = copy.deepcopy(CONFIG_DICT)

        # Set experiment name
        experiment_name = EXPERIMENT_BASE_NAME % (toplevel, DURATION_MINS, run)
        experiment_name = experiment_name.replace("_", "-").lower()
        cdict["experiment_name"] = experiment_name
        cdict["toplevel"] = toplevel

        # Set configurations
        cdict["fuzzer_params"]["duration_mins"] = DURATION_MINS

        # write to HJSON file
        hjson_filename = experiment_name + ".hjson"
        hjson_file_path = os.path.join(tmp_dir, hjson_filename)
        with open(hjson_file_path, "w") as fp:
          hjson.dump(cdict, fp)

        # launch fuzzing of the DUT
        # fuzz(["--fail-silently", hjson_file_path])
        fuzz([
            "-y", "--gcp-config-filename", "gcp_config.east1b.hjson",
            hjson_file_path
        ])

        # cleanup config file
        os.remove(hjson_file_path)

  print(LINE_SEP)
  print(green("DONE!"))
  print(LINE_SEP)
Example #13
def compute_fs_opt_mann_whitney(instr_rts):
  print(yellow("Computing Mann-Whitney U-test on fork server opt. data ..."))
  for num_states in STATES:
    sub_rt_df = instr_rts[instr_rts[NUM_STATES_LABEL] == num_states]
    no_opt_data = sub_rt_df[sub_rt_df[OPT_TYPE_LABEL] ==
                            OPT_TYPE_MAPPINGS[False]][RUN_TIME_LABEL]
    opt_data = sub_rt_df[sub_rt_df[OPT_TYPE_LABEL] ==
                         OPT_TYPE_MAPPINGS[True]][RUN_TIME_LABEL]
    mw = stats.mannwhitneyu(no_opt_data, opt_data)
    print("%d States - Mann-Whitney:" % num_states)
    print("\t%s vs. %s:" % (OPT_TYPE_MAPPINGS[False], OPT_TYPE_MAPPINGS[True]),
          mw.pvalue)
  print(green("Done."))
  print(LINE_SEP)
Example #14
def gen_seed(input_yaml_file_name, output_file_name, verbose):
  """Parse YAML HW fuzzing opcodes and translates them in binary to file."""
  print(f"Creating fuzzer seed from YAML: {input_yaml_file_name} ...")
  with open(input_yaml_file_name, "r") as fp:
    fuzz_opcodes = yaml.load(fp, Loader=yaml.Loader)
  with open(output_file_name, "wb") as fp:
    for instr in fuzz_opcodes:
      hwf_instr = TLULFuzzInstr(instr)
      if verbose:
        print(hwf_instr)
      for _ in range(hwf_instr.repeat):
        fp.write(hwf_instr.to_bytes())
  print(green("Seed file generated!"))
  if verbose:
    dump_seed_file_to_stdin(output_file_name)
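
The YAML schema consumed by TLULFuzzInstr is defined elsewhere; the field names below are assumptions, shown only to illustrate the load-then-serialize flow:

import yaml

# Hypothetical seed description; the real TLULFuzzInstr fields may differ.
example_yaml = """
- opcode: wait
  repeat: 2
- opcode: read
  address: 0x0
"""
instrs = yaml.load(example_yaml, Loader=yaml.Loader)
print(instrs)  # a list of dicts, one per fuzzer instruction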
Example #15
def push_docker_image_to_gcr(config):
  """Pushes docker image to GCR if it does not exist there yet."""
  if not config.args.silent:
    print(LINE_SEP)
    print("Pushing Docker image to GCR ...")
    print(LINE_SEP)
  if config.args.update or not check_if_docker_image_exists_in_gcr(config):
    cmd = ["docker", "push", config.docker_image]
    error_str = "ERROR: pushing image to GCR FAILED. Terminating experiment!"
    run_cmd(cmd, error_str, silent=config.args.silent)
    if not config.args.silent:
      print(green("IMAGE PUSH SUCCESSFUL -- Done!"))
  else:
    if not config.args.silent:
      print(yellow("IMAGE ALREADY EXISTS IN GCR -- Done!"))
Example #16
def build_docker_image(config):
  """Creates docker image containing DUT to fuzz."""
  if not config.args.silent:
    print(LINE_SEP)
    print("Building Docker image to fuzz %s ..." % config.toplevel)
    print(LINE_SEP)
  cmd = [
      "docker", "build", "--build-arg",
      "FUZZER=%s" % config.fuzzer, "--build-arg",
      "VERSION=%s" % config.version, "-t", config.docker_image,
      "%s/hw/%s" % (config.root_path, config.toplevel)
  ]
  error_str = "ERROR: image build FAILED. Terminating experiment!"
  run_cmd(cmd, error_str, silent=config.args.silent)
  if not config.args.silent:
    print(green("IMAGE BUILD SUCCESSFUL -- Done!"))
Example #17
def plot_coverage_vs_time(coverage_dfs):
  print(yellow("Generating plots ..."))
  cov_metrics = [
      SW_LINE_COVERAGE_LABEL, SW_REGION_COVERAGE_LABEL, HW_LINE_COVERAGE_LABEL
  ]
  num_cores = len(TOPLEVELS)
  num_cov_metrics = len(cov_metrics)
  sns.set_theme(context="notebook", style="darkgrid")
  fig, axes = plt.subplots(num_cov_metrics,
                           num_cores,
                           sharex=True,
                           sharey=True)
  for trial in range(len(coverage_dfs)):
    # Select experiment trial number
    cov_df = coverage_dfs[trial]
    for row in range(len(axes)):
      # select portion of data corresponding to current COVERAGE METRIC
      sub_cov_df = cov_df[cov_df[COVERAGE_TYPE_LABEL] == cov_metrics[row]]
      for col in range(len(axes[row])):
        # select portion of data corresponding to current core
        plt_df = sub_cov_df[sub_cov_df[TOPLEVEL_LABEL] == TOPLEVELS[col]]
        # sns.set_context("paper")
        curr_ax = sns.lineplot(data=plt_df,
                               x=TIME_LABEL,
                               y=COVERAGE_LABEL,
                               hue=GRAMMAR_LABEL,
                               ax=axes[row][col],
                               legend=False)
        if row == 0 and col == 0 and trial == 0:
          lines = curr_ax.get_lines()
        axes[row][col].set_title("Coverage = %s | Core = %s" %
                                 (cov_metrics[row], TOPLEVELS[col]))
  fig.legend(
      lines,
      [
          "Const. Opcode & Variable Frame",
          "Const. Opcode & Fixed Frame",
          "Mapped Opcode & Variable Frame",
          "Mapped Opcode & Fixed Frame",
      ],
      loc="lower center",
      ncol=4,
  )
  print(green("Done."))
  print(LINE_SEP)
  plt.show()
Example #18
def build_coverage_df(exp2data, trial):
  print(yellow("Building coverage dataframe ..."))
  # Create empty dictionary that will be used to create a Pandas DataFrame that
  # looks like the following:
  # +--------------------------------------------------------------------+
  # | toplevel | isa (grammar) | coverage type | time (s) | coverage (%) |
  # +--------------------------------------------------------------------+
  # |   ...    |        ...    |      ...      |   ...    |      ...     |
  coverage_dict = {
      TOPLEVEL_LABEL: [],
      GRAMMAR_LABEL: [],
      COVERAGE_TYPE_LABEL: [],
      TIME_LABEL: [],
      COVERAGE_LABEL: [],
  }

  # Add rows to the dataframe
  for exp_name, fd_list in exp2data.items():
    fd = fd_list[trial]
    for time, row in fd.afl_data.iterrows():
      cov_df_idx = row["paths_total"] - 1
      for _ in range(3):
        coverage_dict[TOPLEVEL_LABEL].append(fd.toplevel)
        coverage_dict[GRAMMAR_LABEL].append(fd.grammar)
        coverage_dict[TIME_LABEL].append(time)

      # Add kcov coverage
      kcov = fd.kcov_data.loc[cov_df_idx, "Line-Coverage-(%)"]
      coverage_dict[COVERAGE_TYPE_LABEL].append(SW_LINE_COVERAGE_LABEL)
      coverage_dict[COVERAGE_LABEL].append(kcov * 100.0)

      # Add LLVM coverage
      llvm_cov = fd.llvm_cov_data.loc[cov_df_idx, "Region-Coverage-(%)"]
      coverage_dict[COVERAGE_TYPE_LABEL].append(SW_REGION_COVERAGE_LABEL)
      coverage_dict[COVERAGE_LABEL].append(llvm_cov * 100.0)

      # Add Verilator coverage
      vlt_cov = (float(fd.vlt_cov_data.loc[cov_df_idx, "Lines-Covered"]) /
                 float(fd.vlt_cov_data.loc[cov_df_idx, "Total-Lines"]))
      coverage_dict[COVERAGE_TYPE_LABEL].append(HW_LINE_COVERAGE_LABEL)
      coverage_dict[COVERAGE_LABEL].append(vlt_cov * 100.0)

  print(green("Done."))
  print(LINE_SEP)
  return pd.DataFrame.from_dict(coverage_dict)
Example #19
def push_vm_management_scripts_to_gcs(config):
  """Pushes VM management (startup/shutdown scripts to GCS."""
  if not config.args.silent:
    print(LINE_SEP)
    print("Copying VM management script to GCS ...")
    print(LINE_SEP)
  cmd = [
      "gsutil", "cp",
      "%s/infra/hwfp/%s" %
      (config.root_path, config.gcp_params["startup_script"]),
      "gs://%s-%s/%s" % (config.gcp_params["project_id"],
                         config.gcp_params["vm_management_bucket"],
                         config.gcp_params["startup_script"])
  ]
  error_str = "ERROR: pushing scripts to GCS FAILED. Terminating experiment!"
  run_cmd(cmd, error_str, silent=config.args.silent)
  if not config.args.silent:
    print(green("COPY SUCCESSFUL -- Done!"))
Example #20
def compute_stats(hwf_cov_dict, rfuzz_cov_dict):
    print(yellow("Computing stats ..."))
    # Compute HDL coverage % differences
    cov_diffs_sum = 0
    for toplevel, hwf_cov in hwf_cov_dict.items():
        min_hwf_cov = min(hwf_cov)
        max_rfuzz_cov = max(rfuzz_cov_dict[toplevel])
        cov_diff = min_hwf_cov - max_rfuzz_cov
        cov_diffs_sum += cov_diff
        print("HWF vs. RFUZZ coverage (%15s): %.3f%%" % (toplevel, cov_diff))
    cov_diffs_avg = float(cov_diffs_sum) / float(len(hwf_cov_dict.keys()))
    print("Avg. coverage difference: %.3f%%" % (cov_diffs_avg))
    print('-' * len(LINE_SEP))
    for toplevel, hwf_cov in hwf_cov_dict.items():
        rfuzz_cov = rfuzz_cov_dict[toplevel]
        myu = stats.mannwhitneyu(hwf_cov, rfuzz_cov)
        print("HWF vs. RFUZZ Mann-Whitney (%15s): %s" % (toplevel, myu))
    print(green("Done."))
    print(LINE_SEP)
Example #21
def create_local_experiment_data_dir(config):
    """Creates local directories to store fuzzing experiment data."""
    if not config.args.silent:
        print(LINE_SEP)
        print("Creating local directories for fuzzing data ...")
        print(LINE_SEP)
    exp_data_path = "%s/data/%s" % \
        (config.root_path, config.experiment_name)

    # Create directories
    os.makedirs(exp_data_path)
    os.chmod(exp_data_path, stat.S_IRWXU | stat.S_IRGRP | stat.S_IROTH)
    os.mkdir(os.path.join(exp_data_path, "out"))
    os.chmod(os.path.join(exp_data_path, "out"),
             stat.S_IRWXU | stat.S_IRGRP | stat.S_IROTH)
    os.mkdir(os.path.join(exp_data_path, "logs"))
    os.chmod(os.path.join(exp_data_path, "logs"),
             stat.S_IRWXU | stat.S_IRGRP | stat.S_IROTH)

    # Copy over seeds that were used in this experiment
    seeds_dir = "%s/hw/%s/%s/seeds" % (config.root_path, config.soc,
                                       config.toplevel)
    seed_descripts_dir = "%s/hw/%s/%s/seed_descriptions" % (
        config.root_path, config.soc, config.toplevel)
    if os.path.isdir(seeds_dir):
        shutil.copytree(seeds_dir, os.path.join(exp_data_path, "seeds"))
        os.chmod(os.path.join(exp_data_path, "seeds"),
                 stat.S_IRWXU | stat.S_IRGRP | stat.S_IROTH)
    elif os.path.isdir(seed_descripts_dir):
        shutil.copytree(seed_descripts_dir,
                        os.path.join(exp_data_path, "seed_descriptions"))
        os.chmod(os.path.join(exp_data_path, "seed_descriptions"),
                 stat.S_IRWXU | stat.S_IRGRP | stat.S_IROTH)
    else:
        print(red("ERROR: no seeds found. Terminating experiment!"),
              file=sys.stderr)
        sys.exit(1)

    # Copy over HJSON config file that was used
    shutil.copy2(config.config_filename, exp_data_path)
    if not config.args.silent:
        print(green("DIRECTORY CREATION SUCCESSFUL -- Done!"))
    return exp_data_path
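
The permission mask repeated above (owner rwx; group and others read-only) can be hoisted into a small helper; a refactoring sketch (the helper and constant names are assumptions):

import os
import stat

READABLE_DIR_PERMS = stat.S_IRWXU | stat.S_IRGRP | stat.S_IROTH  # rwxr--r--

def make_readable_dir(path):
    # Create a directory and apply the shared permission mask.
    os.mkdir(path)
    os.chmod(path, READABLE_DIR_PERMS)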
Example #22
def plot_bbs(instr_bbs, orientation="h"):
    print(yellow("Generating plots ..."))
    LABEL_FONT_SIZE = 14
    sns.set()
    if orientation == "h":
        ax = sns.barplot(y=NUM_STATES_LABEL,
                         x=NUM_BB_LABEL,
                         hue=INSTR_TYPE_LABEL,
                         data=instr_bbs,
                         orient="h",
                         ci=None)
        ax.set_ylabel(NUM_STATES_LABEL, fontsize=LABEL_FONT_SIZE)
        ax.set_xlabel(NUM_BB_LABEL, fontsize=LABEL_FONT_SIZE)
        ax.tick_params("y", labelsize=LABEL_FONT_SIZE)
        ax.tick_params("x", labelsize=LABEL_FONT_SIZE)
        ax.set_xlim(10, 10000)
        plt.xscale("log")
    else:
        ax = sns.barplot(x=NUM_STATES_LABEL,
                         y=NUM_BB_LABEL,
                         hue=INSTR_TYPE_LABEL,
                         data=instr_bbs,
                         ci=None)
        ax.set_xlabel(NUM_STATES_LABEL, fontsize=LABEL_FONT_SIZE)
        ax.set_ylabel(NUM_BB_LABEL, fontsize=LABEL_FONT_SIZE)
        ax.tick_params("x", labelsize=LABEL_FONT_SIZE)
        ax.tick_params("y", labelsize=LABEL_FONT_SIZE)
        ax.set_ylim(10, 10000)
        plt.yscale("log")
    plt.legend(title=INSTR_TYPE_LABEL,
               fontsize=LABEL_FONT_SIZE,
               title_fontsize=LABEL_FONT_SIZE,
               ncol=3,
               loc="upper center",
               bbox_to_anchor=(0.5, 1.25))
    plt.tight_layout()
    plt.savefig("hwf_components_bbs.png", format="PNG")
    print(green("Done."))
    print(LINE_SEP)
Example #23
def plot_avg_coverage_vs_time(hwf_cov_df, rfuzz_cov_df, time_units="m"):
    print(yellow("Generating plot ..."))

    # Set plot style and extract only HDL line coverage
    sns.set_theme(context="notebook", style="darkgrid")
    hdl_cov_df = pd.concat([hwf_cov_df, rfuzz_cov_df])

    # create figure and plot the data
    fig, ax = plt.subplots(1, 1, figsize=(6, 4))
    sns.lineplot(data=hdl_cov_df,
                 x=TIME_LABEL,
                 y=COVERAGE_LABEL,
                 hue=TOPLEVEL_LABEL,
                 style=FUZZER_LABEL,
                 ax=ax)

    # format the plot
    if time_units == "m":
        time_units_label = "min."
    elif time_units == "h":
        time_units_label = "hours"
    else:
        time_units_label = "s"
    ax.set_xlabel(TIME_LABEL + " (%s)" % time_units_label,
                  fontsize=LABEL_FONT_SIZE)
    ax.set_ylabel("HDL Line " + COVERAGE_LABEL, fontsize=LABEL_FONT_SIZE)
    ax.tick_params("x", labelsize=TICK_FONT_SIZE)
    ax.tick_params("y", labelsize=TICK_FONT_SIZE)
    plt.legend(fontsize=LEGEND_FONT_SIZE,
               title_fontsize=LEGEND_TITLE_FONT_SIZE,
               bbox_to_anchor=(1.01, 0.75),
               loc='upper left')
    plt.tight_layout()

    # save the plot
    plt.savefig(PLOT_FILE_NAME, format=PLOT_FORMAT)
    print(green("Done."))
    print(LINE_SEP)
Example #24
def plot_avg_coverage_vs_time(cov_df, time_units="m"):
    print(yellow("Generating plot ..."))

    # Set plot style and extract only HDL line coverage
    sns.set_theme(context="notebook", style="darkgrid")
    hdl_cov_df = cov_df[cov_df[COVERAGE_TYPE_LABEL] == HW_LINE_COVERAGE_LABEL]

    # create figure and plot the data
    fig, ax = plt.subplots(1, 1, figsize=(4, 2))
    sns.lineplot(data=hdl_cov_df,
                 x=TIME_LABEL,
                 y=COVERAGE_LABEL,
                 hue=TOPLEVEL_LABEL,
                 ax=ax,
                 markers="x")

    # format the plot
    if time_units == "m":
        time_units_label = "min."
    elif time_units == "h":
        time_units_label = "hours"
    else:
        time_units_label = "s"
    ax.set_xlabel(TIME_LABEL + " (%s)" % time_units_label,
                  fontsize=LABEL_FONT_SIZE)
    ax.set_ylabel("HDL Line " + COVERAGE_LABEL, fontsize=LABEL_FONT_SIZE)
    ax.tick_params("x", labelsize=TICK_FONT_SIZE)
    ax.tick_params("y", labelsize=TICK_FONT_SIZE)
    plt.legend(title="Core",
               fontsize=LEGEND_FONT_SIZE,
               title_fontsize=LEGEND_TITLE_FONT_SIZE,
               ncol=2)
    plt.tight_layout()

    # save the plot
    plt.savefig(PLOT_FILE_NAME, format=PLOT_FORMAT)
    print(green("Done."))
    print(LINE_SEP)
Example #25
def build_min_hwf_coverage_df(exp2data,
                              time_units="m",
                              normalize_to_start=False,
                              consolidation="max"):
    print(yellow("Building HWF coverage dataframe ..."))
    # Create empty dictionary that will be used to create a Pandas DataFrame that
    # looks like the following:
    # +--------------------------------------------------------------------+
    # | toplevel | fuzzer | coverage type |      time     |  coverage (%)  |
    # +--------------------------------------------------------------------+
    # |   ...    |  ...   |     ...       |      ...      |       ...      |
    coverage_dict = {
        TOPLEVEL_LABEL: [],
        FUZZER_LABEL: [],
        COVERAGE_TYPE_LABEL: [],
        TIME_LABEL: [],
        COVERAGE_LABEL: [],
    }
    cov_dict = collections.defaultdict(
        list)  # maps toplevel --> [coverage list]
    for exp_name, fd_list in exp2data.items():
        # get min coverage experiment
        min_cov = get_max_vlt_cov(fd_list[0].hwf_cov_data)
        min_cov_fd = fd_list[0]
        for fd in fd_list:
            cov = get_max_vlt_cov(fd.hwf_cov_data)
            cov_dict[fd.toplevel].append(cov)
            if cov < min_cov:
                min_cov = cov
                min_cov_fd = fd
        # build data frame for plotting
        for time, row in min_cov_fd.hwf_afl_data.iterrows():
            # scale time
            scaled_time = scale_time(time, time_units)
            # add circuit, fuzzer, and time values to dataframe row
            coverage_dict[TOPLEVEL_LABEL].append(min_cov_fd.toplevel)
            coverage_dict[TIME_LABEL].append(scaled_time)
            # get the AFL paths_total at the current time
            paths_total = get_paths_total_at_time(time,
                                                  min_cov_fd.hwf_afl_data) - 1
            # get HWF coverage data
            hwf_vlt_cov = get_vlt_cov_at_time(paths_total,
                                              min_cov_fd.hwf_cov_data)
            # normalize to start time if requested
            if time == 0:
                hwf_vlt_cov_t0 = hwf_vlt_cov
            if normalize_to_start:
                hwf_vlt_cov /= hwf_vlt_cov_t0
            # add to data frame
            coverage_dict[FUZZER_LABEL].append("HWFP")
            coverage_dict[COVERAGE_TYPE_LABEL].append(HW_LINE_COVERAGE_LABEL)
            coverage_dict[COVERAGE_LABEL].append(hwf_vlt_cov)
        # extend lines to max time value
        if coverage_dict[TIME_LABEL][-1] != SCALED_MAX_PLOT_TIME:
            coverage_dict[TOPLEVEL_LABEL].append(min_cov_fd.toplevel)
            coverage_dict[TIME_LABEL].append(SCALED_MAX_PLOT_TIME)
            coverage_dict[FUZZER_LABEL].append("HWFP")
            coverage_dict[COVERAGE_TYPE_LABEL].append(HW_LINE_COVERAGE_LABEL)
            coverage_dict[COVERAGE_LABEL].append(
                coverage_dict[COVERAGE_LABEL][-1])
        print("Min. HW Line coverage (%15s): %.3f%%" %
              (min_cov_fd.toplevel, coverage_dict[COVERAGE_LABEL][-1]))
    print(green("Done."))
    print(LINE_SEP)
    return pd.DataFrame.from_dict(coverage_dict), cov_dict
Example #26
def build_avg_coverage_df(exp2data,
                          time_units="m",
                          normalize_to_start=False,
                          consolidation="max"):
    print(yellow("Building average coverage dataframe ..."))
    # Create empty dictionary that will be used to create a Pandas DataFrame that
    # looks like the following:
    # +--------------------------------------------------------------------+
    # | toplevel | isa (grammar) | coverage type | time (s) | coverage (%) |
    # +--------------------------------------------------------------------+
    # |   ...    |        ...    |      ...      |   ...    |      ...     |
    coverage_dict = {
        TOPLEVEL_LABEL: [],
        GRAMMAR_LABEL: [],
        COVERAGE_TYPE_LABEL: [],
        TIME_LABEL: [],
        COVERAGE_LABEL: [],
    }
    for exp_name, fd_list in exp2data.items():
        anchor_fd = fd_list[0]
        for time, row in anchor_fd.afl_data.iterrows():
            # scale time
            if time_units == "h":
                scaled_time = float(time) / float(3600)
            elif time_units == "m":
                scaled_time = float(time) / float(60)
            else:
                scaled_time = time
            # add circuit, grammar, and time values to dataframe row
            for _ in range(3):
                coverage_dict[TOPLEVEL_LABEL].append(anchor_fd.toplevel)
                coverage_dict[GRAMMAR_LABEL].append(anchor_fd.grammar)
                coverage_dict[TIME_LABEL].append(scaled_time)
            # compute average coverage at all points in time
            kcov_avg = 0
            llvm_cov_avg = 0
            vlt_cov_avg = 0
            kcov_max = 0
            llvm_cov_max = 0
            vlt_cov_max = 0
            i = 0
            for fd in fd_list:
                # get the paths_total at the current time
                paths_total = get_paths_total_at_time(time, fd.afl_data) - 1
                # get coverage data
                # print(exp_name, i)
                kcov = get_cov_at_time(paths_total, fd.kcov_data,
                                       "Line-Coverage-(%)")
                kcov_avg += kcov
                kcov_max = max(kcov_max, kcov)
                llvm_cov = get_cov_at_time(paths_total, fd.llvm_cov_data,
                                           "Region-Coverage-(%)")
                llvm_cov_avg += llvm_cov
                llvm_cov_max = max(llvm_cov_max, llvm_cov)
                vlt_cov = get_vlt_cov_at_time(paths_total, fd.vlt_cov_data)
                vlt_cov_avg += vlt_cov
                vlt_cov_max = max(vlt_cov_max, vlt_cov)
                i += 1
            kcov_avg /= float(len(fd_list))
            llvm_cov_avg /= float(len(fd_list))
            vlt_cov_avg /= float(len(fd_list))
            # save time 0 coverage to normalize
            if time == 0:
                kcov_avg_t0 = kcov_avg
                llvm_cov_avg_t0 = llvm_cov_avg
                vlt_cov_avg_t0 = vlt_cov_avg
            if normalize_to_start:
                kcov_avg /= kcov_avg_t0
                llvm_cov_avg /= llvm_cov_avg_t0
                vlt_cov_avg /= vlt_cov_avg_t0
            coverage_dict[COVERAGE_TYPE_LABEL].append(SW_LINE_COVERAGE_LABEL)
            coverage_dict[COVERAGE_TYPE_LABEL].append(SW_REGION_COVERAGE_LABEL)
            coverage_dict[COVERAGE_TYPE_LABEL].append(HW_LINE_COVERAGE_LABEL)
            if consolidation == "avg":
                coverage_dict[COVERAGE_LABEL].append(kcov_avg)
                coverage_dict[COVERAGE_LABEL].append(llvm_cov_avg)
                coverage_dict[COVERAGE_LABEL].append(vlt_cov_avg)
            else:
                coverage_dict[COVERAGE_LABEL].append(kcov_max)
                coverage_dict[COVERAGE_LABEL].append(llvm_cov_max)
                coverage_dict[COVERAGE_LABEL].append(vlt_cov_max)
        # extend lines to max time value
        if coverage_dict[TIME_LABEL][-1] != SCALED_MAX_PLOT_TIME:
            for _ in range(3):
                coverage_dict[TOPLEVEL_LABEL].append(anchor_fd.toplevel)
                coverage_dict[GRAMMAR_LABEL].append(anchor_fd.grammar)
                coverage_dict[TIME_LABEL].append(SCALED_MAX_PLOT_TIME)
            coverage_dict[COVERAGE_TYPE_LABEL].append(SW_LINE_COVERAGE_LABEL)
            coverage_dict[COVERAGE_TYPE_LABEL].append(SW_REGION_COVERAGE_LABEL)
            coverage_dict[COVERAGE_TYPE_LABEL].append(HW_LINE_COVERAGE_LABEL)
            coverage_dict[COVERAGE_LABEL].extend(
                coverage_dict[COVERAGE_LABEL][-3:])
        # print("Max SW Line coverage:       ", coverage_dict[COVERAGE_LABEL][-3])
        # print("Max SW Basic Block coverage:", coverage_dict[COVERAGE_LABEL][-2])
        print("Max HW Line coverage:       ",
              coverage_dict[COVERAGE_LABEL][-1])
    print(green("Done."))
    print(LINE_SEP)
    return pd.DataFrame.from_dict(coverage_dict)
Example #27
def _main():
  num_experiments = len(NUM_STATES) * len(COMP_WIDTHS) * len(RUNS) * len(
      EXPERIMENT_BASE_NAMES)
  print(LINE_SEP)
  print(LINE_SEP)
  print(LINE_SEP)
  print(green("LAUNCHING %d EXPERIMENTS ..." % num_experiments))
  print(LINE_SEP)
  print(LINE_SEP)
  print(LINE_SEP)

  # create a temp dir to store config files
  try:
    tmp_dir = os.path.join(os.getcwd(), "tmp%d")
    i = 0
    while os.path.isdir(tmp_dir % i):
      i += 1
    tmp_dir = tmp_dir % i
    os.mkdir(tmp_dir)

    # create config files on the fly and launch experiments
    for experiment_base_name in EXPERIMENT_BASE_NAMES:
      for states in NUM_STATES:
        for width in COMP_WIDTHS:
          for run in RUNS:
            # craft config dictionary
            cdict = copy.deepcopy(CONFIG_DICT)

            # Set experiment name
            experiment_name = experiment_base_name % (states, width, run)
            cdict["experiment_name"] = experiment_name

            # Set test bench
            if "wopt" in experiment_name:
              cdict["tb"] = "afl_opt"
            else:
              cdict["tb"] = "afl"

            # Set instrumentation amount
            if "full-instr" in experiment_name:
              cdict["instrument_dut"] = 1
              cdict["instrument_tb"] = 1
              cdict["instrument_vltrt"] = 1
            elif "duttb-instr" in experiment_name:
              cdict["instrument_dut"] = 1
              cdict["instrument_tb"] = 1
              cdict["instrument_vltrt"] = 0
            elif "dut" in experiment_name:
              cdict["instrument_dut"] = 1
              cdict["instrument_tb"] = 0
              cdict["instrument_vltrt"] = 0
            else:
              print(red("ERROR: invalid instrumentation config. ABORTING!"))
              sys.exit(1)

            # Set lock size
            cdict["hdl_gen_params"]["num_lock_states"] = states
            cdict["hdl_gen_params"]["lock_comp_width"] = width

            # write to HJSON file
            hjson_filename = experiment_name + ".hjson"
            hjson_file_path = os.path.join(tmp_dir, hjson_filename)
            with open(hjson_file_path, "w") as fp:
              hjson.dump(cdict, fp)

            # launch fuzzing of the DUT
            fuzz(["--fail-silently", hjson_file_path])

            # cleanup config file
            os.remove(hjson_file_path)

  finally:
    # remove temp dir
    for tmp_dir in glob.glob("tmp*"):
      shutil.rmtree(tmp_dir, ignore_errors=True)

  print(LINE_SEP)
  print(LINE_SEP)
  print(LINE_SEP)
  print(green("DONE!"))
  print(LINE_SEP)
  print(LINE_SEP)
  print(LINE_SEP)
Example #28
def build_max_rfuzz_coverage_df(exp2data,
                                time_units="m",
                                normalize_to_start=False,
                                consolidation="max"):
    print(yellow("Building RFUZZ coverage dataframe ..."))
    # Create empty dictionary that will be used to create a Pandas DataFrame that
    # looks like the following:
    # +--------------------------------------------------------------------+
    # | toplevel | fuzzer | coverage type |      time     |  coverage (%)  |
    # +--------------------------------------------------------------------+
    # |   ...    |  ...   |     ...       |      ...      |       ...      |
    coverage_dict = {
        TOPLEVEL_LABEL: [],
        FUZZER_LABEL: [],
        COVERAGE_TYPE_LABEL: [],
        TIME_LABEL: [],
        COVERAGE_LABEL: [],
    }
    cov_dict = collections.defaultdict(
        list)  # maps toplevel --> [coverage list]
    for exp_name, fd_list in exp2data.items():
        # get max coverage experiment
        max_cov = get_max_vlt_cov(fd_list[0].rfuzz_cov_data)
        max_cov_fd = fd_list[0]
        for fd in fd_list:
            cov = get_max_vlt_cov(fd.rfuzz_cov_data)
            cov_dict[fd.toplevel].append(cov)
            if cov > max_cov:
                max_cov = cov
                max_cov_fd = fd
        for test_id, row in max_cov_fd.rfuzz_data.iterrows():
            # scale time
            scaled_time = scale_time(row["Time (s)"], time_units)
            # add circuit, fuzzer, and time values to dataframe row
            coverage_dict[TOPLEVEL_LABEL].append(max_cov_fd.toplevel)
            coverage_dict[TIME_LABEL].append(scaled_time)
            # compute average coverage at all points in time
            rfuzz_vlt_cov = get_vlt_cov_at_time(test_id,
                                                max_cov_fd.rfuzz_cov_data)
            # save time 0 coverage to normalize if requested
            if test_id == 0:
                rfuzz_vlt_cov_t0 = rfuzz_vlt_cov
            if normalize_to_start:
                rfuzz_vlt_cov /= rfuzz_vlt_cov_t0
            # add coverage to dataframe row
            coverage_dict[FUZZER_LABEL].append("RFUZZ")
            coverage_dict[COVERAGE_TYPE_LABEL].append(HW_LINE_COVERAGE_LABEL)
            coverage_dict[COVERAGE_LABEL].append(rfuzz_vlt_cov)
        # extend lines to max time value
        if coverage_dict[TIME_LABEL][-1] != SCALED_MAX_PLOT_TIME:
            coverage_dict[TOPLEVEL_LABEL].append(max_cov_fd.toplevel)
            coverage_dict[TIME_LABEL].append(SCALED_MAX_PLOT_TIME)
            coverage_dict[FUZZER_LABEL].append("RFUZZ")
            coverage_dict[COVERAGE_TYPE_LABEL].append(HW_LINE_COVERAGE_LABEL)
            coverage_dict[COVERAGE_LABEL].append(
                coverage_dict[COVERAGE_LABEL][-1])
        print("Max HW Line coverage (%15s): %.3f%%" %
              (max_cov_fd.toplevel, coverage_dict[COVERAGE_LABEL][-1]))
    print(green("Done."))
    print(LINE_SEP)
    return pd.DataFrame.from_dict(coverage_dict), cov_dict
Example #29
def run_docker_container_on_gce(config):
  """Runs a Docker container to fuzz the DUT on a Google Compute Engine VM."""
  # ***IMPORTANT: check how many VM instances are currently up before launching***
  launch_vm = False
  while not launch_vm:
    # if above under $$$ threshold, create VM instance, else wait
    if check_num_active_vm_instances(config) < config.args.max_vm_instances:
      launch_vm = True
    else:
      time.sleep(config.args.vm_launch_wait_time_s)  # wait before trying again

  # Launch fuzzing container on VM
  if not config.args.silent:
    print(LINE_SEP)
    print("Launching GCE VM to fuzz %s ..." % config.toplevel)
    print(LINE_SEP)
  cmd = [
      "gcloud",
      "compute",
      "--project=%s" % config.gcp_params["project_id"],
      "instances",
      "create-with-container",
      config.experiment_name,
      "--container-image",
      config.docker_image,
      "--container-stdin",
      "--container-tty",
      "--container-privileged",
      "--container-restart-policy",
      config.gcp_params["container_restart_policy"],
      "--zone=%s" % config.gcp_params["zone"],
      "--machine-type=%s" % config.gcp_params["machine_type"],
      "--boot-disk-size=%s" % config.gcp_params["boot_disk_size"],
      "--scopes=%s" % config.gcp_params["scopes"],
      "--metadata=startup-script-url=gs://%s-%s/%s" %
      (config.gcp_params["project_id"],
       config.gcp_params["vm_management_bucket"],
       config.gcp_params["startup_script"]),
  ]
  # Open a shell for debugging
  if config.manual:
    cmd.append("--container-command=/bin/bash")
  # Set environment variables for general configs
  cmd.extend(["--container-env", "%s=%s" % ("TOPLEVEL", config.toplevel)])
  cmd.extend(["--container-env", "%s=%s" % ("VERSION", config.version)])
  cmd.extend(["--container-env", "%s=%s" % ("TB_TYPE", config.tb_type)])
  cmd.extend(["--container-env", "%s=%s" % ("TB", config.tb)])
  cmd.extend(["--container-env", "%s=%s" % ("FUZZER", config.fuzzer)])
  cmd.extend(
      ["--container-env",
       "%s=%s" % ("INSTRUMENT_DUT", config.instrument_dut)])
  cmd.extend(
      ["--container-env",
       "%s=%s" % ("INSTRUMENT_TB", config.instrument_tb)])
  cmd.extend([
      "--container-env",
      "%s=%s" % ("INSTRUMENT_VLTRT", config.instrument_vltrt)
  ])
  cmd.extend(["--container-env", "%s=%s" % ("RUN_ON_GCP", config.run_on_gcp)])
  # Set environment variables for Verilator/HDL-generator/fuzzer params
  for params in config.env_var_params:
    for param, value in params.items():
      if value is not None:
        cmd.extend(["--container-env", "%s=%s" % (param.upper(), value)])
  # launch container in VM instance
  error_str = "ERROR: launching VM on GCE FAILED. Terminating experiment!"
  run_cmd(cmd, error_str, silent=config.args.silent)
  if not config.args.silent:
    print(green("VM LAUNCH SUCCESSFUL -- Done!"))
Example #30
def run_docker_container_locally(config, exp_data_path):
  """Runs a Docker container to fuzz the DUT on the local machine."""
  if not config.args.silent:
    print(LINE_SEP)
    print("Running Docker container to fuzz %s ..." % config.toplevel)
    print(LINE_SEP)
  cmd = [
      "docker",
      "run",
      "-it",
      "--rm",
      "--security-opt",
      "seccomp=unconfined",
      "--log-driver=%s" % config.args.log_driver,
      "--name",
      config.experiment_name,
  ]
  # Set environment variables for general configs
  cmd.extend(["-e", "%s=%s" % ("TOPLEVEL", config.toplevel)])
  cmd.extend(["-e", "%s=%s" % ("VERSION", config.version)])
  cmd.extend(["-e", "%s=%s" % ("TB_TYPE", config.tb_type)])
  cmd.extend(["-e", "%s=%s" % ("TB", config.tb)])
  cmd.extend(["-e", "%s=%s" % ("FUZZER", config.fuzzer)])
  cmd.extend(["-e", "%s=%s" % ("INSTRUMENT_DUT", config.instrument_dut)])
  cmd.extend(["-e", "%s=%s" % ("INSTRUMENT_TB", config.instrument_tb)])
  cmd.extend(["-e", "%s=%s" % ("INSTRUMENT_VLTRT", config.instrument_vltrt)])
  cmd.extend(["-e", "%s=%s" % ("RUN_ON_GCP", config.run_on_gcp)])
  # Set environment variables for Verilator/HDL-generator/fuzzer params
  for params in config.env_var_params:
    for param, value in params.items():
      if value is not None:
        cmd.extend(["-e", "%s=%s" % (param.upper(), value)])
  # Mount volumes for output data
  cmd.extend(
      ["-v",
       "%s/logs:/src/hw/%s/logs" % (exp_data_path, config.toplevel)])
  cmd.extend(
      ["-v", "%s/out:/src/hw/%s/out" % (exp_data_path, config.toplevel)])
  # If manual mode, mount src code for development/debugging
  if config.manual:
    cmd.extend(
        ["-v",
         "%s/%s:/src/hw/hwfutils" % (config.root_path, HWFUTILS_PATH)])
    cmd.extend(["-v", "%s/%s:/src/hw/tb" % (config.root_path, SHARED_TB_PATH)])
    cmd.extend([
        "-v",
        "%s/hw/%s:/src/hw/%s" %
        (config.root_path, config.toplevel, config.toplevel)
    ])
    cmd.extend([
        "-v",
        "%s/infra/base-sim/common.mk:/src/hw/common.mk" % config.root_path
    ])
    cmd.extend(
        ["-v",
         "%s/infra/base-sim/exe.mk:/src/hw/exe.mk" % config.root_path])
    for script in [
        "run",
        "run-kcov",
        "run-llvm-cov",
        "run-vlt-cov",
        "set_hwf_isa.sh",
        "cpp-verilator-sim",
    ]:
      cmd.extend([
          "-v",
          "%s/infra/base-sim/scripts/%s:/scripts/%s" %
          (config.root_path, script, script)
      ])
    if config.fuzzer == "afl" or config.fuzzer == "afl-term-on-crash":
      for script in ["compile", "fuzz"]:
        cmd.extend([
            "-v",
            "%s/infra/base-afl/%s:/scripts/%s" %
            (config.root_path, script, script)
        ])
  # Set target Docker image and run
  cmd.extend(["-t", config.docker_image])
  # If manual mode, start shell
  if config.manual:
    cmd.append("/bin/bash")
  error_str = "ERROR: container run FAILED. Terminating experiment!"
  run_cmd(cmd, error_str, silent=config.args.silent)
  if not config.args.silent:
    print(green("CONTAINER RUN SUCCESSFUL -- Done!"))