Example #1
def plot_results(data_dir, plot_dir=PLOT_DIR, test_id=""):
    data_dir = Path(data_dir)
    conf_name = "test_config"
    config = parse_config(data_dir, conf_name)
    modes = config["modes"]
    epochs = config["epochs"]
    teacher_name = config["teacher_name"] + "_teacher"
    student_name = config["student_name"]
    dfs = {}
    for mode in modes:
        mode = mode.lower()
        mode_path = data_dir.joinpath(mode)
        csv_path = mode_path.joinpath(f"{student_name}_train.csv")
        try:
            dfs[mode] = read_csv(csv_path)
        except FileNotFoundError:
            print(f"Results for {mode} not found, ignoring...")
    teacher_path = data_dir.joinpath(f"{teacher_name}_val.csv")
    dfs["teacher"] = read_csv(teacher_path)
    df = pd.concat(dfs.values(), axis=1, keys=dfs.keys())
    print(df.max().sort_values(ascending=True))
    df = compute_rolling_df_mean(df, 10)
    if (len(modes) + 1) > len(DASH_STYLES):
        print("Too many lines to plot!")
        return

    sns.lineplot(data=df, palette="tab10",
                 style="event", dashes=DASH_STYLES)
    plot_dir = Path(plot_dir).joinpath(test_id)
    util.check_dir(plot_dir)
    plt_name = f"{epochs}_epochs_{teacher_name}_to_{student_name}"
    plt_name = Path(plot_dir).joinpath(plt_name)
    plt.savefig(f"{plt_name}.pdf", bbox_inches='tight', pad_inches=0.05)
    plt.savefig(f"{plt_name}.png", bbox_inches='tight', pad_inches=0.05)
    plt.gcf().clear()
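Note: every example in this listing calls a project-local util.check_dir helper whose implementation is not shown. In the P4-tooling and training snippets it appears to just ensure that a directory exists before results are written into it; here is a minimal sketch under that assumption (illustrative only, not the actual helper from these projects):

# Hypothetical check_dir: ensure a directory exists, creating parents as needed.
from pathlib import Path


def check_dir(directory):
    path = Path(directory)
    path.mkdir(parents=True, exist_ok=True)
    return path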
Example #2
def dump_file(target_dir, p4_file):
    util.check_dir(target_dir)
    target = target_dir.joinpath(p4_file.name)
    try:
        p4_file.rename(target)
    except FileNotFoundError:
        log.warning("Could not move file %s, file not found!", p4_file)
def main(args):

    p4_input = Path(args.p4_input).resolve()
    pass_dir = Path(args.pass_dir)
    p4c_bin = args.p4c_bin
    allow_undef = args.allow_undef
    dump_info = args.dump_info
    if os.path.isfile(p4_input):
        pass_dir = pass_dir.joinpath(p4_input.stem)
        util.del_dir(pass_dir)
        result = validate_translation(
            p4_input, pass_dir, p4c_bin, allow_undef, dump_info)
        sys.exit(result)
    elif os.path.isdir(p4_input):
        util.check_dir(pass_dir)
        for p4_file in list(p4_input.glob("**/*.p4")):
            output_dir = pass_dir.joinpath(p4_file.stem)
            util.del_dir(output_dir)
            validate_translation(
                p4_file, output_dir, p4c_bin, allow_undef)
        result = util.EXIT_SUCCESS
    else:
        log.error("Input file \"%s\" does not exist!", p4_input)
        result = util.EXIT_FAILURE
    sys.exit(result)
Example #4
def test_allkd(s_name, params):
    teachers = ["resnet8", "resnet14", "resnet20", "resnet26",
                "resnet32", "resnet44", "resnet56",
                # "resnet34", "resnet50", "resnet101", "resnet152",
                ]
    accs = {}
    for t_name in teachers:
        params_t = params.copy()
        params_t["teacher_name"] = t_name
        t_net, best_teacher, best_t_acc = setup_teacher(t_name, params_t)
        t_net = util.load_checkpoint(t_net, best_teacher, params_t["device"])
        t_net = freeze_teacher(t_net)
        s_net = setup_student(s_name, params_t)
        params_t["test_name"] = f"{s_name}_{t_name}"
        params_t["results_dir"] = params_t["results_dir"].joinpath("allkd")
        util.check_dir(params_t["results_dir"])
        best_acc = test_kd(s_net, t_net, params_t)
        accs[t_name] = (best_t_acc, best_acc)

    best_acc = 0
    best_t_acc = 0
    for t_name, acc in accs.items():
        if acc[0] > best_t_acc:
            best_t_acc = acc[0]
        if acc[1] > best_acc:
            best_acc = acc[1]
        print(f"Best results teacher {t_name}: {acc[0]}")
        print(f"Best results for {s_name}: {acc[1]}")

    return best_t_acc, best_acc
Example #5
def run_benchmarks(modes, params, s_name, t_name):
    results = {}

    # if we test allkd we do not need to train an individual teacher
    if "allkd" in modes:
        best_t_acc, results["allkd"] = test_allkd(s_name, params)
        modes.remove("allkd")
    else:
        t_net, best_teacher, best_t_acc = setup_teacher(t_name, params)

    for mode in modes:
        mode = mode.lower()
        params_s = params.copy()
        # reset the teacher
        t_net = util.load_checkpoint(t_net, best_teacher, params["device"])

        # load the student and create a results directory for the mode
        s_net = setup_student(s_name, params)
        params_s["test_name"] = s_name
        params_s["results_dir"] = params_s["results_dir"].joinpath(mode)
        util.check_dir(params_s["results_dir"])
        # start the test
        try:
            run_test = globals()[f"test_{mode}"]
            results[mode] = run_test(s_net, t_net, params_s)
        except KeyError:
            raise RuntimeError(f"Training mode {mode} not supported!")

    # Dump the overall results
    print(f"Best results teacher {t_name}: {best_t_acc}")
    for name, acc in results.items():
        print(f"Best results for {s_name} with {name} method: {acc}")
def generate_testfolder(results_dir):
    n_folders = 0
    if os.path.isdir(results_dir):
        f_list = os.listdir(results_dir)
        n_folders = len(f_list)
    testfolder = results_dir.joinpath(f"run_{n_folders}")
    util.check_dir(testfolder)
    return testfolder
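A possible call pattern for generate_testfolder, assuming results_dir is a pathlib.Path (the path below is illustrative):

# Illustrative usage: each call creates the next run_<n> folder under the results directory.
from pathlib import Path

run_dir = generate_testfolder(Path("results"))  # e.g. results/run_0 on the first call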
Example #7
def check(idx, config):
    test_id = generate_id()
    test_name = f"{test_id}_{idx}"
    dump_dir = OUTPUT_DIR.joinpath(f"dmp_{test_name}")
    util.check_dir(dump_dir)
    log_file = dump_dir.joinpath(f"{test_name}.log")
    p4_file = dump_dir.joinpath(f"{test_name}.p4")
    seed = int.from_bytes(os.getrandom(8), "big")
    log.info("Testing P4 program: %s - Seed: %s", p4_file.name, seed)
    # generate a random program
    result, p4_file = generate_p4_prog(P4RANDOM_BIN, p4_file, config, seed)
    if result.returncode != util.EXIT_SUCCESS:
        log.error("Failed generate P4 code!")
        dump_result(result, GENERATOR_BUG_DIR, p4_file)
        # reset the dump directory
        util.del_dir(dump_dir)
        return result.returncode
    # check compilation
    result = compile_p4_prog(config["compiler_bin"], p4_file, dump_dir)
    if result.returncode != util.EXIT_SUCCESS:
        if not is_known_bug(result):
            log.error("Failed to compile the P4 code!")
            log.error("Found a new bug!")
            dump_result(result, CRASH_BUG_DIR, p4_file)
            dump_file(CRASH_BUG_DIR, p4_file)
            if config["do_prune"]:
                info_file = CRASH_BUG_DIR.joinpath(f"{p4_file.stem}_info.json")
                info = validation.INFO
                # customize the main info with the new information
                info["compiler"] = str(config["compiler_bin"])
                info["exit_code"] = result.returncode
                info["p4z3_bin"] = str(P4Z3_BIN)
                info["out_dir"] = str(CRASH_BUG_DIR)
                info["input_file"] = str(p4_file)
                info["allow_undef"] = False
                info["err_string"] = result.stderr.decode("utf-8")
                log.error("Dumping configuration to %s.", info_file)
                with open(info_file, 'w') as json_file:
                    json.dump(info, json_file, indent=2, sort_keys=True)
                p4_cmd = f"{PRUNER_BIN} "
                p4_cmd += f"--config {info_file} "
                p4_cmd += f" {CRASH_BUG_DIR.joinpath(f'{p4_file.stem}.p4')} "
                log.error("Pruning P4 file with command %s ", p4_cmd)
                util.start_process(p4_cmd)
        # reset the dump directory
        util.del_dir(dump_dir)
        return result
    # check validation
    if config["do_validate"]:
        result = validate(dump_dir, p4_file, log_file, config)
    elif config["use_blackbox"]:
        result = run_p4_test(dump_dir, p4_file, log_file, config)

    # reset the dump directory
    util.del_dir(dump_dir)
    return result
Example #8
def save_error(err_path, stdout, stderr):
    log.error("*" * 60)
    log.error(stdout.decode("utf-8"))
    log.error("*" * 60)
    log.error(stderr.decode("utf-8"))
    log.error("*" * 60)
    util.check_dir(err_path.parent)
    with open(f"{err_path}", 'w+') as err_file:
        err_file.write(stdout.decode("utf-8"))
        err_file.write(stderr.decode("utf-8"))
Example #9
  def update_path(self):
    """Checks PATH variable and edits it accordingly.

    Update or repair Windows PATH. If called after setup, path will be updated.
    If called by the flag --fix_path, path will be repaired.
    """
    update = ""
    # Installed by this script
    if not find_executable("cwebp"):
      cwebp_ver, _ = CWEBP_VERSIONS.get(self.version)
      update = (os.path.join(self.cwebp_path, cwebp_ver, "bin") + os.pathsep
                + update)
    if not find_executable("cl"):
      update = (os.path.join(self.program_files, self.vs_version, "VC", "bin")
                + os.pathsep + update)

    # Installed by exe installers
    if not find_executable("cmake"):
      location = util.check_dir(self.cmake_path,
                                os.path.join(CMAKE_VERSION, "bin"), "cmake.exe")
      if not location:
        location = util.find_file(self.program_files, "cmake.exe")
        if location:
          location = os.path.dirname(location)
      if location:
        update = location + os.pathsep + update
      else:
        logging.warn("Unable to set path for CMake. Please rerun this script "
                     "with additional flag:\n\t--cmake=\\path\\to\\cmake")
    if not find_executable("java"):
      location = util.check_dir(self.java_path, "bin", "java.exe")
      if not location:
        location = util.find_file(os.path.dirname(self.program_files),
                                  "java.exe")
        if location:
          location = os.path.dirname(location)
      if location:
        update = location + os.pathsep + update
      else:
        logging.warn("Unable to set path for Java. Please rerun this script "
                     "with the additional flag:\n\t--java=\\path\\to\\java")
    if not find_executable("python"):
      location = util.check_dir(self.python_path, "files", "python.exe")
      if not location:
        location = util.find_file(os.path.dirname(self.program_files),
                                  "python.exe")
        if location:
          location = os.path.dirname(location)
      if location:
        update = location + os.pathsep + update
      else:
        logging.warn("Unable to set path for Python. Please rerun this script "
                     "with the additional flag:\n\t--python=\\path\\to\\python")
    self.path_update = update
    self.bash_profile_changed = True
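In this example and in the installer examples later in this listing (e.g. #16 through #22, #34, #35), util.check_dir takes a base location plus relative path components and acts as a lookup rather than a creator: it seems to return the base location when the expected binary already exists under it, and a falsy value otherwise. A hedged sketch of that contract, inferred from the call sites rather than taken from the project:

# Hypothetical lookup-style check_dir assumed by the installer examples:
# return `location` if location/dir_name/file_name exists, else None.
import os


def check_dir(location, dir_name, file_name):
    if location and os.path.isfile(os.path.join(location, dir_name, file_name)):
        return location
    return None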
Example #10
def main(args):

    if args.randomize_input:
        seed = int.from_bytes(os.getrandom(8), "big")
        z3.set_param(
            "smt.phase_selection",
            5,
            "smt.random_seed",
            seed,
            "smt.arith.random_initial_value",
            True,
            "sat.phase",
            "random",
        )

    config = {}
    config["arch"] = args.arch
    if config["arch"] == "tna":
        config["pipe_name"] = "pipe0_ingress"
        config["ingress_var"] = "ingress"
    elif config["arch"] == "v1model":
        config["pipe_name"] = "ig"
        config["ingress_var"] = "ig"
    elif config["arch"] == "psa":
        config["pipe_name"] = "ingress_ig"
        config["ingress_var"] = "ig"
    else:
        raise RuntimeError("Unsupported test arch \"%s\"!" % config["arch"])

    if args.p4_input:
        p4_input = Path(args.p4_input)
        out_base_dir = Path(args.out_dir)
    else:
        out_base_dir = Path(args.out_dir).joinpath("rnd_test")
        util.check_dir(out_base_dir)
        p4_input = out_base_dir.joinpath("rnd_test.p4")
        # generate a random program from scratch
        generate_p4_prog(P4RANDOM_BIN, p4_input, config)

    if os.path.isfile(p4_input):
        out_dir = out_base_dir.joinpath(p4_input.stem)
        util.del_dir(out_dir)
        config["out_dir"] = out_dir
        config["p4_input"] = p4_input
        result = perform_blackbox_test(config)
    else:
        util.check_dir(out_base_dir)
        for p4_file in list(p4_input.glob("**/*.p4")):
            out_dir = out_base_dir.joinpath(p4_file.stem)
            util.del_dir(out_dir)
            config["out_dir"] = out_dir
            config["p4_input"] = p4_file
            result = perform_blackbox_test(config)
    sys.exit(result)
def prune_files(p4_prune_dir, p4_passes):
    util.check_dir(p4_prune_dir)
    for p4_file in p4_passes:
        sed_cmd = "sed -r "
        sed_cmd += "\':a; s%(.*)/\\*.*\\*/%\\1%; ta; /\\/\\*/ !b; N; ba\' "
        sed_cmd += f"{p4_file} "
        sed_cmd += " | sed -r \'/^\\s*$/d\' "
        sed_cmd += f"> {p4_prune_dir}/{p4_file.name}"
        log.debug("Removing comments and whitespace")
        log.debug("Command: %s", sed_cmd)
        util.exec_process(sed_cmd)
    return p4_prune_dir
def main(args):
    util.check_dir(PLOT_DIR)
    input_folder = Path(args.input_folder)
    # Set consistent seaborn style for plotting
    sns.set(style="white",
            rc={
                "lines.linewidth": 2.0,
                "axes.spines.right": False,
                "axes.spines.top": False,
                "lines.markeredgewidth": 0.1
            })
    make_line_graph(input_folder, args.use_error_bars)
    make_bar_graph(input_folder)
def gen_p4_passes(p4c_bin, p4_dmp_dir, p4_file):
    util.check_dir(p4_dmp_dir)
    # ignore the compiler output here, for now.
    result = generate_p4_dump(p4c_bin, p4_file, p4_dmp_dir)
    # log.warning(result.stderr.decode('utf-8'))
    # if result.returncode == 1:
    # return []
    p4_passes = list_passes(p4c_bin, p4_file, p4_dmp_dir)
    full_p4_passes = []
    for p4_pass in p4_passes:
        p4_name = f"{p4_file.stem}-{p4_pass}.p4"
        full_p4_pass = p4_dmp_dir.joinpath(p4_name)
        full_p4_passes.append(full_p4_pass)
    return full_p4_passes
Example #14
def set_path(bg='train.txt', fg='small.txt', save_path='save'):
    '''Resolve the working directories and file lists for image composition.

    :param bg: text file listing the background image paths, one per line
    :param fg: text file listing the foreground image paths, one per line
    :param save_path: name of the output directory created under the current working directory
    :return: save_base_dir (output folder), img_dir (background image paths),
             labels_dir (background image label paths), small_img_dir (foreground image paths)
    '''
    base_dir = os.getcwd()
    save_base_dir = os.path.join(base_dir, save_path)
    util.check_dir(save_base_dir)
    img_dir = [f.strip() for f in open(os.path.join(base_dir, bg)).readlines()]  # read train.txt: background images
    labels_dir = hp.replace_labels(img_dir)  # read the background image labels
    small_img_dir = [f.strip() for f in open(os.path.join(base_dir, fg)).readlines()]  # read small.txt: foreground images
    return save_base_dir, img_dir, labels_dir, small_img_dir
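A possible call with the default arguments, assuming train.txt and small.txt each list one image path per line in the current working directory (file names are illustrative):

# Illustrative usage of set_path with the default file names.
save_base_dir, img_dir, labels_dir, small_img_dir = set_path(
    bg='train.txt', fg='small.txt', save_path='save')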
Example #15
def runCSlicerStandalone(example: str, time_dict: dict):
    logger.info('Starting example: ' + example)
    start_time = time.time()
    # extract info from cslicer orig config file
    start, end, repo_name, test_suite, repo_path, lines, config_file = \
        extractInfoFromCSlicerConfigs(example)
    if os.path.isdir(repo_path):
        logger.info(f'remove old repo "{repo_path}"')
        shutil.rmtree(repo_path)
    shutil.copytree(repo_path + env_const.REPO_DIR_SFX, repo_path)
    if not check_dir(env_const.CSLICER_STANDALONE_OUTPUT_DIR,
                     make_if_not=True):
        exit(ErrorCode.PATH_ERROR)
    # run tests at end commit, generate jacoco files
    runTestsGenJacoco(example, end, repo_path, test_suite)

    cslicer_orig_log = os.path.join(env_const.CSLICER_STANDALONE_OUTPUT_DIR,
                                    example + '.log')
    time_cost = runCSlicerTool(cslicer_orig_log, config_file, 'orig')
    time_dict[example] = time_cost

    # -------------------------------- cslicer end -------------------------------------
    # debug: move repo to somewhere else
    end_time = time.time()
    run_time = end_time - start_time
    putTimeinLog(cslicer_orig_log, run_time)
Example #16
  def mac_install_cwebp(self):
    """Check for and install cwebp.

    Assumes that if cwebp is already installed, then the user has correctly set
    their path variable such that the command "cwebp -h" will work.
    Raises:
      FileDownloadError: If the cwebp tar fails to download, or is incorrectly
          downloaded.
      ExtractionError: If the cwebp tar cannot be properly extracted.
    """
    if find_executable("cwebp"):
      logging.info("cwebp already installed.")
      return
    location = util.check_dir(self.cwebp_path, CWEBP_VERSION, "cwebp")
    if location:
      self.cwebp_path = location
      logging.info("cwebp found at " + self.cwebp_path)
      return
    logging.info("cwebp not installed. Downloading now.")
    location = os.path.join(common.BASE_DIR, "cwebp.tar.gz")
    location = util.download_file(CWEBP_URL, location, "cwebp", CWEBP_HASH)
    if not location:
      raise common.FileDownloadError("https://developers.google.com/speed/webp/"
                                     "docs/precompiled", "Please rerun this "
                                     "script afterwards with the flag\n"
                                     "\t--cwebp=/path/to/cwebp")
    if not util.extract_tarfile(location, "r:gz", self.cwebp_path, "cwebp"):
      raise common.ExtractionError(location)
    logging.info("cwebp successfully installed.")
Example #17
  def windows_install_cmake(self):
    """Check for and install cmake.

    Raises:
      FileDownloadError: If the CMake zip fails to download, or is downloaded
          incorrectly.
    """
    if find_executable("cmake"):
      if check_cmake_version():
        logging.info("CMake already installed.")
        return
      else:
        logging.info("CMake version not sufficient. Updating now.")
    else:
      location = util.check_dir(self.cmake_path, CMAKE_VERSION,
                                os.path.join("bin", "cmake.exe"))
      if location:
        logging.info("CMake already installed.")
        self.cmake_path = location
        return
      else:
        logging.info("CMake not installed. Downloading now...")
    location = os.path.join(common.BASE_DIR, "cmake.zip")
    location = util.download_file(CMAKE_URL, location, "cmake", CMAKE_HASH)
    if not location:
      raise common.FileDownloadError("https://cmake.org/download/", "Please "
                                     "rerun this script afterwards with the "
                                     "flag\n\t--cmake=\\path\\to\\cmake")
    util.extract_zipfile(location, "r", self.cmake_path, "cmake")
    logging.info("cmake successfully installed.")
Example #18
  def windows_install_cwebp(self):
    """Check for and install cwebp in given directory.

    Raises:
      FileDownloadError: If the cwebp zip fails to download, or is downloaded
          incorrectly.
    """
    if find_executable("cwebp"):
      if check_cwebp_version():
        logging.info("cwebp already installed.")
        return
      else:
        logging.info("cwebp version not sufficient. Updating now.")
    else:
      location = util.check_dir(self.cwebp_path,
                                CWEBP_VERSIONS.get(self.version)[0],
                                "\\bin\\cmake.exe")
      if location:
        logging.info("CMake already installed.")
        self.cmake_path = location
        return
    version, file_hash = CWEBP_VERSIONS.get(self.version)
    logging.info("cwebp not installed. Downloading now...")
    url = CWEBP_BASE_URL + version + ".zip"
    location = os.path.join(common.BASE_DIR, "cwebp.zip")
    location = util.download_file(url, location, "cwebp", file_hash)
    if not location:
      raise common.FileDownloadError("https://developers.google.com/speed/webp/"
                                     "docs/precompiled", "Please rerun this "
                                     "script afterwards with the flag\n\t"
                                     "--cmake=\\path\\to\\cmake")
    util.extract_zipfile(location, "r", self.cwebp_path, "cwebp")
    logging.info("cwebp successfully installed.")
Example #19
  def mac_install_ant(self):
    """Check for and install Apache Ant.

    Raises:
      FileDownloadError: If the ant tar fails to download, or is incorrectly
          downloaded.
      ExtractionError: If the ant tar cannot be properly extracted.
    """
    if find_executable("ant"):
      logging.info("Apache Ant already installed.")
      return
    location = util.check_dir(self.ant_path, ANT_VERSION, "bin/ant")
    if location:
      self.ant_path = location
      logging.info("Apache Ant already installed.")
      return
    logging.info("Apache Ant not installed. Installing now.")
    location = os.path.join(common.BASE_DIR, "ant.tar.gz")
    location = util.download_file(ANT_URL, location, "Ant", ANT_HASH)
    if not location:
      raise common.FileDownloadError("https://www.apache.org/dist/ant/"
                                     "binaries/", "Please rerun this script "
                                     "again afterwards.")
    if not util.extract_tarfile(location, "r:gz", self.ant_path, "Ant"):
      raise common.ExtractionError(location)
    logging.info("Apache Ant successfully installed.")
Example #20
File: mac.py  Project: niu2x/gxm
    def mac_install_ant(self):
        """Check for and install Apache Ant.

        Raises:
          FileDownloadError: If the ant tar fails to download, or is incorrectly
              downloaded.
          ExtractionError: If the ant tar cannot be properly extracted.
        """
        if find_executable("ant"):
            logging.info("Apache Ant already installed.")
            return
        location = util.check_dir(self.ant_path, ANT_VERSION, "bin/ant")
        if location:
            self.ant_path = location
            logging.info("Apache Ant already installed.")
            return
        logging.info("Apache Ant not installed. Installing now.")
        location = os.path.join(common.BASE_DIR, "ant.tar.gz")
        location = util.download_file(ANT_URL, location, "Ant", ANT_HASH)
        if not location:
            raise common.FileDownloadError(
                "https://www.apache.org/dist/ant/"
                "binaries/", "Please rerun this script "
                "again afterwards.")
        if not util.extract_tarfile(location, "r:gz", self.ant_path, "Ant"):
            raise common.ExtractionError(location)
        logging.info("Apache Ant successfully installed.")
Example #21
  def mac_install_cmake(self):
    """Check for and install cmake.

    Assumes that if cmake is already installed, then the user has correctly set
    their path variable such that the command "cmake --version" will work.

    Raises:
      FileDownloadError: If the cmake tar fails to download, or is incorrectly
          downloaded.
      ExtractionError: If the cmake tar cannot be properly extracted.
    """
    if find_executable("cmake"):
      logging.info("CMake already installed.")
      return
    cmake_version = util.get_file_name(
        CMAKE_VERSIONS.get(self.version)[0], False)
    location = util.check_dir(self.cmake_path, cmake_version, "bin/cmake")
    if location:
      self.cmake_path = location
      logging.info("CMake found at " + self.cmake_path)
      return

    logging.info("CMake not installed. Downloading now.")
    url, file_hash = CMAKE_VERSIONS.get(self.os_version, (None, None))
    url = urlparse.urljoin(CMAKE_DOWNLOAD_PREFIX, url)
    location = os.path.join(common.BASE_DIR, "cmake.tar.gz")
    location = util.download_file(url, location, "cmake", file_hash)
    if not location:
      raise common.FileDownloadError("https://cmake.org/download/", "Please "
                                     "rerun this script afterwards with the "
                                     "flag\n\t--cmake=/path/to/cmake")
    if not util.extract_tarfile(location, "r:gz", self.cmake_path, "cmake"):
      raise common.ExtractionError(location)
    logging.info("CMake successfully installed.")
Example #22
File: mac.py  Project: niu2x/gxm
    def mac_install_cwebp(self):
        """Check for and install cwebp.

        Assumes that if cwebp is already installed, then the user has correctly set
        their path variable such that the command "cwebp -h" will work.

        Raises:
          FileDownloadError: If the cwebp tar fails to download, or is incorrectly
              downloaded.
          ExtractionError: If the cwebp tar cannot be properly extracted.
        """
        if find_executable("cwebp"):
            logging.info("cwebp already installed.")
            return
        location = util.check_dir(self.cwebp_path, CWEBP_VERSION, "cwebp")
        if location:
            self.cwebp_path = location
            logging.info("cwebp found at " + self.cwebp_path)
            return
        logging.info("cwebp not installed. Downloading now.")
        location = os.path.join(common.BASE_DIR, "cwebp.tar.gz")
        location = util.download_file(CWEBP_URL, location, "cwebp", CWEBP_HASH)
        if not location:
            raise common.FileDownloadError(
                "https://developers.google.com/speed/webp/"
                "docs/precompiled", "Please rerun this "
                "script afterwards with the flag\n"
                "\t--cwebp=/path/to/cwebp")
        if not util.extract_tarfile(location, "r:gz", self.cwebp_path,
                                    "cwebp"):
            raise common.ExtractionError(location)
        logging.info("cwebp successfully installed.")
Example #23
File: client.py  Project: Darthone/atto
    def run(self):
        # These directories are generated by the generate_certificates script
        keys_dir = self.config['certs']['certs']                                                                  
        public_keys_dir = self.config['certs']['public']                                                          
        secret_keys_dir = self.config['certs']['private']                                                         
        if not (util.check_dir(keys_dir) and util.check_dir(public_keys_dir) and util.check_dir(secret_keys_dir)):
            logging.critical("Certificates are missing - run generate_certificates.py script first")              
            sys.exit(1)                                                                                           
        logger.info("Keys: %s  |  Public: %s  |  Secret: %s", keys_dir, public_keys_dir, secret_keys_dir)         
                                                                                                                  
        ctx = zmq.Context.instance()                                                                              
        client = ctx.socket(zmq.REQ)
        client.RCVTIMEO = self.config['client']['timeout']

        # We need two certificates, one for the client and one for
        # the server. The client must know the server's public key
        # to make a CURVE connection.
        client_secret_file = os.path.join(secret_keys_dir, "client.key_secret")
        client_public, client_secret = zmq.auth.load_certificate(client_secret_file)
        client.curve_secretkey = client_secret
        client.curve_publickey = client_public

        server_public_file = os.path.join(public_keys_dir, "server.key")
        server_public, _ = zmq.auth.load_certificate(server_public_file)
        # The client must know the server's public key to make a CURVE connection.
        client.curve_serverkey = server_public
        connection_str = 'tcp://%s:%s' % (self.config['client']['dest'], self.config['client']['port'])
        logger.info("Trying to connect to %s", connection_str)
        client.connect(connection_str)

        logger.info("Loading plguins")
        self.load_plugins()
        logger.info("Starting Loop")
        while True:
            try:
                for p in self.plugs:
                    logging.info("Running %s", p._name)
                    client.send(p.encode())
                    logging.debug("Waiting for ack")
                    message = client.recv()
                    logging.info("Recieved ack")
                time.sleep(self.config['client']['sleep']/1000)
                self.check_config()
            except zmq.error.ZMQError as e:
                logger.critical("ZMQError, Exiting: %s", e)
                exit()
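In the two atto examples (this one and Example #30), util.check_dir(path) is used as a plain boolean existence check on the certificate directories rather than creating anything. A one-line sketch matching that usage (assumed, not the project's actual helper):

# Hypothetical boolean check_dir used by the ZeroMQ client/server examples.
import os


def check_dir(path):
    return os.path.isdir(path)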
Example #24
 def upload_tools(abi, x86_arm):
     remote_path = Command.__remote_path + abi + "/"
     if not util.check_dir(remote_path):
         util.mkdir(remote_path)
     # upload the loader
     local_loader = os.path.join(Command.__tool_local_path, abi,
                                 Command.__loader_name)
     shell_cmd = util.getcmd('push "%s" "%s"' % (local_loader, remote_path))
     if not util.execute_cmd(shell_cmd):
         return False
     if x86_arm:
         # upload loader.so
         local_inject_so = os.path.join(Command.__tool_local_path, abi,
                                        Command.__client_fake_name)
         shell_cmd = util.getcmd('push "%s" "%s"' %
                                 (local_inject_so, remote_path))
         if not util.execute_cmd(shell_cmd):
             return False
         shell_cmd = util.getshell('chmod 777  "%s"/*' % remote_path)
         if not util.execute_cmd(shell_cmd):
             return False
         # create the remote directory
         remote_path = Command.__remote_path + "armeabi-v7a" + "/"
         if not util.check_dir(remote_path):
             util.mkdir(remote_path)
         # upload the client
         local_client = os.path.join(Command.__tool_local_path,
                                     "armeabi-v7a",
                                     Command.__client_mod_name)
         shell_cmd = util.getcmd('push "%s" "%s"' %
                                 (local_client, remote_path))
         if not util.execute_cmd(shell_cmd):
             return False
     else:
         # upload the client
         local_client = os.path.join(Command.__tool_local_path, abi,
                                     Command.__client_mod_name)
         shell_cmd = util.getcmd('push "%s" "%s"' %
                                 (local_client, remote_path))
         if not util.execute_cmd(shell_cmd):
             return False
     shell_cmd = util.getshell('chmod 777  "%s"/*' % remote_path)
     if not util.execute_cmd(shell_cmd):
         return False
     return True
Example #25
def main():
    args = handle_args()
    facts_dir = os.path.abspath(args.p)
    output_file = args.o
    result = dict()
    check_dir(facts_dir)
    logger.info(f"Check {facts_dir}")
    for sub_dir in os.listdir(facts_dir):
        sub_dir = os.path.join(facts_dir, sub_dir)
        if os.path.isdir(sub_dir):
            name = os.path.basename(sub_dir)  # type: str
            logger.info(f"Now in {name}")
            if r"##" in name:
                lines_dict = dict()
                for ta_file in os.listdir(sub_dir):
                    ta_file_path = os.path.join(sub_dir, ta_file)
                    assert (os.path.isfile(ta_file_path))
                    fact_type = os.path.splitext(ta_file)[0].split("-")[1]  # type: str
                    print(fact_type)
                    n_line = get_lines_of_facts(ta_file_path)  # type: int
                    lines_dict[fact_type] = n_line
                facts_num = FactsNum(**lines_dict)
                static_total = facts_num.total()
                if name in result:
                    result[name].update(lines_dict)
                else:
                    result[name] = lines_dict
                result[name]["static"] = static_total
            else:
                project_name, test_num, h_start, h_end = Benchmark.split_fullname_to_tuple(os.path.basename(sub_dir))
                key_name = f"{project_name}##{h_start}##{h_end}"
                for cov_ta_file in os.listdir(sub_dir):
                    cov_ta_path = os.path.join(sub_dir, cov_ta_file)
                    assert (os.path.isfile(cov_ta_path))
                    n_line = get_lines_of_facts(cov_ta_path)
                    lines_dict = {test_num: n_line}
                    if key_name in result:
                        result[key_name].update(lines_dict)
                    else:
                        result[key_name] = lines_dict
        else:
            logger.info(f"Skip file {sub_dir}")
    with open(output_file, 'w') as of:
        json.dump(result, of, indent=2)
def validate_translation(p4_file, target_dir, p4c_bin,
                         allow_undef=False, dump_info=False):
    info = INFO

    # customize the main info with the new information
    info["compiler"] = str(p4c_bin)
    info["exit_code"] = util.EXIT_SUCCESS
    info["p4z3_bin"] = str(P4Z3_BIN)
    info["out_dir"] = str(target_dir)
    info["input_file"] = str(p4_file)
    info["allow_undef"] = allow_undef
    info["validation_bin"] = f"python3 {__file__}"

    log.info("\n" + "-" * 70)
    log.info("Analysing %s", p4_file)
    start_time = datetime.now()
    util.check_dir(target_dir)
    fail_dir = target_dir.joinpath("failed")
    # run the p4 compiler and dump all the passes for this file
    passes = gen_p4_passes(p4c_bin, target_dir, p4_file)
    passes = prune_passes(passes)
    p4_py_files = []
    # for each emitted pass, generate a python representation
    if len(passes) < 2:
        log.warning("P4 file did not generate enough passes!")
        return util.EXIT_SKIPPED
    # perform the actual comparison
    result, check_info = z3check.z3_check(passes, fail_dir, allow_undef)
    # merge the two info dicts
    info["exit_code"] = result
    info = {**info, **check_info}
    done_time = datetime.now()
    elapsed = done_time - start_time
    time_str = time.strftime("%H hours %M minutes %S seconds",
                             time.gmtime(elapsed.total_seconds()))
    ms = elapsed.microseconds / 1000
    log.info("Translation validation took %s %s milliseconds.",
             time_str, ms)
    if dump_info:
        json_name = target_dir.joinpath(f"{p4_file.stem}_info.json")
        log.info("Dumping configuration to %s.", json_name)
        with open(json_name, 'w') as json_file:
            json.dump(info, json_file, indent=2, sort_keys=True)
    return result
Example #27
def get_z3_formulization(p4_file, out_dir=OUT_DIR):

    if p4_file.suffix == ".p4":
        util.check_dir(out_dir)
        py_file = out_dir.joinpath(p4_file.with_suffix(".py").name)
        result = run_p4_to_py(p4_file, py_file)
        p4_file = py_file
        if result.returncode != util.EXIT_SUCCESS:
            log.error("Failed to translate P4 to Python.")
            log.error("Compiler crashed!")
            return None, result.returncode

    p4py_module = get_py_module(p4_file)
    if p4py_module is None:
        return None, util.EXIT_FAILURE
    package, result = get_z3_asts(p4py_module, p4_file)
    if result != util.EXIT_SUCCESS:
        return None, result
    return package, result
Example #28
def main(args):
    result, config = validate_choice(args)
    if result != util.EXIT_SUCCESS:
        return result

    util.check_dir(OUTPUT_DIR)

    # initialize with some pre-configured state
    launch = TestLauncher(config)

    if config["arch"] == "tna":
        # the tofino tests only support single threaded mode for now
        for idx in range(args.iterations):
            launch(idx)
        return util.EXIT_SUCCESS
    # this sometimes deadlocks, no idea why....
    with Pool(args.num_processes) as p:
        p.map(launch, range(args.iterations))
    return util.EXIT_SUCCESS
Example #29
def start_evaluation(args):
    device = util.setup_torch()
    num_classes = 100 if args.dataset == "cifar100" else 10
    train_loader, test_loader = get_cifar(num_classes,
                                          batch_size=args.batch_size)

    # for benchmarking, decide whether we want to use unique test folders
    if USE_ID:
        test_id = util.generate_id()
    else:
        test_id = ""
    results_dir = Path(args.results_dir).joinpath(test_id)
    results_dir = Path(results_dir).joinpath(args.dataset)
    util.check_dir(results_dir)

    # Parsing arguments and prepare settings for training
    params = {
        "epochs": args.epochs,
        "modes": args.modes,
        "t_checkpoint": args.t_checkpoint,
        "results_dir": results_dir,
        "train_loader": train_loader,
        "test_loader": test_loader,
        "batch_size": args.batch_size,
        # model configuration
        "device": device,
        "teacher_name": args.t_name,
        "student_name": args.s_name,
        "num_classes": num_classes,
        # hyperparameters
        "weight_decay": args.weight_decay,
        "learning_rate": args.learning_rate,
        "momentum": args.momentum,
        "sched": "multisteplr",
        "optim": "SGD",
        # fixed knowledge distillation parameters
        "lambda_student": 0.5,
        "T_student": 5,
    }
    test_conf_name = results_dir.joinpath("test_config.json")
    util.dump_json_config(test_conf_name, params)
    run_benchmarks(args.modes, params, args.s_name, args.t_name)
    plot_results(results_dir, test_id=test_id)
Example #30
File: server.py  Project: Darthone/atto
    def run(self):
        ''' Run Ironhouse example '''

        # These directories are generated by the generate_certificates script
        keys_dir = self.config['certs']['certs']
        public_keys_dir = self.config['certs']['public']
        secret_keys_dir = self.config['certs']['private']
        if not (util.check_dir(keys_dir) and util.check_dir(public_keys_dir) and util.check_dir(secret_keys_dir)):
            logging.critical("Certificates are missing - run generate_certificates.py script first")
            sys.exit(1)
        logger.info("Keys: %s  |  Public: %s  |  Secret: %s", keys_dir, public_keys_dir, secret_keys_dir)

        ctx = zmq.Context.instance()

        # Start an authenticator for this context.
        auth = ThreadAuthenticator(ctx)
        auth.start()
        for ip in self.config['server']['auth']:
            auth.allow(ip)

        # Tell authenticator to use the certificate in a directory
        auth.configure_curve(domain='*', location=public_keys_dir)

        server = ctx.socket(zmq.REP)

        server_secret_file = os.path.join(secret_keys_dir, "server.key_secret")
        server_public, server_secret = zmq.auth.load_certificate(server_secret_file)
        server.curve_secretkey = server_secret
        server.curve_publickey = server_public
        server.curve_server = True  # must come before bind
        bind_info = 'tcp://%s:%s' % (self.config['server']['listen'], self.config['server']['port'])
        server.bind(bind_info)
        logger.info("Server bound to: %s", bind_info)

        self.load_plugins()
        logger.info("Starting reciever.")

        while True:
            msg = server.recv()
            self.handle_msg(msg)
            server.send("ack")

        auth.stop()
Example #31
def read_video(webcam: bool, gray: bool, text: bool):
    input_video_path = '../videos'

    if not check_dir(input_video_path):
        sys.exit()

    video_file_path = f'{input_video_path}/{os.listdir(input_video_path)[0]}'
    if webcam:
        video_file_path = 0

    videoCaptureInstance = cv2.VideoCapture(video_file_path)

    if not videoCaptureInstance.isOpened():
        raise IOError("Cannot open webcam")

    try:
        while True:
            # ret reports whether a frame was read; frame holds the image data
            ret, frame = videoCaptureInstance.read()

            if not ret:
                break

            # Text
            if text:
                resolution_text = 'Width: ' + str(
                    videoCaptureInstance.get(
                        cv2.CAP_PROP_FRAME_WIDTH)) + " Height: " + str(
                            videoCaptureInstance.get(
                                cv2.CAP_PROP_FRAME_HEIGHT))
                date_text = str(datetime.now().strftime('%Y-%m-%d_%H:%M:%S'))
                frame = put_text_frame(frame, (10, 30), resolution_text,
                                       'black')
                frame = put_text_frame(frame, (10, 60), date_text, 'black')
            # Colorful
            if gray:
                convert_frame_to_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
                cv2.imshow('video_window', convert_frame_to_gray)
            else:
                cv2.imshow('video_window', frame)

            client_key_press = cv2.waitKey(1) & 0xFF

            # end video stream by escape key
            if client_key_press == 27:
                break
        # Important: release the active webcam or stream so other instances can use it
        videoCaptureInstance.release()
        cv2.destroyWindow('video_window')
        if platform == "darwin":
            cv2.waitKey(1)

    except cv2.error as err:
        print('Unexpected error occurred while reading the video:', err)
Example #32
def perform_blackbox_test(config):
    out_dir = config["out_dir"]
    p4_input = config["p4_input"]
    if out_dir == OUT_DIR:
        out_dir = out_dir.joinpath(p4_input.stem)
    util.check_dir(out_dir)
    util.copy_file(p4_input, out_dir)
    config["out_dir"] = out_dir
    config["p4_input"] = p4_input

    main_formula, pkt_range = get_main_formula(config)
    if main_formula is None or not pkt_range:
        return util.EXIT_FAILURE
    conditions = set()
    # FIXME: Another hack to deal with branch conditions we cannot control
    for child in main_formula.children()[pkt_range]:
        conditions |= get_branch_conditions(child)
    cond_tuple = dissect_conds(config, conditions)
    stf_str = build_test(config, main_formula, cond_tuple, pkt_range)
    # finally, run the test with the stf string we have assembled
    # and return the result of course
    return run_stf_test(config, stf_str)
Example #33
def get_prog_semantics(config):
    p4_input = config["p4_input"]
    out_dir = config["out_dir"]
    py_file = Path(f"{out_dir}/{p4_input.stem}.py")
    fail_dir = out_dir.joinpath("failed")

    result = run_p4_to_py(p4_input, py_file, config)
    if result.returncode != util.EXIT_SUCCESS:
        log.error("Failed to translate P4 to Python.")
        util.check_dir(fail_dir)
        with open(f"{fail_dir}/error.txt", 'w+') as err_file:
            err_file.write(result.stderr.decode("utf-8"))
        util.copy_file([p4_input, py_file], fail_dir)
        return None, result.returncode
    package, result = get_z3_formulization(py_file)
    if result != util.EXIT_SUCCESS:
        # package is None on failure, so bail out before touching it
        if fail_dir and result != util.EXIT_SKIPPED:
            util.check_dir(fail_dir)
            util.copy_file([p4_input, py_file], fail_dir)
        return None, result
    pipe_val = package.get_pipes()
    return pipe_val, util.EXIT_SUCCESS
Example #34
 def android_install_ndk(self):
   """Checks the directory for installing Android NDK."""
   logging.info("Checking for Android NDK...")
   # Check if android path is already set up
   location = find_executable("ndk-build")
   if location:
     # Strip ndk-build out of path name
     self.ndk_path = os.path.dirname(location)
     logging.info("Android NDK found at " + self.ndk_path)
     return
   # Path is not set, but ndk may still exist
   location = util.check_dir(self.ndk_path, ANDROID_NDK, "ndk-build")
   if location:
     self.ndk_path = location
     logging.info("Android NDK found at " + self.ndk_path)
     return
   logging.info("Android NDK not found. Downloading now.")
   self.android_download_ndk(self.ndk_path)
Example #35
 def android_install_sdk(self):
   """Checks the directory for installing Android SDK."""
   logging.info("Checking for Android SDK...")
   # Check if android path is already set up
   location = find_executable("android")
   if location:
     # Strip tools/android out of path
     self.sdk_path = os.path.dirname(os.path.dirname(location))
     logging.info("Android SDK found at " + self.sdk_path)
     return
   # Path is not set, but sdk may still exist
   android_path = (os.path.join("tools", "android") +
                   (".bat" if self.system == common.WINDOWS else ""))
   location = util.check_dir(self.sdk_path, SDK_NAMES.get(self.system),
                             android_path)
   if location:
     self.sdk_path = location
     logging.info("Android SDK found at " + self.sdk_path)
     return
   logging.info("Android SDK not found. Downloading now.")
   self.android_download_sdk(self.sdk_path)