def test_run_script():
    paths = build_directory_structure()

    # create config
    config = ConfigCmd(RERANK_DIR + '/conf/config.xml', RERANK_DIR)

    # initialize rerankCmd
    rerank = RerankCmd()

    script = Path('../../../../../librec_auto/core/cmd/rerank/far_rerank.py')

    param_spec = ['--max_len=4', '--lambda=0.3', '--binary=False']

    rerank._files = config.get_files()

    rerank._files.create_exp_paths(0)  # create experiment tuple

    sub_paths = rerank._files.get_exp_paths(0)

    rerank._config = config  # set config

    result = rerank.run_script(script=script,
                               sub_paths=sub_paths,
                               original_path=paths['original_path'],
                               param_spec=param_spec)

    assert result == 0  # check that the script ran successfully

    # check the reranked results are correct
    assert filecmp.cmp(RERANK_DATA_DIR / Path('result-out-1.txt'),
                       paths['results_path'] / 'out-1.txt')
    assert filecmp.cmp(RERANK_DATA_DIR / Path('result-out-2.txt'),
                       paths['results_path'] / 'out-2.txt')

    rmtree(RERANK_DIR)  # delete the reranking directory
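
For reference, the test leans on a build_directory_structure() helper that is not shown in this listing. A minimal sketch of what such a helper might look like, with the layout and returned keys inferred from how the test uses them; the real helper in the suite may differ:

# Hypothetical sketch -- layout inferred from the test above, not the real helper.
from pathlib import Path

RERANK_DIR = 'test-rerank-study'  # assumed scratch directory name

def build_directory_structure():
    base = Path(RERANK_DIR)
    results = base / 'exp00000' / 'result'
    original = base / 'exp00000' / 'original'
    for d in (base / 'conf', results, original):
        d.mkdir(parents=True, exist_ok=True)
    # A real helper would also copy a config.xml fixture into conf/
    # and seed result files (out-1.txt, out-2.txt) for the reranker to read.
    return {'results_path': results, 'original_path': original}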
Example #2
    def execute(self, config: ConfigCmd):
        self._config = config
        self.status = Cmd.STATUS_INPROC
        files = config.get_files()

        post_path = files.get_post_path()

        if not post_path.exists():
            print('librec-auto: post directory missing. Creating.', post_path)
            os.makedirs(str(post_path))

        post_elems = config.get_xml().xpath(self.POST_ELEM_XPATH)

        for post_elem in post_elems:
            param_spec = utils.create_param_spec(post_elem)
            param_spec = self.handle_password(post_elem, config, param_spec)
            script_path = utils.get_script_path(post_elem, 'post')
            exec_path = files.get_study_path()  # scripts run from the study directory

            proc_spec = [
                sys.executable,
                script_path.absolute().as_posix(),
                self._config.get_files().get_config_file_path().name
            ] + param_spec
            print(f'librec-auto: Running post-processing script {proc_spec}')
            subprocess.call(proc_spec, cwd=str(exec_path.absolute()))
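
The --name=value strings in param_spec (compare ['--max_len=4', '--lambda=0.3', '--binary=False'] in the first example) come from <param> elements in the study's config XML. A minimal sketch of that conversion, assuming <param name="...">value</param> children; the real utils.create_param_spec may read a different element layout:

# Hypothetical sketch of the param-spec conversion; the element layout is an assumption.
from lxml import etree

def create_param_spec_sketch(script_elem):
    return [f"--{p.get('name')}={p.text}"
            for p in script_elem.findall('param')]

elem = etree.fromstring(
    '<script><param name="max_len">4</param>'
    '<param name="lambda">0.3</param></script>')
print(create_param_spec_sketch(elem))  # ['--max_len=4', '--lambda=0.3']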
Example #3
    def execute(self, config: ConfigCmd, dry_run=False):
        self._config = config
        self._exp_path = config.get_files().get_exp_paths(self._sub_no)
        self._files = config.get_files()
        for i in range(self._files.get_exp_count()):
            sub_path = self._files.get_exp_paths(i)
            self.execute_algorithm(sub_path, dry_run=dry_run)
Example #4
    def execute(self, config: ConfigCmd):
        self.status = Cmd.STATUS_INPROC
        self._files = config.get_files()

        if self._type == 'none':  # nothing to purge
            self.status = Cmd.STATUS_COMPLETE
            return

        if self._no_ask or self.purge_confirm():
            if self._type == "all" or self._type == 'split':
                self.purge_subexperiments()
                self.purge_splits()  # AS 10-23-20
                self.purge_post()

            if self._type == "results":
                self.purge_subexperiments()
                self.purge_post()

            if self._type == "rerank":
                self.purge_rerank()
                self.purge_post()

            if self._type == "post":
                self.purge_post()
        else:
            print("librec-auto: Skipping. No files deleted.")
        self.status = Cmd.STATUS_COMPLETE
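
purge_confirm() is not shown here; judging from the branch above, it asks the user before anything is deleted and returns a boolean. A minimal sketch under that assumption (the real prompt's wording and logic may differ):

# Hypothetical confirmation prompt; not the actual purge_confirm() implementation.
def purge_confirm_sketch(purge_type: str) -> bool:
    answer = input(f"librec-auto: purge '{purge_type}' files? [y/N] ")
    return answer.strip().lower() in ('y', 'yes')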
Example #5
    def execute(self, config: ConfigCmd):
        self.status = Cmd.STATUS_INPROC
        self._files = config.get_files()
        self._config = config

        for i in range(self._files.get_exp_count()):
            sub_path = self._files.get_exp_paths(i)
            self.rerank(sub_path)
Example #6
    def execute(self, config: ConfigCmd):
        self._config = config
        self.status = Cmd.STATUS_INPROC
        files = config.get_files()

        StudyStatus(config)

        post_path = files.get_post_path()

        if not post_path.exists():
            print('librec-auto: post directory missing. Creating.', post_path)
            os.makedirs(str(post_path))

        post_elems = config.get_xml().xpath(self.POST_ELEM_XPATH)

        for post_elem in post_elems:
            param_spec = utils.create_param_spec(post_elem)
            param_spec = self.handle_password(post_elem, config, param_spec)
            script_path = utils.get_script_path(post_elem, 'post')
            exec_path = files.get_study_path()  # scripts run from the study directory

            proc_spec = [
                sys.executable,
                script_path.absolute().as_posix(),
                self._config.get_files().get_config_file_path().name
            ] + param_spec
            print(f'librec-auto: Running post-processing script {proc_spec}')
            run_script = safe_run_subprocess(proc_spec,
                                             str(exec_path.absolute()))
            script_name = script_path.name  # basename, portable across platforms
            if run_script != 0:
                self.status = Cmd.STATUS_ERROR
                raise ScriptFailureException(
                    script_name,
                    f"Post processing script at {str(script_path)} failed.",
                    run_script)

        self.status = Cmd.STATUS_COMPLETE
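
This version swaps the direct subprocess.call of Example #2 for safe_run_subprocess, whose definition is not included in this listing. Judging from its use, it returns 0 on success and a nonzero code on failure. A minimal sketch under that assumption:

# Hypothetical sketch; the real safe_run_subprocess may handle more failure modes.
import subprocess

def safe_run_subprocess_sketch(proc_spec, cwd):
    try:
        return subprocess.run(proc_spec, cwd=cwd).returncode
    except OSError as err:  # e.g., interpreter or script not found
        print(f'librec-auto: failed to launch {proc_spec}: {err}')
        return -1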
Example #7
    def execute(self, config: ConfigCmd, dry_run=False):
        self._config = config
        self.status = Cmd.STATUS_INPROC

        metrics = self.get_metrics()
        # Must use absolute paths because these will be passed to a script
        cv_dirs = config.get_cv_directories(absolute=True)

        # TODO: run these evaluations in parallel

        num_of_experiments = self._config.get_sub_exp_count()

        # Run the evaluator for every cv in every experiment.
        for experiment_num in range(num_of_experiments):
            # Each item in this list represents a cv in the experiment.
            experiment_results = []

            for cv_dir in cv_dirs:
                # Note: assumes single-digit cv numbers (last character of the dir name)
                cv_num = str(cv_dir)[-1]
                print('For experiment', experiment_num + 1, 'evaluating cv',
                      cv_num, '...')
                # Create an evaluator for each cv...
                evaluator = Evaluator(config, metrics, cv_dir, experiment_num,
                                      cv_num)
                if dry_run:
                    evaluator.dry_run()
                else:
                    cv_results = evaluator.evaluate()  # Evaluate it.
                    experiment_results.append(cv_results)  # Add to results.

                    self.save_results(experiment_num, experiment_results)
                    Status.save_status("Python-side metrics completed", experiment_num, config, \
                        config.get_files().get_exp_paths(experiment_num))

        if not dry_run:
            temp_binary_path = self._config.get_files().get_study_path() / Path(
                'py-eval-temp.pickle')
            if os.path.exists(temp_binary_path):
                # Remove temporary eval binary
                os.remove(temp_binary_path)
Example #8
def test_dry_run(capsys):
    capsys.readouterr()  # flush any output captured before the test body

    build_directory_structure()

    # create config
    config = ConfigCmd(RERANK_DIR + '/conf/config.xml', RERANK_DIR)

    # initialize rerankCmd
    rerank = RerankCmd()

    rerank._files = config.get_files()

    rerank._files.create_exp_paths(0)  # create experiment tuple

    sub_paths = rerank._files.get_exp_paths(0)

    rerank._config = config  # set config

    rerank.dry_run(config)

    rmtree(RERANK_DIR)  # delete the reranking directory

    # check stdout
    out, err = capsys.readouterr()

    # for exp00000 -> only one experiment
    # re-rank script -> wildcard to handle absolute path, far_rerank.py
    # parameters from config.xml

    # use a wildcard for the beginning of the absolute path
    if name == 'nt':
        # running on Windows
        out_pattern = r"librec-auto \(DR\): Running re-ranking command RerankCmd\(\) for exp00000\n\tRe-rank script: (.*)librec-auto\\librec_auto\\core\\cmd\\rerank\\far_rerank\.py\n\tParameters: \['--max_len=4', '--lambda=0\.3', '--binary=False']\n$"
    else:
        # non-Windows
        out_pattern = r"librec-auto \(DR\): Running re-ranking command RerankCmd\(\) for exp00000\n\tRe-rank script: (.*)librec-auto\/librec_auto\/core\/cmd\/rerank\/far_rerank\.py\n\tParameters: \['--max_len=4', '--lambda=0\.3', '--binary=False']\n$"

    # assert that there is a match
    assert re.match(out_pattern, out) is not None
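
The two expected-output patterns above differ only in the path separator. One way to build a single pattern from os.sep (a sketch; it assumes the separator is the only platform difference):

# Sketch: derive the separator instead of branching on os.name.
import os
import re

sep = re.escape(os.sep)  # '/' on POSIX; an escaped backslash on Windows
script_re = sep.join(['librec-auto', 'librec_auto', 'core', 'cmd',
                      'rerank', r'far_rerank\.py'])
out_pattern = (r"librec-auto \(DR\): Running re-ranking command "
               r"RerankCmd\(\) for exp00000\n"
               r"\tRe-rank script: (.*)" + script_re +
               r"\n\tParameters: \['--max_len=4', '--lambda=0\.3', "
               r"'--binary=False']\n$")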
Example #9
    def execute(self, config: ConfigCmd):
        self.status = Cmd.STATUS_INPROC
        files = config.get_files()

        if files.get_exp_count() == 0:
            print("librec-auto: No experiments found.")
        else:
            for exp_paths in files.get_exp_paths_iterator():
                status = Status(exp_paths)
                print(status)

        self.status = Cmd.STATUS_COMPLETE
Example #10
    def execute(self, config: ConfigCmd):
        self._config = config
        self._exp_path = config.get_files().get_exp_paths(self._sub_no)
        link = self._exp_path.get_ref_exp_name()
        if link and self._command == 'run':
            self.status = Cmd.STATUS_COMPLETE
        else:
            self.ensure_clean_log()

            if self._command != 'check':
                Status.save_status("Executing", self._sub_no, config,
                                   self._exp_path)
            if self._command == "eval":
                self.fix_list_length()
            self.execute_librec()
        if self._command != 'check':
            Status.save_status("Completed", self._sub_no, config,
                               self._exp_path)
Example #11
    def execute(self, config: ConfigCmd, startflag=None, exp_no=None):
        config.ensure_experiments(exp_no)
        config.setup_exp_configs(startflag)
Example #12
    def dry_run(self, config: ConfigCmd):
        print(f"librec-auto (DR): Executing setup command {self}")
        config.ensure_experiments()
        config.setup_exp_configs()
Example #13
def _get_config():
    return ConfigCmd('config.xml', '')
Example #14
def _get_config():
    config = ConfigCmd('librec_auto/test/test-config.xml', '')
    config._files.set_post_path(POST_DIR)
    return config
Example #15
def _get_config():
    return ConfigCmd('librec_auto/test/test-config.xml', '')
Example #16
    def execute(self, config: ConfigCmd):
        config.ensure_experiments()
        config.setup_exp_configs()
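
Across these examples the flow is the same: build a ConfigCmd from a config file and a study directory, then hand it to a command object's dry_run or execute. A minimal sketch of that pattern, with placeholder paths:

# Sketch of the shared usage pattern; the paths are placeholders.
# ConfigCmd and RerankCmd are the librec_auto classes shown in the examples above.
config = ConfigCmd('conf/config.xml', '.')  # (config file, study directory)
cmd = RerankCmd()
cmd.dry_run(config)   # print what would run, without running it
cmd.execute(config)   # actually run the re-ranking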