Example #1
def test_preset_n_and_ew(preset_args,
                         clres,
                         time_limit=Def.TimeOuts.test_time_limit):
    """
    Test command arguments - check the evaluation worker combined with the
    number-of-workers flag.
    """
    # measure the timeout from the start of the test; a time.time() default
    # argument would be evaluated only once, at function definition time
    start_time = time.time()

    ew_flag = ['-ew']
    n_flag = ['-n', Def.Flags.enw]
    p_valid_params = p_utils.validation_params(preset_args)

    run_cmd = [
        'python3',
        'rl_coach/coach.py',
        '-p',
        '{}'.format(preset_args),
        '-e',
        '{}'.format("ExpName_" + preset_args),
    ]

    # add flags to run command
    test_ew_flag = a_utils.add_one_flag_value(flag=ew_flag)
    test_n_flag = a_utils.add_one_flag_value(flag=n_flag)
    run_cmd.extend(test_ew_flag)
    run_cmd.extend(test_n_flag)

    print(str(run_cmd))

    try:
        proc = subprocess.Popen(run_cmd,
                                stdout=clres.stdout,
                                stderr=clres.stdout)

        try:
            a_utils.validate_arg_result(flag=test_ew_flag,
                                        p_valid_params=p_valid_params,
                                        clres=clres,
                                        process=proc,
                                        start_time=start_time,
                                        timeout=time_limit)

            a_utils.validate_arg_result(flag=test_n_flag,
                                        p_valid_params=p_valid_params,
                                        clres=clres,
                                        process=proc,
                                        start_time=start_time,
                                        timeout=time_limit)
        except AssertionError:
            # kill the process once an assertion fails
            proc.kill()
            # if the test failed, print the logs
            screen.error(open(clres.stdout.name).read(), crash=False)
            assert False

    except OSError as e:
        # if the test launch failed due to an OSError, skip the test
        pytest.skip(str(e))

    proc.kill()
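
These tests lean on a_utils.add_one_flag_value, which is not shown in this listing. A minimal sketch of what such a helper could look like, assuming it only normalizes the flag list before it is appended to the command line (the real rl_coach test utility may pick flag values dynamically):

def add_one_flag_value(flag):
    """Hypothetical stand-in for a_utils.add_one_flag_value: normalize a
    flag list so it can extend a subprocess command line."""
    if not flag:
        return []
    if len(flag) == 1:
        # boolean switch such as ['-ew']: pass through unchanged
        return list(flag)
    # flag with a value such as ['-n', '4']: stringify the value
    return [flag[0], str(flag[1])]
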
Example #2
def test_preset_args(preset_args,
                     flag,
                     clres,
                     time_limit=Def.TimeOuts.test_time_limit):
    """ Test command arguments - check each flag, one by one."""
    start_time = time.time()

    p_valid_params = p_utils.validation_params(preset_args)

    run_cmd = [
        'python3',
        'rl_coach/coach.py',
        '-p',
        '{}'.format(preset_args),
        '-e',
        '{}'.format("ExpName_" + preset_args),
    ]

    if p_valid_params.reward_test_level:
        lvl = ['-lvl', '{}'.format(p_valid_params.reward_test_level)]
        run_cmd.extend(lvl)

    # add flags to run command
    test_flag = a_utils.add_one_flag_value(flag=flag)
    run_cmd.extend(test_flag)
    print(str(run_cmd))

    # run command
    p = subprocess.Popen(run_cmd, stdout=clres.stdout, stderr=clres.stdout)

    # validate results
    a_utils.validate_args_results(test_flag, clres, p, start_time, time_limit)

    # Close process
    p.kill()
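
The flag argument is injected by pytest. One plausible way to feed the flags one by one is a parametrized fixture, sketched below; the flag matrix here is illustrative, not the actual rl_coach test matrix:

import pytest

# illustrative flag matrix; the real suite builds its own list
FLAGS = [
    ['-ew'],
    ['-n', '4'],
    ['-onnx'],
]

@pytest.fixture(params=FLAGS, ids=lambda f: f[0])
def flag(request):
    """Hand one command-line flag (plus its value, if any) to each run."""
    return request.param
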
Example #3
def test_preset_args(preset_args,
                     flag,
                     clres,
                     time_limit=Def.TimeOuts.test_time_limit):
    """ Test command arguments - check each flag, one by one."""
    start_time = time.time()

    p_valid_params = p_utils.validation_params(preset_args)

    run_cmd = [
        'python3',
        'rl_coach/coach.py',
        '-p',
        '{}'.format(preset_args),
        '-e',
        '{}'.format("ExpName_" + preset_args),
    ]

    if p_valid_params.reward_test_level:
        lvl = ['-lvl', '{}'.format(p_valid_params.reward_test_level)]
        run_cmd.extend(lvl)

    # add flags to run command
    test_flag = a_utils.add_one_flag_value(flag=flag)

    if flag[0] == "-cp":
        seed = ['--seed', '42']
        seed_flag = a_utils.add_one_flag_value(flag=seed)
        run_cmd.extend(seed_flag)

    run_cmd.extend(test_flag)
    print(str(run_cmd))

    try:
        proc = subprocess.Popen(run_cmd,
                                stdout=clres.stdout,
                                stderr=clres.stdout)

        try:
            a_utils.validate_arg_result(flag=test_flag,
                                        p_valid_params=p_valid_params,
                                        clres=clres,
                                        process=proc,
                                        start_time=start_time,
                                        timeout=time_limit)
        except AssertionError:
            # kill the process once an assertion fails
            proc.kill()
            # if the test failed, print the logs
            screen.error(open(clres.stdout.name).read(), crash=False)
            assert False

    except OSError as e:
        # if the test launch failed due to an OSError, skip the test
        pytest.skip(str(e))

    proc.kill()
Example #4
def test_preset_seed(preset_args_for_seed,
                     clres,
                     time_limit=Def.TimeOuts.test_time_limit):
    """
    Test command arguments - check the --seed argument with all presets.
    """
    start_time = time.time()

    def close_processes():
        """
        Kill all processes that are still active in the process list.
        """
        for i in range(seed_num):
            proc[i].kill()

    proc = []
    seed_num = 2
    flag = ["--seed", str(seed_num)]
    p_valid_params = p_utils.validation_params(preset_args_for_seed)

    run_cmd = [
        'python3',
        'rl_coach/coach.py',
        '-p',
        '{}'.format(preset_args_for_seed),
        '-e',
        '{}'.format("ExpName_" + preset_args_for_seed),
    ]

    if p_valid_params.trace_test_levels:
        lvl = ['-lvl', '{}'.format(p_valid_params.trace_test_levels[0])]
        run_cmd.extend(lvl)

    # add flags to run command
    test_flag = a_utils.add_one_flag_value(flag=flag)
    run_cmd.extend(test_flag)
    print(str(run_cmd))

    for _ in range(seed_num):
        proc.append(
            subprocess.Popen(run_cmd, stdout=clres.stdout,
                             stderr=clres.stdout))

    try:
        a_utils.validate_arg_result(flag=test_flag,
                                    p_valid_params=p_valid_params,
                                    clres=clres,
                                    process=proc,
                                    start_time=start_time,
                                    timeout=time_limit)
    except AssertionError:
        close_processes()
        # if the test failed, print the logs
        screen.error(open(clres.stdout.name).read(), crash=False)
        assert False

    close_processes()
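
What validate_arg_result verifies for --seed is not shown in this listing. A natural determinism check, sketched under the assumption that rl_coach writes a 'Training Reward' column to each worker csv (both the column name and the glob pattern are assumptions), would compare the outputs of the two seeded runs:

import glob
import pandas as pd

def seeded_runs_match(exp_path):
    """Sketch: True when every worker csv under exp_path carries an
    identical 'Training Reward' column, as two runs started with the
    same seed should produce."""
    csv_files = sorted(glob.glob('{}/**/*.csv'.format(exp_path),
                                 recursive=True))
    if len(csv_files) < 2:
        return False
    columns = [pd.read_csv(f)['Training Reward'] for f in csv_files]
    return all(columns[0].equals(col) for col in columns[1:])
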
Example #5
@pytest.fixture
def clres(request):
    """
    Create both the csv file and the log file for testing.
    :yield: an object holding both file paths
    """
    class CreateCsvLog:
        """
        Create a test and log paths
        """
        def __init__(self, csv, log, pattern):
            self.exp_path = csv
            self.stdout = open(log, 'w')
            self.fn_pattern = pattern

        @property
        def experiment_path(self):
            return self.exp_path

        @property
        def stdout_path(self):
            return self.stdout

        @experiment_path.setter
        def experiment_path(self, val):
            self.exp_path = val

        @stdout_path.setter
        def stdout_path(self, val):
            self.stdout = open(val, 'w')

    # get preset name from test request params
    idx = 0 if 'preset' in list(request.node.funcargs.items())[0][0] else 1
    p_name = list(request.node.funcargs.items())[idx][1]

    p_valid_params = p_utils.validation_params(p_name)

    sys.path.append('.')
    test_name = 'ExpName_{}'.format(p_name)
    test_path = os.path.join(Def.Path.experiments, test_name)
    if path.exists(test_path):
        shutil.rmtree(test_path)

    # the stdout log file for the test results
    log_file_name = 'test_log_{}.txt'.format(p_name)
    fn_pattern = '*.csv' if p_valid_params.num_workers > 1 else 'worker_0*.csv'

    res = CreateCsvLog(test_path, log_file_name, fn_pattern)

    yield res

    # clean files
    if path.exists(res.exp_path):
        shutil.rmtree(res.exp_path)

    if path.exists(res.stdout.name):
        os.remove(res.stdout.name)
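
A test pulls this fixture in simply by naming it as a parameter; a minimal sketch of a consumer:

def test_uses_clres(clres):
    """Minimal sketch of a test consuming the clres fixture."""
    # the fixture hands over the experiment path, an open log file
    # and the csv filename pattern prepared above
    clres.stdout.write('starting run for {}\n'.format(clres.experiment_path))
    assert clres.fn_pattern.endswith('.csv')
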
Example #6
def test_preset_args(preset_args,
                     flag,
                     clres,
                     time_limit=Def.TimeOuts.test_time_limit):
    """ Test command arguments - check each flag, one by one."""
    start_time = time.time()

    p_valid_params = p_utils.validation_params(preset_args)

    run_cmd = [
        'python3',
        'rl_coach/coach.py',
        '-p',
        '{}'.format(preset_args),
        '-e',
        '{}'.format("ExpName_" + preset_args),
    ]

    if p_valid_params.reward_test_level:
        lvl = ['-lvl', '{}'.format(p_valid_params.reward_test_level)]
        run_cmd.extend(lvl)

    # add flags to run command
    test_flag = a_utils.add_one_flag_value(flag=flag)
    run_cmd.extend(test_flag)
    print(str(run_cmd))

    proc = subprocess.Popen(run_cmd, stdout=clres.stdout, stderr=clres.stdout)

    try:
        a_utils.validate_arg_result(flag=test_flag,
                                    p_valid_params=p_valid_params,
                                    clres=clres,
                                    process=proc,
                                    start_time=start_time,
                                    timeout=time_limit)
    except AssertionError:
        # kill the process once an assertion fails
        proc.kill()
        assert False

    proc.kill()
Example #7
def test_preset_n_and_ew_and_onnx(preset_args,
                                  clres,
                                  time_limit=Def.TimeOuts.test_time_limit):
    """
    Test command arguments - check the evaluation worker, the number of
    workers and onnx export.
    """
    start_time = time.time()

    ew_flag = ['-ew']
    n_flag = ['-n', Def.Flags.enw]
    onnx_flag = ['-onnx']
    s_flag = ['-s', Def.Flags.css]
    p_valid_params = p_utils.validation_params(preset_args)

    run_cmd = [
        'python3',
        'rl_coach/coach.py',
        '-p',
        '{}'.format(preset_args),
        '-e',
        '{}'.format("ExpName_" + preset_args),
    ]

    # add flags to run command
    test_ew_flag = a_utils.add_one_flag_value(flag=ew_flag)
    test_n_flag = a_utils.add_one_flag_value(flag=n_flag)
    test_onnx_flag = a_utils.add_one_flag_value(flag=onnx_flag)
    test_s_flag = a_utils.add_one_flag_value(flag=s_flag)

    run_cmd.extend(test_ew_flag)
    run_cmd.extend(test_n_flag)
    run_cmd.extend(test_onnx_flag)
    run_cmd.extend(test_s_flag)

    print(str(run_cmd))

    proc = subprocess.Popen(run_cmd, stdout=clres.stdout, stderr=clres.stdout)

    try:
        # Check that the csv files have been created
        a_utils.validate_arg_result(flag=test_ew_flag,
                                    p_valid_params=p_valid_params,
                                    clres=clres,
                                    process=proc,
                                    start_time=start_time,
                                    timeout=time_limit)

        # Check that the number of csv files matches the number of workers
        a_utils.validate_arg_result(flag=test_n_flag,
                                    p_valid_params=p_valid_params,
                                    clres=clres,
                                    process=proc,
                                    start_time=start_time,
                                    timeout=time_limit)

        # Check checkpoint files
        a_utils.validate_arg_result(flag=test_s_flag,
                                    p_valid_params=p_valid_params,
                                    clres=clres,
                                    process=proc,
                                    start_time=start_time,
                                    timeout=time_limit)

        # TODO: add onnx check; issue found #257

    except AssertionError:
        # kill the process once an assertion fails
        proc.kill()
        assert False

    proc.kill()
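
The onnx validation itself is still open (issue #257). Once export works, a first-cut check could simply assert that an .onnx file shows up under the experiment directory, roughly as sketched here (the directory layout is an assumption):

import glob
import os

def onnx_file_exported(exp_path):
    """Sketch of the missing onnx check: True when at least one .onnx
    file exists anywhere under the experiment directory."""
    pattern = os.path.join(exp_path, '**', '*.onnx')
    return len(glob.glob(pattern, recursive=True)) > 0
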
Example #8
def test_restore_checkpoint(preset_args,
                            clres,
                            framework,
                            timeout=Def.TimeOuts.test_time_limit):
    """
    Create checkpoints and restore them in second run.
    :param preset_args: all preset that can be tested for argument tests
    :param clres: logs and csv files
    :param framework: name of the test framework
    :param timeout: max time for test
    """
    def _create_cmd_and_run(flag):
        """
        Create default command with given flag and run it
        :param flag: name of the tested flag, this flag will be extended to the
                     running command line
        :return: active process
        """
        run_cmd = [
            'python3',
            'rl_coach/coach.py',
            '-p',
            '{}'.format(preset_args),
            '-e',
            '{}'.format("ExpName_" + preset_args),
            '--seed',
            '{}'.format(4),
            '-f',
            '{}'.format(framework),
        ]

        test_flag = a_utils.add_one_flag_value(flag=flag)
        run_cmd.extend(test_flag)
        print(str(run_cmd))
        p = subprocess.Popen(run_cmd, stdout=clres.stdout, stderr=clres.stdout)

        return p

    start_time = time.time()

    if framework == "mxnet":
        # update preset name - for mxnet framework we are using *_DQN
        preset_args = Def.Presets.mxnet_args_test[0]
        # update logs paths
        test_name = 'ExpName_{}'.format(preset_args)
        test_path = os.path.join(Def.Path.experiments, test_name)
        clres.experiment_path = test_path
        clres.stdout_path = 'test_log_{}.txt'.format(preset_args)

    p_valid_params = p_utils.validation_params(preset_args)
    create_cp_proc = _create_cmd_and_run(flag=['--checkpoint_save_secs', '5'])

    # wait for checkpoint files
    csv_list = a_utils.get_csv_path(clres=clres)
    assert len(csv_list) > 0
    exp_dir = os.path.dirname(csv_list[0])

    checkpoint_dir = os.path.join(exp_dir, Def.Path.checkpoint)

    checkpoint_test_dir = os.path.join(Def.Path.experiments, Def.Path.test_dir)
    if os.path.exists(checkpoint_test_dir):
        shutil.rmtree(checkpoint_test_dir)

    res = a_utils.is_reward_reached(csv_path=csv_list[0],
                                    p_valid_params=p_valid_params,
                                    start_time=start_time,
                                    time_limit=timeout)
    if not res:
        screen.error(open(clres.stdout.name).read(), crash=False)
        assert False

    entities = a_utils.get_files_from_dir(checkpoint_dir)

    assert len(entities) > 0
    assert any(".ckpt." in file for file in entities)

    # send CTRL+C to close experiment
    create_cp_proc.send_signal(signal.SIGINT)

    if os.path.isdir(checkpoint_dir):
        shutil.copytree(exp_dir, checkpoint_test_dir)
        shutil.rmtree(exp_dir)

    create_cp_proc.kill()
    checkpoint_test_dir = "{}/{}".format(checkpoint_test_dir,
                                         Def.Path.checkpoint)
    # run a second time with the checkpoint folder (restore)
    restore_cp_proc = _create_cmd_and_run(
        flag=['-crd', checkpoint_test_dir, '--evaluate'])

    new_csv_list = test_utils.get_csv_path(clres=clres)
    time.sleep(10)

    csv = pd.read_csv(new_csv_list[0])
    res = csv['Episode Length'].values[-1]
    expected_reward = 100
    assert res >= expected_reward, Def.Consts.ASSERT_MSG.format(
        str(expected_reward), str(res))
    restore_cp_proc.kill()

    test_folder = os.path.join(Def.Path.experiments, Def.Path.test_dir)
    if os.path.exists(test_folder):
        shutil.rmtree(test_folder)
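
Both restore tests assume a csv file already exists when get_csv_path returns. A defensive variant would poll until the file appears or a deadline passes; a sketch, with get_csv_path passed in rather than guessing which utility module provides it:

import time

def wait_for_csv(clres, get_csv_path, deadline_secs=60, poll_secs=1):
    """Poll for the experiment csv files until deadline_secs elapses.
    Returns a non-empty list of csv paths or raises TimeoutError."""
    deadline = time.time() + deadline_secs
    while time.time() < deadline:
        csv_list = get_csv_path(clres=clres)
        if csv_list:
            return csv_list
        time.sleep(poll_secs)
    raise TimeoutError('no csv file appeared within {}s'.format(deadline_secs))
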
Example #9
def test_restore_checkpoint(preset_args, clres,
                            timeout=Def.TimeOuts.test_time_limit):
    """ Create checkpoints and restore them in a second run."""
    start_time = time.time()

    def _create_cmd_and_run(flag):

        run_cmd = [
            'python3', 'rl_coach/coach.py',
            '-p', '{}'.format(preset_args),
            '-e', '{}'.format("ExpName_" + preset_args),
        ]
        test_flag = a_utils.add_one_flag_value(flag=flag)
        run_cmd.extend(test_flag)

        p = subprocess.Popen(run_cmd, stdout=clres.stdout, stderr=clres.stdout)

        return p

    p_valid_params = p_utils.validation_params(preset_args)
    create_cp_proc = _create_cmd_and_run(flag=['--checkpoint_save_secs', '5'])

    # wait for checkpoint files
    csv_list = a_utils.get_csv_path(clres=clres)
    assert len(csv_list) > 0
    exp_dir = os.path.dirname(csv_list[0])

    checkpoint_dir = os.path.join(exp_dir, Def.Path.checkpoint)

    checkpoint_test_dir = os.path.join(Def.Path.experiments, Def.Path.test_dir)
    if os.path.exists(checkpoint_test_dir):
        shutil.rmtree(checkpoint_test_dir)

    assert a_utils.is_reward_reached(csv_path=csv_list[0],
                                     p_valid_params=p_valid_params,
                                     start_time=start_time, time_limit=timeout)

    entities = a_utils.get_files_from_dir(checkpoint_dir)

    assert len(entities) > 0
    assert "checkpoint" in entities
    assert any(".ckpt." in file for file in entities)

    # send CTRL+C to close experiment
    create_cp_proc.send_signal(signal.SIGINT)

    csv = pd.read_csv(csv_list[0])
    rewards = csv['Evaluation Reward'].values
    rewards = rewards[~np.isnan(rewards)]
    max_reward = np.amax(rewards)

    if os.path.isdir(checkpoint_dir):
        shutil.copytree(exp_dir, checkpoint_test_dir)
        shutil.rmtree(exp_dir)

    create_cp_proc.kill()
    checkpoint_test_dir = "{}/{}".format(checkpoint_test_dir,
                                         Def.Path.checkpoint)
    # run a second time with the checkpoint folder (restore)
    restore_cp_proc = _create_cmd_and_run(flag=['-crd', checkpoint_test_dir,
                                                '--evaluate'])

    new_csv_list = test_utils.get_csv_path(clres=clres)
    time.sleep(10)

    csv = pd.read_csv(new_csv_list[0])
    res = csv['Episode Length'].values[-1]
    assert res == max_reward, Def.Consts.ASSERT_MSG.format(str(max_reward),
                                                           str(res))
    restore_cp_proc.kill()

    test_folder = os.path.join(Def.Path.experiments, Def.Path.test_dir)
    if os.path.exists(test_folder):
        shutil.rmtree(test_folder)
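
Reading the last row of a csv column, as both restore tests do, breaks when the column still holds NaNs for unfinished episodes. A slightly safer extraction, as a sketch:

import numpy as np
import pandas as pd

def last_value(csv_path, column):
    """Return the last non-NaN value of `column` in the csv at csv_path,
    or None when the column is empty or all-NaN."""
    values = pd.read_csv(csv_path)[column].values
    values = values[~np.isnan(values)]
    return values[-1] if len(values) else None
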