Example #1
def init_environment_dependent_modules(self):
    super().init_environment_dependent_modules()
    self.env = self.parent_level_manager._real_environment
    screen.log_title("Human Control Mode")
    available_keys = self.env.get_available_keys()
    if available_keys:
        screen.log("Use keyboard keys to move. Press escape to quit. Available keys:")
        screen.log("")
        for action, key in available_keys:  # reuse the list fetched above instead of a second call
            screen.log("\t- {}: {}".format(action, key))
        screen.separator()
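This hook assumes an environment exposing get_available_keys() (returning (action, key) pairs) and rl_coach's screen logger. A minimal sketch for exercising the same logging logic standalone; StubScreen and StubEnv below are hypothetical stand-ins, not part of rl_coach:

class StubScreen:
    # Hypothetical stand-in for rl_coach's screen logger.
    def log_title(self, text): print("== {} ==".format(text))
    def log(self, text=""): print(text)
    def separator(self): print("-" * 40)

class StubEnv:
    # Hypothetical environment returning (action, key) pairs.
    def get_available_keys(self):
        return [("move left", "LEFT"), ("move right", "RIGHT")]

screen, env = StubScreen(), StubEnv()
screen.log_title("Human Control Mode")
available_keys = env.get_available_keys()
if available_keys:
    screen.log("Use keyboard keys to move. Press escape to quit. Available keys:")
    for action, key in available_keys:
        screen.log("\t- {}: {}".format(action, key))
    screen.separator()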
Example #2
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-t',
                        '--trace',
                        help="(flag) perform trace based testing",
                        action='store_true')
    parser.add_argument(
        '-p',
        '--preset',
        help="(string) Name of a preset to run (as configured in presets.py)",
        default=None,
        type=str)
    parser.add_argument(
        '-ip',
        '--ignore_presets',
        help="(string) Name of preset(s) to ignore (comma separated, as configured in presets.py)",
        default=None,
        type=str)
    parser.add_argument(
        '-v',
        '--verbose',
        help="(flag) display verbose logs in the event of an error",
        action='store_true')
    parser.add_argument(
        '--stop_after_first_failure',
        help="(flag) stop executing tests after the first error",
        action='store_true')
    parser.add_argument(
        '-tl',
        '--time_limit',
        help="(int) time limit for each test, in minutes",
        default=40,  # high limit because DDPG is very slow, so its tests run long
        type=int)
    parser.add_argument(
        '-np',
        '--no_progress_bar',
        help="(flag) don't print the progress bar (makes Jenkins logs more readable)",
        action='store_true')
    parser.add_argument(
        '-ow',
        '--overwrite',
        help="(flag) overwrite old trace with new ones in trace testing mode",
        action='store_true')

    args = parser.parse_args()
    if args.preset is not None:
        presets_lists = [args.preset]
    else:
        # presets_lists = list_all_classes_in_module(presets)
        presets_lists = [
            f[:-3] for f in os.listdir(os.path.join('rl_coach', 'presets'))
            if f[-3:] == '.py' and not f == '__init__.py'
        ]

    fail_count = 0
    test_count = 0

    args.time_limit = 60 * args.time_limit

    if args.ignore_presets is not None:
        presets_to_ignore = args.ignore_presets.split(',')
    else:
        presets_to_ignore = []
    for idx, preset_name in enumerate(sorted(presets_lists)):
        if args.stop_after_first_failure and fail_count > 0:
            break
        if preset_name not in presets_to_ignore:
            try:
                preset = import_module(
                    'rl_coach.presets.{}'.format(preset_name))
            except Exception:
                screen.error("Failed to load preset <{}>".format(preset_name),
                             crash=False)
                fail_count += 1
                test_count += 1
                continue

            preset_validation_params = preset.graph_manager.preset_validation_params
            if not args.trace and not preset_validation_params.test:
                continue

            if args.trace:
                num_env_steps = preset_validation_params.trace_max_env_steps
                if preset_validation_params.trace_test_levels:
                    for level in preset_validation_params.trace_test_levels:
                        test_count += 1
                        test_passed = perform_trace_based_tests(
                            args, preset_name, num_env_steps, level)
                        if not test_passed:
                            fail_count += 1
                else:
                    test_count += 1
                    test_passed = perform_trace_based_tests(
                        args, preset_name, num_env_steps)
                    if not test_passed:
                        fail_count += 1
            else:
                test_passed = perform_reward_based_tests(
                    args, preset_validation_params, preset_name)
                if not test_passed:
                    fail_count += 1

    screen.separator()
    if fail_count == 0:
        screen.success(" Summary: " + str(test_count) + "/" + str(test_count) +
                       " tests passed successfully")
    else:
        screen.error(" Summary: " + str(test_count - fail_count) + "/" +
                     str(test_count) + " tests passed successfully")
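The fallback branch above scans rl_coach/presets for .py files and strips the extension to get module names. Pulled out as a standalone helper for clarity (the name discover_presets is ours, not rl_coach's; it assumes the script runs from the root of a Coach checkout):

import os

def discover_presets(presets_dir=os.path.join('rl_coach', 'presets')):
    """Return the module names of all preset files in presets_dir (sans .py)."""
    return sorted(f[:-3] for f in os.listdir(presets_dir)
                  if f.endswith('.py') and f != '__init__.py')

With it, the else branch collapses to presets_lists = discover_presets().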
Example #3
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-p', '--preset', '--presets',
                        help="(string) Name of preset(s) to run (comma separated, as configured in presets.py)",
                        default=None,
                        type=str)
    parser.add_argument('-ip', '--ignore_presets',
                        help="(string) Name of preset(s) to ignore (comma separated, as configured in presets.py)",
                        default=None,
                        type=str)
    parser.add_argument('-v', '--verbose',
                        help="(flag) display verbose logs in the event of an error",
                        action='store_true')
    parser.add_argument('--stop_after_first_failure',
                        help="(flag) stop executing tests after the first error",
                        action='store_true')
    parser.add_argument('-ow', '--overwrite',
                        help="(flag) overwrite old traces with new ones in trace testing mode",
                        action='store_true')
    parser.add_argument('-prl', '--parallel',
                        help="(flag) run tests in parallel",
                        action='store_true')
    parser.add_argument('-ut', '--update_traces',
                        help="(flag) update traces on repository",
                        action='store_true')
    parser.add_argument('-mt', '--max_threads',
                        help="(int) maximum number of threads to run in parallel",
                        default=multiprocessing.cpu_count() - 2,
                        type=int)
    parser.add_argument(
        '-i', '--image', help="(string) Name of the testing image", type=str, default=None
    )
    parser.add_argument(
        '-mb', '--memory_backend', help="(string) Name of the memory backend", type=str, default="redispubsub"
    )
    parser.add_argument(
        '-e', '--endpoint', help="(string) Name of the s3 endpoint", type=str, default='s3.amazonaws.com'
    )
    parser.add_argument(
        '-cr', '--creds_file', help="(string) Path of the s3 creds file", type=str, default='.aws_creds'
    )
    parser.add_argument(
        '-b', '--bucket', help="(string) Name of the bucket for s3", type=str, default=None
    )

    args = parser.parse_args()

    if args.update_traces:
        if not args.bucket:
            print("bucket_name required for s3")
            exit(1)
        if not os.environ.get('AWS_ACCESS_KEY_ID') or not os.environ.get('AWS_SECRET_ACCESS_KEY'):
            print("AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY env vars need to be set")
            exit(1)

        config_file = './tmp.cred'
        generate_config(args.image, args.memory_backend, args.endpoint, args.bucket, args.creds_file, config_file)

    if not args.parallel:
        args.max_threads = 1

    if args.preset is not None:
        presets_lists = args.preset.split(',')
    else:
        presets_lists = [f[:-3] for f in os.listdir(os.path.join('rl_coach', 'presets')) if
                         f[-3:] == '.py' and not f == '__init__.py']

    fail_count = 0
    test_count = 0
    processes = []  # (test_path, log_file, process) tuples for in-flight trace tests

    if args.ignore_presets is not None:
        presets_to_ignore = args.ignore_presets.split(',')
    else:
        presets_to_ignore = []

    for idx, preset_name in enumerate(sorted(presets_lists)):
        if args.stop_after_first_failure and fail_count > 0:
            break
        if preset_name not in presets_to_ignore:
            try:
                preset = import_module('rl_coach.presets.{}'.format(preset_name))
            except Exception:
                screen.error("Failed to load preset <{}>".format(preset_name), crash=False)
                fail_count += 1
                test_count += 1
                continue

            preset_validation_params = preset.graph_manager.preset_validation_params
            num_env_steps = preset_validation_params.trace_max_env_steps
            if preset_validation_params.test_using_a_trace_test:
                if preset_validation_params.trace_test_levels:
                    for level in preset_validation_params.trace_test_levels:
                        test_count += 1
                        test_path, log_file, p = run_trace_based_test(preset_name, num_env_steps, level)
                        processes.append((test_path, log_file, p))
                        test_passed = wait_and_check(args, processes)
                        if test_passed is not None and not test_passed:
                            fail_count += 1
                else:
                    test_count += 1
                    test_path, log_file, p = run_trace_based_test(preset_name, num_env_steps)
                    processes.append((test_path, log_file, p))
                    test_passed = wait_and_check(args, processes)
                    if test_passed is not None and not test_passed:
                        fail_count += 1

    while len(processes) > 0:
        test_passed = wait_and_check(args, processes, force=True)
        if test_passed is not None and not test_passed:
            fail_count += 1

    screen.separator()
    if fail_count == 0:
        screen.success(" Summary: " + str(test_count) + "/" + str(test_count) + " tests passed successfully")
    else:
        screen.error(" Summary: " + str(test_count - fail_count) + "/" + str(test_count) + " tests passed successfully", crash=False)
Example #4
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-p',
        '--preset',
        '--presets',
        help="(string) Name of preset(s) to run (comma separated, as configured in presets.py)",
        default=None,
        type=str)
    parser.add_argument(
        '-ip',
        '--ignore_presets',
        help="(string) Name of preset(s) to ignore (comma separated, as configured in presets.py)",
        default=None,
        type=str)
    parser.add_argument(
        '-v',
        '--verbose',
        help="(flag) display verbose logs in the event of an error",
        action='store_true')
    parser.add_argument(
        '--stop_after_first_failure',
        help="(flag) stop executing tests after the first error",
        action='store_true')
    parser.add_argument(
        '-tl',
        '--time_limit',
        help="(int) time limit for each test, in minutes",
        default=60,  # high limit because DDPG is very slow, so its tests run long
        type=int)
    parser.add_argument(
        '-np',
        '--no_progress_bar',
        help="(flag) don't print the progress bar (makes Jenkins logs more readable)",
        action='store_true')

    args = parser.parse_args()
    if args.preset is not None:
        presets_lists = args.preset.split(',')
    else:
        presets_lists = all_presets()

    fail_count = 0
    test_count = 0

    args.time_limit = 60 * args.time_limit

    if args.ignore_presets is not None:
        presets_to_ignore = args.ignore_presets.split(',')
    else:
        presets_to_ignore = []
    for idx, preset_name in enumerate(sorted(presets_lists)):
        if args.stop_after_first_failure and fail_count > 0:
            break
        if preset_name not in presets_to_ignore:
            print("Attempting to run Preset: %s" % preset_name)
            if not importable(preset_name):
                screen.error("Failed to load preset <{}>".format(preset_name),
                             crash=False)
                fail_count += 1
                test_count += 1
                continue

            if not has_test_parameters(preset_name):
                continue

            test_count += 1
            try:
                test_preset_reward(preset_name, args.no_progress_bar,
                                   args.time_limit, args.verbose)
            except Exception:
                fail_count += 1

    screen.separator()
    if fail_count == 0:
        screen.success(" Summary: " + str(test_count) + "/" + str(test_count) +
                       " tests passed successfully")
    else:
        screen.error(" Summary: " + str(test_count - fail_count) + "/" +
                     str(test_count) + " tests passed successfully")
Example #5
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-p',
        '--preset',
        help="(string) Name of a preset to run (as configured in presets.py)",
        default=None,
        type=str)
    parser.add_argument(
        '-ip',
        '--ignore_presets',
        help="(string) Name of preset(s) to ignore (comma separated, as configured in presets.py)",
        default=None,
        type=str)
    parser.add_argument(
        '-v',
        '--verbose',
        help="(flag) display verbose logs in the event of an error",
        action='store_true')
    parser.add_argument(
        '--stop_after_first_failure',
        help="(flag) stop executing tests after the first error",
        action='store_true')
    parser.add_argument(
        '-ow',
        '--overwrite',
        help="(flag) overwrite old trace with new ones in trace testing mode",
        action='store_true')
    parser.add_argument('-prl',
                        '--parallel',
                        help="(flag) run tests in parallel",
                        action='store_true')
    parser.add_argument(
        '-mt',
        '--max_threads',
        help="(int) maximum number of threads to run in parallel",
        default=multiprocessing.cpu_count() - 2,
        type=int)

    args = parser.parse_args()
    if not args.parallel:
        args.max_threads = 1

    if args.preset is not None:
        presets_lists = [args.preset]
    else:
        presets_lists = [
            f[:-3] for f in os.listdir(os.path.join('rl_coach', 'presets'))
            if f[-3:] == '.py' and not f == '__init__.py'
        ]

    fail_count = 0
    test_count = 0
    processes = []  # (test_path, log_file, process) tuples for in-flight trace tests

    if args.ignore_presets is not None:
        presets_to_ignore = args.ignore_presets.split(',')
    else:
        presets_to_ignore = []

    for idx, preset_name in enumerate(sorted(presets_lists)):
        if args.stop_after_first_failure and fail_count > 0:
            break
        if preset_name not in presets_to_ignore:
            try:
                preset = import_module(
                    'rl_coach.presets.{}'.format(preset_name))
            except Exception:
                screen.error("Failed to load preset <{}>".format(preset_name),
                             crash=False)
                fail_count += 1
                test_count += 1
                continue

            preset_validation_params = preset.graph_manager.preset_validation_params
            num_env_steps = preset_validation_params.trace_max_env_steps
            if preset_validation_params.test_using_a_trace_test:
                if preset_validation_params.trace_test_levels:
                    for level in preset_validation_params.trace_test_levels:
                        test_count += 1
                        test_path, log_file, p = run_trace_based_test(
                            preset_name, num_env_steps, level)
                        processes.append((test_path, log_file, p))
                        test_passed = wait_and_check(args, processes)
                        if test_passed is not None and not test_passed:
                            fail_count += 1
                else:
                    test_count += 1
                    test_path, log_file, p = run_trace_based_test(
                        preset_name, num_env_steps)
                    processes.append((test_path, log_file, p))
                    test_passed = wait_and_check(args, processes)
                    if test_passed is not None and not test_passed:
                        fail_count += 1

    while len(processes) > 0:
        test_passed = wait_and_check(args, processes, force=True)
        if test_passed is not None and not test_passed:
            fail_count += 1

    screen.separator()
    if fail_count == 0:
        screen.success(" Summary: " + str(test_count) + "/" + str(test_count) +
                       " tests passed successfully")
    else:
        screen.error(" Summary: " + str(test_count - fail_count) + "/" +
                     str(test_count) + " tests passed successfully")