Example #1
0
def generate(ctx, executor, beamng_home, beamng_user, time_budget, map_size,
             oob_tolerance, speed_limit, module_name, module_path, class_name,
             visualize_tests, log_to, debug):
    """Run a dynamically-loaded test generator against the selected executor.

    Loads ``class_name`` from ``module_name``, creates a unique result folder
    (named after the generator plus a millisecond timestamp), instantiates the
    requested executor ("mock" or "beamng"), runs the generation, and finally
    post-processes the results.

    Exits with status 2 if the result folder cannot be created or test
    generation raises; raises ValueError for an unknown ``executor`` value.
    """
    # ensure that ctx.obj exists and is a dict (in case `cli()` is called
    # by means other than the `if` block below)
    ctx.ensure_object(dict)

    # TODO Refactor by adding a create summary command and forwarding the output of this run to that command

    # Setup logging
    setup_logging(log_to, debug)

    # Setup test generator by dynamically loading it
    module = importlib.import_module(module_name, module_path)
    the_class = getattr(module, class_name)

    road_visualizer = None
    # Setup visualization
    if visualize_tests:
        road_visualizer = RoadTestVisualizer(map_size=map_size)

    # Setup folder structure by ensuring that the basic folder structure is there.
    # exist_ok=True replaces the manual errno.EEXIST dance.
    default_output_folder = os.path.join(get_script_path(), OUTPUT_RESULTS_TO)
    os.makedirs(default_output_folder, exist_ok=True)

    # Create the unique folder that will host the results of this execution using the test generator data and
    # a timestamp as id
    # TODO Allow to specify a location for this folder and the run id
    timestamp_id = time.time_ns() // 1000000
    result_folder = os.path.join(
        default_output_folder,
        "_".join([str(module_name),
                  str(class_name),
                  str(timestamp_id)]))

    try:
        os.makedirs(result_folder)
    except OSError:
        log.fatal("An error occurred during test generation")
        traceback.print_exc()
        sys.exit(2)

    log.info("Outputting results to %s", result_folder)

    # Setup executor. All the executor must output the execution data into the result_folder
    if executor == "mock":
        from code_pipeline.executors import MockExecutor
        the_executor = MockExecutor(result_folder,
                                    time_budget,
                                    map_size,
                                    road_visualizer=road_visualizer)
    elif executor == "beamng":
        from code_pipeline.beamng_executor import BeamngExecutor
        the_executor = BeamngExecutor(result_folder,
                                      time_budget,
                                      map_size,
                                      oob_tolerance=oob_tolerance,
                                      max_speed=speed_limit,
                                      beamng_home=beamng_home,
                                      beamng_user=beamng_user,
                                      road_visualizer=road_visualizer)
    else:
        # Fail fast: previously an unknown executor caused a NameError on
        # `the_executor` further down, far away from the actual mistake.
        raise ValueError("Unknown executor: %s" % executor)

    # Register the shutdown hook for post processing results
    register_exit_fun(
        create_post_processing_hook(ctx, result_folder, the_executor))

    try:
        # Instantiate the test generator
        test_generator = the_class(time_budget=time_budget,
                                   executor=the_executor,
                                   map_size=map_size)
        # Start the generation
        test_generator.start()
    except Exception:
        log.fatal("An error occurred during test generation")
        traceback.print_exc()
        sys.exit(2)
    finally:
        # Ensure the executor is stopped no matter what.
        # TODO Consider using a ContextManager: With executor ... do
        the_executor.close()

    # We still need this here to post process the results if the execution takes the regular flow
    post_process(ctx, result_folder, the_executor)
Example #2
0
                seg_length=SEG_LENGTH,
                num_spline_nodes=NUM_SPLINE_NODES).generate()

            log.info("Generated test from road points %s", road_points)
            the_test = RoadTestFactory.create_road_test(road_points)

            test_outcome, description, execution_data = self.executor.execute_test(
                the_test)

            log.info(test_outcome, description)
            count += 1
            log.info("Remaining Time: %s",
                     str(self.executor.get_remaining_time()))

            log.info("Successful tests: %s", str(count))


if __name__ == "__main__":
    # TODO Clean up the code and remove hardcoded logic from the sample generators. Create a unit tests instead
    # Hardcoded smoke-run configuration.
    budget = 250000
    size = 250
    home = r"C:\Users\vinni\bng_competition\BeamNG.research.v1.7.0.0"

    from code_pipeline.beamng_executor import BeamngExecutor

    # Drive the sample generator against a real BeamNG executor.
    bng = BeamngExecutor(time_budget=budget, map_size=size, beamng_home=home)
    JanusGenerator(budget, bng, size).start()
Example #3
0
        log.info("Pretend test is executing")
        time.sleep(5)
        self.total_elapsed_time += 5

        return test_outcome, description, execution_data

    def _close(self):
        # Delegate the shared shutdown work to the base executor first, then
        # emit a console marker so interactive runs show the mock was torn down.
        super()._close()
        print("Closing Mock Executor")


if __name__ == '__main__':
    # TODO Remove this code and create an unit test instead
    from code_pipeline.beamng_executor import BeamngExecutor
    executor = BeamngExecutor(
        time_budget=250000,
        map_size=250,
        beamng_home=r"C:\Users\vinni\bng_competition\BeamNG.research.v1.7.0.0")

    # Load a pre-recorded road from the seed file; the parsed payload gets a
    # descriptive name instead of shadowing the builtin `dict`.
    ROAD_PATH = r"data\seed0.json"
    with open(ROAD_PATH, 'r') as f:
        road_data = json.load(f)
    sample_nodes = [tuple(t) for t in road_data['sample_nodes']]

    # nodes should be a list of (x,y) float coordinates; keep only x,y and
    # re-attach the fixed z-coordinate (-28.0) and road width (8.0).
    nodes = [sample[:2] for sample in sample_nodes]
    nodes = [(node[0], node[1], -28.0, 8.0) for node in nodes]

    tc = nodes
    test_outcome, description, execution_data = executor.execute_test(tc)
Example #4
0
def generate(executor, beamng_home, beamng_user, time_budget, map_size,
             module_name, module_path, class_name, visualize_tests, log_to,
             debug):
    """Run a dynamically-loaded test generator against the selected executor.

    Loads ``class_name`` from ``module_name``, creates a unique result folder
    (named after the generator plus a millisecond timestamp), instantiates the
    requested executor ("mock" or "beamng"), runs the generation, and finally
    post-processes the results.

    Exits with status 2 if the result folder cannot be created or test
    generation raises; raises ValueError for an unknown ``executor`` value.
    """
    # Setup logging
    setup_logging(log_to, debug)

    # Setup test generator by dynamically loading it
    module = importlib.import_module(module_name, module_path)
    the_class = getattr(module, class_name)

    road_visualizer = None
    # Setup visualization
    if visualize_tests:
        road_visualizer = RoadTestVisualizer(map_size=map_size)

    # Setup folder structure by ensuring that the basic folder structure is there.
    # exist_ok=True replaces the manual errno.EEXIST dance.
    default_output_folder = os.path.join(get_script_path(), OUTPUT_RESULTS_TO)
    os.makedirs(default_output_folder, exist_ok=True)

    # Create the unique folder that will host the results of this execution using the test generator data and
    # a timestamp as id
    timestamp_id = time.time_ns() // 1000000
    result_folder = os.path.join(
        default_output_folder,
        "_".join([str(module_name),
                  str(class_name),
                  str(timestamp_id)]))

    try:
        os.makedirs(result_folder)
    except OSError:
        log.fatal("An error occurred during test generation")
        traceback.print_exc()
        sys.exit(2)

    log.info("Outputting results to %s", result_folder)

    # Setup executor
    if executor == "mock":
        from code_pipeline.executors import MockExecutor
        the_executor = MockExecutor(time_budget=time_budget,
                                    map_size=map_size,
                                    road_visualizer=road_visualizer)
    elif executor == "beamng":
        # TODO Make sure this executor outputs the files in the results folder
        from code_pipeline.beamng_executor import BeamngExecutor
        the_executor = BeamngExecutor(beamng_home=beamng_home,
                                      beamng_user=beamng_user,
                                      time_budget=time_budget,
                                      map_size=map_size,
                                      road_visualizer=road_visualizer)
    else:
        # Fail fast: previously an unknown executor caused a NameError on
        # `the_executor` further down, far away from the actual mistake.
        raise ValueError("Unknown executor: %s" % executor)

    # Register the shutdown hook for post processing results
    register_exit_fun(create_post_processing_hook(result_folder, the_executor))

    try:
        # Instantiate the test generator
        test_generator = the_class(time_budget=time_budget,
                                   executor=the_executor,
                                   map_size=map_size)
        # Start the generation
        test_generator.start()
    except Exception:
        log.fatal("An error occurred during test generation")
        traceback.print_exc()
        sys.exit(2)

    # We still need this here to post process the results if the execution takes the regular flow
    post_process(result_folder, the_executor)