Example #1
def main(args: argparse.Namespace):
    loadfile: Path
    if os.path.isabs(args.loadfile):
        loadfile = Path(args.loadfile)
    else:
        # Take paths relative to 'data/saves/'
        loadfile = common.savefile(args.loadfile)

    physics_engine = physics.PhysicsEngine(common.load_savefile(loadfile))
    initial_state = physics_engine.get_state()

    gui = flight_gui.FlightGui(initial_state,
                               title=name,
                               running_as_mirror=False)
    atexit.register(gui.shutdown)

    if args.flamegraph:
        common.start_flamegraphing()
    if args.profile:
        common.start_profiling()

    while True:
        state = physics_engine.get_state()

        # If we have any commands, process them so the simthread has as
        # much time as possible to restart before the next update.
        physics_engine.handle_requests(gui.pop_commands())

        gui.draw(state)
        gui.rate(common.FRAMERATE)
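The helper common.savefile is referenced above but never defined in these examples. Going by the comment next to it, a minimal sketch of what it might look like (the 'data/saves/' location comes from that comment; the PROJECT_ROOT name and layout are assumptions):

from pathlib import Path

# Hypothetical: the project's root directory; the real helper may locate it differently.
PROJECT_ROOT = Path(__file__).parent

def savefile(name: str) -> Path:
    # Resolve a bare save name relative to 'data/saves/'.
    return PROJECT_ROOT / 'data' / 'saves' / name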
Example #2
def lead_server_loop(args):
    """Main, 'while True'-style loop for a lead server. Blocking.
    See help text for command line arguments for what a lead server is."""

    # Before you make changes to the lead server architecture, consider that
    # the gRPC server runs in a separate thread from this one!
    state_server = network.StateServer()

    log.info(f'Loading save at {args.data_location.path}')
    physics_engine = physics.PEngine(
        common.load_savefile(Path(args.data_location.path)))

    if not args.no_gui:
        global cleanup_function
        global ungraceful_shutdown
        gui = flight_gui.FlightGui(physics_engine.get_state(),
                                   no_intro=args.no_intro)
        cleanup_function = gui.shutdown
        ungraceful_shutdown = gui.ungraceful_shutdown

    server = grpc.server(concurrent.futures.ThreadPoolExecutor(max_workers=4))
    grpc_stubs.add_StateServerServicer_to_server(state_server, server)
    server.add_insecure_port(f'[::]:{args.serve_on_port}')
    server.start()  # This doesn't block!
    # Need a context manager from now on, to make sure the server always stops.
    with common.GrpcServerContext(server):
        log.info(f'Server running on port {args.serve_on_port}. Ctrl-C exits.')

        if args.profile:
            common.start_profiling()
        while True:
            user_commands = []
            state = physics_engine.get_state()
            state_server.notify_state_change(copy.deepcopy(state._proto_state))

            if not args.no_gui:
                user_commands += gui.pop_commands()
            user_commands += state_server.pop_commands()

            # If we have any commands, process them so the simthread has as
            # much time as possible to regenerate solutions before the next update.
            for command in user_commands:
                if command.ident == network.Request.NOOP:
                    continue
                log.info(f'Got command: {command}')
                physics_engine.handle_request(command)

            if not args.no_gui:
                gui.draw(state)
                gui.rate(common.FRAMERATE)
            else:
                time.sleep(1 / common.FRAMERATE)
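common.GrpcServerContext is only used, never defined, in these examples. A minimal sketch, assuming its sole job is to guarantee server.stop() runs even if the loop raises (server.stop(grace=...) is confirmed by the other examples here; the grace value below is illustrative):

class GrpcServerContext:
    # Hypothetical sketch: always stop a grpc.Server on exit from the block.
    def __init__(self, server):
        self._server = server

    def __enter__(self):
        return self._server

    def __exit__(self, exc_type, exc_value, traceback):
        # Give in-flight RPCs a moment to finish before shutting down.
        self._server.stop(grace=2)
        return False  # Don't swallow exceptions such as KeyboardInterrupt.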
Example #3
def main(args: argparse.Namespace):
    # Before you make changes to this function, keep in mind that it starts
    # a gRPC server that runs in a separate thread!
    state_server = network.StateServer()

    loadfile: Path
    if os.path.isabs(args.loadfile):
        loadfile = Path(args.loadfile)
    else:
        # Take paths relative to 'data/saves/'
        loadfile = common.savefile(args.loadfile)

    physics_engine = physics.PhysicsEngine(common.load_savefile(loadfile))
    initial_state = physics_engine.get_state()

    TICKS_BETWEEN_CLIENT_LIST_REFRESHES = 150
    ticks_until_next_client_list_refresh = 0

    server = grpc.server(
        concurrent.futures.ThreadPoolExecutor(max_workers=4))
    atexit.register(lambda: server.stop(grace=2))
    grpc_stubs.add_StateServerServicer_to_server(state_server, server)
    server.add_insecure_port(f'[::]:{network.DEFAULT_PORT}')
    state_server.notify_state_change(initial_state.as_proto())
    server.start()  # This doesn't block!

    gui = ServerGui()

    try:
        if args.flamegraph:
            common.start_flamegraphing()
        if args.profile:
            common.start_profiling()

        while True:
            # If we have any commands, process them immediately so input lag
            # is minimized.
            commands = state_server.pop_commands() + gui.pop_commands()
            physics_engine.handle_requests(commands)

            state = physics_engine.get_state()
            state_server.notify_state_change(state.as_proto())

            if ticks_until_next_client_list_refresh == 0:
                ticks_until_next_client_list_refresh = \
                    TICKS_BETWEEN_CLIENT_LIST_REFRESHES
                state_server.refresh_client_list()
            ticks_until_next_client_list_refresh -= 1

            gui.update(state, state_server.addr_to_connected_clients.values())
    finally:
        server.stop(grace=1)
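The refresh countdown above is a common way to run an expensive task (here, refreshing the client list) once every N iterations of a fast loop, starting on the very first iteration. The same pattern in isolation, with hypothetical stand-in functions:

REFRESH_EVERY_N_TICKS = 150
ticks_until_refresh = 0  # Zero so the first tick triggers a refresh immediately.

while True:
    if ticks_until_refresh == 0:
        ticks_until_refresh = REFRESH_EVERY_N_TICKS
        expensive_refresh()  # Hypothetical stand-in for refresh_client_list().
    ticks_until_refresh -= 1
    cheap_per_tick_work()  # Hypothetical stand-in for the per-tick update/draw calls.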
Example #4
def test_performance():
    # This just runs for 10 seconds and collects profiling data.
    import time

    with PhysicsEngine('OCESS.json') as physics_engine:
        physics_engine.handle_requests([
            network.Request(ident=network.Request.TIME_ACC_SET,
                            time_acc_set=common.TIME_ACCS[-2].value)
        ])

        profiling_duration = 10  # seconds
        end_time = time.time() + profiling_duration
        print(f"Profiling performance for {profiling_duration} seconds.")
        common.start_profiling()

        while time.time() < end_time:
            time.sleep(0.05)
            physics_engine.get_state()
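common.start_profiling appears in several of these examples but is never shown. One plausible sketch, assuming it is meant to profile from the call onward and report when the process exits (the cProfile-based approach and the function body are assumptions, not the project's actual implementation):

import atexit
import cProfile

def start_profiling():
    # Hypothetical sketch: enable a global profiler and print stats at exit.
    profiler = cProfile.Profile()
    profiler.enable()

    def _dump_stats():
        profiler.disable()
        profiler.print_stats(sort='cumtime')

    atexit.register(_dump_stats)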