Code Example #1
def extract_samples(
    client_header,
    client_messages,
    srv_header,
    srv_messages,
    text_entries=None,
    mouse_to_action=default_mouse_to_action,
):
    samples = []
    client = Client(srv_header)
    numpy_screen = client.framebuffer.numpy_screen
    numpy_screen.set_paint_cursor(True)

    server_deque = collections.deque(srv_messages)
    text_deque = None if text_entries is None else collections.deque(
        text_entries)
    cur_text = ""

    for idx, (ts, msg) in enumerate(client_messages):
        # update the current text
        for text in iterate_earlier(text_deque, ts):
            cur_text = text

        # apply server messages to the framebuffer
        for srv_msg in iterate_earlier(server_deque, ts):
            if srv_msg.message_type == rfp_server.RfpServer.MessageType.fb_update:
                rects = []
                for msg_r in srv_msg.message_body.rects:
                    rect = client.decode_rectangle(msg_r)
                    if rect is not None:
                        rects.append(rect)
                update = server_messages.FramebufferUpdate(rects)
                numpy_screen.flip()
                numpy_screen.apply(update)
                numpy_screen.flip()

        # pass client action to framebuffer to track cursor position
        if msg.message_type == 5:  # TODO: enum
            event = vnc_event.PointerEvent(msg.message_body.pos_x,
                                           msg.message_body.pos_y,
                                           msg.message_body.button_mask)
            numpy_screen.flip()
            numpy_screen.apply_action(event)
            numpy_screen.flip()

            # if button was pressed, record the observation and the event
            if msg.message_body.button_mask:
                img = crop_image(numpy_screen.peek().copy())
                action = mouse_to_action(event)
                if action is not None:
                    obs = img if text_entries is None else (img, cur_text)
                    samples.append((obs, action))

    return samples
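The helper iterate_earlier is not defined on this page. A minimal sketch of what it presumably does, assuming srv_messages and text_entries are (timestamp, item) pairs sorted by time and that a None deque is tolerated (as text_deque may be):

def iterate_earlier(deque_obj, ts):
    # Hypothetical helper: consume (timestamp, item) pairs from the left of the
    # deque while their timestamp is not later than ts, yielding only the item.
    if deque_obj is None:
        return
    while deque_obj and deque_obj[0][0] <= ts:
        _, item = deque_obj.popleft()
        yield item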
Code Example #2
def test_mouse_coords():
    env = gym.make(DEFAULT_ENV)
    env = universe.wrappers.experimental.SoftmaxClickMouse(env)

    for _ in range(100):
        x = random.randint(0, 300)
        y = random.randint(0, 300)
        event = vnc_event.PointerEvent(x, y, 1)
        discr = env._action_to_discrete(event)
        discr2 = vnc_demo.mouse_to_action(x, y)
        assert discr == discr2
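vnc_demo.mouse_to_action is not shown on this page. Below is a hypothetical stand-alone version of the mapping it is expected to reproduce, assuming SoftmaxClickMouse's defaults of a 10-pixel grid over an active region of x in [10, 170) and y in [75, 285); the region, the clamping, and the column-major index order are all assumptions here, and the test above is precisely what verifies that the two mappings agree:

GRID_STEP = 10
X_LOW, X_HIGH = 10, 10 + 160
Y_LOW, Y_HIGH = 75, 75 + 210

def mouse_to_action(x, y):
    # Clamp the click into the assumed active region, then quantize to a grid cell.
    x = min(max(x, X_LOW), X_HIGH - 1)
    y = min(max(y, Y_LOW), Y_HIGH - 1)
    col = (x - X_LOW) // GRID_STEP
    row = (y - Y_LOW) // GRID_STEP
    n_rows = (Y_HIGH - Y_LOW) // GRID_STEP  # 21 rows of 10x10 cells
    # Column-major index: all rows of column 0, then column 1, and so on.
    return col * n_rows + row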
Code Example #3
    def sample(self):
        if self.screen_shape is not None:
            # Both key and pointer events are allowed
            event_type = prng.np_random.randint(2)
        else:
            # No screen shape is known, so only key events are allowed
            event_type = 0

        if event_type == 0:
            # Let's press a key
            key = prng.np_random.choice(self.keys)
            event = [key]
        else:
            x = prng.np_random.randint(self.screen_shape[0])
            y = prng.np_random.randint(self.screen_shape[1])
            buttonmask = prng.np_random.choice(self.buttonmasks)

            event = [vnc_event.PointerEvent(x, y, buttonmask)]
        return event
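sample() reads only three attributes from its owning action-space object. A hypothetical minimal container with just those fields, enough context to read the method above (the real universe action space defines much more):

class MinimalVNCActionSpace:
    # Hypothetical holder for the attributes sample() uses; field meanings are
    # inferred from the method body, not from the real universe class.
    def __init__(self, keys, buttonmasks, screen_shape=None):
        self.keys = list(keys)                # candidate key events to emit
        self.buttonmasks = list(buttonmasks)  # e.g. [0, 1] for no-click / left click
        self.screen_shape = screen_shape      # indexed as [0] for x, [1] for y; None disables pointer events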
Code Example #4
File: test_multi_env.py  Project: zmoon111/universe
def test_multi_env():
    """
    Create 2 envs pointing at the same VNC server and alternate using them.
    The vnc-agent eval workers do this.
    It's nontrivial because the envs have to start rejecting updates while they're not active,
    and start accepting them again once they become active.
    You should see logs like
        update queue max of 60 reached; pausing further updates
    after every switch.
    You can watch it on a second VNC client. You should see the mouse slowly circling the screen.
    """
    loops = 1

    logger.info('test_multi_env, loops=%d', loops)
    e2 = create_dd_env(1, 'test_multi_env_1')
    e1 = create_dd_env(1, 'test_multi_env_1')
    basetime = time.time()
    for outeri in range(loops):
        for envi, env in enumerate([e1, e2]):
            if env is None: continue
            env.reset()
            tot_reward = 0.0
            for stepi in range(100):
                angle = stepi * np.pi * 2.0 / 100.0
                x = 512 + np.round(np.cos(angle) * 400)
                y = 384 + np.round(np.sin(angle) * 300)
                action = [vnc_event.PointerEvent(x, y, 0)]
                obs, reward, done, info = env.step(action)
                obs_sum = np.sum(obs)
                tot_reward += reward
                logger.debug(
                    "%d/%d/%d: state=%s sum %.0f, reward=%g, done=%s, action=%s",
                    outeri, envi, stepi, obs.shape, obs_sum, reward, done,
                    action)
                assert obs.shape == (768, 1024, 3)
                assert obs_sum >= 300000000 and obs_sum < 400000000
            logger.info("%d/%d: tot_reward=%.0f", outeri, envi, tot_reward)
            assert tot_reward >= 0
    if e1: e1.close()
    if e2: e2.close()
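create_dd_env is not defined on this page. A plausible sketch, assuming it builds an unvectorized flashgames.DuskDrive-v0 environment attached to a VNC remote; the env id, the remote spec, and the handling of the arguments are assumptions here:

import gym
import logging
import universe  # noqa: F401  (registers the flashgames.* env ids)
from universe import wrappers

logger = logging.getLogger(__name__)

def create_dd_env(remote_index, name):
    # Hypothetical helper: the test above needs the single-env reset()/step()
    # API and a 1024x768x3 observation, hence the Unvectorize wrapper.
    logger.info('creating env %s for remote #%d', name, remote_index)
    env = gym.make('flashgames.DuskDrive-v0')
    env = wrappers.Unvectorize(env)
    # remotes=1 starts one local Docker remote; a 'vnc://host:port+rport' spec
    # could instead point both envs at the same VNC server, as the docstring describes.
    env.configure(remotes=1)
    return env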
Code Example #5
            # framebuffer update
            if msg.message_type == rfp_server.RfpServer.MessageType.fb_update:
                rects = []
                for msg_r in msg.message_body.rects:
                    rect = client.decode_rectangle(msg_r)
                    if rect is not None:
                        rects.append(rect)
                update = server_messages.FramebufferUpdate(rects)
                numpy_screen.flip()
                numpy_screen.apply(update)
                numpy_screen.flip()

        # pass client action to framebuffer to track cursor position
        if msg.message_type == 5:  # TODO: enum
            event = vnc_event.PointerEvent(msg.message_body.pos_x,
                                           msg.message_body.pos_y,
                                           msg.message_body.button_mask)
            numpy_screen.flip()
            numpy_screen.apply_action(event)
            numpy_screen.flip()

            # record the image if a button is pressed or more than 0.5s passed since the last save
            if msg.message_body.button_mask or last_save is None or (
                    ts - last_save) > 0.5:
                n = "img_%04d_%.4f_%d.png" % (idx, ts - start_ts,
                                              msg.message_body.button_mask)
                img = Image.fromarray(numpy_screen.peek())
                draw = ImageDraw.Draw(img)
                y_ofs = msg.message_body.pos_y
                x_ofs = msg.message_body.pos_x
                if msg.message_body.button_mask: