Exemplo n.º 1
0
def pqp_benchmark():
    """Compare queued fetching (``df.get``) against direct task invocation.

    Times four fetch strategies over ``samples`` iterations and prints the
    elapsed time for each, subtracting the deliberate sleep overhead from
    the two ``wait_*`` variants.
    """
    task = Zeroes()
    # task = Lamb #Error: cant be pickled
    # task = Func
    batch_size = 16
    tensor_size = (9, 9, 9, 9, 9)
    wait_time = 0.1
    samples = 100

    df = PooledQueueProcessor(
        task,
        args=[batch_size],
        kwargs={"tensor_size": tensor_size},
        max_queue_size=samples,
    )

    def get():
        return df.get()

    def wait_get():
        time.sleep(wait_time)
        return df.get()

    def generate():
        return task(batch_size, tensor_size=tensor_size)

    def wait_generate():
        time.sleep(wait_time)
        return task(batch_size, tensor_size=tensor_size)

    # Discounts cancel the intentional sleep so only the fetch cost is shown.
    candidates = (get, wait_get, generate, wait_generate)
    discounts = (0, samples * wait_time, 0, samples * wait_time)
    for func, discount in zip(candidates, discounts):
        elapsed, _result = benchmark_func(func, samples)
        print(f"{func.__name__}: {elapsed - discount} seconds")
Exemplo n.º 2
0
def test_integration_success():
    """Smoke test: a pre-filled processor yields items inside a with-block."""
    task = Square()

    with PooledQueueProcessor(
        task, [2], fill_at_construction=True, max_queue_size=10
    ) as processor:
        for item, _ in zip(processor, range(30)):
            print(item)
Exemplo n.º 3
0
def pooled_neodroid_env_classification_generator(env, device, batch_size=64) -> Tuple:
    """Yield classification batches fetched from a Neodroid env via a pooled queue.

    NOTE(review): despite the ``-> Tuple`` annotation this is a generator
    function, and ``for a in zip(processor)`` wraps each item in a 1-tuple,
    so each yield is ``((images, labels),)`` rather than ``(images, labels)``
    — confirm this wrapping is intentional before changing it.

    :param env: environment exposing ``update()`` and ``sensor(name)``
    :param device: torch device (or device string) tensors are moved to
    :param batch_size: number of samples accumulated per batch
    :return: generator of 1-tuples wrapping (stacked image tensor, LongTensor labels)
    """

    class FetchConvert(PooledQueueTask):
        # Queue task: pulls observations from the env and converts them to tensors.
        def __init__(
            self,
            env,
            device: Union[str, torch.device] = "cpu",
            batch_size: int = 64,
            *args,
            **kwargs
        ):
            """Store the env/device/batch size; remaining args go to the base task.

      :param env: source environment
      :param device: target device for produced tensors
      :param batch_size: samples accumulated per ``call``
      :param args: forwarded to PooledQueueTask
      :param kwargs: forwarded to PooledQueueTask
      """
            super().__init__(*args, **kwargs)

            self.env = env
            self.batch_size = batch_size
            self.device = device

        def call(self, *args, **kwargs) -> Tuple:
            """Collect one full batch of (image tensor, class id) pairs from the env."""
            predictors = []
            class_responses = []

            # Step the environment until a full batch has been accumulated.
            while len(predictors) < self.batch_size:
                state = self.env.update()
                rgb_arr = state.sensor("RGB").value
                # Sensor value is fed to Image.open — presumably a path or
                # file-like buffer; confirm against the env's sensor API.
                rgb_arr = Image.open(rgb_arr).convert("RGB")
                a_class = state.sensor("Class").value

                predictors.append(default_torch_transform(rgb_arr))
                class_responses.append(int(a_class))

            return (
                torch.stack(predictors).to(self.device),
                torch.LongTensor(class_responses).to(self.device),
            )

    task = FetchConvert(env, device=device, batch_size=batch_size)

    # NOTE(review): n_proc=None presumably defers the worker count to the
    # processor's default — confirm in PooledQueueProcessor.
    processor = PooledQueueProcessor(
        task, fill_at_construction=True, max_queue_size=16, n_proc=None
    )

    for a in zip(processor):
        yield a
Exemplo n.º 4
0
def NeodroidClassificationGenerator2(env, device, batch_size=64):
    """Yield batches produced by a FetchConvert task through a pre-filled queue.

    Note that ``zip`` with a single iterable wraps each item in a 1-tuple,
    matching the original behavior exactly.
    """
    fetch_task = FetchConvert(env, device=device, batch_size=batch_size)

    queue_processor = PooledQueueProcessor(
        fetch_task, fill_at_construction=True, max_queue_size=16, n_proc=None
    )

    # Equivalent to the explicit for/yield loop over zip(processor).
    yield from zip(queue_processor)
Exemplo n.º 5
0
def test_integration_except_ctx():
    """The failing task's NotImplementedError is raised and captured."""
    task = Exc()

    with pytest.raises(NotImplementedError) as exc_info:
        task()  # TODO: MP does not work in pytest
        with PooledQueueProcessor(task, [2], max_queue_size=10) as processor:
            for item, _ in zip(processor, range(30)):
                print(item)

    assert exc_info.type is NotImplementedError
Exemplo n.º 6
0
def test_lambda_func():
    """A bare lambda is accepted as the task and the queue can be drained."""
    task = lambda x: x

    with PooledQueueProcessor(task, [2], max_queue_size=10) as processor:
        # Pull 30 items; the values themselves are irrelevant here.
        for _item, _i in zip(processor, range(30)):
            pass
Exemplo n.º 7
0
def test_integration_func():
    """A plain function task works end-to-end through the processor."""
    task = identity

    with PooledQueueProcessor(task, [2], max_queue_size=10) as processor:
        # Drain 30 items to exercise the queue; results are discarded.
        for _item, _i in zip(processor, range(30)):
            pass