Example 1
    # assumed imports (not shown in this snippet): from faster_fifo import Queue;
    # from queue import Full, Empty; log is a module-level logger
    def test_queue_usage(self):
        q = Queue(1000 * 1000)  # specify the size (in bytes) of the circular buffer in the ctor

        # any pickle-able Python object can be added to the queue
        py_obj = dict(a=42, b=33, c=(1, 2, 3), d=[1, 2, 3], e='123', f=b'kkk')
        q.put(py_obj)
        assert q.qsize() == 1

        retrieved = q.get()
        assert q.empty()
        assert py_obj == retrieved

        for i in range(100):
            try:
                q.put(py_obj, timeout=0.1)
            except Full:
                log.debug('Queue is full!')

        num_received = 0
        while num_received < 100:
            # get multiple messages at once, returns a list of messages for better performance in many-to-few scenarios
            # get_many does not guarantee that all max_messages_to_get will be received on the first call, in fact
            # no such guarantee can be made in multiprocessing systems.
            # get_many() will retrieve as many messages as there are available AND can fit in the pre-allocated memory
            # buffer. The size of the buffer is increased gradually to match demand.
            messages = q.get_many(max_messages_to_get=100)
            num_received += len(messages)

        try:
            q.get(timeout=0.1)
            assert False, 'This should never be reached, the queue is empty'
        except Empty:
            log.debug('Queue is empty')
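
The comments above describe get_many()'s many-to-few use case. Below is a minimal producer/consumer sketch of that pattern, assuming faster-fifo's documented API (put() and get_many() accept block/timeout keywords like the standard queue module); the produce/consume helpers and message counts are illustrative, not part of the original test:

import multiprocessing
from queue import Empty, Full

from faster_fifo import Queue


def produce(q, n):
    # each producer pushes n small messages, retrying briefly while the buffer is full
    for i in range(n):
        while True:
            try:
                q.put(('msg', i), timeout=0.1)
                break
            except Full:
                pass  # no room yet, retry


def consume(q, total):
    received = 0
    while received < total:
        try:
            # drain whatever is available right now, up to 100 messages per call
            messages = q.get_many(max_messages_to_get=100, timeout=0.1)
            received += len(messages)
        except Empty:
            pass  # nothing available yet, poll again


if __name__ == '__main__':
    q = Queue(1000 * 1000)  # 1 MB circular buffer, as in the test above
    producers = [multiprocessing.Process(target=produce, args=(q, 1000)) for _ in range(4)]
    consumer = multiprocessing.Process(target=consume, args=(q, 4 * 1000))
    for p in producers:
        p.start()
    consumer.start()
    for p in producers:
        p.join()
    consumer.join()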
Example 2
    # assumed context: same imports as above; this test runs inside a unittest.TestCase
    def test_queue_size(self):
        q = Queue(max_size_bytes=1000)
        py_obj_1 = dict(a=10, b=20)
        py_obj_2 = dict(a=30, b=40)
        q.put_nowait(py_obj_1)
        q.put_nowait(py_obj_2)
        q_size_bef = q.qsize()
        log.debug('Queue size after put - %d', q_size_bef)
        num_messages = 0
        want_to_read = 2
        while num_messages < want_to_read:
            msgs = q.get_many()
            print(msgs)
            num_messages += len(msgs)
        self.assertIsInstance(q_size_bef, int)
        q_size_af = q.qsize()
        log.debug('Queue size after get - %d', q_size_af)
        self.assertEqual(q_size_af, 0)
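
A detail this test relies on: max_size_bytes caps the shared buffer in bytes, while qsize() counts messages. A short sketch of the batched producer-side counterpart (assuming put_many() exists alongside get_many(), as faster-fifo's README shows; the buffer size here is arbitrary):

from faster_fifo import Queue

q = Queue(max_size_bytes=100 * 1000)  # the cap is in bytes...
q.put_many([dict(a=10, b=20), dict(a=30, b=40)])  # enqueue a batch in one call
print(q.qsize())     # ...but qsize() counts messages: prints 2
msgs = q.get_many()  # drains both messages in a single call
print(msgs)          # [{'a': 10, 'b': 20}, {'a': 30, 'b': 40}]
print(q.empty())     # True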
Example 3
                continue  # snippet resumes mid-loop; earlier branches are not shown

            # writer.add_scalar("charts/episode_reward", r, global_step)
            # writer.add_scalar("charts/stats_queue_size", stats_queue.qsize(), global_step)
            # writer.add_scalar("charts/rollouts_queue_size", rollouts_queue.qsize(), global_step)
            # writer.add_scalar("charts/data_process_queue_size", data_process_queue.qsize(), global_step)
            if update_step % 10 == 0:
                # print(f"global_step={global_step}, episode_reward={r}")
                print(f"global_step={global_step}")
                global_step += global_step_increment
                # steps accumulated since the last report divided by elapsed wall time
                fps = global_step_increment / (time.time() - start_time)
                writer.add_scalar("charts/fps", fps, global_step)
                writer.add_scalar("charts/policy_request_queue",
                                  policy_request_queue.qsize(), global_step)
                writer.add_scalar("charts/rollout_task_queues[0]",
                                  rollout_task_queues[0].qsize(), global_step)
                print("FPS: ", fps)
                # reset the measurement window for the next report
                global_step_increment = 0
                start_time = time.time()

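
Example 3's bookkeeping boils down to one pattern: accumulate steps since the last report, divide by elapsed wall time, then reset both counters. A self-contained sketch of that pattern with torch.utils.tensorboard (the work loop is simulated with sleep; the tag and step counts are illustrative):

import time

from torch.utils.tensorboard import SummaryWriter

writer = SummaryWriter('runs/fps_demo')
global_step = 0
global_step_increment = 0
start_time = time.time()

for update_step in range(1, 101):
    time.sleep(0.01)             # stand-in for one iteration of real work
    global_step_increment += 64  # e.g. 64 environment steps per update

    if update_step % 10 == 0:
        global_step += global_step_increment
        fps = global_step_increment / (time.time() - start_time)
        writer.add_scalar('charts/fps', fps, global_step)
        # reset the window so the next report measures only fresh throughput
        global_step_increment = 0
        start_time = time.time()

writer.close()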