def test_simple_use():
    q = Queue()
    items = list(range(10))
    for item in items:
        q.put(item)
    for item in items:
        assert item == q.get()
class EvaluationLogger:
    def __init__(self, get_log_data, log_size=100000):
        # Bound the queue with the configured log size (the original accepted
        # log_size but never used it).
        self.log = Queue(maxsize=log_size)
        self.get_log_data = get_log_data

    def log_data(self, env):
        self.log.put(self.get_log_data(env))

    def get_data(self):
        # Drain the queue and return all logged records.
        return [self.log.get() for _ in range(self.log.size())]
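# A minimal usage sketch for EvaluationLogger (illustrative only): `DummyEnv` and the
# reward-extracting lambda are assumptions, not part of the original code. Assumes
# `Queue` above is ray.util.queue.Queue, so Ray must be initialized first.
import ray


class DummyEnv:
    episode_reward = 1.0


def demo_evaluation_logger():
    ray.init(ignore_reinit_error=True)
    logger = EvaluationLogger(get_log_data=lambda env: env.episode_reward)
    for _ in range(3):
        logger.log_data(DummyEnv())  # one record per evaluation episode
    assert logger.get_data() == [1.0, 1.0, 1.0]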
def test_simple_usage(ray_start_regular):
    q = Queue()
    items = list(range(10))
    for item in items:
        q.put(item)
    for item in items:
        assert item == q.get()
def test_async_get(ray_start_regular):
    q = Queue()
    future = async_get.remote(q)
    with pytest.raises(Empty):
        q.get_nowait()
    with pytest.raises(RayTimeoutError):
        ray.get(future, timeout=0.1)
    # The task is not canceled on timeout; the pending get completes once an item arrives.
    q.put(1)
    assert ray.get(future) == 1
def test_async_put(ray_start_regular):
    q = Queue(1)
    q.put(1)
    future = async_put.remote(q, 2)
    with pytest.raises(Full):
        q.put_nowait(3)
    with pytest.raises(RayTimeoutError):
        ray.get(future, timeout=0.1)
    # The task is not canceled on timeout; the pending put completes once space frees up.
    assert q.get() == 1
    assert q.get() == 2
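# The two tests above call `async_get.remote` / `async_put.remote`, which are defined
# elsewhere in their test module. A plausible sketch of those helpers, mirroring the
# `get_async` / `put_async` tasks in the combined test further below (the exact
# originals may differ):
@ray.remote
def async_get(queue):
    # Block until an item is available, then return it.
    return queue.get(block=True)


@ray.remote
def async_put(queue, item):
    # Block until space is available, then insert the item.
    queue.put(item, block=True)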
def test_qsize():
    q = Queue()
    items = list(range(10))
    size = 0
    assert q.qsize() == size
    for item in items:
        q.put(item)
        size += 1
        assert q.qsize() == size
    for item in items:
        assert q.get() == item
        size -= 1
        assert q.qsize() == size
def test_get(ray_start_regular):
    q = Queue()
    item = 0
    q.put(item)
    assert q.get(block=False) == item
    item = 1
    q.put(item)
    assert q.get(timeout=0.2) == item
    with pytest.raises(ValueError):
        q.get(timeout=-1)
    with pytest.raises(Empty):
        q.get_nowait()
    with pytest.raises(Empty):
        q.get(timeout=0.2)
def test_put(ray_start_regular):
    q = Queue(1)
    item = 0
    q.put(item, block=False)
    assert q.get() == item
    item = 1
    q.put(item, timeout=0.2)
    assert q.get() == item
    with pytest.raises(ValueError):
        q.put(0, timeout=-1)
    q.put(0)
    with pytest.raises(Full):
        q.put_nowait(1)
    with pytest.raises(Full):
        q.put(1, timeout=0.2)
class ReplayMemory:
    def __init__(self, memory_size=20000,
                 random_generator=default_random_generator, memory_ratio=1.0):
        self.memory = Queue(maxsize=memory_size)
        self.random_generator = random_generator
        self.memory_ratio = memory_ratio

    def add(self, data, block=False):
        # Evict the oldest entry when full, then insert with probability memory_ratio.
        if self.memory.full():
            self.memory.get(block=True)
        if self.random_generator.rand() < self.memory_ratio:
            self.memory.put(data, block)

    def sample(self, n):
        assert n <= self.memory.size(), "Not enough replay memory"
        # Drain the queue (this consumes the stored transitions), then draw n random
        # indices. Assumes a NumPy-style generator; the original called
        # randint(len(data), n), which passes n as the upper bound instead of the
        # sample size.
        data = []
        while self.memory.size() > 0:
            data.append(self.memory.get())
        sample_idx = self.random_generator.randint(len(data), size=n)
        samples = [data[i] for i in sample_idx]
        return torch.stack(samples, dim=0)
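# A minimal usage sketch for ReplayMemory (illustrative only): assumes Ray is
# initialized, `torch` is available, the module-level `default_random_generator`
# referenced above exists, and the generator follows NumPy's RandomState API
# (rand/randint), which is what the class appears to expect.
import numpy as np
import torch
import ray


def demo_replay_memory():
    ray.init(ignore_reinit_error=True)
    memory = ReplayMemory(memory_size=8, random_generator=np.random.RandomState(0))
    for step in range(8):
        memory.add(torch.tensor([float(step)]))  # store one transition per step
    batch = memory.sample(4)  # draws 4 stored transitions (and drains the queue)
    assert batch.shape == (4, 1)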
def test_get():
    q = Queue()
    item = 0
    q.put(item)
    assert q.get(block=False) == item
    item = 1
    q.put(item)
    assert q.get(timeout=0.2) == item
    with pytest.raises(ValueError):
        q.get(timeout=-1)
    with pytest.raises(Empty):
        q.get_nowait()
    with pytest.raises(Empty):
        q.get(timeout=0.2)
    item = 0
    put_async.remote(q, item, True, None, 0.2)
    assert q.get() == item
def time_put(self):
    queue = Queue(1000)
    for i in range(1000):
        queue.put(i)
def test_put():
    q = Queue(1)
    item = 0
    q.put(item, block=False)
    assert q.get() == item
    item = 1
    q.put(item, timeout=0.2)
    assert q.get() == item
    with pytest.raises(ValueError):
        q.put(0, timeout=-1)
    q.put(0)
    with pytest.raises(Full):
        q.put_nowait(1)
    with pytest.raises(Full):
        q.put(1, timeout=0.2)
    q.get()
    q.put(1)
    get_id = get_async.remote(q, False, None, 0.2)
    q.put(2)
    assert ray.get(get_id) == 1
def test_put():
    start_ray()
    q = Queue(1)
    item = 0
    q.put(item, block=False)
    assert q.get() == item
    item = 1
    q.put(item, timeout=0.2)
    assert q.get() == item
    with pytest.raises(ValueError):
        q.put(0, timeout=-1)
    q.put(0)
    with pytest.raises(Full):
        q.put_nowait(1)
    with pytest.raises(Full):
        q.put(1, timeout=0.2)
    q.get()
    q.put(1)
    get_id = get_async.remote(q, False, None, 0.2)
    q.put(2)
    assert ray.get(get_id) == 1
def time_get(self):
    queue = Queue()
    for i in range(1000):
        queue.put(i)
    for _ in range(1000):
        queue.get()
def evaluation(
        evaluation_config_path="./configs/evaluation/fast_reanalyze_evaluation.toml"):
    t1 = time.time()
    ray.init()
    config = load_toml(evaluation_config_path)
    api = wandb.Api()
    if len(config.run_ids) > 0:
        runs = [
            api.run(path=f"{config.entity}/{config.project_name}/{id}")
            for id in config.run_ids
        ]
    else:
        runs = api.runs(path=f"{config.entity}/{config.project_name}",
                        filters=config.filters)
    results = SharedResults.remote(num_episodes=config.num_episodes)
    job_queue = Queue()

    # Fill the queue with models to evaluate.
    for run in runs:
        files = run.files()
        print(files)
        # Match a module name followed by ".py" (the original pattern used "(:?" and
        # an unescaped dot, which look like typos for the forms below).
        env_config_file = find_env_config(files.objects, r"(?:^|\s)\w*(?=\.py)")
        try:
            weights_file_result = run.files("model.weights")
            if env_config_file is None:
                continue
            env_config_name = os.path.splitext(env_config_file.name)[0]
            # if os.path.exists(os.path.join(ModelEvaluator.CONFIGS_DIR_PATH, env_config_file.name)) is False:
            env_config_file.download(replace=True,
                                     root=ModelEvaluator.CONFIGS_DIR_PATH)
            weight_file_path = os.path.join(ModelEvaluator.WEIGHTS_DIR_PATH,
                                            env_config_name, f"{run.id}.weights")
            if not os.path.exists(weight_file_path):
                pathlib.Path(os.path.dirname(weight_file_path)).mkdir(
                    parents=True, exist_ok=True)
                weights_file = weights_file_result[0].download(
                    replace=True, root=ModelEvaluator.WEIGHTS_DIR_PATH)
                shutil.move(weights_file.name, weight_file_path)
                weight_file_path = weights_file.name
                del weights_file
            for seed in range(config.num_episodes):
                job_queue.put(
                    (env_config_name, weight_file_path, env_config_file, seed))
        except Exception:
            print(f"{run.name} failure")

    # Start the model evaluator workers.
    evaluators = []
    for _ in range(config.num_workers):
        model_evaluator = ModelEvaluator.remote(job_queue, results,
                                                config.num_episodes)
        evaluators.append(model_evaluator.evaluate.remote())

    # Wait for all the workers to be done.
    ray.get(evaluators)

    # Save the results, using at most the last 10 run ids in the file name
    # (the original indexed a single element with [-10]).
    ids_string = '_'.join(config.run_ids[-10:])
    filter_string = '_'.join(
        [f"{key}-{value}" for key, value in config.filters.items()])
    with open(
            f'evaluation_results/test_results_{ids_string}_{filter_string}.json',
            'w') as outfile:
        json.dump(ray.get(results.get_result.remote()), outfile)
    print(f"Time taken : {time.time() - t1}")
def test_queue(ray_start_regular):
    @ray.remote
    def get_async(queue, block, timeout, sleep):
        time.sleep(sleep)
        return queue.get(block, timeout)

    @ray.remote
    def put_async(queue, item, block, timeout, sleep):
        time.sleep(sleep)
        queue.put(item, block, timeout)

    # Test simple usage.
    q = Queue()
    items = list(range(10))
    for item in items:
        q.put(item)
    for item in items:
        assert item == q.get()

    # Test asynchronous usage.
    q = Queue()
    items = set(range(10))
    producers = [  # noqa
        put_async.remote(q, item, True, None, 0.5) for item in items
    ]
    consumers = [get_async.remote(q, True, None, 0) for _ in items]
    result = set(ray.get(consumers))
    assert items == result

    # Test put.
    q = Queue(1)
    item = 0
    q.put(item, block=False)
    assert q.get() == item
    item = 1
    q.put(item, timeout=0.2)
    assert q.get() == item
    with pytest.raises(ValueError):
        q.put(0, timeout=-1)
    q.put(0)
    with pytest.raises(Full):
        q.put_nowait(1)
    with pytest.raises(Full):
        q.put(1, timeout=0.2)
    q.get()
    q.put(1)
    get_id = get_async.remote(q, False, None, 0.2)
    q.put(2)
    assert ray.get(get_id) == 1

    # Test get.
    q = Queue()
    item = 0
    q.put(item)
    assert q.get(block=False) == item
    item = 1
    q.put(item)
    assert q.get(timeout=0.2) == item
    with pytest.raises(ValueError):
        q.get(timeout=-1)
    with pytest.raises(Empty):
        q.get_nowait()
    with pytest.raises(Empty):
        q.get(timeout=0.2)
    item = 0
    put_async.remote(q, item, True, None, 0.2)
    assert q.get() == item

    # Test qsize.
    q = Queue()
    items = list(range(10))
    size = 0
    assert q.qsize() == size
    for item in items:
        q.put(item)
        size += 1
        assert q.qsize() == size
    for item in items:
        assert q.get() == item
        size -= 1
        assert q.qsize() == size
def test_queue(ray_start):
    @ray.remote
    def get_async(queue, block, timeout, sleep):
        time.sleep(sleep)
        return queue.get(block, timeout)

    @ray.remote
    def put_async(queue, item, block, timeout, sleep):
        time.sleep(sleep)
        queue.put(item, block, timeout)

    # Test simple usage.
    q = Queue()
    items = list(range(10))
    for item in items:
        q.put(item)
    for item in items:
        assert item == q.get()

    # Test asynchronous usage.
    q = Queue()
    items = set(range(10))
    producers = [  # noqa
        put_async.remote(q, item, True, None, 0.5) for item in items
    ]
    consumers = [get_async.remote(q, True, None, 0) for _ in items]
    result = set(ray.get(consumers))
    assert items == result

    # Test put.
    q = Queue(1)
    item = 0
    q.put(item, block=False)
    assert q.get() == item
    item = 1
    q.put(item, timeout=0.2)
    assert q.get() == item
    with pytest.raises(ValueError):
        q.put(0, timeout=-1)
    q.put(0)
    with pytest.raises(Full):
        q.put_nowait(1)
    with pytest.raises(Full):
        q.put(1, timeout=0.2)
    q.get()
    q.put(1)
    get_id = get_async.remote(q, False, None, 0.2)
    q.put(2)
    assert ray.get(get_id) == 1

    # Test get.
    q = Queue()
    item = 0
    q.put(item)
    assert q.get(block=False) == item
    item = 1
    q.put(item)
    assert q.get(timeout=0.2) == item
    with pytest.raises(ValueError):
        q.get(timeout=-1)
    with pytest.raises(Empty):
        q.get_nowait()
    with pytest.raises(Empty):
        q.get(timeout=0.2)
    item = 0
    put_async.remote(q, item, True, None, 0.2)
    assert q.get() == item

    # Test qsize.
    q = Queue()
    items = list(range(10))
    size = 0
    assert q.qsize() == size
    for item in items:
        q.put(item)
        size += 1
        assert q.qsize() == size
    for item in items:
        assert q.get() == item
        size -= 1
        assert q.qsize() == size