Code Example #1
File: test.py Project: rohanpadhye/ray
    def test_transfer(self):
        for _ in range(100):
            # Create an object.
            object_id1, memory_buffer1, metadata1 = create_object(
                self.client1, 2000, 2000)
            # Transfer the buffer to the other PlasmaStore.
            self.client1.transfer("127.0.0.1", self.port2, object_id1)
            # Compare the two buffers.
            assert_get_object_equal(self,
                                    self.client1,
                                    self.client2,
                                    object_id1,
                                    memory_buffer=memory_buffer1,
                                    metadata=metadata1)
            # # Transfer the buffer again.
            # self.client1.transfer("127.0.0.1", self.port2, object_id1)
            # # Compare the two buffers.
            # assert_get_object_equal(self, self.client1, self.client2, object_id1,
            #                         memory_buffer=memory_buffer1, metadata=metadata1)

            # Create an object.
            object_id2, memory_buffer2, metadata2 = create_object(
                self.client2, 20000, 20000)
            # Transfer the buffer to the other PlasmaStore.
            self.client2.transfer("127.0.0.1", self.port1, object_id2)
            # Compare the two buffers.
            assert_get_object_equal(self,
                                    self.client1,
                                    self.client2,
                                    object_id2,
                                    memory_buffer=memory_buffer2,
                                    metadata=metadata2)
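
The examples on this page call several helpers (create_object, create_object_with_id, assert_get_object_equal) that are defined elsewhere in the Ray test suite. As a rough orientation only, the stubs below sketch the contracts those helpers appear to have, inferred purely from the call sites in these examples; the real implementations may differ.

# Hypothetical stubs for the helpers used throughout these examples, inferred
# only from how they are called; the real definitions live in Ray's test
# utilities and may differ.
def create_object(client, data_size, metadata_size, seal=True):
    """Create an object of data_size bytes with metadata_size bytes of
    metadata in the given plasma client; return (object_id, memory_buffer,
    metadata)."""
    raise NotImplementedError("see Ray's test utilities")

def create_object_with_id(client, object_id, data_size, metadata_size):
    """Like create_object, but with a caller-supplied object ID; returns
    (memory_buffer, metadata)."""
    raise NotImplementedError("see Ray's test utilities")

def assert_get_object_equal(unit_test, client1, client2, object_id,
                            memory_buffer=None, metadata=None):
    """Assert that both clients observe identical data and metadata for
    object_id."""
    raise NotImplementedError("see Ray's test utilities")
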
Code Example #2
File: test.py Project: BenJamesbabala/ray
  def test_fetch_multiple(self):
    for _ in range(20):
      # Create two objects and a third fake one that doesn't exist.
      object_id1, memory_buffer1, metadata1 = create_object(self.client1, 2000, 2000)
      missing_object_id = random_object_id()
      object_id2, memory_buffer2, metadata2 = create_object(self.client1, 2000, 2000)
      object_ids = [object_id1, missing_object_id, object_id2]
      # Fetch the objects from the other plasma store. The second object ID
      # should time out since it does not exist.
      # TODO(rkn): Right now we must wait for the object table to be updated.
      while (not self.client2.contains(object_id1)) or (not self.client2.contains(object_id2)):
        self.client2.fetch(object_ids)
      # Compare the buffers of the objects that do exist.
      assert_get_object_equal(self, self.client1, self.client2, object_id1,
                              memory_buffer=memory_buffer1, metadata=metadata1)
      assert_get_object_equal(self, self.client1, self.client2, object_id2,
                              memory_buffer=memory_buffer2, metadata=metadata2)
      # Fetch in the other direction. The fake object still does not exist.
      self.client1.fetch(object_ids)
      assert_get_object_equal(self, self.client2, self.client1, object_id1,
                              memory_buffer=memory_buffer1, metadata=metadata1)
      assert_get_object_equal(self, self.client2, self.client1, object_id2,
                              memory_buffer=memory_buffer2, metadata=metadata2)

    # Check that we can call fetch with duplicated object IDs.
    object_id3 = random_object_id()
    self.client1.fetch([object_id3, object_id3])
    object_id4, memory_buffer4, metadata4 = create_object(self.client1, 2000, 2000)
    time.sleep(0.1)
    # TODO(rkn): Right now we must wait for the object table to be updated.
    while not self.client2.contains(object_id4):
      self.client2.fetch([object_id3, object_id3, object_id4, object_id4])
    assert_get_object_equal(self, self.client2, self.client1, object_id4,
                            memory_buffer=memory_buffer4, metadata=metadata4)
Code Example #3
File: test.py Project: BenJamesbabala/ray
def assert_create_raises_plasma_full(unit_test, size):
  partial_size = np.random.randint(size)
  try:
    _, memory_buffer, _ = create_object(unit_test.plasma_client, partial_size, size - partial_size)
  except plasma.plasma_out_of_memory_error as e:
    pass
  else:
    # For some reason the above didn't throw an exception, so fail.
    unit_test.assertTrue(False)
Code Example #4
File: test.py Project: rohanpadhye/ray
    def test_fetch(self):
        for _ in range(10):
            # Create an object.
            object_id1, memory_buffer1, metadata1 = create_object(
                self.client1, 2000, 2000)
            self.client1.fetch([object_id1])
            self.assertEqual(self.client1.contains(object_id1), True)
            self.assertEqual(self.client2.contains(object_id1), False)
            # Fetch the object from the other plasma manager.
            # TODO(rkn): Right now we must wait for the object table to be updated.
            while not self.client2.contains(object_id1):
                self.client2.fetch([object_id1])
            # Compare the two buffers.
            assert_get_object_equal(self,
                                    self.client1,
                                    self.client2,
                                    object_id1,
                                    memory_buffer=memory_buffer1,
                                    metadata=metadata1)

        # Test that we can call fetch on object IDs that don't exist yet.
        object_id2 = random_object_id()
        self.client1.fetch([object_id2])
        self.assertEqual(self.client1.contains(object_id2), False)
        memory_buffer2, metadata2 = create_object_with_id(
            self.client2, object_id2, 2000, 2000)
        # # Check that the object has been fetched.
        # self.assertEqual(self.client1.contains(object_id2), True)
        # Compare the two buffers.
        # assert_get_object_equal(self, self.client1, self.client2, object_id2,
        #                         memory_buffer=memory_buffer2, metadata=metadata2)

        # Test calling the same fetch request a bunch of times.
        object_id3 = random_object_id()
        self.assertEqual(self.client1.contains(object_id3), False)
        self.assertEqual(self.client2.contains(object_id3), False)
        for _ in range(10):
            self.client1.fetch([object_id3])
            self.client2.fetch([object_id3])
        memory_buffer3, metadata3 = create_object_with_id(
            self.client1, object_id3, 2000, 2000)
        for _ in range(10):
            self.client1.fetch([object_id3])
            self.client2.fetch([object_id3])
        # TODO(rkn): Right now we must wait for the object table to be updated.
        while not self.client2.contains(object_id3):
            self.client2.fetch([object_id3])
        assert_get_object_equal(self,
                                self.client1,
                                self.client2,
                                object_id3,
                                memory_buffer=memory_buffer3,
                                metadata=metadata3)
Code Example #5
File: test.py Project: rohanpadhye/ray
    def test_integration_single_task(self):
        # There should be 2 * NUM_CLUSTER_NODES + 1 db clients: a local
        # scheduler and a plasma manager for each node, plus the global
        # scheduler.
        self.assertEqual(
            len(self.redis_client.keys("{}*".format(DB_CLIENT_PREFIX))),
            2 * NUM_CLUSTER_NODES + 1)

        num_return_vals = [0, 1, 2, 3, 5, 10]
        # There should not be anything else in Redis yet.
        self.assertEqual(len(self.redis_client.keys("*")),
                         2 * NUM_CLUSTER_NODES + 1)
        # Insert the object into Redis.
        data_size = 0xf1f0
        metadata_size = 0x40
        plasma_client = self.plasma_clients[0]
        object_dep, memory_buffer, metadata = create_object(plasma_client,
                                                            data_size,
                                                            metadata_size,
                                                            seal=True)

        # Sleep before submitting task to photon.
        time.sleep(0.1)
        # Submit a task to Redis.
        task = photon.Task(random_driver_id(), random_function_id(),
                           [photon.ObjectID(object_dep)], num_return_vals[0],
                           random_task_id(), 0)
        self.photon_clients[0].submit(task)
        time.sleep(0.1)
        # There should now be a task in Redis, and it should get assigned to the
        # local scheduler.
        num_retries = 10
        while num_retries > 0:
            task_entries = self.redis_client.keys("{}*".format(TASK_PREFIX))
            self.assertLessEqual(len(task_entries), 1)
            if len(task_entries) == 1:
                task_contents = self.redis_client.hgetall(task_entries[0])
                task_status = int(task_contents[b"state"])
                self.assertTrue(task_status in [
                    TASK_STATUS_WAITING, TASK_STATUS_SCHEDULED,
                    TASK_STATUS_QUEUED
                ])
                if task_status == TASK_STATUS_QUEUED:
                    break
                else:
                    print(task_status)
            print("The task has not been scheduled yet, trying again.")
            num_retries -= 1
            time.sleep(1)

        if num_retries <= 0 and task_status != TASK_STATUS_QUEUED:
            # Failed to submit and schedule a single task -- bail.
            self.tearDown()
            sys.exit(1)
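
This example (like Example #8 below) checks the task state by polling Redis in a bounded retry loop. Stripped of the test specifics, the pattern amounts to something like the sketch below; poll_until is an illustrative name, not part of the Ray test suite.

import time

# Generic sketch of the retry pattern used above: poll a predicate until it
# holds or the retry budget is exhausted. Illustrative only.
def poll_until(predicate, num_retries=10, delay=1.0):
    for _ in range(num_retries):
        if predicate():
            return True
        time.sleep(delay)
    return False

The loop in test_integration_single_task corresponds roughly to poll_until(task_is_queued, num_retries=10, delay=1), where task_is_queued would be a hypothetical predicate that reads the task's state field from Redis.
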
Code Example #6
File: test.py Project: BenJamesbabala/ray
  def test_wait(self):
    # Test timeout.
    obj_id0 = random_object_id()
    self.client1.wait([obj_id0], timeout=100, num_returns=1)
    # If we get here, the test worked.

    # Test wait if local objects available.
    obj_id1 = random_object_id()
    self.client1.create(obj_id1, 1000)
    self.client1.seal(obj_id1)
    ready, waiting = self.client1.wait([obj_id1], timeout=100, num_returns=1)
    self.assertEqual(set(ready), set([obj_id1]))
    self.assertEqual(waiting, [])

    # Test wait if only one object available and only one object waited for.
    obj_id2 = random_object_id()
    self.client1.create(obj_id2, 1000)
    # Don't seal.
    ready, waiting = self.client1.wait([obj_id2, obj_id1], timeout=100, num_returns=1)
    self.assertEqual(set(ready), set([obj_id1]))
    self.assertEqual(set(waiting), set([obj_id2]))

    # Test wait if object is sealed later.
    obj_id3 = random_object_id()

    def finish():
      self.client2.create(obj_id3, 1000)
      self.client2.seal(obj_id3)

    t = threading.Timer(0.1, finish)
    t.start()
    ready, waiting = self.client1.wait([obj_id3, obj_id2, obj_id1], timeout=1000, num_returns=2)
    self.assertEqual(set(ready), set([obj_id1, obj_id3]))
    self.assertEqual(set(waiting), set([obj_id2]))

    # Test that the appropriate number of objects is returned when some of them are not ready.
    ready, waiting = self.client1.wait([obj_id3, obj_id2, obj_id1], 100, 3)
    self.assertEqual(set(ready), set([obj_id1, obj_id3]))
    self.assertEqual(set(waiting), set([obj_id2]))

    # Don't forget to seal obj_id2.
    self.client1.seal(obj_id2)

    # Test calling wait a bunch of times.
    object_ids = []
    # TODO(rkn): Increasing n to 100 (or larger) will cause failures. The
    # problem appears to be that the number of timers added to the manager event
    # loop slows down the manager so much that some of the asynchronous Redis
    # commands time out, triggering fatal failure callbacks.
    n = 40
    for i in range(n * (n + 1) // 2):
      if i % 2 == 0:
        object_id, _, _ = create_object(self.client1, 200, 200)
      else:
        object_id, _, _ = create_object(self.client2, 200, 200)
      object_ids.append(object_id)
    # Try waiting for all of the object IDs on the first client.
    waiting = object_ids
    retrieved = []
    for i in range(1, n + 1):
      ready, waiting = self.client1.wait(waiting, timeout=1000, num_returns=i)
      self.assertEqual(len(ready), i)
      retrieved += ready
    self.assertEqual(set(retrieved), set(object_ids))
    ready, waiting = self.client1.wait(object_ids, timeout=1000, num_returns=len(object_ids))
    self.assertEqual(set(ready), set(object_ids))
    self.assertEqual(waiting, [])
    # Try waiting for all of the object IDs on the second client.
    waiting = object_ids
    retrieved = []
    for i in range(1, n + 1):
      ready, waiting = self.client2.wait(waiting, timeout=1000, num_returns=i)
      self.assertEqual(len(ready), i)
      retrieved += ready
    self.assertEqual(set(retrieved), set(object_ids))
    ready, waiting = self.client2.wait(object_ids, timeout=1000, num_returns=len(object_ids))
    self.assertEqual(set(ready), set(object_ids))
    self.assertEqual(waiting, [])

    # Make sure that wait returns when the requested number of object IDs are
    # available and does not wait for all object IDs to be available.
    object_ids = [random_object_id() for _ in range(9)] + [20 * b'\x00']
    object_ids_perm = object_ids[:]
    random.shuffle(object_ids_perm)
    for i in range(10):
      if i % 2 == 0:
        create_object_with_id(self.client1, object_ids_perm[i], 2000, 2000)
      else:
        create_object_with_id(self.client2, object_ids_perm[i], 2000, 2000)
      ready, waiting = self.client1.wait(object_ids, num_returns=(i + 1))
      self.assertEqual(set(ready), set(object_ids_perm[:(i + 1)]))
      self.assertEqual(set(waiting), set(object_ids_perm[(i + 1):]))
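
A quick check of the counting argument behind the bulk wait loop in this example: n * (n + 1) // 2 objects are created, and wait is then called with num_returns = 1, 2, ..., n, so the n calls together retrieve exactly the number of objects created. A back-of-the-envelope sketch:

# Counting argument behind the bulk wait loop above: with n = 40, asking for
# 1, 2, ..., n objects across successive wait calls retrieves
# 1 + 2 + ... + n = n * (n + 1) // 2 objects, matching the number created.
n = 40
total_created = n * (n + 1) // 2
total_retrieved = sum(range(1, n + 1))
assert total_retrieved == total_created == 820
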
Code Example #7
File: test.py Project: BenJamesbabala/ray
  def test_store_full(self):
    # The store is started with 1GB, so make sure that create throws an
    # exception when it is full.
    def assert_create_raises_plasma_full(unit_test, size):
      partial_size = np.random.randint(size)
      try:
        _, memory_buffer, _ = create_object(unit_test.plasma_client, partial_size, size - partial_size)
      except plasma.plasma_out_of_memory_error as e:
        pass
      else:
        # For some reason the above didn't throw an exception, so fail.
        unit_test.assertTrue(False)

    # Create a list to keep some of the buffers in scope.
    memory_buffers = []
    _, memory_buffer, _ = create_object(self.plasma_client, 9 * 10 ** 8, 0)
    memory_buffers.append(memory_buffer)
    # Remaining space is 10 ** 8. Make sure that we can't create an object of
    # size 10 ** 8 + 1, but we can create one of size 10 ** 8.
    assert_create_raises_plasma_full(self, 10 ** 8 + 1)
    _, memory_buffer, _ = create_object(self.plasma_client, 10 ** 8, 0)
    del memory_buffer
    _, memory_buffer, _ = create_object(self.plasma_client, 10 ** 8, 0)
    del memory_buffer
    assert_create_raises_plasma_full(self, 10 ** 8 + 1)

    _, memory_buffer, _ = create_object(self.plasma_client, 9 * 10 ** 7, 0)
    memory_buffers.append(memory_buffer)
    # Remaining space is 10 ** 7.
    assert_create_raises_plasma_full(self, 10 ** 7 + 1)

    _, memory_buffer, _ = create_object(self.plasma_client, 9 * 10 ** 6, 0)
    memory_buffers.append(memory_buffer)
    # Remaining space is 10 ** 6.
    assert_create_raises_plasma_full(self, 10 ** 6 + 1)

    _, memory_buffer, _ = create_object(self.plasma_client, 9 * 10 ** 5, 0)
    memory_buffers.append(memory_buffer)
    # Remaining space is 10 ** 5.
    assert_create_raises_plasma_full(self, 10 ** 5 + 1)

    _, memory_buffer, _ = create_object(self.plasma_client, 9 * 10 ** 4, 0)
    memory_buffers.append(memory_buffer)
    # Remaining space is 10 ** 4.
    assert_create_raises_plasma_full(self, 10 ** 4 + 1)

    _, memory_buffer, _ = create_object(self.plasma_client, 9 * 10 ** 3, 0)
    memory_buffers.append(memory_buffer)
    # Remaining space is 10 ** 3.
    assert_create_raises_plasma_full(self, 10 ** 3 + 1)

    _, memory_buffer, _ = create_object(self.plasma_client, 9 * 10 ** 2, 0)
    memory_buffers.append(memory_buffer)
    # Remaining space is 10 ** 2.
    assert_create_raises_plasma_full(self, 10 ** 2 + 1)

    _, memory_buffer, _ = create_object(self.plasma_client, 9 * 10 ** 1, 0)
    memory_buffers.append(memory_buffer)
    # Remaining space is 10 ** 1.
    assert_create_raises_plasma_full(self, 10 ** 1 + 1)

    _, memory_buffer, _ = create_object(self.plasma_client, 9 * 10 ** 0, 0)
    memory_buffers.append(memory_buffer)
    # Remaining space is 10 ** 0.
    assert_create_raises_plasma_full(self, 10 ** 0 + 1)

    _, memory_buffer, _ = create_object(self.plasma_client, 1, 0)
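
The size sequence in test_store_full follows a simple invariant: the store holds 10 ** 9 bytes, and each retained allocation of 9 * 10 ** k bytes leaves exactly 10 ** k bytes free, which is why creating 10 ** k + 1 bytes must fail while 10 ** k still fits. A back-of-the-envelope check of that bookkeeping (per-object overhead in a real plasma store is ignored here):

# Bookkeeping behind test_store_full: after each retained allocation of
# 9 * 10**k bytes, exactly 10**k bytes remain, so creating 10**k + 1 bytes
# should fail while 10**k succeeds. Per-object overhead is ignored.
capacity = 10 ** 9
for k in range(8, -1, -1):
    capacity -= 9 * 10 ** k       # the buffer kept alive in memory_buffers
    assert capacity == 10 ** k    # 10**k fits; 10**k + 1 does not
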
Code Example #8
File: test.py Project: rohanpadhye/ray
    def integration_many_tasks_helper(self, timesync=True):
        # There should be 2 * NUM_CLUSTER_NODES + 1 db clients: a local
        # scheduler and a plasma manager for each node, plus the global
        # scheduler.
        self.assertEqual(
            len(self.redis_client.keys("{}*".format(DB_CLIENT_PREFIX))),
            2 * NUM_CLUSTER_NODES + 1)
        num_return_vals = [0, 1, 2, 3, 5, 10]

        # Submit a bunch of tasks to Redis.
        num_tasks = 1000
        for _ in range(num_tasks):
            # Create a new object for each task.
            data_size = np.random.randint(1 << 20)
            metadata_size = np.random.randint(1 << 10)
            plasma_client = self.plasma_clients[0]
            object_dep, memory_buffer, metadata = create_object(plasma_client,
                                                                data_size,
                                                                metadata_size,
                                                                seal=True)
            if timesync:
                # Give 10ms for object info handler to fire (long enough to yield CPU).
                time.sleep(0.010)
            task = photon.Task(random_driver_id(), random_function_id(),
                               [photon.ObjectID(object_dep)],
                               num_return_vals[0], random_task_id(), 0)
            self.photon_clients[0].submit(task)
        # Check that there are the correct number of tasks in Redis and that they
        # all get assigned to the local scheduler.
        num_retries = 10
        num_tasks_done = 0
        while num_retries > 0:
            task_entries = self.redis_client.keys("{}*".format(TASK_PREFIX))
            self.assertLessEqual(len(task_entries), num_tasks)
            # First, check if all tasks made it to Redis.
            if len(task_entries) == num_tasks:
                task_contents = [
                    self.redis_client.hgetall(task_entries[i])
                    for i in range(len(task_entries))
                ]
                task_statuses = [
                    int(contents[b"state"]) for contents in task_contents
                ]
                self.assertTrue(
                    all([
                        status in [
                            TASK_STATUS_WAITING, TASK_STATUS_SCHEDULED,
                            TASK_STATUS_QUEUED
                        ] for status in task_statuses
                    ]))
                num_tasks_done = task_statuses.count(TASK_STATUS_QUEUED)
                num_tasks_scheduled = task_statuses.count(
                    TASK_STATUS_SCHEDULED)
                num_tasks_waiting = task_statuses.count(TASK_STATUS_WAITING)
                print(
                    "tasks in Redis = {}, tasks waiting = {}, tasks scheduled = {}, tasks queued = {}, retries left = {}"
                    .format(len(task_entries), num_tasks_waiting,
                            num_tasks_scheduled, num_tasks_done, num_retries))
                if all(
                    [status == TASK_STATUS_QUEUED
                     for status in task_statuses]):
                    # We're done, so pass.
                    break
            num_retries -= 1
            time.sleep(0.1)

        if num_tasks_done != num_tasks:
            # At least one of the tasks failed to schedule.
            self.tearDown()
            sys.exit(2)