Example #1
    def train(self):
        """Train the agent."""
        file = open('log_reward', 'w')
        mainWorker = Worker.remote(self.env, self.hp)
        worker = ActorPool([
            Worker.remote(gym.make(self.hp.env_name), self.hp)
            for _ in range(8)
        ])
        print(worker)
        for step in range(self.hp.nb_steps):
            # generate random perturbations of the policy weights
            deltas = self.policy.sample_deltas()

            # evaluate each perturbation in the positive direction
            positive_rewards = list(
                worker.map(
                    lambda a, v: a.explore.remote(*v),
                    [(self.normalizer, self.policy, 'positive', deltas[i])
                     for i in range(self.hp.nb_directions)]))

            # evaluate each perturbation in the negative direction
            negative_rewards = list(
                worker.map(
                    lambda a, v: a.explore.remote(*v),
                    [(self.normalizer, self.policy, 'negative', deltas[i])
                     for i in range(self.hp.nb_directions)]))

            # gathering the rewards
            all_rewards = np.array(positive_rewards + negative_rewards)

            # standard deviation of all rewards, used to scale the update
            sigma_r = all_rewards.std()

            # sort directions by max(r_pos, r_neg) and keep the best ones
            scores = {
                k: max(r_pos, r_neg)
                for k, (r_pos, r_neg
                        ) in enumerate(zip(positive_rewards, negative_rewards))
            }
            order = sorted(scores.keys(),
                           key=lambda x: scores[x],
                           reverse=True)[:self.hp.nb_best_directions]
            rollouts = [(positive_rewards[k], negative_rewards[k], deltas[k])
                        for k in order]

            # update the policy weights using the best rollouts
            self.policy.update(rollouts, sigma_r)
            # evaluate the updated policy and log its reward
            reward_evaluation = ray.get(
                mainWorker.explore.remote(self.normalizer, self.policy))
            print("Step ", step, "=> Reward: ", reward_evaluation)
            file.write(str(reward_evaluation) + '\n')
        file.close()
        return self.policy.tetha
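The loop above assumes a Worker actor whose explore(normalizer, policy, direction, delta) method rolls out one episode and returns its total reward. The sketch below is inferred from those calls, following the standard Augmented Random Search pattern; Policy.evaluate, the Normalizer interface, and hp.episode_length are assumed names, not code from this example.

import ray


@ray.remote
class Worker:
    # Hypothetical actor matching the calls above; not part of the original example.
    def __init__(self, env, hp):
        self.env = env
        self.hp = hp

    def explore(self, normalizer, policy, direction=None, delta=None):
        state = self.env.reset()
        done, num_plays, sum_rewards = False, 0, 0.0
        while not done and num_plays < self.hp.episode_length:
            normalizer.observe(state)
            state = normalizer.normalize(state)
            # evaluate is assumed to apply theta + delta or theta - delta
            action = policy.evaluate(state, delta, direction)
            state, reward, done, _ = self.env.step(action)
            sum_rewards += reward
            num_plays += 1
        return sum_rewards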
Example #2
def test_map_gh23107(init):
    sleep_time = 40

    # Reference - https://github.com/ray-project/ray/issues/23107
    @ray.remote
    class DummyActor:
        async def identity(self, s):
            if s == 6:
                await asyncio.sleep(sleep_time)
            return s, time.time()

    def func(a, v):
        return a.identity.remote(v)

    map_values = [1, 2, 3, 4, 5]

    pool_map = ActorPool([DummyActor.remote() for _ in range(2)])
    pool_map.submit(func, 6)
    start_time = time.time()
    # materialize the generator once; consuming it twice would leave the
    # second pass with an empty iterator
    results = list(pool_map.map(func, map_values))
    assert all(elem[0] in map_values for elem in results)
    # every mapped value must come back well before the slow submit() task ends
    assert all(abs(elem[1] - start_time) < sleep_time for elem in results)

    pool_map_unordered = ActorPool([DummyActor.remote() for _ in range(2)])
    pool_map_unordered.submit(func, 6)
    start_time = time.time()
    results = list(pool_map_unordered.map_unordered(func, map_values))
    assert all(elem[0] in map_values for elem in results)
    assert all(abs(elem[1] - start_time) < sleep_time for elem in results)
Example #3
def extract_flame(fps):
    files = list(DATASET_DIR.glob(f"*/*/video_{fps}fps.mp4"))
    for video_file in tqdm(files,
                           desc="Extracting flame parameters",
                           leave=False):
        flame_h5_file = video_file.parent / f"flame_{fps}fps.h5"
        if flame_h5_file.exists():
            continue
        flame_dir = video_file.parent / f"flame_{fps}fps"
        gender = get_gender(video_file.parent.parent.name,
                            video_file.parent.name)
        template_path = BASE_DIR / CONFIG["flame"][f"model_path_{gender}"]
        # with open(template_model_fname, "rb") as f:
        #     template = pickle.load(f, encoding="latin1")

        ringnet_file = video_file.parent / f"ringnet_{fps}fps.h5"
        openface_file = video_file.parent / f"openface_{fps}fps.csv"
        neutral_mesh_faces = Mesh(filename=str(video_file.parent /
                                               "neutral_mesh.ply")).f

        ringnet_h5 = h5py.File(ringnet_file, "r")
        flame = ringnet_h5["flame_params"]

        pool = ActorPool([
            FrameOptimizer.remote(neutral_mesh_faces, template_path)
            for _ in range(8)
        ])
        openface_data = list(csv.reader(openface_file.open()))[1:]
        data = f["pose"], f["shape"], f["expression"], openface_data
        flame_dir.mkdir(parents=True, exist_ok=True)
        runners = []
        for i, (pose, shape, expression, openface) in enumerate(zip(*data), 1):
            flame_file = flame_dir / f"{i:06}.npy"
            if flame_file.exists():
                continue

            # Get 68 facial landmarks
            landmarks = [float(x) for x in openface[299:435]]
            # reshape to (68, 2) and drop the 17 jaw landmarks, keeping 51
            target_2d_lmks = np.array(landmarks).reshape(2, -1).T[17:]
            runners.append(
                (pose, shape, expression, target_2d_lmks, flame_file))

        for file_name, flame_params in tqdm(
                pool.map(lambda a, v: a.fit_lmk2d_v2.remote(*v), runners),
                total=len(runners),
                leave=False,
        ):
            np.save(file_name, flame_params)
        ringnet_h5.close()

        # collect the per-frame results in frame order (file names are zero-padded)
        np_files = sorted(flame_dir.glob("*.npy"))
        assert len(np_files) == len(openface_data)

        results = defaultdict(list)
        for file in np_files:
            for key, value in np.load(file, allow_pickle=True).item().items():
                results[key].append(value)
        with h5py.File(flame_h5_file, "w") as f:
            for key, value in results.items():
                f.create_dataset(key, data=np.vstack(value))
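The pool above assumes a FrameOptimizer actor whose fit_lmk2d_v2 method returns the output path together with the fitted parameters, since each result feeds np.save(file_name, flame_params). The skeleton below is inferred from those calls; the fitting itself is elided and the attribute names are assumptions.

import ray


@ray.remote
class FrameOptimizer:
    # Hypothetical skeleton; the real optimizer fits FLAME parameters to the
    # 2D landmarks, which is omitted here.
    def __init__(self, neutral_mesh_faces, template_path):
        self.faces = neutral_mesh_faces
        self.template_path = template_path

    def fit_lmk2d_v2(self, pose, shape, expression, target_2d_lmks, flame_file):
        # ... optimize the FLAME parameters against target_2d_lmks (omitted) ...
        flame_params = {"pose": pose, "shape": shape, "expression": expression}
        # return (path, params) so the caller can run np.save(path, params)
        return flame_file, flame_params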
Example #4
def test_map_gh23107(init):
    # Reference - https://github.com/ray-project/ray/issues/23107
    @ray.remote
    class DummyActor:
        async def identity(self, s):
            return s

    def func(a, v):
        return a.identity.remote(v)

    map_values = [1, 2, 3, 4, 5]

    pool_map = ActorPool([DummyActor.remote() for i in range(2)])
    pool_map.submit(func, 6)
    gen = pool_map.map(func, map_values)
    assert list(gen) == [1, 2, 3, 4, 5]

    pool_map_unordered = ActorPool([DummyActor.remote() for i in range(2)])
    pool_map_unordered.submit(func, 6)
    gen = pool_map_unordered.map_unordered(func, map_values)
    assert all(elem in [1, 2, 3, 4, 5] for elem in list(gen))
Example #5
def test_map(init):
    @ray.remote
    class MyActor:
        def __init__(self):
            pass

        def f(self, x):
            return x + 1

        def double(self, x):
            return 2 * x

    actors = [MyActor.remote() for _ in range(4)]
    pool = ActorPool(actors)

    for index, v in enumerate(pool.map(lambda a, v: a.double.remote(v),
                                       range(5))):
        assert v == 2 * index
Example #6
def test_map_unordered(init):
    @ray.remote
    class MyActor:
        def __init__(self):
            pass

        def f(self, x):
            return x + 1

        def double(self, x):
            return 2 * x

    actors = [MyActor.remote() for _ in range(4)]
    pool = ActorPool(actors)

    total = []
    for v in pool.map_unordered(lambda a, v: a.double.remote(v), range(5)):
        total.append(v)

    assert all(elem in [0, 2, 4, 6, 8] for elem in total)
Example #7
import ray
from ray.util import ActorPool


@ray.remote
class Actor:
    def double(self, n):
        return n * 2


a1, a2 = Actor.remote(), Actor.remote()
pool = ActorPool([a1, a2])

# pool.map(...) returns a generator; results are yielded in submission order
gen = pool.map(lambda a, v: a.double.remote(v), [1, 2, 3, 4])
print(list(gen))
# [2, 4, 6, 8]
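For comparison, ActorPool.map_unordered yields each result as soon as it completes, so the output order is not guaranteed. Reusing the pool from above:

gen = pool.map_unordered(lambda a, v: a.double.remote(v), [1, 2, 3, 4])
# sorted here only to make the printed output deterministic; the generator
# itself yields results in completion order
print(sorted(gen))
# [2, 4, 6, 8]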
Example #8

from time import sleep
from uuid import uuid4

import ray
from ray.util import ActorPool


@ray.remote
class PoolActor:
    def __init__(self):
        self.id = str(uuid4())

    def computation(self, num):
        print(f'Actor with id {self.id} waiting for {num} sec')
        for x in range(num):
            sleep(1)
            print(f'Actor with id {self.id} slept for {x} sec')
        return num


# Create actors and add them to the pool
a1, a2, a3 = PoolActor.remote(), PoolActor.remote(), PoolActor.remote()
pool = ActorPool([a1, a2, a3])

print(list(pool.map(lambda a, v: a.computation.remote(v), [3, 4, 5, 4])))

pool.submit(lambda a, v: a.computation.remote(v), 3)
pool.submit(lambda a, v: a.computation.remote(v), 4)
pool.submit(lambda a, v: a.computation.remote(v), 5)
pool.submit(lambda a, v: a.computation.remote(v), 4)

print(pool.get_next())
print(pool.get_next())
print(pool.get_next())
print(pool.get_next())
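get_next() blocks until the next result in submission order is ready. If completion order is all that matters, ActorPool.get_next_unordered() returns whichever pending result finishes first, so a short task is not stuck behind a long one:

pool.submit(lambda a, v: a.computation.remote(v), 5)
pool.submit(lambda a, v: a.computation.remote(v), 1)
# the 1-second task is returned first even though it was submitted second
print(pool.get_next_unordered())
print(pool.get_next_unordered())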
Example #9
class RayRunner(RunnerBase):
    def __init__(self, *, random_seed: int = 0, volumes_dir: str = None, scratch_dir: str = None,
                 store_results=False, n_workers=None, blocklist=()) -> None:
        if not ray.is_initialized():
            ray.init()

        super().__init__(random_seed=random_seed, volumes_dir=volumes_dir, scratch_dir=scratch_dir)
        self.data_handler = DataHandler.remote()
        self.ray_executor = RayExecutor.remote(random_seed=random_seed,
                                               volumes_dir=volumes_dir, scratch_dir=scratch_dir,
                                               store_results=store_results, blocklist=blocklist)

        if n_workers is None:
            n_workers = multiprocessing.cpu_count()
        self.actor_pool = ActorPool([
            RayExecutor.remote(random_seed=random_seed, volumes_dir=volumes_dir,
                               scratch_dir=scratch_dir, store_results=store_results,
                               blocklist=blocklist) for _ in range(n_workers)]
        )

        # Wait for primitives to be loaded on the workers
        # time.sleep(len(d3m_index.search()) * 0.15)

    def stop_ray(self):
        ray.shutdown()

    def get_request(self, request_id: str):
        return ray.get(ray.ObjectID(binascii.unhexlify(request_id)))

    def fit_pipeline_request(self, problem_description: Problem, pipeline: Pipeline,
                             input_data: typing.Sequence[ContainerType], *, timeout: float = None,
                             expose_outputs: bool = False) -> str:

        request_id = self.data_handler.add_data.remote(input_data)
        input_data_id = ray.get(request_id)
        request_id = self.ray_executor.fit_pipeline.remote(self.data_handler, problem_description, pipeline, input_data_id,
                                                           timeout=timeout, expose_outputs=expose_outputs)
        return request_id.hex()

    def produce_pipeline_request(self, fitted_pipeline_id: str, input_data: typing.Sequence[ContainerType], *,
                                 timeout: float = None, expose_outputs: bool = False) -> str:
        request_id = self.data_handler.add_data.remote(input_data)
        input_data_id = ray.get(request_id)
        request_id = self.ray_executor.produce_pipeline.remote(self.data_handler, fitted_pipeline_id, input_data_id, timeout=timeout,
                                                               expose_outputs=expose_outputs)
        return request_id.hex()

    def evaluate_pipeline_request(
            self, problem_description: Problem, pipeline: Pipeline,
            input_data: typing.Sequence[ContainerType], *, metrics: typing.Sequence[typing.Dict],
            data_preparation_pipeline: Pipeline = None, scoring_pipeline: Pipeline = None,
            data_preparation_params: typing.Dict[str, str] = None, scoring_params: typing.Dict[str, str] = None,
            timeout: float = None
    ) -> str:
        request_id = self.data_handler.add_data.remote(input_data)
        input_data_id = ray.get(request_id)

        request_id = self.ray_executor.evaluate_pipeline.remote(
            self.data_handler, problem_description, pipeline, input_data_id, metrics=metrics,
            data_preparation_pipeline=data_preparation_pipeline, scoring_pipeline=scoring_pipeline,
            data_preparation_params=data_preparation_params, scoring_params=scoring_params, timeout=timeout
        )
        return request_id.hex()

    def fitted_pipeline_id_exists(self, fitted_pipeline_id):
        request_id = self.ray_executor.fitted_pipeline_id_exists.remote(fitted_pipeline_id)
        return ray.get(request_id)

    def evaluate_pipelines(
            self, problem_description: Problem, pipelines: typing.Sequence[Pipeline],
            input_data: typing.Sequence[ContainerType], *, metrics: typing.Sequence[typing.Dict],
            data_preparation_pipeline: Pipeline = None, scoring_pipeline: Pipeline = None,
            data_preparation_params: typing.Dict[str, str] = None, scoring_params: typing.Dict[str, str] = None,
            timeout: float = None
    ) -> typing.Sequence[PipelineResult]:
        request_id = self.data_handler.add_data.remote(input_data)
        input_data_id = ray.get(request_id)

        args = []
        for pipeline in pipelines:
            args.append({
                'data_handler': self.data_handler, 'problem_description': problem_description, 'pipeline': pipeline,
                'input_data_id': input_data_id, 'metrics': metrics, 'data_preparation_pipeline': data_preparation_pipeline,
                'scoring_pipeline': scoring_pipeline, 'data_preparation_params': data_preparation_params,
                'scoring_params': scoring_params, 'timeout': timeout
            })

        return self.actor_pool.map(lambda actor, arg: actor.evaluate_pipeline.remote(**arg), args)
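Note that evaluate_pipelines returns the generator produced by ActorPool.map, so callers must iterate it to drive the evaluations. A hypothetical usage, assuming problem, pipelines, input_data, and metrics are constructed elsewhere:

# sketch only: problem, pipelines, input_data, and metrics come from elsewhere
runner = RayRunner(random_seed=42, n_workers=4)
results = list(runner.evaluate_pipelines(
    problem, pipelines, input_data, metrics=metrics))
runner.stop_ray()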