Example #1
import pytest


@pytest.fixture
def automl_fixture():
    from zoo import init_spark_on_local
    from zoo.ray.util.raycontext import RayContext
    # Start a local SparkContext and launch Ray on top of it.
    sc = init_spark_on_local(cores=4, spark_log_level="INFO")
    ray_ctx = RayContext(sc=sc, object_store_memory="1g")
    ray_ctx.init()
    yield
    # Teardown: stop Ray first, then the SparkContext.
    ray_ctx.stop()
    sc.stop()
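
For context, pytest injects this fixture into a test by parameter name. A minimal usage sketch, assuming pytest and a hypothetical test body:

# Hypothetical usage: pytest runs the fixture's setup before the test body
# and its teardown (the code after the yield) afterwards, so Ray is live
# for the duration of the test.
def test_with_ray(automl_fixture):
    import ray

    @ray.remote
    def f():
        return 1

    assert ray.get(f.remote()) == 1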
Example #2
    def test_local(self):
        # Assumes: import ray, psutil;
        # from zoo import init_spark_on_local;
        # from zoo.ray.util.raycontext import RayContext
        node_num = 4
        sc = init_spark_on_local(cores=node_num)
        ray_ctx = RayContext(sc=sc)
        ray_ctx.init()
        # Spawn one actor per core and confirm each one responds.
        actors = [TestRay.remote() for _ in range(node_num)]
        print(ray.get([actor.hostname.remote() for actor in actors]))
        ray_ctx.stop()
        sc.stop()
        # After shutdown, every Ray process the context started should be gone.
        for process_info in ray_ctx.ray_processesMonitor.process_infos:
            for pid in process_info.pids:
                assert not psutil.pid_exists(pid)
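
This test assumes a TestRay actor class defined elsewhere in the module (Example #5 below shows one such definition). A minimal self-contained sketch:

import ray


@ray.remote
class TestRay:
    def hostname(self):
        # Report which node this actor landed on.
        import socket
        return socket.gethostname()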
Example #3
    def test_local(self):
        # Assumes: import time, ray, psutil;
        # from zoo import init_spark_on_local;
        # from zoo.ray.util.raycontext import RayContext
        node_num = 4
        sc = init_spark_on_local(cores=node_num)
        ray_ctx = RayContext(sc=sc, object_store_memory="1g")
        ray_ctx.init()
        actors = [TestRay.remote() for _ in range(node_num)]
        print(ray.get([actor.hostname.remote() for actor in actors]))
        ray_ctx.stop()
        time.sleep(3)
        # Repeat: a second RayContext on the same SparkContext should
        # start cleanly after the first one has been stopped.
        print("-------------------first repeat begin!------------------")
        ray_ctx = RayContext(sc=sc, object_store_memory="1g")
        ray_ctx.init()
        actors = [TestRay.remote() for _ in range(node_num)]
        print(ray.get([actor.hostname.remote() for actor in actors]))
        ray_ctx.stop()
        sc.stop()
        time.sleep(3)
        # After shutdown, every Ray process the context started should be gone.
        for process_info in ray_ctx.ray_processesMonitor.process_infos:
            for pid in process_info.pids:
                assert not psutil.pid_exists(pid)
Example #4
                        type=str,
                        help="turn on yarn mode by passing the hadoop path"
                        "configuration folder. Otherwise, turn on local mode.")

    args, _ = parser.parse_known_args()

    if args.hadoop_conf:
        # Yarn mode: run Spark executors on the cluster and launch Ray on them.
        sc = init_spark_on_yarn(hadoop_conf=args.hadoop_conf,
                                conda_name="rayexample",
                                num_executor=args.num_replicas,
                                executor_cores=88,
                                executor_memory="10g",
                                driver_memory="3g",
                                driver_cores=4,
                                extra_executor_memory_for_ray="2g")
        ray_ctx = RayContext(sc=sc, object_store_memory="5g")
        ray_ctx.init()
    else:
        # Connect directly to an existing Ray cluster at the given address.
        ray.init(redis_address=args.redis_address)
        # Alternatively, run on a local SparkContext instead:
        # sc = init_spark_on_local(cores=44)
        # ray_ctx = RayContext(sc=sc, object_store_memory="5g")

    if args.tune:
        tune_example(num_replicas=args.num_replicas, use_gpu=args.use_gpu)
    else:
        train_example(num_replicas=args.num_replicas,
                      batch_size=args.batch_size,
                      use_gpu=args.use_gpu)
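
The snippet above begins partway through the argument-parser setup. A minimal sketch of the preamble it assumes, with flag names inferred from how args is used (treat them as assumptions):

import argparse

import ray

parser = argparse.ArgumentParser()
# Hypothetical reconstruction: flags inferred from the code above.
parser.add_argument("--hadoop_conf", type=str,
                    help="turn on yarn mode by passing the path to the "
                         "hadoop configuration folder. Otherwise, run in "
                         "local mode.")
parser.add_argument("--redis_address", type=str, default=None)
parser.add_argument("--num_replicas", type=int, default=4)
parser.add_argument("--batch_size", type=int, default=64)
parser.add_argument("--use_gpu", action="store_true")
parser.add_argument("--tune", action="store_true")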
Example #5
                        num_executor=slave_num,
                        executor_cores=28,
                        executor_memory="10g",
                        driver_memory="2g",
                        driver_cores=4,
                        extra_executor_memory_for_ray="30g",
                        spark_conf={"hello": "world"})

ray_ctx = RayContext(sc=sc,
                     object_store_memory="25g",
                     extra_params={"temp-dir": "/tmp/hello/"},
                     env={
                         "http_proxy": "http://child-prc.intel.com:913",
                         "http_proxys": "http://child-prc.intel.com:913"
                     })
# Launch Ray; init() takes its own resource settings.
ray_ctx.init(object_store_memory="2g", num_cores=0, labels="", extra_params={})


@ray.remote
class TestRay:
    def hostname(self):
        import socket
        return socket.gethostname()

    def check_cv2(self):
        # conda install -c conda-forge opencv==3.4.2
        import cv2
        return cv2.__version__

    def ip(self):
        import ray.services as rservices
        return rservices.get_node_ip_address()
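
Following the pattern in Examples #2 and #3, a minimal driver-side check of these actor methods might look like:

# Usage sketch: assumes ray_ctx.init() has already run, as above.
actors = [TestRay.remote() for _ in range(4)]
print(ray.get([a.hostname.remote() for a in actors]))
print(ray.get([a.ip.remote() for a in actors]))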