Example #1
    def run_graph_manager(self, graph_manager: 'GraphManager',
                          args: argparse.Namespace):
        if args.distributed_coach and not graph_manager.agent_params.algorithm.distributed_coach_synchronization_type:
            screen.error(
                "{} algorithm is not supported using distributed Coach.".
                format(graph_manager.agent_params.algorithm))

        if args.distributed_coach and args.checkpoint_save_secs and graph_manager.agent_params.algorithm.distributed_coach_synchronization_type == DistributedCoachSynchronizationType.SYNC:
            screen.warning(
                "The --checkpoint_save_secs or -s argument will be ignored as SYNC distributed coach sync type is used. Checkpoint will be saved every training iteration."
            )

        if args.distributed_coach and not args.checkpoint_save_secs and graph_manager.agent_params.algorithm.distributed_coach_synchronization_type == DistributedCoachSynchronizationType.ASYNC:
            screen.error(
                "Distributed coach with ASYNC distributed coach sync type requires --checkpoint_save_secs or -s."
            )

        # Intel optimized TF seems to run significantly faster when limiting to a single OMP thread.
        # This will not affect GPU runs.
        os.environ["OMP_NUM_THREADS"] = "1"

        # turn TF debug prints off
        if args.framework == Frameworks.tensorflow:
            os.environ['TF_CPP_MIN_LOG_LEVEL'] = str(args.tf_verbosity)

        # register the experiment summary to be printed at the end of the run, unless disabled
        if not args.no_summary and not args.distributed_coach:
            atexit.register(logger.summarize_experiment)
            screen.change_terminal_title(args.experiment_name)

        task_parameters = TaskParameters(
            framework_type=args.framework,
            evaluate_only=args.evaluate,
            experiment_path=args.experiment_path,
            seed=args.seed,
            use_cpu=args.use_cpu,
            checkpoint_save_secs=args.checkpoint_save_secs,
            checkpoint_restore_dir=args.checkpoint_restore_dir,
            checkpoint_save_dir=args.checkpoint_save_dir,
            export_onnx_graph=args.export_onnx_graph,
            apply_stop_condition=args.apply_stop_condition)

        # open dashboard
        if args.open_dashboard:
            open_dashboard(args.experiment_path)

        if args.distributed_coach and args.distributed_coach_run_type != RunType.ORCHESTRATOR:
            handle_distributed_coach_tasks(graph_manager, args,
                                           task_parameters)
            return

        if args.distributed_coach and args.distributed_coach_run_type == RunType.ORCHESTRATOR:
            handle_distributed_coach_orchestrator(args)
            return

        # Single-threaded runs
        if args.num_workers == 1:
            self.start_single_threaded(task_parameters, graph_manager, args)
        else:
            self.start_multi_threaded(graph_manager, args)
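A minimal sketch (not part of the example above) of how run_graph_manager might be driven once a GraphManager has been built from a preset. Only the fields that the method actually reads are set on the Namespace; launcher and graph_manager are assumed to be an existing CoachLauncher-style object exposing this method and a preset's GraphManager, and the experiment name and path are illustrative.

import argparse
from rl_coach.base_parameters import Frameworks  # assumed location of the Frameworks enum

# assumption: `launcher` exposes run_graph_manager as shown above and
# `graph_manager` was created from a preset (e.g. via get_graph_manager_from_args
# in Example #3)
args = argparse.Namespace(
    distributed_coach=False,                        # plain single-machine run
    framework=Frameworks.tensorflow,
    tf_verbosity=3,
    no_summary=False,
    experiment_name='my_experiment',                # illustrative name
    experiment_path='./experiments/my_experiment',  # illustrative path
    evaluate=False,
    seed=None,
    use_cpu=False,
    checkpoint_save_secs=None,
    checkpoint_restore_dir=None,
    checkpoint_save_dir=None,
    export_onnx_graph=False,
    apply_stop_condition=False,
    open_dashboard=False,
    num_workers=1)

launcher.run_graph_manager(graph_manager, args)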
Example #2
    def run_graph_manager(self, graph_manager: 'GraphManager', args: argparse.Namespace):
        if args.distributed_coach and not graph_manager.agent_params.algorithm.distributed_coach_synchronization_type:
            screen.error("{} algorithm is not supported using distributed Coach.".format(graph_manager.agent_params.algorithm))

        # Intel optimized TF seems to run significantly faster when limiting to a single OMP thread.
        # This will not affect GPU runs.
        os.environ["OMP_NUM_THREADS"] = "1"

        # turn TF debug prints off
        if args.framework == Frameworks.tensorflow:
            os.environ['TF_CPP_MIN_LOG_LEVEL'] = str(args.tf_verbosity)

        # register the experiment summary to be printed at the end of the run, unless disabled
        if not args.no_summary and not args.distributed_coach:
            atexit.register(logger.summarize_experiment)
            screen.change_terminal_title(args.experiment_name)

        # open dashboard
        if args.open_dashboard:
            open_dashboard(args.experiment_path)

        if args.distributed_coach and args.distributed_coach_run_type != RunType.ORCHESTRATOR:
            handle_distributed_coach_tasks(graph_manager, args)
            return

        if args.distributed_coach and args.distributed_coach_run_type == RunType.ORCHESTRATOR:
            handle_distributed_coach_orchestrator(graph_manager, args)
            return

        # Single-threaded runs
        if args.num_workers == 1:
            self.start_single_threaded(graph_manager, args)
        else:
            self.start_multi_threaded(graph_manager, args)
Example #3
def main():

    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-p',
        '--preset',
        help=
        "(string) Name of a preset to run (class name from the 'presets' directory.)",
        default=None,
        type=str)
    parser.add_argument('-l',
                        '--list',
                        help="(flag) List all available presets",
                        action='store_true')
    parser.add_argument(
        '-e',
        '--experiment_name',
        help="(string) Experiment name to be used to store the results.",
        default='',
        type=str)
    parser.add_argument('-r',
                        '--render',
                        help="(flag) Render environment",
                        action='store_true')
    parser.add_argument(
        '-f',
        '--framework',
        help="(string) Neural network framework. Available values: tensorflow",
        default='tensorflow',
        type=str)
    parser.add_argument(
        '-n',
        '--num_workers',
        help="(int) Number of workers for multi-process based agents, e.g. A3C",
        default=1,
        type=int)
    parser.add_argument(
        '-c',
        '--use_cpu',
        help=
        "(flag) Use only the cpu for training. If a GPU is not available, this flag will have no "
        "effect and the CPU will be used either way.",
        action='store_true')
    parser.add_argument(
        '-ew',
        '--evaluation_worker',
        help=
        "(int) If multiple workers are used, add an evaluation worker as well which will "
        "evaluate asynchronously and independently during the training. NOTE: this worker will "
        "ignore the evaluation settings in the preset's ScheduleParams.",
        action='store_true')
    parser.add_argument(
        '--play',
        help="(flag) Play as a human by controlling the game with the keyboard. "
        "This option will save a replay buffer with the game play.",
        action='store_true')
    parser.add_argument(
        '--evaluate',
        help="(flag) Run evaluation only. This is a convenient way to disable "
        "training in order to evaluate an existing checkpoint.",
        action='store_true')
    parser.add_argument(
        '-v',
        '--verbosity',
        help=
        "(flag) Sets the verbosity level of Coach print outs. Can be either low or high.",
        default="low",
        type=str)
    parser.add_argument('-tfv',
                        '--tf_verbosity',
                        help="(flag) TensorFlow verbosity level",
                        default=3,
                        type=int)
    parser.add_argument(
        '-s',
        '--save_checkpoint_secs',
        help="(int) Time in seconds between saving checkpoints of the model.",
        default=None,
        type=int)
    parser.add_argument(
        '-crd',
        '--checkpoint_restore_dir',
        help=
        '(string) Path to a folder containing a checkpoint to restore the model from.',
        type=str)
    parser.add_argument('-dg',
                        '--dump_gifs',
                        help="(flag) Enable the gif saving functionality.",
                        action='store_true')
    parser.add_argument('-dm',
                        '--dump_mp4',
                        help="(flag) Enable the mp4 saving functionality.",
                        action='store_true')
    parser.add_argument(
        '-at',
        '--agent_type',
        help=
        "(string) Choose an agent type class to override on top of the selected preset. "
        "If no preset is defined, a preset can be set from the command-line by combining settings "
        "which are set by using --agent_type, --experiment_type, --environemnt_type",
        default=None,
        type=str)
    parser.add_argument(
        '-et',
        '--environment_type',
        help=
        "(string) Choose an environment type class to override on top of the selected preset."
        "If no preset is defined, a preset can be set from the command-line by combining settings "
        "which are set by using --agent_type, --experiment_type, --environemnt_type",
        default=None,
        type=str)
    parser.add_argument(
        '-ept',
        '--exploration_policy_type',
        help=
        "(string) Choose an exploration policy type class to override on top of the selected "
        "preset."
        "If no preset is defined, a preset can be set from the command-line by combining settings "
        "which are set by using --agent_type, --experiment_type, --environemnt_type",
        default=None,
        type=str)
    parser.add_argument(
        '-lvl',
        '--level',
        help=
        "(string) Choose the level that will be played in the environment that was selected."
        "This value will override the level parameter in the environment class.",
        default=None,
        type=str)
    parser.add_argument(
        '-cp',
        '--custom_parameter',
        help=
        "(string) Semicolon separated parameters used to override specific parameters on top of"
        " the selected preset (or on top of the command-line assembled one). "
        "Whenever a parameter value is a string, it should be inputted as '\\\"string\\\"'. "
        "For ex.: "
        "\"visualization.render=False; num_training_iterations=500; optimizer='rmsprop'\"",
        default=None,
        type=str)
    parser.add_argument('--print_networks_summary',
                        help="(flag) Print network summary to stdout",
                        action='store_true')
    parser.add_argument(
        '-tb',
        '--tensorboard',
        help=
        "(flag) When using the TensorFlow backend, enable TensorBoard log dumps. ",
        action='store_true')
    parser.add_argument(
        '-ns',
        '--no_summary',
        help=
        "(flag) Prevent Coach from printing a summary and asking questions at the end of runs",
        action='store_true')
    parser.add_argument(
        '-d',
        '--open_dashboard',
        help="(flag) Open dashboard with the experiment when the run starts",
        action='store_true')
    parser.add_argument('--seed',
                        help="(int) A seed to use for running the experiment",
                        default=None,
                        type=int)

    parser.add_argument(
        '--ray_redis_address',
        help=
        "The address of the Redis server to connect to. If this address is not provided,\
                         then this command will start Redis, a global scheduler, a local scheduler, \
                         a plasma store, a plasma manager, and some workers. \
                         It will also kill these processes when Python exits.",
        default=None,
        type=str)

    parser.add_argument(
        '--ray_num_cpus',
        help=
        "Number of cpus the user wishes all local schedulers to be configured with",
        default=None,
        type=int)

    parser.add_argument(
        '--ray_num_gpus',
        help=
        "Number of gpus the user wishes all local schedulers to be configured with",
        default=None,
        type=int)

    parser.add_argument(
        '--on_devcloud',
        help="(flag) Run the distributed workers on DevCloud",
        action='store_true')
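    # Example invocation (hypothetical; the preset name and resource counts are
    # illustrative, not taken from this file):
    #   python coach.py -p CartPole_DQN -n 4 --ray_num_cpus 8 --ray_num_gpus 0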

    args = parse_arguments(parser)

    graph_manager = get_graph_manager_from_args(args)

    # Intel optimized TF seems to run significantly faster when limiting to a single OMP thread.
    # This will not affect GPU runs.
    # os.environ["OMP_NUM_THREADS"] = "1"

    # turn TF debug prints off
    if args.framework == Frameworks.tensorflow:
        os.environ['TF_CPP_MIN_LOG_LEVEL'] = str(args.tf_verbosity)

    # register the experiment summary to be printed at the end of the run, unless disabled
    if not args.no_summary:
        atexit.register(logger.summarize_experiment)
        screen.change_terminal_title(args.experiment_name)

    # open dashboard
    if args.open_dashboard:
        open_dashboard(args.experiment_path)

    # Single-threaded runs
    if args.num_workers == 1:
        # Start the training or evaluation
        task_parameters = TaskParameters(
            framework_type=
            "tensorflow",  # TODO: tensorflow should'nt be hardcoded
            evaluate_only=args.evaluate,
            experiment_path=args.experiment_path,
            seed=args.seed,
            use_cpu=args.use_cpu,
            save_checkpoint_secs=args.save_checkpoint_secs)
        task_parameters.__dict__ = add_items_to_dict(task_parameters.__dict__,
                                                     args.__dict__)

        start_graph(graph_manager=graph_manager,
                    task_parameters=task_parameters)
        #start_graph_ray.remote(graph_manager,task_parameters)

    # Multi-threaded runs
    else:
        #ray.init(redis_address=args.ray_redis_address,
        #    num_cpus=args.ray_num_cpus,
        #    num_gpus=args.ray_num_gpus)

        total_tasks = args.num_workers
        if args.evaluation_worker:
            total_tasks += 1

        if args.on_devcloud:
            ips = create_worker_devcloud(args.num_workers)

            @ray.remote
            def f():
                time.sleep(0.01)
                #os.system('/usr/local/bin/qstat')
                return ray.services.get_node_ip_address()

            # discover the IPs of the Ray nodes that were started on DevCloud
            ips = set(ray.get([f.remote() for _ in range(1000)]))

            home_ip = socket.gethostbyname(socket.gethostname())

            worker_ips = [z for z in ips if z != home_ip]
            worker_hosts = ",".join(
                ["{}:{}".format(n, get_open_port()) for n in ips])

        else:
            ray.init()
            worker_hosts = ",".join([
                "localhost:{}".format(get_open_port())
                for i in range(total_tasks)
            ])

        ps_hosts = "localhost:{}".format(get_open_port())

        @ray.remote
        def start_distributed_task(job_type,
                                   task_index,
                                   evaluation_worker=False):

            task_parameters = DistributedTaskParameters(
                framework_type=
                "tensorflow",  # TODO: tensorflow should'nt be hardcoded
                parameters_server_hosts=ps_hosts,
                worker_hosts=worker_hosts,
                job_type=job_type,
                task_index=task_index,
                evaluate_only=evaluation_worker,
                use_cpu=args.use_cpu,
                num_tasks=total_tasks,  # training tasks + 1 evaluation task
                num_training_tasks=args.num_workers,
                experiment_path=args.experiment_path,
                shared_memory_scratchpad=None,
                seed=args.seed + task_index if args.seed is not None else
                None)  # each worker gets a different seed
            task_parameters.__dict__ = add_items_to_dict(
                task_parameters.__dict__, args.__dict__)
            # we assume that only the evaluation workers are rendering

            graph_manager.visualization_parameters.render = args.render and evaluation_worker
            start_graph(graph_manager, task_parameters)
            #p = Process(target=start_graph, args=(graph_manager, task_parameters))
            #p.start()
            return

        @ray.remote
        def start_distributed_ray_task(job_type,
                                       task_index,
                                       evaluation_worker=False):
            task_parameters = DistributedTaskParameters(
                framework_type=
                "tensorflow",  # TODO: tensorflow should'nt be hardcoded
                parameters_server_hosts=ps_hosts,
                worker_hosts=worker_hosts,
                job_type=job_type,
                task_index=task_index,
                evaluate_only=evaluation_worker,
                use_cpu=args.use_cpu,
                num_tasks=total_tasks,  # training tasks + 1 evaluation task
                num_training_tasks=args.num_workers,
                experiment_path=args.experiment_path,
                shared_memory_scratchpad=None,
                seed=args.seed + task_index if args.seed is not None else
                None)  # each worker gets a different seed
            task_parameters.__dict__ = add_items_to_dict(
                task_parameters.__dict__, args.__dict__)
            # we assume that only the evaluation workers are rendering
            graph_manager.visualization_parameters.render = args.render and evaluation_worker
            start_graph(graph_manager, task_parameters)
            return 1

        # parameter server
        parameter_server = start_distributed_task.remote("ps", 0)

        # training workers
        # wait a bit before spawning the non-chief workers in order to make sure the session has already been created
        workers = []
        workers.append(start_distributed_task.remote("worker", 0))
        time.sleep(2)

        for task_index in range(1, args.num_workers):
            workers.append(start_distributed_task.remote("worker", task_index))
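        # Not part of the original listing: a sketch of how the spawned Ray tasks
        # might be awaited. The parameter-server task serves indefinitely, so only
        # the training-worker handles collected above are waited on here.
        ray.get(workers)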