def evaluate(params):
    # file params
    experiment_path = params.output_data_dir
    logger.experiment_path = os.path.join(experiment_path, 'evaluation')
    params.checkpoint_restore_dir = os.path.join(params.input_data_dir, 'checkpoint')
    checkpoint_file = os.path.join(params.checkpoint_restore_dir, 'checkpoint')

    # Due to a TensorFlow issue (https://github.com/tensorflow/tensorflow/issues/9146), the absolute
    # checkpoint path has to be replaced with a relative one for evaluation from a checkpointed
    # model to work (a minimal sketch of the inplace_change helper follows this function).
    inplace_change(checkpoint_file, "/opt/ml/output/data/checkpoint", ".")

    vis_params = VisualizationParameters()
    vis_params.dump_gifs = True

    task_params = TaskParameters(evaluate_only=True, experiment_path=logger.experiment_path)
    task_params.__dict__ = add_items_to_dict(task_params.__dict__, params.__dict__)

    graph_manager = BasicRLGraphManager(
        agent_params=ClippedPPOAgentParameters(),
        env_params=GymVectorEnvironment(level='TSP_env:TSPEasyEnv'),
        schedule_params=ScheduleParameters(),
        vis_params=vis_params
    )
    graph_manager = graph_manager.create_graph(task_parameters=task_params)
    graph_manager.evaluate(EnvironmentSteps(5))
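
# The evaluate() snippet above relies on an inplace_change helper that is not shown in
# this listing. A minimal sketch of what such a helper could look like (the name and
# arguments are taken from the call above; the body is an assumption):
def inplace_change(filename, old_string, new_string):
    """Replace every occurrence of old_string with new_string inside filename."""
    with open(filename) as f:  # read the whole checkpoint index file
        content = f.read()
    if old_string not in content:  # nothing to rewrite
        return
    with open(filename, 'w') as f:  # write the patched content back in place
        f.write(content.replace(old_string, new_string))
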
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--markov-preset-file',
                        help="(string) Name of a preset file to run in Markov's preset directory.",
                        type=str,
                        default=os.environ.get("MARKOV_PRESET_FILE", "mars_presets.py"))
    parser.add_argument('-c', '--local_model_directory',
                        help='(string) Path to a folder containing a checkpoint to restore the model from.',
                        type=str,
                        default=os.environ.get("LOCAL_MODEL_DIRECTORY", "./checkpoint"))
    parser.add_argument('-n', '--num_workers',
                        help="(int) Number of workers for multi-process based agents, e.g. A3C",
                        default=1,
                        type=int)
    parser.add_argument('--model-s3-bucket',
                        help='(string) S3 bucket where trained models are stored. It contains model checkpoints.',
                        type=str,
                        default=os.environ.get("MODEL_S3_BUCKET"))
    parser.add_argument('--model-s3-prefix',
                        help='(string) S3 prefix where trained models are stored. It contains model checkpoints.',
                        type=str,
                        default=os.environ.get("MODEL_S3_PREFIX"))
    parser.add_argument('--aws-region',
                        help='(string) AWS region',
                        type=str,
                        default=os.environ.get("ROS_AWS_REGION", "us-west-1"))
    parser.add_argument('--checkpoint-save-secs',
                        help="(int) Time period in second between 2 checkpoints",
                        type=int,
                        default=600)
    parser.add_argument('--save-frozen-graph',
                        help="(bool) True if we need to store the frozen graph",
                        type=bool,
                        default=True)  # note: argparse's type=bool treats any non-empty string (even "False") as True

    args = parser.parse_args()

    if args.markov_preset_file:
        markov_path = imp.find_module("markov")[1]
        preset_location = os.path.join(markov_path, "presets", args.markov_preset_file)
        path_and_module = preset_location + ":graph_manager"
        graph_manager = short_dynamic_import(path_and_module, ignore_module_case=True)

    else:
        raise ValueError("Unable to determine preset file")

    # TODO: support other frameworks
    task_parameters = TaskParameters(framework_type=Frameworks.tensorflow,
                                     checkpoint_save_secs=args.checkpoint_save_secs)
    task_parameters.__dict__['checkpoint_save_dir'] = args.local_model_directory
    task_parameters.__dict__ = add_items_to_dict(task_parameters.__dict__, args.__dict__)

#    data_store_params_instance = S3BotoDataStoreParameters(bucket_name=args.model_s3_bucket,
#                                                           s3_folder=args.model_s3_prefix,
#                                                           checkpoint_dir=args.local_model_directory,
#                                                           aws_region=args.aws_region)


    #data_store = S3BotoDataStore(data_store_params_instance)

    #if args.save_frozen_graph:
    #    data_store.graph_manager = graph_manager

    #graph_manager.data_store_params = data_store_params_instance
    #graph_manager.data_store = data_store
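    # should_stop_training_based_on_evaluation is assumed to be defined elsewhere in this
    # script; the name suggests a predicate that inspects evaluation results and returns
    # True once training should stop, which the graph manager presumably consults.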
    graph_manager.should_stop = should_stop_training_based_on_evaluation
    start_graph(graph_manager=graph_manager, task_parameters=task_parameters)
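
# Both evaluate() and main() above merge an argument/parameter namespace into the
# TaskParameters instance through add_items_to_dict, which is also not shown here.
# A plausible sketch (an assumption; it merges the two dicts while letting keys already
# present on the task parameters take precedence over the incoming values):
import copy

def add_items_to_dict(target_dict, source_dict):
    """Return source_dict merged with target_dict, with target_dict keys winning."""
    merged = copy.copy(source_dict)   # start from the incoming items
    merged.update(target_dict)        # existing task-parameter values override them
    return merged
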
def main():

    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-p',
        '--preset',
        help=
        "(string) Name of a preset to run (class name from the 'presets' directory.)",
        default=None,
        type=str)
    parser.add_argument('-l',
                        '--list',
                        help="(flag) List all available presets",
                        action='store_true')
    parser.add_argument(
        '-e',
        '--experiment_name',
        help="(string) Experiment name to be used to store the results.",
        default='',
        type=str)
    parser.add_argument('-r',
                        '--render',
                        help="(flag) Render environment",
                        action='store_true')
    parser.add_argument(
        '-f',
        '--framework',
        help="(string) Neural network framework. Available values: tensorflow",
        default='tensorflow',
        type=str)
    parser.add_argument(
        '-n',
        '--num_workers',
        help="(int) Number of workers for multi-process based agents, e.g. A3C",
        default=1,
        type=int)
    parser.add_argument(
        '-c',
        '--use_cpu',
        help=
        "(flag) Use only the cpu for training. If a GPU is not available, this flag will have no "
        "effect and the CPU will be used either way.",
        action='store_true')
    parser.add_argument(
        '-ew',
        '--evaluation_worker',
        help=
        "(int) If multiple workers are used, add an evaluation worker as well which will "
        "evaluate asynchronously and independently during the training. NOTE: this worker will "
        "ignore the evaluation settings in the preset's ScheduleParams.",
        action='store_true')
    parser.add_argument(
        '--play',
        help="(flag) Play as a human by controlling the game with the keyboard. "
        "This option will save a replay buffer with the game play.",
        action='store_true')
    parser.add_argument(
        '--evaluate',
        help="(flag) Run evaluation only. This is a convenient way to disable "
        "training in order to evaluate an existing checkpoint.",
        action='store_true')
    parser.add_argument(
        '-v',
        '--verbosity',
        help=
        "(flag) Sets the verbosity level of Coach print outs. Can be either low or high.",
        default="low",
        type=str)
    parser.add_argument('-tfv',
                        '--tf_verbosity',
                        help="(flag) TensorFlow verbosity level",
                        default=3,
                        type=int)
    parser.add_argument(
        '-s',
        '--save_checkpoint_secs',
        help="(int) Time in seconds between saving checkpoints of the model.",
        default=None,
        type=int)
    parser.add_argument(
        '-crd',
        '--checkpoint_restore_dir',
        help=
        '(string) Path to a folder containing a checkpoint to restore the model from.',
        type=str)
    parser.add_argument('-dg',
                        '--dump_gifs',
                        help="(flag) Enable the gif saving functionality.",
                        action='store_true')
    parser.add_argument('-dm',
                        '--dump_mp4',
                        help="(flag) Enable the mp4 saving functionality.",
                        action='store_true')
    parser.add_argument(
        '-at',
        '--agent_type',
        help=
        "(string) Choose an agent type class to override on top of the selected preset. "
        "If no preset is defined, a preset can be set from the command-line by combining settings "
        "which are set by using --agent_type, --experiment_type, --environemnt_type",
        default=None,
        type=str)
    parser.add_argument(
        '-et',
        '--environment_type',
        help=
        "(string) Choose an environment type class to override on top of the selected preset."
        "If no preset is defined, a preset can be set from the command-line by combining settings "
        "which are set by using --agent_type, --experiment_type, --environemnt_type",
        default=None,
        type=str)
    parser.add_argument(
        '-ept',
        '--exploration_policy_type',
        help=
        "(string) Choose an exploration policy type class to override on top of the selected "
        "preset."
        "If no preset is defined, a preset can be set from the command-line by combining settings "
        "which are set by using --agent_type, --experiment_type, --environemnt_type",
        default=None,
        type=str)
    parser.add_argument(
        '-lvl',
        '--level',
        help=
        "(string) Choose the level that will be played in the environment that was selected."
        "This value will override the level parameter in the environment class.",
        default=None,
        type=str)
    parser.add_argument(
        '-cp',
        '--custom_parameter',
        help=
        "(string) Semicolon separated parameters used to override specific parameters on top of"
        " the selected preset (or on top of the command-line assembled one). "
        "Whenever a parameter value is a string, it should be inputted as '\\\"string\\\"'. "
        "For ex.: "
        "\"visualization.render=False; num_training_iterations=500; optimizer='rmsprop'\"",
        default=None,
        type=str)
    parser.add_argument('--print_networks_summary',
                        help="(flag) Print network summary to stdout",
                        action='store_true')
    parser.add_argument(
        '-tb',
        '--tensorboard',
        help=
        "(flag) When using the TensorFlow backend, enable TensorBoard log dumps. ",
        action='store_true')
    parser.add_argument(
        '-ns',
        '--no_summary',
        help=
        "(flag) Prevent Coach from printing a summary and asking questions at the end of runs",
        action='store_true')
    parser.add_argument(
        '-d',
        '--open_dashboard',
        help="(flag) Open dashboard with the experiment when the run starts",
        action='store_true')
    parser.add_argument('--seed',
                        help="(int) A seed to use for running the experiment",
                        default=None,
                        type=int)

    parser.add_argument(
        '--ray_redis_address',
        help=
        "The address of the Redis server to connect to. If this address is not provided, "
        "then this command will start Redis, a global scheduler, a local scheduler, "
        "a plasma store, a plasma manager, and some workers. "
        "It will also kill these processes when Python exits.",
        default=None,
        type=str)

    parser.add_argument(
        '--ray_num_cpus',
        help=
        "Number of cpus the user wishes all local schedulers to be configured with",
        default=None,
        type=int)

    parser.add_argument(
        '--ray_num_gpus',
        help=
        "Number of gpus the user wishes all local schedulers to be configured with",
        default=None,
        type=int)

    parser.add_argument(
        '--on_devcloud',
        help=
        "(bool) Run the workers on Intel DevCloud instead of locally",
        default=False,
        type=bool)  # note: argparse's type=bool treats any non-empty string (even "False") as True

    args = parse_arguments(parser)

    graph_manager = get_graph_manager_from_args(args)

    # Intel optimized TF seems to run significantly faster when limiting to a single OMP thread.
    # This will not affect GPU runs.
    # os.environ["OMP_NUM_THREADS"] = "1"

    # turn TF debug prints off
    if args.framework == Frameworks.tensorflow:
        os.environ['TF_CPP_MIN_LOG_LEVEL'] = str(args.tf_verbosity)

    # register the experiment summary at exit, unless it was disabled
    if not args.no_summary:
        atexit.register(logger.summarize_experiment)
        screen.change_terminal_title(args.experiment_name)

    # open dashboard
    if args.open_dashboard:
        open_dashboard(args.experiment_path)

    # Single-threaded runs
    if args.num_workers == 1:
        # Start the training or evaluation
        task_parameters = TaskParameters(
            framework_type=
            "tensorflow",  # TODO: tensorflow shouldn't be hardcoded
            evaluate_only=args.evaluate,
            experiment_path=args.experiment_path,
            seed=args.seed,
            use_cpu=args.use_cpu,
            save_checkpoint_secs=args.save_checkpoint_secs)
        task_parameters.__dict__ = add_items_to_dict(task_parameters.__dict__,
                                                     args.__dict__)

        start_graph(graph_manager=graph_manager,
                    task_parameters=task_parameters)
        #start_graph_ray.remote(graph_manager,task_parameters)
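        # start_graph is assumed to be a helper defined elsewhere in this script; it
        # presumably calls graph_manager.create_graph(task_parameters) and then either
        # graph_manager.evaluate() or graph_manager.improve(), depending on evaluate_only.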

    # Multi-threaded runs
    else:
        #ray.init(redis_address=args.ray_redis_address,
        #    num_cpus=args.ray_num_cpus,
        #    num_gpus=args.ray_num_gpus)

        total_tasks = args.num_workers
        if args.evaluation_worker:
            total_tasks += 1

        if args.on_devcloud:
            ips = create_worker_devcloud(args.num_workers)

            @ray.remote
            def f():
                time.sleep(0.01)
                #os.system('/usr/local/bin/qstat')
                return ray.services.get_node_ip_address()

            # args.on_devcloud was already checked above, so query ray directly for the node IPs
            ips = set(ray.get([f.remote() for _ in range(1000)]))

            home_ip = socket.gethostbyname(socket.gethostname())

            worker_ips = [z for z in ips if z != home_ip]
            worker_hosts = ",".join(
                ["{}:{}".format(n, get_open_port()) for n in ips])

        else:
            ray.init()
            worker_hosts = ",".join([
                "localhost:{}".format(get_open_port())
                for i in range(total_tasks)
            ])

        ps_hosts = "localhost:{}".format(get_open_port())
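        # get_open_port is another helper that is not part of this listing; presumably it
        # binds a temporary socket to port 0 and returns the port the OS assigns, e.g.:
        #     s = socket.socket(); s.bind(("", 0)); port = s.getsockname()[1]; s.close()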

        @ray.remote
        def start_distributed_task(job_type,
                                   task_index,
                                   evaluation_worker=False):

            task_parameters = DistributedTaskParameters(
                framework_type=
                "tensorflow",  # TODO: tensorflow shouldn't be hardcoded
                parameters_server_hosts=ps_hosts,
                worker_hosts=worker_hosts,
                job_type=job_type,
                task_index=task_index,
                evaluate_only=evaluation_worker,
                use_cpu=args.use_cpu,
                num_tasks=total_tasks,  # training tasks + 1 evaluation task
                num_training_tasks=args.num_workers,
                experiment_path=args.experiment_path,
                shared_memory_scratchpad=None,
                seed=args.seed + task_index if args.seed is not None else
                None)  # each worker gets a different seed
            task_parameters.__dict__ = add_items_to_dict(
                task_parameters.__dict__, args.__dict__)
            # we assume that only the evaluation workers are rendering

            graph_manager.visualization_parameters.render = args.render and evaluation_worker
            start_graph(graph_manager, task_parameters)
            #p = Process(target=start_graph, args=(graph_manager, task_parameters))
            #p.start()
            return

        @ray.remote
        def start_distributed_ray_task(job_type,
                                       task_index,
                                       evaluation_worker=False):
            task_parameters = DistributedTaskParameters(
                framework_type=
                "tensorflow",  # TODO: tensorflow shouldn't be hardcoded
                parameters_server_hosts=ps_hosts,
                worker_hosts=worker_hosts,
                job_type=job_type,
                task_index=task_index,
                evaluate_only=evaluation_worker,
                use_cpu=args.use_cpu,
                num_tasks=total_tasks,  # training tasks + 1 evaluation task
                num_training_tasks=args.num_workers,
                experiment_path=args.experiment_path,
                shared_memory_scratchpad=None,
                seed=args.seed + task_index if args.seed is not None else
                None)  # each worker gets a different seed
            task_parameters.__dict__ = add_items_to_dict(
                task_parameters.__dict__, args.__dict__)
            # we assume that only the evaluation workers are rendering
            graph_manager.visualization_parameters.render = args.render and evaluation_worker
            start_graph(graph_manager, task_parameters)
            return 1

        # parameter server
        parameter_server = start_distributed_task.remote("ps", 0)

        # training workers
        # wait a bit before spawning the non-chief workers in order to make sure the session is already created
        workers = []
        workers.append(start_distributed_task.remote("worker", 0))
        time.sleep(2)

        for task_index in range(1, args.num_workers):
            workers.append(start_distributed_task.remote("worker", task_index))