Example No. 1
import argparse
import os
import os.path as osp
import subprocess
from pathlib import Path

import joblib
import numpy as np

# Project-specific helpers (HTMLReport, get_path_and_iters, create_qf_eval_fnct,
# create_figure, position_from_angles, query_yes_no, and the `vu` visualization
# module) are assumed to come from the surrounding codebase.


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("folder_path", type=str)
    # parser.add_argument("--num_iters", type=int)
    args = parser.parse_args()
    base = Path(os.getcwd())
    base = base / args.folder_path

    path_and_iter = get_path_and_iters(base)

    resolution = 20
    x_bounds = (-1, 1)
    y_bounds = (-1, 1)

    report = HTMLReport(str(base / 'report.html'), images_per_row=1)

    # for path, itr in takespread(path_and_iter, args):
    for path, itr in [path_and_iter[-1]]:  # only visualize the final checkpoint
        report.add_text("Path: %s" % path)
        print("Loading: %s" % path)
        data = joblib.load(str(path))
        qf = data['qf']
        env = data['env']
        qf.train(False)  # switch the Q-function to evaluation mode

        start_state = env.reset()
        report.add_text("Start State = {}".format(start_state))
        report.add_text("Start XY = {}".format(
            position_from_angles(np.expand_dims(start_state, 0))))
        goal_states = [start_state]
        goal_states += [env.sample_goal_for_rollout() for _ in range(5)]
        for goal_state in goal_states:
            qf_eval = create_qf_eval_fnct(qf, start_state, goal_state)
            qf_heatmap = vu.make_heat_map(
                qf_eval,
                x_bounds=x_bounds,
                y_bounds=y_bounds,
                resolution=resolution,
            )

            fig = create_figure(
                ['Estimated'],
                [qf_heatmap],
            )
            img = vu.save_image(fig)
            report.add_image(
                img, "Goal State = {}\nGoal XY = {}".format(
                    goal_state,
                    position_from_angles(np.expand_dims(goal_state, 0))))

    report.save()
    abs_path = osp.abspath(report.path)
    print("Report saved to: {}".format(abs_path))
    open_report = query_yes_no("Open report?", default="yes")
    if open_report:
        cmd = "xdg-open {}".format(abs_path)
        print(cmd)
        subprocess.call(cmd, shell=True)
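
A minimal entry point for this example, assuming the function lives in a standalone script (the file name below is hypothetical):

if __name__ == '__main__':
    # Hypothetical invocation: python visualize_qf.py path/to/experiment_folder
    main()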
Example No. 2
# Relies on helpers from the surrounding codebase: get_all_instances,
# get_name_tag, get_clients, query_yes_no, and the project config module.
def kill_f(pattern):
    print("Trying to kill instances matching pattern:", pattern)
    to_kill = []      # instance names, for the confirmation prompt
    to_kill_ids = {}  # region -> list of instance ids to terminate
    for instance in get_all_instances():
        name = get_name_tag(instance)
        if name is None or pattern in name:
            instance_id = instance['InstanceId']
            region = instance['Region']
            if name is None:
                if any(
                        x['GroupName'] in config.AWS_SECURITY_GROUPS
                        for x in instance['SecurityGroups']
                ):
                    if query_yes_no(
                            question=
                            "Kill instance {} without name in region {} (security groups {})?"
                            .format(instance_id, region, [
                                x['GroupName']
                                for x in instance['SecurityGroups']
                            ])):
                        name = instance_id
            if name:
                if region not in to_kill_ids:
                    to_kill_ids[region] = []
                to_kill_ids[region].append(instance_id)
                to_kill.append(name)

    print("This will kill the following jobs:")
    print(", ".join(sorted(to_kill)))
    if query_yes_no(question="Proceed?", default="no"):
        for client in get_clients():
            print("Terminating instances in region", client.region)
            ids = to_kill_ids.get(client.region, [])
            if len(ids) > 0:
                client.terminate_instances(InstanceIds=ids)
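
The region-bucketing idiom above (check for the key, create an empty list, append) can also be written with collections.defaultdict; a standalone sketch with made-up instance ids:

from collections import defaultdict

# Group instance ids by region without explicit key-existence checks.
ids_by_region = defaultdict(list)
for region, instance_id in [('us-east-1', 'i-0abc'), ('us-east-1', 'i-0def'),
                            ('us-west-2', 'i-0123')]:
    ids_by_region[region].append(instance_id)
print(dict(ids_by_region))
# {'us-east-1': ['i-0abc', 'i-0def'], 'us-west-2': ['i-0123']}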
Example No. 3
# Uses the same AWS helpers as kill_f above.
def kill(job):
    to_kill = []
    to_kill_ids = {}
    for instance in get_all_instances():
        name = get_name_tag(instance)
        if name == job:
            region = instance['Region']
            if region not in to_kill_ids:
                to_kill_ids[region] = []
            to_kill_ids[region].append(instance['InstanceId'])
            to_kill.append(name)
            break  # job names are assumed unique, so stop at the first match

    print("This will kill the following jobs:")
    print(", ".join(sorted(to_kill)))
    if query_yes_no(question="Proceed?", default="no"):
        for client in get_clients():
            print("Terminating instances in region", client.region)
            ids = to_kill_ids.get(client.region, [])
            if len(ids) > 0:
                client.terminate_instances(InstanceIds=ids)
Example No. 4
import os.path as osp
import subprocess

import numpy as np
from ConfigSpace.hyperparameters import CategoricalHyperparameter
from fanova import visualizer

# Project-specific helpers (FanovaInfo, HTMLReport, get_param_importance,
# is_data_log_uniformly_distributed, is_categorical, plot_pairwise_marginal,
# query_yes_no, and the `vu` visualization module) are assumed to come from
# the surrounding codebase.


def generate_report(fanova_info: FanovaInfo, base_dir, param_name_to_log=None):
    if param_name_to_log is None:
        param_name_to_log = {}
    f, config_space, X, Y, categorical_remapping, variants_list = fanova_info
    report = HTMLReport(
        osp.join(base_dir, 'report.html'),
        images_per_row=3,
    )

    vis = visualizer.Visualizer(f, config_space)
    cs_params = config_space.get_hyperparameters()
    importances = [get_param_importance(f, param) for param in cs_params]
    are_logs = [
        is_data_log_uniformly_distributed(X[:, i])
        for i in range(len(cs_params))
    ]
    data = sorted(
        zip(cs_params, importances, are_logs),
        key=lambda x: -x[1],
    )
    """
    List out how the categorical hyperparameters were mapped.
    """
    for name, remapping in categorical_remapping.items():
        report.add_text("Remapping for {}:".format(name))
        for key, value in remapping.inverse_dict.items():
            report.add_text("\t{} = {}\n".format(key, value))
    """
    Plot individual marginals.
    """
    print("Creating marginal plots")
    for param, importance, is_log in data:
        param_name = param.name
        if param_name in param_name_to_log:
            is_log = param_name_to_log[param_name]
        if isinstance(param, CategoricalHyperparameter):
            vis.plot_categorical_marginal(param_name, show=False)
        else:
            vis.plot_marginal(param_name, show=False, log_scale=is_log)
        img = vu.save_image()
        report.add_image(
            img,
            "Marginal for {}.\nImportance = {}".format(param_name, importance),
        )
    """
    Plot pairwise marginals.
    """
    print("Creating pairwise-marginal plots")
    num_params = len(cs_params)
    # n*(n+1)//2 requests at least all n*(n-1)//2 distinct pairs.
    num_pairs = num_params * (num_params + 1) // 2
    pair_and_importance = f.get_most_important_pairwise_marginals(num_pairs)
    for combi, importance in pair_and_importance.items():
        param_names = []
        for p in combi:
            param_names.append(cs_params[p].name)
        info_text = "Pairwise Marginal for {}.\nImportance = {}".format(
            param_names,
            importance,
        )
        if any(is_categorical(f, name) for name in param_names):
            report.add_text(info_text)
            continue
        plot_pairwise_marginal(vis, combi, show=False)
        img = vu.save_image()
        report.add_image(
            img,
            txt=info_text,
        )
    """
    List the top 10 parameters.
    """
    N = min(10, len(Y))
    # Push NaN scores below the real minimum so they never rank in the top N.
    Y[np.isnan(Y)] = np.nanmin(Y) - 1
    best_idxs = Y.argsort()[-N:][::-1]
    all_param_names = [p.name for p in config_space.get_hyperparameters()]
    for rank, i in enumerate(best_idxs):
        variant = variants_list[i]
        report.add_text("Rank {} params, with score = {}:".format(
            rank + 1, Y[i]))
        for name, value in zip(all_param_names, X[i, :]):
            report.add_text("\t{} = {}\n".format(name, value))
        report.add_text("\texp_name = {}\n".format(variant['exp_name']))
        report.add_text("\tunique_id = {}\n".format(variant['unique_id']))

    print("Guesses for is_log")
    print("{")
    for param, _, is_log in data:
        name = param.name
        print("    '{}': {},".format(name, is_log))
    print("}")
    """
    Ask user if they want to see the report
    """
    report.save()
    abs_path = osp.abspath(report.path)
    print("Report saved to: {}".format(abs_path))
    open_report = query_yes_no("Open report?", default="yes")
    if open_report:
        cmd = "xdg-open {}".format(abs_path)
        print(cmd)
        subprocess.call(cmd, shell=True)
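
The top-N selection near the end combines NaN handling with a descending argsort; a standalone sketch of just that step:

import numpy as np

scores = np.array([0.3, np.nan, 0.9, 0.1])
# Push NaN scores below the real minimum so they can never rank in the top N.
scores[np.isnan(scores)] = np.nanmin(scores) - 1
best_idxs = scores.argsort()[-2:][::-1]  # indices of the 2 best, best first
print(best_idxs)  # [2 0]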
Example No. 5
# Assumes module-level imports from the surrounding codebase: json, os, random,
# sys, time, uuid, git (GitPython), plus the project's config, logger, ppp,
# query_yes_no, run_experiment_old, and the __main__ module imported as `main`.
def run_experiment(
        method_call,
        mode='local',
        exp_prefix='default',
        seed=None,
        variant=None,
        exp_id=0,
        unique_id=None,
        prepend_date_to_exp_prefix=True,
        use_gpu=False,
        snapshot_mode='last',
        snapshot_gap=1,
        n_parallel=0,
        base_log_dir=None,
        sync_interval=180,
        local_input_dir_to_mount_point_dict=None,  # TODO(vitchyr): test this
):
    """
    Usage:

    ```
    def foo(variant):
        x = variant['x']
        y = variant['y']
        logger.log("sum", x+y)

    variant = {
        'x': 4,
        'y': 3,
    }
    run_experiment(foo, variant=variant, exp_prefix="my-experiment")
    ```

    Results are saved to
    `base_log_dir/<date>-my-experiment/<date>-my-experiment-<unique-id>`

    By default, the base_log_dir is determined by
    `config.LOCAL_LOG_DIR/`

    :param method_call: a function that takes in a dictionary as argument
    :param mode: 'local', 'local_docker', or 'ec2'
    :param exp_prefix: name of experiment
    :param seed: Seed for this specific trial.
    :param variant: Dictionary
    :param exp_id: One experiment = one variant setting + multiple seeds
    :param unique_id: If not set, the unique id is generated.
    :param prepend_date_to_exp_prefix: If False, do not prepend the date to
    the experiment directory.
    :param use_gpu:
    :param snapshot_mode: See rllab.logger
    :param snapshot_gap: See rllab.logger
    :param n_parallel:
    :param base_log_dir: Will override the default base log directory
    (config.LOCAL_LOG_DIR).
    :param sync_interval: How often to sync s3 data (in seconds).
    :param local_input_dir_to_mount_point_dict: Dictionary for doodad.
    :return:
    """
    try:
        import doodad
        import doodad.mode
        import doodad.mount as mount
        from doodad.utils import REPO_DIR
    except ImportError:
        return run_experiment_old(
            method_call,
            exp_prefix=exp_prefix,
            seed=seed,
            variant=variant,
            time_it=True,
            mode=mode,
            exp_id=exp_id,
            unique_id=unique_id,
            prepend_date_to_exp_prefix=prepend_date_to_exp_prefix,
            use_gpu=use_gpu,
            snapshot_mode=snapshot_mode,
            snapshot_gap=snapshot_gap,
            n_parallel=n_parallel,
            base_log_dir=base_log_dir,
            periodic_sync_interval=sync_interval,
        )
    # Module-level confirmation flags, assumed initialized to False elsewhere.
    global ec2_okayed
    global gpu_ec2_okayed
    if local_input_dir_to_mount_point_dict is None:
        local_input_dir_to_mount_point_dict = {}
    else:
        raise NotImplementedError("TODO(vitchyr): Implement this")
    # Modify some of the inputs
    if seed is None:
        seed = random.randint(0, 100000)
    if variant is None:
        variant = {}
    for key, value in ppp.recursive_items(variant):
        # This check isn't really necessary, but it's to prevent myself from
        # forgetting to pass a variant through dot_map_dict_to_nested_dict.
        if "." in key:
            raise Exception(
                "Variants should not have periods in keys. Did you mean to "
                "convert {} into a nested dictionary?".format(key)
            )
    if unique_id is None:
        unique_id = str(uuid.uuid4())
    if prepend_date_to_exp_prefix:
        exp_prefix = time.strftime("%m-%d") + "-" + exp_prefix
    variant['seed'] = str(seed)
    variant['exp_id'] = str(exp_id)
    variant['unique_id'] = str(unique_id)
    logger.log("Variant:")
    logger.log(json.dumps(ppp.dict_to_safe_json(variant), indent=2))

    mode_str_to_doodad_mode = {
        'local': doodad.mode.Local(),
        'local_docker': doodad.mode.LocalDocker(
            image=config.DOODAD_DOCKER_IMAGE,
        ),
        'ec2': doodad.mode.EC2AutoconfigDocker(
            image=config.DOODAD_DOCKER_IMAGE,
            region='us-east-2',
            instance_type='c4.large',
            spot_price=0.03,
            s3_log_prefix=exp_prefix,
            s3_log_name="{}-id{}-s{}".format(exp_prefix, exp_id, seed),
        ),
    }

    if base_log_dir is None:
        base_log_dir = config.LOCAL_LOG_DIR
    output_mount_point = config.OUTPUT_DIR_FOR_DOODAD_TARGET
    mounts = [
        mount.MountLocal(local_dir=REPO_DIR, pythonpath=True),
    ]
    for code_dir in config.CODE_DIRS_TO_MOUNT:
        mounts.append(mount.MountLocal(local_dir=code_dir, pythonpath=True))
    for local_dir, mount_point in local_input_dir_to_mount_point_dict.items():
        mounts.append(mount.MountLocal(
            local_dir=local_dir,
            mount_point=mount_point,
            pythonpath=False,
        ))

    if mode != 'local':
        for non_code_mapping in config.DIR_AND_MOUNT_POINT_MAPPINGS:
            mounts.append(mount.MountLocal(**non_code_mapping))

    if mode == 'ec2':
        if not ec2_okayed and not query_yes_no(
                "EC2 costs money. Are you sure you want to run?"
        ):
            sys.exit(1)
        if not gpu_ec2_okayed and use_gpu:
            if not query_yes_no(
                    "EC2 is more expensive with GPUs. Confirm?"
            ):
                sys.exit(1)
            gpu_ec2_okayed = True
        ec2_okayed = True
        output_mount = mount.MountS3(
            s3_path='',
            mount_point=output_mount_point,
            output=True,
            sync_interval=sync_interval,
        )
        # This will be over-written by the snapshot dir, but I'm setting it for
        # good measure.
        base_log_dir_for_script = output_mount_point
        # The snapshot dir needs to be specified for S3 because S3 will
        # automatically create the experiment directory and sub-directory.
        snapshot_dir_for_script = output_mount_point
    elif mode == 'local':
        output_mount = mount.MountLocal(
            local_dir=base_log_dir,
            mount_point=None,  # For purely local mode, skip mounting.
            output=True,
        )
        base_log_dir_for_script = base_log_dir
        # The snapshot dir will be automatically created
        snapshot_dir_for_script = None
    else:
        output_mount = mount.MountLocal(
            local_dir=base_log_dir,
            mount_point=output_mount_point,
            output=True,
        )
        base_log_dir_for_script = output_mount_point
        # The snapshot dir will be automatically created
        snapshot_dir_for_script = None
    mounts.append(output_mount)

    repo = git.Repo(os.getcwd())
    code_diff = repo.git.diff(None)
    if len(code_diff) > 5000:
        logger.log("Git diff %d greater than 5000. Not saving diff." % len(code_diff))
        code_diff = None
    run_experiment_kwargs = dict(
        exp_prefix=exp_prefix,
        variant=variant,
        exp_id=exp_id,
        seed=seed,
        use_gpu=use_gpu,
        snapshot_mode=snapshot_mode,
        snapshot_gap=snapshot_gap,
        code_diff=code_diff,
        commit_hash=repo.head.commit.hexsha,
        script_name=main.__file__,
        n_parallel=n_parallel,
        base_log_dir=base_log_dir_for_script,
    )
    doodad.launch_python(
        target=config.RUN_DOODAD_EXPERIMENT_SCRIPT_PATH,
        mode=mode_str_to_doodad_mode[mode],
        mount_points=mounts,
        args={
            'method_call': method_call,
            'output_dir': snapshot_dir_for_script,
            'run_experiment_kwargs': run_experiment_kwargs,
        },
        use_cloudpickle=True,
        fake_display=(mode != 'local'),
    )
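
A minimal local launch mirroring the docstring example, assuming doodad and the project config are importable:

def foo(variant):
    print(variant['x'] + variant['y'])

run_experiment(
    foo,
    mode='local',
    exp_prefix='my-experiment',
    variant={'x': 4, 'y': 3},
)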
Example No. 6
# Assumes module-level imports from the surrounding codebase: base64, json,
# random, sys, time, uuid, git (GitPython), cloudpickle, plus the project's
# logger, ppp, create_log_dir, save_experiment_data, run_experiment_here,
# run_experiment_lite, and query_yes_no.
def run_experiment_old(
        task,
        exp_prefix='default',
        seed=None,
        variant=None,
        time_it=True,
        save_profile=False,
        profile_file='time_log.prof',
        mode='here',
        exp_id=0,
        unique_id=None,
        prepend_date_to_exp_prefix=True,
        use_gpu=False,
        snapshot_mode='last',
        snapshot_gap=1,
        n_parallel=0,
        base_log_dir=None,
        **run_experiment_lite_kwargs
):
    """
    Run a task via the rllab interface, i.e. serialize it and then run it via
    the run_experiment_lite script.

    This will soon be deprecated.

    :param task:
    :param exp_prefix:
    :param seed:
    :param variant:
    :param time_it: Add a "time" command to the python command?
    :param save_profile: Create a cProfile log?
    :param profile_file: Where to save the cProfile log.
    :param mode: 'here' will run the code inline, without any serialization.
    Other options include 'local', 'local_docker', and 'ec2'. See
    run_experiment_lite documentation to learn what those modes do.
    :param exp_id: Experiment ID. Should be unique across all
    experiments. Note that one experiment may correspond to multiple seeds.
    :param unique_id: Unique ID should be unique across all runs--even different
    seeds!
    :param prepend_date_to_exp_prefix: If True, prefix "month-day_" to
    exp_prefix
    :param run_experiment_lite_kwargs: kwargs to be passed to
    `run_experiment_lite`
    :return:
    """
    if seed is None:
        seed = random.randint(0, 100000)
    if variant is None:
        variant = {}
    if unique_id is None:
        unique_id = str(uuid.uuid4())
    if prepend_date_to_exp_prefix:
        exp_prefix = time.strftime("%m-%d") + "_" + exp_prefix
    variant['seed'] = str(seed)
    variant['exp_id'] = str(exp_id)
    variant['unique_id'] = str(unique_id)
    logger.log("Variant:")
    logger.log(json.dumps(ppp.dict_to_safe_json(variant), indent=2))
    command_words = []
    if time_it:
        command_words.append('time')
    command_words.append('python')
    if save_profile:
        command_words += ['-m', 'cProfile', '-o', profile_file]
    repo = git.Repo(os.getcwd())
    diff_string = repo.git.diff(None)
    commit_hash = repo.head.commit.hexsha
    script_name = "tmp"
    if mode == 'here':
        log_dir, exp_name = create_log_dir(exp_prefix, exp_id, seed,
                                           base_log_dir)
        data = dict(
            log_dir=log_dir,
            exp_name=exp_name,
            mode=mode,
            variant=variant,
            exp_id=exp_id,
            exp_prefix=exp_prefix,
            seed=seed,
            use_gpu=use_gpu,
            snapshot_mode=snapshot_mode,
            snapshot_gap=snapshot_gap,
            diff_string=diff_string,
            commit_hash=commit_hash,
            n_parallel=n_parallel,
            base_log_dir=base_log_dir,
            script_name=script_name,
        )
        save_experiment_data(data, log_dir)
        run_experiment_here(
            task,
            exp_prefix=exp_prefix,
            variant=variant,
            exp_id=exp_id,
            seed=seed,
            use_gpu=use_gpu,
            snapshot_mode=snapshot_mode,
            snapshot_gap=snapshot_gap,
            code_diff=diff_string,
            commit_hash=commit_hash,
            script_name=script_name,
            n_parallel=n_parallel,
            base_log_dir=base_log_dir,
        )
    else:
        if mode == "ec2" and use_gpu:
            if not query_yes_no(
                    "EC2 is more expensive with GPUs. Confirm?"
            ):
                sys.exit(1)
        code_diff = (
            base64.b64encode(cloudpickle.dumps(diff_string)).decode("utf-8")
        )
        run_experiment_lite(
            task,
            snapshot_mode=snapshot_mode,
            snapshot_gap=snapshot_gap,
            exp_prefix=exp_prefix,
            variant=variant,
            seed=seed,
            use_cloudpickle=True,
            python_command=' '.join(command_words),
            mode=mode,
            use_gpu=use_gpu,
            script="railrl/scripts/run_experiment_lite.py",
            code_diff=code_diff,
            commit_hash=commit_hash,
            script_name=script_name,
            n_parallel=n_parallel,
            **run_experiment_lite_kwargs
        )
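
The code_diff passed to run_experiment_lite is a base64 string wrapping a cloudpickled diff; a round-trip sketch of that encoding:

import base64

import cloudpickle

diff_string = "diff --git a/foo.py b/foo.py"
encoded = base64.b64encode(cloudpickle.dumps(diff_string)).decode("utf-8")
decoded = cloudpickle.loads(base64.b64decode(encoded))
assert decoded == diff_string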