Example #1
def plot(cfg):
    log.info("============= Configuration =============")
    log.info(f"Config:\n{cfg.pretty()}")
    log.info("=========================================")
    hv_characterization()
    quit()
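    # NOTE: the hv_characterization() + quit() pair above ends the function here;
    # the blocks below are standalone plotting snippets that only run when the
    # earlier quit() calls are removed.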
    # Yaw control
    yaw_dir = "/Users/nato/Documents/Berkeley/Research/Codebases/dynamics-learn/sweeps/2020-05-13/08-01-09/metric.name=Yaw,robot=iono_sim/"
    ex = "0/trial_33.dat"
    yaw_ex = yaw_dir+ex
    # plot_sweep_1(yaw_dir)
    plot_rollout_dat(yaw_ex)
    quit()
    # dir=2020-02-10/15-39-36
    files = glob.glob(hydra.utils.get_original_cwd() + '/outputs/' + cfg.dir + '/*/**.dat')
    ms = []
    cl = []
    for g in files:
        mse, clust = torch.load(g)
        if clust < 500:
            continue
        ms.append(mse)
        cl.append(clust)
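    # Each *.dat file is a torch-saved (mse, cluster_size) pair; runs with fewer
    # than 500 clustered datapoints are skipped above.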

    # ms = np.array(ms)
    # cl = np.array(cl)

    # Non clustered data
    full_size = [4000]
    base = [0.6844194601919266,
            0.6426670856359498,
            0.6760970001662061,
            0.7867345088097977,
            0.6402819700817463,
            0.6432612884414582,
            0.614643476721318,
            0.673518857099874,
            0.5565854257191823,
            0.9437187183401807]
    #
    # for b in base:
    #     ms.append(b)
    #     cl.append(full_size[0])

    cl, ms = zip(*sorted(zip(cl, ms)))
    ids = np.unique(cl)

    cl_arr = np.stack(cl).reshape((len(ids), -1))
    ms_arr = np.stack(ms).reshape((len(ids), -1))
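    # Sorting by cluster size and reshaping to (num cluster sizes, runs per size)
    # groups the repeated runs for each cluster size into one row for error bars.
    # This assumes every cluster size has the same number of runs, e.g. (sketch):
    # assert len(cl) % len(ids) == 0, "unequal number of runs per cluster size"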

    import matplotlib.pyplot as plt
    import plotly.graph_objects as go

    colors = plt.get_cmap('tab10').colors
    traces = []
    i = 1
    # tab10 colors are floats in [0, 1]; scale to 0-255 for a CSS-style rgb string
    cs_str = 'rgb' + str(tuple(int(255 * c) for c in colors[i]))

    err_traces, xs, ys = generate_errorbar_traces(ms_arr.T, xs=cl_arr.T.tolist(), color=cs_str,
                                                  name="Clustered Training")
    for t in err_traces:
        traces.append(t)

    layout = dict( #title=f"Test Set Prediction Error",  # (Env: {env_name})",
                  xaxis={'title': 'Cluster Size (Log Scale)',
                         'autorange': 'reversed',
                         'range':[3.7, 2.6]
                         },
                  yaxis={'title': 'Prediction Mean Squared Error',
                         'range':[.3,1]},
                  font=dict(family='Times New Roman', size=33, color='#7f7f7f'),
                  xaxis_type="log",
                  # yaxis_type="log",
                  height=600,
                  width=1300,
                  margin=dict(l=0, r=0, b=0, t=0),
                  plot_bgcolor='white',
                  legend={'x': .6, 'y': .05, 'bgcolor': 'rgba(50, 50, 50, .03)'})


    fig = go.Figure(
        data=traces,
        layout=layout,
    )

    fig.add_trace(
        # Line Horizontal
        go.Scatter(
            mode="lines",
            x=[max(ids), min(ids)],
            y=[np.mean(base), np.mean(base)],
            line=dict(
                color="gray",
                width=4,
                dash="dashdot",
            ),
            name='Default Training (4000 Datapoints)'
        ))
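    # The dashed gray line spans the full range of cluster sizes and marks the
    # mean MSE of the ten baseline runs trained on the full 4000-point dataset.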

    import plotly.io as pio
    pio.show(fig)
    fig.write_image('clustering_thin.pdf')
    quit()
    hv_characterization()

    ######################################################################
    logs = defaultdict(list)
    configs = defaultdict(list)
    logs_dirs = ['/Users/nol/Documents/code-bases/dynamicslearn/multirun/2019-12-16/20-01-04/', ]

    def load_log(directory, trial_file=None):
        # Appends into the enclosing `logs`/`configs` dicts, keyed by the loop
        # variable `log_dir` at call time.
        if '.hydra' in os.listdir(directory):
            full_conf = OmegaConf.load(f"{directory}/.hydra/config.yaml")
        else:
            full_conf = OmegaConf.load(f"{directory}/config.yaml")
        trial_files = glob.glob(f"{directory}/trial_*.dat")
        if len(trial_files) > 0:
            # Load the requested trial file, or fall back to the most recent one.
            if trial_file is not None:
                last_trial_log = f"{directory}/{trial_file}"
            else:
                last_trial_log = max(trial_files, key=os.path.getctime)
            vis_log = torch.load(last_trial_log)
            logs[log_dir].append(vis_log)
            configs[log_dir].append(full_conf)

    for log_dir in logs_dirs:
        if os.path.exists(os.path.join(log_dir, 'config.yaml')):
            log.info(f"Loading latest trial from {log_dir}")
            d = os.path.join(log_dir)
            load_log(d)
        else:
            # Assuming directory with multiple identical experiments (dir/0, dir/1 ..)
            latest = defaultdict(list)
            for ld in os.listdir(log_dir):
                directory = os.path.join(log_dir, ld)
                if os.path.isdir(directory):
                    trial_files = glob.glob(f"{directory}/trial_*.dat")
                    if len(trial_files) == 0:
                        continue
                    last_trial_log = max(trial_files, key=os.path.getctime)
                    last_trial_log = os.path.basename(last_trial_log)
                    latest[log_dir].append(last_trial_log)

            for ld in os.listdir(log_dir):
                if ld == '.slurm': continue
                log_subdir = os.path.join(log_dir, ld)
                if os.path.isdir(log_subdir):
                    # Load data for the smallest trial number across all subdirectories
                    if len(latest[log_dir]) == 0:
                        log.warning(f"No trial files found under {log_dir}")
                        break
                    trial_file = natsorted(latest[log_dir])[0]
                    load_log(log_subdir, trial_file)

    # Plot learning curves (reward over trials) for all loaded logs
    plot_rewards_over_trials(logs)
Example #2
def mpc(cfg):
    log.info("============= Configuration =============")
    log.info(f"Config:\n{cfg.pretty()}")
    log.info("=========================================")

    env_name = cfg.env.params.name
    env = gym.make(env_name)
    env.reset()
    full_rewards = []

    if cfg.metric.name == 'Living':
        metric = living_reward
    elif cfg.metric.name == 'Rotation':
        metric = rotation_mat
    elif cfg.metric.name == 'Square':
        metric = squ_cost
    elif cfg.metric.name == 'Yaw':
        metric = yaw_r
    else:
        raise ValueError("Improper metric name passed")
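    # An equivalent, more compact dispatch for the same metric functions
    # (a sketch only; behavior matches the chain above):
    # metric_by_name = {'Living': living_reward, 'Rotation': rotation_mat,
    #                   'Square': squ_cost, 'Yaw': yaw_r}
    # try:
    #     metric = metric_by_name[cfg.metric.name]
    # except KeyError:
    #     raise ValueError("Improper metric name passed")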

    for s in range(cfg.experiment.seeds):
        log.info(f"Random Seed: {s}")
        total_costs = []
        data_rand = []
        total_steps = []
        r = 0
        while r < cfg.experiment.random:
            data_r = rollout(env, RandomController(env, cfg), cfg.experiment, metric=metric)
            plot_rollout(data_r[0], data_r[1], pry=cfg.pid.params.pry, save=cfg.save, loc=f"/R_{r}")
            rews = data_r[-2]
            sim_error = data_r[-1]
            if sim_error:
                print("Repeating strange simulation")
                continue
            # rand_costs.append(np.sum(rews) / len(rews))  # for minimization
            total_costs.append(np.sum(rews))  # for minimization
            # log.info(f" - Cost {np.sum(rews) / cfg.experiment.r_len}")
            r += 1

            # data_sample = subsample(data_r, cfg.policy.params.period)
            data_rand.append(data_r)
            total_steps.append(0)

        X, dX, U = to_XUdX(data_r)
        X, dX, U = combine_data(data_rand[:-1], (X, dX, U))
        msg = "Random Rollouts completed of "
        msg += f"Mean Cumulative reward {np.mean(total_costs)}, "
        msg += f"Mean Flight length {cfg.policy.params.period * np.mean([np.shape(d[0])[0] for d in data_rand])}"
        log.info(msg)

        trial_log = dict(
            env_name=cfg.env.params.name,
            model=None,
            seed=cfg.random_seed,
            raw_data=data_rand,
            trial_num=-1,
            rewards=total_costs,
            steps=total_steps,
            nll=None,
        )
        save_log(cfg, -1, trial_log)

        model, train_log = train_model(X, U, dX, cfg.model)
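        # Standard model-based RL loop from here on: fit a dynamics model to the
        # aggregated (X, U, dX) data, roll out an MPC controller built on that
        # model, append the new data, and retrain after each iteration.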

        for i in range(cfg.experiment.num_roll-cfg.experiment.random):
            controller = MPController(env, model, cfg)

            r = 0
            cum_costs = []
            data_rs = []
            while r < cfg.experiment.repeat:
                data_r = rollout(env, controller, cfg.experiment, metric=metric)
                plot_rollout(data_r[0], data_r[1], pry=cfg.pid.params.pry, save=cfg.save, loc=f"/{str(i)}_{r}")
                rews = data_r[-2]
                sim_error = data_r[-1]

                if sim_error:
                    print("Repeating strange simulation")
                    continue
                # cum_costs.append(np.sum(rews) / len(rews))  # for minimization
                total_costs.append(np.sum(rews))  # for minimization
                # log.info(f" - Cost {np.sum(rews) / cfg.experiment.r_len}")
                r += 1

                # data_sample = subsample(data_r, cfg.policy.params.period)
                data_rs.append(data_r)
                total_steps.append(np.shape(X)[0])

            X, dX, U = combine_data(data_rs, (X, dX, U))
            msg = "Rollouts completed of "
            msg += f"Mean Cumulative reward {np.mean(total_costs)}, " #/ cfg.experiment.r_len
            msg += f"Mean Flight length {cfg.policy.params.period * np.mean([np.shape(d[0])[0] for d in data_rs])}"
            log.info(msg)

            trial_log = dict(
                env_name=cfg.env.params.name,
                model=model,
                seed=cfg.random_seed,
                raw_data=data_rs,
                trial_num=i,
                rewards=total_costs,
                steps=total_steps,
                nll=train_log,
            )
            save_log(cfg, i, trial_log)

            model, train_log = train_model(X, U, dX, cfg.model)

        fig = plot_rewards_over_trials(np.transpose(np.stack([total_costs])), env_name, save=True)
        fig.write_image(os.getcwd() + "/learning-curve.pdf")
def sac_experiment(cfg):
    log.info("============= Configuration =============")
    log.info(f"Config:\n{cfg.pretty()}")
    log.info("=========================================")

    real_env = gym.make(cfg.env.params.name)
    set_seed_everywhere(cfg.random_seed)

    obs_dim = cfg.model.params.dx
    action_dim = cfg.model.params.du
    target_entropy_coef = 1
    batch_size = cfg.alg.params.batch_size  # 512
    discount = cfg.alg.trainer.discount  # .99
    tau = cfg.alg.trainer.tau  # .005
    policy_freq = cfg.alg.trainer.target_update_period  # 2
    replay_buffer_size = int(cfg.alg.replay_buffer_size)  # 1000000
    start_steps = cfg.alg.params.start_steps  # 10000
    eval_freq = cfg.alg.params.eval_freq  # 10000
    max_steps = int(cfg.alg.params.max_steps)  # 2E6
    num_eval_episodes = cfg.alg.params.num_eval_episodes  # 5
    num_eval_timesteps = cfg.alg.params.num_eval_timesteps  # 1000
    num_rl_updates = 1
    model_dir = None

    replay_buffer = ReplayBuffer(obs_dim, action_dim, cfg.device,
                                 replay_buffer_size)

    policy = SAC(
        cfg.device,
        obs_dim,
        action_dim,
        hidden_dim=cfg.alg.layer_size,
        hidden_depth=cfg.alg.num_layers,
        initial_temperature=cfg.alg.trainer.initial_temp,
        actor_lr=cfg.alg.trainer.actor_lr,  # 1E-3,
        critic_lr=cfg.alg.trainer.critic_lr,  # 1E-3,
        actor_beta=cfg.alg.trainer.actor_beta,  # 0.9,
        critic_beta=cfg.alg.trainer.critic_beta,  # 0.9,
        log_std_min=cfg.alg.trainer.log_std_min,  # -10,
        log_std_max=cfg.alg.trainer.log_std_max,
        period=cfg.policy.params.period)  # 2)

    step = 0
    steps_since_eval = 0
    episode_num = 0
    episode_reward = 0
    episode_success = 0
    episode_step = 0
    saved_idx = 0
    done = True
    returns = None
    target_entropy = -action_dim * target_entropy_coef
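    # With target_entropy_coef = 1 this is the common SAC heuristic of setting
    # the target entropy to -|A| for automatic temperature tuning.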

    if cfg.metric.name == 'Living':
        metric = living_reward
    elif cfg.metric.name == 'Rotation':
        metric = rotation_mat
    elif cfg.metric.name == 'Square':
        metric = squ_cost
    elif cfg.metric.name == 'Yaw':
        metric = yaw_r
    else:
        raise ValueError("Improper metric name passed")

    to_plot_rewards = []
    total_steps = []
    rewards = evaluate_policy(real_env,
                              policy,
                              step,
                              log,
                              num_eval_episodes,
                              num_eval_timesteps,
                              None,
                              metric=metric)

    to_plot_rewards.append(rewards)
    total_steps.append(0)

    env = gym.make(cfg.env.params.name)

    # from gym import spaces
    # env.action_space = spaces.Box(low=np.array([0, 0, 0, 0]),
    #                                    high=np.array([65535, 65535, 65535, 65535]),
    #                                    dtype=np.int32)

    layout = dict(
        title=f"Learning Curve: Reward vs. Training Steps "
              f"(Env: {cfg.env.params.name}, Alg: {cfg.policy.mode})",
        xaxis={'title': f"Steps (x{eval_freq})"},
        yaxis={'title': f"Avg Reward ({num_eval_episodes} episodes)"},
        font=dict(family='Times New Roman', size=18, color='#7f7f7f'),
        legend={
            'x': .83,
            'y': .05,
            'bgcolor': 'rgba(50, 50, 50, .03)'
        })

    while step < max_steps:
        # log.info(f"===================================")
        if step % 1000 == 0:
            log.info(f"Step {step}")

        if done:
            # Evaluate episode
            if steps_since_eval >= eval_freq:
                steps_since_eval %= eval_freq
                log.info(f"eval/episode: {episode_num}")
                returns = evaluate_policy(env,
                                          policy,
                                          step,
                                          log,
                                          num_eval_episodes,
                                          num_eval_timesteps,
                                          None,
                                          metric=metric)
                to_plot_rewards.append(returns)
                total_steps.append(step)

                if model_dir is not None:
                    policy.save(model_dir, step)

            # log.info(f"train/episode_reward', episode_reward, step)

            obs = env.reset()
            done = False
            episode_reward = 0
            episode_success = 0
            episode_step = 0
            episode_num += 1

            # log.info(f"train/episode', episode_num, step)

        # Select action randomly or according to policy
        if step < start_steps:
            action = env.action_space.sample()
            action_scale = action
        else:
            with torch.no_grad():
                with eval_mode(policy):
                    action = policy.sample_action(obs)
                    action_scale = env.action_space.high * (action + 1) / 2
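        # Before start_steps the env is driven by random actions; afterwards the
        # SAC policy samples in [-1, 1] and is rescaled to [0, action_space.high].
        # The update block below then performs one gradient step per env step
        # (num_rl_updates), except for a burst of start_steps updates at the
        # moment the warm-up phase ends.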

        if step >= start_steps:
            num_updates = start_steps if step == start_steps else num_rl_updates
            for _ in range(num_updates):
                policy.update(replay_buffer,
                              step,
                              log,
                              batch_size,
                              discount,
                              tau,
                              policy_freq,
                              target_entropy=target_entropy)

        next_obs, reward, done, _ = env.step(action_scale)
        # print(next_obs[:3])
        # done_bool = 0 if episode_step + 1 == env._max_episode_steps else float(done)
        done = 1 if episode_step + 1 == num_eval_timesteps else float(done)
        reward = metric(next_obs, action)
        episode_reward += reward

        replay_buffer.add(obs, action_scale, reward, next_obs, done)
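        # The buffer stores the scaled action and the metric-based reward, which
        # overrides the reward returned by env.step above.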

        obs = next_obs

        episode_step += 1
        step += 1
        steps_since_eval += 1
        if (step % eval_freq) == 0:
            trial_log = dict(
                env_name=cfg.env.params.name,
                trial_num=saved_idx,
                replay_buffer=replay_buffer if cfg.save_replay else [],
                steps=total_steps,
                policy=policy,
                rewards=to_plot_rewards,
            )
            save_log(cfg, step, trial_log)
            saved_idx += 1
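            # Checkpoint every eval_freq steps; the resulting trial_*.dat files
            # are what plot() above reloads with torch.load (assuming save_log
            # wraps torch.save, consistent with how those files are read there).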

    plot_rewards_over_trials(to_plot_rewards, cfg.env.params.name, save=True)
def mpc(cfg):
    log.info("============= Configuration =============")
    log.info(f"Config:\n{cfg.pretty()}")
    log.info("=========================================")

    # plot_results_yaw(pts=cfg.data)
    # quit()

    env_name = cfg.env.params.name
    env = gym.make(env_name)
    env.reset()

    env.seed(cfg.random_seed, inertial=cfg.experiment.inertial)
    if cfg.experiment.inertial:
        log.info(
            f"Running experiment with inertial properties Ixx: {env.Ixx}, Iyy: {env.Iyy}")

    # full_rewards = []
    # temp = hydra.utils.get_original_cwd() + '/outputs/2020-07-11/17-17-05/trial_3.dat'
    # dat = torch.load(temp)
    # actions = dat['raw_data'][0][1]
    # l = []
    #
    # yaw_actions = np.array([
    #     [1500, 1500, 1500, 1500],
    #     [2000, 1000, 1000, 2000],
    #     [1000, 2000, 2000, 1000],
    #     [2000, 2000, 1000, 1000],
    #     [1000, 1000, 2000, 2000],
    # ])
    #
    # def find_ind(arr):
    #     if np.all(np.equal(arr, [1500, 1500, 1500, 1500])):
    #         return 0
    #     elif np.all(np.equal(arr, [2000, 1000, 1000, 2000])):
    #         return 1
    #     elif np.all(np.equal(arr, [1000, 2000, 2000, 1000])):
    #         return 3
    #     elif np.all(np.equal(arr, [2000, 2000, 1000, 1000])):
    #         return 2
    #     else: # [1000, 1000, 2000, 2000]
    #         return 4
    #
    # for act in actions:
    #     act = act.numpy()
    #     id = find_ind(act)
    #     l.append(id)
    #
    # initial = l[:24]
    # states = dat['raw_data'][0][0][:25]
    # yaw_value = np.rad2deg(states[-1][0])-np.rad2deg(states[0][0])
    # print(f"Yaw after 25 steps{yaw_value}")
    # plot_lie(initial)
    # # plot_rollout(np.stack(dat['raw_data'][0][0])[:500,:3], dat['raw_data'][0][1], loc="/yaw_plt", save=True, only_x=True, legend=False)
    # quit()

    if cfg.metric.name == 'Living':
        metric = living_reward
        log.info("Using metric living reward")
    elif cfg.metric.name == 'Rotation':
        metric = rotation_mat
        log.info("Using metric rotation matrix")
    elif cfg.metric.name == 'Square':
        metric = squ_cost
        log.info("Using metric square cost")
    elif cfg.metric.name == 'Yaw':
        metric = yaw_r
        log.info("Using metric yaw sliding mode")
    elif cfg.metric.name == 'Yaw2':
        metric = yaw_r2
        log.info("Using metric yaw base")
    elif cfg.metric.name == 'Yaw3':
        metric = yaw_r3
        log.info("Using metric yaw rate")
    else:
        raise ValueError("Improper metric name passed")

    for s in range(cfg.experiment.seeds):
        log.info(f"Random Seed: {s}")
        total_costs = []
        data_rand = []
        total_steps = []
        r = 0
        while r < cfg.experiment.random:
            data_r = rollout(env,
                             RandomController(env, cfg),
                             cfg.experiment,
                             metric=metric)
            if env_name != 'CartPoleContEnv-v0':
                plot_rollout(data_r[0],
                             data_r[1],
                             pry=cfg.pid.params.pry,
                             save=cfg.save,
                             loc=f"/R_{r}")
            rews = data_r[-2]
            sim_error = data_r[-1]
            if sim_error:
                print("Repeating strange simulation")
                continue
            # rand_costs.append(np.sum(rews) / len(rews))  # for minimization
            total_costs.append(np.sum(rews))  # for minimization
            # log.info(f" - Cost {np.sum(rews) / cfg.experiment.r_len}")
            r += 1

            # data_sample = subsample(data_r, cfg.policy.params.period)
            data_rand.append(data_r)
            total_steps.append(0)

        X, dX, U = to_XUdX(data_r)
        X, dX, U = combine_data(data_rand[:-1], (X, dX, U))
        msg = "Random Rollouts completed of "
        msg += f"Mean Cumulative reward {np.mean(total_costs)}, "
        msg += f"Mean length {np.mean([len(a[0]) for a in data_rand])}"
        log.info(msg)
        last_yaw = np.max(np.abs(np.stack(data_r[0])[:, 2]))  #data_r[0][-1][2]
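        # last_yaw records the largest |yaw| seen in the rollout; it is computed
        # here but not currently written to the trial log (yaw_num is commented out).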

        trial_log = dict(
            env_name=cfg.env.params.name,
            # model=model,
            seed=cfg.random_seed,
            raw_data=data_r,
            # yaw_num=last_yaw,
            trial_num=-1,
            rewards=total_costs,
            steps=total_steps,
            # nll=train_log,
        )
        save_log(cfg, -1, trial_log)

        model, train_log = train_model(X.squeeze(), U, dX.squeeze(), cfg.model)
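        # First dynamics-model fit. Note the squeeze() on X and dX here; the
        # retraining call at the end of the MPC loop below passes them as-is
        # (presumably the shapes already match after combine_data).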

        for i in range(cfg.experiment.num_roll - cfg.experiment.random):
            controller = MPController(env, model, cfg)

            r = 0
            # cum_costs = []
            data_rs = []
            while r < cfg.experiment.repeat:
                data_r = rollout(env,
                                 controller,
                                 cfg.experiment,
                                 metric=metric)
                plot_rollout(data_r[0],
                             data_r[1],
                             pry=cfg.pid.params.pry,
                             save=cfg.save,
                             loc=f"/{str(i)}_{r}")
                rews = data_r[-2]
                sim_error = data_r[-1]

                if sim_error:
                    print("Repeating strange simulation")
                    continue
                # cum_costs.append(np.sum(rews) / len(rews))  # for minimization
                total_costs.append(np.sum(rews))  # for minimization
                # log.info(f" - Cost {np.sum(rews) / cfg.experiment.r_len}")
                r += 1

                # data_sample = subsample(data_r, cfg.policy.params.period)
                data_rs.append(data_r)
                total_steps.append(np.shape(X)[0])

            X, dX, U = combine_data(data_rs, (X, dX, U))
            msg = "Rollouts completed of "
            msg += f"Cumulative reward {total_costs[-1]}, "  # / cfg.experiment.r_len
            msg += f"length {len(data_r[0])}"
            # log.info(f"Final yaw {180*np.array(data_r[0][-1][2])/np.pi}")
            log.info(msg)
            last_yaw = np.max(np.abs(np.stack(
                data_r[0])[:, 2]))  #data_r[0][-1][2]

            trial_log = dict(
                env_name=cfg.env.params.name,
                # model=model,
                seed=cfg.random_seed,
                raw_data=data_r,
                # yaw_num=last_yaw,
                trial_num=i,
                rewards=total_costs,
                steps=total_steps,
                nll=train_log,
            )
            save_log(cfg, i, trial_log)

            model, train_log = train_model(X, U, dX, cfg.model)

        fig = plot_rewards_over_trials(np.transpose(np.stack([total_costs])),
                                       env_name,
                                       save=True)
        fig.write_image(os.getcwd() + "/learning-curve.pdf")