# Example #1
def test_network():
    """
    Build an inference copy of the RNN from parameters saved on disc, load the
    offline test data, and delegate the actual open-loop evaluation to
    open_loop_prediction_experiment; results are displayed in the test GUI.
    """

    # Inference-ready network: stateful, with a sequence length of one
    net_for_inference, net_for_inference_info, normalization_info = get_net_and_norm_info(
        a, time_series_length=1, batch_size=1, stateful=True)

    # Testing is always done on data collected offline
    datafile_paths = get_paths_to_datafiles(a.test_files)
    raw_dfs = load_data(datafile_paths)
    normed_dfs = normalize_df(raw_dfs, normalization_info)
    test_set = Dataset(normed_dfs,
                       a,
                       shuffle=False,
                       inputs=net_for_inference_info.inputs,
                       outputs=net_for_inference_info.outputs)

    ground_truth, net_outputs, time_axis = open_loop_prediction_experiment(
        net_for_inference,
        net_for_inference_info,
        test_set,
        normalization_info,
        experiment_length=a.test_len)

    run_test_gui(net_for_inference_info.inputs, net_for_inference_info.outputs,
                 ground_truth, net_outputs, time_axis)
    def setup(self, initial_state: pd.DataFrame, prediction_denorm=False):
        """
        Prepare the predictor for a new prediction run.

        Snapshots the RNN's internal (hidden) states and converts the given
        initial state into a normalized float32 vector used as the first
        network input. The first entry of self.rnn_inputs_names is skipped —
        presumably it is the control input Q, supplied later per step
        (TODO confirm against the class constructor).

        :param initial_state: single-row DataFrame containing at least the
            columns listed in self.rnn_inputs_names[1:]
        :param prediction_denorm: when truthy, predict() returns denormalized
            values; otherwise predictions stay in normalized units
        """
        # Remember the current hidden state so it can be restored between runs
        self.rnn_internal_states = get_internal_states(self.net)
        initial_state_normed = normalize_df(
            copy.copy(initial_state[self.rnn_inputs_names[1:]]),
            self.normalization_info)
        self.rnn_current_input_without_Q = initial_state_normed.to_numpy(
            dtype=np.float32, copy=True).squeeze()
        # Idiom fix: direct boolean assignment replaces the original
        # if/else ladder that set True/False explicitly.
        self.prediction_denorm = bool(prediction_denorm)
# Example #3
    def predict(self, Q) -> pd.DataFrame:
        """
        Roll the analytic cart-pole model self.horizon steps forward under the
        control sequence Q and return the trajectory as a DataFrame.

        :param Q: control inputs, one per horizon step (len(Q) == self.horizon)
        :return: horizon+1 rows (the initial state included); denormalized when
            self.prediction_denorm is True, otherwise normalized
        :raises IndexError: if len(Q) does not match self.horizon
        """

        if len(Q) != self.horizon:
            raise IndexError(
                'Number of provided control inputs does not match the horizon')
        else:
            Q_hat = np.atleast_1d(np.asarray(Q).squeeze())

        # t0 = timeit.default_timer()
        # Object array of states: index 0 is the current state, k+1 the state
        # after applying control Q_hat[k].
        yp_hat = np.zeros(self.horizon + 1, dtype=object)

        for k in range(self.horizon):
            if k == 0:
                # Seed the rollout with copies of the current state self.s
                yp_hat[0] = deepcopy(self.s)
                s_next = deepcopy(self.s)

            t0 = timeit.default_timer()
            # One integration step of the cart-pole equations; Q2u maps the
            # normalized control Q to the physical actuation u.
            s_next = mpc_next_state(s_next,
                                    self.p,
                                    Q2u(Q_hat[k], self.p),
                                    dt=self.dt)
            # Keep the derived trig features consistent with the updated angle
            s_next.angle_cos = np.cos(s_next.angle)
            s_next.angle_sin = np.sin(s_next.angle)
            t1 = timeit.default_timer()
            # self.eq_eval_time.append((t1 - t0) * 1.0e6)
            yp_hat[k + 1] = s_next

        # Flatten states into the column layout of self.prediction_list.
        # Note: the loop variable Q shadows the Q parameter (no longer needed).
        all_features = []
        for k in range(len(yp_hat)):
            s = yp_hat[k]
            if k < self.horizon:
                Q = Q_hat[k]
            else:
                # The final state has no control of its own; repeat the last one
                Q = Q_hat[k - 1]
            timestep_features = [
                Q, s.angle_cos, s.angle_sin, s.angle, s.angleD, s.position,
                s.positionD
            ]
            all_features.append(timestep_features)
        all_features = np.asarray(all_features)
        # NOTE(review): assumes self.prediction_list has exactly horizon+1 rows
        # and this exact column order — confirm against its construction.
        self.prediction_list.values[:, :] = all_features
        # self.prediction_list = normalize_df(self.prediction_list, self.normalization_info)

        predictions = copy.copy(self.prediction_list)

        # The rollout is computed in physical units, so it is normalized on
        # request (the branch names are therefore inverted vs. "denorm").
        if self.prediction_denorm:
            return predictions
        else:
            return normalize_df(predictions, self.normalization_info)
# Per-feature maximum-error accumulators, one row per prediction window.
# (N_predictions, features, predictions_1/2, df, horizon, etc. are defined
# earlier in the script, outside this excerpt.)
max_error_1 = pd.DataFrame(0, index=np.arange(N_predictions), columns=features)
max_error_2 = pd.DataFrame(0, index=np.arange(N_predictions), columns=features)

# For relative error (meaningless?)
# max_targets_1 = pd.DataFrame(0, index=np.arange(N_predictions), columns=features)
# max_targets_2 = pd.DataFrame(0, index=np.arange(N_predictions), columns=features)
print('Calculating errors')
for i in tqdm(range(N_predictions)):
    # i-th predicted trajectories of the two predictors being compared
    prediction_1 = predictions_1[i][features]
    prediction_2 = predictions_2[i][features]
    # Ground truth: the matching horizon+1 slice of the recorded experiment
    target = df[features]
    target = target.iloc[autoregres_at_after_start +
                         i:autoregres_at_after_start + i + horizon + 1]
    target = target.reset_index(drop=True)
    # Bring the target into the same units the predictions are expressed in
    if not prediction_denorm:
        target = normalize_df(target, normalization_info)

    error_1 = prediction_1 - target
    error_2 = prediction_2 - target

    # error_1_abs = error_1.abs()
    # error_2_abs = error_2.abs()
    #
    # idx_max_1 = error_1_abs.idxmax(axis=0).values
    # idx_max_2 = error_2_abs.idxmax(axis=0).values

    # For relative error
    # max_val_1 = []
    # max_val_2 = []
    # for idx in range(len(features)):
    #     max_val_1.append(target.at[target.index[idx_max_1[idx]], features[idx]])
def plot_results(net,
                 args,
                 dataset=None,
                 normalization_info=None,
                 time_axes=None,
                 filepath=None,
                 inputs_list=None,
                 outputs_list=None,
                 closed_loop_list=None,
                 seq_len=None,
                 warm_up_len=None,
                 closed_loop_enabled=False,
                 comment='',
                 rnn_full_name=None,
                 save=False,
                 close_loop_idx=512):
    """
    Feed one recorded experiment through the RNN one time step at a time and
    plot the network's predictions against the ground truth.

    :param net: trained RNN (must expose reset(), eval(), rnn_full_name and be
        callable as net(rnn_input=tensor))
    :param args: argument namespace supplying defaults for the optional params
    :param dataset: pre-built Dataset; loaded from filepath when None
    :param normalization_info: normalization statistics; loaded from disc when None
    :param time_axes: time axes matching dataset (must accompany dataset)
    :param filepath: validation data file (defaults to args.val_file_name)
    :param inputs_list: RNN input feature names (defaults to args.inputs_list)
    :param outputs_list: RNN output feature names (defaults to args.outputs_list)
    :param closed_loop_list: features fed back from the network output once the
        loop is closed (required when closed_loop_enabled)
    :param seq_len: experiment sequence length (defaults to args.seq_len)
    :param warm_up_len: hidden-state warm-up steps (defaults to args.warm_up_len)
    :param closed_loop_enabled: after close_loop_idx steps, feed network outputs
        back as inputs instead of ground truth
    :param comment: text passed through to the plot
    :param rnn_full_name: ignored — unconditionally overwritten below
    :param save: when True, save the figure under ./save_plots/
    :param close_loop_idx: step index at which the loop is closed
    :raises ValueError: when inputs/outputs/closed-loop lists cannot be resolved
    """

    # NOTE(review): the rnn_full_name parameter is unconditionally overwritten
    # here; kept as-is to preserve existing behavior.
    rnn_full_name = net.rnn_full_name

    # Resolve defaults from args where the caller did not provide values
    if filepath is None:
        filepath = args.val_file_name
        if isinstance(filepath, list):  # fix: isinstance, not type() ==
            filepath = filepath[0]

    if warm_up_len is None:
        warm_up_len = args.warm_up_len

    if seq_len is None:
        seq_len = args.seq_len

    if inputs_list is None:
        inputs_list = args.inputs_list
        if inputs_list is None:
            raise ValueError('RNN inputs not provided!')

    if outputs_list is None:
        outputs_list = args.outputs_list
        if outputs_list is None:
            raise ValueError('RNN outputs not provided!')

    if closed_loop_enabled and (closed_loop_list is None):
        closed_loop_list = args.close_loop_for
        if closed_loop_list is None:
            raise ValueError('RNN closed-loop-inputs not provided!')

    net.reset()
    net.eval()
    device = get_device()

    if normalization_info is None:
        normalization_info = load_normalization_info(args.PATH_TO_EXPERIMENT_RECORDINGS, rnn_full_name)

    if dataset is None or time_axes is None:
        test_dfs, time_axes = load_data(args, filepath)
        test_dfs_norm = normalize_df(test_dfs, normalization_info)
        test_set = Dataset(test_dfs_norm, args, time_axes=time_axes, seq_len=seq_len)
        del test_dfs
    else:
        test_set = copy.deepcopy(dataset)
        test_set.reset_seq_len(seq_len=seq_len)

    # Format the experiment data
    features, targets, time_axis = test_set.get_experiment(1)  # Put number in brackets to get the same idx at every run

    features_pd = pd.DataFrame(data=features, columns=inputs_list)
    targets_pd = pd.DataFrame(data=targets, columns=outputs_list)

    # Warm up the hidden state by repeatedly feeding the first sample.
    # Does not bring anything. Why? 0-state shouldn't have zero internal state due to biases...
    # Perf fix: the warm-up input is constant, so convert it to a tensor once.
    rnn_input_0 = copy.deepcopy(features_pd.iloc[0])
    warm_up_input = torch.from_numpy(
        np.squeeze(rnn_input_0.to_numpy())).float().unsqueeze(0).unsqueeze(0).to(device)
    for _ in range(warm_up_len):
        net(rnn_input=warm_up_input)
    net.outputs = []
    net.sample_counter = 0

    idx_cl = 0
    close_the_loop = False
    # Fix: initialize before the loop so the `is not None` guard below cannot
    # raise NameError on the first iteration when close_loop_idx == 0.
    normalized_rnn_output = None

    # Collect per-step outputs in a list; DataFrame.append is deprecated
    # (removed in pandas 2.0) — concatenate once after the loop instead.
    rnn_output_frames = []
    for index, row in features_pd.iterrows():
        rnn_input = pd.DataFrame(copy.deepcopy(row)).transpose().reset_index(drop=True)
        if idx_cl == close_loop_idx:
            close_the_loop = True
        if closed_loop_enabled and close_the_loop and (normalized_rnn_output is not None):
            # Replace the selected inputs with the network's previous outputs
            rnn_input[closed_loop_list] = normalized_rnn_output[closed_loop_list]
        rnn_input = np.squeeze(rnn_input.to_numpy())
        rnn_input = torch.from_numpy(rnn_input).float().unsqueeze(0).unsqueeze(0).to(device)
        normalized_rnn_output = net(rnn_input=rnn_input)
        normalized_rnn_output = np.squeeze(normalized_rnn_output.detach().cpu().numpy()).tolist()
        normalized_rnn_output = copy.deepcopy(pd.DataFrame(data=[normalized_rnn_output], columns=outputs_list))
        rnn_output_frames.append(normalized_rnn_output)
        idx_cl += 1

    if rnn_output_frames:
        rnn_outputs = pd.concat(rnn_output_frames, ignore_index=True)
    else:
        rnn_outputs = pd.DataFrame(columns=outputs_list)

    # Plot everything in physical (denormalized) units
    targets_pd_denorm = denormalize_df(targets_pd, normalization_info)
    rnn_outputs_denorm = denormalize_df(rnn_outputs, normalization_info)
    fig, axs = plot_results_specific(targets_pd_denorm, rnn_outputs_denorm, time_axis, comment, closed_loop_enabled, close_loop_idx)

    plt.show()

    if save:
        # Idiom fix: exist_ok replaces the try/except FileExistsError dance
        os.makedirs('save_plots', exist_ok=True)
        timestampStr = datetime.now().strftime("-%d%b%Y_%H%M%S")
        if rnn_full_name is not None:
            fig.savefig('./save_plots/' + rnn_full_name + timestampStr + '.png')
        else:
            fig.savefig('./save_plots/' + timestampStr + '.png')