Code example #1
0
File: core.py — Project: robinhenry/meng
def run(N_exp, dataset, sensor_class):
    """
    Run `N_exp` noise realizations and Q-Q plot the voltage errors.

    For each experiment, measurement noise corresponding to `sensor_class`
    is added to the true load flow voltages, and the errors on the real and
    imaginary parts are pooled over all experiments. The pooled errors are
    then compared against a standard normal distribution in two side-by-side
    quantile-quantile plots, saved to
    ``figures/qq_<sensor_class>_<dataset>.png``.

    Parameters
    ----------
    N_exp : int
        The number of noise realizations (experiments) to run.
    dataset : str
        The unique dataset ID (see `meng.constants`).
    sensor_class : float
        The PMU measurement sensor class (see `meng.constants`).
    """

    # Load true load flow (P/Q injections are not needed here).
    V_org, I_org, _, _, current_idx = load_true_data(dataset)

    V_real_noise_all = []
    V_imag_noise_all = []

    for _ in range(N_exp):

        # Add noise to load flow data; only the noisy voltages are used.
        V_meas, *_ = simulate_noisy_meas(sensor_class, V_org, I_org,
                                         current_idx)

        # Noise on the real and imaginary parts of the voltage.
        # NOTE(review): the errors are NOT divided by their standard
        # deviations, so the Q-Q plots show raw (non-standardized) errors.
        V_real_noise_all.append(V_org.real - V_meas.real)
        V_imag_noise_all.append(V_org.imag - V_meas.imag)

    # Pool all experiments into flat 1D arrays.
    V_real_noise_all = np.array(V_real_noise_all).flatten()
    V_imag_noise_all = np.array(V_imag_noise_all).flatten()

    fig, [ax1, ax2] = plt.subplots(1, 2, figsize=(12, 5))

    # Q-Q plot of the real-part errors against a standard normal.
    probplot(V_real_noise_all, plot=ax1)
    ax1.set_ylabel('Quantiles of real part of $X$ error')
    ax1.set_xlabel('Standard normal quantiles')
    ax1.set_title('')

    # Q-Q plot of the imaginary-part errors against a standard normal.
    probplot(V_imag_noise_all, plot=ax2)
    ax2.set_ylabel('Quantiles of imaginary part of $X$ error')
    ax2.set_xlabel('Standard normal quantiles')
    ax2.set_title('')

    # Change size of scatter points (line 0 holds the sample markers).
    ax1.get_lines()[0].set_markersize(5.0)
    ax2.get_lines()[0].set_markersize(5.0)

    my_io.save_figure(f'figures/qq_{sensor_class}_{dataset}.png', fig)
    plt.show()

    print('Done!')
Code example #2
0
File: run_M2.py — Project: robinhenry/meng
def run(dataset, sensor_class, coeff_type, N_exp, tau, freq, which_i, k_pcr,
        qr, folder, pre_filtering):
    """
    Run `N_exp` sensitivity coefficient estimation experiments using Model 1.

    Model 1 is the linear model of the form: dx = du S + w.

    Parameters
    ----------
    dataset : str
        The unique dataset ID (see `meng.constants`).
    sensor_class : float
        The PMU measurement sensor class (see `meng.constants`).
    coeff_type : {'magn', 'real_imag'}
        Which sensitivity coefficients to estimate.
    N_exp : int
        The number of experiments to run.
    tau : int
        The time window size.
    freq : int
        How often (in timesteps) to estimate the coefficients.
    which_i : list of int or 'all'
        Which coefficients to estimate (eg for voltage coefficients, at which
        buses). Note: a value of 1 corresponds to the 1st bus after the slack.
    k_pcr : int or None
        How many principal components to keep in PCR; set to None to skip the
        PCR step.
    qr : bool
        Set to True to solve the LS problem using Q-R decomposition.
    folder : str
        The name of the experiment folder in which the results are stored.
    pre_filtering : bool
        Whether to do pre-filtering or not.
    """

    is_real_imag = coeff_type == 'real_imag'

    # Create the folder in which the experiment results will be stored.
    # `locals()` here captures the function arguments for logging.
    exp_logger = ExperimentLogger(folder, locals())

    # Load true load flow.
    V_org, I_org, P_org, Q_org, current_idx = load_true_data(dataset)

    # Select the noise std that corresponds to the type of coefficient(s).
    std_abs = constants.SENSOR_STD_ABS[sensor_class]
    std_arg = constants.SENSOR_STD_ANG[sensor_class]
    std_real, std_imag = noise.project(np.abs(V_org), std_abs, std_arg)
    std_a = std_real[1:] if is_real_imag else std_abs
    std_b = std_imag[1:] if is_real_imag else None

    # Pre-filtering step: mask of timesteps kept for estimation.
    if pre_filtering:
        pf_mask = prefilter(np.abs(V_org), std_abs)
    else:
        # BUG FIX: `np.bool` was removed in NumPy 1.24; use builtin `bool`.
        pf_mask = np.ones(V_org.shape[1], dtype=bool)

    # Extract the list of which coefficients to estimate.
    which_i = utils.unpack_which_i(which_i, V_org.shape[0])

    # Load the true sensitivity coefficients of interest.
    coefficients = load_coefficients(dataset)
    S_a_true, S_b_true = utils.coefficients_setup(coefficients, coeff_type,
                                                  which_i)
    S_a_true = {k: v[:, pf_mask] for k, v in S_a_true.items()}
    if S_b_true is not None:
        S_b_true = {k: v[:, pf_mask] for k, v in S_b_true.items()}
    del coefficients

    # Transform voltage phasor measurements into either
    # (|x|, Re{x}, or Im{x}) based on `coeff_type`.
    X_org_a, X_org_b = utils.dependent_vars_setup(V_org, coeff_type)

    # Remove slack bus measurements.
    PQ_org = np.vstack((P_org[1:], Q_org[1:]))[:, pf_mask]
    X_org_a = X_org_a[1:, pf_mask]
    if is_real_imag:
        X_org_b = X_org_b[1:, pf_mask]

    # Extract the true bias term using the true coefficients and the true power
    # injections (from the load flow).
    V0_a_org, V0_b_org = {}, {}
    for x_i, s_a in S_a_true.items():
        v0 = X_org_a[x_i - 1] - np.sum(s_a * PQ_org, axis=0)
        V0_a_org[x_i] = v0
        if is_real_imag:
            s_b = S_b_true[x_i]
            v0 = X_org_b[x_i - 1] - np.sum(s_b * PQ_org, axis=0)
            # BUG FIX: the imaginary-part bias was computed but never stored,
            # leaving `V0_b_org` empty.
            V0_b_org[x_i] = v0

    # Run `N_exp` experiments (noise realizations).
    S_a_all, S_b_all, V_est_all, V_true_coeff_all, V0_a_all, V0_b_all = \
        {}, {}, {}, {}, {}, {}
    ts, cns_a_all, cns_b_all = [], [], []
    for n in range(N_exp):

        # Add noise to load flow data.
        V_meas, _, P_meas, Q_meas, _, _, _, _, _, _ = \
            simulate_noisy_meas(sensor_class, V_org, I_org, current_idx)

        # Transform voltage phasor measurements into either
        # (|x|, Re{x}, or Im{x}) based on `coeff_type`.
        X_meas_a, X_meas_b = utils.dependent_vars_setup(V_meas, coeff_type)

        # Remove slack bus measurements and add bias row (last one).
        PQ_meas = np.vstack((P_meas[1:], Q_meas[1:]))[:, pf_mask]
        PQ_meas = np.vstack((PQ_meas, np.ones(PQ_meas.shape[1])))
        X_meas_a = X_meas_a[1:, pf_mask]
        if is_real_imag:
            X_meas_b = X_meas_b[1:, pf_mask]
        else:
            # BUG FIX: the original assigned `dX_meas_b` (a leftover name from
            # the delta-based model), leaving `X_meas_b` unreset.
            X_meas_b = None

        # Estimate the coefficients (Sigma is not used in this model).
        use_sigma = False
        S_a, ts, cns_a = linear_model(X_meas_a, PQ_meas, use_sigma, tau, freq,
                                      which_i, k_pcr, qr)
        if is_real_imag:
            S_b, _, cns_b = linear_model(X_meas_b, PQ_meas, use_sigma, tau,
                                         freq, which_i, k_pcr, qr)
        else:
            S_b, cns_b = None, None

        # Extract the bias terms V0 from the estimated coefficients
        # (last row of each estimate), then strip that row off.
        V0_a = {x_i: v[-1] for x_i, v in S_a.items()}
        V0_b = {x_i: v[-1] for x_i, v in S_b.items()} if is_real_imag else None

        S_a = {x_i: v[:-1] for x_i, v in S_a.items()}
        S_b = {x_i: v[:-1] for x_i, v in S_b.items()} if is_real_imag else None

        # Construct estimated |X| (using estimated coefficients).
        PQ_meas = PQ_meas[:-1, ts]
        V_est = _estimate_Vmagn(PQ_meas, V0_a, V0_b, S_a, S_b, is_real_imag)

        # Construct estimated |X| (using true coefficients).
        S_a_true_ts = {k: v[:, ts] for k, v in S_a_true.items()}
        S_b_true_ts = {k: v[:, ts]
                       for k, v in S_b_true.items()} if is_real_imag else None

        V0_a_org_ts = {k: v[ts] for k, v in V0_a_org.items()}
        V0_b_org_ts = {k: v[ts]
                       for k, v in V0_b_org.items()} if is_real_imag else None
        V_true_coeff = _estimate_Vmagn(PQ_meas, V0_a_org_ts, V0_b_org_ts,
                                       S_a_true_ts, S_b_true_ts, is_real_imag)

        # Store experiment results.
        _add_results_to_dict(S_a, S_a_all)
        if is_real_imag:
            _add_results_to_dict(S_b, S_b_all)
        _add_results_to_dict(V_est, V_est_all)
        _add_results_to_dict(V_true_coeff, V_true_coeff_all)
        # BUG FIX: the estimated bias terms were never accumulated, so the
        # V0 statistics below were computed over empty dicts.
        _add_results_to_dict(V0_a, V0_a_all)
        if is_real_imag:
            _add_results_to_dict(V0_b, V0_b_all)
        cns_a_all.append(cns_a)
        if is_real_imag:
            cns_b_all.append(cns_b)

    # Compute the mean of the estimated coefficients and predicted dx.
    compute_dict_mean = lambda x: {k: np.mean(v, axis=0) for k, v in x.items()}
    S_a_mean = compute_dict_mean(S_a_all)
    S_b_mean = compute_dict_mean(S_b_all) if is_real_imag else None
    V_mean = compute_dict_mean(V_est_all)
    V_true_coeff_mean = compute_dict_mean(V_true_coeff_all)
    V0_a_mean = compute_dict_mean(V0_a_all)

    # Compute the std of the estimated coefficients and predicted dx.
    compute_dict_std = lambda x: {k: np.std(v, axis=0) for k, v in x.items()}
    S_a_std = compute_dict_std(S_a_all)
    S_b_std = compute_dict_std(S_b_all) if is_real_imag else None
    V_std = compute_dict_std(V_est_all)
    V_true_coeff_std = compute_dict_std(V_true_coeff_all)
    V0_b_std = compute_dict_std(V0_b_all)

    # Compute the true voltage magnitude deviations (from the load flow).
    V_load_flow = np.abs(V_org[1:])
    V_load_flow = {i: V_load_flow[i - 1, ts] for i in which_i}

    # Compute the mean of the condition numbers.
    cns_a_mean = np.mean(cns_a_all, axis=0)
    cns_b_mean = np.mean(cns_b_all, axis=0) if cns_b_all else []

    # Compute Cramer-Rao lower bound on the coefficient estimations.
    crlbs_a, crlbs_b, crlb_cns_a, crlb_cns_b = [], [], [], []
    for t in ts:

        # Compute the average std over the time window for real-imag coefficients.
        if not is_real_imag:
            std = std_a
        else:
            std = np.mean(std_a[:, t - tau:t], axis=1)
            std = np.hstack((std, std))

        H = PQ_org[:, pf_mask][:, t - tau:t].T
        lb, cn = crlb.compute_crlb(H, std)
        crlbs_a.append(lb)
        crlb_cns_a.append(cn)

        if is_real_imag:
            std = np.mean(std_b[:, t - tau:t], axis=1)
            std = np.hstack((std, std))
            lb, cn = crlb.compute_crlb(H, std)
            crlbs_b.append(lb)
            crlb_cns_b.append(cn)

    crlbs_a = np.vstack(crlbs_a).T
    if is_real_imag:
        crlbs_b = np.vstack(crlbs_b).T

    # Keep the true coefficients for the timesteps of interest only.
    S_a_true = {k: v[:, ts] for k, v in S_a_true.items()}
    S_b_true = {k: v[:, ts]
                for k, v in S_b_true.items()} if is_real_imag else {}

    # Store numerical results to files.
    data = {
        'S_a_mean': S_a_mean,
        'S_a_std': S_a_std,
        'S_b_mean': S_b_mean,
        'S_b_std': S_b_std,
        'V_mean': V_mean,
        'V_std': V_std,
        'S_a_true': S_a_true,
        'S_b_true': S_b_true,
        'V_load_flow': V_load_flow,
        'V_true_coeff_mean': V_true_coeff_mean,
        'V_true_coeff_std': V_true_coeff_std,
        'ts': ts,
        'cns_a': cns_a_mean,
        'cns_b': cns_b_mean,
        'crlb_a': crlbs_a,
        'crlb_b': crlbs_b
    }
    exp_logger.save_data(data, 'results')

    #######################################
    ############## PLOTTING ###############
    #######################################
    xlabel = 'Time (s)'

    # Estimated coefficients (for |.| or Re{.}).
    fig, axs = plt.subplots(len(which_i) + 1,
                            1,
                            figsize=(10, 2.5 * len(which_i)),
                            sharex=True)

    for ax, x_i in zip(axs[:-1], which_i):
        y_true = (S_a_true[x_i][x_i - 1], np.zeros(len(ts)))
        y_est = (S_a_mean[x_i][x_i - 1], S_a_std[x_i][x_i - 1])

        if not is_real_imag:
            labels = [
                plotting_labels.magn_coeff(x_i, x_i, 'P', True),
                plotting_labels.magn_coeff(x_i, x_i, 'P', False),
            ]
        else:
            labels = [
                plotting_labels.real_coeff(x_i, x_i, 'P', True),
                plotting_labels.real_coeff(x_i, x_i, 'P', False),
            ]
        ylabel = 'Sens. Coeff.'

        plotting.shaded_plot(ts, [y_true, y_est],
                             ylabel=ylabel,
                             ax=ax,
                             labels=labels)

    # Last subplot: condition number of the estimation problem.
    ax = axs[-1]
    ax.plot(ts, cns_a_mean, label='Condition number')
    ax.set_yscale('log')
    ax.set_xlabel(xlabel)
    ax.set_ylabel('Condition number')

    exp_logger.save_figure(fig, 'coefficients_a')

    # Estimated coefficients (for Im{.}).
    if is_real_imag:
        fig, axs = plt.subplots(len(which_i) + 1,
                                1,
                                figsize=(10, 2.5 * len(which_i)),
                                sharex=True)

        for ax, x_i in zip(axs[:-1], which_i):
            y_true = (S_b_true[x_i][x_i - 1], np.zeros(len(ts)))
            y_est = (S_b_mean[x_i][x_i - 1], S_b_std[x_i][x_i - 1])

            labels = [
                plotting_labels.imag_coeff(x_i, x_i, 'P', True),
                plotting_labels.imag_coeff(x_i, x_i, 'P', False),
            ]
            ylabel = 'Sens. Coeff.'

            plotting.shaded_plot(ts, [y_true, y_est],
                                 ylabel=ylabel,
                                 ax=ax,
                                 labels=labels)

        ax = axs[-1]
        ax.plot(ts, cns_b_mean, label='Condition number')
        ax.set_yscale('log')
        ax.set_xlabel(xlabel)
        ax.set_ylabel('Condition number')

        exp_logger.save_figure(fig, 'coefficients_b')

    # Estimated d|X| (using (a) load flow, (b) true coefficients,
    # (c) estimated coefficients).
    fig, axs = plt.subplots(len(which_i) + 1,
                            1,
                            figsize=(10, 2.5 * len(which_i)),
                            sharex=True)

    for ax, x_i in zip(axs[:-1], which_i):
        y_lf = (V_load_flow[x_i], np.zeros(len(ts)))
        y_true_coeff = (V_true_coeff_mean[x_i], V_true_coeff_std[x_i])
        y_est = (V_mean[x_i], V_std[x_i])

        labels = ['Load flow', 'Using true SCs', 'Using est. SCs']
        ylabel = r'$|V_{%d}|$' % x_i

        plotting.shaded_plot(ts, [y_lf, y_true_coeff, y_est],
                             labels=labels,
                             ylabel=ylabel,
                             ax=ax)

    ax = axs[-1]
    ax.plot(ts, cns_a_mean, label='Condition number')
    ax.set_yscale('log')
    ax.set_xlabel(xlabel)
    ax.set_ylabel('Condition number')

    exp_logger.save_figure(fig, 'X')

    # Cramer-Rao lower bound and estimation variance (for |.| or Re{.}).
    fig, axs = plt.subplots(len(which_i) + 1,
                            1,
                            figsize=(10, 2.5 * len(which_i)),
                            sharex=True)

    for ax, x_i in zip(axs[:-1], which_i):
        lb = crlbs_a[x_i - 1]
        variance = S_a_std[x_i][x_i - 1]**2

        labels = ['CRLB', 'Variance']
        ylabel = 'Est. variance'

        plotting.single_plot(ts, [lb, variance],
                             labels=labels,
                             ax=ax,
                             ylabel=ylabel)

    ax = axs[-1]
    ax.plot(ts, crlb_cns_a, label='CRLB condition number')
    ax.set_yscale('log')
    ax.set_xlabel(xlabel)
    ax.set_ylabel('CRLB condition number')

    exp_logger.save_figure(fig, 'crlb_a')

    # Cramer-Rao lower bound and estimation variance (for Im{.}).
    if is_real_imag:
        fig, axs = plt.subplots(len(which_i) + 1,
                                1,
                                figsize=(10, 2.5 * len(which_i)),
                                sharex=True)

        for ax, x_i in zip(axs[:-1], which_i):
            lb = crlbs_b[x_i - 1]
            variance = S_b_std[x_i][x_i - 1]**2

            labels = ['CRLB', 'Variance']
            ylabel = 'Est. variance'

            plotting.single_plot(ts, [lb, variance],
                                 labels=labels,
                                 ax=ax,
                                 ylabel=ylabel)

        ax = axs[-1]
        ax.plot(ts, crlb_cns_b, label='CRLB condition number')
        ax.set_yscale('log')
        ax.set_xlabel(xlabel)
        ax.set_ylabel('CRLB condition number')

        exp_logger.save_figure(fig, 'crlb_b')

    return
Code example #3
0
def run(dataset, sensor_class, coeff_type, N_exp, use_sigma, tau, freq, k_nn,
        which_i, k_pcr, qr, folder, pre_filtering, epochs):
    r"""
    Run `N_exp` sensitivity coefficient estimation experiments using Model 1.

    Model 1 is the linear model of the form: dx = du S + w.

    Parameters
    ----------
    dataset : str
        The unique dataset ID (see `meng.constants`).
    sensor_class : float
        The PMU measurement sensor class (see `meng.constants`).
    coeff_type : {'magn', 'real_imag'}
        Which sensitivity coefficients to estimate.
    N_exp : int
        The number of experiments to run.
    use_sigma : bool
        Whether or not to use the correlation matrix :math:`\Sigma`. If False, it
        is set to the identity matrix I.
    tau : int
        The time window size.
    freq : int
        How often (in timesteps) to estimate the coefficients.
    k_nn : int
        The number of past timesteps given as input to the neural network.
    which_i : list of int or 'all'
        Which coefficients to estimate (eg for voltage coefficients, at which
        buses). Note: a value of 1 corresponds to the 1st bus after the slack.
    k_pcr : int or None
        How many principal components to keep in PCR; set to None to skip the
        PCR step.
    qr : bool
        Set to True to solve the LS problem using Q-R decomposition.
    folder : str
        The name of the experiment folder in which the results are stored.
    pre_filtering : bool
        Whether to do pre-filtering or not.
    epochs : int
        The number of epochs during which to train the neural network.
    """

    is_real_imag = coeff_type == 'real_imag'

    # Create the folder in which the experiment results will be stored.
    # `locals()` here captures the function arguments for logging.
    exp_logger = ExperimentLogger(folder, locals())

    # Load true load flow.
    V_org, I_org, P_org, Q_org, current_idx = load_true_data(dataset)

    # Select the noise std that corresponds to the type of coefficient(s).
    std_abs = constants.SENSOR_STD_ABS[sensor_class]
    std_arg = constants.SENSOR_STD_ANG[sensor_class]
    std_real, std_imag = noise.project(np.abs(V_org), std_abs, std_arg)
    std_a = std_real[1:] if is_real_imag else std_abs
    std_b = std_imag[1:] if is_real_imag else None

    # Pre-estimation pre-filtering step (doing nothing atm).
    # BUG FIX: `np.bool` was removed in NumPy 1.24; use builtin `bool`.
    pf_mask = np.ones(V_org.shape[1], dtype=bool)

    # Extract the list of which coefficients to estimate.
    which_i = utils.unpack_which_i(which_i, V_org.shape[0])

    # Load the true sensitivity coefficients of interest.
    coefficients = load_coefficients(dataset)
    S_a_true, S_b_true = utils.coefficients_setup(coefficients, coeff_type, which_i)
    S_a_true = {k: v[:, pf_mask] for k, v in S_a_true.items()}
    if S_b_true is not None:
        S_b_true = {k: v[:, pf_mask] for k, v in S_b_true.items()}
    del coefficients

    # Transform voltage phasor measurements into either
    # (|x|, Re{x}, or Im{x}) based on `coeff_type`.
    X_org_a, X_org_b = utils.dependent_vars_setup(V_org, coeff_type)

    # Remove slack bus measurements and create delta matrices.
    dPQ_org = np.vstack((np.diff(P_org[1:], axis=1),
                         np.diff(Q_org[1:], axis=1)))[:, pf_mask[1:]]
    dX_org_a = np.diff(X_org_a[1:], axis=1)[:, pf_mask[1:]]
    if is_real_imag:
        dX_org_b = np.diff(X_org_b[1:], axis=1)[:, pf_mask[1:]]

    # Run `N_exp` experiments (noise realizations).
    # NOTE(review): the NN-related accumulators (`S_a_nn_all`, `dV_nn_all`,
    # `ts_nn`) stay empty because the NN estimation calls below are commented
    # out; the corresponding means/stds are therefore empty dicts.
    S_a_all, S_b_all, dV_est_all, dV_true_coeff_all = {}, {}, {}, {}
    S_a_nn_all, dV_nn_all, ts_nn = {}, {}, []
    ts, cns_a_all, cns_b_all = [], [], []
    for n in range(N_exp):

        # Add noise to load flow data.
        V_meas, _, P_meas, Q_meas, _, _, _, _, _, _ = \
            simulate_noisy_meas(sensor_class, V_org, I_org, current_idx)

        # Transform voltage phasor measurements into either
        # (|x|, Re{x}, or Im{x}) based on `coeff_type`.
        X_meas_a, X_meas_b = utils.dependent_vars_setup(V_meas, coeff_type)

        # Remove slack bus measurements and create delta matrices.
        dPQ_meas = np.vstack((np.diff(P_meas[1:], axis=1),
                              np.diff(Q_meas[1:], axis=1)))[:, pf_mask[1:]]
        dX_meas_a = np.diff(X_meas_a[1:], axis=1)[:, pf_mask[1:]]
        if is_real_imag:
            dX_meas_b = np.diff(X_meas_b[1:], axis=1)[:, pf_mask[1:]]
        else:
            dX_meas_b = None

        # Pre-filtering step: check which measurements are valid.
        if pre_filtering:
            valid_timesteps = np.any(dPQ_meas > std_abs, axis=0)
        else:
            # BUG FIX: use a boolean array, consistent with the boolean mask
            # produced by `np.any` in the branch above.
            valid_timesteps = np.ones(dPQ_meas.shape[1], dtype=bool)

        # Estimate the coefficients using linear nn_models.
        S_a, ts, cns_a = linear_model(dX_meas_a, dPQ_meas, use_sigma, tau,
                                      freq, which_i, k_pcr, qr, valid_timesteps)
        if is_real_imag:
            S_b, _, cns_b = linear_model(dX_meas_b, dPQ_meas, use_sigma, tau,
                                         freq, which_i, k_pcr, qr, valid_timesteps)
        else:
            S_b, cns_b = None, None

        # Construct estimated |X| (using estimated coefficients).
        dPQ_meas = dPQ_meas[:, ts]
        dV_est = _estimate_dVmagn(dPQ_meas, S_a, S_b, is_real_imag)

        # Construct estimated |X| (using true coefficients).
        S_a_true_ts = {k: v[:, ts] for k, v in S_a_true.items()}
        S_b_true_ts = {k: v[:, ts] for k, v in S_b_true.items()} if S_b is not None else None
        dV_true_coeff = _estimate_dVmagn(dPQ_meas, S_a_true_ts, S_b_true_ts,
                                         is_real_imag)

        # Store experiment results.
        _add_results_to_dict(S_a, S_a_all)
        # _add_results_to_dict(S_a_nn, S_a_nn_all)
        if is_real_imag:
            _add_results_to_dict(S_b, S_b_all)
        _add_results_to_dict(dV_est, dV_est_all)
        _add_results_to_dict(dV_true_coeff, dV_true_coeff_all)
        # _add_results_to_dict(dV_nn, dV_nn_all)
        cns_a_all.append(cns_a)
        if is_real_imag:
            cns_b_all.append(cns_b)

    # Compute the mean of the estimated coefficients and predicted dx.
    compute_dict_mean = lambda x: {k: np.mean(v, axis=0) for k, v in x.items()}
    S_a_mean = compute_dict_mean(S_a_all)
    S_a_nn_mean = compute_dict_mean(S_a_nn_all)
    S_b_mean = compute_dict_mean(S_b_all) if is_real_imag else None
    dV_mean = compute_dict_mean(dV_est_all)
    dV_true_coeff_mean = compute_dict_mean(dV_true_coeff_all)
    dV_nn_mean = compute_dict_mean(dV_nn_all)

    # Compute the std of the estimated coefficients and predicted dx.
    def _compute_dict_std(d):
        """Per-key std over the experiment axis of each stacked result."""
        answer = {}
        for k, v in d.items():
            answer[k] = np.std(v, axis=0)

        return answer

    S_a_std = _compute_dict_std(S_a_all)
    S_a_nn_std = _compute_dict_std(S_a_nn_all)
    S_b_std = _compute_dict_std(S_b_all) if is_real_imag else None
    dV_std = _compute_dict_std(dV_est_all)
    dV_true_coeff_std = _compute_dict_std(dV_true_coeff_all)
    dV_nn_std = _compute_dict_std(dV_nn_all)

    # Compute the true voltage magnitude deviations (from the load flow).
    dV_load_flow = np.diff(np.abs(V_org[1:]), axis=1)
    dV_load_flow = {i: dV_load_flow[i-1, ts] for i in which_i}

    # Compute the mean of the condition numbers.
    cns_a_mean = np.mean(cns_a_all, axis=0)
    cns_b_mean = np.mean(cns_b_all, axis=0) if cns_b_all else []

    # Compute Cramer-Rao lower bound on the coefficient estimations.
    crlbs_a, crlbs_b, crlb_cns_a, crlb_cns_b = [], [], [], []
    for t in ts:

        # Compute the average std over the time window for real-imag coefficients.
        if not is_real_imag:
            std = std_a
        else:
            std = np.mean(std_a[:, t-tau: t], axis=1)
            std = np.hstack((std, std))

        H = dPQ_org[:, pf_mask[1:]][:, t-tau: t].T
        lb, cn = crlb.compute_crlb(H, std, use_sigma)
        crlbs_a.append(lb)
        crlb_cns_a.append(cn)

        if is_real_imag:
            std = np.mean(std_b[:, t-tau: t], axis=1)
            std = np.hstack((std, std))
            lb, cn = crlb.compute_crlb(H, std, use_sigma)
            crlbs_b.append(lb)
            crlb_cns_b.append(cn)

    crlbs_a = np.vstack(crlbs_a).T
    if is_real_imag:
        crlbs_b = np.vstack(crlbs_b).T

    # Keep the true coefficients for the timesteps of interest only.
    S_a_true = {k: v[:, ts] for k, v in S_a_true.items()}
    S_b_true = {k: v[:, ts] for k, v in S_b_true.items()} if is_real_imag else {}

    # Store numerical results to files.
    data = {
        'S_a_mean': S_a_mean,
        'S_a_std': S_a_std,
        'S_b_mean': S_b_mean,
        'S_b_std': S_b_std,
        'dV_mean': dV_mean,
        'dV_std': dV_std,
        'S_a_true': S_a_true,
        'S_b_true': S_b_true,
        'dV_load_flow': dV_load_flow,
        'dV_true_coeff_mean': dV_true_coeff_mean,
        'dV_true_coeff_std': dV_true_coeff_std,
        'ts': ts,
        'cns_a': cns_a_mean,
        'cns_b': cns_b_mean,
        'crlb_a': crlbs_a,
        'crlb_b': crlbs_b,
        'S_a_nn_mean': S_a_nn_mean,
        'S_a_nn_std': S_a_nn_std,
        'dV_nn_mean': dV_nn_mean,
        'dV_nn_std': dV_nn_std,
        'ts_nn': ts_nn
    }
    exp_logger.save_data(data, 'results')


    #######################################
    ############## PLOTTING ###############
    #######################################
    xlabel = 'Time (s)'

    # Select timesteps estimated by neural network to plot.
    # nn_mask = np.array(ts) - ts_nn[0]
    # nn_mask = nn_mask[nn_mask >= 0]
    # ts_nn_plot = ts_nn_plot = ts_nn[nn_mask]

    # Estimated coefficients (for |.| or Re{.}).
    fig, axs = plt.subplots(len(which_i)+1, 1, figsize=(10, 2.5*len(which_i)),
                            sharex=True)

    for ax, x_i in zip(axs[:-1], which_i):
        y_true = (S_a_true[x_i][x_i-1], np.zeros(len(ts)))
        y_est = (S_a_mean[x_i][x_i-1], S_a_std[x_i][x_i-1])
        # y_nn = (S_a_nn_mean[x_i][x_i-1][nn_mask], S_a_nn_std[x_i][x_i-1][nn_mask])

        if not is_real_imag:
            labels = [plotting_labels.magn_coeff(x_i, x_i, 'P', 'true'),
                      plotting_labels.magn_coeff(x_i, x_i, 'P', 'M1')]
                      # plotting_labels.magn_coeff(x_i, x_i, 'P', 'NN')]
        else:
            labels = [plotting_labels.real_coeff(x_i, x_i, 'P', 'true'),
                      plotting_labels.real_coeff(x_i, x_i, 'P', 'M1')]
                      # plotting_labels.real_coeff(x_i, x_i, 'P', 'NN'),]
        ylabel = 'Sens. Coeff.'

        plotting.shaded_plot([ts, ts], [y_true, y_est],
                             ylabel=ylabel, ax=ax, labels=labels)

    # Last subplot: condition number of the estimation problem.
    ax = axs[-1]
    ax.plot(ts, cns_a_mean, label='Condition number')
    ax.set_yscale('log')
    ax.set_xlabel(xlabel)
    ax.set_ylabel('Condition number')

    exp_logger.save_figure(fig, 'coefficients_a')

    # Estimated coefficients (for Im{.}).
    if is_real_imag:
        fig, axs = plt.subplots(len(which_i) + 1, 1,
                                figsize=(10, 2.5 * len(which_i)), sharex=True)

        for ax, x_i in zip(axs[:-1], which_i):
            y_true = (S_b_true[x_i][x_i - 1], np.zeros(len(ts)))
            y_est = (S_b_mean[x_i][x_i - 1], S_b_std[x_i][x_i - 1])

            labels = [plotting_labels.imag_coeff(x_i, x_i, 'P', True),
                      plotting_labels.imag_coeff(x_i, x_i, 'P', False),]
            ylabel = 'Sens. Coeff.'

            plotting.shaded_plot(ts, [y_true, y_est], ylabel=ylabel,
                                           ax=ax, labels=labels)

        ax = axs[-1]
        ax.plot(ts, cns_b_mean, label='Condition number')
        ax.set_yscale('log')
        ax.set_xlabel(xlabel)
        ax.set_ylabel('Condition number')

        exp_logger.save_figure(fig, 'coefficients_b')

    # Estimated d|X| (using (a) load flow, (b) true coefficients,
    # (c) estimated coefficients).
    fig, axs = plt.subplots(len(which_i)+1, 1, figsize=(10, 2.5*len(which_i)),
                            sharex=True)

    for ax, x_i in zip(axs[:-1], which_i):
        y_lf = (dV_load_flow[x_i], np.zeros(len(ts)))
        y_true_coeff = (dV_true_coeff_mean[x_i], dV_true_coeff_std[x_i])
        y_est = (dV_mean[x_i], dV_std[x_i])
        # y_nn = (dV_nn_mean[x_i][nn_mask], dV_nn_std[x_i][nn_mask])

        labels = ['Load flow', 'True SCs', 'M1', 'NN']
        ylabel = r'$\Delta|V_{%d}|$' % x_i

        plotting.shaded_plot([ts, ts, ts],
                             [y_lf, y_true_coeff, y_est],
                             labels=labels, ylabel=ylabel, ax=ax)

    ax = axs[-1]
    ax.plot(ts, cns_a_mean, label='Condition number')
    ax.set_yscale('log')
    ax.set_xlabel(xlabel)
    ax.set_ylabel('Condition number')

    exp_logger.save_figure(fig, 'dV')

    # Cramer-Rao lower bound and estimation variance (for |.| or Re{.}).
    fig, axs = plt.subplots(len(which_i)+1, 1, figsize=(10, 2.5*len(which_i)),
                            sharex=True)

    for ax, x_i in zip(axs[:-1], which_i):
        lb = crlbs_a[x_i-1]
        variance = S_a_std[x_i][x_i-1] ** 2

        labels = ['CRLB', 'Variance']
        ylabel = 'Est. variance'

        plotting.single_plot(ts, [lb, variance], labels=labels, ax=ax,
                             ylabel=ylabel)

    ax = axs[-1]
    ax.plot(ts, crlb_cns_a, label='CRLB condition number')
    ax.set_yscale('log')
    ax.set_xlabel(xlabel)
    ax.set_ylabel('CRLB condition number')

    exp_logger.save_figure(fig, 'crlb_a')

    # Cramer-Rao lower bound and estimation variance (for Im{.}).
    if is_real_imag:
        fig, axs = plt.subplots(len(which_i) + 1, 1,
                                figsize=(10, 2.5 * len(which_i)),
                                sharex=True)

        for ax, x_i in zip(axs[:-1], which_i):
            lb = crlbs_b[x_i - 1]
            variance = S_b_std[x_i][x_i - 1] ** 2

            labels = ['CRLB', 'Variance']
            ylabel = 'Est. variance'

            plotting.single_plot(ts, [lb, variance], labels=labels, ax=ax,
                                 ylabel=ylabel)

        ax = axs[-1]
        ax.plot(ts, crlb_cns_b, label='CRLB condition number')
        ax.set_yscale('log')
        ax.set_xlabel(xlabel)
        ax.set_ylabel('CRLB condition number')

        exp_logger.save_figure(fig, 'crlb_b')

    plt.show()

    return
Code example #4
0
File: lstm_normalization.py — Project: robinhenry/meng
# NOTE(review): this is the middle of a script — `V_org`, `V_org_magn`,
# `V_org_re`, `P_org`, `Q_org`, `I_org`, `current_idx`, `sensor_class`,
# `dataset` and `x_i` are defined earlier in the file (not shown here).

# Imaginary part of the bus voltages, with the slack bus (row 0) removed.
V_org_im = np.imag(V_org[1:])
# Stack active and reactive power injections (slack removed) into one matrix.
PQ_org = np.vstack((P_org[1:], Q_org[1:]))
# N buses (excluding slack), T timesteps.
N, T = V_org_magn.shape
# Same injections with an all-ones row appended to model a bias term.
PQ_org_bias = np.vstack((PQ_org, np.ones(T)))

# Create measurement delta matrices used in Model 1.
dV_org_magn = np.diff(V_org_magn, axis=1)
dV_org_re = np.diff(V_org_re, axis=1)
dV_org_im = np.diff(V_org_im, axis=1)
dPQ_org = np.diff(PQ_org, axis=1)

################### GENERATE NOISY DATA ###################

# Add noise to load flow data (one noise realization for the whole script).
V_meas, _, P_meas, Q_meas, std_abs, std_ang, std_re, std_im, _, _ = \
    simulate_noisy_meas(sensor_class, V_org, I_org, current_idx)

# Remove slack measurements and create measurement matrices used in Model 2.
V_meas = V_meas[1:]
PQ_meas = np.vstack((P_meas[1:], Q_meas[1:]))

################### SELECT TYPE OF COEFFICIENT ###################

# Work with voltage magnitudes from here on.
V_meas = np.abs(V_meas)

# Load true coefficients (diagonal magnitude coefficients for bus `x_i`).
coefficients = load_coefficients(dataset)
Kp_true = coefficients['vmagn_p'][x_i - 1, x_i - 1]
Kq_true = coefficients['vmagn_q'][x_i - 1, x_i - 1]

################### SPLIT TRAINING, VALIDATION AND TESTING DATA ###################
Code example #5
0
File: run_M1.py — Project: robinhenry/meng
def run(dataset, sensor_class, which_i, coeff_type, k_nn, epochs, folder,
        T_train, freq):
    """Estimate sensitivity coefficients with a feedforward neural net.

    Trains one network per requested bus on the first `T_train` timesteps
    of noisy measurement deltas, then plots the estimated coefficients and
    predicted voltage-magnitude deviations against the load-flow truth.

    Parameters
    ----------
    dataset : str
        Unique dataset ID used to load the true load flow.
    sensor_class : float
        PMU measurement sensor class (noise level).
    which_i : specification of which coefficients to estimate.
    coeff_type : str
        Dependent-variable type: |x|, Re{x} or Im{x}.
    k_nn : int
        Number of past timesteps fed to the network.
    epochs : int
        Training epochs per bus.
    folder : str
        Output folder for experiment artefacts.
    T_train : int
        Number of timesteps used for training.
    freq : int
        Plotting subsample frequency.
    """

    # Folder where all experiment results (figures, ...) are stored.
    exp_logger = ExperimentLogger(folder, locals())

    # True (noise-free) load-flow time series.
    V_org, I_org, P_org, Q_org, current_idx = load_true_data(dataset)
    n_timesteps = V_org.shape[1]

    # Expand the specification of which coefficients to estimate.
    which_i = utils.unpack_which_i(which_i, V_org.shape[0])

    # Measurement-noise standard deviations for this sensor class.
    std_abs = constants.SENSOR_STD_ABS[sensor_class]
    std_arg = constants.SENSOR_STD_ANG[sensor_class]

    # Map voltage phasors to the dependent variable(s)
    # (|x|, Re{x}, or Im{x}) according to `coeff_type`.
    X_org_a, X_org_b = utils.dependent_vars_setup(V_org, coeff_type)

    # Noise-free delta matrices, slack bus (row 0) removed.
    dP_org = np.diff(P_org[1:], axis=1)
    dQ_org = np.diff(Q_org[1:], axis=1)
    dPQ_org = np.vstack((dP_org, dQ_org))
    dX_org_a = np.diff(X_org_a[1:], axis=1)

    # Noisy measurements of the same load flow.
    V_meas, _, P_meas, Q_meas, _, _, _, _, _, _ = \
        simulate_noisy_meas(sensor_class, V_org, I_org, current_idx)

    # Same dependent-variable mapping, applied to the noisy phasors.
    X_meas_a, X_meas_b = utils.dependent_vars_setup(V_meas, coeff_type)

    # Noisy delta matrices, slack bus removed.
    dP_meas = np.diff(P_meas[1:], axis=1)
    dQ_meas = np.diff(Q_meas[1:], axis=1)
    dPQ_meas = np.vstack((dP_meas, dQ_meas))
    dX_meas_a = np.diff(X_meas_a[1:], axis=1)

    # True sensitivity coefficients for the buses of interest.
    coefficients = load_coefficients(dataset)
    S_a_true, S_b_true = utils.coefficients_setup(coefficients, coeff_type, which_i)
    del coefficients

    # True voltage-magnitude deviations (from the load flow).
    dV_load_flow = np.diff(np.abs(V_org[1:]), axis=1)

    # Timestep bookkeeping:
    #   t_nn      - timesteps the NN produces estimates for,
    #   t_plot    - subsampled timesteps shown in the figures,
    #   t_nn_plot - indices into the NN output matching `t_plot`.
    t_nn = np.arange(T_train + k_nn, n_timesteps)
    t_plot = np.arange(T_train + k_nn, n_timesteps, freq)
    t_nn_plot = np.arange(0, n_timesteps - T_train - k_nn, freq)

    # Chronological train/test split of the noisy deltas.
    X_train, X_test = dX_meas_a[:, :T_train], dX_meas_a[:, T_train:]
    PQ_train, PQ_test = dPQ_meas[:, :T_train], dPQ_meas[:, T_train:]

    fig_size = (10, 2.5 * len(which_i))
    fig_1, axs_1 = plt.subplots(len(which_i), 1, sharex=True, figsize=fig_size)
    fig_2, axs_2 = plt.subplots(len(which_i), 1, sharex=True, figsize=fig_size)

    # One network (and one row of subplots) per requested bus.
    for x_i, ax_coef, ax_dv in zip(which_i, axs_1, axs_2):

        # Assemble the training set for this bus and fit the network.
        train_loader = build_training_dataloader(X_train, PQ_train, x_i, k_nn)
        n_inputs = (dX_meas_a.shape[0] + dPQ_meas.shape[0]) * k_nn
        n_outputs = (dPQ_meas.shape[0])
        model = FeedForward(n_inputs, [128, 128], n_outputs, k_nn)
        train_loss = nn.train(model, train_loader, lr=1e-3, epochs=epochs, l2=0.)

        # Estimate the coefficients on the held-out tail of the signal.
        test_loader = build_testing_dataloader(X_test, PQ_test, x_i, k_nn)
        S_a_nn, dV_nn, dV_nn_true = model.predict(test_loader)

        # Estimated vs. true coefficient trajectories.
        ax_coef.plot(t_plot, S_a_true[x_i][x_i - 1][t_plot], label='True')
        ax_coef.plot(t_plot, S_a_nn[x_i - 1][t_nn_plot], label='Neural net')
        ax_coef.legend(loc='upper right')

        # Predicted vs. true d|X| trajectories.
        ax_dv.plot(t_plot, dV_load_flow[x_i - 1][t_plot], label='True')
        ax_dv.plot(t_plot, dV_nn[t_nn_plot], label='Neural net')
        ax_dv.legend(loc='upper right')

    exp_logger.save_figure(fig_1, 'coefficients')
    exp_logger.save_figure(fig_2, 'dV')

    plt.show()

    print('Done!')
コード例 #6
0
def run(x_i, sensor_class, seed):
    """Cross-noise evaluation of sensitivity-coefficient estimators.

    Trains a feedforward NN and an LSTM on measurements corrupted with
    noise of class `sensor_class`, then evaluates them (together with a
    sliding-window least-squares baseline) on test sets corrupted with
    several *other* noise classes, saving comparison figures.

    Parameters
    ----------
    x_i : int
        1-based index of the bus whose self-coefficients are estimated.
    sensor_class : float
        PMU sensor class used to generate the *training* noise.
    seed : int
        Random seed applied to both torch and numpy.
    """

    ####################### PARAMETERS #######################

    # General parameters.
    dataset = 'cigre13'
    sc_train = sensor_class

    # Training (12h), validation (6h), testing (6h) splits.
    ts_train = np.arange(0, 12 * 3600)
    ts_val   = np.arange(12 * 3600, 18 * 3600)
    # Last timestep dropped: delta matrices are one column shorter.
    ts_test  = np.arange(18 * 3600, 24 * 3600 - 1)
    T_train, T_val, T_test = len(ts_train), len(ts_val), len(ts_test)

    # Feedforward neural network parameters.
    k_nn = 10
    nn_epoch_max = 10
    hidden_shape = [128, 64]

    # LSTM parameters.
    k_lstm = 50
    hidden_layer_size = 64
    lstm_epoch_max = 10
    batch_size = 64

    # Least squares model (window length and estimation frequency).
    tau = 1000
    freq = 50


    ####################### SET RANDOM SEED #######################
    torch.manual_seed(seed)
    np.random.seed(seed)


    ####################### LOAD TRUE DATA #######################

    # Load true load flow.
    V_org, I_org, P_org, Q_org, current_idx = load_true_data(dataset)

    # Remove slack measurements and create measurement matrices used in Model 2.
    V_org_magn = np.abs(V_org[1:])
    N, T = V_org_magn.shape

    # Create measurement delta matrix to be used as target.
    dV_org_magn = np.diff(V_org_magn, axis=1)


    ################### GENERATE NOISY DATA FOR TRAINING ###################

    # Add noise to load flow data.
    V_meas, _, P_meas, Q_meas, std_abs, std_ang, std_re, std_im, _, _ = \
        simulate_noisy_meas(sc_train, V_org, I_org, current_idx)

    # Remove slack measurements and create measurement matrices used in Model 2.
    V_meas = V_meas[1:]
    PQ_meas = np.vstack((P_meas[1:], Q_meas[1:]))


    ################### SELECT TYPE OF COEFFICIENT ###################

    # Select type of dependent variable (voltage magnitude).
    V_meas = np.abs(V_meas)

    # Load the true diagonal (self) coefficients for bus `x_i` (1-based).
    coefficients = load_coefficients(dataset)
    Kp_true = coefficients['vmagn_p'][x_i - 1, x_i - 1]
    Kq_true = coefficients['vmagn_q'][x_i - 1, x_i - 1]


    ################### SPLIT TRAINING, VALIDATION AND TESTING DATA ###################

    # Train matrices.
    V_meas_tr  = V_meas[:, ts_train]
    PQ_meas_tr = PQ_meas[:, ts_train]
    X_train = np.vstack((V_meas_tr, PQ_meas_tr))

    # Validation matrices.
    V_meas_val  = V_meas[:, ts_val]
    PQ_meas_val = PQ_meas[:, ts_val]
    X_val = np.vstack((V_meas_val, PQ_meas_val))


    ################### PRE-PROCESS DATA ###################

    # Normalize input data; the scaler is fit on training data only and
    # re-used for validation and (below) test data.
    norm_scaler = MinMaxScaler()
    X_train = norm_scaler.fit_transform(X_train.T).T
    X_val = norm_scaler.transform(X_val.T).T


    ################### FEEDFORWARD NEURAL NET ###################
    print('Training feedforward neural net...')

    in_shape = 3 * N * k_nn
    out_shape = 2 * N

    # Build training, validation, and test sets.
    train_data = nn.build_training_dataloader(X_train, PQ_meas_tr, V_meas_tr, x_i, k_nn)
    val_data = nn.build_training_dataloader(X_val, PQ_meas_val, V_meas_val, x_i, k_nn)

    # Initialize and train the models.
    nn_model = nn.FeedForward(in_shape, hidden_shape, out_shape, k_nn)
    nn_model, _ = fc.nn.train(nn_model, train_data, val_data, epochs=nn_epoch_max)


    ################### LSTM NEURAL NET ###################
    print('\nTraining LSTMs...')

    in_shape = 3 * N
    out_shape = 2 * N

    # Build training and validation sets.
    train_data = lstm.build_dataloader(X_train, PQ_meas_tr, V_meas_tr, x_i, k_lstm, batch_size)
    val_data   = lstm.build_dataloader(X_val, PQ_meas_val, V_meas_val, x_i, k_lstm, batch_size)

    # Initialize and train the models.
    lstm_model = lstm.LSTM(in_shape, hidden_layer_size, out_shape, batch_size)
    lstm_model, _ = fc.lstm.train(lstm_model, train_data, val_data, lr=1e-3, epochs=lstm_epoch_max, l2=0.)


    # Evaluate the models (trained on `sc_train` noise) on test sets with
    # different noise levels, including noise-free (0.).
    for sc_test in [0., 0.2, 0.5, 1.0]:

        folder = f'cross_trained_{dataset}_train{sc_train}_test{sc_test}'
        logger = ComparisonLogger(folder)


        ################### GENERATE NOISY DATA FOR TESTING ###################

        # Add noise to load flow data.
        V_meas, _, P_meas, Q_meas, std_abs, std_ang, std_re, std_im, _, _ = \
            simulate_noisy_meas(sc_test, V_org, I_org, current_idx)

        # Remove slack measurements and create measurement matrices used in Model 2.
        V_meas = V_meas[1:]
        PQ_meas = np.vstack((P_meas[1:], Q_meas[1:]))
        # Constant row of ones lets the LS model fit a bias term.
        PQ_meas_bias = np.vstack((PQ_meas, np.ones(T)))


        ################### SELECT TYPE OF COEFFICIENT ###################

        # Select type of dependent variable.
        V_meas = np.abs(V_meas)
        dPQ_meas = np.diff(PQ_meas, axis=1)


        ################### SPLIT TESTING DATA ###################

        # Testing matrices.
        V_meas_test  = V_meas[:, ts_test]
        PQ_meas_test = PQ_meas[:, ts_test]
        X_test = np.vstack((V_meas_test, PQ_meas_test))
        PQ_meas_bias_test = PQ_meas_bias[:, ts_test]
        dPQ_meas_test = dPQ_meas[:, ts_test]


        ################### PRE-PROCESS DATA ###################

        # Normalize test input data with the scaler fit on training data.
        X_test = norm_scaler.transform(X_test.T).T


        ################### INFERENCE WITH PRE-TRAINED MODELS ###################

        # Feedforward model.
        test_data = nn.build_testing_dataloader(X_test, PQ_meas_test, V_meas_test, x_i, k_nn)
        S_nn, y_pred_nn, _ = nn_model.predict(test_data)

        # LSTM model.
        test_data  = lstm.build_dataloader(X_test, PQ_meas_test, V_meas_test, x_i, k_lstm, batch_size)
        S_lstm, y_pred_lstm, _ = lstm.predict(lstm_model, test_data, batch_size)


        ################### LEAST SQUARES MODEL ###################
        print('\tLeast squares estimation...')

        which_i = np.array([x_i])
        # NOTE: `np.bool` was deprecated in NumPy 1.20 and removed in 1.24;
        # use the builtin `bool` dtype directly.
        valid_timesteps = np.ones(T_test - 1, dtype=bool)
        use_sigma = False
        k_pcr = None
        qr = False

        S_ls, ts_ls, _ = linear.linear_model(V_meas_test, PQ_meas_bias_test, use_sigma,
                                             tau, freq, which_i, k_pcr, qr, valid_timesteps)

        # Remove bias terms.
        S_ls = {a: b[:-1] for a, b in S_ls.items()}

        y_pred_ls = fc.linear.lm_estimate_dVmagn(dPQ_meas_test[:, ts_ls], S_ls, None, False)
        S_ls, y_pred_ls = S_ls[x_i], y_pred_ls[x_i]

        ################### VISUALIZE RESULTS ON TEST SET ###################

        # Map LS estimation timesteps to absolute time (hours) and to the
        # corresponding indices in the NN / LSTM output sequences, which
        # start after their respective look-back windows.
        ts_all = ts_test[ts_ls]
        ts_all_hour = ts_all / 3600
        x_nn = ts_ls - k_nn + 1
        x_lstm = ts_ls - k_lstm + 1

        fig = plt.figure(figsize=(10, 5))
        gs = fig.add_gridspec(3, 4, hspace=0.05)

        # Plot Kp coefficients.
        ax = fig.add_subplot(gs[0, :-1])
        ax.plot(ts_all_hour, Kp_true[ts_all], label='True')
        ax.plot(ts_all_hour, S_ls[x_i - 1], label='LS')
        ax.plot(ts_all_hour, S_nn[x_i - 1, x_nn], label='NN')
        ax.plot(ts_all_hour, S_lstm[x_i - 1, x_lstm], label='LSTM')
        ax.set_ylabel(r'$\partial |V_{%d}|/\partial P_{%d}$' % (x_i, x_i))
        ax.set_xticks([])

        # Plot Kq coefficients.
        ax = fig.add_subplot(gs[1, :-1])
        ax.plot(ts_all_hour, Kq_true[ts_all], label='True')
        ax.plot(ts_all_hour, S_ls[x_i - 1 + N], label='LS')
        ax.plot(ts_all_hour, S_nn[x_i - 1 + N, x_nn], label='NN')
        ax.plot(ts_all_hour, S_lstm[x_i - 1 + N, x_lstm], label='LSTM')
        ax.legend(loc='upper right')
        ax.set_ylabel(r'$\partial |V_{%d}|/\partial Q_{%d}$' % (x_i, x_i))
        ax.set_xticks([])

        # Plot dV (every other point to keep the figure light).
        ax = fig.add_subplot(gs[2, :-1])
        ax.plot(ts_all_hour[::2], dV_org_magn[x_i - 1, ts_all[::2]], label='True')
        ax.plot(ts_all_hour[::2], y_pred_ls[::2], label='LS')
        ax.plot(ts_all_hour[::2], y_pred_nn[x_nn[::2]], label='NN')
        ax.plot(ts_all_hour[::2], y_pred_lstm[x_lstm[::2]], label='LSTM')
        ax.set_ylabel(r'$\Delta |V_{%d}|$' % (x_i))
        ax.set_xlabel('Time (h)')

        # Plot Kp errors.
        ax = fig.add_subplot(gs[0, -1])
        e_ls   = 100 * norm_e(Kp_true[ts_all], S_ls[x_i - 1])
        e_nn   = 100 * norm_e(Kp_true[ts_all], S_nn[x_i - 1, x_nn])
        e_lstm = 100 * norm_e(Kp_true[ts_all], S_lstm[x_i - 1, x_lstm])
        ax.boxplot([e_ls, e_nn, e_lstm], labels=['LS', 'NN', 'LSTM'])
        ax.set_xticks([])

        # Plot Kq errors.
        ax = fig.add_subplot(gs[1, -1])
        e_ls   = 100 * norm_e(Kq_true[ts_all], S_ls[x_i - 1 + N])
        e_nn   = 100 * norm_e(Kq_true[ts_all], S_nn[x_i - 1 + N, x_nn])
        e_lstm = 100 * norm_e(Kq_true[ts_all], S_lstm[x_i - 1 + N, x_lstm])
        ax.boxplot([e_ls, e_nn, e_lstm], labels=['LS', 'NN', 'LSTM'])
        ax.set_ylabel('Normalized error [%]')
        ax.set_xticks([])

        # Plot d|V| errors.
        ax = fig.add_subplot(gs[2, -1])
        e_ls = 100 * norm_e(dV_org_magn[x_i - 1, ts_all], y_pred_ls)
        e_nn = 100 * norm_e(dV_org_magn[x_i - 1, ts_all], y_pred_nn[x_nn])
        e_lstm = 100 * norm_e(dV_org_magn[x_i - 1, ts_all], y_pred_lstm[x_lstm])
        ax.boxplot([e_ls, e_nn, e_lstm], labels=['LS', 'NN', 'LSTM'], showfliers=False)

        gs.tight_layout(fig)
        plt.show()

        logger.save_fig(fig, f'x_{x_i}_s{seed}.png')

        print('Done!')