Ejemplo n.º 1
0
            # NOTE(review): fragment starts mid-loop — the enclosing loops over
            # sigma_index/trials_index (and W, L_w, size, cancelled_y) are
            # defined outside this view.
            # Stack L_w+1 shifted copies of the cancelled signal into a
            # (L_w+1, size-L_w) matrix of overlapping windows.
            y_vec = np.array(
                [cancelled_y[i:i + size - L_w] for i in range(L_w + 1)])

            # Linear equalization: z = W^H y (W presumably the MMSE filter,
            # computed outside this view — TODO confirm).
            z = np.matmul(W.conj().T, y_vec)

            # Hard-decision QPSK demodulation, then count symbol errors
            # against the transmitted sequence (truncated to matching length).
            d_hat = m.demodulate_qpsk(z)
            d_hat_len = d_hat.shape[0]
            error = np.sum(system_model.d_s[0:d_hat_len] != d_hat)

            error_array[sigma_index][trials_index] = error

    # Persist everything needed to regenerate the figure later.
    result = Result(params, error_array, loss_array, val_loss_array)
    with open(output_dir + '/result.pkl', 'wb') as f:
        pickle.dump(result, f)

    ber_fig, ber_ax = graph.new_snr_ber_canvas(params['SNR_MIN'],
                                               params['SNR_MAX'])
    # Total symbols per SNR point; d_hat_len is taken from the LAST loop
    # iteration, so this assumes every trial produces the same length.
    n_sum = d_hat_len * params['SNR_AVERAGE']

    errors_sum = np.sum(error_array, axis=1)  # sum errors over trials
    bers = errors_sum / n_sum
    ber_ax.plot(snrs_db,
                bers,
                color="k",
                marker='o',
                linestyle='--',
                label="Previous research + MMSE")
    ber_ax.legend()
    plt.savefig(output_dir + '/SNR_BER.pdf', bbox_inches='tight')

    # PNG copy of the same figure (e.g. for notification previews).
    output_png_path = output_dir + '/SNR_BER.png'
    plt.savefig(output_png_path, bbox_inches='tight')
Ejemplo n.º 2
0
                # NOTE(review): fragment starts mid-call — the callee (an
                # NN-model constructor/trainer, presumably) and the enclosing
                # loops over sigma_index/trials_index are outside this view.
                params.PA, params.LNA, params.RX_IQI, params.nHidden,
                params.optimizer, params.learningRate, params.momentum,
                params.trainingRatio, params.nEpochs, params.batchSize,
                params.delay, params.standardization)

            # Record per-(SNR, trial) error count plus the Keras-style
            # training/validation loss curves from model.history.
            errors[sigma_index][trials_index] = nn_model.error
            losss[sigma_index][trials_index][:] = nn_model.history.history[
                'loss']
            val_losss[sigma_index][trials_index][:] = nn_model.history.history[
                'val_loss']

    # Persist everything needed to regenerate the figure later.
    result = Result(params, errors, losss, val_losss)
    with open(output_dir + '/result.pkl', 'wb') as f:
        pickle.dump(result, f)

    ber_fig, ber_ax = graph.new_snr_ber_canvas(params.SNR_MIN, params.SNR_MAX)
    # Total test bits per SNR point, summed over the averaging trials.
    n_sum = params.test_bits * params.SNR_AVERAGE
    errors_sum = np.sum(errors, axis=1)  # sum errors over trials
    bers = errors_sum / n_sum
    ber_ax.plot(snrs_db,
                bers,
                color="k",
                marker='o',
                linestyle='--',
                label="OFDM")

    plt.tight_layout()
    plt.savefig(output_dir + '/SNR_BER.pdf', bbox_inches='tight')

    # PNG copy for Slack notification.
    output_png_path = output_dir + '/SNR_BER.png'
Ejemplo n.º 3
0
# Re-plotting script: loads previously pickled simulation results and draws
# SNR-vs-BER curves for the baseline (no canceller) and the conventional
# method. NOTE(review): truncated — the original likely plotted more curves
# after this point.
if __name__ == '__main__':
    SIMULATIONS_NAME = os.path.basename(__file__).split('.')[0]
    load_files = 1  # number of result files to load for the same condition

    # Fresh timestamped output directory for this plotting run.
    dirname = settings.dirname_current_datetime(SIMULATIONS_NAME)
    # dirname = "../results/keep/ofdm_fde_system_model_chain_load"
    settings.init_output(dirname)

    # Reuse the parameters of the original simulation run being re-plotted.
    param_path = "../results/keep/fig031921/ber_3/params.json"
    params = settings.load_param(param_path)
    # n_sum = params["test_bits"] * params['SNR_AVERAGE'] * load_files
    n_sum = params['test_bits'] * params['trials']

    snrs_db = np.linspace(params['graph_x_min'], params['graph_x_max'], params['graph_x_num'])
    fig, ax = graph.new_snr_ber_canvas(params['graph_x_min'], params['graph_x_max'], -4)
    ax.set_yticks([10**0, 10**-1, 10**-2, 10**-3, 10**-4])

    # The "previous" (conventional) results used a different test-bit count.
    previous_n_sum = params['previous_test_bits'] * params['trials']
    pkl_path = "../results/keep/fig031921/ber_3/result.pkl"
    result = load_pkl_file(pkl_path)
    errors_sum = np.sum(result.non_cancell_error_array, axis=1)
    bers = errors_sum / previous_n_sum
    # None becomes NaN in the float array, so zero-error points are simply
    # omitted from the log-scale plot instead of breaking it.
    np.place(bers, bers == 0, None)
    ax.plot(snrs_db, bers, color='k', marker='x', linestyle=':', label="w/o canceller " + r"$(I=1)$", ms=10)

    errors_sum = np.sum(result.previous_errors, axis=1)
    bers = errors_sum / previous_n_sum
    np.place(bers, bers == 0, None)
    ax.plot(snrs_db, bers, color='k', marker='o', linestyle='--', label="Conventional " + r"$(I=1)$" + " [5]" , ms=10, markerfacecolor='None')
                # NOTE(review): fragment starts mid-call — the callee (an
                # NN-model constructor/trainer, presumably) and the enclosing
                # loops over graph_x_index/trials_index are outside this view.
                params.nHidden, params.optimizer, params.learningRate,
                params.momentum, params.trainingRatio, params.nEpochs,
                params.batchSize, params.delay, params.standardization)

            # Record per-(x-axis point, trial) error count plus the
            # Keras-style training/validation loss curves.
            errors[graph_x_index][trials_index] = nn_model.error
            losss[graph_x_index][trials_index][:] = nn_model.history.history[
                'loss']
            val_losss[graph_x_index][
                trials_index][:] = nn_model.history.history['val_loss']

    # Persist proposed-method results together with both baselines so the
    # figure can be regenerated without re-running the simulation.
    result = Result(params, errors, losss, val_losss, non_cancell_error_array,
                    previous_errors, previous_losss, previous_val_losss)
    with open(output_dir + '/result.pkl', 'wb') as f:
        pickle.dump(result, f)

    ber_fig, ber_ax = graph.new_snr_ber_canvas(params.graph_x_min,
                                               params.graph_x_max, -6)

    # Baseline 1: no canceller.
    n_sum = params.previous_test_bits * params.trials
    errors_sum = np.sum(non_cancell_error_array, axis=1)
    bers = errors_sum / n_sum
    ber_ax.plot(graph_x_array,
                bers,
                color="k",
                marker='x',
                linestyle='--',
                label="w/o canceller")

    # Baseline 2: previous (conventional) method.
    # NOTE(review): fragment is truncated — the plot call below is cut off.
    n_sum = params.previous_test_bits * params.trials
    errors_sum = np.sum(previous_errors, axis=1)
    bers = errors_sum / n_sum
    ber_ax.plot(graph_x_array,
Ejemplo n.º 5
0
            # NOTE(review): fragment starts mid-expression — the array being
            # closed here, and the loops over sigma_index/trials_index, are
            # outside this view.
            ])  # stack like [[x[n], x[n-1]], ...] to match the number of channel taps
            # Apply per-tap channel gains, then sum taps to get the received
            # signal before noise.
            chanels_s = h_s * chanels_s
            y_s = np.sum(chanels_s, axis=1)

            # Add AWGN at the current noise level.
            r = y_s + m.awgn(y_s.shape, sigma)
            size = r.shape[0]
            # Stack L_w+1 shifted copies of r into overlapping windows.
            r_vec = np.array([r[i:i + size - L_w] for i in range(L_w + 1)])

            # MMSE filter from channel matrix H, then equalize: z = W^H r.
            W = mmse(H, noise_var)
            z = np.matmul(W.conj().T, r_vec)

            # Hard-decision QPSK demodulation; count symbol errors against
            # the transmitted sequence (truncated to matching length).
            d_hat = m.demodulate_qpsk(z)
            error = np.sum(d_s[0:d_hat.shape[0]] != d_hat)

            error_array[sigma_index][trials_index] = error

    ber_fig, ber_ax = graph.new_snr_ber_canvas(snr_min, snr_max)
    # Total symbols per SNR point; 2*h_s_len presumably accounts for edge
    # symbols lost to the channel memory — TODO confirm.
    n_sum = (n - 2 * h_s_len) * ave

    errors_sum = np.sum(error_array, axis=1)  # sum errors over trials
    bers = errors_sum / n_sum
    ber_ax.plot(snrs_db,
                bers,
                color="k",
                marker='o',
                linestyle='--',
                label="MMSE")

    ber_ax.legend()
    plt.show()