# NOTE(review): fragment begins mid-statement — these are the trailing
# arguments of a call (apparently an NN-model construction/training helper)
# whose opening is not visible in this chunk.
params.nHidden, params.optimizer, params.learningRate,
                params.momentum, params.trainingRatio, params.nEpochs,
                params.batchSize, params.delay, params.standardization)

            # Record this trial's bit-error count and the per-epoch
            # training/validation loss curves (Keras-style History object).
            errors[graph_x_index][trials_index] = nn_model.error
            losss[graph_x_index][trials_index][:] = nn_model.history.history[
                'loss']
            val_losss[graph_x_index][
                trials_index][:] = nn_model.history.history['val_loss']

    # Persist all sweep results so figures can be regenerated without rerun.
    result = Result(params, errors, losss, val_losss, non_cancell_error_array,
                    previous_errors, previous_losss, previous_val_losss)
    with open(output_dir + '/result.pkl', 'wb') as f:
        pickle.dump(result, f)

    # BER-vs-phi canvas over the swept x-axis range.
    ber_fig, ber_ax = graph.new_ber_canvas("phi", params.graph_x_min,
                                           params.graph_x_max)

    # Curve 1: bit errors without the canceller, summed over trials and
    # normalized by total transmitted test bits.
    n_sum = params.previous_test_bits * params.trials
    errors_sum = np.sum(non_cancell_error_array, axis=1)
    bers = errors_sum / n_sum
    ber_ax.plot(graph_x_array,
                bers,
                color="k",
                marker='x',
                linestyle='--',
                label="w/o canceller")

    # Curve 2: errors from the "previous" (baseline) canceller.
    n_sum = params.previous_test_bits * params.trials
    errors_sum = np.sum(previous_errors, axis=1)
    bers = errors_sum / n_sum
    # NOTE(review): this plot call is cut off at the chunk splice below.
    ber_ax.plot(graph_x_array,
                # NOTE(review): new fragment spliced in mid-statement here —
                # trailing arguments of another NN-model call. Note the local
                # `delay` argument (not params.delay): this fragment sweeps
                # the decision delay.
                params.TX_IQI, params.PA, params.LNA, params.RX_IQI,
                params.nHidden, params.optimizer, params.learningRate,
                params.momentum, params.trainingRatio, params.nEpochs,
                params.batchSize, delay, params.standardization)

            # Per-trial error count and per-epoch loss curves, indexed by the
            # swept delay value.
            errors[delay_array_index][trials_index] = nn_model.error
            losss[delay_array_index][
                trials_index][:] = nn_model.history.history['loss']
            val_losss[delay_array_index][
                trials_index][:] = nn_model.history.history['val_loss']

    # Persist the sweep results for later re-plotting.
    result = Result(params, errors, losss, val_losss)
    with open(output_dir + '/result.pkl', 'wb') as f:
        pickle.dump(result, f)

    # BER vs. decision delay.
    ber_fig, ber_ax = graph.new_ber_canvas("delay", params.delay_min,
                                           params.delay_max)
    n_sum = params.test_bits * params.trials
    errors_sum = np.sum(errors, axis=1)
    bers = errors_sum / n_sum
    ber_ax.plot(delay_array,
                bers,
                color="k",
                marker='o',
                linestyle='--',
                label="OFDM")

    plt.tight_layout()
    plt.savefig(output_dir + '/SNR_BER.pdf', bbox_inches='tight')

    # PNG copy, for Slack notification
    output_png_path = output_dir + '/SNR_BER.png'
if __name__ == '__main__':
    # Re-plot a previously saved epoch-sweep result from its pickled file.
    SIMULATIONS_NAME = os.path.basename(__file__).split('.')[0]
    load_files = 1  # number of result files to load under the same conditions

    dirname = settings.dirname_current_datetime(SIMULATIONS_NAME)
    # dirname = "../results/keep/ofdm_fde_system_model_chain_load"
    settings.init_output(dirname)

    param_path = "../results/comex/results/ofdm_fde_system_model_epoch/params.json"
    params = settings.load_param(param_path)
    # n_sum = params["test_bits"] * params['SNR_AVERAGE'] * load_files
    # Total number of transmitted test bits across all trials.
    n_sum = params['test_bits'] * params['trials']

    snrs_db = np.linspace(params['graph_x_min'], params['graph_x_max'],
                          params['graph_x_num'])
    fig, ax = graph.new_ber_canvas("Training Epoch", params['graph_x_min'],
                                   params['graph_x_max'], -5)
    ax.set_yticks([10**0, 10**-1, 10**-2, 10**-3, 10**-4, 10**-5])
    # ax.set_xticks(np.linspace(-0.5, 0.5, 6))

    pkl_path = "../results/comex/results/ofdm_fde_system_model_epoch/result.pkl"
    result = load_pkl_file(pkl_path)

    # NOTE(review): the plotting of the loaded result is commented out below,
    # and this fragment ends at a chunk splice.
    # previous_n_sum = params['previous_test_bits'] * params['trials']
    # errors_sum = np.sum(result.non_cancell_error_array, axis=1)
    # bers = errors_sum / previous_n_sum
    # np.place(bers, bers == 0, None)
    # ax.plot(snrs_db, bers, color='k', marker='x', linestyle='--', label="w/o canceller", ms=12)
    #
    # errors_sum = np.sum(result.previous_errors, axis=1)
    # bers = errors_sum / previous_n_sum
    # np.place(bers, bers == 0, None)
# --- Ejemplo n.º 4 / 0 — scraper chunk separator (not Python; commented out) ---
                # NOTE(review): fragment begins mid-statement — trailing
                # arguments of an NN-model call for a training-epoch sweep.
                # Note the local `nEpochs` variable (not params.nEpochs).
                params.gamma, params.phi, params.PA_IBO, params.PA_rho,
                params.LNA_IBO, params.LNA_rho, h_si, h_s, params.h_si_len,
                params.h_s_len, params.receive_antenna, params.TX_IQI,
                params.PA, params.LNA, params.RX_IQI, params.nHidden,
                params.optimizer, params.learningRate, params.momentum,
                params.trainingRatio, nEpochs, params.batchSize, params.delay,
                params.standardization)

            # Only the scalar per-trial error count is recorded in this sweep.
            errors[graph_x_index][trials_index] = nn_model.error

    # Persist results (no loss histories are saved here).
    result = Result(params, errors)
    with open(output_dir + '/result.pkl', 'wb') as f:
        pickle.dump(result, f)

    # BER vs. number of training epochs.
    ber_fig, ber_ax = graph.new_ber_canvas("Training Epoch",
                                           params.graph_x_min,
                                           params.graph_x_max)

    n_sum = params.test_bits * params.trials
    errors_sum = np.sum(errors, axis=1)
    bers = errors_sum / n_sum
    ber_ax.plot(graph_x_array,
                bers,
                color="k",
                marker='o',
                linestyle='--',
                label="Proposal")

    plt.tight_layout()
    # ber_ax.legend()
    plt.savefig(output_dir + '/SNR_BER.pdf', bbox_inches='tight')
                # NOTE(review): new fragment spliced in mid-statement —
                # trailing arguments of an NN-model call for a receive-antenna
                # count sweep.
                params.PA, params.LNA, params.RX_IQI, params.nHidden,
                params.optimizer, params.learningRate, params.momentum,
                params.trainingRatio, params.nEpochs, params.batchSize,
                params.delay, params.standardization)

            # Per-trial error count and per-epoch loss curves, indexed by the
            # swept antenna count.
            errors[antennae_array_index][trials_index] = nn_model.error
            losss[antennae_array_index][
                trials_index][:] = nn_model.history.history['loss']
            val_losss[antennae_array_index][
                trials_index][:] = nn_model.history.history['val_loss']

    # Persist the sweep results for later re-plotting.
    result = Result(params, errors, losss, val_losss)
    with open(output_dir + '/result.pkl', 'wb') as f:
        pickle.dump(result, f)

    # BER vs. number of antennae.
    ber_fig, ber_ax = graph.new_ber_canvas("antennae", params.antennae_min,
                                           params.antennae_max)
    n_sum = params.test_bits * params.trials
    errors_sum = np.sum(errors, axis=1)
    bers = errors_sum / n_sum
    ber_ax.plot(antennae_array,
                bers,
                color="k",
                marker='o',
                linestyle='--',
                label="OFDM")

    plt.tight_layout()
    plt.savefig(output_dir + '/SNR_BER.pdf', bbox_inches='tight')

    # PNG copy, for Slack notification
    output_png_path = output_dir + '/SNR_BER.png'
# --- Ejemplo n.º 6 / 0 — scraper chunk separator (not Python; commented out) ---
                # NOTE(review): fragment begins mid-statement — trailing
                # arguments of an NN-model call for a gamma sweep.
                params.trainingRatio,
                params.nEpochs,
                params.batchSize,
                params.delay,
                params.standardization
            )

            # Per-trial error count and per-epoch loss curves, indexed by the
            # swept gamma value.
            errors[gamma_array_index][trials_index] = nn_model.error
            losss[gamma_array_index][trials_index][:] = nn_model.history.history['loss']
            val_losss[gamma_array_index][trials_index][:] = nn_model.history.history['val_loss']

    # Persist the sweep results for later re-plotting.
    result = Result(params, errors, losss, val_losss)
    with open(output_dir + '/result.pkl', 'wb') as f:
        pickle.dump(result, f)

    # BER vs. gamma.
    ber_fig, ber_ax = graph.new_ber_canvas("gamma", params.gamma_min, params.gamma_max)
    n_sum = params.test_bits * params.trials
    errors_sum = np.sum(errors, axis=1)
    bers = errors_sum / n_sum
    ber_ax.plot(gamma_array, bers, color="k", marker='o', linestyle='--', label="OFDM")

    plt.tight_layout()
    plt.savefig(output_dir + '/SNR_BER.pdf', bbox_inches='tight')

    # PNG copy, for Slack notification
    output_png_path = output_dir + '/SNR_BER.png'
    plt.savefig(output_png_path, bbox_inches='tight')

    # Per-gamma learning curves; loop body is truncated at the chunk splice.
    for gamma_index, gamma in enumerate(gamma_array):
        learn_fig, learn_ax = graph.new_learning_curve_canvas(params.nEpochs)
        loss_avg = np.mean(losss[gamma_index], axis=0).T
# --- Ejemplo n.º 7 / 0 — scraper chunk separator (not Python; commented out) ---
    # NOTE(review): fragment begins inside an `if __name__ == '__main__':`
    # body whose header is outside this chunk. It re-plots a saved gamma
    # (amplitude-error) sweep from its pickle.
    SIMULATIONS_NAME = os.path.basename(__file__).split('.')[0]
    load_files = 1  # number of result files to load under the same conditions

    dirname = settings.dirname_current_datetime(SIMULATIONS_NAME)
    # dirname = "../results/keep/ofdm_fde_system_model_chain_load"
    settings.init_output(dirname)

    param_path = "../results/keep/fig031921/gamma/params.json"
    params = settings.load_param(param_path)
    # n_sum = params["test_bits"] * params['SNR_AVERAGE'] * load_files
    n_sum = params['test_bits'] * params['trials']

    snrs_db = np.linspace(params['graph_x_min'], params['graph_x_max'],
                          params['graph_x_num'])
    fig, ax = graph.new_ber_canvas(
        "amplitude error " + r'$\gamma^{\rm T}$, $\gamma^{\rm R}_i$', -0.5,
        0.5, -5)

    ax.set_yticks([10**0, 10**-1, 10**-2, 10**-3, 10**-4, 10**-5])
    ax.set_xticks(np.linspace(-0.5, 0.5, 6))

    pkl_path = "../results/keep/fig031921/gamma_wo/result.pkl"
    result = load_pkl_file(pkl_path)
    previous_n_sum = params['previous_test_bits'] * params['trials']
    errors_sum = np.sum(result.non_cancell_error_array, axis=1)
    bers = errors_sum / previous_n_sum
    # Blank zero-BER points (np.place writes NaN into the float array),
    # presumably so they are omitted from the log-scale plot.
    np.place(bers, bers == 0, None)
    # NOTE(review): this plot call is truncated at the chunk splice below.
    ax.plot(snrs_db,
            bers,
            color='k',
            marker='x',
    # NOTE(review): new fragment spliced in here — a script body that plots
    # BER convergence over the number of trials from a saved result.
    load_files = 1  # number of result files to load under the same conditions

    dirname = settings.dirname_current_datetime(SIMULATIONS_NAME)
    # dirname = "../results/keep/" + SIMULATIONS_NAME
    settings.init_output(dirname)

    # param_path = "../results/ofdm_fde_with_previous/2021/01/29/13_02_26/params.json"
    param_path = "../results/keep/ofdm_fde_with_previous/12_35_55/params.json"

    params_dict = settings.load_param(param_path)
    params = Params.from_params_dict(params_dict)
    n_sum = params.test_bits * params.trials

    trials_num = params.trials
    # X axis: cumulative trial counts 1..trials.
    graph_x_array = np.linspace(1, params.trials, trials_num, dtype=int)
    fig, ax = graph.new_ber_canvas('trials', 0, params.trials, -5)

    # pkl_path = "../results/ofdm_fde_with_previous/2021/01/29/13_02_26/result.pkl"
    # pkl_path = "../results/ofdm_fde_system_model/2021/01/22/01_25_45/result.pkl"
    pkl_path = "../results/keep/ofdm_fde_with_previous/12_35_55/result.pkl"

    result = load_pkl_file(pkl_path)
    SNR = 25  # unused, but defined so the setting is obvious later
    SNR_errors = result.errors[-1]

    errors_sum = np.sum(result.errors, axis=1)
    bers = np.zeros((trials_num))
    # Cumulative BER after the first `trials` trials. NOTE(review): the loop
    # body is truncated at the chunk splice — the computed `ber` is never
    # stored in the visible portion.
    for i, trials in enumerate(graph_x_array):
        error = np.sum(SNR_errors[:trials])
        n_sum = params.test_bits * trials
        ber = error / n_sum
# --- Ejemplo n.º 9 / 0 — scraper chunk separator (not Python; commented out) ---
if __name__ == '__main__':
    # Re-plot a saved IQ-imbalance phase-error (phi) sweep from its pickle.
    SIMULATIONS_NAME = os.path.basename(__file__).split('.')[0]
    load_files = 1  # number of result files to load under the same conditions

    dirname = settings.dirname_current_datetime(SIMULATIONS_NAME)
    # dirname = "../results/keep/ofdm_fde_system_model_chain_load"
    settings.init_output(dirname)

    param_path = "../results/comex/results/iqi_phi/params.json"
    params = settings.load_param(param_path)
    # n_sum = params["test_bits"] * params['SNR_AVERAGE'] * load_files
    n_sum = params['test_bits'] * params['trials']

    snrs_db = np.linspace(params['graph_x_min'], params['graph_x_max'], params['graph_x_num'])
    fig, ax = graph.new_ber_canvas("phase error " + r'$\phi$', params['graph_x_min'], params['graph_x_max'], -4)
    ax.set_yticks([10**0, 10**-1, 10**-2, 10**-3, 10**-4])
    # ax.set_xticks(np.linspace(-0.5, 0.5, 6))


    pkl_path = "../results/comex/results/iqi_phi/result.pkl"
    result = load_pkl_file(pkl_path)

    # BER without canceller; zero-BER points are blanked (np.place writes
    # NaN), presumably to omit them from the log-scale plot.
    previous_n_sum = params['previous_test_bits'] * params['trials']
    errors_sum = np.sum(result.non_cancell_error_array, axis=1)
    bers = errors_sum / previous_n_sum
    np.place(bers, bers == 0, None)
    ax.plot(snrs_db, bers, color='k', marker='x', linestyle=':', label="w/o canceller", ms=12)

    # BER of the previous canceller. NOTE(review): the fragment is cut off
    # at the chunk splice below.
    errors_sum = np.sum(result.previous_errors, axis=1)
    bers = errors_sum / previous_n_sum
                # NOTE(review): new fragment spliced in mid-statement —
                # trailing arguments of an NN-model call for a PA IBO sweep
                # that also records "previous"/"w/o canceller" baselines.
                params.nHidden, params.optimizer, params.learningRate,
                params.momentum, params.trainingRatio, params.nEpochs,
                params.batchSize, params.delay, params.standardization)

            # Per-trial error count and per-epoch loss curves.
            errors[graph_x_index][trials_index] = nn_model.error
            losss[graph_x_index][trials_index][:] = nn_model.history.history[
                'loss']
            val_losss[graph_x_index][
                trials_index][:] = nn_model.history.history['val_loss']

    # Persist sweep results including the baseline runs.
    result = Result(params, errors, losss, val_losss, non_cancell_error_array,
                    previous_errors, previous_losss, previous_val_losss)
    with open(output_dir + '/result.pkl', 'wb') as f:
        pickle.dump(result, f)

    # BER vs. PA IBO [dB].
    ber_fig, ber_ax = graph.new_ber_canvas("PA IBO [dB]", params.graph_x_min,
                                           params.graph_x_max)

    # Curve 1: errors without the canceller.
    n_sum = params.previous_test_bits * params.trials
    errors_sum = np.sum(non_cancell_error_array, axis=1)
    bers = errors_sum / n_sum
    ber_ax.plot(graph_x_array,
                bers,
                color="k",
                marker='x',
                linestyle='--',
                label="w/o canceller")

    # Curve 2: errors from the previous canceller. NOTE(review): this plot
    # call is truncated at the chunk splice below.
    n_sum = params.previous_test_bits * params.trials
    errors_sum = np.sum(previous_errors, axis=1)
    bers = errors_sum / n_sum
    ber_ax.plot(graph_x_array,
    # NOTE(review): new fragment spliced in here — a script body (enclosing
    # `if __name__` header not visible) that re-plots a saved decision-delay
    # sweep from its pickle.
    SIMULATIONS_NAME = os.path.basename(__file__).split('.')[0]
    load_files = 1  # number of result files to load under the same conditions

    dirname = settings.dirname_current_datetime(SIMULATIONS_NAME)
    # dirname = "../results/keep/ofdm_fde_system_model_chain_load"
    settings.init_output(dirname)

    param_path = "../results/keep/fig031921/delay/params.json"
    params = settings.load_param(param_path)
    # n_sum = params["test_bits"] * params['SNR_AVERAGE'] * load_files
    n_sum = params['test_bits'] * params['trials']

    snrs_db = np.linspace(params['delay_min'], params['delay_max'],
                          params['delay_num'])
    fig, ax = graph.new_ber_canvas("decision delay " + r"$\delta$",
                                   params['delay_min'], params['delay_max'],
                                   -5)
    # ax.set_yticks([10 ** 0, 10 ** -1, 10 ** -2, 10 ** -3, 10 ** -5])

    pkl_path = "../results/keep/fig031921/delay/result.pkl"
    result = load_pkl_file(pkl_path)
    errors_sum = np.sum(result.errors, axis=1)
    bers = errors_sum / n_sum
    # Blank zero-BER points (np.place writes NaN), presumably to omit them
    # from the log-scale plot.
    np.place(bers, bers == 0, None)
    ax.plot(snrs_db,
            bers,
            color='k',
            marker='d',
            linestyle='-',
            label="decision delay",
            ms=12)
# --- Ejemplo n.º 12 / 0 — scraper chunk separator (not Python; commented out) ---
                # NOTE(review): fragment begins mid-statement — trailing
                # arguments of an NN-model call for a PA IBO sweep.
                params.trainingRatio,
                params.nEpochs,
                params.batchSize,
                params.delay,
                params.standardization
            )

            # Per-trial error count and per-epoch loss curves, indexed by the
            # swept PA IBO value.
            errors[PA_IBO_array_index][trials_index] = nn_model.error
            losss[PA_IBO_array_index][trials_index][:] = nn_model.history.history['loss']
            val_losss[PA_IBO_array_index][trials_index][:] = nn_model.history.history['val_loss']

    # Persist the sweep results for later re-plotting.
    result = Result(params, errors, losss, val_losss)
    with open(output_dir + '/result.pkl', 'wb') as f:
        pickle.dump(result, f)

    # BER vs. PA IBO [dB].
    ber_fig, ber_ax = graph.new_ber_canvas("PA_IBO [dB]", params.PA_IBO_MIN, params.PA_IBO_MAX,)
    n_sum = params.test_bits * params.trials
    errors_sum = np.sum(errors, axis=1)
    bers = errors_sum / n_sum
    ber_ax.plot(PA_IBO_array, bers, color="k", marker='o', linestyle='--', label="OFDM")

    plt.tight_layout()
    plt.savefig(output_dir + '/SNR_BER.pdf', bbox_inches='tight')

    # PNG copy, for Slack notification
    output_png_path = output_dir + '/SNR_BER.png'
    plt.savefig(output_png_path, bbox_inches='tight')

    # Per-IBO learning curves; loop body is truncated at the chunk boundary.
    for PA_IBO_index, PA_IBO in enumerate(PA_IBO_array):
        learn_fig, learn_ax = graph.new_learning_curve_canvas(params.nEpochs)
        loss_avg = np.mean(losss[PA_IBO_index], axis=0).T
if __name__ == '__main__':
    # Re-plot a saved PA-IBO sweep's "without canceller" baseline result.
    SIMULATIONS_NAME = os.path.basename(__file__).split('.')[0]
    load_files = 1  # number of result files to load under the same conditions

    dirname = settings.dirname_current_datetime(SIMULATIONS_NAME)
    # dirname = "../results/keep/ofdm_fde_system_model_chain_load"
    settings.init_output(dirname)

    param_path = "../results/keep/fig031921/pa_ibo/params.json"
    params = settings.load_param(param_path)
    # n_sum = params["test_bits"] * params['SNR_AVERAGE'] * load_files
    n_sum = params['test_bits'] * params['trials']

    snrs_db = np.linspace(params['graph_x_min'], params['graph_x_max'],
                          params['graph_x_num'])
    fig, ax = graph.new_ber_canvas("IBO [dB]", params['graph_x_min'],
                                   params['graph_x_max'], -5)
    ax.set_yticks([10**0, 10**-1, 10**-2, 10**-3, 10**-4, 10**-5])
    # ax.set_xticks(np.linspace(-0.5, 0.5, 6))

    pkl_path = "../results/keep/fig031921/pa_ibo_wo/result.pkl"
    result = load_pkl_file(pkl_path)

    # BER without canceller; zero-BER points are blanked (np.place writes
    # NaN), presumably to omit them from the log-scale plot.
    previous_n_sum = params['previous_test_bits'] * params['trials']
    errors_sum = np.sum(result.non_cancell_error_array, axis=1)
    bers = errors_sum / previous_n_sum
    np.place(bers, bers == 0, None)
    # NOTE(review): this plot call is truncated at the end of the chunk.
    ax.plot(snrs_db,
            bers,
            color='k',
            marker='x',
            linestyle=':',