Example #1

import os

import numpy as np
import matplotlib.pyplot as plt

# ScaDatabase, Analysis, KeyRank, SuccessRate, Metric and the module-level
# databases_root_folder are provided by the surrounding AISY web application (not shown here).

def gen_plot(analysis_id, table_name, metric):
    db = ScaDatabase(databases_root_folder + table_name)

    analysis = db.select_analysis(Analysis, analysis_id)

    if metric == "Guessing_Entropy":
        result_key_byte = db.select_values_from_analysis_json(KeyRank, analysis_id)
    elif metric == "Success_Rate":
        result_key_byte = db.select_values_from_analysis_json(SuccessRate, analysis_id)
    else:
        result_key_byte = []
        all_metrics_names = db.select_metrics(Metric, analysis_id)

        if metric == "accuracy":
            for metric_name in all_metrics_names:
                if metric in metric_name:
                    if "grid_search" in analysis.settings or "random_search" in analysis.settings:
                        if "best" in metric_name:
                            result_key_byte.append(db.select_values_from_metric(Metric, metric_name, analysis_id)[0])
                    else:
                        result_key_byte.append(db.select_values_from_metric(Metric, metric_name, analysis_id)[0])
        elif metric == "loss":
            for metric_name in all_metrics_names:
                if metric in metric_name:
                    if "grid_search" in analysis.settings or "random_search" in analysis.settings:
                        if "best" in metric_name:
                            result_key_byte.append(db.select_values_from_metric(Metric, metric_name, analysis_id)[0])
                    else:
                        result_key_byte.append(db.select_values_from_metric(Metric, metric_name, analysis_id)[0])
        else:
            result_key_byte.append(db.select_values_from_metric(Metric, metric, analysis_id)[0])

    my_dpi = 100
    plt.figure(figsize=(800 / my_dpi, 600 / my_dpi), dpi=my_dpi)
    dir_analysis_id = "resources/figures/{}".format(analysis_id)
    if not os.path.exists(dir_analysis_id):
        os.makedirs(dir_analysis_id)
    if metric == "Guessing_Entropy" or metric == "Success_Rate":
        for res in result_key_byte:
            plt.plot(np.arange(res['report_interval'], (len(res['values']) + 1) * res['report_interval'], res['report_interval']),
                     res['values'],
                     label=res['metric'])
            plt.legend(loc='best', fontsize=13)
            plt.xlim([1, len(res['values']) * res['report_interval']])
            plt.ylabel(metric.replace("_", " "), fontsize=13)
            if metric == "Guessing_Entropy" or metric == "Success_Rate":
                plt.xlabel("Attack Traces", fontsize=13)
            else:
                plt.xlabel("Epochs", fontsize=13)
            plt.grid(ls='--')
        plt.savefig("{}/{}_{}_{}.png".format(dir_analysis_id, metric, analysis_id, table_name.replace(".sqlite", "")),
                    format="png")
    else:
        for res in result_key_byte:
            plt.plot(np.arange(1, len(res['values']) + 1, 1), res['values'], label=res['metric'])
            plt.legend(loc='best', fontsize=13)
            plt.xlim([1, len(res['values'])])
            plt.ylabel(metric.replace("_", " "), fontsize=13)
            plt.xlabel("Epochs", fontsize=13)
            plt.grid(ls='--')
        plt.savefig("{}/{}_{}_{}.png".format(dir_analysis_id, metric, analysis_id, table_name.replace(".sqlite", "")),
                    format="png")

    return "ok"
def generate_script(script_filename, databases_root_folder, table_name,
                    analysis_id):
    script_py_file = open(
        "scripts/{}_{}.py".format(script_filename,
                                  table_name.replace(".sqlite", "")), "w+")

    script_py_file.write("from tensorflow.keras.optimizers import *\n")
    script_py_file.write("from tensorflow.keras.layers import *\n")
    script_py_file.write("from tensorflow.keras.models import *\n")
    script_py_file.write("from aisy.sca_deep_learning_aes import AisyAes\n")

    db = ScaDatabase(databases_root_folder + table_name)

    analysis = db.select_analysis(Analysis, analysis_id)
    neural_network_model = db.select_from_analysis(NeuralNetwork, analysis_id)

    # get training hyper-parameters information from database
    hyper_parameters = db.select_all_from_analysis(HyperParameter, analysis_id)
    hyper_parameters_single = hyper_parameters[0].hyper_parameters[0]

    hyper_parameter_search = None
    if len(hyper_parameters) > 1:
        hyper_parameter_search = db.select_from_analysis(
            HyperParameterSearch, analysis.id)

    leakage_models = db.select_from_analysis(LeakageModel, analysis_id)
    leakage_model_parameters = leakage_models.leakage_model[0]

    # rebuild the AISY attack configuration recorded for this analysis
    script_py_file.write('\naisy = AisyAes()')
    script_py_file.write('\naisy.set_dataset("{}")'.format(analysis.dataset))
    script_py_file.write('\naisy.set_database_name("{}")'.format(table_name))
    script_py_file.write('\naisy.set_aes_leakage_model(')
    for index, key in enumerate(leakage_model_parameters):
        if isinstance(leakage_model_parameters[key], str):
            script_py_file.write('{}="{}"'.format(
                key, leakage_model_parameters[key]))
        else:
            script_py_file.write('{}={}'.format(key,
                                                leakage_model_parameters[key]))
        if index < len(leakage_model_parameters) - 1:
            script_py_file.write(', ')
    script_py_file.write(')')
    script_py_file.write('\naisy.set_key("{}")'.format(
        hyper_parameters_single['key']))
    script_py_file.write('\naisy.set_number_of_profiling_traces({})'.format(
        hyper_parameters_single['profiling_traces']))
    script_py_file.write('\naisy.set_number_of_attack_traces({})'.format(
        hyper_parameters_single['attack_traces']))
    script_py_file.write('\naisy.set_first_sample({})'.format(
        hyper_parameters_single['first_sample']))
    script_py_file.write('\naisy.set_number_of_samples({})'.format(
        hyper_parameters_single['number_of_samples']))
    script_py_file.write('\naisy.set_batch_size({})'.format(
        hyper_parameters_single['batch_size']))
    script_py_file.write('\naisy.set_epochs({})'.format(
        hyper_parameters_single['epochs']))
    if len(hyper_parameters) == 1:
        script_py_file.write('\n\n\n{}'.format(
            neural_network_model.description))
        script_py_file.write('\n\naisy.set_neural_network({})'.format(
            neural_network_model.model_name))
    else:
        if "grid_search" in analysis.settings:
            script_py_file.write('\ngrid_search = {}'.format(
                analysis.settings["grid_search"]))
        if "random_search" in analysis.settings:
            script_py_file.write('\nrandom_search = {}'.format(
                analysis.settings["random_search"]))
    if "early_stopping" in analysis.settings:
        script_py_file.write('\nearly_stopping = {}'.format(
            analysis.settings["early_stopping"]))

    # emit the aisy.run(...) call with the stored key rank settings
    script_py_file.write('\n\naisy.run(')
    script_py_file.write('\n    key_rank_executions={},'.format(
        analysis.settings["key_rank_executions"]))
    script_py_file.write('\n    key_rank_report_interval={},'.format(
        analysis.settings["key_rank_report_interval"]))
    script_py_file.write('\n    key_rank_attack_traces={},'.format(
        analysis.settings["key_rank_attack_traces"]))
    if "early_stopping" in analysis.settings:
        script_py_file.write('\n    early_stopping=early_stopping,')
    if "ensemble" in analysis.settings:
        script_py_file.write('\n    ensemble=[{}],'.format(
            analysis.settings["ensemble"]))
    if len(hyper_parameters) == 1:
        script_py_file.write('\n)\n')
    else:
        if hyper_parameter_search.search_type == "Grid Search":
            script_py_file.write('\n    grid_search=grid_search')
        else:
            script_py_file.write('\n    random_search=random_search')
        script_py_file.write('\n)\n')
    script_py_file.close()
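
A hedged usage sketch for generate_script: the script name, table name and analysis id are placeholders, and the scripts/ output directory must already exist because the function opens the file directly.

import os

os.makedirs("scripts", exist_ok=True)  # the function itself does not create this folder
# Hypothetical call; this would write scripts/reproduce_attack_database_ascad.py
generate_script("reproduce_attack", databases_root_folder, "database_ascad.sqlite", 1)
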
Example #3

# render_template (Flask), html (Dash), ScaViews, the dash_app_* objects and the
# database classes come from the surrounding AISY web application (not shown here).

def result(analysis_id, table_name):
    db = ScaDatabase(databases_root_folder + table_name)

    # get analysis information from database
    analysis = db.select_analysis(Analysis, analysis_id)

    sca_views = ScaViews(analysis_id, db)

    all_metric_plots = sca_views.metric_plots()

    all_accuracy_plots = sca_views.accuracy_plots()
    dash_app_accuracy.layout = html.Div(children=[all_accuracy_plots])
    all_loss_plots = sca_views.loss_plots()
    dash_app_loss.layout = html.Div(children=[all_loss_plots])
    if "ensemble" in analysis.settings:
        all_key_rank_plots = sca_views.ensemble_plots_key_rank()
    else:
        all_key_rank_plots = sca_views.key_rank_plots()
    dash_app_key_ranks.layout = html.Div(children=[all_key_rank_plots])
    if "ensemble" in analysis.settings:
        all_success_rate_plots = sca_views.ensemble_plots_success_rate()
    else:
        all_success_rate_plots = sca_views.success_rate_plots()
    dash_app_success_rates.layout = html.Div(children=[all_success_rate_plots])

    # get neural network information from database
    neural_network_model = db.select_from_analysis(NeuralNetwork, analysis_id)

    # get training hyper-parameters information from database
    hyper_parameters = db.select_all_from_analysis(HyperParameter, analysis_id)
    training_hyper_parameters = [hp.hyper_parameters for hp in hyper_parameters]

    hyper_parameter_search = []
    if len(hyper_parameters) > 1:
        hyper_parameter_search = db.select_from_analysis(HyperParameterSearch, analysis.id)

    # get leakage model information from database
    leakage_models = db.select_from_analysis(LeakageModel, analysis_id)
    leakage_model_parameters = leakage_models.leakage_model

    # get visualization plots
    all_visualization_plots = sca_views.visualization_plots()
    all_visualization_heatmap_plots = sca_views.visualization_plots_heatmap()

    # confusion matrix plot
    all_confusion_matrix_plots = sca_views.confusion_matrix_plots()

    return render_template("dashboard/result.html",
                           all_plots=all_metric_plots,
                           all_key_rank_plots=all_key_rank_plots,
                           all_success_rate_plots=all_success_rate_plots,
                           neural_network_description=neural_network_model.description,
                           training_hyper_parameters=training_hyper_parameters,
                           hyper_parameters=hyper_parameters,
                           hyper_parameter_search=hyper_parameter_search,
                           leakage_model_parameters=leakage_model_parameters,
                           all_visualization_plots=all_visualization_plots,
                           all_visualization_heatmap_plots=all_visualization_heatmap_plots,
                           all_confusion_matrix_plots=all_confusion_matrix_plots,
                           analysis=analysis)
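
result reads like a Flask view backed by several Dash apps; a minimal sketch of how it might be registered, where the Flask app object and the URL pattern are assumptions rather than the application's actual routing.

# Hypothetical registration; "app", the URL rule and the endpoint name are assumptions.
app.add_url_rule("/result/<int:analysis_id>/<table_name>", endpoint="result", view_func=result)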