Example #1
import os

import hiplot as hip
from flask import render_template

# ScaDatabase, the table classes (Analysis, KeyRank, HyperParameter) and
# databases_root_folder are provided by the surrounding AISY web application.
def search(table_name):
    if os.path.exists(databases_root_folder + table_name):

        db = ScaDatabase(databases_root_folder + table_name)
        analysis_all = db.select_all(Analysis)
        analyses = []

        hp = []

        for analysis in analysis_all:

            final_key_ranks = db.select_final_key_rank_json_from_analysis(KeyRank, analysis.id)

            if len(final_key_ranks) > 0:
                hyper_parameters = db.select_from_analysis(HyperParameter, analysis.id)
                training_hyper_parameters = hyper_parameters.hyper_parameters
                training_hyper_parameters[0]['guessing_entropy'] = final_key_ranks[0][0]['key_rank']
                hp.append(training_hyper_parameters[0])

        exp = hip.Experiment.from_iterable(hp)
        exp.display_data(hip.Displays.PARALLEL_PLOT).update({
            'hide': ['uid', 'key_rank', 'key'],  # hide these columns in the plot
            'order': ['guessing_entropy'],  # put the guessing_entropy column first on the left
        })
        exp.validate()
        exp.to_html("webapp/templates/hiplot.html")

        return render_template("dashboard/search.html", analyses=analyses)
    return render_template("dashboard/search.html", analyses=[])
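The HiPlot calls above follow the library's standard export pattern. A minimal self-contained sketch, with hypothetical hyper-parameter records standing in for the database rows:

import hiplot as hip

# Hypothetical records; in the view above these come from the HyperParameter
# rows, with the final key rank attached as "guessing_entropy".
records = [
    {"batch_size": 100, "epochs": 20, "learning_rate": 0.001, "guessing_entropy": 1},
    {"batch_size": 400, "epochs": 50, "learning_rate": 0.0001, "guessing_entropy": 34},
]

exp = hip.Experiment.from_iterable(records)
exp.display_data(hip.Displays.PARALLEL_PLOT).update({
    "order": ["guessing_entropy"],  # put the guessing_entropy axis first
})
exp.validate()
exp.to_html("hiplot.html")  # writes a standalone HTML page with the parallel plot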
Example #2
import os
import time
from datetime import datetime

import pytz
from flask import render_template

# ScaDatabase, the table classes (Analysis, KeyRank, SuccessRate, NeuralNetwork)
# and databases_root_folder come from the surrounding AISY web application.
def table():
    db_files = []
    # r = root, d = directories, f = files
    for r, d, f in os.walk(databases_root_folder):
        for file in f:
            if file.endswith(".sqlite"):
                db_files.append(file)

    all_tables = []
    all_tables_names = []

    for db_file in db_files:

        if os.path.exists(databases_root_folder + db_file):

            db = ScaDatabase(databases_root_folder + db_file)
            analysis_all = db.select_all(Analysis)
            analyses = []

            for analysis in analysis_all:
                if not analysis.deleted:
                    localtimezone = pytz.timezone(os.getenv("TIME_ZONE"))
                    analysis_datetime = datetime.strptime(
                        str(analysis.datetime), "%Y-%m-%d %H:%M:%S.%f"
                    ).astimezone(localtimezone).strftime("%b %d, %Y %H:%M:%S")

                    final_key_ranks = db.select_final_key_rank_json_from_analysis(KeyRank, analysis.id)
                    final_success_rates = db.select_final_success_rate_from_analysis(SuccessRate, analysis.id)
                    neural_network = db.select_from_analysis(NeuralNetwork, analysis.id)

                    analyses.append({
                        "id": analysis.id,
                        "datetime": analysis_datetime,
                        "dataset": analysis.dataset,
                        "settings": analysis.settings,
                        "elapsed_time": time.strftime('%H:%M:%S', time.gmtime(analysis.elapsed_time)),
                        "key_ranks": final_key_ranks,
                        "success_rates": final_success_rates,
                        "neural_network_name": "not ready" if neural_network is None else neural_network.model_name
                    })

            all_tables.append(analyses)
            all_tables_names.append(db_file)

    return render_template("tables.html", all_tables=all_tables, all_tables_names=all_tables_names)
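One thing to be aware of in the timestamp handling above: datetime.strptime returns a naive datetime, and calling astimezone() on a naive value interprets it in the machine's local timezone. If the stored timestamps are known to be UTC, attaching that zone explicitly is safer. A small sketch (the raw value and the fallback zone are hypothetical):

import os
from datetime import datetime

import pytz

raw = "2021-03-01 14:30:00.123456"  # hypothetical stored value, assumed UTC
local_tz = pytz.timezone(os.getenv("TIME_ZONE", "Europe/Amsterdam"))
dt = datetime.strptime(raw, "%Y-%m-%d %H:%M:%S.%f")
print(pytz.utc.localize(dt).astimezone(local_tz).strftime("%b %d, %Y %H:%M:%S"))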
Example #3
# ScaDatabase and the table classes (Analysis, NeuralNetwork, HyperParameter,
# HyperParameterSearch, LeakageModel) come from the surrounding AISY application.
def generate_script(script_filename, databases_root_folder, table_name,
                    analysis_id):
    script_py_file = open(
        "scripts/{}_{}.py".format(script_filename,
                                  table_name.replace(".sqlite", "")), "w+")

    script_py_file.write("from tensorflow.keras.optimizers import *\n")
    script_py_file.write("from tensorflow.keras.layers import *\n")
    script_py_file.write("from tensorflow.keras.models import *\n")
    script_py_file.write("from aisy.sca_deep_learning_aes import AisyAes\n")

    db = ScaDatabase(databases_root_folder + table_name)

    analysis = db.select_analysis(Analysis, analysis_id)
    neural_network_model = db.select_from_analysis(NeuralNetwork, analysis_id)

    # get training hyper-parameter information from the database
    hyper_parameters = db.select_all_from_analysis(HyperParameter, analysis_id)
    hyper_parameters_single = hyper_parameters[0].hyper_parameters[0]

    hyper_parameter_search = None
    if len(hyper_parameters) > 1:
        hyper_parameter_search = db.select_from_analysis(
            HyperParameterSearch, analysis.id)

    leakage_models = db.select_from_analysis(LeakageModel, analysis_id)
    leakage_model_parameters = leakage_models.leakage_model[0]

    script_py_file.write('\naisy = AisyAes()')
    script_py_file.write('\naisy.set_dataset("{}")'.format(analysis.dataset))
    script_py_file.write('\naisy.set_database_name("{}")'.format(table_name))
    script_py_file.write('\naisy.set_aes_leakage_model(')
    for index, key in enumerate(leakage_model_parameters):
        if isinstance(leakage_model_parameters[key], str):
            script_py_file.write('{}="{}"'.format(
                key, leakage_model_parameters[key]))
        else:
            script_py_file.write('{}={}'.format(key,
                                                leakage_model_parameters[key]))
        if index < len(leakage_model_parameters) - 1:
            script_py_file.write(', ')
    script_py_file.write(')')
    script_py_file.write('\naisy.set_key("{}")'.format(
        hyper_parameters_single['key']))
    script_py_file.write('\naisy.set_number_of_profiling_traces({})'.format(
        hyper_parameters_single['profiling_traces']))
    script_py_file.write('\naisy.set_number_of_attack_traces({})'.format(
        hyper_parameters_single['attack_traces']))
    script_py_file.write('\naisy.set_first_sample({})'.format(
        hyper_parameters_single['first_sample']))
    script_py_file.write('\naisy.set_number_of_samples({})'.format(
        hyper_parameters_single['number_of_samples']))
    script_py_file.write('\naisy.set_batch_size({})'.format(
        hyper_parameters_single['batch_size']))
    script_py_file.write('\naisy.set_epochs({})'.format(
        hyper_parameters_single['epochs']))
    if len(hyper_parameters) == 1:
        script_py_file.write('\n\n\n{}'.format(
            neural_network_model.description))
        script_py_file.write('\n\naisy.set_neural_network({})'.format(
            neural_network_model.model_name))
    else:
        if "grid_search" in analysis.settings:
            script_py_file.write('\ngrid_search = {}'.format(
                analysis.settings["grid_search"]))
        if "random_search" in analysis.settings:
            script_py_file.write('\nrandom_search = {}'.format(
                analysis.settings["random_search"]))
    if "early_stopping" in analysis.settings:
        script_py_file.write('\nearly_stopping = {}'.format(
            analysis.settings["early_stopping"]))

    script_py_file.write('\n\naisy.run(')
    script_py_file.write('\n    key_rank_executions={},'.format(
        analysis.settings["key_rank_executions"]))
    script_py_file.write('\n    key_rank_report_interval={},'.format(
        analysis.settings["key_rank_report_interval"]))
    script_py_file.write('\n    key_rank_attack_traces={},'.format(
        analysis.settings["key_rank_attack_traces"]))
    if "early_stopping" in analysis.settings:
        script_py_file.write('\n    early_stopping=early_stopping,')
    if "ensemble" in analysis.settings:
        script_py_file.write('\n    ensemble=[{}],'.format(
            analysis.settings["ensemble"]))
    if len(hyper_parameters) == 1:
        script_py_file.write('\n)\n')
    else:
        if hyper_parameter_search.search_type == "Grid Search":
            script_py_file.write('\n    grid_search=grid_search')
        else:
            script_py_file.write('\n    random_search=random_search')
        script_py_file.write('\n)\n')
    script_py_file.close()
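For orientation, a script produced by generate_script looks roughly like the following. Every value here is hypothetical, and the exact lines depend on the stored settings (hyper-parameter search, early stopping, ensembles):

from tensorflow.keras.optimizers import *
from tensorflow.keras.layers import *
from tensorflow.keras.models import *
from aisy.sca_deep_learning_aes import AisyAes

aisy = AisyAes()
aisy.set_dataset("ascad.h5")
aisy.set_database_name("example.sqlite")
aisy.set_aes_leakage_model(leakage_model="HW", byte=2)
aisy.set_key("4DFBE0F27221FE10A78D4ADC8E490469")
aisy.set_number_of_profiling_traces(50000)
aisy.set_number_of_attack_traces(2000)
aisy.set_first_sample(0)
aisy.set_number_of_samples(700)
aisy.set_batch_size(400)
aisy.set_epochs(50)

def mlp(classes, number_of_samples):
    # model definition stored in the database goes here
    ...

aisy.set_neural_network(mlp)

aisy.run(
    key_rank_executions=100,
    key_rank_report_interval=10,
    key_rank_attack_traces=1000,
)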
Example #4
class ScaDatabaseInserts:
    """Insert analysis artifacts (hyper-parameters, metrics, key ranks, ...) into an AISY results database."""

    def __init__(self, database_name, db_filename, dataset, settings, elapsed_time):
        self.db = ScaDatabase(database_name)
        tables.base().metadata.create_all(self.db.engine)
        new_insert = Analysis(db_filename=db_filename, dataset=dataset, settings=settings, elapsed_time=elapsed_time, deleted=False)
        self.analysis_id = self.db.insert(new_insert)

    def get_analysis_id(self):
        return self.analysis_id

    def update_elapsed_time_analysis(self, elapsed_time):
        self.db.session.query(Analysis).filter(Analysis.id == self.analysis_id).update({"elapsed_time": elapsed_time})
        self.db.session.commit()

    def save_hyper_parameters(self, hyper_parameters):
        new_insert = HyperParameter(hyper_parameters=hyper_parameters, analysis_id=self.analysis_id)
        return self.db.insert(new_insert)

    def save_neural_network(self, description, model_name):
        new_insert = NeuralNetwork(model_name=model_name, description=description, analysis_id=self.analysis_id)
        return self.db.insert(new_insert)

    def save_leakage_model(self, leakage_model):
        new_insert = LeakageModel(leakage_model=leakage_model, analysis_id=self.analysis_id)
        return self.db.insert(new_insert)

    def save_metric(self, data, key_byte, metric):
        for value in data:
            new_insert = Metric(value=value, key_byte=key_byte, metric=metric, analysis_id=self.analysis_id)
            self.db.insert(new_insert)

    def save_key_rank_json(self, values, key_byte, report_interval, metric):
        new_insert = KeyRank(values=values, key_byte=key_byte, report_interval=report_interval, metric=metric,
                             analysis_id=self.analysis_id)
        self.db.insert(new_insert)

    def save_success_rate_json(self, values, key_byte, report_interval, metric):
        new_insert = SuccessRate(values=values, key_byte=key_byte, report_interval=report_interval, metric=metric,
                                 analysis_id=self.analysis_id)
        self.db.insert(new_insert)

    def save_visualization(self, values, epoch, key_byte, report_interval, metric):
        new_insert = Visualization(values=values, epoch=epoch, key_byte=key_byte, report_interval=report_interval, metric=metric,
                                   analysis_id=self.analysis_id)
        self.db.insert(new_insert)

    def save_hyper_parameters_search(self, search_type, hyper_parameters, best_hyper_parameters):
        new_insert = HyperParameterSearch(search_type=search_type, hyper_parameters_settings=hyper_parameters,
                                          best_hyper_parameters=best_hyper_parameters, analysis_id=self.analysis_id)
        return self.db.insert(new_insert)

    def save_confusion_matrix(self, y_pred, y_true, key_byte):
        new_insert = ConfusionMatrix(y_pred=y_pred, y_true=y_true, key_byte=key_byte, analysis_id=self.analysis_id)
        self.db.insert(new_insert)

    def save_probability_rank(self, ranks, classes, correct_key_byte, key_guess, title, key_byte):
        new_insert = ProbabilityRank(ranks=ranks, classes=classes, correct_key_byte=correct_key_byte, key_guess=key_guess,
                                     title=title, key_byte=key_byte, analysis_id=self.analysis_id)
        self.db.insert(new_insert)
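A hypothetical usage of this class: the constructor creates the tables (if needed) and the Analysis row, and each save_* method attaches one artifact to that analysis.

inserts = ScaDatabaseInserts("databases/example.sqlite", "example.sqlite",
                             dataset="ascad.h5", settings={}, elapsed_time=0)
inserts.save_neural_network(description="def mlp(...): ...", model_name="mlp")
inserts.save_metric([0.51, 0.63, 0.70], key_byte=2, metric="accuracy")
inserts.update_elapsed_time_analysis(123.4)
print(inserts.get_analysis_id())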
Example #5
def delete_analysis(analysis_id, table_name):
    db = ScaDatabase(databases_root_folder + table_name)
    db.soft_delete_analysis_from_table(Analysis, analysis_id)

    return "ok"
Example #6
import os

import matplotlib.pyplot as plt
import numpy as np

# ScaDatabase, the table classes (Analysis, KeyRank, SuccessRate, Metric) and
# databases_root_folder come from the surrounding AISY web application.
def gen_plot(analysis_id, table_name, metric):
    db = ScaDatabase(databases_root_folder + table_name)

    analysis = db.select_analysis(Analysis, analysis_id)

    if metric == "Guessing_Entropy":
        result_key_byte = db.select_values_from_analysis_json(KeyRank, analysis_id)
    elif metric == "Success_Rate":
        result_key_byte = db.select_values_from_analysis_json(SuccessRate, analysis_id)
    else:
        result_key_byte = []
        all_metrics_names = db.select_metrics(Metric, analysis_id)

        if metric == "accuracy":
            for metric_name in all_metrics_names:
                if metric in metric_name:
                    if "grid_search" in analysis.settings or "random_search" in analysis.settings:
                        if "best" in metric_name:
                            result_key_byte.append(db.select_values_from_metric(Metric, metric_name, analysis_id)[0])
                    else:
                        result_key_byte.append(db.select_values_from_metric(Metric, metric_name, analysis_id)[0])
        elif metric == "loss":
            for metric_name in all_metrics_names:
                if metric in metric_name:
                    if "grid_search" in analysis.settings or "random_search" in analysis.settings:
                        if "best" in metric_name:
                            result_key_byte.append(db.select_values_from_metric(Metric, metric_name, analysis_id)[0])
                    else:
                        result_key_byte.append(db.select_values_from_metric(Metric, metric_name, analysis_id)[0])
        else:
            result_key_byte.append(db.select_values_from_metric(Metric, metric, analysis_id)[0])

    my_dpi = 100
    plt.figure(figsize=(800 / my_dpi, 600 / my_dpi), dpi=my_dpi)
    dir_analysis_id = "resources/figures/{}".format(analysis_id)
    if not os.path.exists(dir_analysis_id):
        os.makedirs(dir_analysis_id)
    if metric == "Guessing_Entropy" or metric == "Success_Rate":
        for res in result_key_byte:
            plt.plot(np.arange(res['report_interval'], (len(res['values']) + 1) * res['report_interval'], res['report_interval']),
                     res['values'],
                     label=res['metric'])
            plt.legend(loc='best', fontsize=13)
            plt.xlim([1, len(res['values']) * res['report_interval']])
            plt.ylabel(metric.replace("_", " "), fontsize=13)
            if metric == "Guessing_Entropy" or metric == "Success_Rate":
                plt.xlabel("Attack Traces", fontsize=13)
            else:
                plt.xlabel("Epochs", fontsize=13)
            plt.grid(ls='--')
        plt.savefig("{}/{}_{}_{}.png".format(dir_analysis_id, metric, analysis_id, table_name.replace(".sqlite", "")),
                    format="png")
    else:
        for res in result_key_byte:
            plt.plot(np.arange(1, len(res['values']) + 1, 1), res['values'], label=res['metric'])
            plt.legend(loc='best', fontsize=13)
            plt.xlim([1, len(res['values'])])
            plt.ylabel(metric.replace("_", " "), fontsize=13)
            plt.xlabel("Epochs", fontsize=13)
            plt.grid(ls='--')
        plt.savefig("{}/{}_{}_{}.png".format(dir_analysis_id, metric, analysis_id, table_name.replace(".sqlite", "")),
                    format="png")

    return "ok"
Example #7
from flask import render_template

# html is Dash's HTML components module; the dash_app_* objects, ScaViews,
# ScaDatabase and the table classes come from the surrounding AISY application.
def result(analysis_id, table_name):
    db = ScaDatabase(databases_root_folder + table_name)

    # get analysis information from database
    analysis = db.select_analysis(Analysis, analysis_id)

    sca_views = ScaViews(analysis_id, db)

    all_metric_plots = sca_views.metric_plots()

    all_accuracy_plots = sca_views.accuracy_plots()
    dash_app_accuracy.layout = html.Div(children=[all_accuracy_plots])
    all_loss_plots = sca_views.loss_plots()
    dash_app_loss.layout = html.Div(children=[all_loss_plots])
    if "ensemble" in analysis.settings:
        all_key_rank_plots = sca_views.ensemble_plots_key_rank()
    else:
        all_key_rank_plots = sca_views.key_rank_plots()
    dash_app_key_ranks.layout = html.Div(children=[all_key_rank_plots])
    if "ensemble" in analysis.settings:
        all_success_rate_plots = sca_views.ensemble_plots_success_rate()
    else:
        all_success_rate_plots = sca_views.success_rate_plots()
    dash_app_success_rates.layout = html.Div(children=[all_success_rate_plots])

    # get neural network information from database
    neural_network_model = db.select_from_analysis(NeuralNetwork, analysis_id)

    # get training hyper-parameter information from the database
    hyper_parameters = db.select_all_from_analysis(HyperParameter, analysis_id)
    training_hyper_parameters = []
    for hp in hyper_parameters:
        training_hyper_parameters.append(hp.hyper_parameters)

    hyper_parameter_search = []
    if len(hyper_parameters) > 1:
        hyper_parameter_search = db.select_from_analysis(HyperParameterSearch, analysis.id)

    # get leakage model information from database
    leakage_models = db.select_from_analysis(LeakageModel, analysis_id)
    leakage_model_parameters = leakage_models.leakage_model

    # get visualization plots
    all_visualization_plots = sca_views.visualization_plots()
    all_visualization_heatmap_plots = sca_views.visualization_plots_heatmap()

    # confusion matrix plot
    all_confusion_matrix_plots = sca_views.confusion_matrix_plots()

    return render_template("dashboard/result.html",
                           all_plots=all_metric_plots,
                           all_key_rank_plots=all_key_rank_plots,
                           all_success_rate_plots=all_success_rate_plots,
                           neural_network_description=neural_network_model.description,
                           training_hyper_parameters=training_hyper_parameters,
                           hyper_parameters=hyper_parameters,
                           hyper_parameter_search=hyper_parameter_search,
                           leakage_model_parameters=leakage_model_parameters,
                           all_visualization_plots=all_visualization_plots,
                           all_visualization_heatmap_plots=all_visualization_heatmap_plots,
                           all_confusion_matrix_plots=all_confusion_matrix_plots,
                           analysis=analysis)
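The Dash integration in result() boils down to assigning html.Div(children=[plots]) to each app's layout. A minimal standalone version of that pattern (the figure data is hypothetical):

from dash import Dash, dcc, html
import plotly.graph_objects as go

app = Dash(__name__)
fig = go.Figure(go.Scatter(y=[128, 60, 22, 5, 1], name="key rank"))
app.layout = html.Div(children=[dcc.Graph(figure=fig)])
# app.run_server()  # serves the dashboard locally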