# Example #1
# 0
def search(table_name):
    """Render the search dashboard, regenerating the HiPlot parallel-plot HTML.

    Collects the first hyper-parameter set of every analysis that has at
    least one final key rank, tags it with its guessing entropy, and writes
    an interactive HiPlot page to webapp/templates/hiplot.html.
    """
    db_path = databases_root_folder + table_name
    if not os.path.exists(db_path):
        return render_template("dashboard/search.html", analyses=[])

    db = ScaDatabase(db_path)
    analyses = []
    hp_rows = []

    for analysis in db.select_all(Analysis):
        key_ranks = db.select_final_key_rank_json_from_analysis(KeyRank, analysis.id)
        if not key_ranks:
            # Skip analyses that never produced a final key rank.
            continue
        hyper_parameters = db.select_from_analysis(HyperParameter, analysis.id)
        hp_entry = hyper_parameters.hyper_parameters[0]
        hp_entry['guessing_entropy'] = key_ranks[0][0]['key_rank']
        hp_rows.append(hp_entry)

    experiment = hip.Experiment().from_iterable(hp_rows)
    experiment.display_data(hip.Displays.PARALLEL_PLOT).update({
        'hide': ['uid', 'key_rank', 'key'],  # Hide some columns
        'order': ['guessing_entropy'],  # Put column time first on the left
    })
    experiment.validate()
    experiment.to_html("webapp/templates/hiplot.html")

    return render_template("dashboard/search.html", analyses=analyses)
# Example #2
# 0
def table():
    """Render an overview of all non-deleted analyses in every .sqlite database.

    Walks the database root folder for .sqlite files and builds, per file, a
    list of row dicts (id, formatted datetime, dataset, settings, elapsed
    time, final key ranks / success rates, neural network name) for the
    tables.html template.
    """
    # r=root, d=directories, f = files
    sqlite_files = [
        filename
        for _, _, files in os.walk(databases_root_folder)
        for filename in files
        if filename.endswith(".sqlite")
    ]

    all_tables = []
    all_tables_names = []

    for db_file in sqlite_files:
        db_path = databases_root_folder + db_file
        if not os.path.exists(db_path):
            continue

        db = ScaDatabase(db_path)
        rows = []

        for analysis in db.select_all(Analysis):
            if analysis.deleted:
                continue

            # Convert the stored timestamp to the configured local timezone.
            local_tz = pytz.timezone(os.getenv("TIME_ZONE"))
            formatted_datetime = datetime.strptime(
                str(analysis.datetime), "%Y-%m-%d %H:%M:%S.%f"
            ).astimezone(local_tz).strftime("%b %d, %Y %H:%M:%S")

            key_ranks = db.select_final_key_rank_json_from_analysis(KeyRank, analysis.id)
            success_rates = db.select_final_success_rate_from_analysis(SuccessRate, analysis.id)
            neural_network = db.select_from_analysis(NeuralNetwork, analysis.id)

            rows.append({
                "id": analysis.id,
                "datetime": formatted_datetime,
                "dataset": analysis.dataset,
                "settings": analysis.settings,
                "elapsed_time": time.strftime('%H:%M:%S', time.gmtime(analysis.elapsed_time)),
                "key_ranks": key_ranks,
                "success_rates": success_rates,
                "neural_network_name": "not ready" if neural_network is None else neural_network.model_name
            })

        all_tables.append(rows)
        all_tables_names.append(db_file)

    return render_template("tables.html", all_tables=all_tables, all_tables_names=all_tables_names)
def generate_script(script_filename, databases_root_folder, table_name,
                    analysis_id):
    """Regenerate a standalone AISY attack script from a stored analysis.

    Reads the analysis, neural network, hyper-parameters and leakage model
    for ``analysis_id`` from the database ``table_name`` and writes a
    runnable Python script to scripts/<script_filename>_<table>.py.

    FIX: the output file is now opened with a ``with`` block, so the handle
    is closed even if a database call or write raises mid-way (the original
    ``open()``/``close()`` pair leaked the file on any exception).
    """
    db = ScaDatabase(databases_root_folder + table_name)

    analysis = db.select_analysis(Analysis, analysis_id)
    neural_network_model = db.select_from_analysis(NeuralNetwork, analysis_id)

    # get training hyper-parameters information from database
    hyper_parameters = db.select_all_from_analysis(HyperParameter, analysis_id)
    hyper_parameters_single = hyper_parameters[0].hyper_parameters[0]

    # More than one hyper-parameter set means a hyper-parameter search was run.
    hyper_parameter_search = None
    if len(hyper_parameters) > 1:
        # Use analysis_id directly (== analysis.id) for consistency with the
        # other select_from_analysis calls in this function.
        hyper_parameter_search = db.select_from_analysis(
            HyperParameterSearch, analysis_id)

    leakage_models = db.select_from_analysis(LeakageModel, analysis_id)
    leakage_model_parameters = leakage_models.leakage_model[0]

    script_path = "scripts/{}_{}.py".format(script_filename,
                                            table_name.replace(".sqlite", ""))
    with open(script_path, "w+") as script_py_file:
        script_py_file.write("from tensorflow.keras.optimizers import *\n")
        script_py_file.write("from tensorflow.keras.layers import *\n")
        script_py_file.write("from tensorflow.keras.models import *\n")
        script_py_file.write("from aisy.sca_deep_learning_aes import AisyAes\n")

        script_py_file.write('\naisy = AisyAes()')
        script_py_file.write('\naisy.set_dataset("{}")'.format(analysis.dataset))
        script_py_file.write('\naisy.set_database_name("{}")'.format(table_name))

        # Emit the leakage model as keyword arguments; strings are quoted.
        script_py_file.write('\naisy.set_aes_leakage_model(')
        for index, key in enumerate(leakage_model_parameters):
            if isinstance(leakage_model_parameters[key], str):
                script_py_file.write('{}="{}"'.format(
                    key, leakage_model_parameters[key]))
            else:
                script_py_file.write('{}={}'.format(key,
                                                    leakage_model_parameters[key]))
            if index < len(leakage_model_parameters) - 1:
                script_py_file.write(', ')
        script_py_file.write(')')

        script_py_file.write('\naisy.set_key("{}")'.format(
            hyper_parameters_single['key']))
        script_py_file.write('\naisy.set_number_of_profiling_traces({})'.format(
            hyper_parameters_single['profiling_traces']))
        script_py_file.write('\naisy.set_number_of_attack_traces({})'.format(
            hyper_parameters_single['attack_traces']))
        script_py_file.write('\naisy.set_first_sample({})'.format(
            hyper_parameters_single['first_sample']))
        script_py_file.write('\naisy.set_number_of_samples({})'.format(
            hyper_parameters_single['number_of_samples']))
        script_py_file.write('\naisy.set_batch_size({})'.format(
            hyper_parameters_single['batch_size']))
        script_py_file.write('\naisy.set_epochs({})'.format(
            hyper_parameters_single['epochs']))

        if len(hyper_parameters) == 1:
            # Single model: inline its source and register it by name.
            script_py_file.write('\n\n\n{}'.format(
                neural_network_model.description))
            script_py_file.write('\n\naisy.set_neural_network({})'.format(
                neural_network_model.model_name))
        else:
            # Hyper-parameter search: emit its configuration dict instead.
            if "grid_search" in analysis.settings:
                script_py_file.write('\ngrid_search = {}'.format(
                    analysis.settings["grid_search"]))
            if "random_search" in analysis.settings:
                script_py_file.write('\nrandom_search = {}'.format(
                    analysis.settings["random_search"]))
        if "early_stopping" in analysis.settings:
            script_py_file.write('\nearly_stopping = {}'.format(
                analysis.settings["early_stopping"]))

        script_py_file.write('\n\naisy.run(')
        script_py_file.write('\n    key_rank_executions={},'.format(
            analysis.settings["key_rank_executions"]))
        script_py_file.write('\n    key_rank_report_interval={},'.format(
            analysis.settings["key_rank_report_interval"]))
        script_py_file.write('\n    key_rank_attack_traces={},'.format(
            analysis.settings["key_rank_attack_traces"]))
        if "early_stopping" in analysis.settings:
            script_py_file.write('\n    early_stopping=early_stopping,')
        if "ensemble" in analysis.settings:
            script_py_file.write('\n    ensemble=[{}],'.format(
                analysis.settings["ensemble"]))
        if len(hyper_parameters) == 1:
            script_py_file.write('\n)\n')
        else:
            if hyper_parameter_search.search_type == "Grid Search":
                script_py_file.write('\n    grid_search=grid_search')
            else:
                script_py_file.write('\n    random_search=random_search')
            script_py_file.write('\n)\n')
# Example #4
# 0
def result(analysis_id, table_name):
    """Render the result dashboard for one analysis.

    Loads the analysis from the database, populates the Dash sub-apps
    (accuracy, loss, key rank, success rate) with freshly built plots, and
    passes the remaining plots and metadata to dashboard/result.html.
    """
    db = ScaDatabase(databases_root_folder + table_name)

    # get neural network information from database
    analysis = db.select_analysis(Analysis, analysis_id)

    sca_views = ScaViews(analysis_id, db)

    all_metric_plots = sca_views.metric_plots()

    dash_app_accuracy.layout = html.Div(children=[sca_views.accuracy_plots()])
    dash_app_loss.layout = html.Div(children=[sca_views.loss_plots()])

    # Ensemble analyses get dedicated ensemble plot variants.
    is_ensemble = "ensemble" in analysis.settings

    all_key_rank_plots = (sca_views.ensemble_plots_key_rank() if is_ensemble
                          else sca_views.key_rank_plots())
    dash_app_key_ranks.layout = html.Div(children=[all_key_rank_plots])

    all_success_rate_plots = (sca_views.ensemble_plots_success_rate() if is_ensemble
                              else sca_views.success_rate_plots())
    dash_app_success_rates.layout = html.Div(children=[all_success_rate_plots])

    # get neural network information from database
    neural_network_model = db.select_from_analysis(NeuralNetwork, analysis_id)

    # get training hyper-parameters information from database
    hyper_parameters = db.select_all_from_analysis(HyperParameter, analysis_id)
    training_hyper_parameters = [hp.hyper_parameters for hp in hyper_parameters]

    hyper_parameter_search = []
    if len(hyper_parameters) > 1:
        hyper_parameter_search = db.select_from_analysis(HyperParameterSearch, analysis.id)

    # get leakage model information from database
    leakage_model_parameters = db.select_from_analysis(LeakageModel, analysis_id).leakage_model

    # get visualization plots
    all_visualization_plots = sca_views.visualization_plots()
    all_visualization_heatmap_plots = sca_views.visualization_plots_heatmap()

    # confusion matrix plot
    all_confusion_matrix_plots = sca_views.confusion_matrix_plots()

    return render_template("dashboard/result.html",
                           all_plots=all_metric_plots,
                           all_key_rank_plots=all_key_rank_plots,
                           all_success_rate_plots=all_success_rate_plots,
                           neural_network_description=neural_network_model.description,
                           training_hyper_parameters=training_hyper_parameters,
                           hyper_parameters=hyper_parameters,
                           hyper_parameter_search=hyper_parameter_search,
                           leakage_model_parameters=leakage_model_parameters,
                           all_visualization_plots=all_visualization_plots,
                           all_visualization_heatmap_plots=all_visualization_heatmap_plots,
                           all_confusion_matrix_plots=all_confusion_matrix_plots,
                           analysis=analysis)