Example #1
def print_real_circuits_info(path_csv_test, path_csv_test_real):
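    # Reads the feature list from the test CSV, re-synthesizes each benchmark
    # circuit with Yosys, computes its reliability and parameters, and writes
    # the combined rows to path_csv_test_real.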
    test = pd.read_csv(path_csv_test)
    features = get_features(test)

    if True:  # use the LGSynth89 benchmark circuits
        ckt_path = os.path.join(get_project_directory(), 'circuits',
                                'LGSynth89')
        ckt_list = [
            '5xp1.txt', 'alu2_synth.txt', 'alu4_synth.txt', 'cm138a_synth.txt',
            'cu_synth.txt', 'f51m_synth.txt', 'misex1.txt', 'misex3.txt',
            'misex3c.txt', 'x2_synth.txt'
        ]
    if False:  # alternatively, use the generated circuits
        ckt_path = os.path.join(get_project_directory(), 'temp',
                                'machine_learning')
        ckt_list = [
            'ckt-19999.txt', 'ckt-19998.txt', 'ckt-19997.txt', 'ckt-19996.txt',
            'ckt-19995.txt', 'ckt-19994.txt', 'ckt-19993.txt', 'ckt-19992.txt',
            'ckt-19991.txt', 'ckt-19990.txt'
        ]
    ckt_list_path = []
    for ckt in ckt_list:
        ckt_list_path.append(os.path.join(ckt_path, ckt))

    total = 0
    spval = dict()
    rel = dict()
    params = dict()
    for cp in ckt_list_path:
        ckt_init = sa.read_scheme(cp)
        ckt = create_circuit_external_yosys(ckt_init)
        if ckt is None:
            print('Problem [Yosys synthesis failed]')
            exit()
        if check_for_bufs(ckt) == 1:
            print('Problem [BUFs]')
            exit()
        if check_ouputs_connected(ckt) == 0:
            print('Problem [Output connections]')
            exit()
        print('Success')

        (reliability,
         vulnerability_map) = external_vulnerability_map(ckt, 10000)
        rel[total] = reliability
        params[total] = get_ckt_parameters(ckt)
        spval[total] = nt.singlepass_method_lk(ckt)
        params[total]['single_pass_value'] = spval[total]
        for f in features:
            if f not in params[total].keys():
                params[total][f] = -1
        total += 1
    print_resulted_csv(params, rel, path_csv_test_real, 0, len(params))
def goEspresso_external(tt, cir, exact=True):
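    # Writes the truth table tt to a temporary file, runs the external ESPRESSO
    # minimizer on it and parses the minimized cover into a dict that maps each
    # output index to its list of input cubes ('-' replaced with 'X').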
    dfile = get_project_directory()
    tt_file = os.path.join(dfile, "temp", "espresso-tt.txt")
    if exact:
        mode = '-Dexact'
    else:
        mode = ''
    write_truth_table_to_espresso(tt, cir, tt_file)

    ostype = "win32"
    exe = os.path.join(dfile, "utils", "bin", ostype, "espresso",
                       "espresso.exe") + " " + mode + " " + tt_file
    try:
        ret = subprocess.check_output(exe, shell=True).decode('UTF-8')
    except subprocess.CalledProcessError:
        print('ESPRESSO FAILED')
        return None
    lines = ret.splitlines()
    rows = lines[5:5 + int(lines[4][2:])]
    ins = []
    outs = []
    for row in rows:
        [i, o] = row.split(' ')
        ins.append(i.replace('-', 'X'))
        outs.append(o)
    data = {i: [] for i in range(cir.outputs())}
    for i in range(len(outs)):
        for j in range(cir.outputs()):
            if outs[i][j] == '1':
                data[j].append(ins[i])

    return data
Example #3
def find_ckts_chromosomes(num):
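    # Converts each generated circuit into its chromosome encoding, saves all
    # encodings to chromos.json and returns the maximum encoding length
    # together with the chromosome dictionary.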
    total = 0
    print('Calc CKT Chromosomes...')
    max_len = 0
    chromos = dict()
    path_json = os.path.join(get_project_directory(), 'temp',
                             'machine_learning', 'chromos.json')
    while total < num:
        path_opt = os.path.join(get_project_directory(), 'temp',
                                'machine_learning', 'ckt-{}.txt'.format(total))
        ckt = sa.read_scheme(path_opt)
        chromos[total] = sch2chromo(ckt)
        if len(chromos[total][1]) > max_len:
            max_len = len(chromos[total][1])
        total += 1
    save_json(path_json, chromos)
    return max_len, chromos
Example #4
def find_ckts_singlepass_values(params, num):
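    # Computes (or loads previously cached) single-pass reliability estimates
    # for each generated circuit, stores them in sp_values.json and copies each
    # value into params[total]['single_pass_value'].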
    total = 0
    print('Calc CKT Single Pass Values...')
    spval = dict()
    path_json = os.path.join(get_project_directory(), 'temp',
                             'machine_learning', 'sp_values.json')
    if os.path.isfile(path_json):
        spval = load_json(path_json)
    while total < num:
        path_opt = os.path.join(get_project_directory(), 'temp',
                                'machine_learning', 'ckt-{}.txt'.format(total))
        ckt = sa.read_scheme(path_opt)
        if total not in spval:
            spval[total] = nt.singlepass_method_lk(ckt)
        params[total]['single_pass_value'] = spval[total]
        total += 1

    save_json(path_json, spval)
    return spval
Example #5
def find_ckts_parameters(num):
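    # Computes (or loads previously cached) structural parameters for each
    # generated circuit; the result is saved to parameters.json on the first run.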
    total = 0
    print('Calc CKT Parameters...')
    params = dict()
    path_json = os.path.join(get_project_directory(), 'temp',
                             'machine_learning', 'parameters.json')
    if os.path.isfile(path_json):
        params = load_json3(path_json)
    while total < num:
        path_opt = os.path.join(get_project_directory(), 'temp',
                                'machine_learning', 'ckt-{}.txt'.format(total))
        ckt = sa.read_scheme(path_opt)
        if total not in params:
            params[total] = get_ckt_parameters(ckt)
        total += 1

    if not os.path.isfile(path_json):
        save_json(path_json, params)
    return params
Example #6
def find_reliability_values(num):
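    # Computes (or loads previously cached) reliability values for each
    # generated circuit using external_vulnerability_map with 10000 trials and
    # caches the results in reliability.json.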
    total = 0
    rel = dict()
    path_json = os.path.join(get_project_directory(), 'temp',
                             'machine_learning', 'reliability.json')
    if os.path.isfile(path_json):
        rel = load_json(path_json)
    while total < num:
        if total in rel:
            print(
                'Reliability for test {} already exists: {}. Skipping!'.format(
                    total, rel[total]))
            total += 1
            continue
        file_name = os.path.join(get_project_directory(), 'temp',
                                 'machine_learning',
                                 'ckt-{}.txt'.format(total))
        ckt = sa.read_scheme(file_name)
        (reliability,
         vulnerability_map) = external_vulnerability_map(ckt, 10000)
        rel[total] = reliability
        total += 1
    save_json(path_json, rel)
    return rel
Example #7
def generate_ckts(num):
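    # Generates num random circuits with randomized input, output and level
    # counts, minimizes each one with Yosys and writes both the initial and the
    # minimized netlist to the machine_learning temp directory.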
    total = 0
    min_inputs = 6
    max_inputs = 16
    min_outputs = 6
    max_outputs = 16
    min_level = 16
    max_level = 50
    while total < num:
        path_init = os.path.join(get_project_directory(), 'temp',
                                 'machine_learning',
                                 'ckt-{}-initial.txt'.format(total))
        path_opt = os.path.join(get_project_directory(), 'temp',
                                'machine_learning', 'ckt-{}.txt'.format(total))
        if os.path.isfile(path_opt):
            print('File ckt-{}.txt already exists. Skipping!'.format(total))
            total += 1
            continue
        inp_num = random.randint(min_inputs, max_inputs)
        out_num = random.randint(min_outputs, max_outputs)
        level_num = random.randint(min_level, max_level)
        print('Generate ckt. I: {} O: {} L: {}'.format(inp_num, out_num,
                                                       level_num))
        ckt = generate_random_ckt(inp_num, out_num, level_num)
        minim = create_circuit_external_yosys(ckt)
        if minim is not None:
            if check_for_bufs(minim) == 1:
                print('Problem [BUFs]')
                continue
            if check_ouputs_connected(minim) == 0:
                print('Problem [Output connections]')
                continue
            print('Success')
            ckt.print_circuit_in_file(path_init)
            minim.print_circuit_in_file(path_opt)
            total += 1
def create_circuit_external_yosys(circuit):
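    # Dumps the circuit to Verilog, runs the bundled win32 Yosys binary on it,
    # converts the synthesized netlist back into the internal format and
    # returns the new circuit, or None if any step fails.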
    dfile = get_project_directory()
    run_path = os.path.join(dfile, "utils", "bin", "win32", "yosys")
    yosys_exe = os.path.join(run_path, "yosys.exe")
    circuit_file = os.path.join(dfile, "temp", "tmp_sheme_yosys.v")
    run_file = os.path.join(dfile, "temp", "tmp_runfile_yosys.txt")
    synth_file = os.path.join(dfile, "temp", "tmp_synth.v")
    converted_circuit_file = os.path.join(dfile, "temp", "tmp_synth_conv.txt")
    graph_file = os.path.join(dfile, "temp", "synth.svg")
    debug_file = os.path.join(dfile, "temp", "yosys_fail.txt")

    if os.path.isfile(circuit_file):
        os.remove(circuit_file)
    if os.path.isfile(run_file):
        os.remove(run_file)
    if os.path.isfile(synth_file):
        os.remove(synth_file)
    if os.path.isfile(converted_circuit_file):
        os.remove(converted_circuit_file)

    print_circuit_in_verilog_file(circuit, "circ", circuit_file)
    print_run_file(run_file, circuit_file, synth_file, graph_file)
    #print_run_file_opt(run_file, circuit_file, synth_file, graph_file)
    exe = yosys_exe + " < " + run_file
    try:
        ret = subprocess.check_output(exe, shell=True, cwd=run_path).decode('UTF-8')
    except subprocess.CalledProcessError:
        ret = 'Error'

    if not os.path.isfile(synth_file):
        # If Yosys failed, dump the circuit for later debugging
        circuit.print_circuit_in_file(debug_file)
        print('Yosys error')
        return None

    convert_file_to_relic_format(circuit, synth_file, converted_circuit_file)
    if not os.path.isfile(converted_circuit_file):
        return None
    new_ckt = sa.read_scheme(converted_circuit_file)
    return new_ckt
Example #9
def run_xgboost(train_csv, test_csv, xgb_model):
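    # Trains (or loads a previously saved) XGBoost regression model that
    # predicts circuit reliability from the extracted features, evaluates it on
    # a validation split and on the test set, and writes prediction diagnostics.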

    # Seed for the random number generator (may be changed between runs)
    random_state = 51
    # Fraction of the data held out for validation (here 10%)
    test_size = 0.1
    # Maximum number of boosting iterations
    num_boost_round = 100
    # Stop training after this many iterations without improvement
    early_stopping_rounds = 100
    # XGBoost parameters
    eta = 0.2
    max_depth = 5
    subsample = 0.95
    colsample_bytree = 0.9

    print("Load train.csv")
    train = pd.read_csv(train_csv)
    features = get_features(train)
    print('Features: ' + str(features))
    print("Load test.csv")
    test = pd.read_csv(test_csv)

    print("Split train")
    X_train, X_valid = train_test_split(train,
                                        test_size=test_size,
                                        random_state=random_state)
    y_train = X_train['reliability']
    y_valid = X_valid['reliability']

    if os.path.isfile(xgb_model):
        print(
            'Model already exists: {}. Delete it if you want to recalculate.'.
            format(xgb_model))
        gbm = xgb.Booster()  # init an empty model
        gbm.load_model(xgb_model)  # load the saved model
    else:
        params = {
            "objective": "reg:linear",
            # "eval_metric": "rmse",
            "eta": eta,
            "max_depth": max_depth,
            "silent": 1,
            "subsample": subsample,
            "colsample_bytree": colsample_bytree,
            "seed": random_state
        }

        dtrain = xgb.DMatrix(X_train[features], y_train)
        dvalid = xgb.DMatrix(X_valid[features], y_valid)

        watchlist = [(dtrain, 'train'), (dvalid, 'eval')]
        gbm = xgb.train(params,
                        dtrain,
                        num_boost_round,
                        evals=watchlist,
                        feval=diffValid_xg,
                        early_stopping_rounds=early_stopping_rounds,
                        verbose_eval=True)

        print('Saving model...')
        gbm.save_model(xgb_model)

    gbm.dump_model(xgb_model + '.raw.txt')

    print("Validating")
    if hasattr(gbm, 'best_ntree_limit'):
        yhat = gbm.predict(xgb.DMatrix(X_valid[features]),
                           ntree_limit=gbm.best_ntree_limit)
    else:
        yhat = gbm.predict(xgb.DMatrix(X_valid[features]))
    correct = rmse(y_valid.values, yhat)
    print('RMSE value: {:.6f}'.format(correct))

    mean = statistics.mean(train['reliability'])
    stdev = statistics.stdev(train['reliability'])
    rel_min = train['reliability'].min()
    rel_max = train['reliability'].max()
    print('Reliability mean: ' + str(mean))
    print('Reliability stdev: ' + str(stdev))
    print('Reliability range from {} to {}'.format(rel_min, rel_max))

    diff = y_valid.values - yhat
    mean = statistics.mean(diff)
    stdev = statistics.stdev(diff)
    diff_min = diff.min()
    diff_max = diff.max()
    min_pos = diff.tolist().index(diff.min())
    max_pos = diff.tolist().index(diff.max())
    print('Critical test min: {}, {}, {}, {}'.format(
        min_pos, X_valid['id'].iloc[min_pos],
        X_valid['reliability'].iloc[min_pos],
        X_valid['reliability'].iloc[min_pos] - yhat[min_pos]))
    print('Critical test max: {}, {}, {}, {}'.format(
        max_pos, X_valid['id'].iloc[max_pos],
        X_valid['reliability'].iloc[max_pos],
        X_valid['reliability'].iloc[max_pos] - yhat[max_pos]))
    print('Prediction difference mean: ' + str(mean))
    print('Prediction difference stdev: ' + str(stdev))
    print('Prediction difference range from {} to {}'.format(diff_min, diff_max))
    print('Max difference percent: {} %'.format(
        findMaximumError(yhat, y_valid.values)))

    print_importance(features, gbm)
    png_imp_file_path = os.path.join(get_project_directory(), 'temp',
                                     'machine_learning',
                                     'feature_importance_plot.png')
    create_feature_importance_image(features, gbm, png_imp_file_path, 30)

    if hasattr(gbm, 'best_ntree_limit'):
        final_prediction = gbm.predict(xgb.DMatrix(test[features]),
                                       ntree_limit=gbm.best_ntree_limit)
    else:
        final_prediction = gbm.predict(xgb.DMatrix(test[features]))
    correct = rmse(test['reliability'].values, final_prediction)

    print('Prediction on test set. RMSE value: {:.6f}'.format(correct))
    diff = test['reliability'].values - final_prediction
    mean = statistics.mean(diff)
    stdev = statistics.stdev(diff)
    diff_min = diff.min()
    diff_max = diff.max()
    print('Prediction difference mean: ' + str(mean))
    print('Prediction difference stdev: ' + str(stdev))
    print('Prediction difference range from {} to {}'.format(diff_min, diff_max))
    print('Max difference percent: {} %'.format(
        findMaximumError(final_prediction, test['reliability'].values)))

    prediction_file_path = os.path.join(get_project_directory(), 'temp',
                                        'machine_learning',
                                        'last_prediction.json')
    save_json(prediction_file_path, final_prediction.tolist())

    diff = (test['reliability'] - final_prediction) / test['reliability']
    diff *= 100
    png_hist_file_path = os.path.join(get_project_directory(), 'temp',
                                      'machine_learning', 'diff_histogram.png')
    create_histogram(diff.tolist(), png_hist_file_path)

    return gbm
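
The helpers rmse and findMaximumError used in the examples above are not part of this listing. Below is a minimal sketch of what they plausibly compute (root-mean-square error, and the largest relative error in percent); this is an assumption about their behavior, not the project's actual code:

import numpy as np

def rmse(y_true, y_pred):
    # Root-mean-square error between two equally sized arrays.
    y_true = np.asarray(y_true, dtype=float)
    y_pred = np.asarray(y_pred, dtype=float)
    return float(np.sqrt(np.mean((y_true - y_pred) ** 2)))

def findMaximumError(y_pred, y_true):
    # Largest absolute relative deviation between prediction and truth, in percent.
    y_true = np.asarray(y_true, dtype=float)
    y_pred = np.asarray(y_pred, dtype=float)
    return float(np.max(np.abs((y_pred - y_true) / y_true)) * 100)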
Example #10
def debug_ckt():
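    # Quick sanity check: loads ckt-2.txt and prints its estimated reliability.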
    file_name = os.path.join(get_project_directory(), 'temp',
                             'machine_learning', 'ckt-{}.txt'.format(2))
    ckt = sa.read_scheme(file_name)
    (reliability, vulnerability_map) = external_vulnerability_map(ckt, 10000)
    print(reliability)
Example #11
    test = pd.read_csv(test_csv)
    features = get_features(test)
    print('Features: ' + str(features))
    yhat = gbm.predict(xgb.DMatrix(test[features]))
    correct = rmse(test['reliability'], yhat)
    real = test['reliability'].tolist()
    for i in range(len(yhat)):
        perc = 100 * (yhat[i] - real[i]) / real[i]
        print("Real: {} Predicted: {} Diff: {}%".format(
            round(real[i], 2), round(yhat[i], 2), round(perc, 2)))
    print('RMSE value: {:.6f}'.format(correct))


# debug_ckt()
random.seed(1)
path_csv_train = os.path.join(get_project_directory(), 'temp',
                              'machine_learning', 'CSV', '!train.csv')
path_csv_test = os.path.join(get_project_directory(), 'temp',
                             'machine_learning', 'CSV', '!test.csv')
path_csv_test_real_circ = os.path.join(get_project_directory(), 'temp',
                                       'machine_learning', 'CSV',
                                       '!test_real.csv')
xgb_model = os.path.join(get_project_directory(), 'temp', 'machine_learning',
                         'models', 'model.xgb')

if False:  # one-off pipeline: generate circuits and compute their parameters and labels
    test_num = 20000
    # generate_ckts(test_num)
    rel = find_reliability_values(test_num)
    params = find_ckts_parameters(test_num)
    sp_values = find_ckts_singlepass_values(params, test_num)