Example no. 1
def get_network(name, **kwargs):
    if name == 'resnet32':
        return Resnet32Model(**kwargs)
    elif name == 'mlp':
        return MultiLayerPerceptron(**kwargs)
    else:
        raise NotImplementedError(name)
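A hypothetical call site for the factory above; the keyword arguments are illustrative, since the constructor signatures are not shown in this snippet:

# Hypothetical usage: the kwargs below are assumptions, not from the source.
mlp = get_network('mlp', fan_in=28 * 28, n_hidden=500, fan_out=10)
resnet = get_network('resnet32', num_classes=10)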
Example no. 2
    def train_multilayer_perceptron(self,
                                    distribution,
                                    fan_in,
                                    n_hidden,
                                    fan_out,
                                    learning_rate=0.012):

        # symbolic minibatch inputs (assumes: import theano; import theano.tensor as tt)
        X = tt.fmatrix('X')
        y = tt.fmatrix('y')

        classifier = MultiLayerPerceptron(X, distribution, fan_in, n_hidden,
                                          fan_out)

        # negative log-likelihood plus regularisation; the L1 term is
        # multiplied by 0.00, so only the L2 penalty (1e-4) is active
        cost = (classifier.neg_log_like(y) + 0.00 * classifier.L1 +
                0.0001 * classifier.L2_sqr)

        # gradient of the cost w.r.t. each parameter, and the matching
        # plain-SGD update rules
        gparams = [
            tt.grad(cost=cost, wrt=param) for param in classifier.params
        ]

        updates = [(param, param - learning_rate * gparam)
                   for param, gparam in zip(classifier.params, gparams)]

        train = theano.function(inputs=[X, y],
                                outputs=cost,
                                updates=updates,
                                allow_input_downcast=True)

        test = theano.function(inputs=[X, y],
                               outputs=classifier.errors(y),
                               allow_input_downcast=True)

        validate = theano.function(inputs=[X, y],
                                   outputs=classifier.errors(y),
                                   allow_input_downcast=True)

        predict = theano.function(inputs=[X],
                                  outputs=classifier.logRegressionLayer.y_pred,
                                  allow_input_downcast=True)

        self.early_stopping(classifier, train, test, validate, predict)
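The compiled train/validate functions are consumed inside self.early_stopping, which is not shown. A minimal sketch of the minibatch loop it presumably runs; X_train, y_train, X_val, y_val, n_epochs and batch_size are assumed names:

# Sketch only: the real loop lives in self.early_stopping (not shown above).
for epoch in range(n_epochs):
    for start in range(0, len(X_train), batch_size):
        batch_cost = train(X_train[start:start + batch_size],
                           y_train[start:start + batch_size])
    val_error = validate(X_val, y_val)  # stop once this stops improving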
Example no. 3
def create_model(ema=False):
    model = Model(28*28)
    if ema:
        # detach every parameter: the EMA (teacher) copy must receive no
        # gradients; its weights are updated externally instead
        for param in model.parameters():
            param.detach_()
    return model
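Detaching the parameters like this is typical of Mean Teacher-style training, where the ema copy is maintained as an exponential moving average of the student's weights instead of by backpropagation. A sketch of such an update step; the helper name and the alpha value are assumptions, not from the source:

# Sketch: keep the detached copy as an EMA of the student's parameters.
def update_ema(student, ema_model, alpha=0.999):
    for ema_p, p in zip(ema_model.parameters(), student.parameters()):
        ema_p.data.mul_(alpha).add_(p.data, alpha=1 - alpha)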
Example no. 4
def repeat_experiment_n_times(lstm,
                              rf,
                              xg_reg,
                              scenario,
                              times_to_repeat=100,
                              adversarial_attack=False,
                              evasion_attack=False,
                              is_white_box_attack=True,
                              use_lstm_for_adversarial=False):
    tn_s = []
    tp_s = []
    fp_s = []
    fn_s = []
    f1_s = []
    balanced_accuracies = []
    precisions = []
    recalls = []
    aucpr_s = []
    roc_aucs = []

    num_decisions_taken_by_lstm = 0
    num_decisions_taken_by_rf = 0
    num_decisions_taken_by_xgb = 0
    num_decisions_correctly_taken_from_lstm = 0
    num_decisions_correctly_taken_from_lstm_and_not_from_xgb_or_rf = 0

    # a while loop (not for) so that a failed iteration can be retried, see below
    i = 0
    while i < times_to_repeat:
        print("Iteration", i)
        x_test_set, y_test_set = sequences_crafting_for_classification.get_test_set(
            scenario=scenario)

        x_val, y_val, x_test, y_test = evaluation.get_val_test_set(
            x_test_set, y_test_set, val_size=0.25)
        x_val_supervised = x_val[:, len(x_val[0]) - 1, :]
        x_test_supervised = x_test[:, len(x_test[0]) - 1, :]

        if adversarial_attack or evasion_attack:
            # getting train set for training
            if is_white_box_attack:
                print("Using as training set, the real one - whitebox attack")
                dataset_type = REAL_DATASET
            else:
                print("Using as training set, the old one - blackbox attack")
                dataset_type = OLD_DATASET

            x_train, y_train = sequences_crafting_for_classification.get_train_set(
                dataset_type=dataset_type)
            x_train_supervised = x_train[:, look_back, :]
            if adversarial_attack:
                print("Crafting an adversarial attack")
                if not use_lstm_for_adversarial:
                    print("The attacker will use a Multilayer perceptron")
                    # training multilayer perceptron
                    # todo: hyper param tuning multilayer perceptron
                    adversarial_model = MultiLayerPerceptron.create_fit_model(
                        x_train_supervised, y_train)
                    # crafting adversarial samples
                    x_test_supervised = x_test[:, len(x_test[0]) - 1, :]
                    frauds = x_test_supervised[np.where(y_test == 1)]

                    adversarial_samples = fgsm.craft_sample(frauds,
                                                            adversarial_model,
                                                            epsilon=0.01)

                    x_test[np.where(y_test == 1),
                           len(x_test[0]) - 1] = adversarial_samples
                    x_test_supervised = x_test[:, len(x_test[0]) - 1, :]
                else:
                    print("The attacker will use a LSTM network")
                    # train the network using the right params
                    if is_white_box_attack:
                        if USING_AGGREGATED_FEATURES:
                            params = BEST_PARAMS_LSTM_REAL_DATASET_AGGREGATED
                        else:
                            params = BEST_PARAMS_LSTM_REAL_DATASET_NO_AGGREGATED
                    else:
                        if USING_AGGREGATED_FEATURES:
                            params = BEST_PARAMS_LSTM_OLD_DATASET_AGGREGATED
                        else:
                            params = BEST_PARAMS_LSTM_OLD_DATASET_NO_AGGREGATED
                    adversarial_model = LSTM_classifier.create_fit_model(
                        x_train, y_train, look_back, params=params)
                    frauds = x_test[np.where(y_test == 1)]
                    adversarial_samples = fgsm.craft_sample(frauds,
                                                            adversarial_model,
                                                            epsilon=0.01)
                    x_test[np.where(y_test == 1)] = adversarial_samples
                    x_test_supervised = x_test[:, len(x_test[0]) - 1, :]

            if evasion_attack:
                print("Crafting an evasion attack")
                # train the network using the right params
                if is_white_box_attack:
                    if USING_AGGREGATED_FEATURES:
                        params = BEST_PARAMS_RF_REAL_DATASET_AGGREGATED
                    else:
                        params = BEST_PARAMS_RF_REAL_DATASET_NO_AGGREGATED
                else:
                    if USING_AGGREGATED_FEATURES:
                        params = BEST_PARAMS_RF_OLD_DATASET_AGGREGATED
                    else:
                        params = BEST_PARAMS_RF_OLD_DATASET_NO_AGGREGATED
                # training the oracle
                oracle = RF.create_model(x_train_supervised,
                                         y_train,
                                         params=params)

                # get the oracle threshold
                y_val_pred_oracle = oracle.predict_proba(x_val_supervised)
                oracle_threshold = evaluation.find_best_threshold_fixed_fpr(
                    y_val, y_val_pred_oracle[:, 1])

                # if the oracle predicts the fraud as fraud -> discard it, otherwise inject in real bank system
                # the freshly trained oracle (not the defender's rf) filters the frauds
                y_pred_oracle = oracle.predict_proba(x_test_supervised)
                y_pred_oracle = y_pred_oracle[:, 1].ravel()
                y_pred_oracle = np.array(
                    evaluation.adjusted_classes(y_pred_oracle,
                                                oracle_threshold))

                x_test = x_test[(np.where((
                    (y_test == 1) & (y_pred_oracle == 0)) | (y_test == 0)))]
                y_test = y_test[(np.where((
                    (y_test == 1) & (y_pred_oracle == 0)) | (y_test == 0)))]
                x_test_supervised = x_test[:, len(x_test[0]) - 1, :]
        try:
            # a, b, c, d, e = 0, 0, 0, 0, 0
            # y_test_pred, not_by_xgb, not_by_rf, not_found_by_others = predict_test_based_on_voting(lstm, rf, xg_reg, x_val, x_val_supervised, y_val, x_test, x_test_supervised, y_test)
            y_test_pred, a, b, c, d, e = predict_test_based_on_more_confident(
                lstm, rf, xg_reg, x_val, x_val_supervised, y_val, x_test,
                x_test_supervised, y_test)
            # y_test_pred, a, b, c, d, e = predict_test_based_on_expon(lstm, rf, xg_reg, x_val, x_val_supervised, y_val, x_test, x_test_supervised, y_test)
            # y_test_pred = predict_test_based_on_sum(lstm, rf, xg_reg, x_val, x_val_supervised, y_val, x_test, x_test_supervised)
            # y_test_pred = predict_test_based_on_more_confident_and_majority_voting(lstm, rf, xg_reg, x_val, x_val_supervised, y_val, x_test, x_test_supervised, y_test)
            # not_found_by_xgboost += not_by_xgb
            # not_by_rf += not_by_rf
            # not_found_by_others += not_by_others

            y_test_pred = np.array(y_test_pred)
            confusion, f1, balanced_accuracy, precision, recall, aucpr, roc_auc = evaluation.get_performance(
                y_test, y_test_pred, threshold=True)
            tn = confusion[0, 0]
            tp = confusion[1, 1]
            fp = confusion[0, 1]
            fn = confusion[1, 0]

            tn_s.append(tn)
            tp_s.append(tp)
            fp_s.append(fp)
            fn_s.append(fn)
            f1_s.append(f1)

            num_decisions_taken_by_lstm += a
            num_decisions_taken_by_rf += b
            num_decisions_taken_by_xgb += c
            num_decisions_correctly_taken_from_lstm += d
            num_decisions_correctly_taken_from_lstm_and_not_from_xgb_or_rf += e

            balanced_accuracies.append(balanced_accuracy)
            precisions.append(precision)
            recalls.append(recall)
            aucpr_s.append(aucpr)
            roc_aucs.append(roc_auc)
        except RuntimeError:
            # retry this iteration without counting it
            continue
        i += 1

    print("Num decisions taken from lstm: ",
          num_decisions_taken_by_lstm / times_to_repeat)
    print("Num decisions taken by rf: ",
          num_decisions_taken_by_rf / times_to_repeat)
    print("Num decisions taken by xgb: ",
          num_decisions_taken_by_xgb / times_to_repeat)
    print("Num decisions taken by lstm correctly taken: ",
          num_decisions_correctly_taken_from_lstm / times_to_repeat)
    print(
        "Num decisions taken by lstm correctly taken and not by others: ",
        num_decisions_correctly_taken_from_lstm_and_not_from_xgb_or_rf /
        times_to_repeat)
    evaluation.print_results(
        np.array(tn_s).mean(),
        np.array(fp_s).mean(),
        np.array(fn_s).mean(),
        np.array(tp_s).mean(),
        np.array(f1_s).mean(),
        np.array(balanced_accuracies).mean(),
        np.array(precisions).mean(),
        np.array(recalls).mean(),
        np.array(aucpr_s).mean(),
        np.array(roc_aucs).mean())
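Both attack branches above delegate to fgsm.craft_sample, whose source is not included. The name points at the fast gradient sign method, x_adv = x + epsilon * sign(grad_x loss); a sketch under the assumption of a differentiable Keras-style binary classifier (the function name and loss choice are illustrative):

import tensorflow as tf

def craft_sample_sketch(x, model, epsilon=0.01):
    # classic FGSM: perturb each fraud along the sign of the loss gradient,
    # pushing it away from its true label (1 = fraud)
    x = tf.convert_to_tensor(x, dtype=tf.float32)
    with tf.GradientTape() as tape:
        tape.watch(x)
        pred = model(x)
        loss = tf.keras.losses.binary_crossentropy(tf.ones_like(pred), pred)
    grad = tape.gradient(loss, x)
    return (x + epsilon * tf.sign(grad)).numpy()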
Example no. 5
def exp(args):
    # Get image data
    x_train, y_train, x_test, y_test, prior_test = load_dataset(args.dataset)

    # Get set sizes
    args.set_sizes = get_set_sizes(args.sets, len(x_train), args.set_size_gen)

    # Randomly generate priors Pi
    args.Pi = get_Pi(args.sets, args.Pi_gen)

    # Sample data according to Pi and set sizes
    U_sets, priors_corr = get_U_sets(args.sets, y_train, args.set_sizes,
                                     args.Pi)

    print('Data prepared!')
    print("set_sizes: " + str(args.set_sizes))
    print("test class prior: " + str(prior_test))
    print("Pi: " + str(args.Pi))

    # Get Model
    ExpModel = MultiLayerPerceptron(dataset=args.dataset,
                                    sets=args.sets,
                                    set_sizes=args.set_sizes,
                                    Pi=args.Pi,
                                    mode=args.mode,
                                    weight_decay=args.weightdecay)

    # Schedule Learning Rate if not specified
    if args.learningrate == -1:
        args.learningrate = lr_scheduler(args.dataset, args.sets, args.mode)

    # Get optimizer
    ExpModel.optimizer = Adam(args.optimizer,
                              lr=args.learningrate,
                              decay=args.lr_decay)

    # Build Model
    input_shape = x_train[0].shape
    ExpModel.build_model(priors_corr,
                         prior_test,
                         args.Pi,
                         input_shape,
                         mode=args.mode)

    #-----------------------------------------------------Start Training-----------------------------------------------------#
    history, loss_test = ExpModel.fit_model(U_sets=U_sets,
                                            x_train_total=x_train,
                                            batch_size=args.batchsize,
                                            epochs=args.epoch,
                                            x_test=x_test,
                                            y_test=y_test,
                                            Pi=args.Pi,
                                            priors_corr=priors_corr,
                                            prior_test=prior_test,
                                            mode=args.mode)
    np_loss_test = np.array(loss_test)
    np_loss_train = np.array(history['loss'])

    plot_curve(np_loss_test,
               args.epoch,
               label=args.mode,
               phase='test',
               dataset=args.dataset)
    plot_curve(np_loss_train,
               args.epoch,
               label=args.mode,
               phase='train',
               dataset=args.dataset)

    #---------------------------------------------Save files----------------------------------------------------------------#
    save_data(args, U_sets, priors_corr, prior_test, np_loss_train,
              np_loss_test)
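exp pulls everything from a single args namespace. Collecting the attributes the function actually reads gives the minimal argparse sketch below; the default values are assumptions, not from the source:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--dataset', default='mnist')
parser.add_argument('--sets', type=int, default=5)
parser.add_argument('--set_size_gen', default='uniform')
parser.add_argument('--Pi_gen', default='random')
parser.add_argument('--mode', default='unbiased')
parser.add_argument('--weightdecay', type=float, default=1e-4)
parser.add_argument('--learningrate', type=float, default=-1)  # -1 -> lr_scheduler picks
parser.add_argument('--optimizer', default='adam')
parser.add_argument('--lr_decay', type=float, default=0.0)
parser.add_argument('--batchsize', type=int, default=128)
parser.add_argument('--epoch', type=int, default=100)
exp(parser.parse_args())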
Example no. 6
def experiment_with_cdf(lstm,
                        scale_lstm,
                        loc_lstm,
                        mean_lstm,
                        std_lstm,
                        threshold_lstm,
                        rf,
                        scale_rf,
                        loc_rf,
                        mean_rf,
                        std_rf,
                        threshold_rf,
                        xg_reg,
                        scale_xgb,
                        loc_xgb,
                        mean_xgb,
                        std_xgb,
                        threshold_xgb,
                        scenario,
                        adversarial_attack=False,
                        evasion_attack=False,
                        is_white_box_attack=True,
                        use_lstm_for_adversarial=False):
    x_test, y_test = sequences_crafting_for_classification.get_test_set(
        scenario=scenario)
    x_test_supervised = x_test[:, len(x_test[0]) - 1, :]

    if adversarial_attack or evasion_attack:
        # getting train set for training
        if is_white_box_attack:
            print("whitebox attack")
            dataset_type = INJECTED_DATASET
        else:
            print("blackbox attack")
            dataset_type = OLD_DATASET

        x_train, y_train = sequences_crafting_for_classification.get_train_set(
            dataset_type=dataset_type)
        x_train_supervised = x_train[:, look_back, :]
        if adversarial_attack:
            print("Crafting an adversarial attack")
            if not use_lstm_for_adversarial:
                print("The attacker will use a Multilayer perceptron")
                # training multilayer perceptron
                # todo: hyper param tuning multilayer perceptron
                adversarial_model = MultiLayerPerceptron.create_fit_model(
                    x_train_supervised, y_train)
                # crafting adversarial samples
                x_test_supervised = x_test[:, len(x_test[0]) - 1, :]
                frauds = x_test_supervised[np.where(y_test == 1)]

                adversarial_samples = fgsm.craft_sample(frauds,
                                                        adversarial_model,
                                                        epsilon=0.01)

                x_test[np.where(y_test == 1),
                       len(x_test[0]) - 1] = adversarial_samples
                x_test_supervised = x_test[:, len(x_test[0]) - 1, :]
            else:
                print("The attacker will use a LSTM network")
                # train the network using the right params
                if is_white_box_attack:
                    if USING_AGGREGATED_FEATURES:
                        params = BEST_PARAMS_LSTM_AGGREGATED
                    else:
                        params = BEST_PARAMS_LSTM_NO_AGGREGATED
                else:
                    if USING_AGGREGATED_FEATURES:
                        params = BEST_PARAMS_LSTM_OLD_DATASET_AGGREGATED
                    else:
                        params = BEST_PARAMS_LSTM_OLD_DATASET_NO_AGGREGATED
                adversarial_model, _ = LSTM_classifier.create_fit_model(
                    x_train, y_train, look_back, params=params)
                frauds = x_test[np.where(y_test == 1)]
                adversarial_samples = fgsm.craft_sample(frauds,
                                                        adversarial_model,
                                                        epsilon=0.1)
                x_test[np.where(y_test == 1)] = adversarial_samples
                x_test_supervised = x_test[:, len(x_test[0]) - 1, :]

        if evasion_attack:
            print("Crafting an evasion attack")
            # train the network using the right params
            if is_white_box_attack:
                if USING_AGGREGATED_FEATURES:
                    params = BEST_PARAMS_RF_AGGREGATED
                else:
                    params = BEST_PARAMS_RF_NO_AGGREGATED
            else:
                if USING_AGGREGATED_FEATURES:
                    params = BEST_PARAMS_RF_OLD_DATASET_AGGREGATED
                else:
                    params = BEST_PARAMS_RF_OLD_DATASET_NO_AGGREGATED
            # training the oracle
            oracle, oracle_threshold = RF.create_model(x_train_supervised,
                                                       y_train,
                                                       params=params)

            # if the oracle predicts the fraud as fraud -> discard it, otherwise inject in real bank system
            y_pred_oracle = oracle.predict_proba(x_test_supervised)
            y_pred_oracle = y_pred_oracle[:, 1].ravel()
            y_pred_oracle = np.array(
                evaluation.adjusted_classes(y_pred_oracle, oracle_threshold))

            x_test = x_test[(np.where(((y_test == 1) & (y_pred_oracle == 0))
                                      | (y_test == 0)))]
            y_test = y_test[(np.where(((y_test == 1) & (y_pred_oracle == 0))
                                      | (y_test == 0)))]
            x_test_supervised = x_test[:, len(x_test[0]) - 1, :]

    y_test_pred, thresholds, num_decisions_taken_by_lstm, num_decisions_taken_by_rf, num_decisions_taken_by_xgb = predict_test_based_on_expon(
        lstm, scale_lstm, loc_lstm, mean_lstm, std_lstm, threshold_lstm, rf,
        scale_rf, loc_rf, mean_rf, std_rf, threshold_rf, xg_reg, scale_xgb,
        loc_xgb, mean_xgb, std_xgb, threshold_xgb, x_test, x_test_supervised,
        y_test)
    y_test_pred = np.array(y_test_pred)
    confusion, f1, balanced_accuracy, precision, recall, aucpr, roc_auc, fpr_values, tpr_values, accuracy, matthews_coeff = evaluation.get_performance(
        y_test, y_test_pred, thresholds)
    tn = confusion[0, 0]
    tp = confusion[1, 1]
    fp = confusion[0, 1]
    fn = confusion[1, 0]

    print("Num decisions taken from lstm: ", num_decisions_taken_by_lstm)
    print("Num decisions taken by rf: ", num_decisions_taken_by_rf)
    print("Num decisions taken by xgb: ", num_decisions_taken_by_xgb)
    evaluation.print_results(tn, fp, fn, tp, f1, balanced_accuracy, precision,
                             recall, aucpr, roc_auc, fpr_values, tpr_values,
                             accuracy, matthews_coeff)
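evaluation.adjusted_classes is the probability-to-label step used throughout these examples. Its source is not shown, but the usual definition is a plain threshold cut, sketched here under that assumption:

def adjusted_classes_sketch(y_scores, threshold):
    # predict fraud (1) whenever the score reaches the chosen threshold
    return [1 if score >= threshold else 0 for score in y_scores]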
Example no. 7
rb = ReplayBuffer(args.buffer_size)

input_size = np.array(env.observation_space.shape).prod()
output_size = env.action_space.n
# q_network = QNetwork(input_size, output_size).to(device)
# target_network = QNetwork(input_size, output_size).to(device)
# target_network.load_state_dict(q_network.state_dict())

model_config = {
    "layers": [128, 128],
    "in": input_size,
    "out": output_size
}

q_network = MultiLayerPerceptron(model_config).to(device)
target_network = MultiLayerPerceptron(model_config).to(device)
target_network.load_state_dict(q_network.state_dict())


optimizer = optim.Adam(q_network.parameters(), lr=args.learning_rate)

loss_fn = nn.MSELoss()

print(repr(device))
print(q_network)

if not os.path.exists(models_path):
    os.mkdir(models_path)

if TRAIN:
    ...  # training loop truncated in this excerpt
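The MultiLayerPerceptron here replaces the commented-out QNetwork and is driven entirely by model_config. Its definition is not shown; a PyTorch module consistent with the "layers"/"in"/"out" keys might look like the sketch below (the class body, including the choice of ReLU, is an assumption):

import torch.nn as nn

class MultiLayerPerceptronSketch(nn.Module):
    def __init__(self, config):
        super().__init__()
        sizes = [config["in"]] + config["layers"] + [config["out"]]
        layers = []
        for i in range(len(sizes) - 1):
            layers.append(nn.Linear(sizes[i], sizes[i + 1]))
            if i < len(sizes) - 2:          # no activation after the Q-value head
                layers.append(nn.ReLU())
        self.net = nn.Sequential(*layers)

    def forward(self, x):
        return self.net(x)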
Example no. 8
def experiment(lstm,
               threshold_lstm,
               xg_reg,
               threshold_xgb,
               rf,
               threshold_rf,
               scenario,
               adversarial_attack=False,
               evasion_attack=False,
               is_white_box_attack=True,
               use_lstm_for_adversarial=False):
    x_test, y_test = sequences_crafting_for_classification.get_test_set(
        scenario=scenario)

    if adversarial_attack or evasion_attack:
        # getting train set for training
        if is_white_box_attack:
            print("Using as traing set, the real one - whitebox attack")
            dataset_type = INJECTED_DATASET
        else:
            print("Using as traing set, the old one - blackbox attack")
            dataset_type = OLD_DATASET

        x_train, y_train = sequences_crafting_for_classification.get_train_set(
            dataset_type=dataset_type)
        x_train_supervised = x_train[:, look_back, :]

        x_test_supervised = x_test[:, len(x_test[0]) - 1, :]
        if adversarial_attack:
            print("Crafting an adversarial attack")
            if not use_lstm_for_adversarial:
                print("The attacker will use a Multilayer perceptron")
                adversarial_model = MultiLayerPerceptron.create_fit_model(
                    x_train_supervised, y_train)
                frauds = x_test_supervised[np.where(y_test == 1)]
                adversarial_samples = fgsm.craft_sample(frauds,
                                                        adversarial_model,
                                                        epsilon=0.01)
                # for LSTM samples, only the last transaction of the sequence must be changed
                x_test[np.where(y_test == 1),
                       len(x_test[0]) - 1] = adversarial_samples
                x_test_supervised = x_test[:, len(x_test[0]) - 1, :]
            else:
                print("The attacker will use a LSTM network")
                # train the network using the right params
                if is_white_box_attack:
                    if USING_AGGREGATED_FEATURES:
                        params = BEST_PARAMS_LSTM_REAL_DATASET_AGGREGATED
                    else:
                        params = BEST_PARAMS_LSTM_REAL_DATASET_NO_AGGREGATED
                else:
                    if USING_AGGREGATED_FEATURES:
                        params = BEST_PARAMS_LSTM_OLD_DATASET_AGGREGATED
                    else:
                        params = BEST_PARAMS_LSTM_OLD_DATASET_NO_AGGREGATED
                frauds = x_test[np.where(y_test == 1)]
                adversarial_model, _ = LSTM_classifier.create_fit_model(
                    x_train, y_train, look_back, params=params)
                adversarial_samples = fgsm.craft_sample(frauds,
                                                        adversarial_model,
                                                        epsilon=0.1)
                # for LSTM samples, only the last transaction of the sequence must be changed
                x_test[np.where(y_test == 1)] = adversarial_samples
                x_test_supervised = x_test[:, len(x_test[0]) - 1, :]

        if evasion_attack:
            print("Crafting an evasion attack")
            # train the network using the right params
            if is_white_box_attack:
                if USING_AGGREGATED_FEATURES:
                    params = BEST_PARAMS_RF_AGGREGATED
                else:
                    params = BEST_PARAMS_RF_NO_AGGREGATED
            else:
                if USING_AGGREGATED_FEATURES:
                    params = BEST_PARAMS_RF_OLD_DATASET_AGGREGATED
                else:
                    params = BEST_PARAMS_RF_OLD_DATASET_NO_AGGREGATED
            # training the oracle
            oracle, oracle_threshold = RF.create_model(x_train_supervised,
                                                       y_train,
                                                       params=params)

            # if the oracle predicts the fraud as fraud -> discard it, otherwise inject in real bank system
            # the freshly trained oracle (not the defender's rf) filters the frauds
            y_pred_oracle = oracle.predict_proba(x_test_supervised)
            y_pred_oracle = y_pred_oracle[:, 1].ravel()
            y_pred_oracle = np.array(
                evaluation.adjusted_classes(y_pred_oracle, oracle_threshold))

            x_test = x_test[(np.where(((y_test == 1) & (y_pred_oracle == 0))
                                      | (y_test == 0)))]
            y_test = y_test[(np.where(((y_test == 1) & (y_pred_oracle == 0))
                                      | (y_test == 0)))]
            x_test_supervised = x_test[:, len(x_test[0]) - 1, :]

        # predicting test set
        y_pred_lstm = lstm.predict(x_test)
        y_pred_rf = rf.predict_proba(x_test_supervised)
        y_pred_xgb = xg_reg.predict_proba(x_test_supervised)
        y_pred_lstm = y_pred_lstm.ravel()
        y_pred_rf = y_pred_rf[:, 1].ravel()
        y_pred_xgb = y_pred_xgb[:, 1].ravel()

        print("LSTM")
        evaluation.evaluate(y_test, y_pred_lstm, threshold_lstm)

        print("RF")
        evaluation.evaluate(y_test, y_pred_rf, threshold_rf)

        print("Xgboost")
        evaluation.evaluate(y_test, y_pred_xgb, threshold_xgb)

    if not adversarial_attack and not evasion_attack:
        x_test_supervised = x_test[:, len(x_test[0]) - 1, :]

        x_train, y_train = sequences_crafting_for_classification.get_train_set()
        x_train_supervised = x_train[:, look_back, :]

        print("LSTM")
        y_pred_lstm = lstm.predict(x_test)
        evaluation.evaluate(y_test, y_pred_lstm, threshold_lstm)
        explainability.explain_dataset(lstm, x_train, x_test, threshold_lstm,
                                       y_test)

        print("RF")
        y_pred_rf = rf.predict_proba(x_test_supervised)[:, 1]
        evaluation.evaluate(y_test, y_pred_rf, threshold_rf)
        explainability.explain_dataset(rf, x_train_supervised,
                                       x_test_supervised, threshold_rf, y_test)

        print("Xgboost")
        y_pred_xgb = xg_reg.predict_proba(x_test_supervised)[:, 1]
        evaluation.evaluate(y_test, y_pred_xgb, threshold_xgb)
        explainability.explain_dataset(xg_reg, x_train_supervised,
                                       x_test_supervised, threshold_xgb,
                                       y_test)

    y_pred_lstm = evaluation.adjusted_classes(y_pred_lstm, threshold_lstm)
    y_pred_rf = evaluation.adjusted_classes(y_pred_rf, threshold_rf)
    y_pred_xgb = evaluation.adjusted_classes(y_pred_xgb, threshold_xgb)

    lstm_fraud_indices = evaluation.get_fraud_indices(y_test, y_pred_lstm)
    rf_fraud_indices = evaluation.get_fraud_indices(y_test, y_pred_rf)
    xgboost_fraud_indices = evaluation.get_fraud_indices(y_test, y_pred_xgb)
    evaluation.print_frauds_stats(lstm_fraud_indices, rf_fraud_indices,
                                  xgboost_fraud_indices)

    lstm_genuine_indices = evaluation.get_genuine_indices(y_test, y_pred_lstm)
    rf_genuine_indices = evaluation.get_genuine_indices(y_test, y_pred_rf)
    xgboost_genuine_indices = evaluation.get_genuine_indices(
        y_test, y_pred_xgb)
    evaluation.print_genuine_stats(lstm_genuine_indices, rf_genuine_indices,
                                   xgboost_genuine_indices)
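The overlap statistics at the end rely on evaluation.get_fraud_indices and evaluation.get_genuine_indices, whose code is not shown. A sketch of the plausible semantics, i.e. the indices each model gets right per class (an assumption, not the confirmed implementation):

import numpy as np

def get_fraud_indices_sketch(y_true, y_pred):
    # actual frauds that the model also labels as fraud
    return np.where((np.asarray(y_true) == 1) & (np.asarray(y_pred) == 1))[0]

def get_genuine_indices_sketch(y_true, y_pred):
    # genuine transactions that the model also labels as genuine
    return np.where((np.asarray(y_true) == 0) & (np.asarray(y_pred) == 0))[0]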