Example #1
import sys

# dataset_manip, load_directory, and Ensemble are this project's own
# helpers; the import paths below are assumptions for illustration.
import dataset_manip
from dataset_manip import load_directory
from ensemble import Ensemble


def main():
    test_set_path = sys.argv[1]      # directory containing the test images
    output_file_path = sys.argv[2]   # destination for the predictions file

    # Load the test images and scale pixel values from [0, 255] to [0, 1]
    X_test = dataset_manip.load_images(load_directory(test_set_path)) / 255

    # Restore a pre-trained 11-model ensemble from disk and write its
    # predictions for the test set
    ens = Ensemble(input_shape=(77, 71, 1),
                   num_classes=10,
                   num_models=11,
                   batch_size=512,
                   path='./ensemble_files',
                   load=True)
    dataset_manip.store_predictions(dataset_manip.get_filenames(test_set_path),
                                    ens.predict(X_test), output_file_path)
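
The Ensemble class above is project-specific, so its predict step is not shown here. A common way such a class combines image classifiers is soft voting: average the class-probability outputs of all member models, then pick the most probable class. The sketch below illustrates that pattern; the class name, the member models, and their predict(X) signatures are hypothetical, not this project's actual code.

import numpy as np

class SoftVotingEnsemble:
    """Minimal sketch of soft voting over an ensemble's members."""

    def __init__(self, models):
        # Hypothetical: each member exposes predict(X) returning an
        # (N, num_classes) array of class probabilities.
        self.models = models

    def predict(self, X):
        # Stack per-model outputs into (num_models, N, num_classes),
        # average over models, then take the most probable class per sample.
        probs = np.stack([m.predict(X) for m in self.models])
        return probs.mean(axis=0).argmax(axis=1)
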
Example #2
import os

import numpy as np
import pandas as pd
from sklearn import preprocessing
from sklearn.model_selection import train_test_split

# Ensemble and graphics are this project's own modules; the import paths
# below are assumptions for illustration.
from ensemble import Ensemble
from graphics import graphics


def main():
    # Dataset path
    dataset_name = ['credit_card_clients_balanced', 'credit_card_clients']

    for data_name in dataset_name:
        dataset_path = os.path.join(os.getcwd(), 'dataset', data_name + '.csv')
        dataset = pd.read_csv(dataset_path, encoding='utf-8')

        # Feature columns (X1..X23) and target column (Y)
        data_x = dataset[[
            'X1', 'X2', 'X3', 'X4', 'X5', 'X6', 'X7', 'X8', 'X9', 'X10', 'X11',
            'X12', 'X13', 'X14', 'X15', 'X16', 'X17', 'X18', 'X19', 'X20',
            'X21', 'X22', 'X23'
        ]]
        data_y = dataset['Y']

        # Scale each feature to the [0, 1] range
        min_max_scaler = preprocessing.MinMaxScaler()
        X_normalized = min_max_scaler.fit_transform(data_x)

        acc_rate = []
        reject_rate = []

        # Evaluate the ensemble over 20 independent train/test splits
        for i in range(20):
            print('---------------- Ensemble -----------------')
            print('--- MLP - SVM - KNN - GMM - Naive Bayes ---')
            print(i + 1, 'of 20 iterations')
            X_train, X_test, y_train, y_test = train_test_split(X_normalized,
                                                                data_y,
                                                                test_size=0.2)
            y_train = np.array(y_train)
            y_test = np.array(y_test)

            model = Ensemble()
            model.train(X_train, y_train, gridSearch=False)
            y_hat = model.predict(X_test)

            error, reject = model.evaluate(y_hat, y_test)
            acc_rate.append(1 - error)
            reject_rate.append(reject)

        graphics(acc_rate, reject_rate, data_name)
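
Here model.evaluate(y_hat, y_test) returns an error rate and a reject rate, i.e. classification with a reject option: the ensemble may decline to classify a sample, and error is measured only on the samples it accepts. The project's own evaluate is not shown; the sketch below is an assumption for illustration, assuming rejected samples are marked with a sentinel label (here -1).

import numpy as np

def evaluate_with_reject(y_hat, y_true, reject_label=-1):
    # Hypothetical sketch: `reject_label` marks samples the ensemble
    # declined to classify.
    y_hat = np.asarray(y_hat)
    y_true = np.asarray(y_true)
    accepted = y_hat != reject_label
    reject_rate = 1.0 - accepted.mean()
    # Error is computed only over the accepted samples.
    error = (y_hat[accepted] != y_true[accepted]).mean() if accepted.any() else 0.0
    return error, reject_rate
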
Example #3
    dices = np.zeros((n_files, 134))   # one row of Dice scores per atlas
    errors = np.zeros((n_files, ))

    pred_functions = {}
    dices_mean = []
    for atlas_id in range(n_files):
        start_time = time.perf_counter()

        print("Atlas: {}".format(atlas_id))

        # Predict all voxels of this atlas with the ensemble
        vx_all, pred_all = ensemble_net.predict(data_gen, atlas_id, None,
                                                region_centroids, batch_size,
                                                scaler, pred_functions, True)

        # Construct the predicted image
        img_true = data_gen.atlases[atlas_id][1]
        img_pred = create_img_from_pred(vx_all, pred_all, img_true.shape)

        # Compute the dice coefficient and the error
        # Restrict the comparison to voxels that are non-zero in either the
        # prediction or the ground truth
        non_zo = np.logical_or(img_pred != 0, img_true != 0).nonzero()
        pred = img_pred[non_zo]
        true = img_true[non_zo]

        dice_regions = compute_dice(pred, true, n_out)
        err_global = error_rate(pred, true)

        dices_all, errs = ensemble_net.stat_of_all_models(img_true, n_out)
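
compute_dice(pred, true, n_out) returns one score per region; the Dice coefficient of a predicted mask A and a true mask B is 2|A ∩ B| / (|A| + |B|). The project's implementation is not shown, so the per-region sketch below is an assumption for illustration, assuming integer label volumes with region labels 1..n_regions.

import numpy as np

def compute_dice_per_region(pred, true, n_regions):
    # Sketch: Dice = 2*|A ∩ B| / (|A| + |B|) for each label value.
    dices = np.zeros(n_regions)
    for label in range(1, n_regions + 1):
        a = pred == label
        b = true == label
        denom = a.sum() + b.sum()
        # Convention: if a region is absent from both images, score it 1.0
        dices[label - 1] = 2.0 * np.logical_and(a, b).sum() / denom if denom else 1.0
    return dices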