Example #1
# Assumed imports (not shown in this excerpt):
import numpy as np
from sklearn.metrics import accuracy_score, classification_report
from dbn.tensorflow import SupervisedDBNClassification


def fractal_modeldata(filename):
    print(filename)
    X, Y = loaddata(filename, 31)  # loaddata is a project-specific helper
    np.random.seed(13)
    indices = np.random.permutation(len(X))  # the original hardcoded 2030 samples
    test_size = int(0.1 * len(indices))
    X_train = X[indices[:-test_size]]
    Y_train = Y[indices[:-test_size]]
    X_test = X[indices[-test_size:]]
    Y_test = Y[indices[-test_size:]]
    # relu, sigmoid
    classifier = SupervisedDBNClassification(hidden_layers_structure=[30, 30],
                                             learning_rate_rbm=0.05,
                                             learning_rate=0.1,
                                             n_epochs_rbm=10,
                                             n_iter_backprop=1000,
                                             batch_size=16,
                                             activation_function='sigmoid',
                                             dropout_p=0.1,
                                             verbose=0)

    classifier.fit(X_train, Y_train)
    Y_pred = classifier.predict(X_test)
    print(accuracy_score(Y_test, Y_pred)*100)
    print(classification_report(Y_test, Y_pred))
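The loaddata helper is not part of this excerpt. A minimal sketch of what it could look like, assuming a comma-separated file with the given number of feature columns followed by an integer label column (the file layout and body are assumptions, not the original code):

def loaddata(filename, n_features):
    # Hypothetical reconstruction: CSV rows of n_features floats plus a label.
    raw = np.loadtxt(filename, delimiter=',')
    X = raw[:, :n_features].astype(np.float32)
    Y = raw[:, n_features].astype(np.int32)
    return X, Y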
Example #2
def main():
    train_data, train_label = read_data("TRAIN", IMAGE_SIZE)
    test_data, test_label = read_data("TEST", IMAGE_SIZE)

    # flatten the image data to (n_samples, n_features)
    flatten_train_data = train_data.reshape(np.size(train_data, 0), -1)
    flatten_test_data = test_data.reshape(np.size(test_data, 0), -1)

    # nudge_dataset augments the training set; a sketch of this helper
    # follows this example.
    flatten_train_data, train_label = nudge_dataset(flatten_train_data,
                                                    train_label)

    # flatten_train_data = np.concatenate([flatten_train_data, gaussian_filter1d(flatten_train_data, sigma=0.5)])
    # train_label = np.concatenate([train_label for _ in range(2)])

    # normalize data
    flatten_train_data = min_max_normalize(flatten_train_data)
    flatten_test_data = min_max_normalize(flatten_test_data)

    # NOTE: these image-shaped arrays are built but never used below; the DBN
    # is trained on the flattened vectors.
    expanded_train_data = np.expand_dims(
        flatten_train_data.reshape((-1, ) + IMAGE_SIZE), -1)
    expanded_test_data = np.expand_dims(
        flatten_test_data.reshape((-1, ) + IMAGE_SIZE), -1)

    dbn = SupervisedDBNClassification(hidden_layers_structure=[128, 64],
                                      learning_rate_rbm=0.001,
                                      learning_rate=0.001,
                                      n_epochs_rbm=20,
                                      n_iter_backprop=10000,
                                      batch_size=32,
                                      activation_function='relu',
                                      dropout_p=0.2)
    dbn.fit(flatten_train_data, train_label)
    evaluate(np.asarray(list(dbn.predict(flatten_test_data))), test_label,
             "DBN")
Example #3
def dbn(train_x, train_y, num_classes=3):
    # num_classes is accepted but unused; the classifier infers the classes
    # from train_y.
    model = SupervisedDBNClassification(learning_rate_rbm=0.05,
                                        learning_rate=0.1,
                                        n_epochs_rbm=10,
                                        n_iter_backprop=100,
                                        batch_size=32,
                                        activation_function='relu',
                                        dropout_p=0.2)
    model.fit(train_x, train_y)
    return model
Example #4
def create_model():
    classifier = SupervisedDBNClassification(hidden_layers_structure=[256, 256],
                                             learning_rate_rbm=0.05,
                                             learning_rate=0.1,
                                             n_epochs_rbm=10,
                                             n_iter_backprop=500,
                                             batch_size=32,
                                             activation_function='relu',
                                             dropout_p=0.1,
                                             verbose=False)
    return classifier
Example #5
def dbn():
    # x_train, y_train, x_test and y_test are assumed to be defined at module
    # level elsewhere in the original script.
    estim = SupervisedDBNClassification(
        hidden_layers_structure=[256, 256, 256, 256, 256, 256],
        learning_rate_rbm=0.05,
        learning_rate=0.1,
        n_epochs_rbm=10,
        n_iter_backprop=100,
        batch_size=32,
        activation_function='relu',
        dropout_p=0.2,
        verbose=0)
    estim.fit(x_train, y_train)
    y_pred = estim.predict(x_test)
    # sklearn metrics take (y_true, y_pred); the original passed the
    # predictions first, which changes the result for f1_score.
    print("f1 score", f1_score(y_test, y_pred))
    print("accuracy score", accuracy_score(y_test, y_pred))
    return 0
Example #6
def fractal_modeldata(filename):
    scores = []
    print(filename)
    X, Y = loaddata(filename, 99)

    # NOTE: the seed is reset each iteration, so additional loop passes would
    # reproduce the same split.
    for i in range(1):
        np.random.seed(13)
        indices = np.random.permutation(len(X))  # the original hardcoded 1000 samples
        test_size = int(0.1 * len(indices))
        X_train = X[indices[:-test_size]]
        Y_train = Y[indices[:-test_size]]
        X_test = X[indices[-test_size:]]
        Y_test = Y[indices[-test_size:]]
        # relu, sigmoid
        classifier = SupervisedDBNClassification(
            hidden_layers_structure=[256, 256],
            learning_rate_rbm=0.05,
            learning_rate=0.2,
            n_epochs_rbm=30,
            n_iter_backprop=2000,
            batch_size=16,
            activation_function='sigmoid',
            dropout_p=0.1,
            verbose=0)
        classifier.fit(X_train, Y_train)
        Y_pred = classifier.predict(X_test)
        scores.append(accuracy_score(Y_test, Y_pred))
        print(classification_report(Y_test, Y_pred))
        # NOTE: this builds the ROC from hard 0/1 predictions; a variant using
        # predicted probabilities follows this example.
        fpr, tpr, threshold = roc_curve(Y_test, Y_pred)
        roc_auc = auc(fpr, tpr)
        plt.title('Receiver Operating Characteristic')
        plt.plot(fpr, tpr, 'b', label='AUC = %0.2f' % roc_auc)
        plt.legend(loc='lower right')
        plt.plot([0, 1], [0, 1], 'r--')
        plt.xlim([0, 1])
        plt.ylim([0, 1])
        plt.ylabel('True Positive Rate')
        plt.xlabel('False Positive Rate')
        plt.show()

    print('All accuracy scores across runs: ' + str(scores))
    print('Mean accuracy score: ' + str(np.mean(scores)))
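A ROC curve is usually computed from continuous scores rather than hard class labels. A minimal variant of the plotting block above using the library's predict_proba, assuming binary labels encoded as 0/1 and a two-column probability array (a sketch, not the original code):

proba = np.array(classifier.predict_proba(X_test))  # shape (n_samples, 2)
fpr, tpr, threshold = roc_curve(Y_test, proba[:, 1])
roc_auc = auc(fpr, tpr)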
Example #7
def example():
    np.random.seed(1337)  # for reproducibility
    from sklearn.datasets import load_digits
    from sklearn.model_selection import train_test_split
    from sklearn.metrics import accuracy_score  # sklearn.metrics.classification is a removed private path

    from dbn.tensorflow import SupervisedDBNClassification

    # Loading dataset
    digits = load_digits()
    X, Y = digits.data, digits.target

    # Data scaling
    X = (X / 16).astype(np.float32)

    # Splitting data
    X_train, X_test, Y_train, Y_test = train_test_split(X,
                                                        Y,
                                                        test_size=0.2,
                                                        random_state=0)

    # Training
    classifier = SupervisedDBNClassification(
        hidden_layers_structure=[256, 256],
        learning_rate_rbm=0.05,
        learning_rate=0.1,
        n_epochs_rbm=10,
        n_iter_backprop=100,
        batch_size=32,
        activation_function='relu',
        dropout_p=0.2)
    print(X_train.shape, Y_train.shape)
    classifier.fit(X_train, Y_train)

    # Test
    Y_pred = np.asarray(list(classifier.predict(X_test)))
    print('Done.\nAccuracy: %f' % accuracy_score(Y_test, Y_pred))
Example #8
# Transform the cleaned text into bag-of-words feature vectors. The input to
# fit_transform should be a list of strings.
train_data_features = vectorizer.fit_transform(clean_train_LAPD)
test_data_features = vectorizer.transform(clean_test_LAPD)

# Convert the sparse matrices to dense numpy arrays, which are easier to work
# with here (the original called np.asarray() without assigning the result,
# which was a no-op).
train_data_features = train_data_features.toarray()
test_data_features = test_data_features.toarray()

# Training
classifier = SupervisedDBNClassification(
    hidden_layers_structure=[500, 250, 100],
    learning_rate_rbm=0.1,
    learning_rate=0.0001,
    n_epochs_rbm=50,
    n_iter_backprop=500,
    batch_size=16,
    activation_function='sigmoid',
    dropout_p=0)
classifier.fit(train_data_features, train["Problematic"])

# Test
Y_pred = classifier.predict(test_data_features)
Y_p = classifier.predict_proba(test_data_features)
Y_n = classifier.predict_proba_dict(test_data_features)
print(Y_n)
print(Y_p)
print(Y_pred)
print(test["Problematic"])
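The vectorizer is created earlier in the original script. A plausible setup, mirroring the Kaggle bag-of-words tutorial this snippet appears to derive from (the exact parameters are assumptions):

from sklearn.feature_extraction.text import CountVectorizer

vectorizer = CountVectorizer(analyzer='word',
                             tokenizer=None,
                             preprocessor=None,
                             stop_words=None,
                             max_features=5000)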
Example #9
setting = read_setting(open('setting.txt'))
file_out = open('../result/dbn.log', 'a')


X_train = np.loadtxt('../data/train_dataset_new.txt')
X_test = np.loadtxt('../data/test_dataset_new.txt')

Y_train = np.array([np.argmax(row) for row in np.loadtxt('../data/train_labels_new.txt')])
Y_test = np.array([np.argmax(row) for row in np.loadtxt('../data/test_labels_new.txt')])

# Training
classifier = SupervisedDBNClassification(hidden_layers_structure=setting['hidden_layers_structure'],
                                         learning_rate_rbm=float(setting['learning_rate_rbm']),
                                         learning_rate=float(setting['learning_rate']),
                                         n_epochs_rbm=int(setting['n_epochs_rbm']),
                                         n_iter_backprop=int(setting['n_iter_backprop']),
                                         batch_size=int(setting['batch_size']),
                                         activation_function=setting['activation_function'],
                                         dropout_p=float(setting['dropout_p']),
                                         l2_regularization=float(setting['l2_regularization']),
                                         contrastive_divergence_iter=int(setting['contrastive_divergence_iter']))

classifier.fit(X_train, Y_train)

# Test (the original predicted on X_train here, leaving the test set unused)
Y_pred = classifier.predict(X_test)
accuracy = accuracy_score(Y_test, Y_pred)
print('Done.\nAccuracy: %f' % accuracy)

file_out.write('\n\n-------------------------------\n\n')

# Copy the settings used for this run into the log (the loop body was cut off
# in the original excerpt).
for line in open('setting.txt'):
    file_out.write(line)
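read_setting is not shown. One plausible implementation, assuming setting.txt holds key=value pairs with hidden_layers_structure given as comma-separated integers (the file format is an assumption):

def read_setting(f):
    setting = {}
    for line in f:
        line = line.strip()
        if not line or line.startswith('#'):
            continue
        key, value = line.split('=', 1)
        setting[key.strip()] = value.strip()
    # The layer list must be numeric rather than a raw string.
    setting['hidden_layers_structure'] = [
        int(x) for x in setting['hidden_layers_structure'].split(',')
    ]
    return setting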
Example #10
def run(params):

    # ##################### get parameters and define logger ################

    # device
    os.environ['CUDA_VISIBLE_DEVICES'] = str(params.gpu)

    # get parameters
    data_name = params.data.data_name
    data_dir = params.data.data_dir
    target_dir = params.data.target_dir
    train_prop = params.data.train_prop
    val_prop = params.data.val_prop

    train_params = params.train
    method_name = params.method_name
    result_dir = params.result_dir
    folder_level = params.folder_level

    train_prop = train_prop if train_prop < 1 else int(train_prop)
    val_prop = val_prop if val_prop < 1 else int(val_prop)

    result_root = result_dir
    local_v = locals()
    for s in folder_level:
        result_dir = check_path(os.path.join(result_dir, str(local_v[s])))

    # define output dirs
    acc_dir = os.path.join(result_root, 'accuracy.csv')
    log_dir = os.path.join(result_dir, 'train.log')
    model_dir = os.path.join(result_dir, 'weights.pkl')
    # soft_dir = os.path.join(result_dir, 'soft_label.mat')
    # loss_dir = os.path.join(result_dir, 'loss_curve.png')

    # define logger
    logger = define_logger(log_dir)

    # print parameters
    num1 = 25
    num2 = 100
    logger.info('%s begin a new training: %s %s' %
                ('#' * num1, method_name, '#' * num1))
    params_str = recur_str_dict_for_show(params, total_space=num2)
    logger.info('show parameters ... \n%s' % params_str)

    # ########################### get data, train ############################

    logger.info('get data ...')
    mask_dir = os.path.dirname(data_dir)
    data, target = read_data(data_dir, target_dir)
    train_mask, val_mask, test_mask = load_masks(mask_dir, target, train_prop,
                                                 val_prop)
    x_train, y_train = get_vector_samples(data, target, train_mask)

    logger.info('get model ...')
    from dbn.tensorflow import SupervisedDBNClassification
    classifier = SupervisedDBNClassification(**train_params)

    logger.info('begin to train ...')
    s = time.time()
    classifier.fit(x_train, y_train)
    e = time.time()
    train_time = e - s
    logger.info('training time: %.4fs' % train_time)

    logger.info('save model ...')
    classifier.save(model_dir)

    # ########################### predict, output ###########################

    # Flatten the data cube to (n_pixels, n_bands) for per-pixel prediction
    # (assuming a (bands, height, width) layout).
    all_data = data.reshape(-1, data.shape[1] * data.shape[2]).T

    classifier = SupervisedDBNClassification.load(model_dir)

    logger.info('begin to predict ...')
    s = time.time()
    pred = classifier.predict(all_data)
    pred = np.array(pred)
    pred = pred.reshape(target.shape) + 1
    e = time.time()
    pred_time = (e - s)
    logger.info('predicted time: %.4fs' % pred_time)

    # output predicted map(png/mat), accuracy table and other records
    logger.info('save classification maps etc. ...')
    train_records = {
        'train_time': '%.4f' % train_time,
        'pred_time': '%.4f' % pred_time
    }

    ro = ResultOutput(pred,
                      data,
                      target,
                      train_mask,
                      val_mask,
                      test_mask,
                      result_dir,
                      acc_dir,
                      hyper_params=params,
                      train_records=train_records)
    ro.output()
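params.train is unpacked directly into the constructor, so it has to hold the classifier's keyword arguments. A plausible shape for that part of the configuration (every value here is an illustrative assumption):

train_params = {
    'hidden_layers_structure': [256, 256],
    'learning_rate_rbm': 0.05,
    'learning_rate': 0.1,
    'n_epochs_rbm': 10,
    'n_iter_backprop': 100,
    'batch_size': 32,
    'activation_function': 'relu',
    'dropout_p': 0.2,
}
classifier = SupervisedDBNClassification(**train_params)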
Example #11
import numpy as np

np.random.seed(1337)  # for reproducibility
from sklearn.metrics import accuracy_score  # sklearn.metrics.classification is a removed private path

from dbn.tensorflow import SupervisedDBNClassification
from Rafd import Rafd

# Splitting data
rafd = Rafd("entrenamiento/")
X_train, X_test, Y_train, Y_test = rafd.getData()

# Training
classifier = SupervisedDBNClassification(hidden_layers_structure=[256, 256],
                                         learning_rate_rbm=0.05,
                                         learning_rate=0.001,
                                         n_epochs_rbm=15,
                                         n_iter_backprop=100,
                                         batch_size=32,
                                         activation_function='sigmoid',
                                         dropout_p=0.2)
classifier.fit(X_train, Y_train)

# Save the model
classifier.save('model.pkl')

# Restore it
classifier = SupervisedDBNClassification.load('model.pkl')

# Test
Y_pred = classifier.predict(X_test)
print('Done.\nAccuracy: %f' % accuracy_score(Y_test, Y_pred))
Example #12

# Assumed imports (not shown in this excerpt):
import numpy as np
from sklearn.datasets import fetch_kddcup99
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, LabelEncoder
from sklearn.metrics import accuracy_score
from dbn.tensorflow import SupervisedDBNClassification

if __name__ == '__main__':
    # Load and normalize the dataset
    kddcup = fetch_kddcup99(subset='smtp', shuffle=True, random_state=1000)

    ss = StandardScaler()
    X = ss.fit_transform(kddcup['data']).astype(np.float32)

    le = LabelEncoder()
    Y = le.fit_transform(kddcup['target']).astype(np.float32)

    # Create train and test sets
    X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.25, random_state=1000)

    # Train the model
    classifier = SupervisedDBNClassification(hidden_layers_structure=[64, 64],
                                             learning_rate_rbm=0.001,
                                             learning_rate=0.01,
                                             n_epochs_rbm=20,
                                             n_iter_backprop=150,
                                             batch_size=256,
                                             activation_function='relu',
                                             dropout_p=0.25)

    classifier.fit(X_train, Y_train)

    Y_pred = classifier.predict(X_test)
    print('Accuracy score: {}'.format(accuracy_score(Y_test, Y_pred)))
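At inference time the same fitted scaler and label encoder must be reapplied before calling predict. A short sketch persisting them alongside the model (not in the original snippet; file names are illustrative and joblib is an assumed dependency):

    # Persist the fitted preprocessors and the trained model together.
    import joblib
    joblib.dump({'scaler': ss, 'encoder': le}, 'kddcup_preprocessing.pkl')
    classifier.save('kddcup_dbn.pkl')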

Example #13
        else:
            if (os.stat(filename).st_size != 0):
                X_Test = np.asarray([[word_to_index[w] for w in sent[:-1]]
                                     for sent in tokenized_sentences])
                y_Test = np.transpose(np.asarray([1] * (len(list(sentences)))))

    # Truncate and pad input sequences
    X_Train = sequence.pad_sequences(X_Train, maxlen=max_review_length)
    X_Test = sequence.pad_sequences(X_Test, maxlen=max_review_length)

    # Training
    classifier = SupervisedDBNClassification(
        hidden_layers_structure=list(
            map(int, FLAGS.hidden_layers_structure.split(","))),
        learning_rate_rbm=FLAGS.learning_rate_rbm,
        learning_rate=FLAGS.learning_rate,
        n_epochs_rbm=FLAGS.n_epochs_rbm,
        n_iter_backprop=FLAGS.n_iter_backprop,
        batch_size=FLAGS.batch_size,
        activation_function=FLAGS.activation_function,
        dropout_p=FLAGS.dropout_p)
    classifier.fit(X_Train, y_Train)

    # Test

    Y_pred = classifier.predict(X_Test)
    Y_p = classifier.predict_proba(X_Test)
    Y_n = classifier.predict_proba_dict(X_Test)
    print(Y_n)
    print(Y_p)
    print(Y_pred)
Example #14

def image_subset(index, x, y):
    xs = []
    ys = []
    for i in range(len(x)):
        if y[i] < index:
            xs.append(x[i])
            ys.append(y[i])
    return np.array(xs), np.array(ys)


# The hyperparameters below (learning_rate_rbm, learning_rate, n_epochs_rbm,
# n_iter_backprop, batch_size) are defined earlier in the original script.
dbn = SupervisedDBNClassification(hidden_layers_structure=[1024, 512, 256],
                                  learning_rate_rbm=learning_rate_rbm,
                                  learning_rate=learning_rate,
                                  n_epochs_rbm=n_epochs_rbm,
                                  n_iter_backprop=n_iter_backprop,
                                  batch_size=batch_size,
                                  activation_function='sigmoid',
                                  dropout_p=0.2)

# cifar10 is assumed to come from keras.datasets (import not shown in excerpt).
(cx_train, cy_train), (cx_test, cy_test) = cifar10.load_data()

cx_train, cy_train = image_subset(num_classes, cx_train, cy_train)
cx_test, cy_test = image_subset(num_classes, cx_test, cy_test)

if use_all:
    train_ex = len(cx_train)
    test_ex = len(cx_test)
# NOTE: when use_all is False, train_ex and test_ex must be set elsewhere;
# that branch is not shown in this excerpt.
print('Using {} training and {} testing'.format(train_ex, test_ex))

if use_color:
Example #15
    def train(self, training_data, training_z):
        """Trains the classifier
        
        Parameters:
        -----------
        training_data: numpy array, size Ngalaxes x Nbands
          training data, each row is a galaxy, each column is a band as per
          band defined above
        training_z: numpy array, size Ngalaxies
          true redshift for the training sample
        """

        from dbn.tensorflow import SupervisedDBNClassification

        self.training_z = training_z

        # Create value-added data
        print("Creating value-added training data")
        self.training_data = get_valueadded_data(
            training_data, self.bands, self.opt['errors'], self.opt['colors'],
            self.opt['band_triplets'], self.opt['band_triplets_errors'],
            self.opt['heal_undetected'], self.wants_arrays)

        # Read hyperparameters from self.opt, falling back to defaults
        # (same behavior as the original inline conditionals, just tidier).
        data_scaler = self.opt.get('data_scaler', 'MinMaxScaler')
        n_bin = self.opt['bins']
        train_percent = self.opt.get('train_percent', 1)
        n_epochs_rbm = self.opt.get('n_epochs_rbm', 2)
        activation = self.opt.get('activation', 'relu')
        learning_rate_rbm = self.opt.get('learning_rate_rbm', 0.05)
        learning_rate = self.opt.get('learning_rate', 0.1)
        n_iter_backprop = self.opt.get('n_iter_backprop', 25)
        batch_size = self.opt.get('batch_size', 32)
        dropout_p = self.opt.get('dropout_p', 0.2)
        hidden_layers_structure = self.opt.get('hidden_layers_structure',
                                               [256, 256])

        print("Finding bins for training data")

        # Data rescaling
        self.scaler = getattr(preprocessing, data_scaler)()

        print(f"Using {data_scaler} to rescale data for better results")

        # Fit scaler on data and use the same scaler in the future when needed
        self.scaler.fit(self.training_data)

        # apply transform to get rescaled values
        self.training_data = self.scaler.transform(
            self.training_data
        )  # inverse: data_original = scaler.inverse_transform(data_rescaled)

        # Now put the training data into redshift bins.
        # Use zero so that the one object with minimum
        # z in the whole survey will be in the lowest bin
        training_bin = np.zeros(self.training_z.size)

        # Find the edges that split the redshifts into n_z bins of
        # equal number counts in each
        p = np.linspace(0, 100, n_bin + 1)
        z_edges = np.percentile(self.training_z, p)

        # Now find all the objects in each of these bins
        for i in range(n_bin):
            z_low = z_edges[i]
            z_high = z_edges[i + 1]
            training_bin[(self.training_z > z_low)
                         & (self.training_z < z_high)] = i

        if 0 < train_percent < 100:
            # for speed, cut down to ?% of original size
            print(
                f'Cutting down to {train_percent}% of original training sample size for speed.'
            )
            cut = np.random.uniform(0, 1,
                                    self.training_z.size) < train_percent / 100
            training_bin = training_bin[cut]
            self.training_data = self.training_data[cut]
        elif train_percent == 100:
            pass
        else:
            raise ValueError('train_percent is not valid')

        print('Setting up the layers for DBN')
        # Set up the layers
        classifier = SupervisedDBNClassification(
            hidden_layers_structure=hidden_layers_structure,
            learning_rate_rbm=learning_rate_rbm,
            learning_rate=learning_rate,
            n_epochs_rbm=n_epochs_rbm,
            n_iter_backprop=n_iter_backprop,
            batch_size=batch_size,
            activation_function=activation,
            dropout_p=dropout_p)

        # Train the model
        print("Fitting classifier")
        classifier.fit(self.training_data, training_bin)

        self.classifier = classifier
        self.z_edges = z_edges
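The matching inference step is not part of this excerpt. A minimal sketch of what it could look like, assuming the incoming data has already been value-added the same way and that numpy is imported as np (the method name is an assumption):

    def estimate(self, data):
        # Hypothetical counterpart to train(): reuse the fitted scaler, then
        # predict a redshift-bin index per galaxy.
        data = self.scaler.transform(data)
        return np.asarray(list(self.classifier.predict(data)))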
Example #16
import numpy as np

np.random.seed(1337)  # for reproducibility
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score  # sklearn.metrics.classification is a removed private path
from dbn.tensorflow import SupervisedDBNClassification
# use "from dbn import SupervisedDBNClassification" for computations on CPU with numpy
from sklearn.datasets import load_iris

iris = load_iris()
data_x = iris.data
data_y = iris.target

x_train, x_test, y_train, y_test = train_test_split(data_x,
                                                    data_y,
                                                    test_size=0.2,
                                                    random_state=42)

# Training
classifier = SupervisedDBNClassification(
    hidden_layers_structure=[500, 1000, 500],
    learning_rate_rbm=0.05,
    learning_rate=0.1,
    n_epochs_rbm=20,  # RBM training steps
    n_iter_backprop=50,  # ANN training steps
    activation_function='relu',
    dropout_p=0.2)

classifier.fit(x_train, y_train)
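The excerpt ends right after fitting. A natural follow-up, evaluating on the held-out split (not part of the original snippet):

# Evaluate on the 20% held-out split.
y_pred = classifier.predict(x_test)
print('Accuracy: %f' % accuracy_score(y_test, y_pred))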
Example #17
    labels[i] = output[i]

print "Begin DBN model"
X_train, X_test, Y_train, Y_test = train_test_split(X,
                                                    Y,
                                                    test_size=0.1,
                                                    random_state=i)
# DBN here is presumably nolearn's gdbn-backed classifier (import not shown).
dbn_model = DBN([X_train.shape[1], 300, 2],
                learn_rates=0.3,
                learn_rate_decays=0.9,
                epochs=100,
                verbose=1)
dbn_model.fit(X_train, Y_train)
y_true, y_pred = Y_test, dbn_model.predict(X_test)  # Get our predictions
print(classification_report(y_true, y_pred))  # Classification on each digit
print('The accuracy is:', accuracy_score(y_true, y_pred))

print("Begin DBN V2 model")
classifier = SupervisedDBNClassification(hidden_layers_structure=[1000, 200],
                                         learning_rate_rbm=0.05,
                                         learning_rate=0.1,
                                         n_epochs_rbm=10,
                                         n_iter_backprop=100,
                                         batch_size=32,
                                         activation_function='relu',
                                         dropout_p=0.2)
classifier.fit(X_train, Y_train[:, 0])
# Test
Y_pred = classifier.predict(X_test)
# The model was fit on Y_train[:, 0], so compare against the same column.
print('Done.\nAccuracy: %f' % accuracy_score(Y_test[:, 0], Y_pred))
Example #18
        test_pos = generateDatasets(filename, "positive")
        for i in xrange(0, len(test_pos["LAPD"])):
            clean_test_LAPD.append(" ".join(
                KaggleWord2VecUtility.review_to_wordlist(
                    test_pos["LAPD"][i], False)))

########################################################################################################
# GENERATE MODEL
########################################################################################################

# Training
classifier = SupervisedDBNClassification(
    hidden_layers_structure=hidden_layers_units,
    learning_rate_rbm=learning_rate_rbm,
    learning_rate=learning_rate_backprop,
    n_epochs_rbm=n_epochs_rbm,
    n_iter_backprop=n_iter_backprop,
    batch_size=batch_size,
    activation_function=activation_function,
    dropout_p=dropout_p)

########################################################################################################
# GENERATE WORD EMBEDDING
########################################################################################################

# Use word embeddings
if word_embedding == 'True':
    if w2v_dictionary != "None":
        # Load w2v model
        print("Loading w2v model...")
        model = Word2Vec.load(w2v_dictionary)
Example #19
"""
Originally downloaded from https://github.com/albertbup/deep-belief-network
on March 29, 2017, and modified slightly to support python3. The
DBN folder is covered under the MIT license.
"""
from __future__ import print_function
from __future__ import division
from dbn.tensorflow import SupervisedDBNClassification
from sklearn.preprocessing import OneHotEncoder
from sklearn.metrics import accuracy_score
from tensorflow.examples.tutorials.mnist import input_data
import numpy as np
train_ex = 8000
dbn = SupervisedDBNClassification(hidden_layers_structure=[1024, 1024],
                                  learning_rate_rbm=0.1,
                                  learning_rate=0.1,
                                  n_epochs_rbm=10,
                                  n_iter_backprop=100,
                                  batch_size=100,
                                  activation_function='sigmoid',
                                  dropout_p=0.2)
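The data-loading code is cut off in this excerpt. A plausible continuation using the TF1-era tutorial reader imported above (the data path and slicing are assumptions):

# Load MNIST with integer labels and train on the first train_ex examples.
mnist = input_data.read_data_sets('MNIST_data/', one_hot=False)
dbn.fit(mnist.train.images[:train_ex], mnist.train.labels[:train_ex])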


def unpickle(file):
    """Load data"""
    import pickle
    with open(file, 'rb') as source:
        ret_dict = pickle.load(source, encoding='bytes')
    return ret_dict


def get_data():
    """
Example #20
    X = np.empty([len(f), 62500])
    for i in range(len(f)):
        tmp = np.load(f[i])
        tmp = np.reshape(tmp, [1, 62500])
        X[i, :] = tmp

    return X, Y


for tz in range(5, 6):
    print('\nWorking on Zone == {}'.format(tz))
    Matt_Net = SupervisedDBNClassification(
        hidden_layers_structure=[80, 160, 40],
        learning_rate_rbm=0.05,
        learning_rate=0.1,
        n_epochs_rbm=100,
        n_iter_backprop=400,
        batch_size=8,
        activation_function='relu',
        dropout_p=0.2)
    # Split Data
    X, Y = get_dataset(tz)
    X_train, X_test, Y_train, Y_test = train_test_split(X,
                                                        Y,
                                                        test_size=0.2,
                                                        random_state=0)
    print('Size of training set == {}, Size of testing set == {}\n'.format(
        len(X_train), len(X_test)))

    # timer is assumed to be timeit.default_timer (import not shown)
    start_time = timer()
    tot_start = start_time