Example 1
# Method excerpted from a class that prepares train/test splits from CSV data.
def deep_belief_network_prediction(
    self,
    learning_rate,
    training_iterations,
    testing_iterations=10,
    hidden_layer_sizes_array=(10, 10),  # tuple avoids the mutable-default pitfall
):
    accuracy_list = []
    for _ in range(testing_iterations):
        # Re-split the CSV data for each evaluation run.
        self.prepare_training_data_from_csv_data(self.csv_data)
        classifier = SupervisedDBNClassification(
            hidden_layers_structure=list(hidden_layer_sizes_array),
            learning_rate_rbm=learning_rate / 2,
            learning_rate=learning_rate,
            n_epochs_rbm=int(training_iterations / 10),
            n_iter_backprop=training_iterations,
            batch_size=256,
            activation_function="relu",
            dropout_p=0.2,
        )
        classifier.fit(self.x_data_training, self.y_data_training)
        y_data_prediction = classifier.predict(self.x_data_testing)
        classifier_accuracy = accuracy_score(self.y_data_testing, y_data_prediction)
        accuracy_list.append(classifier_accuracy)
    # Report the best accuracy observed across the repeated runs.
    return max(accuracy_list)
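
For context, a minimal standalone sketch of the same SupervisedDBNClassification workflow outside a class (assuming the dbn package and scikit-learn are installed; the digits dataset is just a stand-in for the CSV data used above):

import numpy as np
from sklearn.datasets import load_digits
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from dbn import SupervisedDBNClassification

X, y = load_digits(return_X_y=True)
X = (X / 16).astype(np.float32)  # scale 0-16 pixel values into [0, 1]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

clf = SupervisedDBNClassification(
    hidden_layers_structure=[10, 10],
    learning_rate_rbm=0.05,
    learning_rate=0.1,
    n_epochs_rbm=5,
    n_iter_backprop=50,
    batch_size=256,
    activation_function="relu",
    dropout_p=0.2,
)
clf.fit(X_train, y_train)
print(accuracy_score(y_test, clf.predict(X_test)))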
Example 2
def foo():  # everything works as-is

    import os

    dir_name = 'experiment2'

    work_path = os.getcwd()  # current working directory
    path = os.path.join(work_path, dir_name)
    print("The current working directory is %s" % work_path)

    # Create the experiment directory if it does not exist yet.
    if not os.path.exists(path):
        os.mkdir(path)
    else:
        print('Directory already exists')

    savedir = path
    filename = os.path.join(savedir, 'model.joblib')

    from sklearn.datasets import load_breast_cancer
    X, Y = load_breast_cancer(return_X_y=True)

    # Alternative estimators that were tried:
    #from sklearn.gaussian_process import GaussianProcessClassifier
    #to_persist = GaussianProcessClassifier()

    #from lightning.classification import AdaGradClassifier
    #to_persist = AdaGradClassifier()

    from dbn import SupervisedDBNClassification
    to_persist = SupervisedDBNClassification()

    # Train on the first 400 samples, keep the rest for evaluation.
    to_persist.fit(X[:400], Y[:400])

    print(filename)

    import joblib
    joblib.dump(to_persist, filename)

    # Load the persisted model back from file and evaluate it.
    clf = joblib.load(filename)

    print(clf.score(X[400:], Y[400:]))
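
Since the dump and the reload above both happen inside foo(), here is a minimal sketch (assuming the same 'experiment2/model.joblib' path relative to the working directory) of restoring the persisted model later, from a separate script:

import os
import joblib

model_path = os.path.join(os.getcwd(), 'experiment2', 'model.joblib')
clf = joblib.load(model_path)  # rebuilds the fitted SupervisedDBNClassification
print(clf)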
Example 3
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from dbn import SupervisedDBNClassification

# X, Y are assumed to have been loaded earlier (the /16 scaling suggests the
# 8x8 digits dataset, whose pixel values range from 0 to 16).

# Data scaling
X = (X / 16).astype(np.float32)

# Splitting data
X_train, X_test, Y_train, Y_test = train_test_split(X,
                                                    Y,
                                                    test_size=0.2,
                                                    random_state=0)

# Training
classifier = SupervisedDBNClassification(
    hidden_layers_structure=[1000, 1000, 1000],
    learning_rate_rbm=0.05,
    learning_rate=0.1,
    n_epochs_rbm=15,
    n_iter_backprop=50,
    batch_size=32,
    activation_function='relu',
    dropout_p=0.2)
classifier.fit(X_train, Y_train)

# Save the model
classifier.save('model.pkl')

# Restore it
classifier = SupervisedDBNClassification.load('model.pkl')

# Test
Y_pred = classifier.predict(X_test)
print('Done.\nAccuracy: %f' % accuracy_score(Y_test, Y_pred))
winsound.Beep(1000, 440)  # 1000 Hz beep for 440 ms to signal completion (requires `import winsound`, Windows only)
Example 4
# Snippet excerpt: load_dataset/filter_dataset are helper functions defined elsewhere,
# and `classifier` is a SupervisedDBNClassification instance created earlier.
classnames_16, train_data_16, train_labels_16, test_data_16, test_labels_16 = load_dataset(
    'data/caltech101_silhouettes_16_split1.mat')
classnames_28, train_data_28, train_labels_28, test_data_28, test_labels_28 = load_dataset(
    'data/caltech101_silhouettes_28_split1.mat')
classnames_16, train_data_16, train_labels_16, test_data_16, test_labels_16 = filter_dataset(
    classnames_16, train_data_16, train_labels_16, test_data_16,
    test_labels_16)
classnames_28, train_data_28, train_labels_28, test_data_28, test_labels_28 = filter_dataset(
    classnames_28, train_data_28, train_labels_28, test_data_28,
    test_labels_28)

# classifier.fit(np.array(train_data_16).astype('int64'), train_labels_16)
# Y_pred = classifier.predict(np.array(test_data_16).astype('int64'))
# print('Done.\nAccuracy: %f' % accuracy_score(test_labels_16, Y_pred))
classifier.fit(np.array(train_data_28).astype('int64'), train_labels_28)
Y_pred = classifier.predict(np.array(test_data_28).astype('int64'))
print('Done.\nAccuracy: %f' % accuracy_score(test_labels_28, Y_pred))

# Build a confusion matrix; predictions and ground-truth labels are shifted
# down by 34 to compensate for the offset left by the filtered class range.
data = np.zeros([len(classnames_28), len(classnames_28)])
for i in range(len(Y_pred)):
    Y_pred[i] = Y_pred[i] - 34
# for i in range(0, len(test_labels_28) - 1):
#     print(test_labels_28[i] - 34)
#     test_labels_28[i] = test_labels_28[i] - 34
for i in range(len(Y_pred)):
    data[test_labels_28[i] - 34 - 1, Y_pred[i] - 1] += 1
print(data)

with open('wyniki.csv', mode='w') as wyniki:  # 'wyniki' is Polish for 'results'
    wyniki_writer = csv.writer(wyniki, dialect='excel')
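    # The original snippet is cut off at this point. A minimal sketch, purely
    # an assumption about the intent, of dumping the confusion matrix through
    # the writer created above:
    wyniki_writer.writerow(list(classnames_28))  # header row: class names
    for row in data:                             # one confusion-matrix row per CSV line
        wyniki_writer.writerow(row.tolist())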
Example 5
from sklearn import preprocessing
from sklearn.metrics import accuracy_score
from dbn import SupervisedDBNClassification

# train_set / test_set (and X_train) are assumed to have been loaded earlier;
# the last column of each array holds the label.

#X_scaled_train = preprocessing.scale(X_train)
min_max_scaler = preprocessing.MinMaxScaler()
X_scaled_train = min_max_scaler.fit_transform(X_train)
y_train = train_set[1:, -1]
X_test = test_set[:5000, 1:-1]
#X_scaled_test = preprocessing.scale(X_test)
min_max_scaler = preprocessing.MinMaxScaler()
X_scaled_test = min_max_scaler.fit_transform(X_test)
y_test = test_set[:5000, -1]

# Training (note: the unscaled X_train/X_test are used below, so the
# min-max-scaled arrays computed above are not actually consumed)
clf = SupervisedDBNClassification(
    hidden_layers_structure=[1024, 512],
    learning_rate_rbm=0.05,
    learning_rate=0.1,
    n_epochs_rbm=3,
    n_iter_backprop=10,
    batch_size=128,
    activation_function='sigmoid',  # 'relu' raised an error here
    dropout_p=0.2)
clf.fit(X_train, y_train)

# Save the model
clf.save('model.pkl')

# Restore it
classifier = SupervisedDBNClassification.load('model.pkl')

# Test
y_pred = classifier.predict(X_test)
print('Done.\nAccuracy: %f' % accuracy_score(y_test, y_pred))
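
One further note: the snippet fits a separate MinMaxScaler on the test set. If the scaled features were actually used, the usual scikit-learn pattern is to fit the scaler on the training data only and reuse it for the test data, roughly (continuing with X_train/X_test from above):

scaler = preprocessing.MinMaxScaler()
X_scaled_train = scaler.fit_transform(X_train)  # learn min/max on the training set
X_scaled_test = scaler.transform(X_test)        # apply the same scaling to the test set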