Example #1
def dbn():
    estim = SupervisedDBNClassification(
        hidden_layers_structure=[256, 256, 256, 256, 256, 256],
        learning_rate_rbm=0.05,
        learning_rate=0.1,
        n_epochs_rbm=10,
        n_iter_backprop=100,
        batch_size=32,
        activation_function='relu',
        dropout_p=0.2,
        verbose=0)
    estim.fit(x_train, y_train)
    y_pred = estim.predict(x_test)
    print("f1 score", f1_score(y_test, y_pred))
    print("accuracy score", accuracy_score(y_test, y_pred))
    return 0
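
main() below relies on project helpers that are not shown here (read_data, nudge_dataset, min_max_normalize, evaluate). As an illustration only, a min-max scaler compatible with how it is called might look like the following sketch; the original helper's exact behavior is an assumption.

import numpy as np

def min_max_normalize(data, eps=1e-8):
    # Hypothetical sketch: scale each feature column into [0, 1].
    # The project's real helper may behave differently.
    data = np.asarray(data, dtype=np.float32)
    minimum = data.min(axis=0)
    maximum = data.max(axis=0)
    return (data - minimum) / (maximum - minimum + eps)
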
def main():
    train_data, train_label = read_data("TRAIN", IMAGE_SIZE)
    test_data, test_label = read_data("TEST", IMAGE_SIZE)

    # flat data
    flatten_train_data = train_data.reshape(np.size(train_data, 0), -1)
    flatten_test_data = test_data.reshape(np.size(test_data, 0), -1)

    flatten_train_data, train_label = nudge_dataset(flatten_train_data,
                                                    train_label)

    # flatten_train_data = np.concatenate([flatten_train_data, gaussian_filter1d(flatten_train_data, sigma=0.5)])
    # train_label = np.concatenate([train_label for _ in range(2)])

    # normalize data
    flatten_train_data = min_max_normalize(flatten_train_data)
    flatten_test_data = min_max_normalize(flatten_test_data)

    expanded_train_data = np.expand_dims(
        flatten_train_data.reshape((-1, ) + IMAGE_SIZE), -1)
    expanded_test_data = np.expand_dims(
        flatten_test_data.reshape((-1, ) + IMAGE_SIZE), -1)

    dbn = SupervisedDBNClassification(hidden_layers_structure=[128, 64],
                                      learning_rate_rbm=0.001,
                                      learning_rate=0.001,
                                      n_epochs_rbm=20,
                                      n_iter_backprop=10000,
                                      batch_size=32,
                                      activation_function='relu',
                                      dropout_p=0.2)
    dbn.fit(flatten_train_data, train_label)
    evaluate(np.asarray(list(dbn.predict(flatten_test_data))), test_label,
             "DBN")
Example #3
def fractal_modeldata(filename):
    scores = []
    print(filename)
    X, Y = loaddata(filename, 31)
    np.random.seed(13)
    indices = np.random.permutation(2030)
    test_size = int(0.1 * len(indices))
    X_train = X[indices[:-test_size]]
    Y_train = Y[indices[:-test_size]]
    X_test = X[indices[-test_size:]]
    Y_test = Y[indices[-test_size:]]
    # relu, sigmoid
    classifier = SupervisedDBNClassification(hidden_layers_structure=[30, 30],
                                             learning_rate_rbm=0.05,
                                             learning_rate=0.1,
                                             n_epochs_rbm=10,
                                             n_iter_backprop=1000,
                                             batch_size=16,
                                             activation_function='sigmoid',
                                             dropout_p=0.1,
                                             verbose=0)

    classifier.fit(X_train, Y_train)
    Y_pred = classifier.predict(X_test)
    print(accuracy_score(Y_test, Y_pred)*100)
    print(classification_report(Y_test, Y_pred))
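
The manual split above permutes exactly 2030 indices with a fixed seed and holds out the last 10%. A roughly equivalent split can be written with scikit-learn's train_test_split; this sketch is for comparison only and will not reproduce the same partition.

from sklearn.model_selection import train_test_split

# Roughly equivalent 90/10 split with a fixed random state; the shuffling
# differs from the seeded permutation above, so the folds will not match.
X_train, X_test, Y_train, Y_test = train_test_split(
    X, Y, test_size=0.1, random_state=13)
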
Example #4
def fractal_modeldata(filename):
    scores = []
    print(filename)
    X, Y = loaddata(filename, 99)

    for i in range(1):
        np.random.seed(13)
        indices = np.random.permutation(1000)
        test_size = int(0.1 * len(indices))
        X_train = X[indices[:-test_size]]
        Y_train = Y[indices[:-test_size]]
        X_test = X[indices[-test_size:]]
        Y_test = Y[indices[-test_size:]]
        # relu, sigmoid
        classifier = SupervisedDBNClassification(
            hidden_layers_structure=[256, 256],
            learning_rate_rbm=0.05,
            learning_rate=0.2,
            n_epochs_rbm=30,
            n_iter_backprop=2000,
            batch_size=16,
            activation_function='sigmoid',
            dropout_p=0.1,
            verbose=0)
        classifier.fit(X_train, Y_train)
        Y_pred = classifier.predict(X_test)
        scores.append(accuracy_score(Y_test, Y_pred))
        print(classification_report(Y_test, Y_pred))
        fpr, tpr, threshold = roc_curve(Y_test, Y_pred)
        roc_auc = auc(fpr, tpr)
        plt.title('Receiver Operating Characteristic')
        plt.plot(fpr, tpr, 'b', label='AUC = %0.2f' % roc_auc)
        plt.legend(loc='lower right')
        plt.plot([0, 1], [0, 1], 'r--')
        plt.xlim([0, 1])
        plt.ylim([0, 1])
        plt.ylabel('True Positive Rate')
        plt.xlabel('False Positive Rate')
        plt.show()

    print('All Accuracy Scores in Cross: ' + str(scores))
    print('Mean Accuracy Scores: ' + str(np.mean(scores)))
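
roc_curve above is fed the hard labels returned by predict, which yields a single operating point rather than a full curve. Other examples on this page show that SupervisedDBNClassification exposes predict_proba; assuming binary labels with the positive class in column 1, a probability-based ROC could be computed as in this sketch.

# Sketch only: use positive-class probabilities instead of hard labels.
# Treating column index 1 as the positive class is an assumption.
Y_score = np.asarray(list(classifier.predict_proba(X_test)))[:, 1]
fpr, tpr, threshold = roc_curve(Y_test, Y_score)
roc_auc = auc(fpr, tpr)
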
def example():
    np.random.seed(1337)  # for reproducibility
    from sklearn.datasets import load_digits
    from sklearn.model_selection import train_test_split
    from sklearn.metrics import accuracy_score

    from dbn.tensorflow import SupervisedDBNClassification

    # Loading dataset
    digits = load_digits()
    X, Y = digits.data, digits.target

    # Data scaling
    X = (X / 16).astype(np.float32)

    # Splitting data
    X_train, X_test, Y_train, Y_test = train_test_split(X,
                                                        Y,
                                                        test_size=0.2,
                                                        random_state=0)

    # Training
    classifier = SupervisedDBNClassification(
        hidden_layers_structure=[256, 256],
        learning_rate_rbm=0.05,
        learning_rate=0.1,
        n_epochs_rbm=10,
        n_iter_backprop=100,
        batch_size=32,
        activation_function='relu',
        dropout_p=0.2)
    print(X_train.shape, Y_train.shape)
    classifier.fit(X_train, Y_train)

    # Test
    Y_pred = np.asarray(list(classifier.predict(X_test)))
    print('Done.\nAccuracy: %f' % accuracy_score(Y_test, Y_pred))
Example #6
File: DBNfinal.py  Project: nikhil-garg/EEG
#print(result)

# DBN ###################################################################

classifier = SupervisedDBNClassification(hidden_layers_structure=[500, 500],
                                         learning_rate_rbm=0.05,
                                         learning_rate=0.1,
                                         n_epochs_rbm=10,
                                         n_iter_backprop=100,
                                         batch_size=32,
                                         activation_function='relu',
                                         dropout_p=0.2)
_ = classifier.fit(X_train, Y_train)

print(classification_report(Y_test, classifier.predict(X_test)))

Confusion_matrix_plot('DBN',
                      classifier.predict(X_test),
                      Y_test,
                      classes=['1', '2', '3', '4', 'R', 'W'],
                      cmap=plt.get_cmap('Blues'))

resultDBN = classifier.predict(X_test)

####combination######################################

listLR = [0, 2, 3]
listDBN = [1, 4, 5]
final = []
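
The excerpt ends just before the combination step. listLR and listDBN appear to route class indices to a logistic-regression model and to the DBN respectively, but the actual rule is not shown; the sketch below is only a guess, and resultLR is a hypothetical array holding the other model's predictions.

# Hypothetical combination rule (the original logic is not shown): keep the
# DBN's prediction when it falls in listDBN, otherwise fall back to the
# other classifier. resultLR is assumed to exist.
for i, dbn_label in enumerate(resultDBN):
    if dbn_label in listDBN:
        final.append(dbn_label)
    else:
        final.append(resultLR[i])
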
Example #7
# Training
classifier = SupervisedDBNClassification(hidden_layers_structure=setting['hidden_layers_structure'],
                                         learning_rate_rbm=float(setting['learning_rate_rbm']),
                                         learning_rate=float(setting['learning_rate']),
                                         n_epochs_rbm=int(setting['n_epochs_rbm']),
                                         n_iter_backprop=int(setting['n_iter_backprop']),
                                         batch_size=int(setting['batch_size']),
                                         activation_function=setting['activation_function'],
                                         dropout_p=float(setting['dropout_p']),
                                         l2_regularization=float(setting['l2_regularization']),
                                         contrastive_divergence_iter=int(setting['contrastive_divergence_iter']))

classifier.fit(X_train, Y_train)

# Evaluate on the training set
Y_pred = classifier.predict(X_train)
accuracy = accuracy_score(Y_train, Y_pred)
print('Done.\nAccuracy: %f' % accuracy)

file_out.write('\n\n-------------------------------\n\n')

for line in open('setting.txt'):
    file_out.write(line)

file_out.write(str(accuracy) + '\n')
file_out.close()
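
The setting dictionary used above is read from setting.txt, which is also copied into the output file, but its format is not shown. A minimal parser that would produce a compatible dict, assuming simple key=value lines and a comma-separated layer list, might look like this sketch.

# Hypothetical parser for setting.txt; the real file format may differ.
setting = {}
with open('setting.txt') as f:
    for line in f:
        line = line.strip()
        if not line or line.startswith('#'):
            continue
        key, value = line.split('=', 1)
        setting[key.strip()] = value.strip()

# hidden_layers_structure is expected as a list of ints, e.g. "256,256"
setting['hidden_layers_structure'] = [
    int(v) for v in str(setting['hidden_layers_structure']).split(',')]
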




Example #8
    # and learns the vocabulary; second, it transforms our training data
    # into feature vectors. The input to fit_transform should be a list of
    # strings.
    train_data_features = vectorizer.fit_transform(clean_train_LAPD)
    test_data_features = vectorizer.transform(clean_test_LAPD)

    # NumPy arrays are easy to work with, so convert the sparse feature
    # matrices to dense arrays
    train_data_features = train_data_features.toarray()
    test_data_features = test_data_features.toarray()

    ###################
    # TRAIN THE MODEL #
    ###################
    classifier.fit(train_data_features, train["Problematic"])

########################################################################################################
# EVALUATE THE MODEL
########################################################################################################

Y_pred = classifier.predict(testDataVecs)
Y_p = classifier.predict_proba(testDataVecs)
Y_n = classifier.predict_proba_dict(testDataVecs)
print(Y_n)
print(Y_p)
print(Y_pred)
print(test["Problematic"])
# print('Done.\nAccuracy: %f' % accuracy_score(test["Problematic"], Y_pred))
# res = [[Y_p[0, 0], Y_p[0, 1], Y_pred, test["Problematic"]]]
# writer.writerows(res)
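
The vectorizer used above is created earlier in the original file; its fit_transform/transform calls match scikit-learn's bag-of-words vectorizers. A plausible construction, offered only as an assumption, is:

from sklearn.feature_extraction.text import CountVectorizer

# Assumed setup: a simple bag-of-words vectorizer over the cleaned documents.
vectorizer = CountVectorizer(analyzer='word',
                             tokenizer=None,
                             preprocessor=None,
                             stop_words=None,
                             max_features=5000)
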
Example #9
def run(params):

    # ##################### get parameters and define logger ################

    # device
    os.environ['CUDA_VISIBLE_DEVICES'] = str(params.gpu)

    # get parameters
    data_name = params.data.data_name
    data_dir = params.data.data_dir
    target_dir = params.data.target_dir
    train_prop = params.data.train_prop
    val_prop = params.data.val_prop

    train_params = params.train
    method_name = params.method_name
    result_dir = params.result_dir
    folder_level = params.folder_level

    train_prop = train_prop if train_prop < 1 else int(train_prop)
    val_prop = val_prop if val_prop < 1 else int(val_prop)

    result_root = result_dir
    local_v = locals()
    for s in folder_level:
        result_dir = check_path(os.path.join(result_dir, str(local_v[s])))

    # define output dirs
    acc_dir = os.path.join(result_root, 'accuracy.csv')
    log_dir = os.path.join(result_dir, 'train.log')
    model_dir = os.path.join(result_dir, 'weights.pkl')
    # soft_dir = os.path.join(result_dir, 'soft_label.mat')
    # loss_dir = os.path.join(result_dir, 'loss_curve.png')

    # define logger
    logger = define_logger(log_dir)

    # print parameters
    num1 = 25
    num2 = 100
    logger.info('%s begin a new training: %s %s' %
                ('#' * num1, method_name, '#' * num1))
    params_str = recur_str_dict_for_show(params, total_space=num2)
    logger.info('show parameters ... \n%s' % params_str)

    # ########################### get data, train ############################

    logger.info('get data ...')
    mask_dir = os.path.dirname(data_dir)
    data, target = read_data(data_dir, target_dir)
    train_mask, val_mask, test_mask = load_masks(mask_dir, target, train_prop,
                                                 val_prop)
    x_train, y_train = get_vector_samples(data, target, train_mask)

    logger.info('get model ...')
    from dbn.tensorflow import SupervisedDBNClassification
    classifier = SupervisedDBNClassification(**train_params)

    logger.info('begin to train ...')
    s = time.time()
    classifier.fit(x_train, y_train)
    e = time.time()
    train_time = e - s
    logger.info('training time: %.4fs' % train_time)

    logger.info('save model ...')
    classifier.save(model_dir)

    # ########################### predict, output ###########################

    all_data = data.reshape(-1, data.shape[1] * data.shape[2]).T

    classifier = SupervisedDBNClassification.load(model_dir)

    logger.info('begin to predict ...')
    s = time.time()
    pred = classifier.predict(all_data)
    pred = np.array(pred)
    pred = pred.reshape(target.shape) + 1
    e = time.time()
    pred_time = (e - s)
    logger.info('predicted time: %.4fs' % pred_time)

    # output predicted map(png/mat), accuracy table and other records
    logger.info('save classification maps etc. ...')
    train_records = {
        'train_time': '%.4f' % train_time,
        'pred_time': '%.4f' % pred_time
    }

    ro = ResultOutput(pred,
                      data,
                      target,
                      train_mask,
                      val_mask,
                      test_mask,
                      result_dir,
                      acc_dir,
                      hyper_params=params,
                      train_records=train_records)
    ro.output()
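
check_path, define_logger, load_masks, get_vector_samples, and ResultOutput are utilities from the surrounding project and are not part of this excerpt. For orientation only, a minimal define_logger that writes to both the console and the given log file could look like the sketch below; it is not the project's implementation.

import logging

def define_logger(log_dir):
    # Sketch: log to stdout and to the train.log path passed in.
    logger = logging.getLogger('train')
    logger.setLevel(logging.INFO)
    if not logger.handlers:
        fmt = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
        for handler in (logging.StreamHandler(), logging.FileHandler(log_dir)):
            handler.setFormatter(fmt)
            logger.addHandler(handler)
    return logger
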
Example #10
import numpy as np
np.random.seed(1337)  # for reproducibility
from sklearn.metrics import accuracy_score

from dbn.tensorflow import SupervisedDBNClassification
from Rafd import Rafd

# Splitting data
rafd = Rafd("entrenamiento/")
X_train, X_test, Y_train, Y_test = rafd.getData()

# Training
classifier = SupervisedDBNClassification(hidden_layers_structure=[256, 256],
                                         learning_rate_rbm=0.05,
                                         learning_rate=0.001,
                                         n_epochs_rbm=15,
                                         n_iter_backprop=100,
                                         batch_size=32,
                                         activation_function='sigmoid',
                                         dropout_p=0.2)
classifier.fit(X_train, Y_train)

# Save the model
classifier.save('model.pkl')

# Restore it
classifier = SupervisedDBNClassification.load('model.pkl')

# Test
Y_pred = classifier.predict(X_test)
print('Done.\nAccuracy: %f' % accuracy_score(Y_test, Y_pred))
Example #11
if use_color:
    x_train = np.array([x.flatten() / 255 for x in cx_train[:train_ex]])
    x_test = np.array([x.flatten() / 255 for x in cx_test[:test_ex]])
else:
    x_train = np.array(
        [rgb2gray(x).flatten() / 255 for x in cx_train[:train_ex]])
    x_test = np.array([rgb2gray(x).flatten() / 255 for x in cx_test[:test_ex]])
y_train = cy_train[:train_ex].flatten()
y_test = cy_test[:test_ex].flatten()

with warnings.catch_warnings():
    warnings.simplefilter('ignore', category=RuntimeWarning)
    dbn.fit(x_train, y_train)

predictions = list(dbn.predict(x_test))
accuracy = accuracy_score(y_test, predictions)
print('Accuracy: {0}'.format(accuracy))

if not use_color:
    plt.set_cmap('gray')

fig = plt.figure()
for i in range(10):
    subplt = plt.subplot(2, 10, i + 1)
    hot_index = predictions[i]
    subplt.set_title('Act')
    subplt.axis('off')
    act_image = np.reshape(x_test[i], img_shape)
    if use_color:
        subplt.imshow(act_image)
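
rgb2gray is not defined in the excerpt. If it follows the usual luminance conversion (as skimage.color.rgb2gray does), a lightweight stand-in would be the sketch below; treating this as the author's exact implementation would be a guess.

import numpy as np

def rgb2gray(image):
    # Standard ITU-R BT.601 luminance weights; values stay in the input range,
    # so the caller's division by 255 still applies.
    return np.dot(image[..., :3], [0.299, 0.587, 0.114])
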
Example #12
    'ENIP', 'GVCP', 'NBNS', 'SSDP', 'TCP'
]].copy()
y = data[['Safe']].copy()

train_x, test_x, train_y, test_y = train_test_split(
    x, y, test_size=0.2)  #, random_state = 0)
train_x = train_x.values
train_y = train_y.values
train_y = train_y[:, 0]
test_x = test_x.values
test_y = test_y.values
test_y = test_y[:, 0]

classifier = SupervisedDBNClassification(hidden_layers_structure=[256, 256],
                                         learning_rate_rbm=0.05,
                                         learning_rate=0.1,
                                         n_epochs_rbm=10,
                                         n_iter_backprop=100,
                                         batch_size=32,
                                         activation_function='sigmoid',
                                         dropout_p=0.2)

classifier.fit(train_x, train_y)

classifier.save('model.pkl')
# Restore it
classifier = SupervisedDBNClassification.load('model.pkl')

Y_pred = classifier.predict(test_x)
print('Done.\nAccuracy: %f' % accuracy_score(test_y, Y_pred))

import numpy as np

from sklearn.datasets import load_digits
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score

from dbn.tensorflow import SupervisedDBNClassification


# Loading dataset
digits = load_digits()
X, Y = digits.data, digits.target

# Data scaling
X = (X / 16).astype(np.float32)

# Splitting data
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=0)

# Training
classifier = SupervisedDBNClassification(hidden_layers_structure=[256, 256],
                                         learning_rate_rbm=0.05,
                                         learning_rate=0.1,
                                         n_epochs_rbm=10,
                                         n_iter_backprop=100,
                                         batch_size=32,
                                         activation_function='relu',
                                         dropout_p=0.2)
classifier.fit(X_train, Y_train)

# Test
Y_pred = classifier.predict(X_test)
print('Done.\nAccuracy: %f' % accuracy_score(Y_test, Y_pred))
sPickleToArr(y_train, "y_train_mel.p")
sPickleToArr(x_test, "x_test_mel.p")
sPickleToArr(y_test, "y_test_mel.p")

#x_train = sPickle.s_load(open(source_path + "x_train_mel.p")) 
#y_train = sPickle.s_load(open(source_path + "y_train_mel.p")) 
#x_test = sPickle.s_load(open(source_path + "x_test_mel.p")) 
#y_test = sPickle.s_load(open(source_path + "y_test_mel.p")) 
# Splitting data

# Training
classifier = SupervisedDBNClassification(hidden_layers_structure=[256, 256],
                                         learning_rate_rbm=0.05,
                                         learning_rate=0.1,
                                         n_epochs_rbm=10,
                                         n_iter_backprop=100,
                                         batch_size=32,
                                         activation_function='relu',
                                         dropout_p=0.2)
classifier.fit(x_train, y_train)

# Save the model
classifier.save('model.pkl')

# Restore it
classifier = SupervisedDBNClassification.load('model.pkl')

# Test
y_pred = classifier.predict(x_test)
print('Done.\nAccuracy: %f' % accuracy_score(y_test, y_pred))
mlp = SupervisedDBNClassification(hidden_layers_structure=[19, 30, 19],
                                  learning_rate_rbm=0.05,
                                  learning_rate=0.1,
                                  n_epochs_rbm=10,
                                  n_iter_backprop=50,
                                  batch_size=32,
                                  activation_function='relu',
                                  dropout_p=0.2)

mlp.fit(X_train, y_train)
# Save the model
mlp.save('model.pkl')
# Restore it
mlp = SupervisedDBNClassification.load('model.pkl')

predictions = mlp.predict(X_test)

RMSE_sum = 0

residuals = []
for x in range(0, len(X_test)):
    RMSE_sum = RMSE_sum + ((y_test[x] - predictions[x])**2)
    residuals.append(abs(y_test[x] - predictions[x]))

RMSE = math.sqrt(RMSE_sum / len(X_test))
print("RMSE :", RMSE)
print("Mean of predictions : ", np.mean(predictions))
print("Mean of residuals : ", np.mean(residuals))
print("Standard deviation : ", np.std(residuals, ddof=1))
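
The RMSE loop above can be cross-checked against scikit-learn; the snippet below computes the same quantity and is shown only for comparison.

import math
from sklearn.metrics import mean_squared_error

# Same value as the manual loop: sqrt of the mean squared residual.
rmse_check = math.sqrt(mean_squared_error(y_test, predictions))
print("RMSE (sklearn check):", rmse_check)
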
# NumPy arrays are easy to work with, so convert the sparse feature
# matrices to dense arrays
train_data_features = train_data_features.toarray()
test_data_features = test_data_features.toarray()

# Training
classifier = SupervisedDBNClassification(
    hidden_layers_structure=[500, 250, 100],
    learning_rate_rbm=0.1,
    learning_rate=0.0001,
    n_epochs_rbm=50,
    n_iter_backprop=500,
    batch_size=16,
    activation_function='sigmoid',
    dropout_p=0)
classifier.fit(train_data_features, train["Problematic"])

# Test
Y_pred = classifier.predict(test_data_features)
Y_p = classifier.predict_proba(test_data_features)
Y_n = classifier.predict_proba_dict(test_data_features)
print(Y_n)
print(Y_p)
print(Y_pred)
print(test["Problematic"])
print('Done.\nAccuracy: %f' % accuracy_score(test["Problematic"], Y_pred))
# res = [[Y_p[0, 0], Y_p[0, 1], Y_pred, test["Problematic"]]]
# writer.writerows(res)
Example #17
    print("x_test shape:", x_test.shape)
    print("y_test shape:", y_test.shape)

    classifier = SupervisedDBNClassification(
        hidden_layers_structure=[len(np.asarray(X)[train]),
                                 len(np.asarray(Y)[train])],
        learning_rate_rbm=0.05,
        learning_rate=0.1,
        n_epochs_rbm=10,
        n_iter_backprop=100,
        batch_size=32,
        activation_function='relu',
        dropout_p=0.2)
    tr_loss=classifier.fit(np.asarray(X)[train], np.asarray(Y)[train])
    val_loss = classifier.fit(np.asarray(X)[test], np.asarray(Y)[test])
    train_loss.append(tr_loss)
    validation_loss.append(val_loss)
    predict_train = classifier.predict(np.asarray(X)[train])
    accuracy_train = accuracy_score(np.asarray(Y)[train], predict_train)
    acc_train.append(accuracy_train)    
    pred_train.append(predict_train)
    predict_test = classifier.predict(np.asarray(X)[test])
    pred_test.append(predict_test)
    accuracy_test = accuracy_score(np.asarray(Y)[test], predict_test)
    acc_test.append(accuracy_test)
    #predict=classifier.predict(np.asarray(X)[test])
    no+=1
    print('Model {0} train accuracy: {1}'.format(no, accuracy_train))
    print('Model {0} test accuracy: {1}'.format(no, accuracy_test))
    print('Model {0} classification report:\n{1}'.format(
        no, classification_report(np.asarray(Y)[test], predict_test)))


print('Done.\nAccuracy: %f' % accuracy_score(Y_test, Y_pred))
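
The train/test index pairs used throughout this example come from a cross-validation splitter outside the excerpt. A typical setup that would supply them, assuming scikit-learn's KFold (the original splitter and fold count are unknown), is sketched here.

from sklearn.model_selection import KFold

# Assumed splitter; the original fold count and shuffling are not shown.
kf = KFold(n_splits=5, shuffle=True, random_state=42)
no = 0
train_loss, validation_loss = [], []
acc_train, acc_test, pred_train, pred_test = [], [], [], []
for train, test in kf.split(X):
    ...  # loop body as in the excerpt above
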
Example #18
# print training time
time_train_end = time.perf_counter()
print("Training finished, training time: %g seconds \n" %
      (time_train_end - time_train_start))
'''
# Save the model
classifier.save('model.pkl')

# Restore it
classifier = SupervisedDBNClassification.load('model.pkl')
'''
# start counting time for testing
time_test_start = time.perf_counter()

# Test
y_pred = classifier.predict(X_test)
print('Testing finished.\nAccuracy: %f' % accuracy_score(y_test, y_pred))

# print testing time
time_test_end = time.perf_counter()
print("Testing finished, testing time: %g seconds  \n" %
      (time_test_end - time_test_start))

# perform even test prediction
y_pred_even = classifier.predict(X_test_even)
print('Testing finished.\nAccuracy of even test set: %f' %
      accuracy_score(y_test_even, y_pred_even))

# Calculate running time
print("--- Total running time: %g seconds ---" % (time.perf_counter() - start_time))
def get_data():
    """
    Loads the data in.
    """
    tmp = unpickle("CIFAR-3.pickle")
    labels = []
    for index in range(len(tmp['y'])):
        if tmp['y'][index, 0] == 1:
            #airplane
            labels.append(1)
        elif tmp['y'][index, 1] == 1:
            #dog
            labels.append(2)
        else:
            #boat
            labels.append(3)
    x_train = tmp['x'][:train_ex]
    x_train /= 255
    y_train = labels[:train_ex]
    x_test = tmp['x'][train_ex:]
    x_test /= 255
    y_test = labels[train_ex:]
    return x_train, y_train, x_test, y_test


x_train, y_train, x_test, y_test = get_data()
dbn.fit(x_train, y_train)
predictions = dbn.predict(x_test)
accuracy = accuracy_score(y_test, list(predictions))
print('Accuracy: {0}'.format(accuracy))
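
unpickle and train_ex come from earlier in the original file. A common shape for such a helper, given here only as an assumption about the original, is:

import pickle

def unpickle(path):
    # Assumed helper: load a pickled dict with 'x' (images) and 'y'
    # (one-hot labels). Depending on how CIFAR-3.pickle was written, an
    # encoding argument (e.g. encoding='latin1') may be needed.
    with open(path, 'rb') as f:
        return pickle.load(f)
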
Example #20
        dropout_p=0.2)
    # Split Data
    X, Y = get_dataset(tz)
    X_train, X_test, Y_train, Y_test = train_test_split(X,
                                                        Y,
                                                        test_size=0.2,
                                                        random_state=0)
    print('Size of training set == {}, Size of testing set == {}\n'.format(
        len(X_train), len(X_test)))

    start_time = timer()
    tot_start = start_time
    Matt_Net.pre_train(X_train)
    print('Time to pretrain == {:5.3f} seconds\n'.format(timer() - start_time))

    start_time = timer()
    Matt_Net.fit(X_train, Y_train, False)
    print('Time to fit == {:5.3f} seconds\n'.format(timer() - start_time))
    print('Total time == {:5.3f} seconds\n'.format(timer() - tot_start))

    Matt_Net.save('train/Matt_Net_Zone_{}.pkl'.format(tz))

    Y_pred = Matt_Net.predict(X_test)
    start_time = timer()
    score = accuracy_score(Y_test, Y_pred)
    print(
        'Done, time to predict == {:5.3}\nAccuracy == {} for zone {}\n'.format(
            timer() - start_time, score, tz))

    del Matt_Net