def main():
    train_data, train_label = read_data("TRAIN", IMAGE_SIZE)
    test_data, test_label = read_data("TEST", IMAGE_SIZE)

    # flatten the data
    flatten_train_data = train_data.reshape(np.size(train_data, 0), -1)
    flatten_test_data = test_data.reshape(np.size(test_data, 0), -1)

    flatten_train_data, train_label = nudge_dataset(flatten_train_data,
                                                    train_label)

    # flatten_train_data = np.concatenate([flatten_train_data, gaussian_filter1d(flatten_train_data, sigma=0.5)])
    # train_label = np.concatenate([train_label for _ in range(2)])

    # normalize data
    flatten_train_data = min_max_normalize(flatten_train_data)
    flatten_test_data = min_max_normalize(flatten_test_data)

    expanded_train_data = np.expand_dims(
        flatten_train_data.reshape((-1, ) + IMAGE_SIZE), -1)
    expanded_test_data = np.expand_dims(
        flatten_test_data.reshape((-1, ) + IMAGE_SIZE), -1)

    dbn = SupervisedDBNClassification(hidden_layers_structure=[128, 64],
                                      learning_rate_rbm=0.001,
                                      learning_rate=0.001,
                                      n_epochs_rbm=20,
                                      n_iter_backprop=10000,
                                      batch_size=32,
                                      activation_function='relu',
                                      dropout_p=0.2)
    dbn.fit(flatten_train_data, train_label)
    evaluate(np.asarray(list(dbn.predict(flatten_test_data))), test_label,
             "DBN")
Example #2
def fractal_modeldata(filename):
    scores = []
    print(filename)
    X, Y = loaddata(filename, 31)
    np.random.seed(13)
    indices = np.random.permutation(2030)
    test_size = int(0.1 * len(indices))
    X_train = X[indices[:-test_size]]
    Y_train = Y[indices[:-test_size]]
    X_test = X[indices[-test_size:]]
    Y_test = Y[indices[-test_size:]]
    # relu, sigmoid
    classifier = SupervisedDBNClassification(hidden_layers_structure=[30, 30],
                                             learning_rate_rbm=0.05,
                                             learning_rate=0.1,
                                             n_epochs_rbm=10,
                                             n_iter_backprop=1000,
                                             batch_size=16,
                                             activation_function='sigmoid',
                                             dropout_p=0.1,
                                             verbose=0)

    classifier.fit(X_train, Y_train)
    Y_pred = classifier.predict(X_test)
    print(accuracy_score(Y_test, Y_pred)*100)
    print(classification_report(Y_test, Y_pred))
Example #3
def dbn(train_x, train_y, num_classes=3):
    model = SupervisedDBNClassification(learning_rate_rbm=0.05,
                                             learning_rate=0.1,
                                             n_epochs_rbm=10,
                                             n_iter_backprop=100,
                                             batch_size=32,
                                             activation_function='relu',
                                             dropout_p=0.2)
    model.fit(train_x, train_y)
    return model
Example #4
def create_model():
	classifier = SupervisedDBNClassification(hidden_layers_structure=[256, 256],
                                             learning_rate_rbm=0.05,
                                             learning_rate=0.1,
                                             n_epochs_rbm=10,
                                             n_iter_backprop=500,
                                             batch_size=32,
                                             activation_function='relu',
                                             dropout_p=0.1,verbose=False)
	return classifier
Example #5
def fractal_modeldata(filename):
    scores = []
    print(filename)
    X, Y = loaddata(filename, 99)

    for i in range(1):
        np.random.seed(13)
        indices = np.random.permutation(1000)
        test_size = int(0.1 * len(indices))
        X_train = X[indices[:-test_size]]
        Y_train = Y[indices[:-test_size]]
        X_test = X[indices[-test_size:]]
        Y_test = Y[indices[-test_size:]]
        # relu, sigmoid
        classifier = SupervisedDBNClassification(
            hidden_layers_structure=[256, 256],
            learning_rate_rbm=0.05,
            learning_rate=0.2,
            n_epochs_rbm=30,
            n_iter_backprop=2000,
            batch_size=16,
            activation_function='sigmoid',
            dropout_p=0.1,
            verbose=0)
        classifier.fit(X_train, Y_train)
        Y_pred = classifier.predict(X_test)
        scores.append(accuracy_score(Y_test, Y_pred))
        print(classification_report(Y_test, Y_pred))
        fpr, tpr, threshold = roc_curve(Y_test, Y_pred)
        roc_auc = auc(fpr, tpr)
        plt.title('Receiver Operating Characteristic')
        plt.plot(fpr, tpr, 'b', label='AUC = %0.2f' % roc_auc)
        plt.legend(loc='lower right')
        plt.plot([0, 1], [0, 1], 'r--')
        plt.xlim([0, 1])
        plt.ylim([0, 1])
        plt.ylabel('True Positive Rate')
        plt.xlabel('False Positive Rate')
        plt.show()

    print('All Accuracy Scores in Cross: ' + str(scores))
    print('Mean Accuracy Scores: ' + str(np.mean(scores)))
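Note that roc_curve above is fed the hard labels from predict(), which collapses the ROC to a single operating point. If continuous scores are wanted instead, something along these lines should work, assuming a binary problem with the positive class in the second column of predict_proba:

Y_score = np.asarray(list(classifier.predict_proba(X_test)))[:, 1]
fpr, tpr, threshold = roc_curve(Y_test, Y_score)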
Example #6
def example():
    np.random.seed(1337)  # for reproducibility
    from sklearn.datasets import load_digits
    from sklearn.model_selection import train_test_split
    from sklearn.metrics import accuracy_score

    from dbn.tensorflow import SupervisedDBNClassification

    # Loading dataset
    digits = load_digits()
    X, Y = digits.data, digits.target

    # Data scaling
    X = (X / 16).astype(np.float32)

    # Splitting data
    X_train, X_test, Y_train, Y_test = train_test_split(X,
                                                        Y,
                                                        test_size=0.2,
                                                        random_state=0)

    # Training
    classifier = SupervisedDBNClassification(
        hidden_layers_structure=[256, 256],
        learning_rate_rbm=0.05,
        learning_rate=0.1,
        n_epochs_rbm=10,
        n_iter_backprop=100,
        batch_size=32,
        activation_function='relu',
        dropout_p=0.2)
    print(X_train.shape, Y_train.shape)
    classifier.fit(X_train, Y_train)

    # Test
    Y_pred = np.asarray(list(classifier.predict(X_test)))
    print('Done.\nAccuracy: %f' % accuracy_score(Y_test, Y_pred))
Example #7
def dbn():
    estim = SupervisedDBNClassification(
        hidden_layers_structure=[256, 256, 256, 256, 256, 256],
        learning_rate_rbm=0.05,
        learning_rate=0.1,
        n_epochs_rbm=10,
        n_iter_backprop=100,
        batch_size=32,
        activation_function='relu',
        dropout_p=0.2,
        verbose=0)
    estim.fit(x_train, y_train)
    print("f1score", f1_score(estim.predict(x_test), y_test))
    print("accuracy score", accuracy_score(estim.predict(x_test), y_test))
    return 0
Example #8
y = dataset.iloc[:, -1].values

from imblearn.over_sampling import SMOTE, ADASYN
X_resampled, y_resampled = ADASYN().fit_resample(X, y)

clf_adasyn = LinearSVC().fit(X_resampled, y_resampled)  # baseline SVM on the ADASYN-resampled data

X_resampled = X_resampled.astype(np.float32)

############### feature scaling
from sklearn.preprocessing import StandardScaler
X_resampled = StandardScaler().fit_transform(X_resampled)

# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X_resampled,
                                                    y_resampled,
                                                    test_size=0.2,
                                                    random_state=0)

classifier = SupervisedDBNClassification(hidden_layers_structure=[256, 256],
                                         learning_rate_rbm=0.05,
                                         learning_rate=0.1,
                                         n_epochs_rbm=10,
                                         n_iter_backprop=100,
                                         batch_size=32,
                                         activation_function='relu',
                                         dropout_p=0.2)

classifier.fit(X_train, y_train)
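One caveat: StandardScaler is fitted on the full resampled set before the split, so the test rows influence the scaling. A leakage-free variant, as a sketch, splits first and fits the scaler on the training split only:

X_train, X_test, y_train, y_test = train_test_split(X_resampled, y_resampled,
                                                    test_size=0.2, random_state=0)
scaler = StandardScaler().fit(X_train)  # statistics from training rows only
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)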
Example #9
import numpy as np
np.random.seed(1337)  # for reproducibility
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from dbn.tensorflow import SupervisedDBNClassification
# use "from dbn import SupervisedDBNClassification" for computations on CPU with numpy
from sklearn.datasets import load_iris


iris = load_iris()
data_x = iris.data
data_y = iris.target


x_train, x_test, y_train, y_test = train_test_split(data_x, data_y, test_size=0.2, random_state=42)

# Training
classifier = SupervisedDBNClassification(hidden_layers_structure=[500, 1000, 500],
                                         learning_rate_rbm=0.05,
                                         learning_rate=0.1,
                                         n_epochs_rbm=20, # RBM training steps
                                         n_iter_backprop=50, # ANN training steps
                                         activation_function='relu',
                                         dropout_p=0.2)

classifier.fit(x_train, y_train)
Example #10
Confusion_matrix_plot('Logistic regression',
                      logistic_classifier.predict(X_test),
                      Y_test,
                      classes=['1', '2', '3', '4', 'R', 'W'],
                      cmap=plt.get_cmap('Blues'))

resultLR = logistic_classifier.predict(X_test)

#print(result)

# DBN ###################################################################

classifier = SupervisedDBNClassification(hidden_layers_structure=[500, 500],
                                         learning_rate_rbm=0.05,
                                         learning_rate=0.1,
                                         n_epochs_rbm=10,
                                         n_iter_backprop=100,
                                         batch_size=32,
                                         activation_function='relu',
                                         dropout_p=0.2)
_ = classifier.fit(X_train, Y_train)

print(classification_report(Y_test, classifier.predict(X_test)))

Confusion_matrix_plot('DBN',
                      classifier.predict(X_test),
                      Y_test,
                      classes=['1', '2', '3', '4', 'R', 'W'],
                      cmap=plt.get_cmap('Blues'))

resultDBN = classifier.predict(X_test)
Example #11
if __name__ == "__main__":
    path = sys.argv[0]
    path = path[:-(len(path) - path.rindex("/") - 1)]
    #load config of the model
    jsonfilename, jsonvariationfilename, classifiername, classificationfilename, classificationlevel, k, data_max = LoadConfig(
        modelname)
    #represent sequences of the test dataset as k-mer vector
    testfilename = GetBase(testfastafilename)
    matrixfilename = testfilename + "." + str(k) + ".matrix"
    command = path + "fasta2matrix.py " + str(
        k) + " " + testfastafilename + " " + matrixfilename
    os.system(command)
    testseqIDs, testinputs, testlabels, classes, nb_classes, input_length, level = loadData(
        matrixfilename, data_max, classificationfilename, classificationlevel)
    #load model
    model = SupervisedDBNClassification.load(classifiername)
    #predict labels for test dataset
    pred_labels = model.predict(testinputs)
    probas = model.predict_proba(testinputs)
    #load classes with sequences
    classeswithsequences = {}
    variation = {}
    testseqrecords = []
    if minprobaforBlast <= 1.0:
        testseqrecords = list(SeqIO.parse(testfastafilename, "fasta"))
        #load classes
        with open(jsonfilename) as json_file:
            classeswithsequences = json.load(json_file)
        #load variation
#		if not os.path.exists(jsonvariationfilename):
#			basename=modelname
Example #12
print(puredata.describe().transpose())

X = puredata.drop('Survival-Time', axis=1)
y = puredata['Survival-Time']

X_train, X_test, y_train, y_test = train_test_split(X, y)
y_test = y_test.tolist()
scaler = StandardScaler()
scaler.fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)

mlp = SupervisedDBNClassification(hidden_layers_structure=[19, 30, 19],
                                  learning_rate_rbm=0.05,
                                  learning_rate=0.1,
                                  n_epochs_rbm=10,
                                  n_iter_backprop=50,
                                  batch_size=32,
                                  activation_function='relu',
                                  dropout_p=0.2)

mlp.fit(X_train, y_train)
# Save the model
mlp.save('model.pkl')
# Restore it
mlp = SupervisedDBNClassification.load('model.pkl')

predictions = mlp.predict(X_test)

RMSE_sum = 0

squared_errors = []  # renamed from 'list', which shadows the built-in
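The snippet breaks off here; a plausible completion of the error bookkeeping, assuming numeric survival-time labels, would be:

for pred, actual in zip(predictions, y_test):
    err = (float(pred) - float(actual)) ** 2
    squared_errors.append(err)
    RMSE_sum += err
print('RMSE: %f' % ((RMSE_sum / len(y_test)) ** 0.5))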
Example #13
    print('Loading in ' + files)
    f = glob.glob(files)
    full_Y = np.genfromtxt('converted_stage1_labels.csv', delimiter=',')
    Y = full_Y[:, tz - 1]
    X = np.empty([len(f), 62500])
    for i in range(len(f)):
        tmp = np.load(f[i])
        tmp = np.reshape(tmp, [1, 62500])
        X[i, :] = tmp

    return X, Y


f = open(Model_Location + 'Output_Acc_Sum.txt', 'w')
for tz in range(0, 17):
    print('\nChecking Accuracy of NN for zone {}'.format(tz + 1))
    filename = Model_Location + 'Matt_Net_Zone_{}.pkl'.format(tz + 1)
    print('Loading ' + filename)
    My_Net = SupervisedDBNClassification.load(filename)
    X, Y = get_dataset(tz + 1)
    X_train, X_test, Y_train, Y_test = train_test_split(X,
                                                        Y,
                                                        test_size=0.2,
                                                        random_state=0)
    Yp = My_Net.predict(X_test)
    score = accuracy_score(Y_test, Yp)
    print('NN for Zone {} accuracy == {}'.format(tz + 1, score))
    f.write('Zone, {}, accuracy, {}\n'.format(tz + 1, score))

f.close()
Example #14
# im_arr = np.frombuffer(img_bytes, dtype=np.uint8)
# convert array into image
# img = cv2.imdecode(im_arr, flags=cv2.IMREAD_COLOR)
img = cv2.imread("img.jpg")
# convert BGR format to RGB format
rgb_image = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
# detect the (x,y)-coordinates of the bounding boxes
# corresponding to each face in the input image
# we are assuming the boxes of faces are the SAME FACE or SAME PERSON
boxes = face_recognition.face_locations(rgb_image, model='hog')
# compute the facial embedding for the face
# creates a vector of 128 numbers representing the face
encodings = face_recognition.face_encodings(rgb_image, boxes)
# change into numpy array
encodings = np.array(encodings)
# replace value < 0 into 0 (replace negative value to zero)
encodings = encodings.clip(0)

if len(encodings) > 0:
    # load model
    classifier = SupervisedDBNClassification.load('model.pkl')
    # classify
    pred = classifier.predict(encodings)
else:
    pred = ['bukandosen']

# Result
output = f'{pred[0]}'
print(output)
sys.stdout.flush()
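Since every detected box is assumed to belong to the same person, taking pred[0] suffices. If that assumption were dropped, a majority vote over the per-face predictions would be a natural extension (sketch):

from collections import Counter
output = Counter(pred).most_common(1)[0][0]  # most frequent predicted label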
Example #15
    # Create the testing data (Positive Examples)
    else:
        if (os.stat(filename).st_size != 0):
            X_Test = np.asarray([[word_to_index[w] for w in sent[:-1]] for sent in tokenized_sentences])
            y_Test = np.transpose(np.asarray([1] * (len(list(sentences)))))

# Truncate and pad input sequences
X_Train = sequence.pad_sequences(X_Train, maxlen = max_review_length)
X_Test = sequence.pad_sequences(X_Test, maxlen = max_review_length)

# Training
classifier = SupervisedDBNClassification(hidden_layers_structure=[500,250,100],
                                             learning_rate_rbm=0.1,
                                             learning_rate=0.0001,
                                             n_epochs_rbm=50,
                                             n_iter_backprop=500,
                                             batch_size=16,
                                             activation_function='sigmoid',
                                             dropout_p=0.25)
classifier.fit(X_Train, y_Train)

# Test
Y_pred = classifier.predict(X_Test)
Y_p = classifier.predict_proba(X_Test)
Y_n = classifier.predict_proba_dict(X_Test)
print(Y_n)
print(Y_p)
print(Y_pred)
print(y_Test)
print('Done.\nAccuracy: %f' % accuracy_score(y_Test, Y_pred))
Example #16
# into feature vectors. The input to fit_transform should be a list of
# strings.
train_data_features = vectorizer.fit_transform(clean_train_LAPD)
test_data_features = vectorizer.transform(clean_test_LAPD)

# The vectorizer returns sparse matrices; they are densified with
# .toarray() below before being handed to the classifier

# Training
classifier = SupervisedDBNClassification(
    hidden_layers_structure=[500, 250, 100],
    learning_rate_rbm=0.1,
    learning_rate=0.0001,
    n_epochs_rbm=50,
    n_iter_backprop=500,
    batch_size=16,
    activation_function='sigmoid',
    dropout_p=0)
classifier.fit(train_data_features.toarray(), train["Problematic"])

# Test
Y_pred = classifier.predict(test_data_features.toarray())
Y_p = classifier.predict_proba(test_data_features.toarray())
Y_n = classifier.predict_proba_dict(test_data_features.toarray())
print(Y_n)
print(Y_p)
print(Y_pred)
print(test["Problematic"])
Example #17
    def train(self, training_data, training_z):
        """Trains the classifier
        
        Parameters:
        -----------
        training_data: numpy array, size Ngalaxies x Nbands
          training data, each row is a galaxy, each column is a band as per
          band defined above
        training_z: numpy array, size Ngalaxies
          true redshift for the training sample
        """

        from dbn.tensorflow import SupervisedDBNClassification

        self.training_z = training_z

        # Create value-added data
        print("Creating value-added training data")
        self.training_data = get_valueadded_data(
            training_data, self.bands, self.opt['errors'], self.opt['colors'],
            self.opt['band_triplets'], self.opt['band_triplets_errors'],
            self.opt['heal_undetected'], self.wants_arrays)

        data_scaler = self.opt.get('data_scaler', 'MinMaxScaler')
        n_bin = self.opt['bins']
        train_percent = self.opt.get('train_percent', 1)
        n_epochs_rbm = self.opt.get('n_epochs_rbm', 2)
        activation = self.opt.get('activation', 'relu')
        learning_rate_rbm = self.opt.get('learning_rate_rbm', 0.05)
        learning_rate = self.opt.get('learning_rate', 0.1)
        n_iter_backprop = self.opt.get('n_iter_backprop', 25)
        batch_size = self.opt.get('batch_size', 32)
        dropout_p = self.opt.get('dropout_p', 0.2)
        hidden_layers_structure = self.opt.get('hidden_layers_structure',
                                               [256, 256])

        print("Finding bins for training data")

        # Data rescaling
        self.scaler = getattr(preprocessing, data_scaler)()

        print(f"Using {data_scaler} to rescale data for better results")

        # Fit scaler on data and use the same scaler in the future when needed
        self.scaler.fit(self.training_data)

        # apply transform to get rescaled values
        self.training_data = self.scaler.transform(
            self.training_data
        )  # inverse: data_original = scaler.inverse_transform(data_rescaled)

        # Now put the training data into redshift bins.
        # Use zero so that the one object with minimum
        # z in the whole survey will be in the lowest bin
        training_bin = np.zeros(self.training_z.size)

        # Find the edges that split the redshifts into n_z bins of
        # equal number counts in each
        p = np.linspace(0, 100, n_bin + 1)
        z_edges = np.percentile(self.training_z, p)

        # Now find all the objects in each of these bins
        for i in range(n_bin):
            z_low = z_edges[i]
            z_high = z_edges[i + 1]
            training_bin[(self.training_z > z_low)
                         & (self.training_z < z_high)] = i

        if 0 < train_percent < 100:
            # for speed, cut down to ?% of original size
            print(
                f'Cutting down to {train_percent}% of original training sample size for speed.'
            )
            cut = np.random.uniform(0, 1,
                                    self.training_z.size) < train_percent / 100
            training_bin = training_bin[cut]
            self.training_data = self.training_data[cut]
        elif train_percent == 100:
            pass
        else:
            raise ValueError('train_percent is not valid')

        print('Setting up the layers for DBN')
        # Set up the layers
        classifier = SupervisedDBNClassification(
            hidden_layers_structure=hidden_layers_structure,
            learning_rate_rbm=learning_rate_rbm,
            learning_rate=learning_rate,
            n_epochs_rbm=n_epochs_rbm,
            n_iter_backprop=n_iter_backprop,
            batch_size=batch_size,
            activation_function=activation,
            dropout_p=dropout_p)

        # Train the model
        print("Fitting classifier")
        classifier.fit(self.training_data, training_bin)

        self.classifier = classifier
        self.z_edges = z_edges
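train() stores self.classifier and self.z_edges, but the matching apply step is not shown. A hypothetical counterpart, assuming incoming data is value-added the same way as the training sample, would look roughly like:

    def apply(self, data):
        # hypothetical: rescale with the stored scaler, then predict a bin index per galaxy
        data = self.scaler.transform(data)
        return np.asarray(list(self.classifier.predict(data)))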
Example #18

import numpy as np
from sklearn.datasets import fetch_kddcup99
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, LabelEncoder
from sklearn.metrics import accuracy_score
from dbn.tensorflow import SupervisedDBNClassification

if __name__ == '__main__':
    # Load and normalize the dataset
    kddcup = fetch_kddcup99(subset='smtp', shuffle=True, random_state=1000)

    ss = StandardScaler()
    X = ss.fit_transform(kddcup['data']).astype(np.float32)

    le = LabelEncoder()
    Y = le.fit_transform(kddcup['target']).astype(np.float32)

    # Create train and test sets
    X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.25, random_state=1000)

    # Train the model
    classifier = SupervisedDBNClassification(hidden_layers_structure=[64, 64],
                                             learning_rate_rbm=0.001,
                                             learning_rate=0.01,
                                             n_epochs_rbm=20,
                                             n_iter_backprop=150,
                                             batch_size=256,
                                             activation_function='relu',
                                             dropout_p=0.25)

    classifier.fit(X_train, Y_train)

    Y_pred = classifier.predict(X_test)
    print('Accuracy score: {}'.format(accuracy_score(Y_test, Y_pred)))

Example #19
    x_train = np.asarray(X)[train]
    y_train = np.asarray(Y)[train]
    x_test = np.asarray(X)[test]
    y_test = np.asarray(Y)[test]

    mdl += 1
    print("Data shapes for model ", mdl)
    print("x_train shape: ", x_train.shape)
    print("y_train shape: ", y_train.shape)
    print("x_test shape: ", x_test.shape)
    print("y_test shape: ", y_test.shape)

    classifier = SupervisedDBNClassification(hidden_layers_structure=[len(x_train), len(y_train)],
                                         learning_rate_rbm=0.05,
                                         learning_rate=0.1,
                                         n_epochs_rbm=10,
                                         n_iter_backprop=100,
                                         batch_size=32,
                                         activation_function='relu',
                                         dropout_p=0.2)
    # fit() returns the estimator rather than a loss history, and fitting on
    # the test split (as the original "val_loss" line did) would leak test data
    classifier.fit(x_train, y_train)
    predict_train = classifier.predict(x_train)
    accuracy_train = accuracy_score(y_train, predict_train)
    acc_train.append(accuracy_train)
    pred_train.append(predict_train)
    predict_test = classifier.predict(x_test)
    pred_test.append(predict_test)
    accuracy_test = accuracy_score(y_test, predict_test)
    acc_test.append(accuracy_test)
Example #20

def image_subset(index, x, y):
    xs = []
    ys = []
    for i in range(len(x)):
        if y[i] < index:
            xs.append(x[i])
            ys.append(y[i])
    return np.array(xs), np.array(ys)


dbn = SupervisedDBNClassification(hidden_layers_structure=[1024, 512, 256],
                                  learning_rate_rbm=learning_rate_rbm,
                                  learning_rate=learning_rate,
                                  n_epochs_rbm=n_epochs_rbm,
                                  n_iter_backprop=n_iter_backprop,
                                  batch_size=batch_size,
                                  activation_function='sigmoid',
                                  dropout_p=0.2)

(cx_train, cy_train), (cx_test, cy_test) = cifar10.load_data()

cx_train, cy_train = image_subset(num_classes, cx_train, cy_train)
cx_test, cy_test = image_subset(num_classes, cx_test, cy_test)
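cifar10.load_data() returns uint8 images of shape (N, 32, 32, 3) and labels of shape (N, 1); before dbn.fit they would need flattening, scaling and label raveling, e.g. (a sketch):

cx_train = cx_train.reshape(len(cx_train), -1).astype(np.float32) / 255.0
cx_test = cx_test.reshape(len(cx_test), -1).astype(np.float32) / 255.0
cy_train, cy_test = cy_train.ravel(), cy_test.ravel()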

if use_all:
    train_ex = len(cx_train)
    test_ex = len(cx_test)
print('Using {} training and {} testing'.format(train_ex, test_ex))

if use_color:
Example #21
import numpy as np
from sklearn.datasets import load_digits
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score

from dbn.tensorflow import SupervisedDBNClassification


# Loading dataset
digits = load_digits()
X, Y = digits.data, digits.target

# Data scaling
X = (X / 16).astype(np.float32)

# Splitting data
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=0)

# Training
classifier = SupervisedDBNClassification(hidden_layers_structure=[256, 256],
                                         learning_rate_rbm=0.05,
                                         learning_rate=0.1,
                                         n_epochs_rbm=10,
                                         n_iter_backprop=100,
                                         batch_size=32,
                                         activation_function='relu',
                                         dropout_p=0.2)
classifier.fit(X_train, Y_train)

# Test
Y_pred = classifier.predict(X_test)
print('Done.\nAccuracy: %f' % accuracy_score(Y_test, Y_pred))
Example #22
np.random.seed(1337)  # for reproducibility
from sklearn.metrics import accuracy_score

from dbn.tensorflow import SupervisedDBNClassification
from Rafd import Rafd

# Splitting data
rafd = Rafd("entrenamiento/")
X_train, X_test, Y_train, Y_test = rafd.getData()

# Training
classifier = SupervisedDBNClassification(hidden_layers_structure=[256, 256],
                                         learning_rate_rbm=0.05,
                                         learning_rate=0.001,
                                         n_epochs_rbm=15,
                                         n_iter_backprop=100,
                                         batch_size=32,
                                         activation_function='sigmoid',
                                         dropout_p=0.2)
classifier.fit(X_train, Y_train)

# Save the model
classifier.save('model.pkl')

# Restore it
classifier = SupervisedDBNClassification.load('model.pkl')

# Test
Y_pred = classifier.predict(X_test)
print('Done.\nAccuracy: %f' % accuracy_score(Y_test, Y_pred))
Example #23


min_max_scaler = MinMaxScaler()
X = min_max_scaler.fit_transform(X)


X_train, X_test, Y_train, Y_test = train_test_split(X, y, test_size = 0.22, random_state = 101)



classifier2 = SupervisedDBNClassification(hidden_layers_structure=[8, 50],
                                         learning_rate_rbm=0.05,
                                         learning_rate=0.1,
                                         n_epochs_rbm=30,
                                         n_iter_backprop=100,
                                         batch_size=64,
                                         activation_function='relu',
                                         dropout_p=0.2)
classifier2.fit(X_train, Y_train)
# Save the model
classifier2.save('models/breast_cancer_origin_3.pkl')

# Restore it
#classifier2 = SupervisedDBNClassification.load('models/breast_cancer_origin_2.pkl')

# Test
#X_test = min_max_scaler.transform(X_test)
Y_pred = classifier2.predict(X_test)
print('Accuracy: %f' % accuracy_score(Y_test, Y_pred))
Example #24
        else:
            if (os.stat(filename).st_size != 0):
                X_Test = np.asarray([[word_to_index[w] for w in sent[:-1]]
                                     for sent in tokenized_sentences])
                y_Test = np.transpose(np.asarray([1] * (len(list(sentences)))))

    # Truncate and pad input sequences
    X_Train = sequence.pad_sequences(X_Train, maxlen=max_review_length)
    X_Test = sequence.pad_sequences(X_Test, maxlen=max_review_length)

    # Training
    classifier = SupervisedDBNClassification(
        hidden_layers_structure=list(
            map(int, FLAGS.hidden_layers_structure.split(","))),
        learning_rate_rbm=FLAGS.learning_rate_rbm,
        learning_rate=FLAGS.learning_rate,
        n_epochs_rbm=FLAGS.n_epochs_rbm,
        n_iter_backprop=FLAGS.n_iter_backprop,
        batch_size=FLAGS.batch_size,
        activation_function=FLAGS.activation_function,
        dropout_p=FLAGS.dropout_p)
    classifier.fit(X_Train, y_Train)

    # Test

    Y_pred = classifier.predict(X_Test)
    Y_p = classifier.predict_proba(X_Test)
    Y_n = classifier.predict_proba_dict(X_Test)
    print(Y_n)
    print(Y_p)
    print(Y_pred)
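FLAGS is assumed to be defined elsewhere with tf.app.flags (TensorFlow 1.x) or argparse; a sketch of the definitions this snippet relies on:

import tensorflow as tf

flags = tf.app.flags
flags.DEFINE_string("hidden_layers_structure", "500,250,100", "comma-separated layer sizes")
flags.DEFINE_float("learning_rate_rbm", 0.1, "RBM pretraining learning rate")
flags.DEFINE_float("learning_rate", 0.0001, "backpropagation learning rate")
flags.DEFINE_integer("n_epochs_rbm", 50, "RBM pretraining epochs")
flags.DEFINE_integer("n_iter_backprop", 500, "backpropagation iterations")
flags.DEFINE_integer("batch_size", 16, "mini-batch size")
flags.DEFINE_string("activation_function", "sigmoid", "activation function")
flags.DEFINE_float("dropout_p", 0.25, "dropout probability")
FLAGS = flags.FLAGS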
Example #25
print('y_train', y_train.shape)
print('x_test', x_test.shape)
print('y_test', y_test.shape)

#x_test = x_test[:2000]
#y_test = y_test[:2000]

#print('x_train: ', x_train.shape[0])
#print('x_test number: ', x_test.shape[0])

# Training
classifier = SupervisedDBNClassification(
    hidden_layers_structure=[2304, 256, 64],
    learning_rate_rbm=0.05,
    learning_rate=0.1,
    n_epochs_rbm=10,
    n_iter_backprop=60,
    batch_size=64,
    activation_function='relu',
    dropout_p=0.2)
classifier.fit(x_train, y_train)

# Save the model
classifier.save('models/model_mnist.pkl')

# Restore it
#classifier = SupervisedDBNClassification.load('model.pkl')

# Test
Y_pred = classifier.predict(x_test)
print('Done.\nAccuracy: %f' % accuracy_score(y_test, Y_pred))
Example #26
import numpy as np

np.random.seed(1337)  # for reproducibility
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from dbn.tensorflow import SupervisedDBNClassification
# use "from dbn import SupervisedDBNClassification" for computations on CPU with numpy
from sklearn.datasets import load_iris

iris = load_iris()
data_x = iris.data
data_y = iris.target

x_train, x_test, y_train, y_test = train_test_split(data_x,
                                                    data_y,
                                                    test_size=0.2,
                                                    random_state=42)

# Training
classifier = SupervisedDBNClassification(
    hidden_layers_structure=[500, 1000, 500],
    learning_rate_rbm=0.05,
    learning_rate=0.1,
    n_epochs_rbm=20,  # RBM training steps
    n_iter_backprop=50,  # ANN training steps
    activation_function='relu',
    dropout_p=0.2)

classifier.fit(x_train, y_train)
Example #27
#                   learning_rate_rbm=0.05, # learning rate for RBM pretraining
#                   learning_rate=0.1, # learning rate for node backpropagation
#                   n_epochs_rbm=10, # epochs for RBM pretraining (n epochs per layer)
#                   n_iter_backprop=100, # backpropagation iterations
#                   batch_size=46, # batch size
#                   activation_function='sigmoid', # activation function
#                   dropout_p=0.1) # node dropout to avoid overfitting

# classifier.fit(X_train, y_train) # train the model on the data processed in 3.2.2

#%% [markdown]
# ##### 3.2.4 Getting results from the already-trained model

#%%
# First, load the trained model
tclassifier = DBNC.load('model.pkl')

# Trained classifier details
tclassifier, tclassifier.unsupervised_dbn.hidden_layers_structure


#%%
# Test-set prediction
y_pred = tclassifier.predict(X_test)


#%%
# Analysis of results
print(classification_report(y_test, y_pred))
print(confusion_matrix(y_test, y_pred))
Example #28
y_train = y_train.flatten()
y_test = y_test.flatten()

time_load_end = time.perf_counter()  # time.clock() was removed in Python 3.8
print("Loading finished, loading time: %g seconds" %
      (time_load_end - time_load_start))

X_test_even, y_test_even = fipr.load_csv("test_file_even.csv", True)
y_test_even = y_test_even.flatten()

# Build DBN classifier
classifier = SupervisedDBNClassification(hidden_layers_structure=[20, 20],
                                         learning_rate_rbm=0.05,
                                         learning_rate=0.1,
                                         n_epochs_rbm=10,
                                         n_iter_backprop=100,
                                         batch_size=100,
                                         activation_function='relu',
                                         dropout_p=0.2)

# start counting time for training
time_train_start = time.perf_counter()

# Training
classifier.fit(X_train, y_train)

# print training time
time_train_end = time.perf_counter()
print("Training finished, training time: %g seconds \n" %
      (time_train_end - time_train_start))
Example #29
    labels[i] = output[i]

print "Begin DBN model"
X_train, X_test, Y_train, Y_test = train_test_split(X,
                                                    Y,
                                                    test_size=0.1,
                                                    random_state=i)
dbn_model = DBN([X_train.shape[1], 300, 2],
                learn_rates=0.3,
                learn_rate_decays=0.9,
                epochs=100,
                verbose=1)
dbn_model.fit(X_train, Y_train)
y_true, y_pred = Y_test, dbn_model.predict(X_test)  # Get our predictions
print(classification_report(y_true, y_pred))  # Classification on each digit
print('The accuracy is:', accuracy_score(y_true, y_pred))

print("Begin DBN V2 model")
classifier = SupervisedDBNClassification(hidden_layers_structure=[1000, 200],
                                         learning_rate_rbm=0.05,
                                         learning_rate=0.1,
                                         n_epochs_rbm=10,
                                         n_iter_backprop=100,
                                         batch_size=32,
                                         activation_function='relu',
                                         dropout_p=0.2)
classifier.fit(X_train, Y_train[:, 0])
# Test
Y_pred = classifier.predict(X_test)
print('Done.\nAccuracy: %f' % accuracy_score(Y_test, Y_pred))
Example #30
        test_pos = generateDatasets(filename, "positive")
        for i in range(len(test_pos["LAPD"])):
            clean_test_LAPD.append(" ".join(
                KaggleWord2VecUtility.review_to_wordlist(
                    test_pos["LAPD"][i], False)))

########################################################################################################
# GENERATE MODEL
########################################################################################################

# Training
classifier = SupervisedDBNClassification(
    hidden_layers_structure=hidden_layers_units,
    learning_rate_rbm=learning_rate_rbm,
    learning_rate=learning_rate_backprop,
    n_epochs_rbm=n_epochs_rbm,
    n_iter_backprop=n_iter_backprop,
    batch_size=batch_size,
    activation_function=activation_function,
    dropout_p=dropout_p)

########################################################################################################
# GENERATE WORD EMBEDDING
########################################################################################################

# Use word embeddings
if word_embedding == 'True':
    if w2v_dictionary != "None":
        # Load w2v model
        print "Loading w2v model..."
        model = Word2Vec.load(w2v_dictionary)
Example #31
"""
originally downloaded from https://github.com/albertbup/deep-belief-network
on March 29, 2017, and modified slightly to support python3. The
DBN folder is covered under the MIT license.
"""
from __future__ import print_function
from __future__ import division
from dbn.tensorflow import SupervisedDBNClassification
from sklearn.preprocessing import OneHotEncoder
from sklearn.metrics import accuracy_score
from tensorflow.examples.tutorials.mnist import input_data
import numpy as np
train_ex = 8000
dbn = SupervisedDBNClassification(hidden_layers_structure=[1024, 1024],
                                  learning_rate_rbm=0.1,
                                  learning_rate=0.1,
                                  n_epochs_rbm=10,
                                  n_iter_backprop=100,
                                  batch_size=100,
                                  activation_function='sigmoid',
                                  dropout_p=0.2)


def unpickle(file):
    """Load data"""
    import pickle
    with open(file, 'rb') as source:
        ret_dict = pickle.load(source, encoding='bytes')
    return ret_dict


def get_data():
    """
Example #32
setting = read_setting(open('setting.txt'))
file_out = open('../result/dbn.log', 'a')
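read_setting is not shown; a minimal sketch, assuming one 'key=value' pair per line in setting.txt and a comma-separated hidden_layers_structure:

def read_setting(fh):
    # hypothetical parser for setting.txt: one "key=value" pair per line
    setting = {}
    for line in fh:
        line = line.strip()
        if line and '=' in line:
            key, value = line.split('=', 1)
            setting[key] = value
    # the constructor below needs layer sizes as a list of ints
    setting['hidden_layers_structure'] = [
        int(v) for v in setting['hidden_layers_structure'].split(',')]
    return setting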


X_train = np.loadtxt('../data/train_dataset_new.txt')
X_test = np.loadtxt('../data/test_dataset_new.txt')

Y_train = np.array([np.argmax(row) for row in np.loadtxt('../data/train_labels_new.txt')])
Y_test = np.array([np.argmax(row) for row in np.loadtxt('../data/test_labels_new.txt')])

# Training
classifier = SupervisedDBNClassification(hidden_layers_structure=setting['hidden_layers_structure'],
                                         learning_rate_rbm=float(setting['learning_rate_rbm']),
                                         learning_rate=float(setting['learning_rate']),
                                         n_epochs_rbm=int(setting['n_epochs_rbm']),
                                         n_iter_backprop=int(setting['n_iter_backprop']),
                                         batch_size=int(setting['batch_size']),
                                         activation_function=setting['activation_function'],
                                         dropout_p=float(setting['dropout_p']),
                                         l2_regularization=float(setting['l2_regularization']),
                                         contrastive_divergence_iter=int(setting['contrastive_divergence_iter']))

classifier.fit(X_train, Y_train)

# Evaluate (note: on the training set, so this reports training accuracy)
Y_pred = classifier.predict(X_train)
accuracy = accuracy_score(Y_train, Y_pred)
print('Done.\nAccuracy: %f' % accuracy)

file_out.write('\n\n-------------------------------\n\n')

for line in open('setting.txt'):