# Example #1 (scraped-snippet marker; commented out so the file parses)
# 0
def build_model():
    """Assemble the sign-language CNN graph and wrap it in a tflearn DNN.

    Returns the DNN model object (two conv/pool stages followed by a dense
    softmax head over 24 classes, trained with SGD + categorical
    cross-entropy).
    """
    #config.gpu_options.per_process_gpu_memory_fraction = 0.01
    with tf.Session(config=config) as _sess:
        # Input placeholder: [batch, 28, 28, 1] — 28x28 greyscale images;
        # batch dimension left open via None.
        net = input_data(shape=[None, 28, 28, 1])
        # Stage 1: 16 5x5 filters + 2x2 max-pool.
        net = conv_2d(net, 16, [5, 5], activation='relu')
        net = max_pool_2d(net, [2, 2])
        # Stage 2: 64 3x3 filters + 2x2 max-pool (int 2 == [2, 2]).
        net = conv_2d(net, 64, 3, activation='relu')
        net = max_pool_2d(net, 2)
        # Dense head with dropout; 24 output classes, softmax activation.
        net = fully_connected(net, 512, activation='relu')
        net = dropout(net, 0.5)
        net = fully_connected(net, 24, activation='softmax')
        # Training configuration: SGD, lr 0.01, categorical cross-entropy.
        net = regression(net,
                         optimizer='sgd',
                         loss='categorical_crossentropy',
                         learning_rate=0.01)
        model = DNN(net,
                    tensorboard_verbose=3,
                    checkpoint_path='sign_language_model.ckpt')

    return model
def main():
    """Load data via get_data(), train my_model on it for 100 epochs, and
    report the wall-clock training time.

    NOTE(review): the original body contained a bare name ``stop`` right
    after the first print, which raised NameError at runtime and made
    everything below it unreachable — an apparent debugging halt, removed
    here so the function actually trains.
    """
    x, y, x_test, y_test, img_prep, img_aug = get_data()
    print(y.shape)
    #     with tf.device('/gpu:0'):
    #         with tf.contrib.framework.arg_scope([tflearn.variables.variable], device='/cpu:0'):
    model = my_model(img_prep, img_aug)
    network = DNN(model)
    a = time.time()
    network.fit(x,
                y,
                n_epoch=100,
                shuffle=True,
                validation_set=(x_test, y_test),
                show_metric=True,
                batch_size=100,
                run_id='aa2')
    b = time.time()
    print('total time: {}'.format(b - a))
    return
def main():
    """Train my_model on CIFAR-10 images compressed through a pre-trained
    denoising-autoencoder (DAE) linear projection.

    NOTE(review): this re-definition of ``main`` shadows the earlier ``main``
    defined above — only this version is reachable by name.
    """
    # get_data() appears to be used only for img_prep/img_aug here; its
    # x/y outputs are immediately overwritten by the CIFAR-10 loader below.
    x, y, x_test, y_test, img_prep, img_aug = get_data()
    x, y, x_test, y_test = datasets.load_cifar10_dataset(
        './cifar-10-batches-py', mode='supervised')
    print(x.shape)
    print(y.shape)
    # Convert integer class labels to one-hot vectors for the softmax head.
    y = onehot_labels(y)
    y_test = onehot_labels(y_test)
    print(y.shape)
    # Pre-trained DAE encoder parameters (weight matrix and bias) saved by an
    # earlier run; filenames suggest a 675-unit sigmoid DAE trained with
    # salt-and-pepper noise 0.4 — TODO confirm.
    dae_weights = np.load(
        './data/dae/forcnn_sigmoid_sigmoid_snp_0.4_675-encw.npy')
    dae_bias = np.load(
        './data/dae/forcnn_sigmoid_sigmoid_snp_0.4_675-encbh.npy')
    print(dae_weights.shape, dae_bias.shape, x.shape)
    # Linear encoding x @ W + b.  NOTE(review): no activation is applied even
    # though the filename suggests a sigmoid encoder — confirm intended.
    encoded_data = np.add(np.dot(x, dae_weights), dae_bias)
    # Reshape the flat code to (n, 3, 15, 15) then transpose to NHWC
    # (n, 15, 15, 3) as expected by the conv net.
    encoded_data = encoded_data.reshape(x.shape[0], 3, 15,
                                        15).transpose(0, 2, 3, 1)
    print(encoded_data.shape)

    # Same encoding applied to the held-out test split.
    encoded_data_test = np.add(np.dot(x_test, dae_weights), dae_bias)
    encoded_data_test = encoded_data_test.reshape(x_test.shape[0], 3, 15,
                                                  15).transpose(0, 2, 3, 1)

    #     with tf.device('/gpu:0'):
    #         with tf.contrib.framework.arg_scope([tflearn.variables.variable], device='/cpu:0'):
    model = my_model(img_prep, img_aug)
    network = DNN(model)
    a = time.time()
    network.fit(encoded_data,
                y,
                n_epoch=100,
                shuffle=True,
                validation_set=(encoded_data_test, y_test),
                show_metric=True,
                batch_size=100,
                run_id='aa2')
    b = time.time()
    print('total time taken: {}'.format(b - a))
    return
# Example #4 (scraped-snippet marker; commented out so the file parses)
# 0
def main():
    """Train my_model for 100 epochs on get_data() output, then persist the
    trained weights to 'lenet5_model.tflearn'.
    """
    x, y, x_test, y_test, img_prep, img_aug = get_data()
    # my_model also exposes an intermediate feature tensor; only the training
    # head is used here.
    model, features = my_model(img_prep, img_aug)
    trainer = DNN(model)
    trainer.fit(x,
                y,
                n_epoch=100,
                shuffle=True,
                validation_set=(x_test, y_test),
                show_metric=True,
                batch_size=100,
                run_id='aa2')
    trainer.save('lenet5_model.tflearn')
    return
# Example #5 (scraped-snippet marker; commented out so the file parses)
# 0
def create_model():
    """Build a four-stage conv/pool CNN over single-channel square slices and
    return it wrapped in a tflearn DNN.

    Relies on module-level slice_size, dropout_rate, and no_classes.
    """
    net = input_data(name='Input', shape=[None, slice_size, slice_size, 1])

    # Four identical conv(2x2, elu, Xavier init) + max-pool(2) stages with
    # doubling filter counts: 64 -> 128 -> 256 -> 512.
    for n_filters in (64, 128, 256, 512):
        net = conv_2d(net, n_filters, 2, activation='elu', weights_init='xavier')
        net = max_pool_2d(net, 2)

    # Dense head with dropout, then softmax over the class set.
    net = fully_connected(net, 1024, activation='elu')
    net = dropout(net, dropout_rate)

    net = fully_connected(net, no_classes, activation='softmax')
    net = regression(net, n_classes=no_classes)

    return DNN(net)
# Example #6 (scraped-snippet marker; commented out so the file parses)
# 0
#shape: None (Placeholder for number of training sets in batch), 28 by 28 pixels, 1 channel (greyscale)
#now network becomes a 4D tensor with dimensions [batch, height, width, in_channels]
# NOTE(review): `network` is created above this fragment (presumably via
# input_data) — not visible here.  Each call below rebinds `network` to the
# next layer's output tensor.
network = conv_2d(network, 16, [5,5], activation='relu')
#now network becomes a 4D tensor with dimensions [batch, new height, new width, n_filters]
network = max_pool_2d(network, [2,2])
#filter size [2,2], stride is implied: 2
network = conv_2d(network, 64, 3, activation='relu')
#use relu to account for non-linearity
network = max_pool_2d(network, 2)
#same as [2,2]
network = fully_connected(network, 512, activation='relu')
network = dropout(network, 0.5)
# 24 output classes with softmax activation.
network = fully_connected(network, 24, activation='softmax')
network = regression(network, optimizer='sgd', loss='categorical_crossentropy',learning_rate=0.01)
#always remember to test different optimizers, set learning rates accordingly too
model = DNN(network, tensorboard_verbose=3, checkpoint_path='sign_language_model.ckpt')
# Restore previously trained weights only (the graph built above must match
# the checkpoint's graph exactly).
model.load('ckpts/sign-language-classifier.tfl', weights_only=True)

# Webcam loop: grab a frame, convert it to the network's expected 28x28
# greyscale shape, and display it.  NOTE(review): `cap` (presumably a
# cv2.VideoCapture) is created outside this fragment; the loop has no break,
# and the reshaped frame is only printed, never passed to model.predict —
# confirm intent.
while True:
    #ret stands for retrieve (cap.retrieve()), frame is the returned videcapture.grabbed
    #read combines features of retrieve and grab
    ret, frame = cap.read()

    # change to grayscale
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # resize frame before reshaping using numpy array
    frame = cv2.resize(frame, (28,28))
    cv2.imshow('object detect', frame)
    #reshape to input tensor dimensions [batch, 28, 28, 1]
    gray = np.reshape(frame, (-1,28,28,1))
    print(gray)
# Example #7 (scraped-snippet marker; commented out so the file parses)
# 0
# Optimizer/metric setup and training for a dense model.  `softmax` (the
# final layer) and the train/test arrays are defined above this fragment.
sgd = tflearn.SGD(learning_rate=0.01, lr_decay=0.96, decay_step=1000)
# NOTE(review): `sgd` is built but unused — `adam` is what regression()
# actually receives below.
adam = tflearn.Adam(learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-08, use_locking=False, name="Adam")
# loss = tflearn.losses.L2()
# top_k = tflearn.metrics.Top_k(6)
# Plain accuracy metric (the Top-k variant above is disabled).
accu  = tflearn.metrics.Accuracy()
net = tflearn.regression(softmax, optimizer=adam, metric=accu)
# optimizer = tflearn.optimizers.Optimizer(learning_rate=0.01, False, "")
# loss = tf.reduce_mean(tf.nn.log_poisson_loss(logits=softmax, labels=Y))
# optimizer = tf.train.AdamOptimizer(learning_rate=0.01).minimize(loss)
# init = tf.global_variables_initializer()


# Wrap the graph in a trainable model; gradients are clipped at 5.0 and
# TensorBoard logs go to /tmp/tflearn_logs/.
dnn = DNN(net, clip_gradients=5.0, tensorboard_verbose=0,
          tensorboard_dir='/tmp/tflearn_logs/', checkpoint_path=None,
          best_checkpoint_path=None, max_checkpoints=None,
          session=None, best_val_accuracy=0.0)

# Train for 10 epochs (the third positional argument is n_epoch).
dnn.fit(train_X, train_Y, 10, validation_set=(test_X, test_Y),
        show_metric=True, run_id="dense_model")

pre_Y = dnn.predict(test_X)
# pre_pro_Y = dnn.predict_proba(test_X)


# pre_Y = clf.predict(test_X)
# pre_pro_Y = clf.predict_proba(test_X)
#
# def accurcy(test_Y, pre_Y):
#     print(len(pre_Y), len(test_Y))
#     all_size = len(test_X)
# BUILDING MODEL ==================================================================

# Hyperparameters for a multi-task sequence classifier.  NOTE(review):
# use_deep_CNN/use_RNN/L1/L2/seq_length are stored but not read in this
# fragment — presumably consumed elsewhere.
hyperparameters = {'seq_length': seq_length, 'use_deep_CNN': True, 'use_RNN': False,
                       'num_filters': 45, 'pool_width': 25, 'conv_width': 10, 'L1': 0, 'dropout': 0.2, 'L2': 0}

## parameters needed, todo - refactor code into class/oo model
nb_filters = hyperparameters['num_filters']
conv_width = hyperparameters['conv_width']
lost_units = hyperparameters['dropout']
pool_width = hyperparameters['pool_width']
# One output unit per prediction task (last axis of the label array y).
num_tasks = np.shape(y)[-1]

## the magic of TF-Learn:

model = input_data(shape=input_shape, name='input') # todo regularizers.
model = conv_2d(model, nb_filters, (4, conv_width), activation='relu') # defaults to He et al w_init
# tflearn's dropout() takes a *keep* probability, hence 1 - dropout rate.
model = dropout(model, 1 - lost_units)
model = conv_2d(model, nb_filters, (1, conv_width), activation='relu') # TODO: allow extra num filters
model = dropout(model, 1 - lost_units)
model = conv_2d(model, nb_filters, (1, conv_width), activation='relu') # TODO: allow extra num filters
model = dropout(model, 1 - lost_units)
model = max_pool_2d(model, (1, pool_width))
# Sigmoid head + binary cross-entropy: independent per-task (multi-label)
# prediction rather than a single softmax classification.
model = fully_connected(model, num_tasks, activation='sigmoid')
model = regression(model, optimizer='adam', loss='binary_crossentropy')

## Compile into DNN (Deep Neural Network) TFLearn Object, use member fxns to train/fit
TF_model = DNN(model, tensorboard_verbose=0)
# validation_set=0.2 holds out 20% of the training data for validation.
TF_model.fit(X_train, y_train, n_epoch=100, validation_set=0.2, batch_size=128,
             run_id='1.0-RP-TFLearn-ipynb-run0001-2016-07-11')
    ## Read data from the training set (disabled)
    #csvPath = "H:\\FaceImageDataset\\FERET_80_80\\gendercsv.txt"
    #face_list, label_list = readCSV(csvPath)

    ## Preprocess the data before feeding it to the network (disabled)
    #face_array = imgPreprocess(face_list)
    #face_array = face_array.reshape((-1, 80, 80, 1))
    #label_array = one_hot(label_list)

    # Build the convolutional neural network.  NOTE(review): 0.0008 is
    # presumably the learning rate — confirm against createCnn's signature.
    network = createCnn(0.0008)
    data_save_path = "H:\\FaceImageDataset\\FERET_80_80\\gender_classfier_data_0.0008"
    tensorboard_dir = data_save_path + "\\tensorboard_data"
    # Wrap the graph; checkpoints and TensorBoard logs share the same folder.
    cnn_model = DNN(network,
                    tensorboard_verbose=3,
                    tensorboard_dir=tensorboard_dir,
                    checkpoint_path=tensorboard_dir +
                    "\\gender_classfier.tfl.ckpt")

    # # Training (disabled)
    # cnn_model.fit(face_array, label_array, n_epoch= 60,
    #               validation_set=0.2,
    #               show_metric=True, batch_size=200, shuffle=True,
    #               snapshot_epoch=False,
    #               run_id="gender_classfier")
    #
    # cnn_model.save(data_save_path + "\\gender_classfier.tfl")
    # print("Network trained and saved as gender_classfier.tfl!")

    # Load the previously trained model from disk.
    cnn_model.load(data_save_path + "\\gender_classfier.tfl")
# Example #10 (scraped-snippet marker; commented out so the file parses)
# 0
def main():
    """Train my_model on 1-D spectra, predict the test split in chunks, and
    save a normalized confusion-matrix plot to disk."""
    a = time.time()
    x, y, x_test, y_test, img_prep, img_aug = get_data(
    )  # modified for spectra
    b = time.time()

    # Add a trailing channel axis: (n, length) -> (n, length, 1).
    x = x.reshape((x.shape[0], x.shape[1], 1))
    x_test = x_test.reshape((x_test.shape[0], x_test.shape[1], 1))

    print('data time: {}'.format(b - a))
    #     with tf.device('/gpu:0'):
    #         with tf.contrib.framework.arg_scope([tflearn.variables.variable], device='/cpu:0'):
    model = my_model(img_prep, img_aug)
    network = DNN(model)
    a = time.time()
    network.fit(x,
                y,
                n_epoch=1,
                shuffle=True,
                validation_set=(x_test, y_test),
                show_metric=True,
                batch_size=32,
                run_id='aa2')
    print(network.evaluate(x_test[0:32, :], y_test[0:32, :]))
    b = time.time()
    print('total time: {}'.format(b - a))

    # evali= model.evaluate(x_test, y_test)
    # print("Accuracy of the model is :", evali)
    # Predict the test set in `divideby` chunks (to bound memory use) and
    # build a one-hot label matrix over the 7 classes.
    divideby = 100
    dindex = int(x_test.shape[0] / divideby)
    labels = np.zeros((x_test.shape[0], 7))
    for i in range(divideby):
        start = i * dindex
        end = start + dindex
        prob_y = network.predict(x_test[start:end, :])  # NOTE(review): unused
        # NOTE(review): this rebinds `y`, shadowing the training labels, and
        # predict_label returns label indices sorted by probability — taking
        # argmax over that output is suspect; confirm whether
        # network.predict + argmax was intended instead.
        y = network.predict_label(x_test[start:end, :])
        predictions = np.argmax(y, axis=1)
        for j in range(len(predictions)):
            labels[start + j, predictions[j]] = 1

    # Append one instance of every class id so the confusion matrix covers
    # all 7 classes even if some were never predicted (this slightly biases
    # the `accuracy` value computed below).
    appendme = np.array([0, 1, 2, 3, 4, 5, 6])
    appendme = appendme.reshape((7, ))
    y_test_decode = onenothot_labels(y_test)
    y_test_decode = np.concatenate((y_test_decode, appendme))
    labels_decode = onenothot_labels(labels)
    labels_decode = np.concatenate((labels_decode, appendme))

    # Simple element-wise agreement rate between decoded predictions and
    # decoded ground truth.  NOTE(review): computed but never printed.
    accuracy = float(np.sum(labels_decode == y_test_decode)) / float(
        y_test_decode.shape[0])

    class_names = [
        'Background', 'HEU', 'WGPu', 'I131', 'Co60', 'Tc99', 'HEUandTc99'
    ]

    cnf_matrix = confusion_matrix(y_test_decode, labels_decode)

    # print("The predicted labels are :", lables[f])
    # prediction = model.predict(testImages)
    # print("The predicted probabilities are :", prediction[f])
    fig = plt.figure()
    class_names = [
        'Background', 'HEU', 'WGPu', 'I131', 'Co60', 'Tc99', 'HEUandTc99'
    ]
    plot_confusion_matrix(cnf_matrix,
                          classes=class_names,
                          normalize=True,
                          title='Normalized confusion matrix')
    fig.savefig('classification_confusion_matrix.png')
    return
# Example #11 (scraped-snippet marker; commented out so the file parses)
# 0
# (orphaned continuation of an input_data(...) call whose start was lost in
#  extraction:)
#                      data_augmentation=img_aug)
# Sign-language classifier: two conv/pool stages, dense head, 26 softmax
# classes.  `network` is created by the input_data(...) call above this
# fragment.
network = conv_2d(network, 32, 3, activation='relu')
network = max_pool_2d(network, 2)
network = conv_2d(network, 64, 3, activation='relu')
network = max_pool_2d(network, 2)
network = fully_connected(network, 784, activation='relu')
network = dropout(network, 0.5)
network = fully_connected(network, 26, activation='softmax')

# Adam + categorical cross-entropy training head.
network = regression(network,
                     optimizer='adam',
                     loss='categorical_crossentropy',
                     learning_rate=0.001)

model = DNN(network,
            tensorboard_verbose=0,
            checkpoint_path='sign-language-classifier.tfl.ckpt')

model.fit(x_train,
          y_train,
          n_epoch=1,
          shuffle=True,
          validation_set=(x_test, y_test),
          show_metric=True,
          batch_size=96,
          snapshot_epoch=True,
          run_id='sign-language-classifier')

model.save("sign-language-classifier.tfl")

# Bug fix: evaluate() must be called on the trained instance, not on the DNN
# class — DNN.evaluate(x_test, ...) passed x_test as `self`.
print(model.evaluate(x_test, y_test, batch_size=96))
# Example #12 (scraped-snippet marker; commented out so the file parses)
# 0
        # SGD + categorical cross-entropy training head for the layers built
        # above.  (This fragment is the tail of a model-building function
        # whose start is outside this view.)
        network = regression(network,
                             optimizer='sgd',
                             loss='categorical_crossentropy',
                             learning_rate=0.01)
        #always remember to test different optimizers, set learning rates accordingly too
        model = DNN(network,
                    tensorboard_verbose=3,
                    checkpoint_path='sign_language_model.ckpt')

    return (model)


def train():
    """Fit the sign-language model for 10 epochs, checkpoint it to disk, and
    return the trained model.

    Relies on module-level x_train/y_train/x_test/y_test arrays.
    """
    classifier = build_model()
    # feed dict { 'inputs': x_train } { 'targets': y_train }
    classifier.fit(x_train,
                   y_train,
                   n_epoch=10,
                   shuffle=True,
                   validation_set=(x_test, y_test),
                   show_metric=True,
                   batch_size=20)
    classifier.save("ckpts/sign-language-classifier.tfl")
    return classifier


visualize()
# Bug fix: the original discarded train()'s return value, leaving `model`
# unbound at module scope, and then called evaluate through the DNN class.
# Capture the trained model and evaluate on the instance.
model = train()

print(model.evaluate(x_test, y_test, batch_size=96))
def main():
    """Extract 512-dim features from a trained LeNet-5-style network and fit
    a bagged linear-SVM one-vs-rest classifier on them.

    Features for both splits are cached as .npy files so repeated runs skip
    the slow per-image forward passes.
    """
    x, y, x_test, y_test, img_prep, img_aug = get_data()
    #     with tf.device('/gpu:0'):
    #         with tf.contrib.framework.arg_scope([tflearn.variables.variable], device='/cpu:0'):
    model, features = my_model(img_prep, img_aug)
    network = DNN(model)
    # network.fit(x, y, n_epoch=100, shuffle=True, validation_set=(x_test, y_test), show_metric=True,
    #             batch_size=100, run_id='aa2')
    # task 3 stuff
    network.load('./lenet5_run.tflearn')
    # A second DNN sharing the same session exposes the 512-unit feature
    # layer for inference.
    feature_generator = DNN(features, session=network.session)
    # Training-set features: compute once, then cache on disk.
    if len(glob.glob('./lenet5_svm_features.npy')) != 1:
        svm_features = np.zeros((0, 512))
        for i in range(x.shape[0]):
            if i % 1000 == 0:
                print(i, svm_features.shape)
            chuckmein = x[i, :, :].reshape(
                (1, x.shape[1], x.shape[2], x.shape[3]))
            svm_features = np.vstack(
                (svm_features, feature_generator.predict(chuckmein)))
        np.save('./lenet5_svm_features.npy', svm_features)
    else:
        svm_features = np.load('./lenet5_svm_features.npy')

    # Test-set features, cached the same way.
    if len(glob.glob('./lenet5_svm_features_test.npy')) != 1:
        svm_features_test = np.zeros((0, 512))
        for i in range(x_test.shape[0]):
            # Bug fix: reshape with x_test's own dimensions — the original
            # used x.shape here, which only worked when the train and test
            # arrays happened to share spatial dimensions.
            chuckmein = x_test[i, :, :].reshape(
                (1, x_test.shape[1], x_test.shape[2], x_test.shape[3]))
            svm_features_test = np.vstack(
                (svm_features_test, feature_generator.predict(chuckmein)))
        np.save('./lenet5_svm_features_test.npy', svm_features_test)
    else:
        svm_features_test = np.load('./lenet5_svm_features_test.npy')
    #  from here it's y vs. y_predict
    # Decode one-hot label matrices into integer class ids (index of the
    # first 1 in each row) for the SVM.
    svm_y = np.zeros((y.shape[0], ))
    svm_y_test = np.zeros((y_test.shape[0]))
    for i in range(y.shape[0]):
        # print(y[i, :] == 1)
        mask = y[i, :] == 1
        meh = list(compress(range(len(mask)), mask))
        svm_y[i] = meh[0]
    for i in range(y_test.shape[0]):
        mask = y_test[i, :] == 1
        meh = list(compress(range(len(mask)), mask))
        svm_y_test[i] = meh[0]

    # clf = svm.SVC()
    # clf.fit(svm_features, svm_y)
    # predicted_y = clf.predict(svm_features_test)
    # accuracy_mask = svm_y_test == predicted_y
    # accuracy = float(len(list(compress(range(len(accuracy_mask)), accuracy_mask)))) / float(len(accuracy_mask))
    # print(accuracy)

    # Bagging splits the training data across n_estimators linear SVCs so
    # that fitting parallelizes over n_jobs workers.
    n_estimators = 10
    n_jobs = 4
    start = time.time()
    clf = OneVsRestClassifier(
        BaggingClassifier(SVC(kernel='linear',
                              probability=True,
                              class_weight=None),
                          max_samples=1.0 / n_estimators,
                          n_estimators=n_estimators,
                          n_jobs=n_jobs))
    clf.fit(svm_features, svm_y)
    end = time.time()
    print("Bagging SVC", end - start, clf.score(svm_features_test, svm_y_test))

    # y_test vs. predicted_y metric

    return