Example #1
def alexnet():
    X, Y = oxflower17.load_data(one_hot=True, resize_pics=(227, 227))

    # Building 'AlexNet'
    network = input_data(shape=[None, 227, 227, 3])
    network = conv_2d(network, 96, 11, strides=4, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = conv_2d(network, 256, 5, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = conv_2d(network, 384, 3, activation='relu')
    network = conv_2d(network, 384, 3, activation='relu')
    network = conv_2d(network, 256, 3, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 17, activation='softmax')
    network = regression(network, optimizer='momentum',
                         loss='categorical_crossentropy',
                         learning_rate=0.001)

    # Training
    model = tflearn.DNN(network, checkpoint_path='model_alexnet',
                        max_checkpoints=1, tensorboard_verbose=2)
    model.fit(X, Y, n_epoch=1000, validation_set=0.1, shuffle=True,
              show_metric=True, batch_size=64, snapshot_step=200,
              snapshot_epoch=False, run_id='alexnet')
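These snippets all assume the standard TFLearn layer imports, which the listings omit. A minimal sketch of the imports Example #1 relies on (module paths as in TFLearn 0.3.x):

import tflearn
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.normalization import local_response_normalization
from tflearn.layers.estimator import regression
import tflearn.datasets.oxflower17 as oxflower17  # bundled 17-category flowers dataset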
Example #2
def cnn():
    X, Y, testX, testY = mnist.load_data(one_hot=True)
    X = X.reshape([-1, 28, 28, 1])
    testX = testX.reshape([-1, 28, 28, 1])

    # Building convolutional network
    network = input_data(shape=[None, 28, 28, 1], name='input')
    network = conv_2d(network, 32, 3, activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = local_response_normalization(network)
    network = conv_2d(network, 64, 3, activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = local_response_normalization(network)
    network = fully_connected(network, 128, activation='tanh')
    network = dropout(network, 0.8)
    network = fully_connected(network, 256, activation='tanh')
    network = dropout(network, 0.8)
    network = fully_connected(network, 10, activation='softmax')
    network = regression(network, optimizer='adam', learning_rate=0.01,
                         loss='categorical_crossentropy', name='target')

    # Training
    model = tflearn.DNN(network, tensorboard_verbose=0)
    model.fit({'input': X}, {'target': Y}, n_epoch=20,
               validation_set=({'input': testX}, {'target': testY}),
               snapshot_step=100, show_metric=True, run_id='cnn_demo')
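After fit() returns, the trained DNN can be scored and queried directly; a short sketch using the standard tflearn.DNN methods:

# Mean accuracy over the held-out MNIST digits
print(model.evaluate(testX, testY))
# Softmax probabilities for the first ten test images
print(model.predict(testX[:10]))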
Example #3
def neural_network_model(input_size):

    network = input_data(shape=[None, input_size, 1], name='input')

    network = fully_connected(network, 128, activation='relu')
    network = dropout(network, 0.8)

    network = fully_connected(network, 256, activation='relu')
    network = dropout(network, 0.8)

    network = fully_connected(network, 512, activation='relu')
    network = dropout(network, 0.8)

    network = fully_connected(network, 256, activation='relu')
    network = dropout(network, 0.8)

    network = fully_connected(network, 128, activation='relu')
    network = dropout(network, 0.8)

    network = fully_connected(network, 2, activation='softmax')
    network = regression(network, optimizer='adam', learning_rate=LR,
                         loss='categorical_crossentropy', name='targets')
    model = tflearn.DNN(network, tensorboard_dir='log')

    return model
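LR is a module-level constant that this snippet reads but never defines; a hypothetical usage sketch, with an assumed learning rate and toy four-feature observations:

import numpy as np

LR = 1e-3  # assumed; the snippet references LR without defining it
model = neural_network_model(input_size=4)
X = np.random.rand(32, 4, 1)                 # [batch, input_size, 1] to match input_data
Y = np.eye(2)[np.random.randint(0, 2, 32)]   # one-hot labels for the 2-way softmax
model.fit({'input': X}, {'targets': Y}, n_epoch=5, show_metric=True)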
Example #4
def do_cnn_doc2vec_2d(trainX, testX, trainY, testY):
    print "CNN and doc2vec 2d"

    trainX = trainX.reshape([-1, max_features, max_document_length, 1])
    testX = testX.reshape([-1, max_features, max_document_length, 1])


    # Building convolutional network
    network = input_data(shape=[None, max_features, max_document_length, 1], name='input')
    network = conv_2d(network, 16, 3, activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = local_response_normalization(network)
    network = conv_2d(network, 32, 3, activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = local_response_normalization(network)
    network = fully_connected(network, 128, activation='tanh')
    network = dropout(network, 0.8)
    network = fully_connected(network, 256, activation='tanh')
    network = dropout(network, 0.8)
    network = fully_connected(network, 10, activation='softmax')
    network = regression(network, optimizer='adam', learning_rate=0.01,
                         loss='categorical_crossentropy', name='target')

    # Training
    model = tflearn.DNN(network, tensorboard_verbose=0)
    model.fit({'input': trainX}, {'target': trainY}, n_epoch=20,
               validation_set=({'input': testX}, {'target': testY}),
               snapshot_step=100, show_metric=True, run_id='review')
Example #5
def _model1():
    global yTest, img_aug
    tf.reset_default_graph()
    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()
    network = input_data(shape=[None, inputSize, inputSize, dim],
                 name='input',
                 data_preprocessing=img_prep,
                 data_augmentation=img_aug)

    network = conv_2d(network, 32, 3, strides=4, activation='relu')
    network = max_pool_2d(network, 2, strides=2)
    network = local_response_normalization(network)
    network = conv_2d(network, 64, 3, strides=2, activation='relu')
    network = max_pool_2d(network, 2, strides=2)
    network = local_response_normalization(network)
    network = fully_connected(network, 128, activation='tanh')
    network = dropout(network, 0.8)
    network = fully_connected(network, 256, activation='tanh')
    network = dropout(network, 0.8)
    network = fully_connected(network, len(Y[0]), activation='softmax')
    network = regression(network, optimizer='adam', learning_rate=0.001,
                 loss='categorical_crossentropy', name='target')

    model = tflearn.DNN(network, tensorboard_verbose=3)
    model.fit(X, Y, n_epoch=epochNum, validation_set=(xTest, yTest),
              snapshot_step=500, show_metric=True, batch_size=batchNum,
              shuffle=True, run_id=_id + 'artClassification')
    if modelStore: model.save(_id + '-model.tflearn')
Example #6
def train_nmf_network(mfcc_array, sdr_array, n_epochs, take):
    """

    :param mfcc_array:
    :param sdr_array:
    :param n_epochs:
    :param take:
    :return:
    """
    with tf.Graph().as_default():
        network = input_data(shape=[None, 13, 100, 1])
        network = conv_2d(network, 32, [5, 5], activation="relu", regularizer="L2")
        network = max_pool_2d(network, 2)
        network = conv_2d(network, 64, [5, 5], activation="relu", regularizer="L2")
        network = max_pool_2d(network, 2)
        network = fully_connected(network, 128, activation="relu")
        network = dropout(network, 0.8)
        network = fully_connected(network, 256, activation="relu")
        network = dropout(network, 0.8)
        network = fully_connected(network, 1, activation="linear")
        regress = tflearn.regression(network, optimizer="rmsprop", loss="mean_square", learning_rate=0.001)

        # Training
        model = tflearn.DNN(regress)  # , session=sess)
        model.fit(
            mfcc_array,
            sdr_array,
            n_epoch=n_epochs,
            snapshot_step=1000,
            show_metric=True,
            run_id="repet_choice_{0}_epochs_take_{1}".format(n_epochs, take),
        )

        return model
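A hypothetical call with random arrays of the expected shapes, only to show the contract (13x100 MFCC-cluster images in, one scalar SDR target per example out):

import numpy as np

mfcc = np.random.rand(64, 13, 100, 1)  # [batch, 13, 100, 1]
sdrs = np.random.rand(64, 1)           # one SDR value per example
model = train_nmf_network(mfcc, sdrs, n_epochs=2, take=1)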
Example #7
def neural_network_model(input_size):
    """
    Function is to build NN based on the input size
    :param input_size: feature size of each observation
    :return: tensorflow model
    """
    network = input_data(shape=[None, input_size], name='input')

    network = fully_connected(network, 128, activation='relu')
    network = dropout(network, 0.8)

    network = fully_connected(network, 256, activation='relu')
    network = dropout(network, 0.8)

    network = fully_connected(network, 512, activation='relu')
    network = dropout(network, 0.8)

    network = fully_connected(network, 256, activation='relu')
    network = dropout(network, 0.8)

    network = fully_connected(network, 128, activation='relu')
    network = dropout(network, 0.8)

    network = fully_connected(network, 2, activation='softmax')
    network = regression(network, learning_rate=LR,  name='targets')
    model = tflearn.DNN(network, tensorboard_dir='logs/ann/ann_0')

    return model
Example #8
def train_repet_network(beat_spectrum_array, sdr_array, n_epochs, take):
    """

    :param beat_spectrum_array:
    :param sdr_array:
    :param n_epochs:
    :param take:
    :return:
    """
    beat_spec_len = 432
    with tf.Graph().as_default():
        input_layer = input_data(shape=[None, beat_spec_len, 1])
        conv1 = conv_1d(input_layer, 32, 4, activation="relu", regularizer="L2")
        max_pool1 = max_pool_1d(conv1, 2)
        conv2 = conv_1d(max_pool1, 64, 80, activation="relu", regularizer="L2")
        max_pool2 = max_pool_1d(conv2, 2)
        fully1 = fully_connected(max_pool2, 128, activation="relu")
        dropout1 = dropout(fully1, 0.8)
        fully2 = fully_connected(dropout1, 256, activation="relu")
        dropout2 = dropout(fully2, 0.8)
        linear = fully_connected(dropout2, 1, activation="linear")
        regress = tflearn.regression(linear, optimizer="rmsprop", loss="mean_square", learning_rate=0.001)

        # Training
        model = tflearn.DNN(regress)  # , session=sess)
        model.fit(
            beat_spectrum_array,
            sdr_array,
            n_epoch=n_epochs,
            snapshot_step=1000,
            show_metric=True,
            run_id="repet_choice_{0}_epochs_take_{1}".format(n_epochs, take),
        )

        return model
Example #9
def alexnet(width, height, lr, output=3):
    network = input_data(shape=[None, width, height, 1], name='input')
    network = conv_2d(network, 96, 11, strides=4, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = conv_2d(network, 256, 5, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = conv_2d(network, 384, 3, activation='relu')
    network = conv_2d(network, 384, 3, activation='relu')
    network = conv_2d(network, 256, 3, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, output, activation='softmax')
    network = regression(network, optimizer='momentum',
                         loss='categorical_crossentropy',
                         learning_rate=lr, name='targets')

    model = tflearn.DNN(network, checkpoint_path='model_alexnet',
                        max_checkpoints=1, tensorboard_verbose=0, tensorboard_dir='log')

    return model
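A hedged usage sketch for this parameterized AlexNet; the width, height, and learning rate below are made up for illustration:

import numpy as np

WIDTH, HEIGHT, LR = 80, 60, 1e-3  # assumed values
model = alexnet(WIDTH, HEIGHT, LR, output=3)
X = np.random.rand(16, WIDTH, HEIGHT, 1)    # toy grayscale frames
Y = np.eye(3)[np.random.randint(0, 3, 16)]  # one-hot labels
model.fit({'input': X}, {'targets': Y}, n_epoch=1, show_metric=True)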
Example #10
def build_network(self):
   # Building 'AlexNet'
   # https://github.com/tflearn/tflearn/blob/master/examples/images/alexnet.py
   # https://github.com/DT42/squeezenet_demo
   # https://github.com/yhenon/pysqueezenet/blob/master/squeezenet.py
   print('[+] Building CNN')
    self.network = input_data(shape=[None, SIZE_FACE, SIZE_FACE, 1])
    self.network = conv_2d(self.network, 96, 11, strides=4, activation='relu')
    self.network = max_pool_2d(self.network, 3, strides=2)
    self.network = local_response_normalization(self.network)
    self.network = conv_2d(self.network, 256, 5, activation='relu')
    self.network = max_pool_2d(self.network, 3, strides=2)
    self.network = local_response_normalization(self.network)
    self.network = conv_2d(self.network, 256, 3, activation='relu')
    self.network = max_pool_2d(self.network, 3, strides=2)
    self.network = local_response_normalization(self.network)
    self.network = fully_connected(self.network, 1024, activation='tanh')
    self.network = dropout(self.network, 0.5)
    self.network = fully_connected(self.network, 1024, activation='tanh')
    self.network = dropout(self.network, 0.5)
    self.network = fully_connected(self.network, len(EMOTIONS), activation='softmax')
    self.network = regression(self.network,
      optimizer='momentum',
      loss='categorical_crossentropy')
    self.model = tflearn.DNN(
      self.network,
      checkpoint_path=SAVE_DIRECTORY + '/alexnet_mood_recognition',
      max_checkpoints=1,
      tensorboard_verbose=2
    )
   self.load_model()
Example #11
def main():
    pickle_folder = '../pickles_rolloff'
    pickle_folders_to_load = [f for f in os.listdir(pickle_folder) if os.path.isdir(join(pickle_folder, f))]
    pickle_folders_to_load = sorted(pickle_folders_to_load)

    # pickle parameters
    fg_or_bg = 'background'
    sdr_type = 'sdr'
    feature = 'sim_mat'
    beat_spec_len = 432

    # training params
    n_classes = 16
    training_percent = 0.85
    testing_percent = 0.15
    validation_percent = 0.00


    # set up training, testing, & validation partitions
    print('Loading sim_mat and sdrs')
    sim_mat_array, sdr_array = get_generated_data(feature, fg_or_bg, sdr_type)
    print('sim_mat and sdrs loaded')

    print('splitting and grooming data')
    train, test, validate = split_into_sets(len(pickle_folders_to_load), training_percent,
                                            testing_percent, validation_percent)

    trainX = np.expand_dims([sim_mat_array[i] for i in train], -1)
    trainY = np.expand_dims([sdr_array[i] for i in train], -1)
    testX = np.expand_dims([sim_mat_array[i] for i in test], -1)
    testY = np.array([sdr_array[i] for i in test])

    print('setting up CNN')
    # Building convolutional network
    network = input_data(shape=[None, beat_spec_len, beat_spec_len, 1])
    network = conv_2d(network, 32, 10, activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = conv_2d(network, 64, 20, activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = fully_connected(network, 128, activation='tanh')
    network = dropout(network, 0.8)
    network = fully_connected(network, 256, activation='tanh')
    network = dropout(network, 0.8)
    network = fully_connected(network, 1, activation='linear')
    regress = tflearn.regression(network, optimizer='sgd', loss='mean_square', learning_rate=0.01)

    print('running CNN')
    # Training
    model = tflearn.DNN(regress, tensorboard_verbose=1)
    model.fit(trainX, trainY, n_epoch=10,
              snapshot_step=1000, show_metric=True, run_id='{} classes'.format(n_classes - 1))

    predicted = np.array(model.predict(testX))[:,0]

    print('plotting')
    plot(testY, predicted)
Example #12
def build_network():
    network = tflearn.input_data(shape=[None, 2])
    network = tflearn.fully_connected(network, 64, activation='relu')
    network = dropout(network, 0.9)
    network = tflearn.fully_connected(network, 128, activation='relu')
    network = dropout(network, 0.9)
    network = tflearn.fully_connected(network, 2, activation='softmax')
    network = tflearn.regression(network, optimizer='sgd', learning_rate=0.1,
                           loss='categorical_crossentropy')
    return network
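build_network() only returns a graph definition (it also assumes dropout was imported from tflearn.layers.core); a sketch that wraps it in a DNN and fits it on toy two-feature data, with XOR-style labels chosen purely for illustration:

import numpy as np
import tflearn

net = build_network()
model = tflearn.DNN(net, tensorboard_verbose=0)
X = np.array([[0., 0.], [0., 1.], [1., 0.], [1., 1.]])
Y = np.array([[1., 0.], [0., 1.], [0., 1.], [1., 0.]])  # XOR as one-hot
model.fit(X, Y, n_epoch=100, show_metric=True)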
Example #13
def main():
    pickle_folder = '../pickles_rolloff'
    pickle_folders_to_load = [f for f in os.listdir(pickle_folder) if os.path.isdir(join(pickle_folder, f))]
    pickle_folders_to_load = sorted(pickle_folders_to_load)

    # pickle parameters
    fg_or_bg = 'background'
    sdr_type = 'sdr'
    feature = 'beat_spec'
    beat_spec_len = 432

    # training params
    n_classes = 16
    training_percent = 0.85
    testing_percent = 0.15
    validation_percent = 0.00


    # set up training, testing, & validation partitions
    beat_spec_array, sdr_array = load_beat_spec_and_sdrs(pickle_folders_to_load, pickle_folder,
                                                         feature, fg_or_bg, sdr_type)

    train, test, validate = split_into_sets(len(pickle_folders_to_load), training_percent,
                                            testing_percent, validation_percent)

    trainX = np.expand_dims([beat_spec_array[i] for i in train], -1)
    trainY = np.expand_dims([sdr_array[i] for i in train], -1)
    testX = np.expand_dims([beat_spec_array[i] for i in test], -1)
    testY = np.array([sdr_array[i] for i in test])

    # Building convolutional network
    network = input_data(shape=[None, beat_spec_len, 1])
    network = conv_1d(network, 32, 4, activation='relu', regularizer="L2")
    network = max_pool_1d(network, 2)
    network = conv_1d(network, 64, 80, activation='relu', regularizer="L2")
    network = max_pool_1d(network, 2)
    network = fully_connected(network, 128, activation='relu')
    network = dropout(network, 0.8)
    network = fully_connected(network, 256, activation='relu') # look for non-tanh things???
    network = dropout(network, 0.8)
    network = fully_connected(network, 1, activation='linear')
    regress = tflearn.regression(network, optimizer='sgd', loss='mean_square', learning_rate=0.01)

    # Training
    model = tflearn.DNN(regress, tensorboard_verbose=1)
    model.fit(trainX, trainY, n_epoch=100,
              snapshot_step=1000, show_metric=True, run_id='relus_100_3')

    predicted = np.array(model.predict(testX))[:,0]
    print("Test MSE: ", np.square(testY - predicted).mean())
    plot(testY, predicted)
Example #14
def build_model_1_conv(learning_rate, input_shape, nb_classes, base_path , drop):
    network = input_data(shape=input_shape, name='input')
    network = conv_2d(network, 64, [4, 16], activation='relu')
    network = fully_connected(network, 128, activation='relu')
    network = dropout(network, drop)
    network = fully_connected(network, 64, activation='relu')
    network = dropout(network, drop)
    network = fully_connected(network, nb_classes, activation='softmax')
    network = regression(network, optimizer='sgd', learning_rate=learning_rate,
                         loss='categorical_crossentropy', name='target')
    model = tflearn.DNN(network, tensorboard_verbose=3, tensorboard_dir=base_path + "/tflearn_logs/",
                        checkpoint_path=base_path + "/checkpoints/step")
    return model
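A hypothetical call, assuming 28x28 single-channel inputs and ten classes (input_shape follows the [None, H, W, C] convention used throughout these examples):

model = build_model_1_conv(learning_rate=0.01,
                           input_shape=[None, 28, 28, 1],  # assumed input geometry
                           nb_classes=10,
                           base_path='.',
                           drop=0.8)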
Example #15
def main():
    pickle_folder = 'pickles_combined'


    # pickle parameters
    fg_or_bg = 'background'
    sdr_type = 'sdr'
    feature = 'beat_spec'

    # training params
    training_percent = 0.85
    testing_percent = 0.15
    validation_percent = 0.00
    beat_spec_max = 355


    # set up training, testing, & validation partitions
    beat_spec_array, sdr_array = unpickle_beat_spec_and_sdrs(pickle_folder, beat_spec_max)

    train, test, validate = split_into_sets(len(beat_spec_array), training_percent,
                                            testing_percent, validation_percent)

    trainX = np.expand_dims([beat_spec_array[i] for i in train], -1)
    trainY = np.expand_dims([sdr_array[i] for i in train], -1)
    testX = np.expand_dims([beat_spec_array[i] for i in test], -1)
    testY = np.array([sdr_array[i] for i in test])

    # Building convolutional network
    network = input_data(shape=[None, beat_spec_max, 1])
    network = conv_1d(network, 32, 4, activation='relu', regularizer="L2")
    network = max_pool_1d(network, 2)
    network = conv_1d(network, 64, 80, activation='relu', regularizer="L2")
    network = max_pool_1d(network, 2)
    network = fully_connected(network, 128, activation='relu')
    network = dropout(network, 0.8)
    network = fully_connected(network, 256, activation='relu') # look for non-tanh things???
    network = dropout(network, 0.8)
    network = fully_connected(network, 1, activation='linear')
    regress = tflearn.regression(network, optimizer='sgd', loss='mean_square', learning_rate=0.01)

    start = time.time()
    # Training
    model = tflearn.DNN(regress, tensorboard_verbose=1)
    model.fit(trainX, trainY, n_epoch=2000,
              snapshot_step=1000, show_metric=True, run_id='mir1k_2000_truncate')
    elapsed = (time.time() - start)

    predicted = np.array(model.predict(testX))[:,0]
    print("Test MSE: ", np.square(testY - predicted).mean())
    print(elapsed, "seconds")
    plot(testY, predicted)
Example #16
def make_core_network(network):
     network = tflearn.reshape(network, [-1, 28, 28, 1], name="reshape")
     network = conv_2d(network, 32, 3, activation='relu', regularizer="L2")
     network = max_pool_2d(network, 2)
     network = local_response_normalization(network)
     network = conv_2d(network, 64, 3, activation='relu', regularizer="L2")
     network = max_pool_2d(network, 2)
     network = local_response_normalization(network)
     network = fully_connected(network, 128, activation='tanh')
     network = dropout(network, 0.8)
     network = fully_connected(network, 256, activation='tanh')
     network = dropout(network, 0.8)
     network = fully_connected(network, 10, activation='softmax')
     return network
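Because make_core_network() both takes and returns a layer, it can be spliced into a larger graph. A sketch in the style of TFLearn's weight-sharing examples, assuming flattened 784-pixel MNIST vectors:

import tflearn
from tflearn.layers.estimator import regression

net = tflearn.input_data(shape=[None, 784], name='input')
net = make_core_network(net)  # reshapes to 28x28x1 internally, ends in a 10-way softmax
net = regression(net, optimizer='adam', learning_rate=0.01,
                 loss='categorical_crossentropy', name='target')
model = tflearn.DNN(net, tensorboard_verbose=0)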
Example #17
def _model3():
    global yTest, img_aug
    tf.reset_default_graph()
    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()
    network = input_data(shape=[None, inputSize, inputSize, dim],
                             data_preprocessing=img_prep,
                             data_augmentation=img_aug)
    network = conv_2d(network, 96, 11, strides=4, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = conv_2d(network, 256, 5, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = conv_2d(network, 384, 3, activation='relu')
    network = conv_2d(network, 384, 3, activation='relu')
    network = conv_2d(network, 256, 3, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, len(yTest[0]), activation='softmax')
    network = regression(network, optimizer='momentum',
                         loss='categorical_crossentropy',
                         learning_rate=0.001)
    print('Model has been built.')
    # Training
    model = tflearn.DNN(network, checkpoint_path='model_densenet_cifar10',
                        max_checkpoints=10, tensorboard_verbose=0,
                        clip_gradients=0.)
    model.load(_path)
    pred = model.predict(xTest)

    df = pd.DataFrame(pred)
    df.to_csv(_path + ".csv")

    newList = pred.copy()
    newList = convert2(newList)
    if _CSV: makeCSV(newList)
    pred = convert2(pred)
    pred = convert3(pred)
    yTest = convert3(yTest)
    print(metrics.confusion_matrix(yTest, pred))
    print(metrics.classification_report(yTest, pred))
    print('Accuracy', accuracy_score(yTest, pred))
    print()
    if _wrFile: writeTest(pred)
Example #18
def main():
    """

    :return:
    """
    pickle_folder = '../NMF/mfcc_pickles'
    pickle_folders_to_load = [f for f in os.listdir(pickle_folder) if os.path.isdir(join(pickle_folder, f))]

    fg_or_bg = 'background'
    sdr_type = 'sdr'
    feature = 'mfcc_clusters'
    beat_spec_len = 432
    n_epochs = 200
    take = 1

    # set up training, testing, & validation partitions
    mfcc_array, sdr_array = load_mfcc_and_sdrs(pickle_folders_to_load, pickle_folder,
                                                    feature, fg_or_bg, sdr_type)

    mfcc_array = np.expand_dims(mfcc_array, -1)
    sdr_array = np.expand_dims(sdr_array, -1)

    # Building convolutional network
    network = input_data(shape=[None, 13, 100, 1])
    network = conv_2d(network, 32, [5, 5], activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = conv_2d(network, 64, [5, 5], activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = fully_connected(network, 128, activation='relu')
    network = dropout(network, 0.8)
    network = fully_connected(network, 256, activation='relu')
    network = dropout(network, 0.8)
    network = fully_connected(network, 1, activation='linear')
    regress = tflearn.regression(network, optimizer='rmsprop', loss='mean_square', learning_rate=0.001)

    start = time.time()
    # Training
    model = tflearn.DNN(regress)  # , session=sess)
    model.fit(mfcc_array, sdr_array, n_epoch=n_epochs,
              snapshot_step=1000, show_metric=True,
              run_id='repet_save_{0}_epochs_take_{1}'.format(n_epochs, take))
    elapsed = (time.time() - start)
    print('Finished training after {0} seconds. Saving...'.format(elapsed))

    model_output_folder = 'network_outputs/'
    model_output_file = join(model_output_folder, 'nmf_save_{0}_epochs_take_{1}'.format(n_epochs, take))

    model.save(model_output_file)
Example #19
def do_cnn(trainX, trainY, testX, testY):
    global n_words
    # Data preprocessing
    # Sequence padding
    trainX = pad_sequences(trainX, maxlen=MAX_DOCUMENT_LENGTH, value=0.)
    testX = pad_sequences(testX, maxlen=MAX_DOCUMENT_LENGTH, value=0.)
    # Converting labels to binary vectors
    trainY = to_categorical(trainY, nb_classes=2)
    testY = to_categorical(testY, nb_classes=2)

    # Building convolutional network
    network = input_data(shape=[None, MAX_DOCUMENT_LENGTH], name='input')
    network = tflearn.embedding(network, input_dim=n_words+1, output_dim=128)
    branch1 = conv_1d(network, 128, 3, padding='valid', activation='relu', regularizer="L2")
    branch2 = conv_1d(network, 128, 4, padding='valid', activation='relu', regularizer="L2")
    branch3 = conv_1d(network, 128, 5, padding='valid', activation='relu', regularizer="L2")
    network = merge([branch1, branch2, branch3], mode='concat', axis=1)
    network = tf.expand_dims(network, 2)
    network = global_max_pool(network)
    network = dropout(network, 0.5)
    network = fully_connected(network, 2, activation='softmax')
    network = regression(network, optimizer='adam', learning_rate=0.001,
                         loss='categorical_crossentropy', name='target')
    # Training
    model = tflearn.DNN(network, tensorboard_verbose=0)
    model.fit(trainX, trainY, n_epoch=20, shuffle=True, validation_set=(testX, testY), show_metric=True, batch_size=32)
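pad_sequences and to_categorical above come from TFLearn's data utilities, and the snippet reads module-level names defined elsewhere; a sketch of the assumed setup (both constants are illustrative):

from tflearn.data_utils import to_categorical, pad_sequences
from tflearn.layers.conv import conv_1d, global_max_pool
from tflearn.layers.merge_ops import merge

MAX_DOCUMENT_LENGTH = 100  # assumed cap on tokens per document
n_words = 10000            # assumed vocabulary size produced by the tokenizer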
Example #20
def createModel(nbClasses,imageSize):
	print("[+] Creating model...")
	convnet = input_data(shape=[None, imageSize, imageSize, 1], name='input')

	convnet = conv_2d(convnet, 64, 2, activation='elu', weights_init="Xavier")
	convnet = max_pool_2d(convnet, 2)

	convnet = conv_2d(convnet, 128, 2, activation='elu', weights_init="Xavier")
	convnet = max_pool_2d(convnet, 2)

	convnet = conv_2d(convnet, 256, 2, activation='elu', weights_init="Xavier")
	convnet = max_pool_2d(convnet, 2)

	convnet = conv_2d(convnet, 512, 2, activation='elu', weights_init="Xavier")
	convnet = max_pool_2d(convnet, 2)

	convnet = fully_connected(convnet, 1024, activation='elu')
	convnet = dropout(convnet, 0.5)

	convnet = fully_connected(convnet, nbClasses, activation='softmax')
	convnet = regression(convnet, optimizer='rmsprop', loss='categorical_crossentropy')

	model = tflearn.DNN(convnet)
	print("    Model created! ✅")
	return model
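A hedged usage sketch with made-up data; nbClasses and imageSize are whatever the caller chooses, as long as four rounds of 2x pooling leave a positive spatial size:

import numpy as np

model = createModel(nbClasses=10, imageSize=64)
X = np.random.rand(8, 64, 64, 1)             # toy grayscale images
Y = np.eye(10)[np.random.randint(0, 10, 8)]  # one-hot labels
model.fit(X, Y, n_epoch=1, show_metric=True)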
Example #21
def do_cnn_doc2vec(trainX, testX, trainY, testY):
    global max_features
    print "CNN and doc2vec"

    #trainX = pad_sequences(trainX, maxlen=max_features, value=0.)
    #testX = pad_sequences(testX, maxlen=max_features, value=0.)
    # Converting labels to binary vectors
    trainY = to_categorical(trainY, nb_classes=2)
    testY = to_categorical(testY, nb_classes=2)

    # Building convolutional network
    network = input_data(shape=[None, max_features], name='input')
    network = tflearn.embedding(network, input_dim=1000000, output_dim=128, validate_indices=False)
    branch1 = conv_1d(network, 128, 3, padding='valid', activation='relu', regularizer="L2")
    branch2 = conv_1d(network, 128, 4, padding='valid', activation='relu', regularizer="L2")
    branch3 = conv_1d(network, 128, 5, padding='valid', activation='relu', regularizer="L2")
    network = merge([branch1, branch2, branch3], mode='concat', axis=1)
    network = tf.expand_dims(network, 2)
    network = global_max_pool(network)
    network = dropout(network, 0.8)
    network = fully_connected(network, 2, activation='softmax')
    network = regression(network, optimizer='adam', learning_rate=0.001,
                         loss='categorical_crossentropy', name='target')
    # Training
    model = tflearn.DNN(network, tensorboard_verbose=0)
    model.fit(trainX, trainY,
              n_epoch=5, shuffle=True, validation_set=(testX, testY),
              show_metric=True, batch_size=100, run_id="review")
Example #22
def get_cnn_model(checkpoint_path='cnn_servo_model', width=72, height=48, depth=3, session=None):
    
    # Inputs
    network = input_data(shape=[None, height, width, depth], name='input')

    # Convolution no.1
    # Relu introduces non linearity into training
    network = conv_2d(network, 8, [5, 3], activation='relu')

    # Convolution no.2
    network = conv_2d(network, 12, [5, 8], activation='relu')
    
    # Convolution no.3
    network = conv_2d(network, 16, [5, 16], activation='relu')

    # Convolution no.4
    network = conv_2d(network, 24, [3, 20], activation='relu')

    # Convolution no.5
    network = conv_2d(network, 24, [3, 24], activation='relu')

    # Fully connected no.1
    network = fully_connected(network, 256, activation='relu')
    network = dropout(network, 0.8)

    # Fully connected no.2
    network = fully_connected(network, 100, activation='relu')
    network = dropout(network, 0.8)

    # Fully connected no.3
    network = fully_connected(network, 50, activation='relu')
    network = dropout(network, 0.8)

    # Fully connected no.4
    network = fully_connected(network, 10, activation='relu')
    network = dropout(network, 0.8)
 
    # Fully connected no.5
    network = fully_connected(network, 1, activation='tanh')

    # Regression
    network = regression(network, loss='mean_square', metric='accuracy', learning_rate=1e-4,name='target') 

    # Verbosity yay nay
    # 0 = nothing
    model = tflearn.DNN(network, tensorboard_verbose=2, checkpoint_path=checkpoint_path, session=session) 
    return model
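The single tanh output suggests a steering value normalized to [-1, 1]; a hypothetical inference sketch with a dummy frame of the default 48x72x3 shape:

import numpy as np

model = get_cnn_model()
frame = np.random.rand(1, 48, 72, 3)   # [batch, height, width, depth]
steering = model.predict(frame)[0][0]  # scalar in [-1, 1] from the tanh unit
print(steering)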
Example #23
def do_cnn_word2vec_2d(trainX, testX, trainY, testY):
    global max_features
    global max_document_length
    print "CNN and word2vec2d"
    y_test = testY
    #trainX = pad_sequences(trainX, maxlen=max_features, value=0.)
    #testX = pad_sequences(testX, maxlen=max_features, value=0.)
    # Converting labels to binary vectors
    trainY = to_categorical(trainY, nb_classes=2)
    testY = to_categorical(testY, nb_classes=2)

    # Building convolutional network
    network = input_data(shape=[None, max_document_length, max_features, 1], name='input')

    network = conv_2d(network, 32, 3, activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = local_response_normalization(network)
    network = conv_2d(network, 64, 3, activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = local_response_normalization(network)
    network = fully_connected(network, 128, activation='tanh')
    network = dropout(network, 0.8)
    network = fully_connected(network, 256, activation='tanh')
    network = dropout(network, 0.8)
    network = fully_connected(network, 2, activation='softmax')
    network = regression(network, optimizer='adam', learning_rate=0.01,
                         loss='categorical_crossentropy', name='target')

    model = tflearn.DNN(network, tensorboard_verbose=0)
    model.fit(trainX, trainY,
              n_epoch=5, shuffle=True, validation_set=(testX, testY),
              show_metric=True,run_id="sms")

    y_predict_list = model.predict(testX)
    print(y_predict_list)

    y_predict = []
    for i in y_predict_list:
        print(i[0])
        if i[0] > 0.5:
            y_predict.append(0)
        else:
            y_predict.append(1)

    print(classification_report(y_test, y_predict))
    print(metrics.confusion_matrix(y_test, y_predict))
Example #24
def main():
    """

    :return:
    """
    # pickle parameters
    fg_or_bg = 'background'
    sdr_type = 'sdr'
    feature = 'sim_mat'
    beat_spec_len = 432

    # set up training, testing, & validation partitions
    sim_mat_array, sdr_array = get_generated_data(feature, fg_or_bg, sdr_type)

    # training params
    n_classes = 10
    n_training_steps = 1000
    training_step_size = 100
    training_percent = 0.85
    testing_percent = 0.15
    validation_percent = 0.00

    sdr_array_1h, hist = sdrs_to_one_hots(sdr_array, n_classes, True)

    train, test, validate = split_into_sets(len(sim_mat_array), training_percent,
                                            testing_percent, validation_percent)

    # Building convolutional network
    network = input_data(shape=[None, beat_spec_len, 1], name='input')
    network = conv_1d(network, 32, 3, activation='relu', regularizer="L2")
    network = max_pool_1d(network, 2)
    # network = local_response_normalization(network)
    # network = batch_normalization(network)
    # network = conv_1d(network, 64, 3, activation='relu', regularizer="L2")
    # network = max_pool_1d(network, 2)
    # network = local_response_normalization(network)
    # network = batch_normalization(network)
    # network = fully_connected(network, 128, activation='tanh')
    # network = dropout(network, 0.5)
    network = fully_connected(network, 512, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, n_classes, activation='softmax')
    # network = fully_connected(network, 1, activation='linear')
    network = regression(network, optimizer='adagrad', learning_rate=0.01,
                         loss='categorical_crossentropy', name='target')

    X = np.expand_dims(sim_mat_array, -1)
    Y = np.array(sdr_array_1h)
    # X = np.expand_dims([beat_spec_array[i] for i in train], -1)
    # Y = np.array([sdr_array_1h[i] for i in train])
    # testX = np.expand_dims([beat_spec_array[i] for i in test], -1)
    # testY = np.array([sdr_array[i] for i in test])

    # Training
    model = tflearn.DNN(network, tensorboard_verbose=1)
    model.fit({'input': X}, {'target': Y}, n_epoch=20,
              validation_set=0.1,
              snapshot_step=1000, show_metric=True, run_id='{} classes'.format(n_classes - 1))
Example #25
def vggnet():
    X, Y = oxflower17.load_data(one_hot=True, resize_pics=(227, 227))

    # Building 'VGG Network'
    network = input_data(shape=[None, 227, 227, 3])

    network = conv_2d(network, 64, 3, activation='relu')
    network = conv_2d(network, 64, 3, activation='relu')
    network = max_pool_2d(network, 2, strides=2)

    network = conv_2d(network, 128, 3, activation='relu')
    network = conv_2d(network, 128, 3, activation='relu')
    network = max_pool_2d(network, 2, strides=2)

    network = conv_2d(network, 256, 3, activation='relu')
    network = conv_2d(network, 256, 3, activation='relu')
    network = conv_2d(network, 256, 3, activation='relu')
    network = max_pool_2d(network, 2, strides=2)

    network = conv_2d(network, 512, 3, activation='relu')
    network = conv_2d(network, 512, 3, activation='relu')
    network = conv_2d(network, 512, 3, activation='relu')
    network = max_pool_2d(network, 2, strides=2)

    network = conv_2d(network, 512, 3, activation='relu')
    network = conv_2d(network, 512, 3, activation='relu')
    network = conv_2d(network, 512, 3, activation='relu')
    network = max_pool_2d(network, 2, strides=2)

    network = fully_connected(network, 4096, activation='relu')
    network = dropout(network, 0.5)
    network = fully_connected(network, 4096, activation='relu')
    network = dropout(network, 0.5)
    network = fully_connected(network, 17, activation='softmax')

    network = regression(network, optimizer='rmsprop',
                         loss='categorical_crossentropy',
                         learning_rate=0.0001)

    # Training
    model = tflearn.DNN(network, checkpoint_path='model_vgg',
                        max_checkpoints=1, tensorboard_verbose=0)
    model.fit(X, Y, n_epoch=500, shuffle=True,
              show_metric=True, batch_size=32, snapshot_step=500,
              snapshot_epoch=False, run_id='vgg')
Example #26
def create_model(learning_rate, input_shape, nb_classes, base_path, drop=1):
    network = input_data(shape=input_shape, name='input')
    network = conv_2d(network, 32, 3, activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = local_response_normalization(network)
    network = conv_2d(network, 64, 3, activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = local_response_normalization(network)
    network = fully_connected(network, 128, activation='tanh')
    network = dropout(network, drop)
    network = fully_connected(network, 256, activation='tanh')
    network = dropout(network, drop)
    network = fully_connected(network, nb_classes, activation='softmax')
    network = regression(network, optimizer='adam', learning_rate=learning_rate,
                         loss='categorical_crossentropy', name='target')
    model = tflearn.DNN(network, tensorboard_verbose=0, checkpoint_path=base_path + "/checkpoints/step")

    return model
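Note that TFLearn's dropout() takes a keep probability, so the default drop=1 keeps every activation and effectively disables dropout. A call that actually drops 20% of activations, with an assumed MNIST-style input shape:

model = create_model(learning_rate=0.001,
                     input_shape=[None, 28, 28, 1],  # assumed input geometry
                     nb_classes=10,
                     base_path='.',
                     drop=0.8)  # keep 80% of activations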
Example #27
def main():
    """
    Trains a CNN architecture and plots the results over a validation set.
    Returns:

    """

    # Load the SDR and hist data
    data = load_data('reverb_pan_full_sdr.txt', 'pickle/')

    # split data into train and test sets
    test_percent = 0.15
    train, test, validate = split_into_sets(len(data['sdr']), 1-test_percent,
                                            test_percent, 0)

    x_train = np.expand_dims([data['input'][i] for i in train], -1)
    y_train = np.expand_dims([data['sdr'][i] for i in train], -1)
    x_test = np.expand_dims([data['input'][i] for i in test], -1)
    y_test = np.expand_dims([data['sdr'][i] for i in test], -1)

    # construct the CNN.
    inp = input_data(shape=[None, 50, 50, 1], name='input')
    # two convolutional layers with max pooling
    conv1 = conv_2d(inp, 32, [5, 5], activation='relu', regularizer="L2")
    max_pool = max_pool_2d(conv1, 2)
    conv2 = conv_2d(max_pool, 64, [5, 5], activation='relu', regularizer="L2")
    max_pool2 = max_pool_2d(conv2, 2)
    # two fully connected layers
    full = fully_connected(max_pool2, 128, activation='tanh')
    full = dropout(full, 0.8)
    full2 = fully_connected(full, 256, activation='tanh')
    full2 = dropout(full2, 0.8)
    # output regression node
    out = fully_connected(full2, 1, activation='linear')
    network = regression(out, optimizer='sgd', learning_rate=0.01, name='target', loss='mean_square')

    model = tflearn.DNN(network, tensorboard_verbose=1, checkpoint_path='checkpoint.p',
                        tensorboard_dir='tmp/tflearn_logs/')

    model.fit({'input': x_train}, {'target': y_train}, n_epoch=1000, validation_set=(x_test, y_test),
              snapshot_step=10000, run_id='convnet_duet_3x3')

    predicted = np.array(model.predict(x_test))[:,0]
    plot(y_test, predicted)
Example #28
def _model1():
    global yTest, img_aug
    tf.reset_default_graph()
    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()
    network = input_data(shape=[None, inputSize, inputSize, dim],
                 name='input',
                 data_preprocessing=img_prep,
                 data_augmentation=img_aug)

    network = conv_2d(network, 32, 3, strides=4, activation='relu')
    network = max_pool_2d(network, 2, strides=2)
    network = local_response_normalization(network)
    network = conv_2d(network, 64, 3, strides=2, activation='relu')
    network = max_pool_2d(network, 2, strides=2)
    network = local_response_normalization(network)
    network = fully_connected(network, 128, activation='tanh')
    network = dropout(network, 0.8)
    network = fully_connected(network, 256, activation='tanh')
    network = dropout(network, 0.8)
    network = fully_connected(network, len(yTest[0]), activation='softmax')
    network = regression(network, optimizer='adam', learning_rate=0.001,
                 loss='categorical_crossentropy', name='target')

    model = tflearn.DNN(network, tensorboard_verbose=3)
    model.load(_path)
    pred = model.predict(xTest)

    df = pd.DataFrame(pred)
    df.to_csv(_path + ".csv")

    newList = pred.copy()
    newList = convert2(newList)
    if _CSV: makeCSV(newList)
    pred = convert2(pred)
    pred = convert3(pred)
    yTest = convert3(yTest)
    print(metrics.confusion_matrix(yTest, pred))
    print(metrics.classification_report(yTest, pred))
    print('Accuracy', accuracy_score(yTest, pred))
    print()
    if _wrFile: writeTest(pred)
Example #29
def cnn():
    network = input_data(shape=[None, IMAGE_HEIGHT, IMAGE_WIDTH, 1], name='input')
    network = conv_2d(network, 8, 3, activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = batch_normalization(network)
    network = conv_2d(network, 16, 3, activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = batch_normalization(network)
    network = conv_2d(network, 16, 3, activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = batch_normalization(network)
    network = fully_connected(network, 256, activation='tanh')
    network = dropout(network, 0.8)
    network = fully_connected(network, 256, activation='tanh')
    network = dropout(network, 0.8)
    network = fully_connected(network, CODE_LEN * MAX_CHAR, activation='softmax')
    network = regression(network, optimizer='adam', learning_rate=0.001,
                         loss='categorical_crossentropy', name='target')
    return network
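cnn() returns only the graph and reads its size constants from module globals; a sketch of the assumed constants and the training wrapper (all four values are illustrative):

import tflearn

IMAGE_HEIGHT, IMAGE_WIDTH = 60, 160  # assumed captcha geometry
CODE_LEN, MAX_CHAR = 4, 10           # assumed: 4 character positions over a 10-symbol alphabet

model = tflearn.DNN(cnn(), tensorboard_verbose=0)
# model.fit({'input': X}, {'target': Y}, n_epoch=10, show_metric=True)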
Example #30
def do_cnn(x, y):
    global max_document_length
    print("CNN and tf")
    trainX, testX, trainY, testY = train_test_split(x, y, test_size=0.4, random_state=0)
    y_test = testY

    trainX = pad_sequences(trainX, maxlen=max_document_length, value=0.)
    testX = pad_sequences(testX, maxlen=max_document_length, value=0.)
    # Converting labels to binary vectors
    trainY = to_categorical(trainY, nb_classes=2)
    testY = to_categorical(testY, nb_classes=2)

    # Building convolutional network
    network = input_data(shape=[None, max_document_length], name='input')
    network = tflearn.embedding(network, input_dim=1000000, output_dim=128)
    branch1 = conv_1d(network, 128, 3, padding='valid', activation='relu', regularizer="L2")
    branch2 = conv_1d(network, 128, 4, padding='valid', activation='relu', regularizer="L2")
    branch3 = conv_1d(network, 128, 5, padding='valid', activation='relu', regularizer="L2")
    network = merge([branch1, branch2, branch3], mode='concat', axis=1)
    network = tf.expand_dims(network, 2)
    network = global_max_pool(network)
    network = dropout(network, 0.8)
    network = fully_connected(network, 2, activation='softmax')
    network = regression(network, optimizer='adam', learning_rate=0.001,
                         loss='categorical_crossentropy', name='target')

    model = tflearn.DNN(network, tensorboard_verbose=0)
    #if not os.path.exists(pkl_file):
        # Training
    model.fit(trainX, trainY,
              n_epoch=5, shuffle=True, validation_set=0.1,
              show_metric=True, batch_size=100, run_id="webshell")
    #    model.save(pkl_file)
    #else:
    #    model.load(pkl_file)

    y_predict_list = model.predict(testX)
    # y_predict = list(model.predict(testX, as_iterable=True))

    y_predict = []
    for i in y_predict_list:
        print(i[0])
        if i[0] > 0.5:
            y_predict.append(0)
        else:
            y_predict.append(1)
    print('y_predict_list:')
    print(y_predict_list)
    print('y_predict:')
    print(y_predict)
    # print(y_test)

    do_metrics(y_test, y_predict)
Example #31
def otherception3(width, height, frame_count, lr, output=9, model_name='otherception.model', device='gpu', num='0'):
  #  with tf.device('/{}:{}'.format(device,num)):
        network = input_data(shape=[None, width, height, 3], name='input')
        conv1_7_7 = conv_2d(network, 64, 28, strides=4, activation='relu', name='conv1_7_7_s2')
        pool1_3_3 = max_pool_2d(conv1_7_7, 9, strides=4)
        pool1_3_3 = local_response_normalization(pool1_3_3)
        conv2_3_3_reduce = conv_2d(pool1_3_3, 64, 1, activation='relu', name='conv2_3_3_reduce')
        conv2_3_3 = conv_2d(conv2_3_3_reduce, 192, 12, activation='relu', name='conv2_3_3')
        conv2_3_3 = local_response_normalization(conv2_3_3)
        pool2_3_3 = max_pool_2d(conv2_3_3, kernel_size=12, strides=2, name='pool2_3_3_s2')
        inception_3a_1_1 = conv_2d(pool2_3_3, 64, 1, activation='relu', name='inception_3a_1_1')
        inception_3a_3_3_reduce = conv_2d(pool2_3_3, 96, 1, activation='relu', name='inception_3a_3_3_reduce')
        inception_3a_3_3 = conv_2d(inception_3a_3_3_reduce, 128, filter_size=12, activation='relu', name='inception_3a_3_3')
        inception_3a_5_5_reduce = conv_2d(pool2_3_3, 16, filter_size=1, activation='relu', name='inception_3a_5_5_reduce')
        inception_3a_5_5 = conv_2d(inception_3a_5_5_reduce, 32, filter_size=15, activation='relu', name='inception_3a_5_5')
        inception_3a_pool = max_pool_2d(pool2_3_3, kernel_size=12, strides=1)
        inception_3a_pool_1_1 = conv_2d(inception_3a_pool, 32, filter_size=1, activation='relu', name='inception_3a_pool_1_1')

        # merge the inception_3a__
        inception_3a_output = merge([inception_3a_1_1, inception_3a_3_3, inception_3a_5_5, inception_3a_pool_1_1], mode='concat', axis=3)

        inception_3b_1_1 = conv_2d(inception_3a_output, 128,filter_size=1,activation='relu', name= 'inception_3b_1_1' )
        inception_3b_3_3_reduce = conv_2d(inception_3a_output, 128, filter_size=1, activation='relu', name='inception_3b_3_3_reduce')
        inception_3b_3_3 = conv_2d(inception_3b_3_3_reduce, 192, filter_size=9,  activation='relu',name='inception_3b_3_3')
        inception_3b_5_5_reduce = conv_2d(inception_3a_output, 32, filter_size=1, activation='relu', name = 'inception_3b_5_5_reduce')
        inception_3b_5_5 = conv_2d(inception_3b_5_5_reduce, 96, filter_size=15,  name = 'inception_3b_5_5')
        inception_3b_pool = max_pool_2d(inception_3a_output, kernel_size=12, strides=1,  name='inception_3b_pool')
        inception_3b_pool_1_1 = conv_2d(inception_3b_pool, 64, filter_size=1,activation='relu', name='inception_3b_pool_1_1')

        #merge the inception_3b_*
        inception_3b_output = merge([inception_3b_1_1, inception_3b_3_3, inception_3b_5_5, inception_3b_pool_1_1], mode='concat',axis=3,name='inception_3b_output')

        pool3_3_3 = max_pool_2d(inception_3b_output, kernel_size=3, strides=2, name='pool3_3_3')
        inception_4a_1_1 = conv_2d(pool3_3_3, 192, filter_size=1, activation='relu', name='inception_4a_1_1')
        inception_4a_3_3_reduce = conv_2d(pool3_3_3, 96, filter_size=1, activation='relu', name='inception_4a_3_3_reduce')
        inception_4a_3_3 = conv_2d(inception_4a_3_3_reduce, 208, filter_size=3,  activation='relu', name='inception_4a_3_3')
        inception_4a_5_5_reduce = conv_2d(pool3_3_3, 16, filter_size=1, activation='relu', name='inception_4a_5_5_reduce')
        inception_4a_5_5 = conv_2d(inception_4a_5_5_reduce, 48, filter_size=5,  activation='relu', name='inception_4a_5_5')
        inception_4a_pool = max_pool_2d(pool3_3_3, kernel_size=3, strides=1,  name='inception_4a_pool')
        inception_4a_pool_1_1 = conv_2d(inception_4a_pool, 64, filter_size=1, activation='relu', name='inception_4a_pool_1_1')

        inception_4a_output = merge([inception_4a_1_1, inception_4a_3_3, inception_4a_5_5, inception_4a_pool_1_1], mode='concat', axis=3, name='inception_4a_output')


        inception_4b_1_1 = conv_2d(inception_4a_output, 160, filter_size=1, activation='relu', name='inception_4a_1_1')
        inception_4b_3_3_reduce = conv_2d(inception_4a_output, 112, filter_size=1, activation='relu', name='inception_4b_3_3_reduce')
        inception_4b_3_3 = conv_2d(inception_4b_3_3_reduce, 224, filter_size=3, activation='relu', name='inception_4b_3_3')
        inception_4b_5_5_reduce = conv_2d(inception_4a_output, 24, filter_size=1, activation='relu', name='inception_4b_5_5_reduce')
        inception_4b_5_5 = conv_2d(inception_4b_5_5_reduce, 64, filter_size=5,  activation='relu', name='inception_4b_5_5')

        inception_4b_pool = max_pool_2d(inception_4a_output, kernel_size=3, strides=1,  name='inception_4b_pool')
        inception_4b_pool_1_1 = conv_2d(inception_4b_pool, 64, filter_size=1, activation='relu', name='inception_4b_pool_1_1')

        inception_4b_output = merge([inception_4b_1_1, inception_4b_3_3, inception_4b_5_5, inception_4b_pool_1_1], mode='concat', axis=3, name='inception_4b_output')


        inception_4c_1_1 = conv_2d(inception_4b_output, 128, filter_size=1, activation='relu',name='inception_4c_1_1')
        inception_4c_3_3_reduce = conv_2d(inception_4b_output, 128, filter_size=1, activation='relu', name='inception_4c_3_3_reduce')
        inception_4c_3_3 = conv_2d(inception_4c_3_3_reduce, 256,  filter_size=3, activation='relu', name='inception_4c_3_3')
        inception_4c_5_5_reduce = conv_2d(inception_4b_output, 24, filter_size=1, activation='relu', name='inception_4c_5_5_reduce')
        inception_4c_5_5 = conv_2d(inception_4c_5_5_reduce, 64,  filter_size=5, activation='relu', name='inception_4c_5_5')

        inception_4c_pool = max_pool_2d(inception_4b_output, kernel_size=3, strides=1)
        inception_4c_pool_1_1 = conv_2d(inception_4c_pool, 64, filter_size=1, activation='relu', name='inception_4c_pool_1_1')

        inception_4c_output = merge([inception_4c_1_1, inception_4c_3_3, inception_4c_5_5, inception_4c_pool_1_1], mode='concat', axis=3,name='inception_4c_output')

        inception_4d_1_1 = conv_2d(inception_4c_output, 112, filter_size=1, activation='relu', name='inception_4d_1_1')
        inception_4d_3_3_reduce = conv_2d(inception_4c_output, 144, filter_size=1, activation='relu', name='inception_4d_3_3_reduce')
        inception_4d_3_3 = conv_2d(inception_4d_3_3_reduce, 288, filter_size=3, activation='relu', name='inception_4d_3_3')
        inception_4d_5_5_reduce = conv_2d(inception_4c_output, 32, filter_size=1, activation='relu', name='inception_4d_5_5_reduce')
        inception_4d_5_5 = conv_2d(inception_4d_5_5_reduce, 64, filter_size=5,  activation='relu', name='inception_4d_5_5')
        inception_4d_pool = max_pool_2d(inception_4c_output, kernel_size=3, strides=1,  name='inception_4d_pool')
        inception_4d_pool_1_1 = conv_2d(inception_4d_pool, 64, filter_size=1, activation='relu', name='inception_4d_pool_1_1')

        inception_4d_output = merge([inception_4d_1_1, inception_4d_3_3, inception_4d_5_5, inception_4d_pool_1_1], mode='concat', axis=3, name='inception_4d_output')

        inception_4e_1_1 = conv_2d(inception_4d_output, 256, filter_size=1, activation='relu', name='inception_4e_1_1')
        inception_4e_3_3_reduce = conv_2d(inception_4d_output, 160, filter_size=1, activation='relu', name='inception_4e_3_3_reduce')
        inception_4e_3_3 = conv_2d(inception_4e_3_3_reduce, 320, filter_size=3, activation='relu', name='inception_4e_3_3')
        inception_4e_5_5_reduce = conv_2d(inception_4d_output, 32, filter_size=1, activation='relu', name='inception_4e_5_5_reduce')
        inception_4e_5_5 = conv_2d(inception_4e_5_5_reduce, 128,  filter_size=5, activation='relu', name='inception_4e_5_5')
        inception_4e_pool = max_pool_2d(inception_4d_output, kernel_size=3, strides=1,  name='inception_4e_pool')
        inception_4e_pool_1_1 = conv_2d(inception_4e_pool, 128, filter_size=1, activation='relu', name='inception_4e_pool_1_1')


        inception_4e_output = merge([inception_4e_1_1, inception_4e_3_3, inception_4e_5_5,inception_4e_pool_1_1],axis=3, mode='concat')

        pool4_3_3 = max_pool_2d(inception_4e_output, kernel_size=3, strides=2, name='pool_3_3')


        inception_5a_1_1 = conv_2d(pool4_3_3, 256, filter_size=1, activation='relu', name='inception_5a_1_1')
        inception_5a_3_3_reduce = conv_2d(pool4_3_3, 160, filter_size=1, activation='relu', name='inception_5a_3_3_reduce')
        inception_5a_3_3 = conv_2d(inception_5a_3_3_reduce, 320, filter_size=3, activation='relu', name='inception_5a_3_3')
        inception_5a_5_5_reduce = conv_2d(pool4_3_3, 32, filter_size=1, activation='relu', name='inception_5a_5_5_reduce')
        inception_5a_5_5 = conv_2d(inception_5a_5_5_reduce, 128, filter_size=5,  activation='relu', name='inception_5a_5_5')
        inception_5a_pool = max_pool_2d(pool4_3_3, kernel_size=3, strides=1,  name='inception_5a_pool')
        inception_5a_pool_1_1 = conv_2d(inception_5a_pool, 128, filter_size=1,activation='relu', name='inception_5a_pool_1_1')

        inception_5a_output = merge([inception_5a_1_1, inception_5a_3_3, inception_5a_5_5, inception_5a_pool_1_1], axis=3,mode='concat')


        inception_5b_1_1 = conv_2d(inception_5a_output, 384, filter_size=1,activation='relu', name='inception_5b_1_1')
        inception_5b_3_3_reduce = conv_2d(inception_5a_output, 192, filter_size=1, activation='relu', name='inception_5b_3_3_reduce')
        inception_5b_3_3 = conv_2d(inception_5b_3_3_reduce, 384,  filter_size=3,activation='relu', name='inception_5b_3_3')
        inception_5b_5_5_reduce = conv_2d(inception_5a_output, 48, filter_size=1, activation='relu', name='inception_5b_5_5_reduce')
        inception_5b_5_5 = conv_2d(inception_5b_5_5_reduce,128, filter_size=5,  activation='relu', name='inception_5b_5_5' )
        inception_5b_pool = max_pool_2d(inception_5a_output, kernel_size=3, strides=1,  name='inception_5b_pool')
        inception_5b_pool_1_1 = conv_2d(inception_5b_pool, 128, filter_size=1, activation='relu', name='inception_5b_pool_1_1')
        inception_5b_output = merge([inception_5b_1_1, inception_5b_3_3, inception_5b_5_5, inception_5b_pool_1_1], axis=3, mode='concat')

        pool5_7_7 = avg_pool_2d(inception_5b_output, kernel_size=7, strides=1)
        pool5_7_7 = dropout(pool5_7_7, 0.4)

        
        loss = fully_connected(pool5_7_7, output,activation='softmax')


        
        network = regression(loss, optimizer='momentum',
                             loss='categorical_crossentropy',
                             learning_rate=lr, name='targets')
        
        model = tflearn.DNN(network,
                            max_checkpoints=0, tensorboard_verbose=0,tensorboard_dir='log')

        return model
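A hedged call for this Inception-style builder; 227x227 mirrors the AlexNet examples and is large enough to survive the aggressive early pooling, and frame_count is accepted but never used by the body:

model = otherception3(width=227, height=227, frame_count=1, lr=1e-3, output=9)
# model.fit({'input': X}, {'targets': Y}, n_epoch=1, show_metric=True)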
Example #32
# The opening layers of this example were truncated in the source; a plausible
# head is sketched here (the 50x50 grayscale input shape is an assumption):
covnet = input_data(shape=[None, 50, 50, 1], name='input')
covnet = conv_2d(covnet, 32, 2, activation='relu')
covnet = max_pool_2d(covnet, 2)

covnet = conv_2d(covnet, 32, 2, activation='relu')
covnet = max_pool_2d(covnet, 2)

covnet = conv_2d(covnet, 64, 2, activation='relu')
covnet = max_pool_2d(covnet, 2)

covnet = conv_2d(covnet, 32, 2, activation='relu')
covnet = max_pool_2d(covnet, 2)

covnet = conv_2d(covnet, 64, 2, activation='relu')
covnet = max_pool_2d(covnet, 2)

covnet = fully_connected(covnet, 1024, activation='relu')
covnet = dropout(covnet, 0.8)

covnet = fully_connected(covnet, 3, activation='softmax')
covnet = regression(
	covnet, 
	optimizer='adam', 
	learning_rate=learning_rate, 
	loss='categorical_crossentropy', 
	name='targets'
	)

model = tflearn.DNN(covnet, tensorboard_dir = 'log')

# if os.path.exists('{}.meta'.format(MODEL_NAME)):
#     model.load(MODEL_NAME)
#     print('model loaded')
Example #33
# The opening lines of this example were truncated in the source; the input_data
# call is reconstructed from the identical pattern in Examples #5, #17, and #28.
network = input_data(shape=[None, inputSize, inputSize, dim],
                     data_preprocessing=img_prep,
                     data_augmentation=img_aug)

network = conv_2d(network, 96, 11, strides=4, activation='relu')
network = max_pool_2d(network, 3, strides=2)
network = local_response_normalization(network)
network = conv_2d(network, 256, 5, activation='relu')
network = max_pool_2d(network, 3, strides=2)
network = local_response_normalization(network)
network = conv_2d(network, 384, 3, activation='relu')
network = conv_2d(network, 384, 3, activation='relu')
network = conv_2d(network, 256, 3, activation='relu')
network = max_pool_2d(network, 3, strides=2)
network = local_response_normalization(network)
network = fully_connected(network, 4096, activation='tanh')
network = dropout(network, _dropout)
network = fully_connected(network, 4096, activation='tanh')
network = dropout(network, 0.5)
network = fully_connected(network, 2, activation='softmax')
network = regression(network,
                     optimizer='momentum',
                     loss='categorical_crossentropy',
                     learning_rate=_learning_rate)

# Comment/uncomment the train and load sections below as needed.
# Build the model
model = tflearn.DNN(network, tensorboard_dir='log', tensorboard_verbose=0)

#load the trained model
model.load(model_path)
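
A hypothetical follow-up once the weights are loaded; `img` stands for a preprocessed input array matching the assumed 227x227x3 shape above:

pred = model.predict([img])[0]  # img is an assumed, already-resized array
print('class probabilities:', pred)
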
Example #34
        X_train_sd = X_train_sd.reshape([-1, 13, 107, 1])

        # Building convolutional network
        network = input_data(shape=[None, 13, 107, 1], name='input')
        network = conv_2d(network,
                          32,
                          3,
                          activation='relu',
                          regularizer="L2",
                          name='conv1')
        network = max_pool_2d(network, 2, name='max1')
        network = fully_connected(network,
                                  128,
                                  activation='tanh',
                                  name='dense1')
        network = dropout(network, 0.8, name='drop1')
        network = fully_connected(network, 2, activation='softmax')
        network = regression(network,
                             optimizer='adam',
                             learning_rate=0.01,
                             loss='categorical_crossentropy',
                             name='target')

        # Define the model
        model = tflearn.DNN(network, tensorboard_verbose=3)

        # Train the model
        model.fit(X_train_sd,
                  Y_train,
                  n_epoch=n_epoch,
                  show_metric=True,
Example #35
def create_cnn_3d_VGG16():

    #img_prep = ImagePreprocessing()
    #img_prep.add_featurewise_zero_center(mean=0.25)

    network = input_data(
        shape=[None, IMG_SIZE_PX, IMG_SIZE_PX, IMG_SIZE_PX, 1])

    network = conv_3d(network, 64, 3)
    network = tflearn.activation(tflearn.batch_normalization(network),
                                 activation='relu')

    network = conv_3d(network, 64, 3)
    network = tflearn.activation(tflearn.batch_normalization(network),
                                 activation='relu')

    network = max_pool_3d(network, 2, strides=2)

    network = conv_3d(network, 64, 3)
    network = tflearn.activation(tflearn.batch_normalization(network),
                                 activation='relu')

    network = conv_3d(network, 32, 3)
    network = tflearn.activation(tflearn.batch_normalization(network),
                                 activation='relu')

    network = max_pool_3d(network, 2, strides=2)

    network = conv_3d(network, 256, 3)
    network = tflearn.activation(tflearn.batch_normalization(network),
                                 activation='relu')

    network = conv_3d(network, 256, 3)
    network = tflearn.activation(tflearn.batch_normalization(network),
                                 activation='relu')

    network = conv_3d(network, 256, 3)
    network = tflearn.activation(tflearn.batch_normalization(network),
                                 activation='relu')

    network = max_pool_3d(network, 2, strides=2)

    network = conv_3d(network, 512, 3)
    network = tflearn.activation(tflearn.batch_normalization(network),
                                 activation='relu')

    network = conv_3d(network, 512, 3)
    network = tflearn.activation(tflearn.batch_normalization(network),
                                 activation='relu')

    network = conv_3d(network, 512, 3)
    network = tflearn.activation(tflearn.batch_normalization(network),
                                 activation='relu')

    network = max_pool_3d(network, 2, strides=2)

    network = conv_3d(network, 512, 3)
    network = tflearn.activation(tflearn.batch_normalization(network),
                                 activation='relu')

    network = conv_3d(network, 512, 3)
    network = tflearn.activation(tflearn.batch_normalization(network),
                                 activation='relu')

    network = conv_3d(network, 512, 3)
    network = tflearn.activation(tflearn.batch_normalization(network),
                                 activation='relu')

    network = max_pool_3d(network, 2, strides=2)

    network = fully_connected(network, 2048)
    network = tflearn.activation(tflearn.batch_normalization(network),
                                 activation='relu')

    network = dropout(network, keep_rate)

    network = fully_connected(network, 2048)
    network = tflearn.activation(tflearn.batch_normalization(network),
                                 activation='relu')

    network = dropout(network, keep_rate)

    output = fully_connected(network, num_class, activation='softmax')

    network = regression(output,
                         optimizer='adam',
                         loss='categorical_crossentropy',
                         learning_rate=0.001)
    return network
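
conv_3d and max_pool_3d operate on 5-D volumes shaped [batch, depth, height, width, channels]. A minimal sketch of the shape bookkeeping, with an assumed IMG_SIZE_PX of 64:

import tflearn
from tflearn.layers.core import input_data
from tflearn.layers.conv import conv_3d, max_pool_3d

IMG_SIZE_PX = 64  # assumed; the original defines this constant elsewhere
vol = input_data(shape=[None, IMG_SIZE_PX, IMG_SIZE_PX, IMG_SIZE_PX, 1])
vol = conv_3d(vol, 64, 3, activation='relu')  # 'same' padding keeps spatial dims
vol = max_pool_3d(vol, 2, strides=2)          # halves depth, height and width
print(vol.shape)  # (?, 32, 32, 32, 64)
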
Example #36
X = X.reshape([-1, 28, 28, 1])  # reshape flat 784-pixel vectors into 28x28x1 images
test_x = test_x.reshape([-1, 28, 28, 1])  # same reshape for the test set

convnet = input_data(shape=[None, 28, 28, 1], name='input')

convnet = conv_2d(convnet, 32, 2, activation='relu')  #Convolutional Layer 1
convnet = max_pool_2d(convnet, 2)  #Pooling Layer 1

convnet = conv_2d(convnet, 64, 2, activation='relu')  #Convolutional Layer 2
convnet = max_pool_2d(convnet, 2)  #Pooling Layer 2

convnet = fully_connected(convnet, 1024,
                          activation='relu')  #Fully Connected Layer
convnet = dropout(convnet, 0.8)  # keep_prob=0.8: ~80% of activations are kept

convnet = fully_connected(convnet, 10,
                          activation='softmax')  #Output Layer with Softmax
convnet = regression(convnet,
                     optimizer='adam',
                     learning_rate=0.01,
                     loss='categorical_crossentropy',
                     name='targets')  #Regression layer

model = tflearn.models.DNN(
    convnet, tensorboard_dir='log'
)  #Deep Neural Network Model with TensorBoard log directory

# After training, comment out this section of the code.
Example #37
inception_5b_pool = max_pool_2d(inception_5a_output,
                                kernel_size=3,
                                strides=1,
                                name='inception_5b_pool')
inception_5b_pool_1_1 = conv_2d(inception_5b_pool,
                                128,
                                filter_size=1,
                                activation='relu',
                                name='inception_5b_pool_1_1')
inception_5b_output = merge([
    inception_5b_1_1, inception_5b_3_3, inception_5b_5_5, inception_5b_pool_1_1
],
                            axis=3,
                            mode='concat')
pool5_7_7 = avg_pool_2d(inception_5b_output, kernel_size=7, strides=1)
pool5_7_7 = dropout(pool5_7_7, 0.4)
network = fully_connected(pool5_7_7, 2, activation='softmax')

acc = Accuracy(name="Accuracy")
network = regression(network,
                     optimizer='adam',
                     loss='categorical_crossentropy',
                     learning_rate=0.0005,
                     metric=acc)
model = tflearn.DNN(network,
                    checkpoint_path='jun_glnet_cat_dog.tflearn',
                    max_checkpoints=1,
                    tensorboard_verbose=3,
                    tensorboard_dir='tmp/tflearn_logs/')
model.fit(trainX,
          trainY,
Example #38
train_x, train_y, test_x, test_y = tflearn.datasets.mnist.load_data(
    one_hot=True)

train_x = train_x.reshape(-1, 28, 28, 1)
test_x = test_x.reshape(-1, 28, 28, 1)


# Define the neural network model
conv_net = input_data(shape=[None, 28, 28, 1], name='input')
conv_net = conv_2d(conv_net, 32, 2, activation='relu')
conv_net = max_pool_2d(conv_net, 2)
conv_net = conv_2d(conv_net, 64, 2, activation='relu')
conv_net = max_pool_2d(conv_net, 2)
conv_net = fully_connected(conv_net, 1024, activation='relu')
conv_net = dropout(conv_net, 0.8)
conv_net = fully_connected(conv_net, 10, activation='softmax')
conv_net = regression(conv_net,
                      optimizer='adam',
                      loss='categorical_crossentropy',
                      name='output')


model = tflearn.DNN(conv_net)


# Training
model.fit({'input': train_x}, {'output': train_y},
          n_epoch=13,
Example #39

def build_model(optimizer=HYPERPARAMS.optimizer,
                optimizer_param=HYPERPARAMS.optimizer_param,
                learning_rate=HYPERPARAMS.learning_rate,
                keep_prob=HYPERPARAMS.keep_prob,
                learning_rate_decay=HYPERPARAMS.learning_rate_decay,
                decay_step=HYPERPARAMS.decay_step):

    images_network = input_data(
        shape=[None, NETWORK.input_size, NETWORK.input_size, 1], name='input1')
    images_network = conv_2d(images_network,
                             64,
                             5,
                             activation=NETWORK.activation)
    #images_network = local_response_normalization(images_network)
    if NETWORK.use_batchnorm_after_conv_layers:
        images_network = batch_normalization(images_network)
    images_network = max_pool_2d(images_network, 3, strides=2)
    images_network = conv_2d(images_network,
                             64,
                             5,
                             activation=NETWORK.activation)
    if NETWORK.use_batchnorm_after_conv_layers:
        images_network = batch_normalization(images_network)
    images_network = max_pool_2d(images_network, 3, strides=2)
    images_network = conv_2d(images_network,
                             128,
                             4,
                             activation=NETWORK.activation)
    if NETWORK.use_batchnorm_after_conv_layers:
        images_network = batch_normalization(images_network)
    images_network = dropout(images_network, keep_prob=keep_prob)
    images_network = fully_connected(images_network,
                                     1024,
                                     activation=NETWORK.activation)
    if NETWORK.use_batchnorm_after_fully_connected_layers:
        images_network = batch_normalization(images_network)

    if NETWORK.use_landmarks or NETWORK.use_hog_and_landmarks:
        if NETWORK.use_hog_sliding_window_and_landmarks:
            landmarks_network = input_data(shape=[None, 2728], name='input2')
        elif NETWORK.use_hog_and_landmarks:
            landmarks_network = input_data(shape=[None, 208], name='input2')
        else:
            landmarks_network = input_data(shape=[None, 68, 2], name='input2')
        landmarks_network = fully_connected(landmarks_network,
                                            1024,
                                            activation=NETWORK.activation)
        if NETWORK.use_batchnorm_after_fully_connected_layers:
            landmarks_network = batch_normalization(landmarks_network)
        landmarks_network = fully_connected(landmarks_network,
                                            40,
                                            activation=NETWORK.activation)
        if NETWORK.use_batchnorm_after_fully_connected_layers:
            landmarks_network = batch_normalization(landmarks_network)
        images_network = fully_connected(images_network,
                                         40,
                                         activation=NETWORK.activation)
        network = merge([images_network, landmarks_network], 'concat', axis=1)
    else:
        network = images_network
    network = fully_connected(network,
                              NETWORK.output_size,
                              activation='softmax')

    if optimizer == 'momentum':
        optimizer = Momentum(learning_rate=learning_rate,
                             momentum=optimizer_param,
                             lr_decay=learning_rate_decay,
                             decay_step=decay_step)
    elif optimizer == 'adam':
        optimizer = Adam(learning_rate=learning_rate,
                         beta1=optimizer_param,
                         beta2=learning_rate_decay)
    else:
        print "Unknown optimizer: {}".format(optimizer)
    network = regression(network,
                         optimizer=optimizer,
                         loss=NETWORK.loss,
                         learning_rate=learning_rate,
                         name='output')

    return network
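
Since the graph above declares two named input_data layers ('input1' and 'input2'), training feeds both inputs by name. A hypothetical usage sketch; X_images, X_landmarks and y_labels are assumed arrays of matching shapes:

network = build_model(optimizer='momentum')
model = tflearn.DNN(network)
model.fit({'input1': X_images, 'input2': X_landmarks},  # assumed training arrays
          {'output': y_labels},
          n_epoch=10, show_metric=True)
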
Example #40

        drug_gru_2_gate_matrix.append(v)
    elif "GRU_3/GRU_3/GRUCell/Candidate/Linear/Matrix" in v.name:
        drug_gru_2_candidate_matrix.append(v)
    elif "GRU_3/GRU_3/GRUCell/Gates/Linear/Bias" in v.name:
        drug_gru_2_gate_bias.append(v)
    elif "GRU_3/GRU_3/GRUCell/Candidate/Linear/Bias" in v.name:
        drug_gru_2_candidate_bias.append(v)
    elif "Embedding_1" in v.name:
        drug_embd_W.append(v)

fc_1 = fully_connected(pool_2,
                       num1_neurons,
                       activation='leakyrelu',
                       weights_init="xavier",
                       name='fully1')
drop_2 = dropout(fc_1, drop_out)
fc_2 = fully_connected(drop_2,
                       num2_neurons,
                       activation='leakyrelu',
                       weights_init="xavier",
                       name='fully2')
drop_3 = dropout(fc_2, drop_out)
fc_3 = fully_connected(drop_3,
                       num3_neurons,
                       activation='leakyrelu',
                       weights_init="xavier",
                       name='fully3')
drop_4 = dropout(fc_3, drop_out)
linear = fully_connected(drop_4, 5, activation='softmax', name='fully4')
classification = regression(linear,
                            optimizer='adam',
Example #41

def searchlist(request):
    image = request.GET.get('img')

    img_name = image.split('-')[0]

    import glob
    import shutil
    import os
    import cv2  # working with, mainly resizing, images
    import numpy as np  # dealing with arrays
    from random import shuffle  # mixing up or currently ordered data that might lead our network astray in training.
    from tqdm import tqdm  # a nice pretty percentage bar for tasks. Thanks to viewer Daniel Bühler for this suggestion
    if img_name == 'd':
        verify_dir = 'testpicture/0'
    elif img_name == 'm':
        verify_dir = 'testpicture/1'
    elif img_name == 'a':
        verify_dir = 'testpicture/2'
    else:
        verify_dir = 'testpicture/4'
    IMG_SIZE = 50
    LR = 1e-3
    MODEL_NAME = 'diabetic-{}-{}.model'.format(LR, '2conv-basic')

    def process_verify_data():
        verifying_data = []
        for img in tqdm(os.listdir(verify_dir)):
            path = os.path.join(verify_dir, img)
            img_num = img.split('.')[0]
            img = cv2.imread(path, cv2.IMREAD_COLOR)
            img = cv2.resize(img, (IMG_SIZE, IMG_SIZE))
            verifying_data.append([np.array(img), img_num])
        np.save('verify_data.npy', verifying_data)
        return verifying_data

    verify_data = process_verify_data()
    # verify_data = np.load('verify_data.npy')

    import tflearn
    from tflearn.layers.conv import conv_2d, max_pool_2d
    from tflearn.layers.core import input_data, dropout, fully_connected
    from tflearn.layers.estimator import regression
    import tensorflow as tf

    tf.reset_default_graph()

    convnet = input_data(shape=[None, IMG_SIZE, IMG_SIZE, 3], name='input')

    convnet = conv_2d(convnet, 32, 3, activation='relu')
    convnet = max_pool_2d(convnet, 3)

    convnet = conv_2d(convnet, 64, 3, activation='relu')
    convnet = max_pool_2d(convnet, 3)

    convnet = conv_2d(convnet, 128, 3, activation='relu')
    convnet = max_pool_2d(convnet, 3)

    convnet = conv_2d(convnet, 32, 3, activation='relu')
    convnet = max_pool_2d(convnet, 3)

    convnet = conv_2d(convnet, 64, 3, activation='relu')
    convnet = max_pool_2d(convnet, 3)

    convnet = fully_connected(convnet, 1024, activation='relu')
    convnet = dropout(convnet, 0.8)

    convnet = fully_connected(convnet, 4, activation='softmax')
    convnet = regression(convnet, optimizer='adam', learning_rate=LR, loss='categorical_crossentropy', name='targets')

    model = tflearn.DNN(convnet, tensorboard_dir='log')

    if os.path.exists('{}.meta'.format(MODEL_NAME)):
        model.load(MODEL_NAME)
        print('model loaded!')

    import matplotlib.pyplot as plt

    fig = plt.figure()
    for num, data in enumerate(verify_data):

        img_num = data[1]
        img_data = data[0]

        y = fig.add_subplot(3, 4, num + 1)
        orig = img_data
        data = img_data.reshape(IMG_SIZE, IMG_SIZE, 3)
        # model_out = model.predict([data])[0]
        model_out = model.predict([data])[0]

        if np.argmax(model_out) == 0:
            str_label = 'No Diabetic Retinopathy !'
            rating= '0'
        elif np.argmax(model_out) == 1:
            str_label = 'Mild Diabetic Retinopathy !'
            rating = '1'
        elif np.argmax(model_out) == 2:
            str_label = 'Moderate Diabetic Retinopathy!'
            rating = '2'

        elif np.argmax(model_out) == 3:
            str_label = 'Proliferative Diabetic Retinopathy !'
            rating = '3'


        image_url = '/images/' + image
        context = {
            "image" : image,
            "url": image_url,
            "str_label" : str_label,
            "rating" : rating,
        }



        return render(request, 'sajilo/searchlist.html', context)
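
The tf.reset_default_graph() call above matters in a view like this: the graph is rebuilt on every request, and without the reset the second request would collide with variables created by the first. A minimal sketch of the pattern, where build_convnet() is a hypothetical helper wrapping the layer stack above:

import tensorflow as tf

tf.reset_default_graph()   # discard the graph built by the previous request
model = build_convnet()    # hypothetical helper that re-creates the tflearn.DNN
model.load(MODEL_NAME)
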
Example #42
convnet = max_pool_2d(convnet, 2)
convnet = local_response_normalization(convnet)
print(convnet.shape)

convnet = conv_2d(convnet, 64, 5, activation='relu')
convnet = local_response_normalization(convnet)
convnet = max_pool_2d(convnet, 2)
print(convnet.shape)

convnet = conv_2d(convnet, 128, 5, activation='relu')
convnet = max_pool_2d(convnet, 2)
convnet = local_response_normalization(convnet)
print(convnet.shape)

convnet = fully_connected(convnet, 1024, activation='relu')
convnet = dropout(convnet, 0.5)

convnet = fully_connected(convnet, 512, activation='relu')
convnet = dropout(convnet, 0.5)

convnet = fully_connected(convnet, 5, activation='softmax')
convnet = regression(convnet,
                     optimizer='adam',
                     learning_rate=LR,
                     loss='categorical_crossentropy',
                     name='targets')

model = tflearn.DNN(convnet)
model.load("/home/pallab/gestures-cnn/tfmodels/" + MODEL_NAME)

cap = cv2.VideoCapture(0)
Example #43
#Layer3
convnet = conv_2d(convnet, 64, 2, activation='relu')
convnet = max_pool_2d(convnet, 2)

#Layer4
convnet = conv_2d(convnet, 64, 2, activation='relu')
convnet = max_pool_2d(convnet, 2)

#Layer5
convnet = conv_2d(convnet, 64, 2, activation='relu')
convnet = max_pool_2d(convnet, 2)

#Layer6
convnet = fully_connected(convnet, 1024, activation='relu')
convnet = dropout(convnet, 0.8)

#Output Layer
convnet = fully_connected(convnet, 2, activation='softmax')
convnet = regression(convnet,
                     optimizer='adam',
                     learning_rate=LR,
                     loss='categorical_crossentropy',
                     name='targets')

#print(train_data)

model = tflearn.models.DNN(convnet, tensorboard_dir='log')

if os.path.exists('{}.meta'.format(MODEL_NAME)):
    model.load(MODEL_NAME)
Example #44

img_prep.add_featurewise_stdnorm()

# Real-time data augmentation
img_aug = ImageAugmentation()
img_aug.add_random_flip_leftright()
img_aug.add_random_rotation(max_angle=25.)

# Convolutional network building
network = input_data(shape=[None, 32, 32, 3],
                     data_preprocessing=img_prep,
                     data_augmentation=img_aug)
network = conv_2d(network, 32, 3, activation='relu')
network = max_pool_2d(network, 2)
network = conv_2d(network, 64, 3, activation='relu')
network = conv_2d(network, 64, 3, activation='relu')
network = max_pool_2d(network, 2)
network = fully_connected(network, 512, activation='relu')
network = dropout(network, 0.5)
network = fully_connected(network, 10, activation='softmax')
#network = regression(network, optimizer='adam',
                     #loss='categorical_crossentropy',
                     #learning_rate=0.001)
network = regression(network, optimizer='adadelta',
                     loss='categorical_crossentropy',
                     learning_rate=1)

# Train using classifier
model = tflearn.DNN(network, tensorboard_verbose=0)
model.fit(X, Y, n_epoch=50, shuffle=True, validation_set=(X_test, Y_test),
          show_metric=True, batch_size=96, run_id='cifar10_cnn')
Example #45

trainingData = createTrainData()

import tflearn
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.estimator import regression

NeuralNet = input_data(shape=[None, imageSize, imageSize, 1], name='input')
NeuralNet = conv_2d(NeuralNet, 32, 2, activation='relu')
NeuralNet = max_pool_2d(NeuralNet, 2)
NeuralNet = conv_2d(NeuralNet, 64, 2, activation='relu')
NeuralNet = max_pool_2d(NeuralNet, 2)
NeuralNet = fully_connected(NeuralNet, 1024, activation='relu')
NeuralNet = dropout(NeuralNet, 0.8)
NeuralNet = fully_connected(NeuralNet, 2, activation='softmax')
NeuralNet = regression(NeuralNet, optimizer='adam', learning_rate=LearningRate, loss='categorical_crossentropy', name='targets')
model = tflearn.DNN(NeuralNet, tensorboard_dir='log')


shuffle(trainingData)
train = trainingData[:-100]
test = trainingData[-100:]

X = numpy.array([i[0] for i in train]).reshape(-1,imageSize, imageSize, 1)
Y = [i[1] for i in train]

test_x = numpy.array([i[0] for i in test]).reshape(-1,imageSize, imageSize, 1)
test_y = [i[1] for i in test]
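
A hypothetical next step for the snippet above: train on the prepared arrays, keying the feed dicts on the layer names declared in the network ('input' and 'targets'); the run_id is an arbitrary label:

model.fit({'input': X}, {'targets': Y}, n_epoch=10,
          validation_set=({'input': test_x}, {'targets': test_y}),
          show_metric=True, run_id='neural_net_demo')  # run_id assumed
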
Example #46
# Convolutional network building
# Hyper params:
_learning_rate = 0.001
_dropout = 0.5

network = input_data(shape=[None, 20, 20, 3],
                     data_preprocessing=img_prep,
                     data_augmentation=img_aug)
network = conv_2d(network, 32, 3, activation='relu')
network = max_pool_2d(network, 2)
network = conv_2d(network, 64, 3, activation='relu')
network = conv_2d(network, 64, 3, activation='relu')
network = max_pool_2d(network, 2)
network = fully_connected(network, 512, activation='relu')
network = dropout(network, _dropout)
network = fully_connected(network, 2, activation='softmax')
network = regression(network,
                     optimizer='adam',
                     loss='categorical_crossentropy',
                     learning_rate=_learning_rate)
model = tflearn.DNN(network, tensorboard_dir='log', tensorboard_verbose=0)

#load the trained model
model.load(model_path)

img = Image.open(images_path + "image1.jpg")

fig, ax = plt.subplots(1)
ax.imshow(img)
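
A hypothetical continuation: the input layer above expects 20x20x3 arrays, so the PIL image must be resized before prediction (numpy assumed imported as np):

img_small = np.asarray(img.resize((20, 20)), dtype=np.float32)  # match input shape
pred = model.predict([img_small])[0]
print('class probabilities:', pred)
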
Example #47
    def videoFetch(self, thread_id):
        path = self.path
        img_width = 175
        img_height = 150
        testing_data = []
        start_time = time.time()
        cap = cv2.VideoCapture(path)
        frame_count = cap.get(cv2.CAP_PROP_FRAME_COUNT) - 1
        fragment_size = frame_count / 8
        init_frame = math.floor(fragment_size * thread_id)
        print(
            "Thread {} starting Frame Extraction from {}th frame. Please wait for sometime."
            .format(thread_id, init_frame))
        end_frame = math.floor(fragment_size * (thread_id + 1) - 1)
        count = init_frame
        cap.set(1, init_frame)
        print("Frame Extraction in Progress by Thread {}".format(thread_id))
        while cap.isOpened():
            ret, frame = cap.read()
            if (ret):
                img = cv2.resize(frame, (img_width, img_height))
                img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
                img_num = "%#05d" % (count + 1)
                testing_data.append([np.array(img), img_num])
            count = count + 1
            if (count == end_frame):
                end_time = time.time()
                cap.release()
                print(
                    "Thread {} finished extracting frames.\n{} frames found by Thread {}"
                    .format(thread_id, end_frame - init_frame, thread_id))
                print("It took {} seconds for Frame Extraction by Thread {}".
                      format(end_time - start_time, thread_id))
                break
        # np.save('/home/ghost/Desktop/ecc/test_data_{}.npy'.format(thread_id), testing_data)
        IMG_SIZE1 = 175
        IMG_SIZE2 = 150
        LR = 0.0001
        MODEL_NAME = 'ECR-{}-{}.model'.format(LR, '2conv-basic')
        tf.reset_default_graph()
        convnet = input_data(shape=[None, IMG_SIZE1, IMG_SIZE2, 1],
                             name='input')
        convnet = conv_2d(convnet, 32, 2, activation='relu')
        convnet = max_pool_2d(convnet, 2)
        convnet = conv_2d(convnet, 32, 2, activation='relu')
        convnet = max_pool_2d(convnet, 2)
        convnet = conv_2d(convnet, 32, 2, activation='relu')
        convnet = max_pool_2d(convnet, 2)
        convnet = conv_2d(convnet, 64, 2, activation='relu')
        convnet = max_pool_2d(convnet, 2)
        convnet = dropout(convnet, 0.3)
        convnet = conv_2d(convnet, 64, 2, activation='relu')
        convnet = max_pool_2d(convnet, 2)
        convnet = dropout(convnet, 0.3)
        convnet = conv_2d(convnet, 64, 2, activation='relu')
        convnet = max_pool_2d(convnet, 2)
        convnet = dropout(convnet, 0.3)
        convnet = conv_2d(convnet, 128, 2, activation='relu')
        convnet = max_pool_2d(convnet, 2)
        convnet = conv_2d(convnet, 128, 2, activation='relu')
        convnet = max_pool_2d(convnet, 2)
        convnet = conv_2d(convnet, 128, 2, activation='relu')
        convnet = max_pool_2d(convnet, 2)
        convnet = flatten(convnet)

        convnet = fully_connected(convnet, 256, activation='relu')
        convnet = dropout(convnet, 0.3)
        convnet = fully_connected(convnet, 512, activation='relu')
        convnet = dropout(convnet, 0.3)
        convnet = fully_connected(convnet, 1024, activation='relu')
        convnet = fully_connected(convnet, 2, activation='softmax')
        convnet = regression(convnet,
                             optimizer='adam',
                             learning_rate=LR,
                             loss='binary_crossentropy',
                             name='targets')
        model = tflearn.DNN(convnet, tensorboard_dir='log')
        if os.path.exists('{}.meta'.format(MODEL_NAME)):
            model.load(MODEL_NAME)
            print('Explicit Content Censor Loaded by Thread {}'.format(
                thread_id))
        explicit = 0
        non_explicit = 0
        print('Video Censoring started by Thread {}'.format(thread_id))
        for num, data in enumerate(testing_data[:]):
            # explicit: [1,0]
            # normal: [0,1]
            img_data = data[0]
            img_no = data[1]
            data = img_data.reshape(IMG_SIZE1, IMG_SIZE2, 1)
            model_out = model.predict([data])[0]
            actual_frame_num = init_frame + num
            if (np.argmax(model_out) == 0):
                explicit_frames.append(actual_frame_num)
Example #48

X_train = X  # assumed; the first line of this snippet is truncated
y_train = Y
X_test = testX
y_test = testY

X_train = X_train.reshape([-1, 28, 28, 1])
X_test = X_test.reshape([-1, 28, 28, 1])

network = input_data(shape=[None, 28, 28, 1], name='input')
network = conv_2d(network, 32, 3, activation='relu', regularizer="L2")
network = max_pool_2d(network, 2)
network = local_response_normalization(network)
network = conv_2d(network, 64, 3, activation='relu', regularizer="L2")
network = max_pool_2d(network, 2)
network = local_response_normalization(network)
network = fully_connected(network, 128, activation='tanh', name='dense1')
network = dropout(network, 0.8)
network = fully_connected(network, 256, activation='tanh', name='dense2')
network = dropout(network, 0.8)
network = fully_connected(network, 10, activation='softmax')
network = regression(network,
                     optimizer='adam',
                     learning_rate=0.01,
                     loss='categorical_crossentropy',
                     name='target')

model = tflearn.DNN(network, tensorboard_verbose=0)

model.fit({'input': X_train}, {'target': y_train},
          n_epoch=20,
          validation_set=({
              'input': X_test
Example #49
def deepLearning():

    import os
    import cv2
    import numpy as np

    img_size = 50
    lr = 1e-3
    epoch = 100
    step = 500

    img_test_dir = 'D:\\code\\16462\\static\\preprocessed_images'

    model_name = "D:\\code\\16462\\deeplearning\\Processed_PA-0.001-8tryconv-basic-project-50.model"

    import tflearn
    from tflearn.layers.conv import conv_2d, max_pool_2d
    from tflearn.layers.core import input_data, dropout, fully_connected
    from tflearn.layers.estimator import regression

    import tensorflow as tf

    tf.reset_default_graph()

    convnet = input_data(shape=[None, img_size, img_size, 1], name='input')

    convnet = conv_2d(convnet, 32, 5, activation='relu')
    convnet = max_pool_2d(convnet, 2)

    convnet = conv_2d(convnet, 64, 5, activation='relu')
    convnet = max_pool_2d(convnet, 2)

    convnet = conv_2d(convnet, 32, 5, activation='relu')
    convnet = max_pool_2d(convnet, 2)

    convnet = conv_2d(convnet, 64, 5, activation='relu')
    convnet = max_pool_2d(convnet, 2)

    convnet = conv_2d(convnet, 32, 5, activation='relu')
    convnet = max_pool_2d(convnet, 2)

    convnet = conv_2d(convnet, 64, 5, activation='relu')
    convnet = max_pool_2d(convnet, 2)

    convnet = conv_2d(convnet, 32, 5, activation='relu')
    convnet = max_pool_2d(convnet, 2)

    convnet = conv_2d(convnet, 64, 5, activation='relu')
    convnet = max_pool_2d(convnet, 2)

    convnet = fully_connected(convnet, 1024, activation='relu')
    convnet = dropout(convnet, 0.5)

    convnet = fully_connected(convnet, 2, activation='softmax')
    convnet = regression(convnet,
                         optimizer='adam',
                         learning_rate=lr,
                         loss='categorical_crossentropy',
                         name='targets')

    model = tflearn.DNN(convnet, tensorboard_dir='log')

    model.load(model_name)

    def label_img(img):
        #word_label=img.split('.')[-3]
        if img == 'Cardiomegaly': return [1, 0]
        elif img == 'No Cardiomegaly': return [0, 1]

    def create_img_test_data():
        img_testing_data = []
        #count=0
        for img in os.listdir(img_test_dir):

            path = os.path.join(img_test_dir, img)
            img = cv2.resize(cv2.imread(path, cv2.IMREAD_GRAYSCALE),
                             (img_size, img_size))
            img_testing_data.append([np.array(img)])

        np.save('new_img_test_data_trainDown.npy', img_testing_data)
        return img_testing_data

    create_img_test_data()

    test_img = np.load('new_img_test_data_trainDown.npy')

    # import matplotlib.pyplot as plt

    # plt.rcParams['figure.figsize'] = (15,15)

    result = []

    for num, data in enumerate(test_img[:]):

        count = 0
        #img_num = data[1]
        img_data = data[0]

        orig = img_data
        data = img_data.reshape(img_size, img_size, 1)

        model_out = model.predict([data])[0]

        if np.argmax(model_out) == 1: str_label = 'No Cardiomegaly'
        else: str_label = 'Cardiomegaly'

        result.append(str_label)

    return result
    # print(result)


# deepLearning()
Example #50
def network1(train, train_amount):
    global model
    # region SPLIT DATA FOR TRAIN/VALIDATION
    train_amount = int(train_amount * 5.5 / 6)

    x_train = np.array([i[0] for i in train[:train_amount]
                        ]).reshape(-1, s.IMG_SIZE, s.IMG_SIZE, 1)
    x_train = x_train / 255.0
    y_train = [i[1] for i in train[:train_amount]]

    x_validation = np.array([i[0] for i in train[train_amount:]
                             ]).reshape(-1, s.IMG_SIZE, s.IMG_SIZE, 1)
    x_validation = x_validation / 255.0
    y_validation = [i[1] for i in train[train_amount:]]
    # endregion

    # region NETWORK
    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center(mean=[0.4735053442384178])

    network = input_data(shape=[None, s.IMG_SIZE, s.IMG_SIZE, 1],
                         name='input',
                         data_preprocessing=img_prep)

    network = conv_2d(network, 32, 3, activation='relu', scope='conv1_1')
    network = conv_2d(network, 64, 3, activation='relu', scope='conv1_2')
    network = max_pool_2d(network, 2, strides=2, name='maxpool_1')

    network = conv_2d(network, 128, 3, activation='relu', scope='conv2_1')
    network = max_pool_2d(network, 2, strides=2, name='maxpool_2')

    network = conv_2d(network, 128, 3, activation='relu', scope='conv3_1')
    network = max_pool_2d(network, 2, strides=2, name='maxpool_3')

    network = conv_2d(network, 256, 3, activation='relu', scope='conv4_1')
    network = max_pool_2d(network, 2, strides=2, name='maxpool_4')

    network = fully_connected(network, 1024, activation='relu', scope='fc5')
    network = dropout(network, 0.5, name='dropout_1')

    network = fully_connected(network, 1024, activation='relu', scope='fc6')
    network = dropout(network, 0.5, name='dropout_2')

    network = fully_connected(network,
                              s.len_animals,
                              activation='softmax',
                              scope='fc7')

    network = regression(network,
                         optimizer='adam',
                         loss='categorical_crossentropy',
                         learning_rate=s.LR,
                         name='targets')

    model = tflearn.DNN(network, tensorboard_verbose=0, tensorboard_dir='log')
    # endregion

    if os.path.exists('{}.meta'.format(s.MODEL_NAME)):
        model.load(s.MODEL_NAME)
        print('Model loaded')

    # region TRAIN1
    model.fit(x_train,
              y_train,
              n_epoch=12,
              validation_set=({
                  'input': x_validation
              }, {
                  'targets': y_validation
              }),
              shuffle=True,
              snapshot_epoch=True,
              show_metric=True,
              batch_size=100,
              run_id=s.MODEL_NAME)
    # endregion

    # region SAVE
    model.save(s.MODEL_NAME)
    print('Network trained and saved as {0}'.format(s.MODEL_NAME))
Example #51
train, test, train_labels, test_labels = train_test_split(X, Y, test_size=0.09)

train = train.reshape([-1, 28, 28, 1])
test = test.reshape([-1, 28, 28, 1])

cnn = input_data(shape=[None, 28, 28, 1], name='input')

cnn = conv_2d(cnn, 32, 2, activation='relu')
cnn = max_pool_2d(cnn, 2)

cnn = conv_2d(cnn, 64, 2, activation='relu')
cnn = max_pool_2d(cnn, 2)

cnn = fully_connected(cnn, 28, activation='relu')
cnn = dropout(cnn, 0.8)

cnn = fully_connected(cnn, 3, activation='softmax')
cnn = regression(cnn,
                 optimizer='adam',
                 learning_rate=0.01,
                 loss='categorical_crossentropy',
                 name='targets')

model = tflearn.DNN(cnn)

model.fit({'input': train}, {'targets': train_labels},
          n_epoch=10,
          validation_set=({
              'input': test
          }, {
Example #52
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.embedding_ops import embedding
from tflearn.layers.recurrent import bidirectional_rnn, BasicLSTMCell
from tflearn.layers.estimator import regression

# IMDB Dataset loading
train, test, _ = imdb.load_data(path='imdb.pkl',
                                n_words=10000,
                                valid_portion=0.1)
trainX, trainY = train
testX, testY = test

# Data preprocessing
# Sequence padding
trainX = pad_sequences(trainX, maxlen=200, value=0.)
testX = pad_sequences(testX, maxlen=200, value=0.)
# Converting labels to binary vectors
trainY = to_categorical(trainY)
testY = to_categorical(testY)

# Network building
net = input_data(shape=[None, 200])
net = embedding(net, input_dim=20000, output_dim=128)  # note: input_dim exceeds the 10,000-word vocabulary loaded above, so half the rows go unused
net = bidirectional_rnn(net, BasicLSTMCell(128), BasicLSTMCell(128))
net = dropout(net, 0.5)
net = fully_connected(net, 2, activation='softmax')
net = regression(net, optimizer='adam', loss='categorical_crossentropy')

# Training
model = tflearn.DNN(net, clip_gradients=0., tensorboard_verbose=2)
model.fit(trainX, trainY, validation_set=0.1, show_metric=True, batch_size=64)
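
A hypothetical inference step for the trained bi-LSTM; review_ids stands for a list of word indices encoded with the same vocabulary, and the label order (index 1 = positive) follows the to_categorical encoding above:

review = pad_sequences([review_ids], maxlen=200, value=0.)  # review_ids assumed
pos_prob = model.predict(review)[0][1]  # index 1 assumed to be the positive class
print('positive probability: {:.3f}'.format(pos_prob))
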
Example #53
def create_train_data():
    training_data = []
    for img in tqdm(os.listdir(TRAIN_DIR)):
        label = label_img(img)
        path = os.path.join(TRAIN_DIR, img)
        img = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
        img = cv2.resize(img, (IMG_SIZE, IMG_SIZE))
        training_data.append([np.array(img), np.array(label)])
    shuffle(training_data)
    np.save('train_data.npy', training_data)
    return training_data

def process_test_data():
    testing_data = []
    for img in tqdm(os.listdir(TEST_DIR)):
        path = os.path.join(TEST_DIR, img)
        img_num = img.split('.')[0]
        img = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
        img = cv2.resize(img, (IMG_SIZE, IMG_SIZE))
        testing_data.append([np.array(img), img_num])
    shuffle(testing_data)
    np.save('test_data.npy', testing_data)
    return testing_data

train_data = create_train_data()
# If you have already created the dataset:
#train_data = np.load('train_data.npy')

import tflearn
import tensorflow as tf
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.estimator import regression
tf.reset_default_graph()

convnet = input_data(shape=[None, IMG_SIZE, IMG_SIZE, 1], name='input')

convnet = conv_2d(convnet, 32, 5, activation='relu')
convnet = max_pool_2d(convnet, 5)

convnet = conv_2d(convnet, 64, 5, activation='relu')
convnet = max_pool_2d(convnet, 5)

convnet = conv_2d(convnet, 128, 5, activation='relu')
convnet = max_pool_2d(convnet, 5)

convnet = conv_2d(convnet, 64, 5, activation='relu')
convnet = max_pool_2d(convnet, 5)

convnet = conv_2d(convnet, 32, 5, activation='relu')
convnet = max_pool_2d(convnet, 5)

convnet = fully_connected(convnet, 1024, activation='relu')
convnet = dropout(convnet, 0.8)

convnet = fully_connected(convnet, 2, activation='softmax')
convnet = regression(convnet, optimizer='adam', learning_rate=LR, loss='categorical_crossentropy', name='targets')

model = tflearn.DNN(convnet, tensorboard_dir='log')



if os.path.exists('C:/Users/H/Desktop/KaggleDogsvsCats/{}.meta'.format(MODEL_NAME)):
    model.load(MODEL_NAME)
    print('model loaded!')

train = train_data[:-500]
test = train_data[-500:]

X = np.array([i[0] for i in train]).reshape(-1,IMG_SIZE,IMG_SIZE,1)
Y = [i[1] for i in train]

test_x = np.array([i[0] for i in test]).reshape(-1,IMG_SIZE,IMG_SIZE,1)
test_y = [i[1] for i in test]

model.fit({'input': X}, {'targets': Y}, n_epoch=10, validation_set=({'input': test_x}, {'targets': test_y}), 
    snapshot_step=500, show_metric=True, run_id=MODEL_NAME)

model.save(MODEL_NAME)

import matplotlib.pyplot as plt

# if you need to create the data:
#test_data = process_test_data()
# if you already have some saved:
test_data = np.load('test_data.npy')

fig=plt.figure()

for num,data in enumerate(test_data[:12]):
    # cat: [1,0]
    # dog: [0,1]
    
    img_num = data[1]
    img_data = data[0]
    
    y = fig.add_subplot(3,4,num+1)
    orig = img_data
    data = img_data.reshape(IMG_SIZE,IMG_SIZE,1)
    #model_out = model.predict([data])[0]
    model_out = model.predict([data])[0]
    
    if np.argmax(model_out) == 1: str_label='Dog'
    else: str_label='Cat'
        
    y.imshow(orig,cmap='gray')
    plt.title(str_label)
    y.axes.get_xaxis().set_visible(False)
    y.axes.get_yaxis().set_visible(False)
plt.show()

with open('submission_file.csv','w') as f:
    f.write('id,label\n')
            
with open('submission_file.csv','a') as f:
    for data in tqdm(test_data):
        img_num = data[1]
        img_data = data[0]
        orig = img_data
        data = img_data.reshape(IMG_SIZE,IMG_SIZE,1)
        model_out = model.predict([data])[0]
        f.write('{},{}\n'.format(img_num,model_out[1]))
Example #54
convnet = max_pool_2d(convnet, 2)

convnet = conv_2d(convnet, 256, 2, activation='relu')
convnet = max_pool_2d(convnet, 2)

convnet = conv_2d(convnet, 256, 2, activation='relu')
convnet = max_pool_2d(convnet, 2)

convnet = conv_2d(convnet, 128, 2, activation='relu')
convnet = max_pool_2d(convnet, 2)

convnet = conv_2d(convnet, 64, 2, activation='relu')
convnet = max_pool_2d(convnet, 2)

convnet = fully_connected(convnet, 1000, activation='relu')
convnet = dropout(convnet, 0.75)

convnet = fully_connected(convnet, 6, activation='softmax')

convnet = regression(convnet,
                     optimizer='adam',
                     learning_rate=0.001,
                     loss='categorical_crossentropy',
                     name='regression')

model = tflearn.DNN(convnet, tensorboard_verbose=0)

# Load Saved Model
model.load("TrainedModel/GestureRecogModel.tfl")

Example #55
def get_network_architecture(image_width, image_height, number_of_classes, learning_rate):

    number_of_channels = 1

    network = input_data(
        shape=[None, image_width, image_height, number_of_channels],
        data_preprocessing=img_prep,
        data_augmentation=img_aug,
        name='InputData'
    )

    """
        def conv_2d(incoming, nb_filters, filter_size, strides=1, padding='same',
                    activation='linear', bias='True', weights_init='uniform_scaling',
                    bias_init='zeros', regularizer=None, weight_decay=0.001,
                    trainable=True, restore=True, reuse=False, scope=None,
                    name='Conv2D')

        network = conv_2d(network, 32, (3, 3), strides=1, padding='same', activation='relu', regularizer='L2', name='Conv2D_1')
        network = max_pool_2d(network, (2, 2), strides=2, padding='same', name='MaxPool2D_1')
        network = avg_pool_2d(network, (2, 2), strides=2, padding='same', name='AvgPool2D_1')
        network = dropout(network, 0.5, name='Dropout_1')
        network = batch_normalization(network, name='BatchNormalization')
        network = flatten(network, name='Flatten')
        network = fully_connected(network, 512, activation='relu', name='FullyConnected_1')
        network = fully_connected(network, number_of_classes, activation='softmax', name='FullyConnected_Final')

        print('  {}: {}'.format('Conv2D................', network.shape))
        print('  {}: {}'.format('MaxPool2D.............', network.shape))
        print('  {}: {}'.format('Dropout...............', network.shape))
        print('  {}: {}'.format('BatchNormalization....', network.shape))
        print('  {}: {}'.format('Flatten...............', network.shape))
        print('  {}: {}'.format('FullyConnected........', network.shape))
        print('  {}: {}'.format('FullyConnected_Final..', network.shape))

        CONV / FC -> Dropout -> BN -> activation function -> ...

        Convolutional filters: { 32, 64, 128 }
        Convolutional filter sizes: { 1, 3, 5, 11 }
        Convolutional strides: 1
        Activation: ReLu

        Pooling kernel sizes: { 2, 3, 4, 5 }
        Pooling kernel strides: 2

        Dropout probability: 0.5
            - Higher probability of keeping in earlier stages
            - Lower probability of keeping in later stages
    """

    print('\nNetwork architecture:')
    print('  {}: {}'.format('InputData.............', network.shape))

    network = conv_2d(network, 16, (7, 7), strides=1, padding='same', activation='relu', regularizer='L2', name='Conv2D_1')
    print('  {}: {}'.format('Conv2D................', network.shape))
    network = batch_normalization(network, name='BatchNormalization_1')
    print('  {}: {}'.format('BatchNormalization....', network.shape))
    network = conv_2d(network, 16, (7, 7), strides=1, padding='same', activation='relu', regularizer='L2', name='Conv2D_2')
    print('  {}: {}'.format('Conv2D................', network.shape))
    network = batch_normalization(network, name='BatchNormalization_2')
    print('  {}: {}'.format('BatchNormalization....', network.shape))
    network = avg_pool_2d(network, (2, 2), strides=2, padding='same', name='AvgPool2D_1')
    print('  {}: {}'.format('AvgPool2D.............', network.shape))
    network = dropout(network, 0.5, name='Dropout_1')
    print('  {}: {}'.format('Dropout...............', network.shape))


    network = conv_2d(network, 32, (5, 5), strides=1, padding='same', activation='relu', regularizer='L2', name='Conv2D_3')
    print('  {}: {}'.format('Conv2D................', network.shape))
    network = batch_normalization(network, name='BatchNormalization_3')
    print('  {}: {}'.format('BatchNormalization....', network.shape))
    network = conv_2d(network, 32, (5, 5), strides=1, padding='same', activation='relu', regularizer='L2', name='Conv2D_4')
    print('  {}: {}'.format('Conv2D................', network.shape))
    network = batch_normalization(network, name='BatchNormalization_4')
    print('  {}: {}'.format('BatchNormalization....', network.shape))
    network = avg_pool_2d(network, (2, 2), strides=2, padding='same', name='AvgPool2D_2')
    print('  {}: {}'.format('AvgPool2D.............', network.shape))
    network = dropout(network, 0.5, name='Dropout_2')
    print('  {}: {}'.format('Dropout...............', network.shape))


    network = conv_2d(network, 64, (3, 3), strides=1, padding='same', activation='relu', regularizer='L2', name='Conv2D_5')
    print('  {}: {}'.format('Conv2D................', network.shape))
    network = batch_normalization(network, name='BatchNormalization_5')
    print('  {}: {}'.format('BatchNormalization....', network.shape))
    network = conv_2d(network, 64, (3, 3), strides=1, padding='same', activation='relu', regularizer='L2', name='Conv2D_6')
    print('  {}: {}'.format('Conv2D................', network.shape))
    network = batch_normalization(network, name='BatchNormalization_6')
    print('  {}: {}'.format('BatchNormalization....', network.shape))
    network = avg_pool_2d(network, (2, 2), strides=2, padding='same', name='AvgPool2D_3')
    print('  {}: {}'.format('AvgPool2D.............', network.shape))
    network = dropout(network, 0.5, name='Dropout_3')
    print('  {}: {}'.format('Dropout...............', network.shape))


    network = conv_2d(network, 128, (3, 3), strides=1, padding='same', activation='relu', regularizer='L2', name='Conv2D_7')
    print('  {}: {}'.format('Conv2D................', network.shape))
    network = batch_normalization(network, name='BatchNormalization_7')
    print('  {}: {}'.format('BatchNormalization....', network.shape))
    network = conv_2d(network, 128, (3, 3), strides=1, padding='same', activation='relu', regularizer='L2', name='Conv2D_8')
    print('  {}: {}'.format('Conv2D................', network.shape))
    network = batch_normalization(network, name='BatchNormalization_8')
    print('  {}: {}'.format('BatchNormalization....', network.shape))
    network = avg_pool_2d(network, (2, 2), strides=2, padding='same', name='AvgPool2D_4')
    print('  {}: {}'.format('AvgPool2D.............', network.shape))
    network = dropout(network, 0.5, name='Dropout_4')
    print('  {}: {}'.format('Dropout...............', network.shape))


    network = conv_2d(network, 256, (3, 3), strides=1, padding='same', activation='relu', regularizer='L2', name='Conv2D_9')
    print('  {}: {}'.format('Conv2D................', network.shape))
    network = batch_normalization(network, name='BatchNormalization_9')
    print('  {}: {}'.format('BatchNormalization....', network.shape))
    network = conv_2d(network, 256, (3, 3), strides=1, padding='same', activation='relu', regularizer='L2', name='Conv2D_10')
    print('  {}: {}'.format('Conv2D................', network.shape))
    network = batch_normalization(network, name='BatchNormalization_10')
    print('  {}: {}'.format('BatchNormalization....', network.shape))
    network = avg_pool_2d(network, (2, 2), strides=2, padding='same', name='AvgPool2D_5')
    print('  {}: {}'.format('AvgPool2D.............', network.shape))
    network = dropout(network, 0.5, name='Dropout_5')
    print('  {}: {}'.format('Dropout...............', network.shape))


    network = flatten(network, name='Flatten')
    print('  {}: {}'.format('Flatten...............', network.shape))


    network = fully_connected(network, 512, activation='relu', name='FullyConnected_1')
    print('  {}: {}'.format('FullyConnected........', network.shape))
    network = dropout(network, 0.5, name='Dropout_6')
    print('  {}: {}'.format('Dropout...............', network.shape))


    network = fully_connected(network, number_of_classes, activation='softmax', name="FullyConnected_Final")
    print('  {}: {}'.format('FullyConnected_Final..', network.shape))


    optimizer = Adam(learning_rate=learning_rate, beta1=0.9, beta2=0.999, epsilon=1e-08, use_locking=False, name='Adam')
    # optimizer = SGD(learning_rate=learning_rate, lr_decay=0.01, decay_step=100, staircase=False, use_locking=False, name='SGD')
    # optimizer = RMSProp(learning_rate=learning_rate, decay=0.9, momentum=0.9, epsilon=1e-10, use_locking=False, name='RMSProp')
    # optimizer = Momentum(learning_rate=learning_rate, momentum=0.9, lr_decay=0.01, decay_step=100, staircase=False, use_locking=False, name='Momentum')

    metric = Accuracy(name='Accuracy')
    # metric = R2(name='Standard Error')
    # metric = WeightedR2(name='Weighted Standard Error')
    # metric = Top_k(k=6, name='Top K')


    network = regression(
        network,
        optimizer=optimizer,
        loss='categorical_crossentropy',
        metric=metric,
        learning_rate=learning_rate,
        name='Regression'
    )

    return network
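
A hypothetical usage sketch for the builder above; it assumes img_prep and img_aug exist at module level, as the input_data call inside the function requires:

network = get_network_architecture(image_width=64, image_height=64,
                                   number_of_classes=10, learning_rate=0.001)
model = tflearn.DNN(network, tensorboard_verbose=0)
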
Example #56

                  name='conv1_3_3_4',
                  weights_init='Xavier')
network = max_pool_2d(network, 2, strides=2)
network = conv_2d(network,
                  512,
                  3,
                  strides=1,
                  activation='relu',
                  regularizer='L2',
                  name='conv1_3_3_5',
                  weights_init='Xavier')
network = max_pool_2d(network, 2, strides=2)
# Fully Connected Layer
network = fully_connected(network, 512, activation='relu')
# Dropout layer (keep_prob=1, so dropout is effectively disabled here)
network = dropout(network, 1)
# Fully Connected Layer
#network = fully_connected(network, 512, activation='relu')
# Dropout layer
#network = dropout(network, 1)
# Fully Connected Layer
network = fully_connected(network, 6, activation='softmax')
# Final network
network = regression(network,
                     optimizer='adam',
                     loss='categorical_crossentropy',
                     learning_rate=0.0007,
                     metric=acc)

# The model with details on where to save
# Will save in current directory
Example #57

def main():
    f = open("results.txt", "w+")

    get_data = create_training_data()
    train_data = get_data[0]
    data_indeces = get_data[1]

    convnet = input_data(shape=[None, IMG_SIZE, IMG_SIZE, 3], name='input')

    convnet = conv_2d(convnet, 32, 3, activation='relu')
    convnet = max_pool_2d(convnet, 3)

    convnet = conv_2d(convnet, 64, 3, activation='relu')
    convnet = max_pool_2d(convnet, 3)

    convnet = conv_2d(convnet, 128, 3, activation='relu')
    convnet = max_pool_2d(convnet, 3)

    convnet = conv_2d(convnet, 32, 3, activation='relu')
    convnet = max_pool_2d(convnet, 3)

    convnet = conv_2d(convnet, 64, 3, activation='relu')
    convnet = max_pool_2d(convnet, 3)

    convnet = fully_connected(convnet, 1024, activation='relu')
    convnet = dropout(convnet, 0.8)

    convnet = fully_connected(convnet, 19, activation='softmax')
    convnet = regression(convnet,
                         optimizer='adam',
                         learning_rate=LR,
                         loss='categorical_crossentropy',
                         name='targets')

    model = tflearn.DNN(convnet, tensorboard_dir='log')

    k_fold = 5
    fold_number = 0

    MODEL_NEW_NAME = 'jycropdisease-fold-new{}-{}-{}.model'.format(
        fold_number + 1, LR, '2conv-basic')

    data = train_data
    print("======= FOLD %d =======" % (fold_number + 1))
    split_data = split_training_data(data, data_indeces, k_fold, fold_number)

    train = split_data['train_data']
    test = split_data['test_data']

    X = np.array([i[0] for i in train]).reshape(-1, IMG_SIZE, IMG_SIZE, 3)
    Y = [i[1] for i in train]

    test_x = np.array([i[0] for i in test]).reshape(-1, IMG_SIZE, IMG_SIZE, 3)
    test_y = [i[1] for i in test]

    model.fit({'input': X}, {'targets': Y},
              n_epoch=8,
              validation_set=({
                  'input': test_x
              }, {
                  'targets': test_y
              }),
              snapshot_step=1000,
              show_metric=True,
              run_id=MODEL_NEW_NAME)

    model.save(MODEL_NEW_NAME)
    f.close()
Example #58
    def build_network(self):
        # Smaller 'AlexNet'
        # https://github.com/tflearn/tflearn/blob/master/examples/images/alexnet.py
        print('[+] Building CNN')
        '''
        # Why 3 hidden layers?
        # 1986: Backpropagation - usually more than 3 hidden layers are not helpful
        '''

        '''
        [-] input layer
        # This layer is used for feeding data into the network.
        # List of ints, to create a new placeholder
        # shape = [batch, height, width, in_channels]
        '''
        self.network = input_data(shape=[None, SIZE_FACE, SIZE_FACE, 1]) # add data whose shape is [None,48, 48 ,1] into an 'input_data' layer

        '''
        [-] conv_2d
        # arg1 - incoming: [batch, height, width, in_channels]
        # arg2 - nb_filter: the number of convolution filters
        # arg3 - filter_size (kernel size): size of the filters
        # strides - default: 1
        '''
        self.network = conv_2d(self.network, 64, 5, activation='relu') # 1st layer
        #self.network = local_response_normalization(self.network) #
        '''
        [-] max pooling 2D

        # arg1 - incoming:
        # arg2 - kernel_size: pooling kernel size
        # arg3 - strides: strides of the pooling operation, e.g. (0,0)->(0,2)->(0,4)
        '''
        self.network = max_pool_2d(self.network, 3, strides=2) # pool

        self.network = conv_2d(self.network, 64, 5, activation='relu') # 2nd layer
        self.network = max_pool_2d(self.network, 3, strides=2) # pool

        self.network = conv_2d(self.network, 128, 4, activation='relu') # 3rd layer
        '''
        [-] Dropout
        reference: tflearn.org/layers/core/#dropout
        Introduction:
        # Outputs the input elements scaled up by 1/keep_prob, so that the expected sum is unchanged.
        # By default, each element is kept or dropped independently. If noise_shape is specified, it must be
        broadcastable to the shape of x, and only dimensions with noise_shape[i] == shape(x)[i] make
        independent decisions. For example, if shape(x) = [k, l, m, n] and noise_shape = [k, 1, 1, n], each
        batch and channel component is kept independently and each row and column is kept or dropped together.

        # arg1 - incoming: input tensor
        # arg2 - keep_prob: a float representing the probability that each element is kept
        '''
        self.network = dropout(self.network, 0.3)  # dropout with keep_prob=0.3

        '''
        [-]fully_connected
        return : 2D Tensor[samples, n_units]
        
        arg1 - incoming: 2+D Tensor []
        arg2 - n_units: the # of units for this layer
        '''
        self.network = fully_connected(self.network, 3072, activation='relu') # A fully connected layer
        self.network = fully_connected(
            self.network, len(EMOTIONS), activation='softmax') # A fully connected layer
        '''
        [-] regression
        Applies a regression (estimator) layer to the provided input.
        # optimizer: optimizer to use
        # loss: loss function used by this layer's optimizer
        '''
        self.network = regression(
            self.network,
            optimizer='momentum',
            loss='categorical_crossentropy'
        )  # compute the loss and set up the optimizer

        '''
        Deep Neural Network Model
        # network: the network to be used
        # checkpoint_path: the path where model checkpoint files are stored
        # max_checkpoints: maximum number of checkpoints to keep
        # tensorboard_verbose: summary verbosity level; it accepts different levels of TensorBoard logging.
        '''
        self.model = tflearn.DNN(
            self.network,
            checkpoint_path=SAVE_DIRECTORY + '/emotion_recognition',
            max_checkpoints=1,
            tensorboard_verbose=2
        )  # max_checkpoints=1: keep only one saved model file
        self.load_model()
Ejemplo n.º 59
0
def construct_inceptionv1onfire(x, y):

    # Build network as per architecture in [Dunnings/Breckon, 2018]

    network = input_data(shape=[None, y, x, 3])

    conv1_7_7 = conv_2d(network,
                        64,
                        5,
                        strides=2,
                        activation='relu',
                        name='conv1_7_7_s2')

    pool1_3_3 = max_pool_2d(conv1_7_7, 3, strides=2)
    pool1_3_3 = local_response_normalization(pool1_3_3)

    conv2_3_3_reduce = conv_2d(pool1_3_3,
                               64,
                               1,
                               activation='relu',
                               name='conv2_3_3_reduce')
    conv2_3_3 = conv_2d(conv2_3_3_reduce,
                        128,
                        3,
                        activation='relu',
                        name='conv2_3_3')

    conv2_3_3 = local_response_normalization(conv2_3_3)
    pool2_3_3 = max_pool_2d(conv2_3_3,
                            kernel_size=3,
                            strides=2,
                            name='pool2_3_3_s2')

    inception_3a_1_1 = conv_2d(pool2_3_3,
                               64,
                               1,
                               activation='relu',
                               name='inception_3a_1_1')

    inception_3a_3_3_reduce = conv_2d(pool2_3_3,
                                      96,
                                      1,
                                      activation='relu',
                                      name='inception_3a_3_3_reduce')
    inception_3a_3_3 = conv_2d(inception_3a_3_3_reduce,
                               128,
                               filter_size=3,
                               activation='relu',
                               name='inception_3a_3_3')
    inception_3a_5_5_reduce = conv_2d(pool2_3_3,
                                      16,
                                      filter_size=1,
                                      activation='relu',
                                      name='inception_3a_5_5_reduce')
    inception_3a_5_5 = conv_2d(inception_3a_5_5_reduce,
                               32,
                               filter_size=5,
                               activation='relu',
                               name='inception_3a_5_5')
    inception_3a_pool = max_pool_2d(
        pool2_3_3,
        kernel_size=3,
        strides=1,
    )
    inception_3a_pool_1_1 = conv_2d(inception_3a_pool,
                                    32,
                                    filter_size=1,
                                    activation='relu',
                                    name='inception_3a_pool_1_1')

    # merge the inception_3a_* branches
    inception_3a_output = merge([
        inception_3a_1_1, inception_3a_3_3, inception_3a_5_5,
        inception_3a_pool_1_1
    ],
                                mode='concat',
                                axis=3)
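    # Channel bookkeeping: concatenating along axis 3 (channels) gives
    # 64 + 128 + 32 + 32 = 256 feature maps in inception_3a_output.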

    inception_3b_1_1 = conv_2d(inception_3a_output,
                               128,
                               filter_size=1,
                               activation='relu',
                               name='inception_3b_1_1')
    inception_3b_3_3_reduce = conv_2d(inception_3a_output,
                                      128,
                                      filter_size=1,
                                      activation='relu',
                                      name='inception_3b_3_3_reduce')
    inception_3b_3_3 = conv_2d(inception_3b_3_3_reduce,
                               192,
                               filter_size=3,
                               activation='relu',
                               name='inception_3b_3_3')
    inception_3b_5_5_reduce = conv_2d(inception_3a_output,
                                      32,
                                      filter_size=1,
                                      activation='relu',
                                      name='inception_3b_5_5_reduce')
    inception_3b_5_5 = conv_2d(inception_3b_5_5_reduce,
                               96,
                               filter_size=5,
                               activation='relu',
                               name='inception_3b_5_5')
    inception_3b_pool = max_pool_2d(inception_3a_output,
                                    kernel_size=3,
                                    strides=1,
                                    name='inception_3b_pool')
    inception_3b_pool_1_1 = conv_2d(inception_3b_pool,
                                    64,
                                    filter_size=1,
                                    activation='relu',
                                    name='inception_3b_pool_1_1')

    # merge the inception_3b_* branches
    inception_3b_output = merge([
        inception_3b_1_1, inception_3b_3_3, inception_3b_5_5,
        inception_3b_pool_1_1
    ],
                                mode='concat',
                                axis=3,
                                name='inception_3b_output')

    pool3_3_3 = max_pool_2d(inception_3b_output,
                            kernel_size=3,
                            strides=2,
                            name='pool3_3_3')
    inception_4a_1_1 = conv_2d(pool3_3_3,
                               192,
                               filter_size=1,
                               activation='relu',
                               name='inception_4a_1_1')
    inception_4a_3_3_reduce = conv_2d(pool3_3_3,
                                      96,
                                      filter_size=1,
                                      activation='relu',
                                      name='inception_4a_3_3_reduce')
    inception_4a_3_3 = conv_2d(inception_4a_3_3_reduce,
                               208,
                               filter_size=3,
                               activation='relu',
                               name='inception_4a_3_3')
    inception_4a_5_5_reduce = conv_2d(pool3_3_3,
                                      16,
                                      filter_size=1,
                                      activation='relu',
                                      name='inception_4a_5_5_reduce')
    inception_4a_5_5 = conv_2d(inception_4a_5_5_reduce,
                               48,
                               filter_size=5,
                               activation='relu',
                               name='inception_4a_5_5')
    inception_4a_pool = max_pool_2d(pool3_3_3,
                                    kernel_size=3,
                                    strides=1,
                                    name='inception_4a_pool')
    inception_4a_pool_1_1 = conv_2d(inception_4a_pool,
                                    64,
                                    filter_size=1,
                                    activation='relu',
                                    name='inception_4a_pool_1_1')

    inception_4a_output = merge([
        inception_4a_1_1, inception_4a_3_3, inception_4a_5_5,
        inception_4a_pool_1_1
    ],
                                mode='concat',
                                axis=3,
                                name='inception_4a_output')

    pool5_7_7 = avg_pool_2d(inception_4a_output, kernel_size=5, strides=1)
    pool5_7_7 = dropout(pool5_7_7, 0.4)
    loss = fully_connected(pool5_7_7, 2, activation='softmax')
    network = regression(loss,
                         optimizer='momentum',
                         loss='categorical_crossentropy',
                         learning_rate=0.001)
    model = tflearn.DNN(network,
                        checkpoint_path='inceptionv1onfire',
                        max_checkpoints=1,
                        tensorboard_verbose=2)

    return model
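# A minimal usage sketch (illustrative, not from the original source; the
# 224x224 size and the dummy frame are assumptions):
#   model = construct_inceptionv1onfire(224, 224)
#   frame = np.zeros((1, 224, 224, 3), dtype=np.float32)  # one dummy RGB frame
#   probs = model.predict(frame)                          # two softmax class probabilities
Ejemplo n.º 60
0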
tf.reset_default_graph()

convent = input_data(shape=[None, IMG_SIZE, IMG_SIZE, 1], name='input')
convent = conv_2d(convent, 32, 5, activation='relu')
convent = max_pool_2d(convent, 5)
convent = conv_2d(convent, 64, 5, activation='relu')
convent = max_pool_2d(convent, 5)
convent = conv_2d(convent, 128, 5, activation='relu')
convent = max_pool_2d(convent, 5)
convent = conv_2d(convent, 64, 5, activation='relu')
convent = max_pool_2d(convent, 5)
convent = conv_2d(convent, 32, 5, activation='relu')
convent = max_pool_2d(convent, 5)
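# Note: tflearn's max_pool_2d defaults its stride to the kernel size, so each
# pool above divides the spatial dimensions by ~5; IMG_SIZE must therefore be
# large enough to survive five successive poolings.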
convent = fully_connected(convent, 1024, activation='relu')
convent = dropout(convent, 0.8)
convent = fully_connected(convent, 2, activation='softmax')
convent = regression(convent, optimizer='adam', learning_rate=LR,
                     loss='categorical_crossentropy', name='targets')

model = tflearn.DNN(convent, tensorboard_dir='log', tensorboard_verbose=0)
model.fit({'input': X_train}, {'targets': y_train}, n_epoch=10,
          validation_set=({'input': X_test}, {'targets': y_test}),
          snapshot_step=500, show_metric=True, run_id=MODEL_NAME)


fig = plt.figure(figsize=(16, 12))

for num, data in enumerate(test_data[:16]):
    img_num = data[1]
    img_data = data[0]