Example #1
def alexnet():
    X, Y = oxflower17.load_data(one_hot=True, resize_pics=(227, 227))

    # Building 'AlexNet'
    network = input_data(shape=[None, 227, 227, 3])
    network = conv_2d(network, 96, 11, strides=4, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = conv_2d(network, 256, 5, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = conv_2d(network, 384, 3, activation='relu')
    network = conv_2d(network, 384, 3, activation='relu')
    network = conv_2d(network, 256, 3, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 17, activation='softmax')
    network = regression(network, optimizer='momentum',
                         loss='categorical_crossentropy',
                         learning_rate=0.001)

    # Training
    model = tflearn.DNN(network, checkpoint_path='model_alexnet',
                        max_checkpoints=1, tensorboard_verbose=2)
    model.fit(X, Y, n_epoch=1000, validation_set=0.1, shuffle=True,
              show_metric=True, batch_size=64, snapshot_step=200,
              snapshot_epoch=False, run_id='alexnet')
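The snippets on this page omit their import headers. A plausible header for Example #1, assuming the standard TFLearn package layout (module paths as in the TFLearn API), would be:

import tflearn
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.normalization import local_response_normalization
from tflearn.layers.estimator import regression
import tflearn.datasets.oxflower17 as oxflower17

The later examples additionally assume numpy, pandas, TensorFlow, and scikit-learn imports wherever they reference np, pd, tf, or metrics.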
Example #2
def build_network(self):
   # Building 'AlexNet'
   # https://github.com/tflearn/tflearn/blob/master/examples/images/alexnet.py
   # https://github.com/DT42/squeezenet_demo
   # https://github.com/yhenon/pysqueezenet/blob/master/squeezenet.py
   print('[+] Building CNN')
   self.network = input_data(shape = [None, SIZE_FACE, SIZE_FACE, 1])
   self.network = conv_2d(self.network, 96, 11, strides = 4, activation = 'relu')
   self.network = max_pool_2d(self.network, 3, strides = 2)
   self.network = local_response_normalization(self.network)
   self.network = conv_2d(self.network, 256, 5, activation = 'relu')
   self.network = max_pool_2d(self.network, 3, strides = 2)
   self.network = local_response_normalization(self.network)
   self.network = conv_2d(self.network, 256, 3, activation = 'relu')
   self.network = max_pool_2d(self.network, 3, strides = 2)
   self.network = local_response_normalization(self.network)
   self.network = fully_connected(self.network, 1024, activation = 'tanh')
   self.network = dropout(self.network, 0.5)
   self.network = fully_connected(self.network, 1024, activation = 'tanh')
   self.network = dropout(self.network, 0.5)
   self.network = fully_connected(self.network, len(EMOTIONS), activation = 'softmax')
   self.network = regression(self.network,
     optimizer = 'momentum',
     loss = 'categorical_crossentropy')
   self.model = tflearn.DNN(
     self.network,
     checkpoint_path = SAVE_DIRECTORY + '/alexnet_mood_recognition',
     max_checkpoints = 1,
     tensorboard_verbose = 2
   )
   self.load_model()
Example #3
def createModel(nbClasses, imageSize):
	print("[+] Creating model...")
	convnet = input_data(shape=[None, imageSize, imageSize, 1], name='input')

	convnet = conv_2d(convnet, 64, 2, activation='elu', weights_init="Xavier")
	convnet = max_pool_2d(convnet, 2)

	convnet = conv_2d(convnet, 128, 2, activation='elu', weights_init="Xavier")
	convnet = max_pool_2d(convnet, 2)

	convnet = conv_2d(convnet, 256, 2, activation='elu', weights_init="Xavier")
	convnet = max_pool_2d(convnet, 2)

	convnet = conv_2d(convnet, 512, 2, activation='elu', weights_init="Xavier")
	convnet = max_pool_2d(convnet, 2)

	convnet = fully_connected(convnet, 1024, activation='elu')
	convnet = dropout(convnet, 0.5)

	convnet = fully_connected(convnet, nbClasses, activation='softmax')
	convnet = regression(convnet, optimizer='rmsprop', loss='categorical_crossentropy')

	model = tflearn.DNN(convnet)
	print("    Model created! ✅")
	return model
Example #4
def model_for_type(neural_net_type, tile_size, on_band_count):
    """The neural_net_type can be: one_layer_relu,
                                   one_layer_relu_conv,
                                   two_layer_relu_conv."""
    network = tflearn.input_data(shape=[None, tile_size, tile_size, on_band_count])

    # NN architectures mirror ch. 3 of www.cs.toronto.edu/~vmnih/docs/Mnih_Volodymyr_PhD_Thesis.pdf
    if neural_net_type == "one_layer_relu":
        network = tflearn.fully_connected(network, 64, activation="relu")
    elif neural_net_type == "one_layer_relu_conv":
        network = conv_2d(network, 64, 12, strides=4, activation="relu")
        network = max_pool_2d(network, 3)
    elif neural_net_type == "two_layer_relu_conv":
        network = conv_2d(network, 64, 12, strides=4, activation="relu")
        network = max_pool_2d(network, 3)
        network = conv_2d(network, 128, 4, activation="relu")
    else:
        print("ERROR: exiting, unknown layer type for neural net")

    # classify as road or not road
    softmax = tflearn.fully_connected(network, 2, activation="softmax")

    # hyperparameters based on www.cs.toronto.edu/~vmnih/docs/Mnih_Volodymyr_PhD_Thesis.pdf
    momentum = tflearn.optimizers.Momentum(learning_rate=0.005, momentum=0.9, lr_decay=0.0002, name="Momentum")

    net = tflearn.regression(softmax, optimizer=momentum, loss="categorical_crossentropy")

    return tflearn.DNN(net, tensorboard_verbose=0)
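A hedged usage sketch; the tile size, band count, and architecture choice below are illustrative values, not from the original:

# Build the two-layer conv variant described in the docstring above.
model = model_for_type("two_layer_relu_conv", tile_size=64, on_band_count=3)
# model.fit(...) would then take tiles shaped [N, 64, 64, 3] and
# one-hot road/not-road labels shaped [N, 2].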
Example #5
def _model1():
    global yTest, img_aug
    tf.reset_default_graph()
    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()
    network = input_data(shape=[None, inputSize, inputSize, dim],
                 name='input',
                 data_preprocessing=img_prep,
                 data_augmentation=img_aug)

    network = conv_2d(network, 32, 3, strides = 4, activation='relu')
    network = max_pool_2d(network, 2, strides=2)
    network = local_response_normalization(network)
    network = conv_2d(network, 64, 3, strides = 2, activation='relu')
    network = max_pool_2d(network, 2, strides=2)
    network = local_response_normalization(network)
    network = fully_connected(network, 128, activation='tanh')
    network = dropout(network, 0.8)
    network = fully_connected(network, 256, activation='tanh')
    network = dropout(network, 0.8)
    network = fully_connected(network, len(Y[0]), activation='softmax')
    network = regression(network, optimizer='adam', learning_rate=0.001,
                 loss='categorical_crossentropy', name='target')

    model = tflearn.DNN(network, tensorboard_verbose=3)
    model.fit(X, Y, n_epoch=epochNum, validation_set=(xTest, yTest),
       snapshot_step=500, show_metric=True, batch_size=batchNum, shuffle=True, run_id=_id + 'artClassification')
    if modelStore: model.save(_id + '-model.tflearn')
Example #6
def train_nmf_network(mfcc_array, sdr_array, n_epochs, take):
    """

    :param mfcc_array:
    :param sdr_array:
    :param n_epochs:
    :param take:
    :return:
    """
    with tf.Graph().as_default():
        network = input_data(shape=[None, 13, 100, 1])
        network = conv_2d(network, 32, [5, 5], activation="relu", regularizer="L2")
        network = max_pool_2d(network, 2)
        network = conv_2d(network, 64, [5, 5], activation="relu", regularizer="L2")
        network = max_pool_2d(network, 2)
        network = fully_connected(network, 128, activation="relu")
        network = dropout(network, 0.8)
        network = fully_connected(network, 256, activation="relu")
        network = dropout(network, 0.8)
        network = fully_connected(network, 1, activation="linear")
        regress = tflearn.regression(network, optimizer="rmsprop", loss="mean_square", learning_rate=0.001)

        # Training
        model = tflearn.DNN(regress)  # , session=sess)
        model.fit(
            mfcc_array,
            sdr_array,
            n_epoch=n_epochs,
            snapshot_step=1000,
            show_metric=True,
            run_id="repet_choice_{0}_epochs_take_{1}".format(n_epochs, take),
        )

        return model
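The expected input shapes are not documented above; a hedged sketch, inferred from the input_data and fully_connected layers (the arrays and epoch count are assumptions):

# mfcc_array: float32 array of shape [N, 13, 100, 1]; sdr_array: targets of shape [N, 1]
model = train_nmf_network(mfcc_array, sdr_array, n_epochs=200, take=1)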
Example #7
def alexnet(width, height, lr, output=3):
    network = input_data(shape=[None, width, height, 1], name='input')
    network = conv_2d(network, 96, 11, strides=4, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = conv_2d(network, 256, 5, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = conv_2d(network, 384, 3, activation='relu')
    network = conv_2d(network, 384, 3, activation='relu')
    network = conv_2d(network, 256, 3, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, output, activation='softmax')
    network = regression(network, optimizer='momentum',
                         loss='categorical_crossentropy',
                         learning_rate=lr, name='targets')

    model = tflearn.DNN(network, checkpoint_path='model_alexnet',
                        max_checkpoints=1, tensorboard_verbose=0, tensorboard_dir='log')

    return model
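A hedged call sketch; frame size, learning rate, and class count are assumptions, not values from the original:

WIDTH, HEIGHT, LR = 80, 60, 1e-3
model = alexnet(WIDTH, HEIGHT, LR, output=3)
# Training would reuse the tensor names declared above ('input' / 'targets'):
# model.fit({'input': X}, {'targets': Y}, n_epoch=10, show_metric=True)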
Example #8
def cnn():
    X, Y, testX, testY = mnist.load_data(one_hot=True)
    X = X.reshape([-1, 28, 28, 1])
    testX = testX.reshape([-1, 28, 28, 1])

    # Building convolutional network
    network = input_data(shape=[None, 28, 28, 1], name='input')
    network = conv_2d(network, 32, 3, activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = local_response_normalization(network)
    network = conv_2d(network, 64, 3, activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = local_response_normalization(network)
    network = fully_connected(network, 128, activation='tanh')
    network = dropout(network, 0.8)
    network = fully_connected(network, 256, activation='tanh')
    network = dropout(network, 0.8)
    network = fully_connected(network, 10, activation='softmax')
    network = regression(network, optimizer='adam', learning_rate=0.01,
                         loss='categorical_crossentropy', name='target')

    # Training
    model = tflearn.DNN(network, tensorboard_verbose=0)
    model.fit({'input': X}, {'target': Y}, n_epoch=20,
               validation_set=({'input': testX}, {'target': testY}),
               snapshot_step=100, show_metric=True, run_id='cnn_demo')
Example #9
def do_cnn_doc2vec_2d(trainX, testX, trainY, testY):
    print "CNN and doc2vec 2d"

    trainX = trainX.reshape([-1, max_features, max_document_length, 1])
    testX = testX.reshape([-1, max_features, max_document_length, 1])


    # Building convolutional network
    network = input_data(shape=[None, max_features, max_document_length, 1], name='input')
    network = conv_2d(network, 16, 3, activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = local_response_normalization(network)
    network = conv_2d(network, 32, 3, activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = local_response_normalization(network)
    network = fully_connected(network, 128, activation='tanh')
    network = dropout(network, 0.8)
    network = fully_connected(network, 256, activation='tanh')
    network = dropout(network, 0.8)
    network = fully_connected(network, 10, activation='softmax')
    network = regression(network, optimizer='adam', learning_rate=0.01,
                         loss='categorical_crossentropy', name='target')

    # Training
    model = tflearn.DNN(network, tensorboard_verbose=0)
    model.fit({'input': trainX}, {'target': trainY}, n_epoch=20,
               validation_set=({'input': testX}, {'target': testY}),
               snapshot_step=100, show_metric=True, run_id='review')
Example #10
def main():
    pickle_folder = '../pickles_rolloff'
    pickle_folders_to_load = [f for f in os.listdir(pickle_folder) if os.path.isdir(join(pickle_folder, f))]
    pickle_folders_to_load = sorted(pickle_folders_to_load)

    # pickle parameters
    fg_or_bg = 'background'
    sdr_type = 'sdr'
    feature = 'sim_mat'
    beat_spec_len = 432

    # training params
    n_classes = 16
    training_percent = 0.85
    testing_percent = 0.15
    validation_percent = 0.00


    # set up training, testing, & validation partitions
    print('Loading sim_mat and sdrs')
    sim_mat_array, sdr_array = get_generated_data(feature, fg_or_bg, sdr_type)
    print('sim_mat and sdrs loaded')

    print('splitting and grooming data')
    train, test, validate = split_into_sets(len(pickle_folders_to_load), training_percent,
                                            testing_percent, validation_percent)

    trainX = np.expand_dims([sim_mat_array[i] for i in train], -1)
    trainY = np.expand_dims([sdr_array[i] for i in train], -1)
    testX = np.expand_dims([sim_mat_array[i] for i in test], -1)
    testY = np.array([sdr_array[i] for i in test])

    print('setting up CNN')
    # Building convolutional network
    network = input_data(shape=[None, beat_spec_len, beat_spec_len, 1])
    network = conv_2d(network, 32, 10, activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = conv_2d(network, 64, 20, activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = fully_connected(network, 128, activation='tanh')
    network = dropout(network, 0.8)
    network = fully_connected(network, 256, activation='tanh')
    network = dropout(network, 0.8)
    network = fully_connected(network, 1, activation='linear')
    regress = tflearn.regression(network, optimizer='sgd', loss='mean_square', learning_rate=0.01)

    print('running CNN')
    # Training
    model = tflearn.DNN(regress, tensorboard_verbose=1)
    model.fit(trainX, trainY, n_epoch=10,
              snapshot_step=1000, show_metric=True, run_id='{} classes'.format(n_classes - 1))

    predicted = np.array(model.predict(testX))[:,0]

    print('plotting')
    plot(testY, predicted)
Example #11
def _model3():
    global yTest, img_aug
    tf.reset_default_graph()
    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()
    network = input_data(shape=[None, inputSize, inputSize, dim],
                             data_preprocessing=img_prep,
                             data_augmentation=img_aug)
    network = conv_2d(network, 96, 11, strides=4, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = conv_2d(network, 256, 5, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = conv_2d(network, 384, 3, activation='relu')
    network = conv_2d(network, 384, 3, activation='relu')
    network = conv_2d(network, 256, 3, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, len(yTest[0]), activation='softmax')
    network = regression(network, optimizer='momentum',
                         loss='categorical_crossentropy',
                         learning_rate=0.001)
    print('Model has been made!!!?')
    # Training
    model = tflearn.DNN(network, checkpoint_path='model_densenet_cifar10',
                        max_checkpoints=10, tensorboard_verbose=0,
                        clip_gradients=0.)
    model.load(_path)
    pred = model.predict(xTest)

    df = pd.DataFrame(pred)
    df.to_csv(_path + ".csv")

    newList = pred.copy()
    newList = convert2(newList)
    if _CSV: makeCSV(newList)
    pred = convert2(pred)
    pred = convert3(pred)
    yTest = convert3(yTest)
    print(metrics.confusion_matrix(yTest, pred))
    print(metrics.classification_report(yTest, pred))
    print('Accuracy', accuracy_score(yTest, pred))
    print()
    if _wrFile: writeTest(pred)
Example #12
def make_core_network(network):
    network = tflearn.reshape(network, [-1, 28, 28, 1], name="reshape")
    network = conv_2d(network, 32, 3, activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = local_response_normalization(network)
    network = conv_2d(network, 64, 3, activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = local_response_normalization(network)
    network = fully_connected(network, 128, activation='tanh')
    network = dropout(network, 0.8)
    network = fully_connected(network, 256, activation='tanh')
    network = dropout(network, 0.8)
    network = fully_connected(network, 10, activation='softmax')
    return network
Example #13
    def generate_network(self):
        """ Return tflearn cnn network.
        """
        print(self.image_size, self.n_epoch, self.batch_size, self.person_ids)
        print(type(self.image_size), type(self.n_epoch),
              type(self.batch_size), type(self.person_ids))
        if not isinstance(self.image_size, list) \
            or not isinstance(self.n_epoch, int) \
            or not isinstance(self.batch_size, int) \
            or not isinstance(self.person_ids, list):
        # if self.image_size is None or self.n_epoch is None or \
        #     self.batch_size is None or self.person_ids is None:
            raise ValueError("Insufficient values to generate network.\n"
                             "Need (n_epoch, int), (batch_size, int),"
                             "(image_size, list), (person_ids, list).")

        # Real-time data preprocessing
        img_prep = ImagePreprocessing()
        img_prep.add_featurewise_zero_center()
        img_prep.add_featurewise_stdnorm()

        # Real-time data augmentation
        img_aug = ImageAugmentation()
        img_aug.add_random_rotation(max_angle=25.)
        img_aug.add_random_flip_leftright()

        # Convolutional network building
        network = input_data(
            shape=[None, self.image_size[0], self.image_size[1], 3],
            data_preprocessing=img_prep,
            data_augmentation=img_aug)
        network = conv_2d(network, self.image_size[0], self.IMAGE_CHANNEL_NUM,
                          activation='relu')
        network = max_pool_2d(network, 2)
        network = conv_2d(network, self.image_size[0] * 2,
                          self.IMAGE_CHANNEL_NUM,
                          activation='relu')
        network = conv_2d(network, self.image_size[0] * 2,
                          self.IMAGE_CHANNEL_NUM,
                          activation='relu')
        network = max_pool_2d(network, 2)
        network = fully_connected(network, self.image_size[0] * 2**4,
                                  activation='relu')
        network = dropout(network, 0.5)
        network = fully_connected(network, self.person_num,
                                  activation='softmax')
        network = regression(network, optimizer='adam',
                             loss='categorical_crossentropy',
                             learning_rate=0.001)
        return network
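A hypothetical driver showing the attribute types the isinstance() checks above expect; the class name and all values are assumptions, not from the original:

recognizer = FaceCNN()                  # hypothetical class owning generate_network()
recognizer.image_size = [32, 32]        # list: [height, width]
recognizer.n_epoch = 50                 # int
recognizer.batch_size = 96              # int
recognizer.person_ids = [0, 1, 2]       # list of class ids
recognizer.person_num = len(recognizer.person_ids)
# the method also reads a class constant IMAGE_CHANNEL_NUM (used as the conv filter size)
network = recognizer.generate_network()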
Example #14
def build_network(image_size, batch_size=None, n_channels=3):
    network = input_data(shape=[batch_size, image_size[0], image_size[1], n_channels],
                     data_preprocessing=img_prep,
                     data_augmentation=img_aug)
    network = conv_2d(network, 16, 3, activation='relu')
    network = max_pool_2d(network, 2)
    network = conv_2d(network, 32, 3, activation='relu')
    network = max_pool_2d(network, 2)
    network = fully_connected(network, num_classes, activation='softmax')
    network = regression(network, optimizer='adam',
                         loss='categorical_crossentropy',
                         learning_rate=0.0001)

    return network
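build_network() refers to module-level img_prep, img_aug, and num_classes that are not shown; a minimal sketch of those globals (values are assumptions, not from the original):

from tflearn.data_preprocessing import ImagePreprocessing
from tflearn.data_augmentation import ImageAugmentation

num_classes = 2
img_prep = ImagePreprocessing()
img_prep.add_featurewise_zero_center()
img_prep.add_featurewise_stdnorm()
img_aug = ImageAugmentation()
img_aug.add_random_flip_leftright()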
Example #15
def setup_model(checkpoint_path=None):
    """Sets up a deep belief network for image classification based on the set up described in

    :param checkpoint_path: string path describing prefix for model checkpoints
    :returns: Deep Neural Network
    :rtype: tflearn.DNN

    References:
        - Machine Learning is Fun! Part 3: Deep Learning and Convolutional Neural Networks

    Links:
        - https://medium.com/@ageitgey/machine-learning-is-fun-part-3-deep-learning-and-convolutional-neural-networks-f40359318721

    """
    # Make sure the data is normalized
    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()

    # Create extra synthetic training data by flipping, rotating and blurring the
    # images on our data set.
    img_aug = ImageAugmentation()
    img_aug.add_random_flip_leftright()
    img_aug.add_random_rotation(max_angle=25.)
    img_aug.add_random_blur(sigma_max=3.)

    # Input is a 32x32 image with 3 color channels (red, green and blue)
    network = input_data(shape=[None, 32, 32, 3],
                         data_preprocessing=img_prep,
                         data_augmentation=img_aug)
    network = conv_2d(network, 32, 3, activation='relu')
    network = max_pool_2d(network, 2)
    network = conv_2d(network, 64, 3, activation='relu')
    network = conv_2d(network, 64, 3, activation='relu')
    network = max_pool_2d(network, 2)
    network = fully_connected(network, 512, activation='relu')
    network = dropout(network, 0.5)
    network = fully_connected(network, 2, activation='softmax')
    network = regression(network, optimizer='adam',
                         loss='categorical_crossentropy',
                         learning_rate=0.001)
    if checkpoint_path:
        model = tflearn.DNN(network, tensorboard_verbose=3,
                            checkpoint_path=checkpoint_path)
    else:
        model = tflearn.DNN(network, tensorboard_verbose=3)

    return model
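A hedged usage sketch; the checkpoint prefix and training arguments are assumptions, and dataset loading is not part of the original snippet:

model = setup_model(checkpoint_path='image-classifier.tfl.ckpt')
# model.fit(X, Y, n_epoch=100, shuffle=True, validation_set=(X_test, Y_test),
#           show_metric=True, batch_size=96, run_id='image-classifier')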
Example #16
def main():
    """

    :return:
    """
    pickle_folder = '../NMF/mfcc_pickles'
    pickle_folders_to_load = [f for f in os.listdir(pickle_folder) if os.path.isdir(join(pickle_folder, f))]

    fg_or_bg = 'background'
    sdr_type = 'sdr'
    feature = 'mfcc_clusters'
    beat_spec_len = 432
    n_epochs = 200
    take = 1

    # set up training, testing, & validation partitions
    mfcc_array, sdr_array = load_mfcc_and_sdrs(pickle_folders_to_load, pickle_folder,
                                                    feature, fg_or_bg, sdr_type)

    mfcc_array = np.expand_dims(mfcc_array, -1)
    sdr_array = np.expand_dims(sdr_array, -1)

    # Building convolutional network
    network = input_data(shape=[None, 13, 100, 1])
    network = conv_2d(network, 32, [5, 5], activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = conv_2d(network, 64, [5, 5], activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = fully_connected(network, 128, activation='relu')
    network = dropout(network, 0.8)
    network = fully_connected(network, 256, activation='relu')
    network = dropout(network, 0.8)
    network = fully_connected(network, 1, activation='linear')
    regress = tflearn.regression(network, optimizer='rmsprop', loss='mean_square', learning_rate=0.001)

    start = time.time()
    # Training
    model = tflearn.DNN(regress)  # , session=sess)
    model.fit(mfcc_array, sdr_array, n_epoch=n_epochs,
              snapshot_step=1000, show_metric=True,
              run_id='repet_save_{0}_epochs_take_{1}'.format(n_epochs, take))
    elapsed = (time.time() - start)
    print('Finished training after {:.1f} seconds. Saving...'.format(elapsed))

    model_output_folder = 'network_outputs/'
    model_output_file = join(model_output_folder, 'nmf_save_{0}_epochs_take_{1}'.format(n_epochs, take))

    model.save(model_output_file)
Example #17
def block8(net, scale=1.0, activation="relu"):
    tower_conv = relu(batch_normalization(conv_2d(net, 192, 1, bias=False, activation=None, name='Conv2d_1x1')))
    tower_conv1_0 = relu(batch_normalization(conv_2d(net, 192, 1, bias=False, activation=None, name='Conv2d_0a_1x1')))
    tower_conv1_1 = relu(batch_normalization(conv_2d(tower_conv1_0, 224, [1, 3], bias=False, activation=None, name='Conv2d_0b_1x3')))
    tower_conv1_2 = relu(batch_normalization(conv_2d(tower_conv1_1, 256, [3, 1], bias=False, name='Conv2d_0c_3x1')))
    tower_mixed = merge([tower_conv, tower_conv1_2], mode='concat', axis=3)
    tower_out = relu(batch_normalization(conv_2d(tower_mixed, net.get_shape()[3], 1, bias=False, activation=None, name='Conv2d_1x1')))
    net += scale * tower_out
    if activation:
        if isinstance(activation, str):
            net = activations.get(activation)(net)
        elif hasattr(activation, '__call__'):
            net = activation(net)
        else:
            raise ValueError("Invalid Activation.")
    return net
Example #18
def get_cnn_model(checkpoint_path='cnn_servo_model', width=72, height=48, depth=3, session=None):
    
    # Inputs
    network = input_data(shape=[None, height, width, depth], name='input')

    # Convolution no.1
    # ReLU introduces non-linearity into training
    network = conv_2d(network, 8, [5, 3], activation='relu')

    # Convolution no.2
    network = conv_2d(network, 12, [5, 8], activation='relu')
    
    # Convolution no.3
    network = conv_2d(network, 16, [5, 16], activation='relu')

    # Convolution no.4
    network = conv_2d(network, 24, [3, 20], activation='relu')

    # Convolution no.5
    network = conv_2d(network, 24, [3, 24], activation='relu')

    # Fully connected no.1
    network = fully_connected(network, 256, activation='relu')
    network = dropout(network, 0.8)

    # Fully connected no.2
    network = fully_connected(network, 100, activation='relu')
    network = dropout(network, 0.8)

    # Fully connected no.3
    network = fully_connected(network, 50, activation='relu')
    network = dropout(network, 0.8)

    # Fully connected no.4
    network = fully_connected(network, 10, activation='relu')
    network = dropout(network, 0.8)
 
    # Fully connected no.5
    network = fully_connected(network, 1, activation='tanh')

    # Regression
    network = regression(network, loss='mean_square', metric='accuracy', learning_rate=1e-4, name='target')

    # TensorBoard verbosity level (0 = no extra logging)
    model = tflearn.DNN(network, tensorboard_verbose=2, checkpoint_path=checkpoint_path, session=session) 
    return model
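A hedged inference sketch; the dummy frame simply matches the default 72x48x3 input shape, and real capture/preprocessing is not part of the original:

import numpy as np

model = get_cnn_model()
frame = np.zeros((1, 48, 72, 3), dtype=np.float32)  # [batch, height, width, depth]
steering = model.predict(frame)[0][0]                # tanh output in [-1, 1]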
Example #19
def convolutional_neural_network(width=5, height=6):
    """Create the neural network model.

    Args:
        width: Width of the pseudo image
        height: Height of the pseudo image

    Returns:
        convnet: The configured TFLearn graph (output of the regression layer)

    """
    # Initialize key variables
    conv1_filter_count = 32
    conv2_filter_count = 64
    fc_units = 1024
    image_height = height
    image_width = width
    filter_size = 2
    pooling_kernel_size = 2
    keep_probability = 0.6
    fully_connected_units = 10

    # Create the convolutional network stuff
    convnet = input_data(
        shape=[None, image_width, image_height, 1], name='input')

    convnet = conv_2d(
        convnet, conv1_filter_count, filter_size, activation='relu')
    convnet = max_pool_2d(convnet, pooling_kernel_size)

    convnet = conv_2d(
        convnet, conv2_filter_count, filter_size, activation='relu')
    convnet = max_pool_2d(convnet, pooling_kernel_size)

    convnet = fully_connected(convnet, fc_units, activation='relu')
    convnet = dropout(convnet, keep_probability)

    convnet = fully_connected(
        convnet, fully_connected_units, activation='softmax')
    convnet = regression(
        convnet,
        optimizer='adam',
        learning_rate=0.01,
        loss='categorical_crossentropy',
        name='targets')

    return convnet
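The function returns the graph only; a hedged sketch of wrapping it in a DNN for training (tensor names follow the 'input'/'targets' declared above, everything else is assumed):

convnet = convolutional_neural_network(width=5, height=6)
model = tflearn.DNN(convnet, tensorboard_verbose=0)
# model.fit({'input': X_train}, {'targets': y_train}, n_epoch=10,
#           validation_set=({'input': X_test}, {'targets': y_test}),
#           show_metric=True)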
Example #20
def do_cnn_word2vec_2d(trainX, testX, trainY, testY):
    global max_features
    global max_document_length
    print "CNN and word2vec2d"
    y_test = testY
    #trainX = pad_sequences(trainX, maxlen=max_features, value=0.)
    #testX = pad_sequences(testX, maxlen=max_features, value=0.)
    # Converting labels to binary vectors
    trainY = to_categorical(trainY, nb_classes=2)
    testY = to_categorical(testY, nb_classes=2)

    # Building convolutional network
    network = input_data(shape=[None,max_document_length,max_features,1], name='input')

    network = conv_2d(network, 32, 3, activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = local_response_normalization(network)
    network = conv_2d(network, 64, 3, activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = local_response_normalization(network)
    network = fully_connected(network, 128, activation='tanh')
    network = dropout(network, 0.8)
    network = fully_connected(network, 256, activation='tanh')
    network = dropout(network, 0.8)
    network = fully_connected(network, 2, activation='softmax')
    network = regression(network, optimizer='adam', learning_rate=0.01,
                         loss='categorical_crossentropy', name='target')

    model = tflearn.DNN(network, tensorboard_verbose=0)
    model.fit(trainX, trainY,
              n_epoch=5, shuffle=True, validation_set=(testX, testY),
              show_metric=True,run_id="sms")

    y_predict_list = model.predict(testX)
    print(y_predict_list)

    y_predict = []
    for i in y_predict_list:
        print(i[0])
        if i[0] > 0.5:
            y_predict.append(0)
        else:
            y_predict.append(1)

    print(classification_report(y_test, y_predict))
    print(metrics.confusion_matrix(y_test, y_predict))
Example #21
def create_model(learning_rate, input_shape, nb_classes, base_path, drop=1):
    network = input_data(shape=input_shape, name='input')
    network = conv_2d(network, 32, 3, activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = local_response_normalization(network)
    network = conv_2d(network, 64, 3, activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = local_response_normalization(network)
    network = fully_connected(network, 128, activation='tanh')
    network = dropout(network, drop)
    network = fully_connected(network, 256, activation='tanh')
    network = dropout(network, drop)
    network = fully_connected(network, nb_classes, activation='softmax')
    network = regression(network, optimizer='adam', learning_rate=learning_rate,
                         loss='categorical_crossentropy', name='target')
    model = tflearn.DNN(network, tensorboard_verbose=0, checkpoint_path=base_path + "/checkpoints/step")

    return model
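A hedged call sketch; note that TFLearn's dropout() takes a keep probability, so the default drop=1 effectively disables dropout. The values below are assumptions:

model = create_model(learning_rate=0.001, input_shape=[None, 28, 28, 1],
                     nb_classes=10, base_path='.', drop=0.8)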
Example #22
def build_model():
	network = input_data(shape=[None, 128, 128, 1], name='input')
	network = conv_2d(network, nb_filter=2, filter_size=5, strides=1, activation='tanh')
	network = fully_connected(network, 1, activation='linear')
	network = regression(network, optimizer='adam', learning_rate=0.001,
						 loss='mean_square', name='target')

	model = tflearn.DNN(network, tensorboard_verbose=0, checkpoint_path='checkpoints/road_model1')
	return model
Example #23
def build_model_2_conv(learning_rate, input_shape, nb_classes, base_path, drop):
    network = input_data(shape=input_shape, name='input')
    network = conv_2d(network, 64, [4, 16], activation='relu')
    network = max_pool_2d(network, 2)
    network = local_response_normalization(network)
    network = conv_2d(network, 64, [4, 16], activation='relu')
    network = max_pool_2d(network, 2)
    network = local_response_normalization(network)
    network = fully_connected(network, 128, activation='relu')
    network = dropout(network, drop)
    network = fully_connected(network, 64, activation='relu')
    network = dropout(network, drop)
    network = fully_connected(network, nb_classes, activation='softmax')
    network = regression(network, optimizer='sgd', learning_rate=learning_rate,
                         loss='categorical_crossentropy', name='target')
    model = tflearn.DNN(network, tensorboard_verbose=3, tensorboard_dir=base_path + "/tflearn_logs/",
                        checkpoint_path=base_path + "/checkpoints/step")
    return model
Example #24
def train_neural_net(convolution_patch_size,
                     bands_to_use,
                     image_size,
                     train_images,
                     train_labels,
                     test_images,
                     test_labels,
                     number_of_batches,
                     batch_size):

  on_band_count = 0
  for b in bands_to_use:
    if b == 1:
      on_band_count += 1

  train_images = train_images.astype(numpy.float32)
  train_images = (train_images - 127.5) / 127.5
    
  test_images = test_images.astype(numpy.float32)
  test_images = (test_images - 127.5) / 127.5

  # Convolutional network building
  network = input_data(shape=[None, image_size, image_size, on_band_count])
  network = conv_2d(network, 32, convolution_patch_size, activation='relu')
  network = max_pool_2d(network, 2)
  network = conv_2d(network, 64, convolution_patch_size, activation='relu')
  network = conv_2d(network, 64, convolution_patch_size, activation='relu')
  network = max_pool_2d(network, 2)
  network = fully_connected(network, 512, activation='relu')
  network = dropout(network, 0.5)
  network = fully_connected(network, 2, activation='softmax')
  network = regression(network, optimizer='adam',
                       loss='categorical_crossentropy',
                       learning_rate=0.001)

  # batch_size was originally 96
  # n_epoch was originally 50
  # each epoch is 170 steps I think
  # Train using classifier
  model = tflearn.DNN(network, tensorboard_verbose=0)
  model.fit(train_images, train_labels, n_epoch=int(number_of_batches/100), shuffle=False, validation_set=(test_images, test_labels),
            show_metric=True, batch_size=batch_size, run_id='cifar10_cnn')

  return model.predict(test_images)
Example #25
def main():
    """
    Trains a CNN architecture and plots the results over a validation set.
    Returns:

    """

    # Load the SDR and hist data
    data = load_data('reverb_pan_full_sdr.txt', 'pickle/')

    # split data into train and test sets
    test_percent = 0.15
    train, test, validate = split_into_sets(len(data['sdr']), 1-test_percent,
                                            test_percent, 0)

    x_train = np.expand_dims([data['input'][i] for i in train], -1)
    y_train = np.expand_dims([data['sdr'][i] for i in train], -1)
    x_test = np.expand_dims([data['input'][i] for i in test], -1)
    y_test = np.expand_dims([data['sdr'][i] for i in test], -1)

    # construct the CNN.
    inp = input_data(shape=[None, 50, 50, 1], name='input')
    # two convolutional layers with max pooling
    conv1 = conv_2d(inp, 32, [5, 5], activation='relu', regularizer="L2")
    max_pool = max_pool_2d(conv1, 2)
    conv2 = conv_2d(max_pool, 64, [5, 5], activation='relu', regularizer="L2")
    max_pool2 = max_pool_2d(conv2, 2)
    # two fully connected layers
    full = fully_connected(max_pool2, 128, activation='tanh')
    full = dropout(full, 0.8)
    full2 = fully_connected(full, 256, activation='tanh')
    full2 = dropout(full2, 0.8)
    # output regression node
    out = fully_connected(full2, 1, activation='linear')
    network = regression(out, optimizer='sgd', learning_rate=0.01, name='target', loss='mean_square')

    model = tflearn.DNN(network, tensorboard_verbose=1, checkpoint_path='checkpoint.p',
                        tensorboard_dir='tmp/tflearn_logs/')

    model.fit({'input': x_train}, {'target': y_train}, n_epoch=1000, validation_set=(x_test, y_test),
              snapshot_step=10000, run_id='convnet_duet_3x3')

    predicted = np.array(model.predict(x_test))[:,0]
    plot(y_test, predicted)
Example #26
def _model1():
    global yTest, img_aug
    tf.reset_default_graph()
    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()
    network = input_data(shape=[None, inputSize, inputSize, dim],
                 name='input',
                 data_preprocessing=img_prep,
                 data_augmentation=img_aug)

    network = conv_2d(network, 32, 3, strides = 4, activation='relu')
    network = max_pool_2d(network, 2, strides=2)
    network = local_response_normalization(network)
    network = conv_2d(network, 64, 3, strides = 2, activation='relu')
    network = max_pool_2d(network, 2, strides=2)
    network = local_response_normalization(network)
    network = fully_connected(network, 128, activation='tanh')
    network = dropout(network, 0.8)
    network = fully_connected(network, 256, activation='tanh')
    network = dropout(network, 0.8)
    network = fully_connected(network, len(yTest[0]), activation='softmax')
    network = regression(network, optimizer='adam', learning_rate=0.001,
                 loss='categorical_crossentropy', name='target')

    model = tflearn.DNN(network, tensorboard_verbose=3)
    model.load(_path)
    pred = model.predict(xTest)

    df = pd.DataFrame(pred)
    df.to_csv(_path + ".csv")

    newList = pred.copy()
    newList = convert2(newList)
    if _CSV: makeCSV(newList)
    pred = convert2(pred)
    pred = convert3(pred)
    yTest = convert3(yTest)
    print(metrics.confusion_matrix(yTest, pred))
    print(metrics.classification_report(yTest, pred))
    print('Accuracy', accuracy_score(yTest, pred))
    print()
    if _wrFile: writeTest(pred)
Example #27
def cnn():
    network = input_data(shape=[None, IMAGE_HEIGHT, IMAGE_WIDTH, 1], name='input')
    network = conv_2d(network, 8, 3, activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = batch_normalization(network)
    network = conv_2d(network, 16, 3, activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = batch_normalization(network)
    network = conv_2d(network, 16, 3, activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = batch_normalization(network)
    network = fully_connected(network, 256, activation='tanh')
    network = dropout(network, 0.8)
    network = fully_connected(network, 256, activation='tanh')
    network = dropout(network, 0.8)
    network = fully_connected(network, CODE_LEN * MAX_CHAR, activation='softmax')
    network = regression(network, optimizer='adam', learning_rate=0.001,
                         loss='categorical_crossentropy', name='target')
    return network
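The snippet relies on module-level constants that are not shown; plausible values for a four-character CAPTCHA (assumptions, not from the original):

IMAGE_HEIGHT, IMAGE_WIDTH = 60, 160
CODE_LEN, MAX_CHAR = 4, 10   # 4 character positions, 10 candidate characters each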
Example #28
def build_model():
	init = tf.truncated_normal_initializer(stddev=1e-4)

	network = input_data(shape=[None, 128, 128, 1], name='input')
	network = conv_2d(network, nb_filter=2, filter_size=5, strides=2, activation='tanh', weights_init=init)
	network = fully_connected(network, 1, activation='tanh', weights_init=init)
	network = regression(network, optimizer='sgd', learning_rate=learning_rate,
						 loss='mean_square', name='target')

	model = tflearn.DNN(network, tensorboard_verbose=0, checkpoint_path='checkpoints/road_model1')
	return model
Example #29
def do_cnn_word2vec_2d_345(trainX, testX, trainY, testY):
    global max_features
    global max_document_length
    print "CNN and word2vec_2d_345"
    y_test = testY

    trainY = to_categorical(trainY, nb_classes=2)
    testY = to_categorical(testY, nb_classes=2)

    # Building convolutional network
    network = input_data(shape=[None,max_document_length,max_features,1], name='input')
    network = tflearn.embedding(network, input_dim=1, output_dim=128,validate_indices=False)
    branch1 = conv_2d(network, 128, 3, padding='valid', activation='relu', regularizer="L2")
    branch2 = conv_2d(network, 128, 4, padding='valid', activation='relu', regularizer="L2")
    branch3 = conv_2d(network, 128, 5, padding='valid', activation='relu', regularizer="L2")
    network = merge([branch1, branch2, branch3], mode='concat', axis=1)
    network = tf.expand_dims(network, 2)
    network = global_max_pool(network)  # tflearn provides global_max_pool (there is no _2d variant)
    network = dropout(network, 0.8)
    network = fully_connected(network, 2, activation='softmax')
    network = regression(network, optimizer='adam', learning_rate=0.001,
                         loss='categorical_crossentropy', name='target')
    # Training
    model = tflearn.DNN(network, tensorboard_verbose=0)
    model.fit(trainX, trainY,
              n_epoch=5, shuffle=True, validation_set=(testX, testY),
              show_metric=True, batch_size=100,run_id="sms")

    y_predict_list = model.predict(testX)
    print(y_predict_list)

    y_predict = []
    for i in y_predict_list:
        print(i[0])
        if i[0] > 0.5:
            y_predict.append(0)
        else:
            y_predict.append(1)

    print(classification_report(y_test, y_predict))
    print(metrics.confusion_matrix(y_test, y_predict))
Example #30
def stop_dnn():
    img_pre_processing = ImagePreprocessing()

    img_aug = ImageAugmentation()
    img_aug.add_random_flip_leftright()
    img_aug.add_random_rotation(max_angle=10.)

    network = input_data(shape=[None, 32, 32, 3],
                         data_preprocessing=img_pre_processing,
                         data_augmentation=img_aug)
    network = conv_2d(network, 32, 3, activation='relu')
    network = max_pool_2d(network, 2)
    network = conv_2d(network, 64, 3, activation='relu')
    network = conv_2d(network, 64, 3, activation='relu')
    network = max_pool_2d(network, 2)
    network = fully_connected(network, 512, activation='relu')
    network = dropout(network, 0.5)
    network = fully_connected(network, 2, activation='softmax')
    network = regression(network, optimizer='adam', loss='categorical_crossentropy',
                         learning_rate=0.001)
    return network
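The returned graph still has to be wrapped in a DNN before training; a hedged sketch with assumed training arguments:

network = stop_dnn()
model = tflearn.DNN(network, tensorboard_verbose=0)
# model.fit(X, Y, n_epoch=50, validation_set=0.1, show_metric=True, batch_size=96)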