예제 #1
0
def do_cnn_doc2vec_2d(trainX, testX, trainY, testY):
    """Train a small two-stage CNN on 2-D doc2vec features.

    Reshapes the inputs into single-channel images of shape
    (max_features, max_document_length, 1) — both sizes are read from
    module-level globals — and fits a 10-way softmax classifier.

    :param trainX: training feature array (reshaped here).
    :param testX: test feature array, used as the validation set.
    :param trainY: one-hot training labels (10 classes).
    :param testY: one-hot test labels.
    """
    # Fixed: Python-2-only `print` statement -> print() call (valid on 2 and 3).
    print("CNN and doc2vec 2d")

    trainX = trainX.reshape([-1, max_features, max_document_length, 1])
    testX = testX.reshape([-1, max_features, max_document_length, 1])

    # Building convolutional network
    network = input_data(shape=[None, max_features, max_document_length, 1], name='input')
    network = conv_2d(network, 16, 3, activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = local_response_normalization(network)
    network = conv_2d(network, 32, 3, activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = local_response_normalization(network)
    network = fully_connected(network, 128, activation='tanh')
    network = dropout(network, 0.8)
    network = fully_connected(network, 256, activation='tanh')
    network = dropout(network, 0.8)
    network = fully_connected(network, 10, activation='softmax')
    network = regression(network, optimizer='adam', learning_rate=0.01,
                         loss='categorical_crossentropy', name='target')

    # Training
    model = tflearn.DNN(network, tensorboard_verbose=0)
    model.fit({'input': trainX}, {'target': trainY}, n_epoch=20,
               validation_set=({'input': testX}, {'target': testY}),
               snapshot_step=100, show_metric=True, run_id='review')
 def build_network(self):
     """Construct an AlexNet-style CNN for emotion recognition.

     Sets ``self.network`` (graph) and ``self.model`` (tflearn.DNN with
     checkpointing under SAVE_DIRECTORY), then restores saved weights via
     ``self.load_model()``.

     References:
       - https://github.com/tflearn/tflearn/blob/master/examples/images/alexnet.py
       - https://github.com/DT42/squeezenet_demo
       - https://github.com/yhenon/pysqueezenet/blob/master/squeezenet.py
     """
     print('[+] Building CNN')
     net = input_data(shape=[None, SIZE_FACE, SIZE_FACE, 1])
     # Stage 1: large-kernel conv with stride 4, then pool + LRN.
     net = conv_2d(net, 96, 11, strides=4, activation='relu')
     net = max_pool_2d(net, 3, strides=2)
     net = local_response_normalization(net)
     # Stage 2.
     net = conv_2d(net, 256, 5, activation='relu')
     net = max_pool_2d(net, 3, strides=2)
     net = local_response_normalization(net)
     # Stage 3.
     net = conv_2d(net, 256, 3, activation='relu')
     net = max_pool_2d(net, 3, strides=2)
     net = local_response_normalization(net)
     # Two dropout-regularized dense layers, then the per-emotion softmax.
     for _ in range(2):
         net = fully_connected(net, 1024, activation='tanh')
         net = dropout(net, 0.5)
     net = fully_connected(net, len(EMOTIONS), activation='softmax')
     net = regression(net, optimizer='momentum',
                      loss='categorical_crossentropy')
     self.network = net
     self.model = tflearn.DNN(
         self.network,
         checkpoint_path=SAVE_DIRECTORY + '/alexnet_mood_recognition',
         max_checkpoints=1,
         tensorboard_verbose=2,
     )
     self.load_model()
예제 #3
0
파일: models.py 프로젝트: gcm0621/pygta5
def alexnet(width, height, lr, output=3):
    """Build AlexNet for single-channel inputs of the given size.

    :param width: input image width.
    :param height: input image height.
    :param lr: learning rate for the momentum optimizer.
    :param output: number of softmax classes (default 3).
    :return: an untrained tflearn.DNN with checkpointing configured.
    """
    net = input_data(shape=[None, width, height, 1], name='input')
    net = conv_2d(net, 96, 11, strides=4, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    net = local_response_normalization(net)
    net = conv_2d(net, 256, 5, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    net = local_response_normalization(net)
    # Three back-to-back conv layers before the final pool.
    for n_filters in (384, 384, 256):
        net = conv_2d(net, n_filters, 3, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    net = local_response_normalization(net)
    # Two dropout-regularized dense layers.
    for _ in range(2):
        net = fully_connected(net, 4096, activation='tanh')
        net = dropout(net, 0.5)
    net = fully_connected(net, output, activation='softmax')
    net = regression(net, optimizer='momentum',
                     loss='categorical_crossentropy',
                     learning_rate=lr, name='targets')

    return tflearn.DNN(net, checkpoint_path='model_alexnet',
                       max_checkpoints=1, tensorboard_verbose=0, tensorboard_dir='log')
예제 #4
0
def cnn():
    """Train a two-stage CNN on MNIST, validating on the test split."""
    X, Y, testX, testY = mnist.load_data(one_hot=True)
    # Flat 784-vectors -> single-channel 28x28 images.
    X = X.reshape([-1, 28, 28, 1])
    testX = testX.reshape([-1, 28, 28, 1])

    # Two conv/pool/LRN stages, then two dropout-regularized dense layers.
    net = input_data(shape=[None, 28, 28, 1], name='input')
    for n_filters in (32, 64):
        net = conv_2d(net, n_filters, 3, activation='relu', regularizer="L2")
        net = max_pool_2d(net, 2)
        net = local_response_normalization(net)
    net = fully_connected(net, 128, activation='tanh')
    net = dropout(net, 0.8)
    net = fully_connected(net, 256, activation='tanh')
    net = dropout(net, 0.8)
    net = fully_connected(net, 10, activation='softmax')
    net = regression(net, optimizer='adam', learning_rate=0.01,
                     loss='categorical_crossentropy', name='target')

    # Fit for 20 epochs with the MNIST test split as validation data.
    model = tflearn.DNN(net, tensorboard_verbose=0)
    model.fit({'input': X}, {'target': Y}, n_epoch=20,
              validation_set=({'input': testX}, {'target': testY}),
              snapshot_step=100, show_metric=True, run_id='cnn_demo')
예제 #5
0
def alexnet():
    """Train AlexNet on the 17-category Oxford Flowers dataset (227x227 RGB)."""
    X, Y = oxflower17.load_data(one_hot=True, resize_pics=(227, 227))

    # AlexNet body: three conv/pool/LRN groups, then two dense+dropout layers.
    net = input_data(shape=[None, 227, 227, 3])
    net = conv_2d(net, 96, 11, strides=4, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    net = local_response_normalization(net)
    net = conv_2d(net, 256, 5, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    net = local_response_normalization(net)
    for n_filters in (384, 384, 256):
        net = conv_2d(net, n_filters, 3, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    net = local_response_normalization(net)
    for _ in range(2):
        net = fully_connected(net, 4096, activation='tanh')
        net = dropout(net, 0.5)
    net = fully_connected(net, 17, activation='softmax')
    net = regression(net, optimizer='momentum',
                     loss='categorical_crossentropy',
                     learning_rate=0.001)

    # Train with 10% of the data held out for validation.
    model = tflearn.DNN(net, checkpoint_path='model_alexnet',
                        max_checkpoints=1, tensorboard_verbose=2)
    model.fit(X, Y, n_epoch=1000, validation_set=0.1, shuffle=True,
              show_metric=True, batch_size=64, snapshot_step=200,
              snapshot_epoch=False, run_id='alexnet')
예제 #6
0
def _model1():
    """Build and fit a small two-stage CNN over the module-level dataset.

    Reads the module globals X, Y, xTest, yTest, inputSize, dim, epochNum,
    batchNum, modelStore, _id and img_aug; optionally saves the trained model.
    """
    global yTest, img_aug
    tf.reset_default_graph()
    # Feed-time normalization: zero-center + per-feature stdnorm.
    preprocessor = ImagePreprocessing()
    preprocessor.add_featurewise_zero_center()
    preprocessor.add_featurewise_stdnorm()
    net = input_data(shape=[None, inputSize, inputSize, dim],
                     name='input',
                     data_preprocessing=preprocessor,
                     data_augmentation=img_aug)

    # Two strided conv stages with pooling and LRN.
    for n_filters, conv_stride in ((32, 4), (64, 2)):
        net = conv_2d(net, n_filters, 3, strides=conv_stride, activation='relu')
        net = max_pool_2d(net, 2, strides=2)
        net = local_response_normalization(net)
    net = fully_connected(net, 128, activation='tanh')
    net = dropout(net, 0.8)
    net = fully_connected(net, 256, activation='tanh')
    net = dropout(net, 0.8)
    net = fully_connected(net, len(Y[0]), activation='softmax')
    net = regression(net, optimizer='adam', learning_rate=0.001,
                     loss='categorical_crossentropy', name='target')

    model = tflearn.DNN(net, tensorboard_verbose=3)
    model.fit(X, Y, n_epoch=epochNum, validation_set=(xTest, yTest),
              snapshot_step=500, show_metric=True, batch_size=batchNum,
              shuffle=True, run_id=_id + 'artClassification')
    if modelStore:
        model.save(_id + '-model.tflearn')
예제 #7
0
def train_nmf_network(mfcc_array, sdr_array, n_epochs, take):
    """Train a CNN regressor that predicts an SDR value from MFCC features.

    :param mfcc_array: input feature array; must match the network input
        shape (batch, 13, 100, 1).
    :param sdr_array: regression targets, one scalar per example.
    :param n_epochs: number of training epochs.
    :param take: trial index; used only to tag the tensorboard run_id.
    :return: the trained tflearn.DNN model.
    """
    with tf.Graph().as_default():
        # Two conv/pool stages, two dropout-regularized dense layers,
        # and a single linear output for the regression target.
        network = input_data(shape=[None, 13, 100, 1])
        network = conv_2d(network, 32, [5, 5], activation="relu", regularizer="L2")
        network = max_pool_2d(network, 2)
        network = conv_2d(network, 64, [5, 5], activation="relu", regularizer="L2")
        network = max_pool_2d(network, 2)
        network = fully_connected(network, 128, activation="relu")
        network = dropout(network, 0.8)
        network = fully_connected(network, 256, activation="relu")
        network = dropout(network, 0.8)
        network = fully_connected(network, 1, activation="linear")
        regress = tflearn.regression(network, optimizer="rmsprop", loss="mean_square", learning_rate=0.001)

        # Training
        model = tflearn.DNN(regress)  # , session=sess)
        model.fit(
            mfcc_array,
            sdr_array,
            n_epoch=n_epochs,
            snapshot_step=1000,
            show_metric=True,
            run_id="repet_choice_{0}_epochs_take_{1}".format(n_epochs, take),
        )

        return model
예제 #8
0
def createModel(nbClasses, imageSize):
    """Build a four-stage ELU CNN classifier for square grayscale images.

    :param nbClasses: size of the softmax output layer.
    :param imageSize: width/height of the single-channel input images.
    :return: an untrained tflearn.DNN.
    """
    print("[+] Creating model...")
    convnet = input_data(shape=[None, imageSize, imageSize, 1], name='input')

    # Four conv/pool stages with doubling filter counts (64 -> 512),
    # all using Xavier initialization and ELU activations.
    for n_filters in (64, 128, 256, 512):
        convnet = conv_2d(convnet, n_filters, 2, activation='elu', weights_init="Xavier")
        convnet = max_pool_2d(convnet, 2)

    # Dense layer with 50% dropout, then the class softmax.
    convnet = fully_connected(convnet, 1024, activation='elu')
    convnet = dropout(convnet, 0.5)
    convnet = fully_connected(convnet, nbClasses, activation='softmax')
    convnet = regression(convnet, optimizer='rmsprop', loss='categorical_crossentropy')

    model = tflearn.DNN(convnet)
    print("    Model created! ✅")
    return model
예제 #9
0
def model_for_type(neural_net_type, tile_size, on_band_count):
    """Build a road/not-road classifier of the requested architecture.

    The neural_net_type can be: one_layer_relu,
                                one_layer_relu_conv,
                                two_layer_relu_conv.

    :param tile_size: width/height of the square input tiles.
    :param on_band_count: number of input channels per tile.
    :raises ValueError: if neural_net_type is not one of the three names.
    :return: an untrained tflearn.DNN.
    """
    network = tflearn.input_data(shape=[None, tile_size, tile_size, on_band_count])

    # NN architectures mirror ch. 3 of www.cs.toronto.edu/~vmnih/docs/Mnih_Volodymyr_PhD_Thesis.pdf
    if neural_net_type == "one_layer_relu":
        network = tflearn.fully_connected(network, 64, activation="relu")
    elif neural_net_type == "one_layer_relu_conv":
        network = conv_2d(network, 64, 12, strides=4, activation="relu")
        network = max_pool_2d(network, 3)
    elif neural_net_type == "two_layer_relu_conv":
        network = conv_2d(network, 64, 12, strides=4, activation="relu")
        network = max_pool_2d(network, 3)
        network = conv_2d(network, 128, 4, activation="relu")
    else:
        # Fixed: previously this branch only printed "ERROR: exiting ..." and
        # then fell through, silently building a model on the raw input layer.
        raise ValueError(
            "unknown layer type for neural net: {0}".format(neural_net_type))

    # classify as road or not road
    softmax = tflearn.fully_connected(network, 2, activation="softmax")

    # hyperparameters based on www.cs.toronto.edu/~vmnih/docs/Mnih_Volodymyr_PhD_Thesis.pdf
    momentum = tflearn.optimizers.Momentum(learning_rate=0.005, momentum=0.9, lr_decay=0.0002, name="Momentum")

    net = tflearn.regression(softmax, optimizer=momentum, loss="categorical_crossentropy")

    return tflearn.DNN(net, tensorboard_verbose=0)
def main():
    """End-to-end script: load sim_mat/SDR data, train a CNN regressor, plot.

    Relies on module-level helpers (get_generated_data, split_into_sets, plot)
    and writes nothing to disk.
    """
    pickle_folder = '../pickles_rolloff'
    pickle_folders_to_load = sorted(
        f for f in os.listdir(pickle_folder) if os.path.isdir(join(pickle_folder, f)))

    # pickle parameters
    fg_or_bg = 'background'
    sdr_type = 'sdr'
    feature = 'sim_mat'
    beat_spec_len = 432

    # training params
    n_classes = 16
    training_percent = 0.85
    testing_percent = 0.15
    validation_percent = 0.00

    # set up training, testing, & validation partitions
    print('Loading sim_mat and sdrs')
    sim_mat_array, sdr_array = get_generated_data(feature, fg_or_bg, sdr_type)
    print('sim_mat and sdrs loaded')

    print('splitting and grooming data')
    train, test, validate = split_into_sets(len(pickle_folders_to_load), training_percent,
                                            testing_percent, validation_percent)

    # Trailing axis turns each sim_mat into a single-channel image;
    # note testY deliberately stays 1-D (it is only used for plotting).
    trainX = np.expand_dims([sim_mat_array[i] for i in train], -1)
    trainY = np.expand_dims([sdr_array[i] for i in train], -1)
    testX = np.expand_dims([sim_mat_array[i] for i in test], -1)
    testY = np.array([sdr_array[i] for i in test])

    print('setting up CNN')
    # Two conv/pool stages + two dropout-regularized dense layers -> scalar SDR.
    net = input_data(shape=[None, beat_spec_len, beat_spec_len, 1])
    net = conv_2d(net, 32, 10, activation='relu', regularizer="L2")
    net = max_pool_2d(net, 2)
    net = conv_2d(net, 64, 20, activation='relu', regularizer="L2")
    net = max_pool_2d(net, 2)
    net = fully_connected(net, 128, activation='tanh')
    net = dropout(net, 0.8)
    net = fully_connected(net, 256, activation='tanh')
    net = dropout(net, 0.8)
    net = fully_connected(net, 1, activation='linear')
    sgd_head = tflearn.regression(net, optimizer='sgd', loss='mean_square', learning_rate=0.01)

    print('running CNN')
    model = tflearn.DNN(sgd_head, tensorboard_verbose=1)
    model.fit(trainX, trainY, n_epoch=10,
              snapshot_step=1000, show_metric=True, run_id='{} classes'.format(n_classes - 1))

    predicted = np.array(model.predict(testX))[:, 0]

    print('plotting')
    plot(testY, predicted)
예제 #11
0
    def generate_network(self):
        """Return a tflearn CNN network for person classification.

        Requires ``image_size`` (list), ``n_epoch`` (int), ``batch_size``
        (int) and ``person_ids`` (list) to already be set on the instance;
        also reads ``self.IMAGE_CHANNEL_NUM`` and ``self.person_num``.

        :return: the regression-wrapped tflearn network (not a DNN).
        :raises ValueError: if any required attribute has the wrong type.
        """
        print(self.image_size, self.n_epoch, self.batch_size, self.person_ids)
        print(type(self.image_size), type(self.n_epoch),
              type(self.batch_size), type(self.person_ids))
        # Validate configuration types before building the graph.
        if not isinstance(self.image_size, list) \
            or not isinstance(self.n_epoch, int) \
            or not isinstance(self.batch_size, int) \
            or not isinstance(self.person_ids, list):
            raise ValueError("Insufficient values to generate network.\n"
                             "Need (n_epoch, int), (batch_size, int),"
                             "(image_size, list), (person_ids, list).")

        # Real-time data preprocessing
        img_prep = ImagePreprocessing()
        img_prep.add_featurewise_zero_center()
        img_prep.add_featurewise_stdnorm()

        # Real-time data augmentation
        img_aug = ImageAugmentation()
        img_aug.add_random_rotation(max_angle=25.)
        img_aug.add_random_flip_leftright()

        # Convolutional network building
        network = input_data(
            shape=[None, self.image_size[0], self.image_size[1], 3],
            data_preprocessing=img_prep,
            data_augmentation=img_aug)
        # NOTE(review): image_size[0] is used as the filter COUNT and
        # IMAGE_CHANNEL_NUM as the filter SIZE here -- unusual; confirm intent.
        network = conv_2d(network, self.image_size[0], self.IMAGE_CHANNEL_NUM,
                          activation='relu')
        network = max_pool_2d(network, 2)
        network = conv_2d(network, self.image_size[0] * 2,
                          self.IMAGE_CHANNEL_NUM,
                          activation='relu')
        network = conv_2d(network, self.image_size[0] * 2,
                          self.IMAGE_CHANNEL_NUM,
                          activation='relu')
        network = max_pool_2d(network, 2)
        network = fully_connected(network, self.image_size[0] * 2**4,
                                  activation='relu')
        network = dropout(network, 0.5)
        # One softmax output per known person.
        network = fully_connected(network, self.person_num,
                                  activation='softmax')
        network = regression(network, optimizer='adam',
                             loss='categorical_crossentropy',
                             learning_rate=0.001)
        return network
예제 #12
0
 def make_core_network(network):
     """Core MNIST-style CNN: two conv/pool/LRN stages plus three dense layers.

     Reshapes the incoming flat batch to 28x28x1 images and returns the
     10-way softmax output layer (no regression head attached).
     """
     x = tflearn.reshape(network, [-1, 28, 28, 1], name="reshape")
     for n_filters in (32, 64):
         x = conv_2d(x, n_filters, 3, activation='relu', regularizer="L2")
         x = max_pool_2d(x, 2)
         x = local_response_normalization(x)
     x = fully_connected(x, 128, activation='tanh')
     x = dropout(x, 0.8)
     x = fully_connected(x, 256, activation='tanh')
     x = dropout(x, 0.8)
     return fully_connected(x, 10, activation='softmax')
예제 #13
0
def _model3():
    """Restore an AlexNet-style model from ``_path`` and evaluate it.

    Predicts on the module-level ``xTest``, dumps raw predictions to
    ``_path + ".csv"``, optionally writes a CSV / test file, and prints a
    confusion matrix, classification report and accuracy.

    Side effect: rebinds the global ``yTest`` (re-encoded via ``convert3``).
    """
    global yTest, img_aug
    tf.reset_default_graph()
    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()
    network = input_data(shape=[None, inputSize, inputSize, dim],
                             data_preprocessing=img_prep,
                             data_augmentation=img_aug)
    # AlexNet-style conv stack (must match the architecture that was trained).
    network = conv_2d(network, 96, 11, strides=4, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = conv_2d(network, 256, 5, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = conv_2d(network, 384, 3, activation='relu')
    network = conv_2d(network, 384, 3, activation='relu')
    network = conv_2d(network, 256, 3, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, len(yTest[0]), activation='softmax')
    network = regression(network, optimizer='momentum',
                         loss='categorical_crossentropy',
                         learning_rate=0.001)
    print('Model has been made!!!?')
    # Inference only: restore trained weights from _path; no fitting happens here.
    model = tflearn.DNN(network, checkpoint_path='model_densenet_cifar10',
                        max_checkpoints=10, tensorboard_verbose=0,
                        clip_gradients=0.)
    model.load(_path)
    pred = model.predict(xTest)

    # Persist raw prediction scores.
    df = pd.DataFrame(pred)
    df.to_csv(_path + ".csv")

    # NOTE(review): convert2/convert3 are defined elsewhere in this file;
    # presumably they decode one-hot/probability outputs to labels -- confirm.
    newList = pred.copy()
    newList = convert2(newList)
    if _CSV: makeCSV(newList)
    pred = convert2(pred)
    pred = convert3(pred)
    yTest = convert3(yTest)
    print(metrics.confusion_matrix(yTest, pred))
    print(metrics.classification_report(yTest, pred))
    print('Accuracy', accuracy_score(yTest, pred))
    print()
    if _wrFile: writeTest(pred)
예제 #14
0
def build_network(image_size, batch_size=None, n_channels=3):
    """Assemble a compact two-conv classifier network.

    Relies on the module-level ``img_prep``, ``img_aug`` and ``num_classes``.

    :param image_size: two-element indexable giving the input spatial dims.
    :param batch_size: leading batch dimension (None = variable).
    :param n_channels: number of input channels (default 3).
    :return: the regression-wrapped tflearn network.
    """
    net = input_data(shape=[batch_size, image_size[0], image_size[1], n_channels],
                     data_preprocessing=img_prep,
                     data_augmentation=img_aug)
    # Two conv/pool stages: 16 then 32 filters.
    for n_filters in (16, 32):
        net = conv_2d(net, n_filters, 3, activation='relu')
        net = max_pool_2d(net, 2)
    net = fully_connected(net, num_classes, activation='softmax')
    return regression(net, optimizer='adam',
                      loss='categorical_crossentropy',
                      learning_rate=0.0001)
def main():
    """Train a CNN to predict SDR values from MFCC cluster features.

    Loads pickled MFCC/SDR pairs, trains for ``n_epochs`` epochs, reports
    elapsed time, and saves the trained network under ``network_outputs/``.
    """
    pickle_folder = '../NMF/mfcc_pickles'
    pickle_folders_to_load = [f for f in os.listdir(pickle_folder) if os.path.isdir(join(pickle_folder, f))]

    fg_or_bg = 'background'
    sdr_type = 'sdr'
    feature = 'mfcc_clusters'
    beat_spec_len = 432
    n_epochs = 200
    take = 1

    # set up training, testing, & validation partitions
    mfcc_array, sdr_array = load_mfcc_and_sdrs(pickle_folders_to_load, pickle_folder,
                                                    feature, fg_or_bg, sdr_type)

    # Add a trailing channel axis so inputs match the [13, 100, 1] input layer.
    mfcc_array = np.expand_dims(mfcc_array, -1)
    sdr_array = np.expand_dims(sdr_array, -1)

    # Building convolutional network: two conv/pool stages, two dense+dropout
    # layers, and a single linear output for the regression target.
    network = input_data(shape=[None, 13, 100, 1])
    network = conv_2d(network, 32, [5, 5], activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = conv_2d(network, 64, [5, 5], activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = fully_connected(network, 128, activation='relu')
    network = dropout(network, 0.8)
    network = fully_connected(network, 256, activation='relu')
    network = dropout(network, 0.8)
    network = fully_connected(network, 1, activation='linear')
    regress = tflearn.regression(network, optimizer='rmsprop', loss='mean_square', learning_rate=0.001)

    start = time.time()
    # Training
    model = tflearn.DNN(regress)  # , session=sess)
    model.fit(mfcc_array, sdr_array, n_epoch=n_epochs,
              snapshot_step=1000, show_metric=True,
              run_id='repet_save_{0}_epochs_take_{1}'.format(n_epochs, take))
    elapsed = (time.time() - start)
    # Fixed: str + float raised TypeError (and 'seconds' lacked a leading space).
    print('Finished training after {0} seconds. Saving...'.format(elapsed))

    model_output_folder = 'network_outputs/'
    model_output_file = join(model_output_folder, 'nmf_save_{0}_epochs_take_{1}'.format(n_epochs, take))

    model.save(model_output_file)
예제 #16
0
def setup_model(checkpoint_path=None):
    """Set up a convolutional network for binary image classification.

    :param checkpoint_path: optional string prefix for model checkpoints;
        when falsy, the DNN is built without checkpointing.
    :returns: Deep Neural Network
    :rtype: tflearn.DNN

    References:
        - Machine Learning is Fun! Part 3: Deep Learning and Convolutional Neural Networks

    Links:
        - https://medium.com/@ageitgey/machine-learning-is-fun-part-3-deep-learning-and-convolutional-neural-networks-f40359318721

    """
    # Normalize the data at feed time.
    normalizer = ImagePreprocessing()
    normalizer.add_featurewise_zero_center()
    normalizer.add_featurewise_stdnorm()

    # Synthesize extra training data: left/right flips, rotations, blur.
    augmenter = ImageAugmentation()
    augmenter.add_random_flip_leftright()
    augmenter.add_random_rotation(max_angle=25.)
    augmenter.add_random_blur(sigma_max=3.)

    # Input is a 32x32 image with 3 color channels.
    net = input_data(shape=[None, 32, 32, 3],
                     data_preprocessing=normalizer,
                     data_augmentation=augmenter)
    net = conv_2d(net, 32, 3, activation='relu')
    net = max_pool_2d(net, 2)
    net = conv_2d(net, 64, 3, activation='relu')
    net = conv_2d(net, 64, 3, activation='relu')
    net = max_pool_2d(net, 2)
    net = fully_connected(net, 512, activation='relu')
    net = dropout(net, 0.5)
    net = fully_connected(net, 2, activation='softmax')
    net = regression(net, optimizer='adam',
                     loss='categorical_crossentropy',
                     learning_rate=0.001)

    # Only pass checkpoint_path through when one was supplied.
    dnn_kwargs = {'tensorboard_verbose': 3}
    if checkpoint_path:
        dnn_kwargs['checkpoint_path'] = checkpoint_path
    return tflearn.DNN(net, **dnn_kwargs)
예제 #17
0
 def block8(net, scale=1.0, activation="relu"):
     """Residual 'block8' unit: two parallel conv towers merged and added back.

     Builds a 1x1 tower and a 1x1 -> 1x3 -> 3x1 tower, concatenates them on
     the channel axis, projects back to the input depth with a 1x1 conv, and
     adds the scaled result to ``net``.

     :param net: input tensor; its channel count fixes the projection width.
     :param scale: factor applied to the residual branch before the addition.
     :param activation: tflearn activation name, a callable, or a falsy value
         for no post-addition activation.
     :raises ValueError: if ``activation`` is neither a string nor callable.
     """
     tower_conv = relu(batch_normalization(conv_2d(net, 192, 1, bias=False, activation=None, name='Conv2d_1x1')))
     tower_conv1_0 = relu(batch_normalization(conv_2d(net, 192, 1, bias=False, activation=None, name='Conv2d_0a_1x1')))
     tower_conv1_1 = relu(batch_normalization(conv_2d(tower_conv1_0, 224, [1,3], bias=False, activation=None, name='Conv2d_0b_1x3')))
     # NOTE(review): this conv omits activation=None unlike its siblings, so
     # tflearn's default activation applies -- confirm the asymmetry is intended.
     tower_conv1_2 = relu(batch_normalization(conv_2d(tower_conv1_1, 256, [3,1], bias=False, name='Conv2d_0c_3x1')))
     tower_mixed = merge([tower_conv,tower_conv1_2], mode='concat', axis=3)
     # NOTE(review): name 'Conv2d_1x1' repeats the first tower's layer name;
     # verify tflearn scoping keeps these distinct.
     tower_out = relu(batch_normalization(conv_2d(tower_mixed, net.get_shape()[3], 1, bias=False, activation=None, name='Conv2d_1x1')))
     # Scaled residual connection.
     net += scale * tower_out
     if activation:
         if isinstance(activation, str):
             net = activations.get(activation)(net)
         elif hasattr(activation, '__call__'):
             net = activation(net)
         else:
             raise ValueError("Invalid Activation.")
     return net
예제 #18
0
def convolutional_neural_network(width=5, height=6):
    """Create the neural network model.

    Args:
        width: Width of the pseudo image
        height: Height of the pseudo image

    Returns:
        net: regression-wrapped output layer of the network

    """
    net = input_data(shape=[None, width, height, 1], name='input')

    # Conv stage 1: 32 filters of size 2, then a 2x2 max-pool.
    net = conv_2d(net, 32, 2, activation='relu')
    net = max_pool_2d(net, 2)

    # Conv stage 2: 64 filters of size 2, then a 2x2 max-pool.
    net = conv_2d(net, 64, 2, activation='relu')
    net = max_pool_2d(net, 2)

    # Dense layer with dropout (keep probability 0.6).
    net = fully_connected(net, 1024, activation='relu')
    net = dropout(net, 0.6)

    # 10-way softmax output plus an Adam-optimized regression head.
    net = fully_connected(net, 10, activation='softmax')
    net = regression(net,
                     optimizer='adam',
                     learning_rate=0.01,
                     loss='categorical_crossentropy',
                     name='targets')

    return net
예제 #19
0
def get_cnn_model(checkpoint_path='cnn_servo_model', width=72, height=48, depth=3, session=None):
    """Build the steering CNN: five conv layers, five dense layers, tanh output.

    :param checkpoint_path: prefix for model checkpoints.
    :param width: input image width.
    :param height: input image height.
    :param depth: number of input channels.
    :param session: optional TF session to attach the DNN to.
    :return: an untrained tflearn.DNN.
    """
    net = input_data(shape=[None, height, width, depth], name='input')

    # Five convolutions; ReLU introduces non-linearity into training.
    for n_filters, filter_shape in ((8, [5, 3]), (12, [5, 8]), (16, [5, 16]),
                                    (24, [3, 20]), (24, [3, 24])):
        net = conv_2d(net, n_filters, filter_shape, activation='relu')

    # Four shrinking fully connected layers, each followed by dropout.
    for n_units in (256, 100, 50, 10):
        net = fully_connected(net, n_units, activation='relu')
        net = dropout(net, 0.8)

    # Single tanh output (bounded to [-1, 1]).
    net = fully_connected(net, 1, activation='tanh')

    # Mean-square regression head.
    net = regression(net, loss='mean_square', metric='accuracy',
                     learning_rate=1e-4, name='target')

    # tensorboard_verbose: 0 = nothing.
    model = tflearn.DNN(net, tensorboard_verbose=2,
                        checkpoint_path=checkpoint_path, session=session)
    return model
예제 #20
0
def do_cnn_word2vec_2d(trainX, testX, trainY, testY):
    """Train a binary CNN classifier on 2-D word2vec features and report metrics.

    Uses the module globals ``max_features`` and ``max_document_length`` for
    the input shape; prints a classification report and confusion matrix
    computed over the test split.
    """
    global max_features
    global max_document_length
    # Fixed: Python-2-only `print` statements -> print() calls (valid on 2 and 3).
    print("CNN and word2vec2d")
    y_test = testY
    # Converting labels to binary vectors
    trainY = to_categorical(trainY, nb_classes=2)
    testY = to_categorical(testY, nb_classes=2)

    # Building convolutional network
    network = input_data(shape=[None,max_document_length,max_features,1], name='input')

    network = conv_2d(network, 32, 3, activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = local_response_normalization(network)
    network = conv_2d(network, 64, 3, activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = local_response_normalization(network)
    network = fully_connected(network, 128, activation='tanh')
    network = dropout(network, 0.8)
    network = fully_connected(network, 256, activation='tanh')
    network = dropout(network, 0.8)
    network = fully_connected(network, 2, activation='softmax')
    network = regression(network, optimizer='adam', learning_rate=0.01,
                         loss='categorical_crossentropy', name='target')

    model = tflearn.DNN(network, tensorboard_verbose=0)
    model.fit(trainX, trainY,
              n_epoch=5, shuffle=True, validation_set=(testX, testY),
              show_metric=True,run_id="sms")

    y_predict_list = model.predict(testX)
    print(y_predict_list)

    # Predict class 0 when its probability exceeds 0.5, else class 1.
    y_predict = []
    for i in y_predict_list:
        print(i[0])
        if i[0] > 0.5:
            y_predict.append(0)
        else:
            y_predict.append(1)

    print(classification_report(y_test, y_predict))
    print(metrics.confusion_matrix(y_test, y_predict))
예제 #21
0
파일: models.py 프로젝트: ErwanGalline/test
def create_model(learning_rate, input_shape, nb_classes, base_path, drop=1):
    """Build a two-stage CNN classifier wrapped in a checkpointing DNN.

    :param learning_rate: Adam learning rate.
    :param input_shape: full input_data shape (including the batch axis).
    :param nb_classes: number of softmax outputs.
    :param base_path: directory under which checkpoints/step files are written.
    :param drop: value passed to both dropout layers (default 1).
    :return: an untrained tflearn.DNN.
    """
    net = input_data(shape=input_shape, name='input')
    # Two conv/pool/LRN stages: 32 then 64 filters.
    for n_filters in (32, 64):
        net = conv_2d(net, n_filters, 3, activation='relu', regularizer="L2")
        net = max_pool_2d(net, 2)
        net = local_response_normalization(net)
    net = fully_connected(net, 128, activation='tanh')
    net = dropout(net, drop)
    net = fully_connected(net, 256, activation='tanh')
    net = dropout(net, drop)
    net = fully_connected(net, nb_classes, activation='softmax')
    net = regression(net, optimizer='adam', learning_rate=learning_rate,
                     loss='categorical_crossentropy', name='target')
    return tflearn.DNN(net, tensorboard_verbose=0,
                       checkpoint_path=base_path + "/checkpoints/step")
예제 #22
0
def build_model():
    """Single-conv, single-output regression model for 128x128 grayscale input."""
    net = input_data(shape=[None, 128, 128, 1], name='input')
    net = conv_2d(net, nb_filter=2, filter_size=5, strides=1, activation='tanh')
    net = fully_connected(net, 1, activation='linear')
    net = regression(net, optimizer='adam', learning_rate=0.001,
                     loss='mean_square', name='target')

    # Checkpoints are written under checkpoints/road_model1.
    return tflearn.DNN(net, tensorboard_verbose=0,
                       checkpoint_path='checkpoints/road_model1')
예제 #23
0
파일: models.py 프로젝트: ErwanGalline/test
def build_model_2_conv(learning_rate, input_shape, nb_classes, base_path, drop):
    """Build a two-conv-stage classifier trained with SGD.

    Both conv stages use 64 filters with a rectangular [4, 16] kernel; the head
    is 128->64 ReLU dense layers with dropout `drop`, then softmax.
    Tensorboard logs and checkpoints are written under `base_path`.
    """
    net = input_data(shape=input_shape, name='input')
    # Two identical conv/pool/LRN stages.
    for _ in range(2):
        net = conv_2d(net, 64, [4, 16], activation='relu')
        net = max_pool_2d(net, 2)
        net = local_response_normalization(net)
    # Dense head with dropout after each layer.
    for width in (128, 64):
        net = fully_connected(net, width, activation='relu')
        net = dropout(net, drop)
    net = fully_connected(net, nb_classes, activation='softmax')
    net = regression(net, optimizer='sgd', learning_rate=learning_rate,
                     loss='categorical_crossentropy', name='target')
    return tflearn.DNN(net, tensorboard_verbose=3,
                       tensorboard_dir=base_path + "/tflearn_logs/",
                       checkpoint_path=base_path + "/checkpoints/step")
예제 #24
0
def main():
    """
    Trains a CNN architecture and plots the results over a validation set.
    Returns:

    """
    # Load the SDR and hist data.
    data = load_data('reverb_pan_full_sdr.txt', 'pickle/')

    # Split row indices into train/test sets (no validation split requested).
    test_percent = 0.15
    train, test, validate = split_into_sets(len(data['sdr']), 1 - test_percent,
                                            test_percent, 0)

    def _gather(key, indices):
        # Select rows by index and append a trailing channel axis.
        return np.expand_dims([data[key][i] for i in indices], -1)

    x_train = _gather('input', train)
    y_train = _gather('sdr', train)
    x_test = _gather('input', test)
    y_test = _gather('sdr', test)

    # Construct the CNN: two conv/pool stages, two dense layers with dropout,
    # and a single linear regression output.
    inp = input_data(shape=[None, 50, 50, 1], name='input')
    net = conv_2d(inp, 32, [5, 5], activation='relu', regularizer="L2")
    net = max_pool_2d(net, 2)
    net = conv_2d(net, 64, [5, 5], activation='relu', regularizer="L2")
    net = max_pool_2d(net, 2)
    net = fully_connected(net, 128, activation='tanh')
    net = dropout(net, 0.8)
    net = fully_connected(net, 256, activation='tanh')
    net = dropout(net, 0.8)
    out = fully_connected(net, 1, activation='linear')
    net = regression(out, optimizer='sgd', learning_rate=0.01, name='target',
                     loss='mean_square')

    model = tflearn.DNN(net, tensorboard_verbose=1, checkpoint_path='checkpoint.p',
                        tensorboard_dir='tmp/tflearn_logs/')

    model.fit({'input': x_train}, {'target': y_train}, n_epoch=1000,
              validation_set=(x_test, y_test), snapshot_step=10000,
              run_id='convnet_duet_3x3')

    # Compare predictions against ground truth on the held-out set.
    predicted = np.array(model.predict(x_test))[:, 0]
    plot(y_test, predicted)
예제 #25
0
def train_neural_net(convolution_patch_size,
                     bands_to_use,
                     image_size,
                     train_images,
                     train_labels,
                     test_images,
                     test_labels,
                     number_of_batches,
                     batch_size):
    """Train a small 2-class CNN on the given image arrays and return the
    model's predictions for `test_images`.

    The channel count equals the number of entries in `bands_to_use` set to 1.
    """
    # Count the enabled bands; this becomes the input channel depth.
    on_band_count = sum(1 for b in bands_to_use if b == 1)

    # Scale pixel values from [0, 255] to [-1, 1].
    train_images = (train_images.astype(numpy.float32) - 127.5) / 127.5
    test_images = (test_images.astype(numpy.float32) - 127.5) / 127.5

    # Convolutional network building.
    net = input_data(shape=[None, image_size, image_size, on_band_count])
    net = conv_2d(net, 32, convolution_patch_size, activation='relu')
    net = max_pool_2d(net, 2)
    net = conv_2d(net, 64, convolution_patch_size, activation='relu')
    net = conv_2d(net, 64, convolution_patch_size, activation='relu')
    net = max_pool_2d(net, 2)
    net = fully_connected(net, 512, activation='relu')
    net = dropout(net, 0.5)
    net = fully_connected(net, 2, activation='softmax')
    net = regression(net, optimizer='adam',
                     loss='categorical_crossentropy',
                     learning_rate=0.001)

    # batch_size was originally 96; n_epoch was originally 50.
    # Each epoch is roughly 170 steps.
    model = tflearn.DNN(net, tensorboard_verbose=0)
    model.fit(train_images, train_labels, n_epoch=int(number_of_batches / 100),
              shuffle=False, validation_set=(test_images, test_labels),
              show_metric=True, batch_size=batch_size, run_id='cifar10_cnn')

    return model.predict(test_images)
예제 #26
0
파일: train.py 프로젝트: Tairy/python-lab
def cnn():
    """Build (but do not wrap) a captcha-style classifier graph.

    Three conv/pool/batch-norm stages, two tanh dense layers with dropout,
    and a softmax over CODE_LEN * MAX_CHAR outputs. Returns the regression
    layer ready to be passed to tflearn.DNN.
    """
    net = input_data(shape=[None, IMAGE_HEIGHT, IMAGE_WIDTH, 1], name='input')
    # Feature extractor: filter counts 8 -> 16 -> 16.
    for n_filters in (8, 16, 16):
        net = conv_2d(net, n_filters, 3, activation='relu', regularizer="L2")
        net = max_pool_2d(net, 2)
        net = batch_normalization(net)
    # Two identical dense layers with dropout.
    for _ in range(2):
        net = fully_connected(net, 256, activation='tanh')
        net = dropout(net, 0.8)
    net = fully_connected(net, CODE_LEN * MAX_CHAR, activation='softmax')
    return regression(net, optimizer='adam', learning_rate=0.001,
                      loss='categorical_crossentropy', name='target')
예제 #27
0
def _model1():
    """Rebuild the trained network, load weights from `_path`, predict on the
    module-global `xTest`, and print/write evaluation reports.

    Side effects: mutates the module-global `yTest` in place (via convert3),
    writes `<_path>.csv`, and optionally extra CSV/test files.
    NOTE(review): the architecture here must match the one the checkpoint at
    `_path` was trained with, or `model.load` will fail — confirm.
    """
    global yTest, img_aug
    tf.reset_default_graph()
    # Featurewise zero-center + std normalization of inputs.
    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()
    network = input_data(shape=[None, inputSize, inputSize, dim],
                 name='input',
                 data_preprocessing=img_prep,
                 data_augmentation=img_aug)

    # Two conv/pool/LRN stages, then a tanh dense head with dropout.
    network = conv_2d(network, 32, 3, strides = 4, activation='relu')
    network = max_pool_2d(network, 2, strides=2)
    network = local_response_normalization(network)
    network = conv_2d(network, 64, 3, strides = 2, activation='relu')
    network = max_pool_2d(network, 2, strides=2)
    network = local_response_normalization(network)
    network = fully_connected(network, 128, activation='tanh')
    network = dropout(network, 0.8)
    network = fully_connected(network, 256, activation='tanh')
    network = dropout(network, 0.8)
    # Output width is taken from the one-hot width of yTest.
    network = fully_connected(network, len(yTest[0]), activation='softmax')
    network = regression(network, optimizer='adam', learning_rate=0.001,
                 loss='categorical_crossentropy', name='target')

    # Load pre-trained weights and run inference.
    model = tflearn.DNN(network, tensorboard_verbose=3)
    model.load(_path)
    pred = model.predict(xTest)

    # Dump raw prediction probabilities next to the checkpoint.
    df = pd.DataFrame(pred)
    df.to_csv(_path + ".csv")

    # Convert probabilities to labels (convert2/convert3 are project helpers).
    newList = pred.copy()
    newList = convert2(newList)
    if _CSV: makeCSV(newList)
    pred = convert2(pred)
    pred = convert3(pred)
    yTest = convert3(yTest)
    print(metrics.confusion_matrix(yTest, pred))
    print(metrics.classification_report(yTest, pred))
    print('Accuracy', accuracy_score(yTest, pred))
    print()
    if _wrFile: writeTest(pred)
예제 #28
0
def build_model():
    """Build a single-conv regression CNN (tanh output) trained with SGD.

    NOTE(review): `learning_rate` is read from module scope — confirm it is
    defined where this function is used.
    """
    init = tf.truncated_normal_initializer(stddev=1e-4)

    net = input_data(shape=[None, 128, 128, 1], name='input')
    net = conv_2d(net, nb_filter=2, filter_size=5, strides=2,
                  activation='tanh', weights_init=init)
    net = fully_connected(net, 1, activation='tanh', weights_init=init)
    net = regression(net, optimizer='sgd', learning_rate=learning_rate,
                     loss='mean_square', name='target')

    return tflearn.DNN(net, tensorboard_verbose=0,
                       checkpoint_path='checkpoints/road_model1')
예제 #29
0
def do_cnn_word2vec_2d_345(trainX, testX, trainY, testY):
    """Train a 3/4/5-kernel multi-branch CNN (Kim-style text CNN) on 2-D
    word2vec features and print a classification report.

    Python 2 code (statement-form print). Labels are binarized: index 0 is
    treated as the positive "class 0" probability.
    """
    global max_features
    global max_document_length
    print "CNN and word2vec_2d_345"
    # Keep the raw labels for scoring before one-hot encoding.
    y_test = testY

    trainY = to_categorical(trainY, nb_classes=2)
    testY = to_categorical(testY, nb_classes=2)

    # Building convolutional network
    network = input_data(shape=[None,max_document_length,max_features,1], name='input')
    network = tflearn.embedding(network, input_dim=1, output_dim=128,validate_indices=False)
    # Three parallel conv branches with kernel sizes 3, 4 and 5, concatenated.
    branch1 = conv_2d(network, 128, 3, padding='valid', activation='relu', regularizer="L2")
    branch2 = conv_2d(network, 128, 4, padding='valid', activation='relu', regularizer="L2")
    branch3 = conv_2d(network, 128, 5, padding='valid', activation='relu', regularizer="L2")
    network = merge([branch1, branch2, branch3], mode='concat', axis=1)
    network = tf.expand_dims(network, 2)
    # NOTE(review): tflearn ships `global_max_pool`; confirm `global_max_pool_2d`
    # exists in this codebase (likely a local helper or a typo).
    network = global_max_pool_2d(network)
    network = dropout(network, 0.8)
    network = fully_connected(network, 2, activation='softmax')
    network = regression(network, optimizer='adam', learning_rate=0.001,
                         loss='categorical_crossentropy', name='target')
    # Training
    model = tflearn.DNN(network, tensorboard_verbose=0)
    model.fit(trainX, trainY,
              n_epoch=5, shuffle=True, validation_set=(testX, testY),
              show_metric=True, batch_size=100,run_id="sms")

    y_predict_list = model.predict(testX)
    print y_predict_list

    # Threshold the class-0 probability at 0.5 to recover hard labels.
    y_predict = []
    for i in y_predict_list:
        print  i[0]
        if i[0] > 0.5:
            y_predict.append(0)
        else:
            y_predict.append(1)

    print(classification_report(y_test, y_predict))
    print metrics.confusion_matrix(y_test, y_predict)
예제 #30
0
def stop_dnn():
    """Build a 2-class CNN graph for 32x32 RGB inputs with featurewise
    preprocessing and flip/rotation augmentation.

    Returns the regression layer ready to be wrapped in tflearn.DNN.
    """
    img_pre_processing = ImagePreprocessing()

    # Augmentation: random horizontal flips and rotations up to 10 degrees.
    img_aug = ImageAugmentation()
    img_aug.add_random_flip_leftright()
    img_aug.add_random_rotation(max_angle=10.)

    network = input_data(shape=[None, 32, 32, 3],
                         data_preprocessing=img_pre_processing,
                         data_augmentation=img_aug)
    network = conv_2d(network, 32, 3, activation='relu')
    network = max_pool_2d(network, 2)
    network = conv_2d(network, 64, 3, activation='relu')
    network = conv_2d(network, 64, 3, activation='relu')
    network = max_pool_2d(network, 2)
    network = fully_connected(network, 512, activation='relu')
    network = dropout(network, 0.5)
    network = fully_connected(network, 2, activation='softmax')
    network = regression(network, optimizer='adam', loss='categorical_crossentropy',
                         learning_rate=0.001)
    # The original contained an unreachable image-loading fragment after this
    # return (a paste artifact referencing an undefined `infilename`); removed.
    return network


# Define our network architecture:

# Input is a 256x256 image with 3 color channels (red, green and blue)
network = input_data(shape=[None, 256, 256, 3])
# Step 1: Convolution
network = conv_2d(
    network, 12, 5, strides=1,
    activation='relu')  # args: network, number of filters, filter side length

# Step 2: Max pooling
network = max_pool_2d(network, 3)

# Step 3: Convolution again (strided, so it also downsamples)
network = conv_2d(network, 48, 4, strides=2, activation='relu')

# Step 4: Max pooling again
network = max_pool_2d(network, 3)

# Step 5: Convolution yet again
network = conv_2d(network, 96, 4, strides=2, activation='relu')

# (snippet is truncated here in the source page)
예제 #32
0
    def videoFetch(self, thread_id):
        """Extract this thread's 1/8 slice of frames from the video at
        `self.path`, then run the explicit-content CNN over each frame.

        Frames classified as explicit (argmax index 0) have their absolute
        frame numbers appended to the shared `explicit_frames` list.

        NOTE(review): if `cap.read()` keeps failing before `count` reaches
        `end_frame`, the while loop never breaks — confirm inputs are readable.
        """
        path = self.path
        img_width = 175
        img_height = 150
        testing_data = []
        start_time = time.time()
        cap = cv2.VideoCapture(path)
        # Divide the video into 8 equal fragments; this thread owns fragment
        # number `thread_id`.
        frame_count = cap.get(cv2.CAP_PROP_FRAME_COUNT) - 1
        fragment_size = frame_count / 8
        init_frame = math.floor(fragment_size * thread_id)
        print(
            "Thread {} starting Frame Extraction from {}th frame. Please wait for sometime."
            .format(thread_id, init_frame))
        end_frame = math.floor(fragment_size * (thread_id + 1) - 1)
        count = init_frame
        # Seek to the first frame of this thread's fragment.
        cap.set(1, init_frame)
        print("Frame Extraction in Progress by Thread {}".format(thread_id))
        while cap.isOpened():
            ret, frame = cap.read()
            if (ret):
                # Resize and convert to grayscale to match the model input.
                img = cv2.resize(frame, (img_width, img_height))
                img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
                img_num = "%#05d" % (count + 1)
                testing_data.append([np.array(img), img_num])
            count = count + 1
            if (count == end_frame):
                end_time = time.time()
                cap.release()
                print(
                    "Thread {} finished extracting frames.\n{} frames found by Thread {}"
                    .format(thread_id, end_frame - init_frame, thread_id))
                print("It took {} seconds for Frame Extraction by Thread {}".
                      format(end_time - start_time, thread_id))
                break
        # np.save('/home/ghost/Desktop/ecc/test_data_{}.npy'.format(thread_id), testing_data)
        IMG_SIZE1 = 175
        IMG_SIZE2 = 150
        LR = 0.0001
        MODEL_NAME = 'ECR-{}-{}.model'.format(LR, '2conv-basic')
        # Build the classifier graph (must match the trained checkpoint).
        tf.reset_default_graph()
        convnet = input_data(shape=[None, IMG_SIZE1, IMG_SIZE2, 1],
                             name='input')
        convnet = conv_2d(convnet, 32, 2, activation='relu')
        convnet = max_pool_2d(convnet, 2)
        convnet = conv_2d(convnet, 32, 2, activation='relu')
        convnet = max_pool_2d(convnet, 2)
        convnet = conv_2d(convnet, 32, 2, activation='relu')
        convnet = max_pool_2d(convnet, 2)
        convnet = conv_2d(convnet, 64, 2, activation='relu')
        convnet = max_pool_2d(convnet, 2)
        convnet = dropout(convnet, 0.3)
        convnet = conv_2d(convnet, 64, 2, activation='relu')
        convnet = max_pool_2d(convnet, 2)
        convnet = dropout(convnet, 0.3)
        convnet = conv_2d(convnet, 64, 2, activation='relu')
        convnet = max_pool_2d(convnet, 2)
        convnet = dropout(convnet, 0.3)
        convnet = conv_2d(convnet, 128, 2, activation='relu')
        convnet = max_pool_2d(convnet, 2)
        convnet = conv_2d(convnet, 128, 2, activation='relu')
        convnet = max_pool_2d(convnet, 2)
        convnet = conv_2d(convnet, 128, 2, activation='relu')
        convnet = max_pool_2d(convnet, 2)
        convnet = flatten(convnet)

        convnet = fully_connected(convnet, 256, activation='relu')
        convnet = dropout(convnet, 0.3)
        convnet = fully_connected(convnet, 512, activation='relu')
        convnet = dropout(convnet, 0.3)
        convnet = fully_connected(convnet, 1024, activation='relu')
        convnet = fully_connected(convnet, 2, activation='softmax')
        convnet = regression(convnet,
                             optimizer='adam',
                             learning_rate=LR,
                             loss='binary_crossentropy',
                             name='targets')
        model = tflearn.DNN(convnet, tensorboard_dir='log')
        # Restore trained weights if a checkpoint exists next to the script.
        if os.path.exists('{}.meta'.format(MODEL_NAME)):
            model.load(MODEL_NAME)
            print('Explicit Content Censor Loaded by Thread {}'.format(
                thread_id))
        explicit = 0
        non_explicit = 0
        print('Video Censoring started by Thread {}'.format(thread_id))
        for num, data in enumerate(testing_data[:]):
            # explicit: [1,0]
            # normal: [0,1]
            img_data = data[0]
            img_no = data[1]
            data = img_data.reshape(IMG_SIZE1, IMG_SIZE2, 1)
            model_out = model.predict([data])[0]
            # Map the slice-local index back to an absolute frame number.
            actual_frame_num = init_frame + num
            if (np.argmax(model_out) == 0):
                explicit_frames.append(actual_frame_num)
예제 #33
0
파일: main.py 프로젝트: schinaid/012
# Build the convnet. Fixes over the original fragment:
#  - 'ativation' -> 'activation' (would raise TypeError: unexpected keyword),
#  - 'convert' -> 'convnet' (NameError: 'convert' was never defined),
#  - removed stray positional width '2' passed to fully_connected,
#  - dropped the commented-out duplicate layer stack carrying the same typos.
convnet = input_data(shape=[None, IMG_SIZE, IMG_SIZE, 1], name='input')

convnet = conv_2d(convnet, 32, 2, activation='relu')
convnet = max_pool_2d(convnet, 2)

convnet = conv_2d(convnet, 64, 2, activation='relu')
convnet = max_pool_2d(convnet, 2)

convnet = fully_connected(convnet, 1024, activation='relu')
convnet = dropout(convnet, 0.8)

convnet = fully_connected(convnet, 2, activation='softmax')
convnet = regression(convnet,
                     optimizer='adam',
                     learning_rate=LR,
                     loss='categorical_crossentropy',
                     name='target')
예제 #34
0
        testX.append(
            np.fromfile(SOURCEPATH + '\\' + files[i - 1]).reshape(
                [y_dim, x_dim, 1]))
        testY.append(target[i - 1])

#trainX, validX, trainY, validY = train_test_split(trainX, trainY, train_size = 800, random_state = randomseed)

trainX = np.array(trainX)
#trainX = trainX.reshape([-1, y_dim, x_dim, 1])
trainY = np.array(trainY)

# Building convolutional network
network = input_data(shape=[None, y_dim, x_dim, 1], name='input')
network = conv_2d(network,
                  16,
                  3,
                  strides=1,
                  activation='relu',
                  regularizer="L2")
#network = max_pool_2d(network, 2)
#network = dropout(network, 0.9)
network = local_response_normalization(network)
network = conv_2d(network,
                  32,
                  5,
                  strides=2,
                  activation='relu',
                  regularizer="L2")
#network = max_pool_2d(network, 2)
#network = dropout(network, 0.9)
network = local_response_normalization(network)
network = conv_2d(network,
예제 #35
0
    def build_network(self):
        """Build a VGG19-style network over SIZE_FACE inputs and wrap it in a
        tflearn.DNN stored on self.model (self.network holds the graph)."""
        print('[+] Building VGG')
        print('[-] COLOR: ' + str(COLOR))
        print('[-] BATH_SIZE' + str(BATH_SIZE_CONSTANT))
        print('[-] EXPERIMENTAL_LABEL' + EXPERIMENTO_LABEL)

        net = input_data(shape=[None, SIZE_FACE, SIZE_FACE, COLOR])

        # VGG stages: (number of 3x3 conv layers, filter width) per stage,
        # each stage ending in a 2x2 stride-2 max pool.
        for n_convs, width in ((2, 64), (2, 128), (4, 256), (4, 512), (4, 512)):
            for _ in range(n_convs):
                net = conv_2d(net, width, 3, activation='relu')
            net = max_pool_2d(net, 2, strides=2)

        # Classifier head: two 4096-wide ReLU layers with dropout.
        for _ in range(2):
            net = fully_connected(net, 4096, activation='relu')
            net = dropout(net, 0.5)

        net = fully_connected(net, len(EMOTIONS), activation='softmax')

        self.network = regression(net,
                                  optimizer='rmsprop',
                                  loss='categorical_crossentropy',
                                  learning_rate=0.001)

        self.model = tflearn.DNN(self.network,
                                 checkpoint_path=CHECKPOINT_DIR,
                                 max_checkpoints=1,
                                 tensorboard_dir=TENSORBOARD_DIR,
                                 tensorboard_verbose=1)
예제 #36
0
    def create_train_data():
    training_data = []
    for img in tqdm(os.listdir(TRAIN_DIR)):
        label = label_img(img)
        path = os.path.join(TRAIN_DIR,img)
        img = cv2.imread(path,cv2.IMREAD_GRAYSCALE)
        img = cv2.resize(img, (IMG_SIZE,IMG_SIZE))
        training_data.append([np.array(img),np.array(label)])
    shuffle(training_data)
    np.save('train_data.npy', training_data)
    return training_data
    
    def process_test_data():
    testing_data = []
    for img in tqdm(os.listdir(TEST_DIR)):
        path = os.path.join(TEST_DIR,img)
        img_num = img.split('.')[0]
        img = cv2.imread(path,cv2.IMREAD_GRAYSCALE)
        img = cv2.resize(img, (IMG_SIZE,IMG_SIZE))
        testing_data.append([np.array(img), img_num])
        
    shuffle(testing_data)
    np.save('test_data.npy', testing_data)
    return testing_data
    
    train_data = create_train_data()
# If you have already created the dataset:
#train_data = np.load('train_data.npy')

import tflearn
import tensorflow as tf
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.estimator import regression
tf.reset_default_graph()

# Five conv/pool stages (filters 32-64-128-64-32, 5x5 kernels, 5x5 pools).
convnet = input_data(shape=[None, IMG_SIZE, IMG_SIZE, 1], name='input')

convnet = conv_2d(convnet, 32, 5, activation='relu')
convnet = max_pool_2d(convnet, 5)

convnet = conv_2d(convnet, 64, 5, activation='relu')
convnet = max_pool_2d(convnet, 5)

convnet = conv_2d(convnet, 128, 5, activation='relu')
convnet = max_pool_2d(convnet, 5)

convnet = conv_2d(convnet, 64, 5, activation='relu')
convnet = max_pool_2d(convnet, 5)

convnet = conv_2d(convnet, 32, 5, activation='relu')
convnet = max_pool_2d(convnet, 5)

# Dense head with dropout, then a 2-way (cat/dog) softmax.
convnet = fully_connected(convnet, 1024, activation='relu')
convnet = dropout(convnet, 0.8)

convnet = fully_connected(convnet, 2, activation='softmax')
convnet = regression(convnet, optimizer='adam', learning_rate=LR, loss='categorical_crossentropy', name='targets')

model = tflearn.DNN(convnet, tensorboard_dir='log')



# Resume from a previous checkpoint if one exists at this (hard-coded) path.
if os.path.exists('C:/Users/H/Desktop/KaggleDogsvsCats/{}.meta'.format(MODEL_NAME)):
    model.load(MODEL_NAME)
    print('model loaded!')

# Hold out the last 500 samples for validation.
train = train_data[:-500]
test = train_data[-500:]

X = np.array([i[0] for i in train]).reshape(-1,IMG_SIZE,IMG_SIZE,1)
Y = [i[1] for i in train]

test_x = np.array([i[0] for i in test]).reshape(-1,IMG_SIZE,IMG_SIZE,1)
test_y = [i[1] for i in test]

model.fit({'input': X}, {'targets': Y}, n_epoch=10, validation_set=({'input': test_x}, {'targets': test_y}),
    snapshot_step=500, show_metric=True, run_id=MODEL_NAME)

model.save(MODEL_NAME)

import matplotlib.pyplot as plt

# if you need to create the data:
#test_data = process_test_data()
# if you already have some saved:
test_data = np.load('test_data.npy')

# Visual sanity check: classify and plot the first 12 test images.
fig=plt.figure()

for num,data in enumerate(test_data[:12]):
    # cat: [1,0]
    # dog: [0,1]

    img_num = data[1]
    img_data = data[0]

    y = fig.add_subplot(3,4,num+1)
    orig = img_data
    data = img_data.reshape(IMG_SIZE,IMG_SIZE,1)
    #model_out = model.predict([data])[0]
    model_out = model.predict([data])[0]

    if np.argmax(model_out) == 1: str_label='Dog'
    else: str_label='Cat'

    y.imshow(orig,cmap='gray')
    plt.title(str_label)
    y.axes.get_xaxis().set_visible(False)
    y.axes.get_yaxis().set_visible(False)
plt.show()

# Kaggle submission: column 'label' holds the dog probability (index 1).
with open('submission_file.csv','w') as f:
    f.write('id,label\n')

with open('submission_file.csv','a') as f:
    for data in tqdm(test_data):
        img_num = data[1]
        img_data = data[0]
        orig = img_data
        data = img_data.reshape(IMG_SIZE,IMG_SIZE,1)
        model_out = model.predict([data])[0]
        f.write('{},{}\n'.format(img_num,model_out[1]))
예제 #37
0
        # save the Standardizer
        joblib.dump(
            sc, 'Saved_Models/CNN_n_epochs_{}/standard.pkl'.format(n_epoch))

        X_train_sd = sc.transform(X_train)
        X_test_sd = sc.transform(X_test)

        X_test_sd = X_test_sd.reshape([-1, 13, 107, 1])
        X_train_sd = X_train_sd.reshape([-1, 13, 107, 1])

        # Building convolutional network
        network = input_data(shape=[None, 13, 107, 1], name='input')
        network = conv_2d(network,
                          32,
                          3,
                          activation='relu',
                          regularizer="L2",
                          name='conv1')
        network = max_pool_2d(network, 2, name='max1')
        network = fully_connected(network,
                                  128,
                                  activation='tanh',
                                  name='dense1')
        network = dropout(network, 0.8, name='drop1')
        network = fully_connected(network, 2, activation='softmax')
        network = regression(network,
                             optimizer='adam',
                             learning_rate=0.01,
                             loss='categorical_crossentropy',
                             name='target')
예제 #38
0
import tflearn
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.normalization import local_response_normalization
from tflearn.layers.estimator import regression

import tflearn.datasets.oxflower17 as oxflower17
# Oxford 17-category flowers dataset, resized to AlexNet's 227x227 input.
X, Y = oxflower17.load_data(one_hot=True, resize_pics=(227, 227))

# Classic AlexNet: 5 conv layers with 3 max-pool/LRN stages, then three
# fully-connected layers ending in a 17-way softmax.
network = input_data(shape=[None, 227, 227, 3])
network = conv_2d(network, 96, 11, strides=4, activation='relu')
network = max_pool_2d(network, 3, strides=2)
network = local_response_normalization(network)
network = conv_2d(network, 256, 5, activation='relu')
network = max_pool_2d(network, 3, strides=2)
network = local_response_normalization(network)
network = conv_2d(network, 384, 3, activation='relu')
network = conv_2d(network, 384, 3, activation='relu')
network = conv_2d(network, 256, 3, activation='relu')
network = max_pool_2d(network, 3, strides=2)
network = local_response_normalization(network)
network = fully_connected(network, 4096, activation='tanh')
network = dropout(network, 0.5)
network = fully_connected(network, 4096, activation='tanh')
network = dropout(network, 0.5)
network = fully_connected(network, 17, activation='softmax')
network = regression(network, optimizer='momentum',
                     loss='categorical_crossentropy', learning_rate=0.001)

model = tflearn.DNN(network, checkpoint_path='model_alexnet', 
                    max_checkpoints=1, tensorboard_verbose=2)
예제 #39
0
def getPredictedClass():
    """Classify the saved 'Temp.png' gesture frame with the module-level
    `model` and return (predicted class index, confidence ratio).

    The ratio is the max score divided by the sum of the six class scores.
    """
    frame = cv2.imread('Temp.png')
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    prediction = model.predict([gray.reshape(89, 100, 1)])
    scores = prediction[0]
    total = (scores[0] + scores[1] + scores[2] +
             scores[3] + scores[4] + scores[5])
    return np.argmax(prediction), np.amax(prediction) / total


# Model defined: six conv/pool stages over 89x100 grayscale gesture frames.
# (The snippet is truncated in the source page after the last pooling layer.)
tf.reset_default_graph()
convnet = input_data(shape=[None, 89, 100, 1], name='input')
convnet = conv_2d(convnet, 32, 2, activation='relu')
convnet = max_pool_2d(convnet, 2)
convnet = conv_2d(convnet, 64, 2, activation='relu')
convnet = max_pool_2d(convnet, 2)

convnet = conv_2d(convnet, 128, 2, activation='relu')
convnet = max_pool_2d(convnet, 2)

convnet = conv_2d(convnet, 256, 2, activation='relu')
convnet = max_pool_2d(convnet, 2)

convnet = conv_2d(convnet, 256, 2, activation='relu')
convnet = max_pool_2d(convnet, 2)

convnet = conv_2d(convnet, 128, 2, activation='relu')
convnet = max_pool_2d(convnet, 2)
예제 #40
0
def network1(train, train_amount):
    """Split `train` into train/validation slices, build a VGG-like CNN, and
    train it for 12 epochs, saving the result under s.MODEL_NAME.

    Args:
        train: list of (image, one-hot label) pairs.
        train_amount: number of samples; the first 11/12 are used for training
            and the remainder for validation.

    Side effects: rebinds the module-global `model` and writes a checkpoint.
    """
    global model
    # region SPLIT DATA FOR TRAIN/VALIDATION
    train_amount = int(train_amount * 5.5 / 6)

    x_train = np.array([i[0] for i in train[:train_amount]
                        ]).reshape(-1, s.IMG_SIZE, s.IMG_SIZE, 1)
    # Scale pixels to [0, 1].
    x_train = x_train / 255.0
    y_train = [i[1] for i in train[:train_amount]]

    x_validation = np.array([i[0] for i in train[train_amount:]
                             ]).reshape(-1, s.IMG_SIZE, s.IMG_SIZE, 1)
    x_validation = x_validation / 255.0
    y_validation = [i[1] for i in train[train_amount:]]
    # endregion

    # region NETWORK
    # Zero-center with a precomputed dataset mean.
    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center(mean=[0.4735053442384178])

    network = input_data(shape=[None, s.IMG_SIZE, s.IMG_SIZE, 1],
                         name='input',
                         data_preprocessing=img_prep)

    network = conv_2d(network, 32, 3, activation='relu', scope='conv1_1')
    network = conv_2d(network, 64, 3, activation='relu', scope='conv1_2')
    network = max_pool_2d(network, 2, strides=2, name='maxpool_1')

    network = conv_2d(network, 128, 3, activation='relu', scope='conv2_1')
    network = max_pool_2d(network, 2, strides=2, name='maxpool_2')

    network = conv_2d(network, 128, 3, activation='relu', scope='conv3_1')
    network = max_pool_2d(network, 2, strides=2, name='maxpool_3')

    network = conv_2d(network, 256, 3, activation='relu', scope='conv4_1')
    network = max_pool_2d(network, 2, strides=2, name='maxpool_4')

    network = fully_connected(network, 1024, activation='relu', scope='fc5')
    network = dropout(network, 0.5, name='dropout_1')

    network = fully_connected(network, 1024, activation='relu', scope='fc6')
    network = dropout(network, 0.5, name='dropout_2')

    network = fully_connected(network,
                              s.len_animals,
                              activation='softmax',
                              scope='fc7')

    network = regression(network,
                         optimizer='adam',
                         loss='categorical_crossentropy',
                         learning_rate=s.LR,
                         name='targets')

    model = tflearn.DNN(network, tensorboard_verbose=0, tensorboard_dir='log')
    # endregion

    # Resume from an existing checkpoint if present.
    if os.path.exists('{}.meta'.format(s.MODEL_NAME)):
        model.load(s.MODEL_NAME)
        print('Model loaded')

    # region TRAIN1
    model.fit(x_train,
              y_train,
              n_epoch=12,
              validation_set=({
                  'input': x_validation
              }, {
                  'targets': y_validation
              }),
              shuffle=True,
              snapshot_epoch=True,
              show_metric=True,
              batch_size=100,
              run_id=s.MODEL_NAME)
    # endregion

    # region SAVE
    model.save(s.MODEL_NAME)
    print('Network trained and saved as {0}'.format(s.MODEL_NAME))
예제 #41
0
basically one_hot makes 0 = [1,0,0,0,0,0,0,0,0,0]
1 = [0,1,0,0,0,0,0,0,0,0]
...
...
...

"""

X = X.reshape([-1, 28, 28, 1])  #reshaping 784 pixel image into flat 28*28
test_x = test_x.reshape([-1, 28, 28,
                         1])  #reshaping 784 pixel image into flat 28*28

convnet = input_data(shape=[None, 28, 28, 1], name='input')

convnet = conv_2d(convnet, 32, 2, activation='relu')  #Convolutional Layer 1
convnet = max_pool_2d(convnet, 2)  #Pooling Layer 1

convnet = conv_2d(convnet, 64, 2, activation='relu')  #Convolutional Layer 2
convnet = max_pool_2d(convnet, 2)  #Pooling Layer 2

convnet = fully_connected(convnet, 1024,
                          activation='relu')  #Fully Connected Layer
convnet = dropout(convnet, 0.8)  # basically 80% of neurons will fire

convnet = fully_connected(convnet, 10,
                          activation='softmax')  #Output Layer with Softmax
convnet = regression(convnet,
                     optimizer='adam',
                     learning_rate=0.01,
                     loss='categorical_crossentropy',
X, Y = image_preloader(train_file,
                       image_shape=(height, width),
                       mode='file',
                       categorical_labels=True,
                       normalize=True)
testX, testY = image_preloader(test_file,
                               image_shape=(height, width),
                               mode='file',
                               categorical_labels=True,
                               normalize=True)

network = input_data(shape=[None, width, height], name='input')
network = tflearn.layers.core.reshape(network, [-1, width, height, 1],
                                      name='Reshape')

network = conv_2d(network, 64, 1, activation='relu', regularizer="L2")
network = batch_normalization(network)
network = max_pool_2d(network, 2)
network = fully_connected(network, 64, activation='tanh')
network = dropout(network, 0.8)
network = fully_connected(network, 2, activation='softmax')
# Build neural network and train
network = regression(network,
                     optimizer='adam',
                     learning_rate=0.001,
                     loss='categorical_crossentropy',
                     name='target')
model = tflearn.DNN(network, tensorboard_verbose=0)

model.fit(X,
          Y,
예제 #43
0
# coding=utf-8
import tflearn
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.normalization import local_response_normalization
from tflearn.layers.estimator import regression
import tflearn.datasets.oxflower17 as oxflower17

# AlexNet implemented with a third-party library (tflearn).
# Oxford flowers dataset: 17 flower categories,
# 80 images per category, with large variation in pose and lighting.
X, Y = oxflower17.load_data(one_hot=True, resize_pics=(227, 227))

# Build the AlexNet model.
network = input_data(shape=[None, 227, 227, 3])  # input: ?x227x227x3
network = conv_2d(network, 96, 11, strides=4,
                  activation='relu')  # filter: 96x96x3x11, stride 4, output: ?x57x57x96
network = max_pool_2d(network, 3, strides=2)  # kernel: 3x3, stride 2, output: ?x29x29x96
network = local_response_normalization(network)
network = conv_2d(network, 256, 5,
                  activation='relu')  # filter: 5x5x96x256, stride 1, output: ?x29x29x256
network = max_pool_2d(network, 3, strides=2)  # kernel: 3x3, stride 2, output: ?x15x15x256
network = local_response_normalization(network)
network = conv_2d(network, 384, 3,
                  activation='relu')  # filter: 3x3x256x384, stride 1, output: ?x15x15x384
network = conv_2d(network, 384, 3,
                  activation='relu')  # filter: 3x3x384x384, stride 1, output: ?x15x15x384
network = conv_2d(network, 256, 3,
                  activation='relu')  # filter: 3x3x384x256, stride 1, output: ?x15x15x256
network = max_pool_2d(network, 3, strides=2)  # kernel: 3x3, stride 2, output: ?x8x8x256
network = local_response_normalization(network)
network = fully_connected(network, 4096, activation='tanh')  # output: ?x4096
예제 #44
0
    def build_network(self):
        """Assemble a small AlexNet-style CNN for facial-emotion recognition.

        Builds the graph into ``self.network``, wraps it in ``self.model``
        (a ``tflearn.DNN`` that checkpoints under ``SAVE_DIRECTORY``) and
        finally restores saved weights via ``self.load_model()``.

        Layer plan (input is a SIZE_FACE x SIZE_FACE grayscale image):
          conv 5x5x64 -> maxpool 3/2 -> conv 5x5x64 -> maxpool 3/2
          -> conv 4x4x128 -> dropout(keep=0.3) -> fc 3072
          -> fc len(EMOTIONS) softmax -> momentum / categorical cross-entropy.
        """
        print('[+] Building CNN')

        # Accumulate the graph in a local, then publish it once at the end.
        net = input_data(shape=[None, SIZE_FACE, SIZE_FACE, 1])

        # Three convolutional stages; the first two are followed by
        # overlapping 3x3/stride-2 max pooling.
        net = conv_2d(net, 64, 5, activation='relu')
        net = max_pool_2d(net, 3, strides=2)
        net = conv_2d(net, 64, 5, activation='relu')
        net = max_pool_2d(net, 3, strides=2)
        net = conv_2d(net, 128, 4, activation='relu')

        # Aggressive dropout: only 30% of activations are kept here.
        net = dropout(net, 0.3)

        # Classifier head: one wide hidden layer, then softmax over classes.
        net = fully_connected(net, 3072, activation='relu')
        net = fully_connected(net, len(EMOTIONS), activation='softmax')

        # Training op: momentum optimizer with categorical cross-entropy.
        net = regression(
            net,
            optimizer='momentum',
            loss='categorical_crossentropy'
        )

        self.network = net
        # max_checkpoints=1 keeps only the most recent model file on disk.
        self.model = tflearn.DNN(
            self.network,
            checkpoint_path=SAVE_DIRECTORY + '/emotion_recognition',
            max_checkpoints=1,
            tensorboard_verbose=2
        )
        self.load_model()
예제 #45
0
# Real-time image preprocessing: per-dataset mean subtraction and
# standard-deviation normalization applied on the fly.
img_prep = tflearn.ImagePreprocessing()
img_prep.add_featurewise_zero_center()
img_prep.add_featurewise_stdnorm()

# Real-time data augmentation: random horizontal flips and rotations
# of up to 20 degrees.
img_aug = tflearn.ImageAugmentation()
img_aug.add_random_flip_leftright()
img_aug.add_random_rotation(max_angle=20.0)

# Building 'VGG Network' — stacks of 3x3 convs with 2x2/stride-2 pooling.
# NOTE(review): this snippet is truncated; the classifier head is not visible.
network = input_data(shape=[None, 224, 224, 3],
                     data_preprocessing=img_prep,
                     data_augmentation=img_aug)

# Stage 1: two 64-filter convs.
network = conv_2d(network, 64, 3, activation='relu')
network = conv_2d(network, 64, 3, activation='relu')
network = max_pool_2d(network, 2, strides=2)

# Stage 2: two 128-filter convs.
network = conv_2d(network, 128, 3, activation='relu')
network = conv_2d(network, 128, 3, activation='relu')
network = max_pool_2d(network, 2, strides=2)

# Stage 3: three 256-filter convs.
network = conv_2d(network, 256, 3, activation='relu')
network = conv_2d(network, 256, 3, activation='relu')
network = conv_2d(network, 256, 3, activation='relu')
network = max_pool_2d(network, 2, strides=2)

# Stage 4: 512-filter convs (pooling for this stage not visible here).
network = conv_2d(network, 512, 3, activation='relu')
network = conv_2d(network, 512, 3, activation='relu')
network = conv_2d(network, 512, 3, activation='relu')
# NOTE(review): img_prep is created earlier in the file (not visible here).
img_prep.add_featurewise_stdnorm()

# Real-time data augmentation: flips in both axes plus rotations up to 90
# degrees — presumably the inputs are orientation-invariant; confirm.
img_aug = ImageAugmentation()
img_aug.add_random_flip_leftright()
img_aug.add_random_flip_updown()
img_aug.add_random_rotation(max_angle=90.)

acc = Accuracy()

# 4-channel 28x28 input (e.g. RGBA); augmentation only, no preprocessing.
network = input_data(shape=[None, 28, 28, 4], data_augmentation=img_aug)
# Conv layers: two stride-1 3x3 convs with L2 regularization and Xavier
# initialization; the pooling layers are deliberately commented out.
network = conv_2d(network,
                  64,
                  3,
                  strides=1,
                  activation='relu',
                  regularizer='L2',
                  name='conv1_3_3_1',
                  weights_init='Xavier')
#network = max_pool_2d(network, 2, strides=2)
network = conv_2d(network,
                  128,
                  3,
                  strides=1,
                  activation='relu',
                  regularizer='L2',
                  name='conv1_3_3_2',
                  weights_init='Xavier')
#network = max_pool_2d(network, 2, strides=2)
#network = conv_2d(network, 64, 3, strides=1, activation='relu', name = 'conv1_3_3_3')
#network = max_pool_2d(network, 2, strides=2)
    def _build_graph(self, resize_by_pad=False):
        """Build a VGG-16 classifier over 224x224x3 inputs into ``self.model``.

        The five convolutional stages all use 3x3 ReLU convs followed by
        2x2/stride-2 max pooling; the head is two fc-4096 + dropout layers
        and a 17-way softmax, trained with RMSProp / cross-entropy.
        Note: ``resize_by_pad`` is accepted for interface compatibility but
        is not used inside this method.
        """
        net = input_data(shape=[None, 224, 224, 3])

        # Conv stages as (filter count, number of conv layers per stage).
        for n_filters, n_convs in ((64, 2), (128, 2), (256, 3), (512, 3), (512, 3)):
            for _ in range(n_convs):
                net = conv_2d(net, n_filters, 3, activation='relu')
            net = max_pool_2d(net, 2, strides=2)

        # Fully-connected head with 50% dropout between the dense layers.
        net = fully_connected(net, 4096, activation='relu')
        net = dropout(net, 0.5)
        net = fully_connected(net, 4096, activation='relu')
        net = dropout(net, 0.5)
        net = fully_connected(net, 17, activation='softmax')

        net = regression(net, optimizer='rmsprop',
                         loss='categorical_crossentropy',
                         learning_rate=0.001)
        # Keep only the latest checkpoint under 'model_vgg'.
        self.model = tflearn.DNN(net, checkpoint_path='model_vgg',
                                 max_checkpoints=1, tensorboard_verbose=0)
예제 #48
0
# Resample each target map with bicubic interpolation (order=3) so every
# sample becomes a 16x40x1 array; zoom factors 16/13 and 10/11.
Aux = []
for i in range(len(Y_ab)):
    aux = np.asarray(Y_ab[i][0])
    aux = scipy.ndimage.zoom(aux, (1.230769231, 0.909090909), order=3)
    aux = (np.array(aux.reshape(16, 40, 1)))
    Aux.append(aux)

Y_ab = Aux

# 'arq' is a log file object opened earlier in the file (not visible here).
print('iniciando treino!', file=arq)

print(np.array(Y_ab).shape, np.array(X_ab).shape)

# Encoder half of a convolutional autoencoder; pooling shrinks only the
# width (16x200 -> 16x40 -> 16x20 -> 8x10 per the inline shape notes).
# NOTE(review): `encoder.get_shape` is printed without calling it, so these
# lines log the bound method object, not the shape — likely meant get_shape().
encoder = input_data(shape=(None, 16, 200, 1))
encoder = tflearn.layers.core.dropout(encoder, 0.8)
encoder = conv_2d(encoder, 16, 7, activation='crelu')
print(encoder.get_shape, file=arq)
#encoder = dropout(encoder, 0.8)
encoder = max_pool_2d(encoder, [1, 5])

# 16x40
print(encoder.get_shape, file=arq)
encoder = conv_2d(encoder, 16, 7, activation='crelu')
encoder = max_pool_2d(encoder, [1, 2])
#encoder = tflearn.layers.core.dropout (encoder,0.8)
# 16x20
print(encoder.get_shape, file=arq)
encoder = conv_2d(encoder, 8, 7, activation='crelu')
encoder = max_pool_2d(encoder, [2, 2])
#encoder = local_response_normalization(encoder)
# 8x10
예제 #49
0
#gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.8)
#sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))

# Load the speaker labels, one name per line of spkrs_list.txt.
# Fixes: the file handle was opened with open() and never closed (and the
# variable shadowed the `file` builtin) — a `with` block closes it reliably.
speakers = []
#speakers = data.get_speakers()
with open('spkrs_list.txt', 'r') as spkr_file:
    for line in spkr_file:
        # Strip the trailing newline; names themselves contain no '\n'.
        speakers.append(line.replace('\n', ''))
number_classes = len(speakers)
print("speakers", speakers)

# Building 'AlexNet' (reduced variant: fewer filters, batch normalization in
# place of the commented-out local response normalization).
network = input_data(shape=[None, 227, 227, 3])
network = conv_2d(network, 48, 9, strides=4, activation='relu')  # 9x9, stride 4
network = max_pool_2d(network, 3, strides=2)
network = tflearn.batch_normalization(network)
#network = local_response_normalization(network)
network = conv_2d(network, 128, 3, activation='relu')
network = conv_2d(network, 128, 3, activation='relu')
network = max_pool_2d(network, 3, strides=2)
network = tflearn.batch_normalization(network)
#network = local_response_normalization(network)
network = conv_2d(network, 256, 3, activation='relu')
network = conv_2d(network, 192, 3, activation='relu')
network = conv_2d(network, 192, 3, activation='relu')
network = max_pool_2d(network, 3, strides=2)
network = tflearn.batch_normalization(network)
#network = local_response_normalization(network)
# First dense layer of the head (no activation argument: tflearn's default).
network = fully_connected(network, 4096)
예제 #50
0
# Image transformations: featurewise normalization plus flip/rotation/crop
# augmentation for 64x64 RGB inputs.
img_prep = ImagePreprocessing()
img_prep.add_featurewise_zero_center()
img_prep.add_featurewise_stdnorm()
img_aug = ImageAugmentation()
img_aug.add_random_flip_leftright()
img_aug.add_random_rotation(max_angle=25.)
img_aug.add_random_crop([64, 64], padding=4)

# Define network — this is the stem of a GoogLeNet/Inception-style graph
# (7x7/2 conv, pool, LRN, 1x1 reduce, 3x3 conv); truncated in this view.
network = input_data(shape=[None, 64, 64, 3],
                     data_preprocessing=img_prep,
                     data_augmentation=img_aug)
conv1_7_7 = conv_2d(network,
                    64,
                    7,
                    strides=2,
                    activation='relu',
                    name='conv1_7_7_s2')
pool1_3_3 = max_pool_2d(conv1_7_7, 3, strides=2)
pool1_3_3 = local_response_normalization(pool1_3_3)
conv2_3_3_reduce = conv_2d(pool1_3_3,
                           64,
                           1,
                           activation='relu',
                           name='conv2_3_3_reduce')
conv2_3_3 = conv_2d(conv2_3_3_reduce,
                    192,
                    3,
                    activation='relu',
                    name='conv2_3_3')
conv2_3_3 = local_response_normalization(conv2_3_3)
예제 #51
0
def get_network_architecture(image_width, image_height, number_of_classes, learning_rate):
    """Build a VGG-like grayscale CNN and return it ready for training.

    Five identical stages — two (conv + batch-norm) pairs, average pooling,
    then dropout — with 16/32/64/128/256 filters, followed by a flatten,
    an fc-512 layer and a softmax output.  Each layer's output shape is
    printed to stdout as it is added, exactly as the original did.

    Args:
        image_width / image_height: spatial size of the single-channel input.
        number_of_classes: size of the softmax output layer.
        learning_rate: passed to both the Adam optimizer and regression().

    Returns:
        The tflearn graph with the regression training op attached.

    Note: relies on module-level ``img_prep`` / ``img_aug`` (defined
    elsewhere in this file) for preprocessing and augmentation.
    """
    number_of_channels = 1

    def _log(label, net):
        # Reproduces the original per-layer trace: label padded to 22 chars
        # with dots, then the symbolic output shape.
        print('  {}: {}'.format(label.ljust(22, '.'), net.shape))

    def _conv_stage(net, n_filters, kernel, stage):
        # One repeated stage: two conv(+BN) pairs, avg-pool, dropout.
        # Layer names continue the original numbering (Conv2D_1..10, etc.).
        for i in (1, 2):
            idx = (stage - 1) * 2 + i
            net = conv_2d(net, n_filters, (kernel, kernel), strides=1,
                          padding='same', activation='relu', regularizer='L2',
                          name='Conv2D_{}'.format(idx))
            _log('Conv2D', net)
            net = batch_normalization(net, name='BatchNormalization_{}'.format(idx))
            _log('BatchNormalization', net)
        net = avg_pool_2d(net, (2, 2), strides=2, padding='same',
                          name='AvgPool2D_{}'.format(stage))
        _log('AvgPool2D', net)
        net = dropout(net, 0.5, name='Dropout_{}'.format(stage))
        _log('Dropout', net)
        return net

    network = input_data(
        shape=[None, image_width, image_height, number_of_channels],
        data_preprocessing=img_prep,
        data_augmentation=img_aug,
        name='InputData'
    )

    print('\nNetwork architecture:')
    _log('InputData', network)

    # Filter counts grow while kernel sizes shrink deeper in the network.
    for stage, (n_filters, kernel) in enumerate(
            ((16, 7), (32, 5), (64, 3), (128, 3), (256, 3)), start=1):
        network = _conv_stage(network, n_filters, kernel, stage)

    network = flatten(network, name='Flatten')
    _log('Flatten', network)

    network = fully_connected(network, 512, activation='relu',
                              name='FullyConnected_1')
    _log('FullyConnected', network)
    network = dropout(network, 0.5, name='Dropout_6')
    _log('Dropout', network)

    network = fully_connected(network, number_of_classes, activation='softmax',
                              name="FullyConnected_Final")
    _log('FullyConnected_Final', network)

    # Alternatives tried previously: SGD, RMSProp, Momentum optimizers and
    # R2 / weighted-R2 / top-k metrics.
    optimizer = Adam(learning_rate=learning_rate, beta1=0.9, beta2=0.999,
                     epsilon=1e-08, use_locking=False, name='Adam')
    metric = Accuracy(name='Accuracy')

    # learning_rate is also passed here, as in the original, even though the
    # optimizer object already carries it.
    network = regression(
        network,
        optimizer=optimizer,
        loss='categorical_crossentropy',
        metric=metric,
        learning_rate=learning_rate,
        name='Regression'
    )

    return network
예제 #52
0
        return training_data


# Build the training set (create_train_data is defined earlier in the file).
train_data = create_train_data()

import tflearn
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.estimator import regression

import tensorflow as tf
# Clear any graph left over from a previous run in the same process.
tf.reset_default_graph()

# Two conv/pool blocks over IMG_SIZE x IMG_SIZE grayscale images; note the
# unusually large pooling kernel of 5.
convnet = input_data(shape=[None, IMG_SIZE, IMG_SIZE, 1], name='input')

convnet = conv_2d(convnet, 32, 5, activation='relu')
convnet = max_pool_2d(convnet, 5)

convnet = conv_2d(convnet, 64, 5, activation='relu')
convnet = max_pool_2d(convnet, 5)

convnet = fully_connected(convnet, 1024, activation='relu')
convnet = dropout(convnet, 0.8)  # keep probability 0.8

# Binary softmax output; Adam with LEARNING_RATE defined elsewhere.
convnet = fully_connected(convnet, 2, activation='softmax')
convnet = regression(convnet,
                     optimizer='adam',
                     learning_rate=LEARNING_RATE,
                     loss='categorical_crossentropy',
                     name='targets')
예제 #53
0
File: tflearn_usage.py  Project: carbo-T/TF
# -*- coding:utf-8 -*-

import tflearn
from tflearn.layers.core import input_data, fully_connected
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.estimator import regression

import tflearn.datasets.mnist as mnist

# Load MNIST (one-hot labels) and add the channel dimension for conv_2d.
trainX, trainY, testX, testY = mnist.load_data(data_dir='../MNIST_data',
                                               one_hot=True)
trainX = trainX.reshape([-1, 28, 28, 1])
testX = testX.reshape([-1, 28, 28, 1])

# LeNet-style network: two conv/pool blocks then two dense layers.
net = input_data(shape=[None, 28, 28, 1], name='input')
net = conv_2d(net, 6, 5, activation='relu')
net = max_pool_2d(net, 2)
net = conv_2d(net, 16, 5, activation='relu')
net = max_pool_2d(net, 2)
net = fully_connected(net, 500, activation='relu')
# BUG FIX: the output layer previously used activation='relu'; with
# 'categorical_crossentropy' the predictions must be a probability
# distribution, so the final activation must be softmax (relu outputs can
# be 0, making log(y_pred) diverge during training).
net = fully_connected(net, 10, activation='softmax')

# Plain SGD training op over cross-entropy loss.
net = regression(net,
                 optimizer='sgd',
                 learning_rate=0.01,
                 loss='categorical_crossentropy')
model = tflearn.DNN(net, tensorboard_verbose=0)
model.fit(trainX,
          trainY,
          n_epoch=20,
          validation_set=([testX, testY]),
          show_metric=True)
예제 #54
0
        images_temp = data['train']
        labels_temp = data['train_labels']
    image_array = np.vstack((image_array, images_temp))
    label_array = np.vstack((label_array, labels_temp))

# Drop the placeholder first row used to seed the vstack accumulators
# (image_array/label_array are built earlier in the file).
X = image_array[1:, :]
Y = label_array[1:, :]

# 9% held out for testing.
train, test, train_labels, test_labels = train_test_split(X, Y, test_size=0.09)

# Reshape flat rows into 28x28 single-channel images.
train = train.reshape([-1, 28, 28, 1])
test = test.reshape([-1, 28, 28, 1])

# Small two-block CNN with a 3-way softmax output.
cnn = input_data(shape=[None, 28, 28, 1], name='input')

cnn = conv_2d(cnn, 32, 2, activation='relu')
cnn = max_pool_2d(cnn, 2)

cnn = conv_2d(cnn, 64, 2, activation='relu')
cnn = max_pool_2d(cnn, 2)

cnn = fully_connected(cnn, 28, activation='relu')
cnn = dropout(cnn, 0.8)  # keep probability 0.8

cnn = fully_connected(cnn, 3, activation='softmax')
cnn = regression(cnn,
                 optimizer='adam',
                 learning_rate=0.01,
                 loss='categorical_crossentropy',
                 name='targets')
예제 #55
0
def deepLearning(img_test_dir='D:\\code\\16462\\static\\preprocessed_images',
                 model_name='D:\\code\\16462\\deeplearning\\Processed_PA-0.001-8tryconv-basic-project-50.model'):
    """Classify chest X-ray images as 'Cardiomegaly' / 'No Cardiomegaly'.

    Loads a pre-trained tflearn model, resizes every image found in
    ``img_test_dir`` to 50x50 grayscale, and returns one label string per
    image, in ``os.listdir`` order.

    Args (new, with backward-compatible defaults matching the previous
    hard-coded paths):
        img_test_dir: directory of pre-processed input images.
        model_name: path of the saved tflearn model to load.

    Returns:
        list[str]: 'Cardiomegaly' or 'No Cardiomegaly' per image.

    Side effects: writes 'new_img_test_data_trainDown.npy' in the working
    directory, as the original did.
    """
    import os as os
    import cv2
    import numpy as np

    img_size = 50
    lr = 1e-3  # learning rate baked into the saved model's graph

    import tflearn
    from tflearn.layers.conv import conv_2d, max_pool_2d
    from tflearn.layers.core import input_data, dropout, fully_connected
    from tflearn.layers.estimator import regression

    import tensorflow as tf

    tf.reset_default_graph()

    # Graph must match the one the checkpoint was trained with:
    # eight alternating conv(5x5)/max-pool(2) blocks with 32/64 filters.
    convnet = input_data(shape=[None, img_size, img_size, 1], name='input')
    for n_filters in (32, 64, 32, 64, 32, 64, 32, 64):
        convnet = conv_2d(convnet, n_filters, 5, activation='relu')
        convnet = max_pool_2d(convnet, 2)

    convnet = fully_connected(convnet, 1024, activation='relu')
    convnet = dropout(convnet, 0.5)

    convnet = fully_connected(convnet, 2, activation='softmax')
    convnet = regression(convnet,
                         optimizer='adam',
                         learning_rate=lr,
                         loss='categorical_crossentropy',
                         name='targets')

    model = tflearn.DNN(convnet, tensorboard_dir='log')
    model.load(model_name)

    def create_img_test_data():
        # Read every image as grayscale, resize to img_size x img_size and
        # persist the batch as an .npy file (kept for parity with training).
        img_testing_data = []
        for img in os.listdir(img_test_dir):
            path = os.path.join(img_test_dir, img)
            img = cv2.resize(cv2.imread(path, cv2.IMREAD_GRAYSCALE),
                             (img_size, img_size))
            img_testing_data.append([np.array(img)])

        np.save('new_img_test_data_trainDown.npy', img_testing_data)
        return img_testing_data

    create_img_test_data()
    test_img = np.load('new_img_test_data_trainDown.npy')

    result = []
    for data in test_img:
        sample = data[0].reshape(img_size, img_size, 1)
        model_out = model.predict([sample])[0]

        # Class index 1 is 'No Cardiomegaly' ([0, 1]); index 0 is
        # 'Cardiomegaly' ([1, 0]) — this matches the training labelling.
        if np.argmax(model_out) == 1:
            str_label = 'No Cardiomegaly'
        else:
            str_label = 'Cardiomegaly'

        result.append(str_label)

    return result
예제 #56
0
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.estimator import regression
from utils import Dataset, draw_heatmap_with_test_data, test_accuracy, draw_heatmap_with_realtime, \
    draw_double_cross_heatmap

# Hyper-parameters: the heatmap is split into 12 squares, each square being
# its own class; inputs are 67x100 single-channel frames.
num_square = 12
row = 67
col = 100
block_size = 3
n_classes = num_square
n_epoch = 50
learning_rate = 0.0001

# Six stacked conv(2x2)/max-pool(2) blocks, all with 64 filters.
# NOTE(review): snippet is truncated after the sixth conv layer.
cnn2 = input_data(shape=[None, row, col, 1], name='input2')

cnn2 = conv_2d(cnn2, 64, 2, activation='relu')
cnn2 = max_pool_2d(cnn2, 2)

cnn2 = conv_2d(cnn2, 64, 2, activation='relu')
cnn2 = max_pool_2d(cnn2, 2)

cnn2 = conv_2d(cnn2, 64, 2, activation='relu')
cnn2 = max_pool_2d(cnn2, 2)

cnn2 = conv_2d(cnn2, 64, 2, activation='relu')
cnn2 = max_pool_2d(cnn2, 2)

cnn2 = conv_2d(cnn2, 64, 2, activation='relu')
cnn2 = max_pool_2d(cnn2, 2)

cnn2 = conv_2d(cnn2, 64, 2, activation='relu')
예제 #57
0
# NOTE(review): img_prep and model_path are defined earlier (not visible).
img_prep.add_featurewise_stdnorm()

# Real-time data augmentation: horizontal flips, rotations up to 25 degrees.
img_aug = ImageAugmentation()
img_aug.add_random_flip_leftright()
img_aug.add_random_rotation(max_angle=25.)

# Convolutional network building
# Hyper params:
_learning_rate = 0.001
_dropout = 0.5

# Binary classifier over small 20x20 RGB patches.
network = input_data(shape=[None, 20, 20, 3],
                     data_preprocessing=img_prep,
                     data_augmentation=img_aug)
network = conv_2d(network, 32, 3, activation='relu')
network = max_pool_2d(network, 2)
network = conv_2d(network, 64, 3, activation='relu')
network = conv_2d(network, 64, 3, activation='relu')
network = max_pool_2d(network, 2)
network = fully_connected(network, 512, activation='relu')
network = dropout(network, _dropout)
network = fully_connected(network, 2, activation='softmax')
network = regression(network,
                     optimizer='adam',
                     loss='categorical_crossentropy',
                     learning_rate=_learning_rate)
model = tflearn.DNN(network, tensorboard_dir='log', tensorboard_verbose=0)

# load the trained model weights for inference
model.load(model_path)
예제 #58
0
        image_num = image.split()[0]
        image = cv2.resize(cv2.imread(path,cv2.IMREAD_GRAYSCALE),(imageSize,imageSize))
        testingData.append([numpy.array(image),image_num])
    numpy.save('testing_data.npy',testingData)
    return testingData


# Build the training set (createTrainData is defined earlier in the file).
trainingData = createTrainData()

import tflearn
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.estimator import regression

# Two conv/pool blocks on imageSize x imageSize grayscale input, dense head,
# binary softmax output; Adam with the module-level LearningRate.
NeuralNet = input_data(shape=[None, imageSize, imageSize, 1], name='input')
NeuralNet = conv_2d(NeuralNet, 32, 2, activation='relu')
NeuralNet = max_pool_2d(NeuralNet, 2)
NeuralNet = conv_2d(NeuralNet, 64, 2, activation='relu')
NeuralNet = max_pool_2d(NeuralNet, 2)
NeuralNet = fully_connected(NeuralNet, 1024, activation='relu')
NeuralNet = dropout(NeuralNet, 0.8)
NeuralNet = fully_connected(NeuralNet, 2, activation='softmax')
NeuralNet = regression(NeuralNet, optimizer='adam', learning_rate=LearningRate, loss='categorical_crossentropy', name='targets')
model = tflearn.DNN(NeuralNet, tensorboard_dir='log')


# Shuffle, then hold out the last 100 samples for evaluation.
shuffle(trainingData)
train = trainingData[:-100]
test = trainingData[-100:]

X = numpy.array([i[0] for i in train]).reshape(-1,imageSize, imageSize, 1)
예제 #59
0
img_distortion = ImageAugmentation()

# only flip left/right for shape training; add a light random blur
img_distortion.add_random_flip_leftright()
img_distortion.add_random_blur(sigma_max=1.)

###
### network architecture
###
# Three conv(5x5, 16 filters)/max-pool(2) blocks plus two fc-128 layers.
# NOTE(review): snippet is truncated before the output layer.
print("setting up network")
network = input_data(shape=[None, 64, 64, 1],
                     data_preprocessing=img_preprocessor,
                     data_augmentation=img_distortion)

# convolution block 1
network = conv_2d(network, 16, 5, activation='relu')
# max pooling 1
network = max_pool_2d(network, 2)
# convolution block 2
network = conv_2d(network, 16, 5, activation='relu')
# max pooling 2
network = max_pool_2d(network, 2)
# convolution block 3
network = conv_2d(network, 16, 5, activation='relu')
# max pooling 3
network = max_pool_2d(network, 2)
# fully-connected
network = fully_connected(network, 128, activation='relu')
# fully-connected
network = fully_connected(network, 128, activation='relu')
def main():
    """Train the crop-disease CNN on one fold of a 5-fold split and save it.

    Builds a 5-conv-block tflearn network over RGB images of shape
    (IMG_SIZE, IMG_SIZE, 3), trains for 8 epochs on fold 1 of the data,
    and saves the trained model.

    Side effects: creates/truncates ``results.txt``, writes tensorboard
    logs under ``log/``, and writes the model files to disk.
    """
    # Fix: a context manager guarantees results.txt is closed even if
    # training raises; the original open()/close() pair leaked the handle
    # on any exception.  NOTE(review): nothing is ever written to the file
    # — the open still truncates it, which is preserved for parity.
    with open("results.txt", "w+"):
        # create_training_data() is defined elsewhere in this file;
        # presumably it returns (examples, per-class indices) — TODO confirm.
        get_data = create_training_data()
        train_data = get_data[0]
        data_indeces = get_data[1]

        # --- network: 5 conv/pool blocks + 1024-unit FC head ---
        convnet = input_data(shape=[None, IMG_SIZE, IMG_SIZE, 3], name='input')

        convnet = conv_2d(convnet, 32, 3, activation='relu')
        convnet = max_pool_2d(convnet, 3)

        convnet = conv_2d(convnet, 64, 3, activation='relu')
        convnet = max_pool_2d(convnet, 3)

        convnet = conv_2d(convnet, 128, 3, activation='relu')
        convnet = max_pool_2d(convnet, 3)

        convnet = conv_2d(convnet, 32, 3, activation='relu')
        convnet = max_pool_2d(convnet, 3)

        convnet = conv_2d(convnet, 64, 3, activation='relu')
        convnet = max_pool_2d(convnet, 3)

        convnet = fully_connected(convnet, 1024, activation='relu')
        convnet = dropout(convnet, 0.8)  # tflearn dropout: keep probability 0.8

        # 19 output classes (one per disease category — TODO confirm).
        convnet = fully_connected(convnet, 19, activation='softmax')
        convnet = regression(convnet,
                             optimizer='adam',
                             learning_rate=LR,
                             loss='categorical_crossentropy',
                             name='targets')

        model = tflearn.DNN(convnet, tensorboard_dir='log')

        # Only fold 0 (reported as "FOLD 1") of the 5-fold scheme is run here.
        k_fold = 5
        fold_number = 0

        MODEL_NEW_NAME = 'jycropdisease-fold-new{}-{}-{}.model'.format(
            fold_number + 1, LR, '2conv-basic')

        data = train_data
        print("======= FOLD %d =======" % (fold_number + 1))
        split_data = split_training_data(data, data_indeces, k_fold, fold_number)

        train = split_data['train_data']
        test = split_data['test_data']

        # Reshape the per-sample feature lists into NHWC tensors for tflearn.
        X = np.array([i[0] for i in train]).reshape(-1, IMG_SIZE, IMG_SIZE, 3)
        Y = [i[1] for i in train]

        test_x = np.array([i[0] for i in test]).reshape(-1, IMG_SIZE, IMG_SIZE, 3)
        test_y = [i[1] for i in test]

        model.fit({'input': X}, {'targets': Y},
                  n_epoch=8,
                  validation_set=({'input': test_x}, {'targets': test_y}),
                  snapshot_step=1000,
                  show_metric=True,
                  run_id=MODEL_NEW_NAME)

        model.save(MODEL_NEW_NAME)